net/hns3: fix vector Rx burst limitation

Currently, the driver uses the macro HNS3_DEFAULT_RX_BURST, whose value
is 32, to limit the vector Rx burst size; as a result, the burst size
can't exceed 32.

This patch fixes the problem by supporting larger burst sizes: requests
bigger than HNS3_DEFAULT_RX_BURST are served by looping over the vector
receive routine. It also adjusts HNS3_DEFAULT_RX_BURST to 64, as it
performs better than 32.

Fixes: a3d4f4d291d7 ("net/hns3: support NEON Rx")
Fixes: 952ebacce4f2 ("net/hns3: support SVE Rx")
Cc: stable@dpdk.org

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
commit 2d408d0609 (parent cc9dbd9aac)
Chengwen Feng, 2021-04-30 14:28:50 +08:00; committed by Ferruh Yigit
4 changed files with 58 additions and 15 deletions
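
The fix, applied to both the NEON and SVE paths below, follows the
common chunked-burst pattern: a request of up to HNS3_DEFAULT_RX_BURST
packets is served by a single vector burst as before, while a larger
request repeats the bounded burst routine until it is satisfied or a
short burst signals that the ring has no more completed descriptors.
A minimal standalone sketch of that pattern (MAX_BURST, recv_burst_t
and recv_pkts_chunked are illustrative stand-ins, not the driver's
actual API):

#include <stdint.h>

#define MAX_BURST 64 /* per-call limit of the bounded vector routine */

/*
 * Stand-in for a bounded burst-receive routine such as
 * hns3_recv_burst_vec(): fills in at most MAX_BURST packets and
 * returns how many were actually received.
 */
typedef uint16_t (*recv_burst_t)(void *rxq, void **pkts, uint16_t n);

/*
 * Chunked-burst receive: call the bounded routine repeatedly until the
 * caller's request is satisfied or a short burst reports an empty ring.
 */
static uint16_t
recv_pkts_chunked(void *rxq, void **pkts, uint16_t nb_pkts,
                  recv_burst_t recv_burst)
{
        uint16_t nb_rx = 0;

        while (nb_pkts > 0) {
                uint16_t n = nb_pkts < MAX_BURST ? nb_pkts : MAX_BURST;
                uint16_t ret = recv_burst(rxq, &pkts[nb_rx], n);

                nb_rx += ret;
                nb_pkts -= ret;
                if (ret < n) /* short burst: ring drained, stop early */
                        break;
        }

        return nb_rx;
}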

drivers/net/hns3/hns3_rxtx.h

@@ -20,7 +20,7 @@
 #define HNS3_DEFAULT_TX_RS_THRESH 32
 #define HNS3_TX_FAST_FREE_AHEAD 64

-#define HNS3_DEFAULT_RX_BURST 32
+#define HNS3_DEFAULT_RX_BURST 64
 #if (HNS3_DEFAULT_RX_BURST > 64)
 #error "PMD HNS3: HNS3_DEFAULT_RX_BURST must <= 64\n"
 #endif
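
The guard above reflects how errors are reported: hns3_recv_burst_vec()
and its SVE counterpart flag bad packets in a uint64_t bitmask, one bit
per packet, so a single burst can cover at most 64 descriptors. An
equivalent build-time check could be written as follows (a sketch using
C11 _Static_assert, not the driver's code):

#include <limits.h>
#include <stdint.h>

#define HNS3_DEFAULT_RX_BURST 64 /* value after this patch */

/* One error-mask bit per packet: the burst must fit in a uint64_t. */
_Static_assert(HNS3_DEFAULT_RX_BURST <= sizeof(uint64_t) * CHAR_BIT,
               "HNS3_DEFAULT_RX_BURST must <= 64");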

drivers/net/hns3/hns3_rxtx_vec.c

@@ -108,14 +108,13 @@ hns3_recv_pkts_vec(void *__restrict rx_queue,
 {
        struct hns3_rx_queue *rxq = rx_queue;
        struct hns3_desc *rxdp = &rxq->rx_ring[rxq->next_to_use];
-       uint64_t bd_err_mask; /* bit mask indicate whick pkts is error */
+       uint64_t pkt_err_mask; /* bit mask indicate whick pkts is error */
        uint16_t nb_rx;

-       nb_pkts = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
-       nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, HNS3_DEFAULT_DESCS_PER_LOOP);
-
        rte_prefetch_non_temporal(rxdp);

+       nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, HNS3_DEFAULT_DESCS_PER_LOOP);
+
        if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
                hns3_rxq_rearm_mbuf(rxq);
@@ -128,10 +127,31 @@ hns3_recv_pkts_vec(void *__restrict rx_queue,
        rte_prefetch0(rxq->sw_ring[rxq->next_to_use + 2].mbuf);
        rte_prefetch0(rxq->sw_ring[rxq->next_to_use + 3].mbuf);

-       bd_err_mask = 0;
-       nb_rx = hns3_recv_burst_vec(rxq, rx_pkts, nb_pkts, &bd_err_mask);
-       if (unlikely(bd_err_mask))
-               nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, bd_err_mask);
+       if (likely(nb_pkts <= HNS3_DEFAULT_RX_BURST)) {
+               pkt_err_mask = 0;
+               nb_rx = hns3_recv_burst_vec(rxq, rx_pkts, nb_pkts,
+                                           &pkt_err_mask);
+               nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, pkt_err_mask);
+               return nb_rx;
+       }
+
+       nb_rx = 0;
+       while (nb_pkts > 0) {
+               uint16_t ret, n;
+
+               n = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
+               pkt_err_mask = 0;
+               ret = hns3_recv_burst_vec(rxq, &rx_pkts[nb_rx], n,
+                                         &pkt_err_mask);
+               nb_pkts -= ret;
+               nb_rx += hns3_rx_reassemble_pkts(&rx_pkts[nb_rx], ret,
+                                                pkt_err_mask);
+               if (ret < n)
+                       break;
+
+               if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
+                       hns3_rxq_rearm_mbuf(rxq);
+       }

        return nb_rx;
 }
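
Two details of the new loop are worth noting. A short burst (ret < n)
means the ring has no more completed descriptors, so the loop exits
early rather than spinning. And the rearm threshold is re-checked on
every iteration, because a request larger than HNS3_DEFAULT_RX_BURST
can consume more descriptors than the single check at function entry
accounts for.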

drivers/net/hns3/hns3_rxtx_vec.h

@@ -71,6 +71,9 @@ hns3_rx_reassemble_pkts(struct rte_mbuf **rx_pkts,
        uint16_t count, i;
        uint64_t mask;

+       if (likely(pkt_err_mask == 0))
+               return nb_pkts;
+
        count = 0;
        for (i = 0; i < nb_pkts; i++) {
                mask = ((uint64_t)1u) << i;
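
Moving the no-error check into hns3_rx_reassemble_pkts() itself lets
both callers drop their former if (unlikely(bd_err_mask)) test: in the
common error-free case the function now returns nb_pkts immediately
instead of scanning the mask bit by bit.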

drivers/net/hns3/hns3_rxtx_vec_sve.c

@@ -292,12 +292,11 @@ hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
 {
        struct hns3_rx_queue *rxq = rx_queue;
        struct hns3_desc *rxdp = &rxq->rx_ring[rxq->next_to_use];
-       uint64_t bd_err_mask; /* bit mask indicate whick pkts is error */
+       uint64_t pkt_err_mask; /* bit mask indicate whick pkts is error */
        uint16_t nb_rx;

        rte_prefetch_non_temporal(rxdp);

-       nb_pkts = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
        nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);

        if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
@@ -309,10 +308,31 @@ hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
        hns3_rx_prefetch_mbuf_sve(&rxq->sw_ring[rxq->next_to_use]);

-       bd_err_mask = 0;
-       nb_rx = hns3_recv_burst_vec_sve(rxq, rx_pkts, nb_pkts, &bd_err_mask);
-       if (unlikely(bd_err_mask))
-               nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, bd_err_mask);
+       if (likely(nb_pkts <= HNS3_DEFAULT_RX_BURST)) {
+               pkt_err_mask = 0;
+               nb_rx = hns3_recv_burst_vec_sve(rxq, rx_pkts, nb_pkts,
+                                               &pkt_err_mask);
+               nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, pkt_err_mask);
+               return nb_rx;
+       }
+
+       nb_rx = 0;
+       while (nb_pkts > 0) {
+               uint16_t ret, n;
+
+               n = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
+               pkt_err_mask = 0;
+               ret = hns3_recv_burst_vec_sve(rxq, &rx_pkts[nb_rx], n,
+                                             &pkt_err_mask);
+               nb_pkts -= ret;
+               nb_rx += hns3_rx_reassemble_pkts(&rx_pkts[nb_rx], ret,
+                                                pkt_err_mask);
+               if (ret < n)
+                       break;
+
+               if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
+                       hns3_rxq_rearm_mbuf_sve(rxq);
+       }

        return nb_rx;
 }
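
The SVE path mirrors the NEON change exactly, differing only in its
per-loop descriptor alignment (HNS3_SVE_DEFAULT_DESCS_PER_LOOP) and in
the burst and rearm helpers it calls (hns3_recv_burst_vec_sve() and
hns3_rxq_rearm_mbuf_sve()).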