net/bnxt: fix Rx burst size constraint

The burst receive function should return all packets currently
present in the receive ring, up to the requested burst size.
Update the vector mode receive functions accordingly.

Fixes: 398358341419 ("net/bnxt: support NEON")
Fixes: bc4a000f2f53 ("net/bnxt: implement SSE vector mode")
Cc: stable@dpdk.org

Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
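
For context, callers commonly treat a short return from rte_eth_rx_burst() as
"ring drained for now" and back off; with the old cap, the vector paths could
return fewer packets than requested even though more were ready, breaking that
assumption. A minimal caller-side sketch of the idiom (port/queue setup omitted;
MAX_BURST of 64 is just an example value larger than the driver's per-call cap,
and freeing the mbufs stands in for real processing):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define MAX_BURST 64	/* example burst size larger than the old per-call cap */

static void
poll_queue(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[MAX_BURST];
	uint16_t nb_rx, i;

	do {
		/* With this fix a single call may return up to MAX_BURST
		 * packets; previously the bnxt vector paths returned at most
		 * RTE_BNXT_MAX_RX_BURST even if more were in the ring.
		 */
		nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, MAX_BURST);

		for (i = 0; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]);	/* placeholder for real work */
	} while (nb_rx == MAX_BURST);	/* short return: ring drained for now */
}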
File 1 of 2: NEON vector Rx path

@@ -158,9 +158,8 @@ descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4],
 	vst1q_u32((uint32_t *)&mbuf[3]->rx_descriptor_fields1, tmp);
 }
 
-uint16_t
-bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-		   uint16_t nb_pkts)
+static uint16_t
+recv_burst_vec_neon(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 	struct bnxt_rx_queue *rxq = rx_queue;
 	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
@@ -185,9 +184,6 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
 		bnxt_rxq_rearm(rxq, rxr);
 
-	/* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
-	nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);
-
 	cons = raw_cons & (cp_ring_size - 1);
 	mbcons = (raw_cons / 2) & (rx_ring_size - 1);
@@ -305,6 +301,27 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx_pkts;
 }
 
+uint16_t
+bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	uint16_t cnt = 0;
+
+	while (nb_pkts > RTE_BNXT_MAX_RX_BURST) {
+		uint16_t burst;
+
+		burst = recv_burst_vec_neon(rx_queue, rx_pkts + cnt,
+					    RTE_BNXT_MAX_RX_BURST);
+
+		cnt += burst;
+		nb_pkts -= burst;
+
+		if (burst < RTE_BNXT_MAX_RX_BURST)
+			return cnt;
+	}
+
+	return cnt + recv_burst_vec_neon(rx_queue, rx_pkts + cnt, nb_pkts);
+}
+
 static void
 bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 {

File 2 of 2: SSE vector Rx path

@@ -149,9 +149,8 @@ descs_to_mbufs(__m128i mm_rxcmp[4], __m128i mm_rxcmp1[4],
 	_mm_store_si128((void *)&mbuf[3]->rx_descriptor_fields1, t0);
 }
 
-uint16_t
-bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-		   uint16_t nb_pkts)
+static uint16_t
+recv_burst_vec_sse(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 	struct bnxt_rx_queue *rxq = rx_queue;
 	const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
@@ -176,9 +175,6 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
 		bnxt_rxq_rearm(rxq, rxr);
 
-	/* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
-	nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);
-
 	cons = raw_cons & (cp_ring_size - 1);
 	mbcons = (raw_cons / 2) & (rx_ring_size - 1);
@@ -286,6 +282,27 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx_pkts;
 }
 
+uint16_t
+bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	uint16_t cnt = 0;
+
+	while (nb_pkts > RTE_BNXT_MAX_RX_BURST) {
+		uint16_t burst;
+
+		burst = recv_burst_vec_sse(rx_queue, rx_pkts + cnt,
+					   RTE_BNXT_MAX_RX_BURST);
+
+		cnt += burst;
+		nb_pkts -= burst;
+
+		if (burst < RTE_BNXT_MAX_RX_BURST)
+			return cnt;
+	}
+
+	return cnt + recv_burst_vec_sse(rx_queue, rx_pkts + cnt, nb_pkts);
+}
+
 static void
 bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 {
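
Both files apply the same chunking pattern: the size-limited inner receive
routine is called repeatedly until either the caller's request is satisfied or
the inner routine comes up short, which indicates the ring is empty. As a
self-contained illustration of that pattern (the names below are hypothetical,
not the driver's; INNER_BURST_MAX stands in for the inner routine's per-call
limit):

#include <stdint.h>

#define INNER_BURST_MAX 32	/* hypothetical per-call limit of the inner routine */

/* Hypothetical inner routine: fills at most INNER_BURST_MAX slots, returns count. */
typedef uint16_t (*inner_recv_t)(void *queue, void **pkts, uint16_t nb);

/* Generic wrapper mirroring the pattern added for the NEON and SSE paths. */
static uint16_t
recv_up_to(inner_recv_t inner, void *queue, void **pkts, uint16_t nb_pkts)
{
	uint16_t cnt = 0;

	while (nb_pkts > INNER_BURST_MAX) {
		uint16_t burst = inner(queue, pkts + cnt, INNER_BURST_MAX);

		cnt += burst;
		nb_pkts -= burst;

		if (burst < INNER_BURST_MAX)
			return cnt;	/* fewer than requested: nothing left to fetch */
	}

	return cnt + inner(queue, pkts + cnt, nb_pkts);
}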