net/bnx2x: fix to sync fastpath Rx queue access

PMD handles fast path completions in the Rx handler and control path
completions in the interrupt handler. Both process completions from
the same fastpath completion queue. There is a potential race
condition when these two paths process completions from the same
queue and try to update the Rx producer concurrently.

Add a fastpath Rx lock between these two paths to close this race.

Fixes: 540a211084a7 ("bnx2x: driver core")
Cc: stable@dpdk.org

Signed-off-by: Rasesh Mody <rmody@marvell.com>
This commit is contained in:
Rasesh Mody 2020-01-26 14:54:19 -08:00 committed by Ferruh Yigit
parent d836d5efb9
commit 141c86f55f
3 changed files with 22 additions and 1 deletion

View File

@ -1167,6 +1167,10 @@ static int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) ==
MAX_RCQ_ENTRIES(rxq)))
rx_cq_cons_sb++;
PMD_RX_LOG(DEBUG, "hw CQ cons = %d, sw CQ cons = %d",
rx_cq_cons_sb, rxq->rx_cq_head);
return rxq->rx_cq_head != rx_cq_cons_sb;
}
@ -1249,9 +1253,12 @@ static uint8_t bnx2x_rxeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp)
uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
rte_spinlock_lock(&(fp)->rx_mtx);
rxq = sc->rx_queues[fp->index];
if (!rxq) {
PMD_RX_LOG(ERR, "RX queue %d is NULL", fp->index);
rte_spinlock_unlock(&(fp)->rx_mtx);
return 0;
}
@ -1321,9 +1328,14 @@ next_cqe:
rxq->rx_cq_head = sw_cq_cons;
rxq->rx_cq_tail = sw_cq_prod;
PMD_RX_LOG(DEBUG, "BD prod = %d, sw CQ prod = %d",
bd_prod_fw, sw_cq_prod);
/* Update producers */
bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod);
rte_spinlock_unlock(&(fp)->rx_mtx);
return sw_cq_cons != hw_cq_cons;
}

View File

@ -360,6 +360,9 @@ struct bnx2x_fastpath {
/* pointer back to parent structure */
struct bnx2x_softc *sc;
/* Used to synchronize fastpath Rx access */
rte_spinlock_t rx_mtx;
/* status block */
struct bnx2x_dma sb_dma;
union bnx2x_host_hc_status_block status_block;

View File

@ -346,6 +346,8 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint16_t len, pad;
struct rte_mbuf *rx_mb = NULL;
rte_spinlock_lock(&(fp)->rx_mtx);
hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
USABLE_RCQ_ENTRIES_PER_PAGE) {
@ -357,8 +359,10 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
sw_cq_cons = rxq->rx_cq_head;
sw_cq_prod = rxq->rx_cq_tail;
if (sw_cq_cons == hw_cq_cons)
if (sw_cq_cons == hw_cq_cons) {
rte_spinlock_unlock(&(fp)->rx_mtx);
return 0;
}
while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {
@ -439,6 +443,8 @@ next_rx:
bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);
rte_spinlock_unlock(&(fp)->rx_mtx);
return nb_rx;
}