net/bnxt: fix Rx performance by removing spinlock

The spinlock was trying to protect scenarios where Rx queue stop/start
could be initiated dynamically. Assigning bnxt_dummy_recv_pkts and
bnxt_dummy_xmit_pkts immediately avoids concurrent access of mbufs in the
Rx and cleanup paths, achieving the same result without the lock.

Fixes: 14255b351537 ("net/bnxt: fix queue start/stop operations")

Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Rahul Gupta <rahul.gupta@broadcom.com>
This commit is contained in:
Rahul Gupta 2020-10-25 20:56:16 -07:00 committed by Ferruh Yigit
parent e24a5d3f58
commit 33ac72d741
8 changed files with 18 additions and 15 deletions

View File

@ -890,6 +890,10 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp);
int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
int wait_to_complete);
uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
extern const struct rte_flow_ops bnxt_flow_ops;

View File

@ -121,6 +121,12 @@ void bnxt_handle_async_event(struct bnxt *bp,
PMD_DRV_LOG(INFO, "Port conn async event\n");
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
/*
* Avoid any rx/tx packet processing during firmware reset
* operation.
*/
bnxt_stop_rxtx(bp);
/* Ignore reset notify async events when stopping the port */
if (!bp->eth_dev->data->dev_started) {
bp->flags |= BNXT_FLAG_FATAL_ERROR;
@ -337,3 +343,9 @@ bool bnxt_is_recovery_enabled(struct bnxt *bp)
return false;
}
/*
 * Swap the device's Rx/Tx burst handlers for the dummy no-op variants so
 * that no packet processing occurs while the handlers are in place (used
 * on firmware-reset notification, per the caller in
 * bnxt_handle_async_event).  The assignment replaces the fast-path
 * spinlock this patch removes.
 *
 * NOTE(review): assumes burst-handler pointer stores are observed by
 * in-flight datapath threads promptly enough for this use — the original
 * trylock guaranteed mutual exclusion with the cleanup path; confirm the
 * datapath re-reads rx_pkt_burst/tx_pkt_burst per burst.
 */
void bnxt_stop_rxtx(struct bnxt *bp)
{
bp->eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
bp->eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
}

View File

@ -126,4 +126,5 @@ void bnxt_wait_for_device_shutdown(struct bnxt *bp);
bool bnxt_is_recovery_enabled(struct bnxt *bp);
bool bnxt_is_master_func(struct bnxt *bp);
void bnxt_stop_rxtx(struct bnxt *bp);
#endif

View File

@ -210,8 +210,6 @@ void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
if (!rxq || !rxq->rx_ring)
return;
rte_spinlock_lock(&rxq->lock);
sw_ring = rxq->rx_ring->rx_buf_ring;
if (sw_ring) {
for (i = 0;
@ -248,7 +246,6 @@ void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
}
}
rte_spinlock_unlock(&rxq->lock);
}
void bnxt_free_rx_mbufs(struct bnxt *bp)
@ -389,7 +386,6 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->rx_started = true;
}
eth_dev->data->rx_queue_state[queue_idx] = queue_state;
rte_spinlock_init(&rxq->lock);
/* Configure mtu if it is different from what was configured before */
if (!queue_idx)

View File

@ -16,9 +16,6 @@ struct bnxt;
struct bnxt_rx_ring_info;
struct bnxt_cp_ring_info;
struct bnxt_rx_queue {
rte_spinlock_t lock; /* Synchronize between rx_queue_stop
* and fast path
*/
struct rte_mempool *mb_pool; /* mbuf pool for RX ring */
uint64_t mbuf_initializer; /* val to init mbuf */
uint16_t nb_rx_desc; /* num of RX desc */

View File

@ -843,8 +843,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return 0;
/* If Rx Q was stopped return */
if (unlikely(!rxq->rx_started ||
!rte_spinlock_trylock(&rxq->lock)))
if (unlikely(!rxq->rx_started))
return 0;
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
@ -946,8 +945,6 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
}
done:
rte_spinlock_unlock(&rxq->lock);
return nb_rx_pkts;
}

View File

@ -77,8 +77,6 @@ struct bnxt_rx_ring_info {
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);

View File

@ -49,8 +49,6 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);