net/bnxt: fix queue start/stop operations

Packets destined for the to-be-stopped queue should not be dropped
(neither in HW nor in the driver), so re-program the RSS table without
this queue on stop and add it back to the table on start, unless the
device is a Representor VF.
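
The stop side condenses to the following (taken from the bnxt_rxq.c hunk
below; mbufs are only released once the RSS table no longer points at
the queue):

    if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
            vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
            rc = bnxt_vnic_rss_configure(bp, vnic);
    }

    if (rc == 0)
            bnxt_rx_queue_release_mbufs(rxq);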

Since the 0th entry is used for the default ring, use fw_grp_id + 1 and
change the RSS table population logic to program valid IDs instead of
the default zeroth entry whenever a fw_grp_id is invalid.
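
The population loop itself presumably lives in bnxt_vnic_rss_configure(),
which is not part of the hunks below; a simplified sketch of the new rule
(nr_rings and rss_table are illustrative names, HW_HASH_INDEX_SIZE is the
RSS table depth) would be:

    /* Skip invalid groups instead of writing the default zeroth
     * entry; assumes at least one queue is started.
     */
    for (i = 0, k = 0; k < HW_HASH_INDEX_SIZE; k++) {
            while (vnic->fw_grp_ids[i] == INVALID_HW_RING_ID)
                    i = (i + 1) % nr_rings;  /* stopped queue */
            rss_table[k] = vnic->fw_grp_ids[i];
            i = (i + 1) % nr_rings;
    }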

Destroy and recreate the trio of Rx rings (compl, Rx, AG) on every
start so that HW is in sync with software.
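
Condensed, the start side (bnxt_rx_queue_start() in the bnxt_rxq.c hunk
below) becomes:

    bnxt_free_hwrm_rx_ring(bp, rx_queue_id);   /* free compl, Rx, AG */
    bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);  /* re-create the trio */

    if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
            vnic->fw_grp_ids[rx_queue_id] =
                            bp->grp_info[rx_queue_id].fw_grp_id;
            rc = bnxt_vnic_rss_configure(bp, vnic);
    }

    if (rc == 0)
            rxq->rx_deferred_start = false;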

Fixes: 9b63c6fd70 ("net/bnxt: support Rx/Tx queue start/stop")
Cc: stable@dpdk.org

Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ray Jui <ray.jui@broadcom.com>
Reviewed-by: Scott Branden <scott.branden@broadcom.com>
Reviewed-by: Randy Schacher <stuart.schacher@broadcom.com>
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Author: Ajit Khaparde <ajit.khaparde@broadcom.com>
Date:   2018-06-28 13:15:35 -07:00
Committed by: Ferruh Yigit
commit 14255b3515 (parent 12853bb6f7)
9 changed files with 206 additions and 67 deletions

drivers/net/bnxt/bnxt.h

@@ -27,6 +27,7 @@
 #define BNXT_MIN_RING_DESC	16
 #define BNXT_MAX_TX_RING_DESC	4096
 #define BNXT_MAX_RX_RING_DESC	8192
+#define BNXT_DB_SIZE		0x80
 #define BNXT_INT_LAT_TMR_MIN	75
 #define BNXT_INT_LAT_TMR_MAX	150

drivers/net/bnxt/bnxt_ethdev.c

@@ -199,13 +199,14 @@ static int bnxt_alloc_mem(struct bnxt *bp)
 static int bnxt_init_chip(struct bnxt *bp)
 {
-	unsigned int i;
+	struct bnxt_rx_queue *rxq;
 	struct rte_eth_link new;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t intr_vector = 0;
 	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
 	uint32_t vec = BNXT_MISC_VEC_ID;
+	unsigned int i, j;
 	int rc;
 
 	/* disable uio/vfio intr/eventfd mapping */
@@ -279,6 +280,13 @@ static int bnxt_init_chip(struct bnxt *bp)
 			goto err_out;
 		}
 
+		for (j = 0; j < bp->rx_nr_rings; j++) {
+			rxq = bp->eth_dev->data->rx_queues[j];
+
+			if (rxq->rx_deferred_start)
+				rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+		}
+
 		rc = bnxt_vnic_rss_configure(bp, vnic);
 		if (rc) {
 			PMD_DRV_LOG(ERR,

drivers/net/bnxt/bnxt_hwrm.c

@@ -1817,8 +1817,7 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 	return rc;
 }
 
-static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
-			      unsigned int idx __rte_unused)
+static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
@@ -1830,39 +1829,9 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	cpr->cp_raw_cons = 0;
 }
 
-int bnxt_free_all_hwrm_rings(struct bnxt *bp)
+void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 {
-	unsigned int i;
-	int rc = 0;
-
-	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
-		struct bnxt_tx_queue *txq = bp->tx_queues[i];
-		struct bnxt_tx_ring_info *txr = txq->tx_ring;
-		struct bnxt_ring *ring = txr->tx_ring_struct;
-		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
-		unsigned int idx = bp->rx_cp_nr_rings + i;
-
-		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
-			bnxt_hwrm_ring_free(bp, ring,
-					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
-			ring->fw_ring_id = INVALID_HW_RING_ID;
-			memset(txr->tx_desc_ring, 0,
-					txr->tx_ring_struct->ring_size *
-					sizeof(*txr->tx_desc_ring));
-			memset(txr->tx_buf_ring, 0,
-					txr->tx_ring_struct->ring_size *
-					sizeof(*txr->tx_buf_ring));
-			txr->tx_prod = 0;
-			txr->tx_cons = 0;
-		}
-		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
-			bnxt_free_cp_ring(bp, cpr, idx);
-			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
-		}
-	}
-
-	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
-		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
 	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
 	struct bnxt_ring *ring = rxr->rx_ring_struct;
 	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
@@ -1871,7 +1840,7 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
 		bnxt_hwrm_ring_free(bp, ring,
 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
 		ring->fw_ring_id = INVALID_HW_RING_ID;
-		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+		bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
 		memset(rxr->rx_desc_ring, 0,
 		       rxr->rx_ring_struct->ring_size *
 		       sizeof(*rxr->rx_desc_ring));
@@ -1889,26 +1858,47 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
 		       rxr->ag_ring_struct->ring_size *
 		       sizeof(*rxr->ag_buf_ring));
 		rxr->ag_prod = 0;
-		bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
+		bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
 	}
+
+	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+		bnxt_free_cp_ring(bp, cpr);
+
+	bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+int bnxt_free_all_hwrm_rings(struct bnxt *bp)
+{
+	unsigned int i;
+
+	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+		struct bnxt_tx_queue *txq = bp->tx_queues[i];
+		struct bnxt_tx_ring_info *txr = txq->tx_ring;
+		struct bnxt_ring *ring = txr->tx_ring_struct;
+		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+
+		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+			bnxt_hwrm_ring_free(bp, ring,
+					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
+			ring->fw_ring_id = INVALID_HW_RING_ID;
+			memset(txr->tx_desc_ring, 0,
+					txr->tx_ring_struct->ring_size *
+					sizeof(*txr->tx_desc_ring));
+			memset(txr->tx_buf_ring, 0,
+					txr->tx_ring_struct->ring_size *
+					sizeof(*txr->tx_buf_ring));
+			txr->tx_prod = 0;
+			txr->tx_cons = 0;
+		}
 		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
-			bnxt_free_cp_ring(bp, cpr, i);
-			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+			bnxt_free_cp_ring(bp, cpr);
+			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
 		}
 	}
 
+	for (i = 0; i < bp->rx_cp_nr_rings; i++)
+		bnxt_free_hwrm_rx_ring(bp, i);
+
 	/* Default completion ring */
 	{
 		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
 
 		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
-			bnxt_free_cp_ring(bp, cpr, 0);
+			bnxt_free_cp_ring(bp, cpr);
 			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
 		}
 	}
-	return rc;
+	return 0;
 }
 
 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)

drivers/net/bnxt/bnxt_hwrm.h

@@ -107,6 +107,7 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 void bnxt_free_all_hwrm_resources(struct bnxt *bp);
 void bnxt_free_hwrm_resources(struct bnxt *bp);
+void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index);
 int bnxt_alloc_hwrm_resources(struct bnxt *bp);
 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);

drivers/net/bnxt/bnxt_ring.c

@@ -276,6 +276,98 @@ static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
 	coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
 }
 
+int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
+{
+	struct rte_pci_device *pci_dev = bp->pdev;
+	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
+	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+	struct bnxt_ring *ring = rxr->rx_ring_struct;
+	unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
+	int rc = 0;
+
+	bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
+
+	/* Rx cmpl */
+	rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+				  HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+				  queue_index, HWRM_NA_SIGNATURE,
+				  HWRM_NA_SIGNATURE);
+	if (rc)
+		goto err_out;
+
+	cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
+			   queue_index * BNXT_DB_SIZE;
+	bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
+	B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
+	if (!queue_index) {
+		/*
+		 * In order to save completion resources, use the first
+		 * completion ring from PF or VF as the default completion ring
+		 * for async event and HWRM forward response handling.
+		 */
+		bp->def_cp_ring = cpr;
+		rc = bnxt_hwrm_set_async_event_cr(bp);
+		if (rc)
+			goto err_out;
+	}
+
+	/* Rx ring */
+	rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+				  queue_index, cpr->hw_stats_ctx_id,
+				  cp_ring->fw_ring_id);
+	if (rc)
+		goto err_out;
+
+	rxr->rx_prod = 0;
+	rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr +
+			   queue_index * BNXT_DB_SIZE;
+	bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
+	B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+
+	ring = rxr->ag_ring_struct;
+
+	/* Agg ring */
+	if (!ring)
+		PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
+
+	rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+				  map_idx, HWRM_NA_SIGNATURE,
+				  cp_ring->fw_ring_id);
+	if (rc)
+		goto err_out;
+
+	PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
+
+	rxr->ag_prod = 0;
+	rxr->ag_doorbell = (char *)pci_dev->mem_resource[2].addr +
+			   map_idx * BNXT_DB_SIZE;
+	bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
+	B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+
+	rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
+			       ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
+
+	if (bp->eth_dev->data->rx_queue_state[queue_index] ==
+	    RTE_ETH_QUEUE_STATE_STARTED) {
+		if (bnxt_init_one_rx_ring(rxq)) {
+			RTE_LOG(ERR, PMD,
+				"bnxt_init_one_rx_ring failed!\n");
+			bnxt_rx_queue_release_op(rxq);
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+		B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+	}
+	rxq->index = queue_index;
+	PMD_DRV_LOG(INFO,
+		    "queue %d, rx_deferred_start %d, state %d!\n",
+		    queue_index, rxq->rx_deferred_start,
+		    bp->eth_dev->data->rx_queue_state[queue_index]);
+
+err_out:
+	return rc;
+}
+
 /* ring_grp usage:
  * [0] = default completion ring
  * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings

drivers/net/bnxt/bnxt_ring.h

@@ -70,6 +70,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
 		     struct bnxt_rx_queue *rxq,
 		     struct bnxt_cp_ring_info *cp_ring_info,
 		     const char *suffix);
+int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index);
 int bnxt_alloc_hwrm_rings(struct bnxt *bp);
 
 #endif

drivers/net/bnxt/bnxt_rxq.c

@@ -199,12 +199,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 	return rc;
 }
 
-static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
+void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
 {
 	struct bnxt_sw_rx_bd *sw_ring;
 	struct bnxt_tpa_info *tpa_info;
 	uint16_t i;
 
+	rte_spinlock_lock(&rxq->lock);
+
 	if (rxq) {
 		sw_ring = rxq->rx_ring->rx_buf_ring;
 		if (sw_ring) {
@@ -239,6 +241,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
 			}
 		}
 	}
+
+	rte_spinlock_unlock(&rxq->lock);
 }
 
 void bnxt_free_rx_mbufs(struct bnxt *bp)
@@ -286,6 +290,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 	struct bnxt_rx_queue *rxq;
 	int rc = 0;
+	uint8_t queue_state;
 
 	if (queue_idx >= bp->max_rx_rings) {
 		PMD_DRV_LOG(ERR,
@@ -341,6 +346,11 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	}
 	rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	queue_state = rxq->rx_deferred_start ? RTE_ETH_QUEUE_STATE_STOPPED :
+					       RTE_ETH_QUEUE_STATE_STARTED;
+	eth_dev->data->rx_queue_state[queue_idx] = queue_state;
+	rte_spinlock_init(&rxq->lock);
+
 out:
 	return rc;
 }
@@ -389,6 +399,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
 	struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
 	struct bnxt_vnic_info *vnic = NULL;
+	int rc = 0;
 
 	if (rxq == NULL) {
 		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
@@ -396,28 +407,47 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
-	rxq->rx_deferred_start = false;
+
+	bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
+	bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
 	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+
 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
 		vnic = rxq->vnic;
+
 		if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
 			return 0;
-		PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
-			vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
+
+		PMD_DRV_LOG(DEBUG,
+			    "vnic = %p fw_grp_id = %d\n",
+			    vnic, bp->grp_info[rx_queue_id].fw_grp_id);
+
 		vnic->fw_grp_ids[rx_queue_id] =
-					bp->grp_info[rx_queue_id + 1].fw_grp_id;
-		return bnxt_vnic_rss_configure(bp, vnic);
+					bp->grp_info[rx_queue_id].fw_grp_id;
+
+		rc = bnxt_vnic_rss_configure(bp, vnic);
 	}
 
-	return 0;
+	if (rc == 0)
+		rxq->rx_deferred_start = false;
+
+	return rc;
 }
 
 int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
-	struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
 	struct bnxt_vnic_info *vnic = NULL;
+	struct bnxt_rx_queue *rxq = NULL;
+	int rc = 0;
+
+	/* Rx CQ 0 also works as Default CQ for async notifications */
+	if (!rx_queue_id) {
+		PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
+		return -EINVAL;
+	}
+
+	rxq = bp->rx_queues[rx_queue_id];
 
 	if (rxq == NULL) {
 		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
@@ -431,7 +461,11 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
 		vnic = rxq->vnic;
 		vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
-		return bnxt_vnic_rss_configure(bp, vnic);
+		rc = bnxt_vnic_rss_configure(bp, vnic);
 	}
 
-	return 0;
+	if (rc == 0)
+		bnxt_rx_queue_release_mbufs(rxq);
+
+	return rc;
 }

drivers/net/bnxt/bnxt_rxq.h

@@ -10,6 +10,9 @@ struct bnxt;
 struct bnxt_rx_ring_info;
 struct bnxt_cp_ring_info;
 struct bnxt_rx_queue {
+	rte_spinlock_t		lock;	/* Synchronize between rx_queue_stop
+					 * and fast path
+					 */
 	struct rte_mempool	*mb_pool; /* mbuf pool for RX ring */
 	struct rte_mbuf		*pkt_first_seg; /* 1st seg of pkt */
 	struct rte_mbuf		*pkt_last_seg; /* Last seg of pkt */
@@ -54,4 +57,5 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev,
 			uint16_t rx_queue_id);
 int bnxt_rx_queue_stop(struct rte_eth_dev *dev,
 		       uint16_t rx_queue_id);
+void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq);
 #endif

drivers/net/bnxt/bnxt_rxr.c

@@ -541,7 +541,9 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	bool evt = false;
 
 	/* If Rx Q was stopped return. RxQ0 cannot be stopped. */
-	if (rxq->rx_deferred_start && rxq->queue_id)
+	if (unlikely(((rxq->rx_deferred_start ||
+		       !rte_spinlock_trylock(&rxq->lock)) &&
+		      rxq->queue_id)))
 		return 0;
 
 	/* Handle RX burst request */
@@ -583,7 +585,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * For PMD, there is no need to keep on pushing to REARM
 		 * the doorbell if there are no new completions
 		 */
-		return nb_rx_pkts;
+		goto done;
 	}
 
 	if (prod != rxr->rx_prod)
@@ -618,16 +620,22 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		}
 	}
 
+done:
+	rte_spinlock_unlock(&rxq->lock);
+
 	return nb_rx_pkts;
 }
 
 void bnxt_free_rx_rings(struct bnxt *bp)
 {
 	int i;
+	struct bnxt_rx_queue *rxq;
 
 	if (!bp->rx_queues)
 		return;
 
 	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
-		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+		rxq = bp->rx_queues[i];
 
 		if (!rxq)
 			continue;