net/i40e: fix memzone leak on queue re-configure

Normally the queue memzones are freed when the device is closed.
But they are not freed when the device setup ops run like:

rte_eth_bond_slave_remove
-->__eth_bond_slave_remove_lock_free
---->slave_remove
------>rte_eth_dev_internal_reset
-------->rte_eth_dev_rx_queue_config
---------->eth_dev_rx_queue_config
------------>i40e_dev_rx_queue_release
rte_eth_dev_close
-->i40e_dev_close
---->i40e_dev_free_queues
------>i40e_dev_rx_queue_release
      (not called because nb_rx_queues and nb_tx_queues are 0)
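
The situation can be condensed into a small standalone sketch
(illustrative only; dev_data, internal_reset and free_queues are
simplified stand-ins, not the real ethdev code). Because the queue
counts are zeroed before close, the close-time cleanup loop has
nothing to iterate over and the ring memzones are orphaned:

    #include <stdio.h>

    struct dev_data {
            unsigned int nb_rx_queues;
            void *rx_queues[8];
    };

    /* models rte_eth_dev_internal_reset(): the queues were already
     * released one by one, then the queue count is zeroed */
    static void internal_reset(struct dev_data *d)
    {
            d->nb_rx_queues = 0;
    }

    /* models the close-time cleanup: it is bounded by nb_rx_queues,
     * so it can only free the zones of queues that still exist */
    static void free_queues(struct dev_data *d)
    {
            unsigned int i;

            for (i = 0; i < d->nb_rx_queues; i++) {
                    printf("freeing ring memzone of queue %u\n", i);
                    d->rx_queues[i] = NULL;
            }
    }

    int main(void)
    {
            struct dev_data d = { .nb_rx_queues = 4 };

            internal_reset(&d);     /* bonding slave_remove path */
            free_queues(&d);        /* prints nothing: zones leak */
            return 0;
    }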

And when the queue number is re-configured to a smaller size, the
memzones of the queues with larger indexes can no longer be looked
up and freed, which leads to a memory leak. So the memzone should
be released when its queue is released.
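
A minimal sketch of the ownership pattern the patch adopts (again
illustrative only; memzone, queue_setup and queue_release are
simplified stand-ins, where the real code stores a
const struct rte_memzone pointer in the queue and frees it with
rte_memzone_free()):

    #include <stdlib.h>

    struct memzone {
            void *addr;
    };

    struct rx_queue {
            const struct memzone *mz; /* ring memory owned by queue */
    };

    static struct rx_queue *queue_setup(size_t ring_size)
    {
            struct memzone *mz = malloc(sizeof(*mz));
            struct rx_queue *q = calloc(1, sizeof(*q));

            if (mz == NULL || q == NULL) {
                    free(mz);
                    free(q);
                    return NULL;
            }
            mz->addr = calloc(1, ring_size);
            q->mz = mz; /* record ownership at setup time */
            return q;
    }

    static void queue_release(struct rx_queue *q)
    {
            if (q == NULL)
                    return;
            /* the ring is freed together with its queue, so it can
             * no longer be orphaned by a later queue-count change */
            free(q->mz->addr);
            free((void *)q->mz);
            free(q);
    }

This mirrors the patch below: the setup functions record
rxq->mz/txq->mz, and the release functions call rte_memzone_free()
on it.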

Fixes: 460d1679586e ("drivers/net: delete HW rings while freeing queues")
Cc: stable@dpdk.org

Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
3 files changed, 8 insertions(+), 5 deletions(-)

--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -284,7 +284,6 @@ i40e_fdir_teardown(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
 	struct i40e_vsi *vsi;
-	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
 
 	vsi = pf->fdir.fdir_vsi;
 	if (!vsi)
@@ -301,10 +300,8 @@ i40e_fdir_teardown(struct i40e_pf *pf)
 	if (err)
 		PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
-	rte_eth_dma_zone_free(dev, "fdir_rx_ring", pf->fdir.rxq->queue_id);
 	i40e_rx_queue_release(pf->fdir.rxq);
 	pf->fdir.rxq = NULL;
-	rte_eth_dma_zone_free(dev, "fdir_tx_ring", pf->fdir.txq->queue_id);
 	i40e_tx_queue_release(pf->fdir.txq);
 	pf->fdir.txq = NULL;
 
 	i40e_vsi_release(vsi);

--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2024,6 +2024,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	rxq->mz = rz;
 	/* Zero all the descriptors in the ring. */
 	memset(rz->addr, 0, ring_size);
 
@@ -2115,6 +2116,7 @@ i40e_rx_queue_release(void *rxq)
 
 	i40e_rx_queue_release_mbufs(q);
 	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
 	rte_free(q);
 }
 
@@ -2427,6 +2429,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	txq->mz = tz;
 	txq->nb_tx_desc = nb_desc;
 	txq->tx_rs_thresh = tx_rs_thresh;
 	txq->tx_free_thresh = tx_free_thresh;
@@ -2500,6 +2503,7 @@ i40e_tx_queue_release(void *txq)
 
 	i40e_tx_queue_release_mbufs(q);
 	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
 	rte_free(q);
 }
 
@@ -3056,7 +3060,6 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
 			continue;
 		i40e_rx_queue_release(dev->data->rx_queues[i]);
 		dev->data->rx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "rx_ring", i);
 	}
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -3064,7 +3067,6 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
 		if (!dev->data->tx_queues[i])
 			continue;
 		i40e_tx_queue_release(dev->data->tx_queues[i]);
 		dev->data->tx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "tx_ring", i);
 	}
 }
@@ -3107,6 +3109,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
 		return I40E_ERR_NO_MEMORY;
 	}
 
+	txq->mz = tz;
 	txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
 	txq->queue_id = I40E_FDIR_QUEUE_ID;
 	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
@@ -3165,6 +3168,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
 		return I40E_ERR_NO_MEMORY;
 	}
 
+	rxq->mz = rz;
 	rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC;
 	rxq->queue_id = I40E_FDIR_QUEUE_ID;
 	rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;

--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -121,6 +121,7 @@ struct i40e_rx_queue {
 	uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
 	uint8_t dcb_tc;         /**< Traffic class of rx queue */
 	uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
+	const struct rte_memzone *mz;
 };
 
 struct i40e_tx_entry {
@@ -166,6 +167,7 @@ struct i40e_tx_queue {
 	bool tx_deferred_start; /**< don't start this queue in dev start */
 	uint8_t dcb_tc;         /**< Traffic class of tx queue */
 	uint64_t offloads; /**< Tx offload flags of DEV_RX_OFFLOAD_* */
+	const struct rte_memzone *mz;
 };
 
 /** Offload features */