net/e1000: fix memzone leak on queue re-configure
Normally, closing the device should free the queue memzones. But the
memzones are not freed when device setup ops run in a sequence like:

rte_eth_bond_slave_remove
-->__eth_bond_slave_remove_lock_free
---->slave_remove
------>rte_eth_dev_internal_reset
-------->rte_eth_dev_rx_queue_config
---------->eth_dev_rx_queue_config
------------>em_rx_queue_release
rte_eth_dev_close
-->eth_em_close
---->em_dev_free_queues
------>em_rx_queue_release (not called, because nb_rx_queues and
       nb_tx_queues are 0)

When the queue count is then reduced, the indexes of the memzones that
backed the larger queue set are lost, so those memzones can no longer be
looked up and freed, which leaks memory. Fix this by releasing the
memzone when the queue itself is released.

Fixes: 460d1679586e ("drivers/net: delete HW rings while freeing queues")
Cc: stable@dpdk.org

Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
commit 09cbfa2da4 (parent b225783dda)
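For context, the leak can be hit from an application simply by re-configuring
a port with fewer queues. The sketch below is an assumed example, not part of
the patch; shrink_queues and the queue counts are illustrative:

#include <rte_ethdev.h>

static int
shrink_queues(uint16_t port_id, const struct rte_eth_conf *conf)
{
	int ret;

	/* First configuration: 4 RX and 4 TX queues. Each following
	 * rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() call reserves
	 * an "rx_ring"/"tx_ring" memzone for the HW descriptor ring. */
	ret = rte_eth_dev_configure(port_id, 4, 4, conf);
	if (ret != 0)
		return ret;
	/* ... queue setup for indexes 0..3 ... */

	/* Shrink to one queue pair: queues 1..3 are released through the
	 * PMD queue-release callbacks. Before this patch those callbacks
	 * did not free the memzones, and the close path only iterates the
	 * new nb_rx_queues/nb_tx_queues, so the rings of the dropped
	 * queues leaked. */
	return rte_eth_dev_configure(port_id, 1, 1, conf);
}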
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -104,6 +104,7 @@ struct em_rx_queue {
 	uint8_t hthresh; /**< Host threshold register. */
 	uint8_t wthresh; /**< Write-back threshold register. */
 	uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
+	const struct rte_memzone *mz;
 };

 /**
@@ -173,6 +174,7 @@ struct em_tx_queue {
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
 	uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	const struct rte_memzone *mz;
 };

 #if 1
@@ -1116,6 +1118,7 @@ em_tx_queue_release(struct em_tx_queue *txq)
 	if (txq != NULL) {
 		em_tx_queue_release_mbufs(txq);
 		rte_free(txq->sw_ring);
+		rte_memzone_free(txq->mz);
 		rte_free(txq);
 	}
 }
@@ -1286,6 +1289,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 			RTE_CACHE_LINE_SIZE)) == NULL)
 		return -ENOMEM;

+	txq->mz = tz;
 	/* Allocate software ring */
 	if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
 			sizeof(txq->sw_ring[0]) * nb_desc,
@@ -1338,6 +1342,7 @@ em_rx_queue_release(struct em_rx_queue *rxq)
 	if (rxq != NULL) {
 		em_rx_queue_release_mbufs(rxq);
 		rte_free(rxq->sw_ring);
+		rte_memzone_free(rxq->mz);
 		rte_free(rxq);
 	}
 }
@@ -1452,6 +1457,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 			RTE_CACHE_LINE_SIZE)) == NULL)
 		return -ENOMEM;

+	rxq->mz = rz;
 	/* Allocate software ring. */
 	if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
 			sizeof (rxq->sw_ring[0]) * nb_desc,
@@ -1611,14 +1617,12 @@ em_dev_free_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		eth_em_rx_queue_release(dev, i);
 		dev->data->rx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "rx_ring", i);
 	}
 	dev->data->nb_rx_queues = 0;

 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		eth_em_tx_queue_release(dev, i);
 		dev->data->tx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "tx_ring", i);
 	}
 	dev->data->nb_tx_queues = 0;
 }
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -112,6 +112,7 @@ struct igb_rx_queue {
 	uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t flags; /**< RX flags. */
 	uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
+	const struct rte_memzone *mz;
 };

 /**
@@ -186,6 +187,7 @@ struct igb_tx_queue {
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
 	uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+	const struct rte_memzone *mz;
 };

 #if 1
@@ -1276,6 +1278,7 @@ igb_tx_queue_release(struct igb_tx_queue *txq)
 	if (txq != NULL) {
 		igb_tx_queue_release_mbufs(txq);
 		rte_free(txq->sw_ring);
+		rte_memzone_free(txq->mz);
 		rte_free(txq);
 	}
 }
@@ -1545,6 +1548,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}

+	txq->mz = tz;
 	txq->nb_tx_desc = nb_desc;
 	txq->pthresh = tx_conf->tx_thresh.pthresh;
 	txq->hthresh = tx_conf->tx_thresh.hthresh;
@@ -1601,6 +1605,7 @@ igb_rx_queue_release(struct igb_rx_queue *rxq)
 	if (rxq != NULL) {
 		igb_rx_queue_release_mbufs(rxq);
 		rte_free(rxq->sw_ring);
+		rte_memzone_free(rxq->mz);
 		rte_free(rxq);
 	}
 }
@@ -1746,6 +1751,8 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 		igb_rx_queue_release(rxq);
 		return -ENOMEM;
 	}
+
+	rxq->mz = rz;
 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
 	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
 	rxq->rx_ring_phys_addr = rz->iova;
@@ -1885,14 +1892,12 @@ igb_dev_free_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		eth_igb_rx_queue_release(dev, i);
 		dev->data->rx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "rx_ring", i);
 	}
 	dev->data->nb_rx_queues = 0;

 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		eth_igb_tx_queue_release(dev, i);
 		dev->data->tx_queues[i] = NULL;
-		rte_eth_dma_zone_free(dev, "tx_ring", i);
 	}
 	dev->data->nb_tx_queues = 0;
 }
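The net effect is an ownership change: each queue now remembers the memzone
backing its HW ring and frees it on its own release path, so the by-name
rte_eth_dma_zone_free() calls in em_dev_free_queues()/igb_dev_free_queues()
become redundant and are dropped. A reduced sketch of the pattern, with a
hypothetical pmd_queue struct and error handling trimmed:

#include <rte_memzone.h>
#include <ethdev_driver.h> /* rte_eth_dma_zone_reserve(); this header is
                            * named rte_ethdev_driver.h before DPDK 21.11 */

struct pmd_queue {
	const struct rte_memzone *mz; /* backing store of the HW ring */
	/* ... descriptor ring pointer, sw_ring, thresholds ... */
};

static int
pmd_tx_queue_setup(struct rte_eth_dev *dev, struct pmd_queue *q,
		   uint16_t queue_idx, size_t ring_size, int socket_id)
{
	const struct rte_memzone *tz;

	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, ring_size,
				      RTE_CACHE_LINE_SIZE, socket_id);
	if (tz == NULL)
		return -ENOMEM;
	q->mz = tz; /* the queue owns its memzone from here on ... */
	return 0;
}

static void
pmd_tx_queue_release(struct pmd_queue *q)
{
	if (q == NULL)
		return;
	rte_memzone_free(q->mz); /* ... and frees it on every release path */
}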