net/qede: fix DMA memory leak

Implement the macro OSAL_DMA_FREE_COHERENT to release DMA memory.
Track all DMA memory allocations using an array of memzone pointers,
and use that array to free the memzones along with the other resource
deallocations. With this change there is no need to alter the base code
to additionally pass a unique string for memzone creation.

Fixes: ec94dbc573 ("qede: add base driver")
Cc: stable@dpdk.org

Signed-off-by: Harish Patil <harish.patil@cavium.com>
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
commit a39001d90d (parent dce8a093de)
Author:    Harish Patil, 2017-07-01 12:29:55 -07:00
Committer: Ferruh Yigit
4 files changed, 82 insertions(+), 19 deletions(-)
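
The scheme is easy to see in isolation: every memzone handed out by the
OSAL allocator is recorded in a flat array, and the free path maps a DMA
(physical) address back to the memzone backing it. Below is a minimal
standalone sketch of that pattern, not the driver's code: the names
(mz_table, mz_count, dma_alloc_tracked, dma_free_tracked) are illustrative,
MZ_TRACK_MAX stands in for RTE_MAX_MEMZONE, and the field name
mz->phys_addr matches DPDK of this era (later renamed mz->iova).

    #include <stdio.h>
    #include <stdint.h>
    #include <rte_memzone.h>
    #include <rte_memory.h>
    #include <rte_cycles.h>

    #define MZ_TRACK_MAX 2560 /* stand-in for RTE_MAX_MEMZONE */

    /* Illustrative tracking table, mirroring ecore_mz_mapping/ecore_mz_count */
    static const struct rte_memzone *mz_table[MZ_TRACK_MAX];
    static uint16_t mz_count;

    /* Allocate DMA-able memory and remember the backing memzone. */
    static void *
    dma_alloc_tracked(size_t size, phys_addr_t *phys)
    {
        char name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;

        if (mz_count >= MZ_TRACK_MAX)
            return NULL; /* table full: refuse, as the patch does */

        /* Unique name derived from the timer, as the qede OSAL does */
        snprintf(name, sizeof(name), "%lx",
                 (unsigned long)rte_get_timer_cycles());
        mz = rte_memzone_reserve(name, size, SOCKET_ID_ANY, 0);
        if (mz == NULL)
            return NULL;

        *phys = mz->phys_addr;
        mz_table[mz_count++] = mz; /* remember it for the free path */
        return mz->addr;
    }

    /* Free by physical address: linear scan over the tracking table. */
    static void
    dma_free_tracked(phys_addr_t phys)
    {
        uint16_t i;

        for (i = 0; i < mz_count; i++) {
            if (mz_table[i]->phys_addr == phys) {
                rte_memzone_free(mz_table[i]);
                /* Compact so the slot can be reused; note the patch
                 * itself leaves freed entries in place. */
                mz_table[i] = mz_table[--mz_count];
                return;
            }
        }
    }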

--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c

@@ -16,6 +16,10 @@
 #include "ecore_mcp_api.h"
 #include "ecore_l2_api.h"
 
+/* Array of memzone pointers */
+static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
+/* Counter to track current memzone allocated */
+uint16_t ecore_mz_count;
 
 unsigned long qede_log2_align(unsigned long n)
 {
@@ -118,6 +122,13 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
     uint32_t core_id = rte_lcore_id();
     unsigned int socket_id;
 
+    if (ecore_mz_count >= RTE_MAX_MEMZONE) {
+        DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
+               RTE_MAX_MEMZONE);
+        *phys = 0;
+        return OSAL_NULL;
+    }
+
     OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
     snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
              (unsigned long)rte_get_timer_cycles());
@@ -134,6 +145,7 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
         return OSAL_NULL;
     }
     *phys = mz->phys_addr;
+    ecore_mz_mapping[ecore_mz_count++] = mz;
     DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
                "size=%zu phys=0x%" PRIx64 " virt=%p on socket=%u\n",
                mz->len, mz->phys_addr, mz->addr, socket_id);
@@ -148,6 +160,13 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
     uint32_t core_id = rte_lcore_id();
     unsigned int socket_id;
 
+    if (ecore_mz_count >= RTE_MAX_MEMZONE) {
+        DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
+               RTE_MAX_MEMZONE);
+        *phys = 0;
+        return OSAL_NULL;
+    }
+
     OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
     snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
              (unsigned long)rte_get_timer_cycles());
@@ -163,12 +182,29 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
         return OSAL_NULL;
     }
     *phys = mz->phys_addr;
+    ecore_mz_mapping[ecore_mz_count++] = mz;
     DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
                "aligned memory size=%zu phys=0x%" PRIx64 " virt=%p core=%d\n",
                mz->len, mz->phys_addr, mz->addr, core_id);
     return mz->addr;
 }
 
+void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
+{
+    uint16_t j;
+
+    for (j = 0; j < ecore_mz_count; j++) {
+        if (phys == ecore_mz_mapping[j]->phys_addr) {
+            DP_VERBOSE(p_dev, ECORE_MSG_SP,
+                       "Free memzone %s\n", ecore_mz_mapping[j]->name);
+            rte_memzone_free(ecore_mz_mapping[j]);
+            return;
+        }
+    }
+
+    DP_ERR(p_dev, "Unexpected memory free request\n");
+}
+
 #ifdef CONFIG_ECORE_ZIPPED_FW
 u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
                     u8 *input_buf, u32 max_size, u8 *unzip_buf)

--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h

@@ -107,14 +107,16 @@ void *osal_dma_alloc_coherent(struct ecore_dev *, dma_addr_t *, size_t);
 
 void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
                                       size_t, int);
 
+void osal_dma_free_mem(struct ecore_dev *edev, dma_addr_t phys);
+
 #define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \
     osal_dma_alloc_coherent(dev, phys, size)
 
 #define OSAL_DMA_ALLOC_COHERENT_ALIGNED(dev, phys, size, align) \
     osal_dma_alloc_coherent_aligned(dev, phys, size, align)
 
-/* TODO: */
-#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) nothing
+#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) \
+    osal_dma_free_mem(dev, phys)
 
 /* HW reads/writes */
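
With OSAL_DMA_FREE_COHERENT now expanding to osal_dma_free_mem(),
allocation and release form a symmetric pair from the base code's point
of view. Note that the macro deliberately ignores its virt and size
arguments; lookup is purely by physical address, which is why no extra
bookkeeping had to be threaded through the base driver. A hedged sketch
of a caller pairing the two (some_init_step is a hypothetical stand-in
for an init step that may fail; the shape mirrors qede_alloc_mem_sb
below):

    /* Sketch of an alloc/free pair on an error path. */
    struct status_block *sb_virt;
    dma_addr_t sb_phys;

    sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
                                      sizeof(struct status_block));
    if (sb_virt == NULL)
        return -ENOMEM;

    if (some_init_step(edev, sb_virt, sb_phys) != 0) {
        /* Before this patch the macro expanded to "nothing" and leaked */
        OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
                               sizeof(struct status_block));
        return -EINVAL;
    }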

--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c

@@ -1143,7 +1143,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
      * again and the fastpath pointers will be reinitialized there.
      */
     if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
-            qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
+        qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
         qede_dealloc_fp_resc(eth_dev);
         /* Proceed with updated queue count */
         qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
@@ -1373,7 +1373,6 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
     struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
     struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
     struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-    uint8_t i;
 
     PMD_INIT_FUNC_TRACE(edev);
@@ -1389,12 +1388,6 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
     qede_fdir_dealloc_resc(eth_dev);
     qede_dealloc_fp_resc(eth_dev);
 
-    for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
-        if (eth_dev->data->rx_queues[i])
-            eth_dev->data->rx_queues[i] = NULL;
-    for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
-        if (eth_dev->data->tx_queues[i])
-            eth_dev->data->tx_queues[i] = NULL;
     eth_dev->data->nb_rx_queues = 0;
     eth_dev->data->nb_tx_queues = 0;

--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c

@@ -149,7 +149,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
         DP_NOTICE(edev, false,
                   "Unable to alloc memory for cqe ring on socket %u\n",
                   socket_id);
-        /* TBD: Freeing RX BD ring */
+        qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
         rte_free(rxq->sw_rx_ring);
         rte_free(rxq);
         return -ENOMEM;
@@ -300,6 +300,7 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
         DP_ERR(edev,
               "Unable to allocate memory for txbd ring on socket %u",
               socket_id);
+        qdev->ops->common->chain_free(edev, &txq->tx_pbl);
         qede_tx_queue_release(txq);
         return -ENOMEM;
     }
@@ -363,23 +364,23 @@ static int
 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
                   uint16_t sb_id)
 {
-    struct ecore_dev *edev = &qdev->edev;
+    struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
     struct status_block *sb_virt;
     dma_addr_t sb_phys;
     int rc;
 
-    sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
+    sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
+                                      sizeof(struct status_block));
     if (!sb_virt) {
         DP_ERR(edev, "Status block allocation failed\n");
         return -ENOMEM;
     }
 
     rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
                                     sb_phys, sb_id);
     if (rc) {
         DP_ERR(edev, "Status block initialization failed\n");
-        /* TBD: No dma_free_coherent possible */
+        OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
+                               sizeof(struct status_block));
         return rc;
     }
@@ -437,9 +438,12 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
 {
     struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-    __rte_unused struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+    struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
     struct qede_fastpath *fp;
+    struct qede_rx_queue *rxq;
+    struct qede_tx_queue *txq;
     uint16_t sb_idx;
+    uint8_t i;
 
     PMD_INIT_FUNC_TRACE(edev);
@@ -447,10 +451,38 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
         fp = &qdev->fp_array[sb_idx];
         DP_INFO(edev, "Free sb_info index 0x%x\n",
                 fp->sb_info->igu_sb_id);
-        if (fp->sb_info)
+        if (fp->sb_info) {
+            OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
+                                   fp->sb_info->sb_phys,
+                                   sizeof(struct status_block));
             rte_free(fp->sb_info);
-        fp->sb_info = NULL;
+            fp->sb_info = NULL;
+        }
     }
 
+    /* Free packet buffers and ring memories */
+    for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+        if (eth_dev->data->rx_queues[i]) {
+            qede_rx_queue_release(eth_dev->data->rx_queues[i]);
+            rxq = eth_dev->data->rx_queues[i];
+            qdev->ops->common->chain_free(edev,
+                                          &rxq->rx_bd_ring);
+            qdev->ops->common->chain_free(edev,
+                                          &rxq->rx_comp_ring);
+            eth_dev->data->rx_queues[i] = NULL;
+        }
+    }
+
+    for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+        if (eth_dev->data->tx_queues[i]) {
+            txq = eth_dev->data->tx_queues[i];
+            qede_tx_queue_release(eth_dev->data->tx_queues[i]);
+            qdev->ops->common->chain_free(edev,
+                                          &txq->tx_pbl);
+            eth_dev->data->tx_queues[i] = NULL;
+        }
+    }
+
     if (qdev->fp_array)
         rte_free(qdev->fp_array);
     qdev->fp_array = NULL;
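
End to end, the effect is that a normal device teardown now returns all
fastpath DMA memory to the memzone allocator. A hedged application-level
sequence, using the ethdev API of this DPDK era and assuming port 0:

    /* Hypothetical teardown for port 0; with this fix, the close path
     * (qede_dev_close -> qede_dealloc_fp_resc) frees status blocks,
     * BD-ring chains, and the queue structures instead of leaking them.
     * Port ids were uint8_t in DPDK releases of this period. */
    uint8_t port_id = 0;

    rte_eth_dev_stop(port_id);
    rte_eth_dev_close(port_id);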