common/mlx5: free MR resource on device DMA unmap

The mlx5 PMD created the MR (Memory Region) resource on the
mlx5_dma_map call to make the memory available for DMA
operations. On the mlx5_dma_unmap call the MR resource
was not freed but was inserted into the MR free list for later
garbage collection.
The actual destruction of MR resources happened on the device
stop call. That caused the application to run out of memory at
runtime if it performed multiple DMA map/unmap calls.

The fix frees the MR resource immediately on the mlx5_dma_unmap
call, without engaging the free list. The mlx5_mr_free function
is also exported from the common PMD part.

Fixes: 989e999d93 ("net/mlx5: support PCI device DMA map and unmap")
Cc: stable@dpdk.org

Signed-off-by: Jiawei Wang <jiaweiw@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
This commit is contained in:
Jiawei Wang 2020-11-02 04:22:28 +02:00 committed by Ferruh Yigit
parent 8178d9be73
commit 992e6df3da
4 changed files with 12 additions and 7 deletions

View File

@ -436,8 +436,8 @@ mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
* @param mr
* Pointer to MR to free.
*/
static void
mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
{
if (mr == NULL)
return;
@ -492,7 +492,7 @@ mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
struct mlx5_mr *mr = mr_next;
mr_next = LIST_NEXT(mr, mr);
mr_free(mr, share_cache->dereg_mr_cb);
mlx5_mr_free(mr, share_cache->dereg_mr_cb);
}
}
@ -702,7 +702,7 @@ mlx5_mr_create_primary(void *pd,
data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
data.end = data.start + msl->page_sz;
rte_mcfg_mem_read_unlock();
mr_free(mr, share_cache->dereg_mr_cb);
mlx5_mr_free(mr, share_cache->dereg_mr_cb);
goto alloc_resources;
}
MLX5_ASSERT(data.msl == data_re.msl);
@ -725,7 +725,7 @@ mlx5_mr_create_primary(void *pd,
* Must be unlocked before calling rte_free() because
* mlx5_mr_mem_event_free_cb() can be called inside.
*/
mr_free(mr, share_cache->dereg_mr_cb);
mlx5_mr_free(mr, share_cache->dereg_mr_cb);
return entry->lkey;
}
/*
@ -801,7 +801,7 @@ mlx5_mr_create_primary(void *pd,
* calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
* inside.
*/
mr_free(mr, share_cache->dereg_mr_cb);
mlx5_mr_free(mr, share_cache->dereg_mr_cb);
return UINT32_MAX;
}

View File

@ -171,4 +171,8 @@ mlx5_common_verbs_reg_mr(void *pd, void *addr, size_t length,
__rte_internal
void
mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);
__rte_internal
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb);
#endif /* RTE_PMD_MLX5_COMMON_MR_H_ */

View File

@ -65,6 +65,7 @@ INTERNAL {
mlx5_mr_lookup_list;
mlx5_mr_create_primary;
mlx5_mr_flush_local_cache;
mlx5_mr_free;
mlx5_nl_allmulti;
mlx5_nl_devlink_family_id_get;

View File

@ -404,7 +404,7 @@ mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr,
return -1;
}
LIST_REMOVE(mr, mr);
LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);
DEBUG("port %u remove MR(%p) from list", dev->data->port_id,
(void *)mr);
mlx5_mr_rebuild_cache(&sh->share_cache);