net/mlx5: convert control path memory to unified malloc

This commit converts the control path memory allocations from the
rte_malloc family to the unified malloc functions (mlx5_malloc and
mlx5_free).

The objects changed are:

1. hlist;
2. rss key;
3. vlan vmwa;
4. indexed pool;
5. fdir objects;
6. meter profile;
7. flow counter pool;
8. hrxq and indirect table;
9. flow object cache resources;
10. temporary resources in flow create.

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
Author:    Suanming Mou <suanmingm@mellanox.com>
Date:      2020-06-28 15:35:26 +08:00
Committer: Ferruh Yigit
Parent:    5522da6b20
Commit:    83c2047c5f
12 changed files with 190 additions and 146 deletions
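
For reference, the unified allocator interface used throughout this
patch has the following shape. This is a sketch reconstructed from the
call sites below, not the authoritative header (the real declarations
live in mlx5_malloc.h in the mlx5 common code):

/* Sketch reconstructed from call sites; not the authoritative header. */
void *mlx5_malloc(uint32_t flags, size_t size, unsigned int align,
		  int socket);
void *mlx5_realloc(void *addr, uint32_t flags, size_t size,
		   unsigned int align, int socket);
void mlx5_free(void *addr);

/*
 * Flags observed in the hunks below:
 *   MLX5_MEM_ZERO - return zeroed memory (replaces the rte_zmalloc()
 *                   and rte_calloc() call sites).
 *   MLX5_MEM_RTE  - allocate from the rte heap (used with
 *                   mlx5_realloc() on buffers that may already live
 *                   there).
 */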

File: drivers/net/mlx5/mlx5.c

@ -40,6 +40,7 @@
#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_mp.h>
#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
@ -207,8 +208,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
.malloc = rte_malloc_socket,
.free = rte_free,
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_encap_decap_ipool",
},
{
@ -218,8 +219,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
.malloc = rte_malloc_socket,
.free = rte_free,
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_push_vlan_ipool",
},
{
@ -229,8 +230,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
.malloc = rte_malloc_socket,
.free = rte_free,
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_tag_ipool",
},
{
@ -240,8 +241,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
.malloc = rte_malloc_socket,
.free = rte_free,
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_port_id_ipool",
},
{
@ -251,8 +252,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
.malloc = rte_malloc_socket,
.free = rte_free,
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_jump_ipool",
},
#endif
@ -263,8 +264,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
.malloc = rte_malloc_socket,
.free = rte_free,
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_meter_ipool",
},
{
@ -274,8 +275,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
.malloc = rte_malloc_socket,
.free = rte_free,
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_mcp_ipool",
},
{
@ -285,8 +286,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
.malloc = rte_malloc_socket,
.free = rte_free,
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_hrxq_ipool",
},
{
@ -300,8 +301,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
.malloc = rte_malloc_socket,
.free = rte_free,
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "mlx5_flow_handle_ipool",
},
{
@ -309,8 +310,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.trunk_size = 4096,
.need_lock = 1,
.release_mem_en = 1,
.malloc = rte_malloc_socket,
.free = rte_free,
.malloc = mlx5_malloc,
.free = mlx5_free,
.type = "rte_flow_ipool",
},
};
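
Every hunk in this commit applies the same mechanical substitution,
summarized below as derived from the hunks themselves (old call on the
left; the allocation-type name strings are dropped in favor of a flags
word):

/*
 * rte_malloc(name, size, align)    -> mlx5_malloc(0, size, align,
 *                                                 SOCKET_ID_ANY)
 * malloc(size)                     -> mlx5_malloc(0, size, 0,
 *                                                 SOCKET_ID_ANY)
 * rte_zmalloc(name, size, align)   -> mlx5_malloc(MLX5_MEM_ZERO, size,
 *                                                 align, SOCKET_ID_ANY)
 * rte_calloc(name, 1, size, align) -> mlx5_malloc(MLX5_MEM_ZERO, size,
 *                                                 align, SOCKET_ID_ANY)
 * rte_realloc(ptr, size, align)    -> mlx5_realloc(ptr, MLX5_MEM_RTE,
 *                                                  size, align,
 *                                                  SOCKET_ID_ANY)
 * rte_free(ptr), free(ptr)         -> mlx5_free(ptr)
 */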
@ -336,15 +337,16 @@ mlx5_flow_id_pool_alloc(uint32_t max_id)
struct mlx5_flow_id_pool *pool;
void *mem;
pool = rte_zmalloc("id pool allocation", sizeof(*pool),
RTE_CACHE_LINE_SIZE);
pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool),
RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (!pool) {
DRV_LOG(ERR, "can't allocate id pool");
rte_errno = ENOMEM;
return NULL;
}
mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
RTE_CACHE_LINE_SIZE);
mem = mlx5_malloc(MLX5_MEM_ZERO,
MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (!mem) {
DRV_LOG(ERR, "can't allocate mem for id pool");
rte_errno = ENOMEM;
@ -357,7 +359,7 @@ mlx5_flow_id_pool_alloc(uint32_t max_id)
pool->max_id = max_id;
return pool;
error:
rte_free(pool);
mlx5_free(pool);
return NULL;
}
@ -370,8 +372,8 @@ error:
void
mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool)
{
rte_free(pool->free_arr);
rte_free(pool);
mlx5_free(pool->free_arr);
mlx5_free(pool);
}
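
One invariant the conversion introduces: a buffer obtained from
mlx5_malloc() must be released with mlx5_free(), never with rte_free()
or free(), since the unified allocator may serve the request from the
system heap rather than the rte heap (presumably depending on the
driver's memory configuration). A minimal illustration, with a
hypothetical buffer:

#include <mlx5_malloc.h>
#include <rte_memory.h>	/* SOCKET_ID_ANY */

static void
pairing_example(void)
{
	void *buf = mlx5_malloc(0, 64, 0, SOCKET_ID_ANY);

	if (buf == NULL)
		return;
	/*
	 * rte_free(buf) here would be wrong: buf is not guaranteed
	 * to come from the rte heap.
	 */
	mlx5_free(buf);
}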
/**
@ -423,14 +425,15 @@ mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id)
size = pool->curr - pool->free_arr;
size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
MLX5_ASSERT(size2 > size);
mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
mem = mlx5_malloc(0, size2 * sizeof(uint32_t), 0,
SOCKET_ID_ANY);
if (!mem) {
DRV_LOG(ERR, "can't allocate mem for id pool");
rte_errno = ENOMEM;
return -rte_errno;
}
memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
rte_free(pool->free_arr);
mlx5_free(pool->free_arr);
pool->free_arr = mem;
pool->curr = pool->free_arr + size;
pool->last = pool->free_arr + size2;
@ -499,7 +502,7 @@ mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
LIST_REMOVE(mng, next);
claim_zero(mlx5_devx_cmd_destroy(mng->dm));
claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
rte_free(mem);
mlx5_free(mem);
}
/**
@ -547,10 +550,10 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
(pool, j)->dcs));
}
TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next);
rte_free(pool);
mlx5_free(pool);
pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
}
rte_free(sh->cmng.ccont[i].pools);
mlx5_free(sh->cmng.ccont[i].pools);
}
mng = LIST_FIRST(&sh->cmng.mem_mngs);
while (mng) {
@ -1000,7 +1003,7 @@ mlx5_free_table_hash_list(struct mlx5_priv *priv)
entry);
MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
rte_free(tbl_data);
mlx5_free(tbl_data);
}
table_key.direction = 1;
pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
@ -1009,7 +1012,7 @@ mlx5_free_table_hash_list(struct mlx5_priv *priv)
entry);
MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
rte_free(tbl_data);
mlx5_free(tbl_data);
}
table_key.direction = 0;
table_key.domain = 1;
@ -1019,7 +1022,7 @@ mlx5_free_table_hash_list(struct mlx5_priv *priv)
entry);
MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
rte_free(tbl_data);
mlx5_free(tbl_data);
}
mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
}
@ -1063,8 +1066,9 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
.direction = 0,
}
};
struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL,
sizeof(*tbl_data), 0);
struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO,
sizeof(*tbl_data), 0,
SOCKET_ID_ANY);
if (!tbl_data) {
err = ENOMEM;
@ -1077,7 +1081,8 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
rte_atomic32_init(&tbl_data->tbl.refcnt);
rte_atomic32_inc(&tbl_data->tbl.refcnt);
table_key.direction = 1;
tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
SOCKET_ID_ANY);
if (!tbl_data) {
err = ENOMEM;
goto error;
@ -1090,7 +1095,8 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
rte_atomic32_inc(&tbl_data->tbl.refcnt);
table_key.direction = 0;
table_key.domain = 1;
tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
SOCKET_ID_ANY);
if (!tbl_data) {
err = ENOMEM;
goto error;
@ -1323,9 +1329,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
mlx5_mprq_free_mp(dev);
mlx5_os_free_shared_dr(priv);
if (priv->rss_conf.rss_key != NULL)
rte_free(priv->rss_conf.rss_key);
mlx5_free(priv->rss_conf.rss_key);
if (priv->reta_idx != NULL)
rte_free(priv->reta_idx);
mlx5_free(priv->reta_idx);
if (priv->config.vf)
mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
dev->data->mac_addrs,

File: drivers/net/mlx5/mlx5_ethdev.c

@ -21,6 +21,8 @@
#include <rte_rwlock.h>
#include <rte_cycles.h>
#include <mlx5_malloc.h>
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
@ -75,8 +77,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
return -rte_errno;
}
priv->rss_conf.rss_key =
rte_realloc(priv->rss_conf.rss_key,
MLX5_RSS_HASH_KEY_LEN, 0);
mlx5_realloc(priv->rss_conf.rss_key, MLX5_MEM_RTE,
MLX5_RSS_HASH_KEY_LEN, 0, SOCKET_ID_ANY);
if (!priv->rss_conf.rss_key) {
DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
dev->data->port_id, rxqs_n);
@ -142,7 +144,8 @@ mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev)
if (priv->skip_default_rss_reta)
return ret;
rss_queue_arr = rte_malloc("", rxqs_n * sizeof(unsigned int), 0);
rss_queue_arr = mlx5_malloc(0, rxqs_n * sizeof(unsigned int), 0,
SOCKET_ID_ANY);
if (!rss_queue_arr) {
DRV_LOG(ERR, "port %u cannot allocate RSS queue list (%u)",
dev->data->port_id, rxqs_n);
@ -163,7 +166,7 @@ mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev)
DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
dev->data->port_id, rss_queue_n);
rte_errno = EINVAL;
rte_free(rss_queue_arr);
mlx5_free(rss_queue_arr);
return -rte_errno;
}
DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
@ -179,7 +182,7 @@ mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev)
rss_queue_n));
ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
if (ret) {
rte_free(rss_queue_arr);
mlx5_free(rss_queue_arr);
return ret;
}
/*
@ -192,7 +195,7 @@ mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev)
if (++j == rss_queue_n)
j = 0;
}
rte_free(rss_queue_arr);
mlx5_free(rss_queue_arr);
return ret;
}
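
Note the MLX5_MEM_RTE flag on the mlx5_realloc() calls above and in
mlx5_rss.c below: it keeps the RSS key and RETA buffers on the rte
heap, presumably so pointers allocated there before this patch can
still be resized safely. As in the code before this patch, the result
is assigned straight back to the stored pointer, which leaks the old
buffer on failure (assuming mlx5_realloc() keeps realloc() semantics
and leaves the old block intact when it returns NULL). A sketch of the
safer temporary-pointer variant, with a hypothetical helper:

#include <errno.h>
#include <stdint.h>
#include <mlx5_malloc.h>
#include <rte_errno.h>
#include <rte_memory.h>	/* SOCKET_ID_ANY */

static int
resize_rss_key(uint8_t **key, uint32_t len)
{
	uint8_t *tmp = mlx5_realloc(*key, MLX5_MEM_RTE, len, 0,
				    SOCKET_ID_ANY);

	if (tmp == NULL) {
		rte_errno = ENOMEM;	/* *key is still valid here */
		return -rte_errno;
	}
	*key = tmp;
	return 0;
}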

File: drivers/net/mlx5/mlx5_flow.c

@ -32,6 +32,7 @@
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
@ -4115,7 +4116,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
sizeof(struct rte_flow_action_set_tag) +
sizeof(struct rte_flow_action_jump);
ext_actions = rte_zmalloc(__func__, act_size, 0);
ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
SOCKET_ID_ANY);
if (!ext_actions)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_ACTION,
@ -4151,7 +4153,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
*/
act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
sizeof(struct mlx5_flow_action_copy_mreg);
ext_actions = rte_zmalloc(__func__, act_size, 0);
ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
SOCKET_ID_ANY);
if (!ext_actions)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_ACTION,
@ -4245,7 +4248,7 @@ exit:
* by flow_drv_destroy.
*/
flow_qrss_free_id(dev, qrss_id);
rte_free(ext_actions);
mlx5_free(ext_actions);
return ret;
}
@ -4310,7 +4313,8 @@ flow_create_split_meter(struct rte_eth_dev *dev,
#define METER_SUFFIX_ITEM 4
item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
sizeof(struct mlx5_rte_flow_item_tag) * 2;
sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
0, SOCKET_ID_ANY);
if (!sfx_actions)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_ACTION,
@ -4349,7 +4353,7 @@ flow_create_split_meter(struct rte_eth_dev *dev,
external, flow_idx, error);
exit:
if (sfx_actions)
rte_free(sfx_actions);
mlx5_free(sfx_actions);
return ret;
}
@ -4763,8 +4767,8 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
}
if (priv_fdir_flow) {
LIST_REMOVE(priv_fdir_flow, next);
rte_free(priv_fdir_flow->fdir);
rte_free(priv_fdir_flow);
mlx5_free(priv_fdir_flow->fdir);
mlx5_free(priv_fdir_flow);
}
}
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
@ -4904,11 +4908,12 @@ mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
if (!priv->inter_flows) {
priv->inter_flows = rte_calloc(__func__, 1,
priv->inter_flows = mlx5_malloc(MLX5_MEM_ZERO,
MLX5_NUM_MAX_DEV_FLOWS *
sizeof(struct mlx5_flow) +
(sizeof(struct mlx5_flow_rss_desc) +
sizeof(uint16_t) * UINT16_MAX) * 2, 0);
sizeof(uint16_t) * UINT16_MAX) * 2, 0,
SOCKET_ID_ANY);
if (!priv->inter_flows) {
DRV_LOG(ERR, "can't allocate intermediate memory.");
return;
@ -4932,7 +4937,7 @@ mlx5_flow_free_intermediate(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
rte_free(priv->inter_flows);
mlx5_free(priv->inter_flows);
priv->inter_flows = NULL;
}
@ -5572,7 +5577,8 @@ flow_fdir_filter_add(struct rte_eth_dev *dev,
uint32_t flow_idx;
int ret;
fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
fdir_flow = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*fdir_flow), 0,
SOCKET_ID_ANY);
if (!fdir_flow) {
rte_errno = ENOMEM;
return -rte_errno;
@ -5585,8 +5591,9 @@ flow_fdir_filter_add(struct rte_eth_dev *dev,
rte_errno = EEXIST;
goto error;
}
priv_fdir_flow = rte_zmalloc(__func__, sizeof(struct mlx5_fdir_flow),
0);
priv_fdir_flow = mlx5_malloc(MLX5_MEM_ZERO,
sizeof(struct mlx5_fdir_flow),
0, SOCKET_ID_ANY);
if (!priv_fdir_flow) {
rte_errno = ENOMEM;
goto error;
@ -5605,8 +5612,8 @@ flow_fdir_filter_add(struct rte_eth_dev *dev,
dev->data->port_id, (void *)flow);
return 0;
error:
rte_free(priv_fdir_flow);
rte_free(fdir_flow);
mlx5_free(priv_fdir_flow);
mlx5_free(fdir_flow);
return -rte_errno;
}
@ -5646,8 +5653,8 @@ flow_fdir_filter_delete(struct rte_eth_dev *dev,
LIST_REMOVE(priv_fdir_flow, next);
flow_idx = priv_fdir_flow->rix_flow;
flow_list_destroy(dev, &priv->flows, flow_idx);
rte_free(priv_fdir_flow->fdir);
rte_free(priv_fdir_flow);
mlx5_free(priv_fdir_flow->fdir);
mlx5_free(priv_fdir_flow);
DRV_LOG(DEBUG, "port %u deleted FDIR flow %u",
dev->data->port_id, flow_idx);
return 0;
@ -5692,8 +5699,8 @@ flow_fdir_filter_flush(struct rte_eth_dev *dev)
priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
LIST_REMOVE(priv_fdir_flow, next);
flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow);
rte_free(priv_fdir_flow->fdir);
rte_free(priv_fdir_flow);
mlx5_free(priv_fdir_flow->fdir);
mlx5_free(priv_fdir_flow);
}
}

File: drivers/net/mlx5/mlx5_flow_dv.c

@ -32,6 +32,7 @@
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
@ -2615,7 +2616,7 @@ flow_dv_encap_decap_resource_register
(sh->ctx, domain, cache_resource,
&cache_resource->action);
if (ret) {
rte_free(cache_resource);
mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
@ -2772,7 +2773,7 @@ flow_dv_port_id_action_resource_register
(priv->sh->fdb_domain, resource->port_id,
&cache_resource->action);
if (ret) {
rte_free(cache_resource);
mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
@ -2851,7 +2852,7 @@ flow_dv_push_vlan_action_resource_register
(domain, resource->vlan_tag,
&cache_resource->action);
if (ret) {
rte_free(cache_resource);
mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
@ -4024,8 +4025,9 @@ flow_dv_modify_hdr_resource_register
}
}
/* Register new modify-header resource. */
cache_resource = rte_calloc(__func__, 1,
sizeof(*cache_resource) + actions_len, 0);
cache_resource = mlx5_malloc(MLX5_MEM_ZERO,
sizeof(*cache_resource) + actions_len, 0,
SOCKET_ID_ANY);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@ -4036,7 +4038,7 @@ flow_dv_modify_hdr_resource_register
(sh->ctx, ns, cache_resource,
actions_len, &cache_resource->action);
if (ret) {
rte_free(cache_resource);
mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
@ -4175,7 +4177,8 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
MLX5_COUNTERS_PER_POOL +
sizeof(struct mlx5_counter_stats_raw)) * raws_n +
sizeof(struct mlx5_counter_stats_mem_mng);
uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, sysconf(_SC_PAGESIZE),
SOCKET_ID_ANY);
int i;
if (!mem) {
@ -4188,7 +4191,7 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
IBV_ACCESS_LOCAL_WRITE);
if (!mem_mng->umem) {
rte_errno = errno;
rte_free(mem);
mlx5_free(mem);
return NULL;
}
mkey_attr.addr = (uintptr_t)mem;
@ -4207,7 +4210,7 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
if (!mem_mng->dm) {
mlx5_glue->devx_umem_dereg(mem_mng->umem);
rte_errno = errno;
rte_free(mem);
mlx5_free(mem);
return NULL;
}
mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
@ -4244,7 +4247,7 @@ flow_dv_container_resize(struct rte_eth_dev *dev,
void *old_pools = cont->pools;
uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
void *pools = rte_calloc(__func__, 1, mem_size, 0);
void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
if (!pools) {
rte_errno = ENOMEM;
@ -4263,7 +4266,7 @@ flow_dv_container_resize(struct rte_eth_dev *dev,
mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
if (!mem_mng) {
rte_free(pools);
mlx5_free(pools);
return -ENOMEM;
}
for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
@ -4278,7 +4281,7 @@ flow_dv_container_resize(struct rte_eth_dev *dev,
cont->pools = pools;
rte_spinlock_unlock(&cont->resize_sl);
if (old_pools)
rte_free(old_pools);
mlx5_free(old_pools);
return 0;
}
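
flow_dv_container_resize() above is a copy-and-swap grow: build the
larger pools array off to the side, publish it under the resize
spinlock, and free the old array afterwards, so readers never observe
a partially copied table. A generic, self-contained sketch of the same
pattern (all names hypothetical):

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <mlx5_malloc.h>
#include <rte_errno.h>
#include <rte_memory.h>	/* SOCKET_ID_ANY */
#include <rte_spinlock.h>

static int
grow_ptr_array(void ***arr, uint32_t *cap, uint32_t used, uint32_t step,
	       rte_spinlock_t *sl)
{
	void **repl = mlx5_malloc(MLX5_MEM_ZERO,
				  (*cap + step) * sizeof(void *),
				  0, SOCKET_ID_ANY);
	void **old;

	if (repl == NULL) {
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	if (used)
		memcpy(repl, *arr, used * sizeof(void *));
	rte_spinlock_lock(sl);
	old = *arr;
	*arr = repl;
	*cap += step;
	rte_spinlock_unlock(sl);
	if (old != NULL)
		mlx5_free(old);
	return 0;
}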
@ -4367,7 +4370,7 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
pool = rte_calloc(__func__, 1, size, 0);
pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
if (!pool) {
rte_errno = ENOMEM;
return NULL;
@ -7585,7 +7588,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
}
}
/* Register new matcher. */
cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
SOCKET_ID_ANY);
if (!cache_matcher) {
flow_dv_tbl_resource_release(dev, tbl);
return rte_flow_error_set(error, ENOMEM,
@ -7601,7 +7605,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
&cache_matcher->matcher_object);
if (ret) {
rte_free(cache_matcher);
mlx5_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
flow_dv_tbl_resource_release(dev, tbl);
#endif
@ -7676,7 +7680,7 @@ flow_dv_tag_resource_register
ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
&cache_resource->action);
if (ret) {
rte_free(cache_resource);
mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
@ -7685,7 +7689,7 @@ flow_dv_tag_resource_register
rte_atomic32_inc(&cache_resource->refcnt);
if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
mlx5_flow_os_destroy_flow_action(cache_resource->action);
rte_free(cache_resource);
mlx5_free(cache_resource);
return rte_flow_error_set(error, EEXIST,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot insert tag");
@ -8908,7 +8912,7 @@ flow_dv_matcher_release(struct rte_eth_dev *dev,
LIST_REMOVE(matcher, next);
/* table ref-- in release interface. */
flow_dv_tbl_resource_release(dev, matcher->tbl);
rte_free(matcher);
mlx5_free(matcher);
DRV_LOG(DEBUG, "port %u matcher %p: removed",
dev->data->port_id, (void *)matcher);
return 0;
@ -9050,7 +9054,7 @@ flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->action));
LIST_REMOVE(cache_resource, next);
rte_free(cache_resource);
mlx5_free(cache_resource);
DRV_LOG(DEBUG, "modify-header resource %p: removed",
(void *)cache_resource);
return 0;
@ -9423,7 +9427,7 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
if (mtd->drop_actn)
claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
rte_free(mtd);
mlx5_free(mtd);
return 0;
}
@ -9556,7 +9560,7 @@ flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
rte_errno = ENOTSUP;
return NULL;
}
mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
if (!mtb) {
DRV_LOG(ERR, "Failed to allocate memory for meter.");
return NULL;

File: drivers/net/mlx5/mlx5_flow_meter.c

@ -10,6 +10,7 @@
#include <rte_mtr_driver.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_flow.h"
@ -356,8 +357,8 @@ mlx5_flow_meter_profile_add(struct rte_eth_dev *dev,
if (ret)
return ret;
/* Meter profile memory allocation. */
fmp = rte_calloc(__func__, 1, sizeof(struct mlx5_flow_meter_profile),
RTE_CACHE_LINE_SIZE);
fmp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flow_meter_profile),
RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (fmp == NULL)
return -rte_mtr_error_set(error, ENOMEM,
RTE_MTR_ERROR_TYPE_UNSPECIFIED,
@ -374,7 +375,7 @@ mlx5_flow_meter_profile_add(struct rte_eth_dev *dev,
TAILQ_INSERT_TAIL(fmps, fmp, next);
return 0;
error:
rte_free(fmp);
mlx5_free(fmp);
return ret;
}
@ -417,7 +418,7 @@ mlx5_flow_meter_profile_delete(struct rte_eth_dev *dev,
NULL, "Meter profile is in use.");
/* Remove from list. */
TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next);
rte_free(fmp);
mlx5_free(fmp);
return 0;
}
@ -1286,7 +1287,7 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
MLX5_ASSERT(!fmp->ref_cnt);
/* Remove from list. */
TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next);
rte_free(fmp);
mlx5_free(fmp);
}
return 0;
}

File: drivers/net/mlx5/mlx5_flow_verbs.c

@ -28,6 +28,7 @@
#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
@ -188,14 +189,15 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
/* Resize the container pool array. */
size = sizeof(struct mlx5_flow_counter_pool *) *
(n_valid + MLX5_CNT_CONTAINER_RESIZE);
pools = rte_zmalloc(__func__, size, 0);
pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
SOCKET_ID_ANY);
if (!pools)
return 0;
if (n_valid) {
memcpy(pools, cont->pools,
sizeof(struct mlx5_flow_counter_pool *) *
n_valid);
rte_free(cont->pools);
mlx5_free(cont->pools);
}
cont->pools = pools;
cont->n += MLX5_CNT_CONTAINER_RESIZE;
@ -203,7 +205,7 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
/* Allocate memory for new pool*/
size = sizeof(*pool) + (sizeof(*cnt_ext) + sizeof(*cnt)) *
MLX5_COUNTERS_PER_POOL;
pool = rte_calloc(__func__, 1, size, 0);
pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
if (!pool)
return 0;
pool->type |= CNT_POOL_TYPE_EXT;

File: drivers/net/mlx5/mlx5_mp.c

@ -12,6 +12,7 @@
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_rxtx.h"
@ -181,7 +182,7 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx5_mp_req_type type)
}
}
exit:
free(mp_rep.msgs);
mlx5_free(mp_rep.msgs);
}
/**

View File

@ -21,6 +21,8 @@
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
@ -57,8 +59,10 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
rte_errno = EINVAL;
return -rte_errno;
}
priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key,
rss_conf->rss_key_len, 0);
priv->rss_conf.rss_key = mlx5_realloc(priv->rss_conf.rss_key,
MLX5_MEM_RTE,
rss_conf->rss_key_len,
0, SOCKET_ID_ANY);
if (!priv->rss_conf.rss_key) {
rte_errno = ENOMEM;
return -rte_errno;
@ -131,8 +135,9 @@ mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
if (priv->reta_idx_n == reta_size)
return 0;
mem = rte_realloc(priv->reta_idx,
reta_size * sizeof((*priv->reta_idx)[0]), 0);
mem = mlx5_realloc(priv->reta_idx, MLX5_MEM_RTE,
reta_size * sizeof((*priv->reta_idx)[0]), 0,
SOCKET_ID_ANY);
if (!mem) {
rte_errno = ENOMEM;
return -rte_errno;

File: drivers/net/mlx5/mlx5_rxq.c

@ -31,6 +31,7 @@
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
@ -734,7 +735,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
if (!dev->data->dev_conf.intr_conf.rxq)
return 0;
mlx5_rx_intr_vec_disable(dev);
intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
intr_handle->intr_vec = mlx5_malloc(0,
n * sizeof(intr_handle->intr_vec[0]),
0, SOCKET_ID_ANY);
if (intr_handle->intr_vec == NULL) {
DRV_LOG(ERR,
"port %u failed to allocate memory for interrupt"
@ -831,7 +834,7 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
free:
rte_intr_free_epoll_fd(intr_handle);
if (intr_handle->intr_vec)
free(intr_handle->intr_vec);
mlx5_free(intr_handle->intr_vec);
intr_handle->nb_efd = 0;
intr_handle->intr_vec = NULL;
}
@ -2187,8 +2190,8 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
struct mlx5_ind_table_obj *ind_tbl;
unsigned int i = 0, j = 0, k = 0;
ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
queues_n * sizeof(uint16_t), 0);
ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
if (!ind_tbl) {
rte_errno = ENOMEM;
return NULL;
@ -2231,8 +2234,9 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
log2above(queues_n) :
log2above(priv->config.ind_table_max_size));
rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
rqt_n * sizeof(uint32_t), 0);
rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
rqt_n * sizeof(uint32_t), 0,
SOCKET_ID_ANY);
if (!rqt_attr) {
DRV_LOG(ERR, "port %u cannot allocate RQT resources",
dev->data->port_id);
@ -2254,7 +2258,7 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
rqt_attr);
rte_free(rqt_attr);
mlx5_free(rqt_attr);
if (!ind_tbl->rqt) {
DRV_LOG(ERR, "port %u cannot create DevX RQT",
dev->data->port_id);
@ -2269,7 +2273,7 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
error:
for (j = 0; j < i; j++)
mlx5_rxq_release(dev, ind_tbl->queues[j]);
rte_free(ind_tbl);
mlx5_free(ind_tbl);
DEBUG("port %u cannot create indirection table", dev->data->port_id);
return NULL;
}
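
mlx5_ind_table_obj_new() above, like the hash list and the flow split
buffers elsewhere in this commit, appends a variable-length tail to
the structure inside a single zeroed allocation, so one mlx5_free()
releases everything. A self-contained sketch of the idiom (struct and
helper are hypothetical):

#include <stdint.h>
#include <string.h>
#include <mlx5_malloc.h>
#include <rte_memory.h>	/* SOCKET_ID_ANY */

struct queue_list {
	unsigned int n;
	uint16_t queues[];	/* tail lives in the same allocation */
};

static struct queue_list *
queue_list_new(const uint16_t *queues, unsigned int n)
{
	struct queue_list *ql = mlx5_malloc(MLX5_MEM_ZERO,
					    sizeof(*ql) +
					    n * sizeof(queues[0]),
					    0, SOCKET_ID_ANY);

	if (ql == NULL)
		return NULL;
	ql->n = n;
	memcpy(ql->queues, queues, n * sizeof(queues[0]));
	return ql;	/* released later by a single mlx5_free(ql) */
}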
@ -2339,7 +2343,7 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
LIST_REMOVE(ind_tbl, next);
rte_free(ind_tbl);
mlx5_free(ind_tbl);
return 0;
}
return 1;
@ -2761,7 +2765,7 @@ mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
rte_errno = errno;
goto error;
}
rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
if (!rxq) {
DEBUG("port %u cannot allocate drop Rx queue memory",
dev->data->port_id);
@ -2799,7 +2803,7 @@ mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
claim_zero(mlx5_glue->destroy_wq(rxq->wq));
if (rxq->cq)
claim_zero(mlx5_glue->destroy_cq(rxq->cq));
rte_free(rxq);
mlx5_free(rxq);
priv->drop_queue.rxq = NULL;
}
@ -2837,7 +2841,8 @@ mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
rte_errno = errno;
goto error;
}
ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl), 0,
SOCKET_ID_ANY);
if (!ind_tbl) {
rte_errno = ENOMEM;
goto error;
@ -2863,7 +2868,7 @@ mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
mlx5_rxq_obj_drop_release(dev);
rte_free(ind_tbl);
mlx5_free(ind_tbl);
priv->drop_queue.hrxq->ind_table = NULL;
}
@ -2888,7 +2893,7 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
return priv->drop_queue.hrxq;
}
hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
if (!hrxq) {
DRV_LOG(WARNING,
"port %u cannot allocate memory for drop queue",
@ -2945,7 +2950,7 @@ error:
mlx5_ind_table_obj_drop_release(dev);
if (hrxq) {
priv->drop_queue.hrxq = NULL;
rte_free(hrxq);
mlx5_free(hrxq);
}
return NULL;
}
@ -2968,7 +2973,7 @@ mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
#endif
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
mlx5_ind_table_obj_drop_release(dev);
rte_free(hrxq);
mlx5_free(hrxq);
priv->drop_queue.hrxq = NULL;
}
}

File: drivers/net/mlx5/mlx5_utils.c

@ -5,6 +5,8 @@
#include <rte_malloc.h>
#include <rte_hash_crc.h>
#include <mlx5_malloc.h>
#include "mlx5_utils.h"
struct mlx5_hlist *
@ -27,7 +29,8 @@ mlx5_hlist_create(const char *name, uint32_t size)
alloc_size = sizeof(struct mlx5_hlist) +
sizeof(struct mlx5_hlist_head) * act_size;
/* Using zmalloc, then no need to initialize the heads. */
h = rte_zmalloc(name, alloc_size, RTE_CACHE_LINE_SIZE);
h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
SOCKET_ID_ANY);
if (!h) {
DRV_LOG(ERR, "No memory for hash list %s creation",
name ? name : "None");
@ -112,10 +115,10 @@ mlx5_hlist_destroy(struct mlx5_hlist *h,
if (cb)
cb(entry, ctx);
else
rte_free(entry);
mlx5_free(entry);
}
}
rte_free(h);
mlx5_free(h);
}
static inline void
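
Because the else branch above now frees entries with mlx5_free(), a
caller that lets mlx5_hlist_destroy() reclaim its entries (cb == NULL)
must have allocated them with the unified allocator. A hypothetical
entry type illustrating that contract (the embedded-first layout and
field names follow the driver's own hash list users):

#include <errno.h>
#include <stdint.h>
#include <mlx5_malloc.h>
#include <rte_memory.h>	/* SOCKET_ID_ANY */
#include "mlx5_utils.h"	/* mlx5_hlist API */

struct my_entry {
	struct mlx5_hlist_entry entry;	/* embedded first for list ops */
	uint32_t payload;
};

static int
my_entry_add(struct mlx5_hlist *h, uint64_t key, uint32_t payload)
{
	struct my_entry *e = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*e), 0,
					 SOCKET_ID_ANY);

	if (e == NULL)
		return -ENOMEM;
	e->entry.key = key;
	e->payload = payload;
	if (mlx5_hlist_insert(h, &e->entry)) {
		mlx5_free(e);	/* key already present */
		return -EEXIST;
	}
	return 0;	/* safe to destroy the list with cb == NULL */
}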
@ -193,16 +196,17 @@ mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
(cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
return NULL;
pool = rte_zmalloc("mlx5_ipool", sizeof(*pool) + cfg->grow_trunk *
sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE);
pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
SOCKET_ID_ANY);
if (!pool)
return NULL;
pool->cfg = *cfg;
if (!pool->cfg.trunk_size)
pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
if (!cfg->malloc && !cfg->free) {
pool->cfg.malloc = rte_malloc_socket;
pool->cfg.free = rte_free;
pool->cfg.malloc = mlx5_malloc;
pool->cfg.free = mlx5_free;
}
pool->free_list = TRUNK_INVALID;
if (pool->cfg.need_lock)
@ -237,10 +241,9 @@ mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
int n_grow = pool->n_trunk_valid ? pool->n_trunk :
RTE_CACHE_LINE_SIZE / sizeof(void *);
p = pool->cfg.malloc(pool->cfg.type,
(pool->n_trunk_valid + n_grow) *
sizeof(struct mlx5_indexed_trunk *),
RTE_CACHE_LINE_SIZE, rte_socket_id());
p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
sizeof(struct mlx5_indexed_trunk *),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (!p)
return -ENOMEM;
if (pool->trunks)
@ -268,7 +271,7 @@ mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
/* rte_bitmap requires memory cacheline aligned. */
trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
trunk_size += bmp_size;
trunk = pool->cfg.malloc(pool->cfg.type, trunk_size,
trunk = pool->cfg.malloc(0, trunk_size,
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (!trunk)
return -ENOMEM;
@ -464,7 +467,7 @@ mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
if (!pool->trunks)
pool->cfg.free(pool->trunks);
mlx5_ipool_unlock(pool);
rte_free(pool);
mlx5_free(pool);
return 0;
}
@ -493,15 +496,16 @@ mlx5_l3t_create(enum mlx5_l3t_type type)
.grow_shift = 1,
.need_lock = 0,
.release_mem_en = 1,
.malloc = rte_malloc_socket,
.free = rte_free,
.malloc = mlx5_malloc,
.free = mlx5_free,
};
if (type >= MLX5_L3T_TYPE_MAX) {
rte_errno = EINVAL;
return NULL;
}
tbl = rte_zmalloc(NULL, sizeof(struct mlx5_l3t_tbl), 1);
tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
SOCKET_ID_ANY);
if (!tbl) {
rte_errno = ENOMEM;
return NULL;
@ -532,7 +536,7 @@ mlx5_l3t_create(enum mlx5_l3t_type type)
tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
if (!tbl->eip) {
rte_errno = ENOMEM;
rte_free(tbl);
mlx5_free(tbl);
tbl = NULL;
}
return tbl;
@ -565,17 +569,17 @@ mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
break;
}
MLX5_ASSERT(!m_tbl->ref_cnt);
rte_free(g_tbl->tbl[i]);
mlx5_free(g_tbl->tbl[i]);
g_tbl->tbl[i] = 0;
if (!(--g_tbl->ref_cnt))
break;
}
MLX5_ASSERT(!g_tbl->ref_cnt);
rte_free(tbl->tbl);
mlx5_free(tbl->tbl);
tbl->tbl = 0;
}
mlx5_ipool_destroy(tbl->eip);
rte_free(tbl);
mlx5_free(tbl);
}
uint32_t
@ -667,11 +671,11 @@ mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
NULL;
if (!(--m_tbl->ref_cnt)) {
rte_free(m_tbl);
mlx5_free(m_tbl);
g_tbl->tbl
[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] = NULL;
if (!(--g_tbl->ref_cnt)) {
rte_free(g_tbl);
mlx5_free(g_tbl);
tbl->tbl = 0;
}
}
@ -693,8 +697,10 @@ mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
/* Check the global table, create it if empty. */
g_tbl = tbl->tbl;
if (!g_tbl) {
g_tbl = rte_zmalloc(NULL, sizeof(struct mlx5_l3t_level_tbl) +
sizeof(void *) * MLX5_L3T_GT_SIZE, 1);
g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
sizeof(struct mlx5_l3t_level_tbl) +
sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
SOCKET_ID_ANY);
if (!g_tbl) {
rte_errno = ENOMEM;
return -1;
@ -707,8 +713,10 @@ mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
*/
m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
if (!m_tbl) {
m_tbl = rte_zmalloc(NULL, sizeof(struct mlx5_l3t_level_tbl) +
sizeof(void *) * MLX5_L3T_MT_SIZE, 1);
m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
sizeof(struct mlx5_l3t_level_tbl) +
sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
SOCKET_ID_ANY);
if (!m_tbl) {
rte_errno = ENOMEM;
return -1;
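
All of the three-level table lookups above decompose the 32-bit entry
index the same way; spelled out below (the GT/MT shift and mask
constants appear in the hunks, the entry-level mask name is an
assumption):

#include <stdint.h>
#include "mlx5_utils.h"	/* MLX5_L3T_* constants */

static void
l3t_slots(uint32_t idx, uint32_t *g, uint32_t *m, uint32_t *e)
{
	*g = (idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK; /* global */
	*m = (idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK; /* middle */
	*e = idx & MLX5_L3T_ET_MASK; /* entry; constant name assumed */
}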

File: drivers/net/mlx5/mlx5_utils.h

@ -193,7 +193,7 @@ struct mlx5_indexed_pool_config {
/* Lock is needed for multiple thread usage. */
uint32_t release_mem_en:1; /* Rlease trunk when it is free. */
const char *type; /* Memory allocate type name. */
void *(*malloc)(const char *type, size_t size, unsigned int align,
void *(*malloc)(uint32_t flags, size_t size, unsigned int align,
int socket);
/* User defined memory allocator. */
void (*free)(void *addr); /* User defined memory release. */
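
This hook signature change is why the ipool configuration tables in
mlx5.c and mlx5_utils.c can now plug in mlx5_malloc/mlx5_free
directly: the first parameter becomes a flags word instead of the old
type-name string. A toy user-defined allocator pair matching the new
signatures (names hypothetical; after this patch mlx5_ipool_grow()
invokes the hook as cfg.malloc(0, size, RTE_CACHE_LINE_SIZE,
rte_socket_id())):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <mlx5_malloc.h>	/* MLX5_MEM_ZERO */

static void *
my_ipool_malloc(uint32_t flags, size_t size, unsigned int align, int socket)
{
	void *p = NULL;

	(void)socket;	/* NUMA hint, ignored by this toy allocator */
	if (posix_memalign(&p, align ? align : sizeof(void *), size))
		return NULL;
	if (flags & MLX5_MEM_ZERO)
		memset(p, 0, size);
	return p;
}

static void
my_ipool_free(void *addr)
{
	free(addr);
}

/*
 * Plugged in via:
 *	struct mlx5_indexed_pool_config cfg = {
 *		...
 *		.malloc = my_ipool_malloc,
 *		.free = my_ipool_free,
 *	};
 */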

File: drivers/net/mlx5/mlx5_vlan.c

@ -33,6 +33,7 @@
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_nl.h>
#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_autoconf.h"
@ -288,7 +289,8 @@ mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
*/
return NULL;
}
vmwa = rte_zmalloc(__func__, sizeof(*vmwa), sizeof(uint32_t));
vmwa = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*vmwa), sizeof(uint32_t),
SOCKET_ID_ANY);
if (!vmwa) {
DRV_LOG(WARNING,
"Can not allocate memory"
@ -300,7 +302,7 @@ mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
DRV_LOG(WARNING,
"Can not create Netlink socket"
" for VLAN workaround context");
rte_free(vmwa);
mlx5_free(vmwa);
return NULL;
}
vmwa->vf_ifindex = ifindex;
@ -323,5 +325,5 @@ void mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *vmwa)
}
if (vmwa->nl_socket >= 0)
close(vmwa->nl_socket);
rte_free(vmwa);
mlx5_free(vmwa);
}