common/mlx5: add per-lcore sharing flag in object list

Without the lcores_share flag, the mlx5 PMD was sharing the rdma-core
objects between all lcores.

Having the lcores_share flag disabled means that each lcore will have
its own objects, which will eventually lead to increased
insertion/deletion rates.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>

Author: Suanming Mou, 2021-07-13 11:44:48 +03:00
Committed by: Raslan Darawsheh
parent 9c373c524b
commit d03b786005
5 changed files with 56 additions and 23 deletions
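
For illustration only (not part of the patch), here is a minimal sketch of how a caller could construct both flavors of list with the new signature. The foo_* callbacks, the enclosing function and the header path are hypothetical placeholders; the callbacks are assumed to be defined elsewhere with the mlx5_list callback types.

    #include <stdbool.h>

    #include "mlx5_common_utils.h"  /* assumed location of the mlx5 list API */

    /* The foo_* callbacks stand in for real implementations of the
     * mlx5_list create/match/remove/clone/clone_free callback types.
     */
    static struct mlx5_list *shared_list;
    static struct mlx5_list *private_list;

    static void
    example_create_lists(void *ctx)
    {
            /* lcores_share == true: one global entry per object, the
             * per-lcore caches hold clones and the global list is
             * protected by the list rwlock.
             */
            shared_list = mlx5_list_create("example_shared", ctx, true,
                                           foo_create_cb, foo_match_cb,
                                           foo_remove_cb, foo_clone_cb,
                                           foo_clone_free_cb);
            /* lcores_share == false: each lcore keeps its own entries,
             * so registration never takes the global lock and duplicates
             * of the same object may exist across lcores.
             */
            private_list = mlx5_list_create("example_private", ctx, false,
                                            foo_create_cb, foo_match_cb,
                                            foo_remove_cb, foo_clone_cb,
                                            foo_clone_free_cb);
    }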


@@ -14,7 +14,7 @@
 /********************* mlx5 list ************************/
 struct mlx5_list *
-mlx5_list_create(const char *name, void *ctx,
+mlx5_list_create(const char *name, void *ctx, bool lcores_share,
                  mlx5_list_create_cb cb_create,
                  mlx5_list_match_cb cb_match,
                  mlx5_list_remove_cb cb_remove,
@@ -35,6 +35,7 @@ mlx5_list_create(const char *name, void *ctx,
         if (name)
                 snprintf(list->name, sizeof(list->name), "%s", name);
         list->ctx = ctx;
+        list->lcores_share = lcores_share;
         list->cb_create = cb_create;
         list->cb_match = cb_match;
         list->cb_remove = cb_remove;
@@ -119,7 +120,10 @@ __list_cache_clean(struct mlx5_list *list, int lcore_index)
                 if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
                         LIST_REMOVE(entry, next);
-                        list->cb_clone_free(list, entry);
+                        if (list->lcores_share)
+                                list->cb_clone_free(list, entry);
+                        else
+                                list->cb_remove(list, entry);
                         inv_cnt--;
                 }
                 entry = nentry;
@@ -129,7 +133,7 @@ __list_cache_clean(struct mlx5_list *list, int lcore_index)
 struct mlx5_list_entry *
 mlx5_list_register(struct mlx5_list *list, void *ctx)
 {
-        struct mlx5_list_entry *entry, *local_entry;
+        struct mlx5_list_entry *entry = NULL, *local_entry;
         volatile uint32_t prev_gen_cnt = 0;
         int lcore_index = rte_lcore_index(rte_lcore_id());
@@ -145,25 +149,36 @@ mlx5_list_register(struct mlx5_list *list, void *ctx)
         local_entry = __list_lookup(list, lcore_index, ctx, true);
         if (local_entry)
                 return local_entry;
-        /* 2. Lookup with read lock on global list, reuse if found. */
-        rte_rwlock_read_lock(&list->lock);
-        entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);
-        if (likely(entry)) {
+        if (list->lcores_share) {
+                /* 2. Lookup with read lock on global list, reuse if found. */
+                rte_rwlock_read_lock(&list->lock);
+                entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);
+                if (likely(entry)) {
+                        rte_rwlock_read_unlock(&list->lock);
+                        return mlx5_list_cache_insert(list, lcore_index, entry,
+                                                      ctx);
+                }
+                prev_gen_cnt = list->gen_cnt;
                 rte_rwlock_read_unlock(&list->lock);
-                return mlx5_list_cache_insert(list, lcore_index, entry, ctx);
         }
-        prev_gen_cnt = list->gen_cnt;
-        rte_rwlock_read_unlock(&list->lock);
         /* 3. Prepare new entry for global list and for cache. */
         entry = list->cb_create(list, entry, ctx);
         if (unlikely(!entry))
                 return NULL;
+        entry->ref_cnt = 1u;
+        if (!list->lcores_share) {
+                entry->lcore_idx = (uint32_t)lcore_index;
+                LIST_INSERT_HEAD(&list->cache[lcore_index].h, entry, next);
+                __atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
+                DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
+                        list->name, lcore_index, (void *)entry, entry->ref_cnt);
+                return entry;
+        }
         local_entry = list->cb_clone(list, entry, ctx);
         if (unlikely(!local_entry)) {
                 list->cb_remove(list, entry);
                 return NULL;
         }
-        entry->ref_cnt = 1u;
         local_entry->ref_cnt = 1u;
         local_entry->gentry = entry;
         local_entry->lcore_idx = (uint32_t)lcore_index;
@@ -207,13 +222,22 @@ mlx5_list_unregister(struct mlx5_list *list,
         MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
         if (entry->lcore_idx == (uint32_t)lcore_idx) {
                 LIST_REMOVE(entry, next);
-                list->cb_clone_free(list, entry);
+                if (list->lcores_share)
+                        list->cb_clone_free(list, entry);
+                else
+                        list->cb_remove(list, entry);
         } else if (likely(lcore_idx != -1)) {
                 __atomic_add_fetch(&list->cache[entry->lcore_idx].inv_cnt, 1,
                                    __ATOMIC_RELAXED);
         } else {
                 return 0;
         }
+        if (!list->lcores_share) {
+                __atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
+                DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
+                        list->name, (void *)entry);
+                return 0;
+        }
         if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
                 return 1;
         rte_rwlock_write_lock(&list->lock);


@@ -100,11 +100,8 @@ typedef struct mlx5_list_entry *(*mlx5_list_create_cb)
  */
 struct mlx5_list {
         char name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */
-        volatile uint32_t gen_cnt;
-        /* List modification will update generation count. */
-        volatile uint32_t count; /* number of entries in list. */
         void *ctx; /* user objects target to callback. */
-        rte_rwlock_t lock; /* read/write lock. */
+        bool lcores_share; /* Whether to share objects between the lcores. */
         mlx5_list_create_cb cb_create; /**< entry create callback. */
         mlx5_list_match_cb cb_match; /**< entry match callback. */
         mlx5_list_remove_cb cb_remove; /**< entry remove callback. */
@@ -112,17 +109,27 @@ struct mlx5_list {
         mlx5_list_clone_free_cb cb_clone_free;
         struct mlx5_list_cache cache[RTE_MAX_LCORE + 1];
         /* Lcore cache, last index is the global cache. */
+        volatile uint32_t gen_cnt; /* List modification may update it. */
+        volatile uint32_t count; /* number of entries in list. */
+        rte_rwlock_t lock; /* read/write lock. */
 };

 /**
  * Create a mlx5 list.
  *
+ * Actions created by SW-steering are only memory, so duplicate objects
+ * are acceptable for them.  Such lists do not need to check whether an
+ * equal object already exists in the sub-lists of other lcores;
+ * searching only the local list is more efficient.
+ *
  * @param list
  *   Pointer to the hast list table.
  * @param name
  *   Name of the mlx5 list.
  * @param ctx
  *   Pointer to the list context data.
+ * @param lcores_share
+ *   Whether to share objects between the lcores.
  * @param cb_create
  *   Callback function for entry create.
  * @param cb_match
@@ -134,6 +141,7 @@ struct mlx5_list {
  */
 __rte_internal
 struct mlx5_list *mlx5_list_create(const char *name, void *ctx,
+                                   bool lcores_share,
                                    mlx5_list_create_cb cb_create,
                                    mlx5_list_match_cb cb_match,
                                    mlx5_list_remove_cb cb_remove,
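
The comment added above motivates the non-shared mode. The fragment below (hypothetical caller code, not part of the patch) shows how such a list would be used from a datapath lcore: with lcores_share set to false, mlx5_list_register() only walks and updates the calling lcore's cache and never takes the global read/write lock. The names private_list and lookup_ctx are placeholders.

    struct mlx5_list_entry *entry;

    /* private_list was created with lcores_share == false; lookup_ctx is
     * whatever the list callbacks expect.  Both names are placeholders.
     */
    entry = mlx5_list_register(private_list, &lookup_ctx);
    if (entry == NULL)
            return NULL; /* cb_create failed, nothing was inserted. */
    /* ... use the object referenced by the entry ... */
    mlx5_list_unregister(private_list, entry);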


@@ -347,7 +347,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
         /* Init port id action list. */
         snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
-        sh->port_id_action_list = mlx5_list_create(s, sh,
+        sh->port_id_action_list = mlx5_list_create(s, sh, true,
                                                    flow_dv_port_id_create_cb,
                                                    flow_dv_port_id_match_cb,
                                                    flow_dv_port_id_remove_cb,
@@ -357,7 +357,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
                 goto error;
         /* Init push vlan action list. */
         snprintf(s, sizeof(s), "%s_push_vlan_action_list", sh->ibdev_name);
-        sh->push_vlan_action_list = mlx5_list_create(s, sh,
+        sh->push_vlan_action_list = mlx5_list_create(s, sh, true,
                                                      flow_dv_push_vlan_create_cb,
                                                      flow_dv_push_vlan_match_cb,
                                                      flow_dv_push_vlan_remove_cb,
@@ -367,7 +367,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
                 goto error;
         /* Init sample action list. */
         snprintf(s, sizeof(s), "%s_sample_action_list", sh->ibdev_name);
-        sh->sample_action_list = mlx5_list_create(s, sh,
+        sh->sample_action_list = mlx5_list_create(s, sh, true,
                                                   flow_dv_sample_create_cb,
                                                   flow_dv_sample_match_cb,
                                                   flow_dv_sample_remove_cb,
@@ -377,7 +377,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
                 goto error;
         /* Init dest array action list. */
         snprintf(s, sizeof(s), "%s_dest_array_list", sh->ibdev_name);
-        sh->dest_array_list = mlx5_list_create(s, sh,
+        sh->dest_array_list = mlx5_list_create(s, sh, true,
                                                flow_dv_dest_array_create_cb,
                                                flow_dv_dest_array_match_cb,
                                                flow_dv_dest_array_remove_cb,
@@ -1836,7 +1836,8 @@ err_secondary:
                 err = ENOTSUP;
                 goto error;
         }
-        priv->hrxqs = mlx5_list_create("hrxq", eth_dev, mlx5_hrxq_create_cb,
+        priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
+                                       mlx5_hrxq_create_cb,
                                        mlx5_hrxq_match_cb,
                                        mlx5_hrxq_remove_cb,
                                        mlx5_hrxq_clone_cb,


@@ -10117,7 +10117,7 @@ flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
               key.level, key.id);
-        tbl_data->matchers = mlx5_list_create(matcher_name, sh,
+        tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
                                               flow_dv_matcher_create_cb,
                                               flow_dv_matcher_match_cb,
                                               flow_dv_matcher_remove_cb,


@@ -610,7 +610,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                 err = ENOTSUP;
                 goto error;
         }
-        priv->hrxqs = mlx5_list_create("hrxq", eth_dev,
+        priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
                                        mlx5_hrxq_create_cb, mlx5_hrxq_match_cb,
                                        mlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb,
                                        mlx5_hrxq_clone_free_cb);