net/mlx5: make Rx queue thread safe

This commit applies the cache linked list to the hash Rx queue (hrxq)
management to make it thread safe: the per-device hrxq index list is
replaced by an mlx5_cache_list, and hrxq lookup, creation, and release now
go through the list's create/match/remove callbacks.
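
In short, the new flow is (a condensed sketch assembled from the hunks
below; declarations, error handling, and the standalone shared-action path
are omitted):

    /* Device spawn: register the hrxq cache list with its callbacks. */
    mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
                         mlx5_hrxq_create_cb,
                         mlx5_hrxq_match_cb,
                         mlx5_hrxq_remove_cb);

    /* Flow creation: fill the RSS descriptor and look the hrxq up by it.
     * mlx5_hrxq_get() goes through mlx5_cache_register(), which reuses a
     * matching entry (mlx5_hrxq_match_cb) or creates a new one
     * (mlx5_hrxq_create_cb) under the list protection. */
    rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
    rss_desc->hash_fields = dev_flow->hash_fields;
    rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
    rss_desc->standalone = false;
    hrxq_idx = mlx5_hrxq_get(dev, rss_desc);

    /* Release: non-standalone hrxqs go through mlx5_cache_unregister(),
     * which invokes mlx5_hrxq_remove_cb() when the last user is gone. */
    mlx5_hrxq_release(dev, hrxq_idx);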

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Authored by Suanming Mou on 2020-10-28 17:33:42 +08:00; committed by Ferruh Yigit
parent 84d3389048
commit e1592b6c4d
8 changed files with 228 additions and 218 deletions


@@ -1468,6 +1468,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOTSUP;
goto error;
}
mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
mlx5_hrxq_create_cb,
mlx5_hrxq_match_cb,
mlx5_hrxq_remove_cb);
/* Query availability of metadata reg_c's. */
err = mlx5_flow_discover_mreg_c(eth_dev);
if (err < 0) {
@@ -1520,6 +1524,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
mlx5_drop_action_destroy(eth_dev);
if (own_domain_id)
claim_zero(rte_eth_switch_domain_free(priv->domain_id));
mlx5_cache_list_destroy(&priv->hrxqs);
mlx5_free(priv);
if (eth_dev != NULL)
eth_dev->data->dev_private = NULL;


@@ -1286,6 +1286,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (ret)
DRV_LOG(WARNING, "port %u some flows still remain",
dev->data->port_id);
mlx5_cache_list_destroy(&priv->hrxqs);
/*
* Free the shared context in last turn, because the cleanup
* routines above may use some shared fields, like


@@ -65,6 +65,13 @@ enum mlx5_reclaim_mem_mode {
MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */
};
/* Hash and cache list callback context. */
struct mlx5_flow_cb_ctx {
struct rte_eth_dev *dev;
struct rte_flow_error *error;
void *data;
};
/* Device attributes used in mlx5 PMD */
struct mlx5_dev_attr {
uint64_t device_cap_flags_ex;
@@ -684,6 +691,22 @@ TAILQ_HEAD(mlx5_mtr_profiles, mlx5_flow_meter_profile);
/* MTR list. */
TAILQ_HEAD(mlx5_flow_meters, mlx5_flow_meter);
/* RSS description. */
struct mlx5_flow_rss_desc {
uint32_t level;
uint32_t queue_num; /**< Number of entries in @p queue. */
uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
uint64_t hash_fields; /* Verbs Hash fields. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
uint32_t key_len; /**< RSS hash key len. */
uint32_t tunnel; /**< Queue in tunnel. */
union {
uint16_t *queue; /**< Destination queues. */
const uint16_t *const_q; /**< Const pointer convert. */
};
bool standalone; /**< Queue is standalone or not. */
};
#define MLX5_PROC_PRIV(port_id) \
((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
@@ -723,7 +746,7 @@ struct mlx5_ind_table_obj {
/* Hash Rx queue. */
__extension__
struct mlx5_hrxq {
ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
struct mlx5_cache_entry entry; /* Cache entry. */
uint32_t refcnt; /* Reference counter. */
uint32_t standalone:1; /* This object used in shared action. */
struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
@@ -737,6 +760,7 @@ struct mlx5_hrxq {
#endif
uint64_t hash_fields; /* Verbs Hash fields. */
uint32_t rss_key_len; /* Hash key length in bytes. */
uint32_t idx; /* Hash Rx queue index. */
uint8_t rss_key[]; /* Hash key. */
};
@@ -853,7 +877,7 @@ struct mlx5_priv {
struct mlx5_obj_ops obj_ops; /* HW objects operations. */
LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
uint32_t hrxqs; /* Verbs Hash Rx queues. */
struct mlx5_cache_list hrxqs; /* Hash Rx queues. */
LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
/* Indirection tables. */


@@ -384,13 +384,6 @@ enum mlx5_flow_fate_type {
MLX5_FLOW_FATE_MAX,
};
/* Hash list callback context */
struct mlx5_flow_cb_ctx {
struct rte_eth_dev *dev;
struct rte_flow_error *error;
void *data;
};
/* Matcher PRM representation */
struct mlx5_flow_dv_match_params {
size_t size;
@@ -609,15 +602,6 @@ struct ibv_spec_header {
uint16_t size;
};
/* RSS description. */
struct mlx5_flow_rss_desc {
uint32_t level;
uint32_t queue_num; /**< Number of entries in @p queue. */
uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
uint16_t *queue; /**< Destination queues. */
};
/* PMD flow priority for tunnel */
#define MLX5_TUNNEL_PRIO_GET(rss_desc) \
((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4)


@@ -8527,7 +8527,7 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
}
/**
* Create an Rx Hash queue.
* Prepare an Rx Hash queue.
*
* @param dev
* Pointer to Ethernet device.
@@ -8542,29 +8542,23 @@ flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
* The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
static struct mlx5_hrxq *
flow_dv_handle_rx_queue(struct rte_eth_dev *dev,
struct mlx5_flow *dev_flow,
struct mlx5_flow_rss_desc *rss_desc,
uint32_t *hrxq_idx)
flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
struct mlx5_flow *dev_flow,
struct mlx5_flow_rss_desc *rss_desc,
uint32_t *hrxq_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *dh = dev_flow->handle;
struct mlx5_hrxq *hrxq;
MLX5_ASSERT(rss_desc->queue_num);
*hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
rss_desc->queue, rss_desc->queue_num);
if (!*hrxq_idx) {
*hrxq_idx = mlx5_hrxq_new
(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
rss_desc->queue, rss_desc->queue_num,
!!(dh->layers & MLX5_FLOW_LAYER_TUNNEL),
false);
if (!*hrxq_idx)
return NULL;
}
rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
rss_desc->hash_fields = dev_flow->hash_fields;
rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
rss_desc->standalone = false;
*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
if (!*hrxq_idx)
return NULL;
hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
*hrxq_idx);
return hrxq;
@@ -8927,8 +8921,8 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
queue = sub_actions->conf;
rss_desc->queue_num = 1;
rss_desc->queue[0] = queue->index;
hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
rss_desc, &hrxq_idx);
hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
rss_desc, &hrxq_idx);
if (!hrxq)
return rte_flow_error_set
(error, rte_errno,
@@ -9125,8 +9119,8 @@ flow_dv_create_action_sample(struct rte_eth_dev *dev,
if (num_of_dest > 1) {
if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
/* Handle QP action for mirroring */
hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
rss_desc, &hrxq_idx);
hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
rss_desc, &hrxq_idx);
if (!hrxq)
return rte_flow_error_set
(error, rte_errno,
@@ -10261,24 +10255,8 @@ __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
struct mlx5_flow_rss_desc *rss_desc =
&wks->rss_desc[!!wks->flow_nested_idx];
MLX5_ASSERT(rss_desc->queue_num);
hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
rss_desc->queue, rss_desc->queue_num);
if (!hrxq_idx) {
hrxq_idx = mlx5_hrxq_new(dev,
rss_desc->key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
rss_desc->queue,
rss_desc->queue_num,
!!(dev_flow->handle->layers &
MLX5_FLOW_LAYER_TUNNEL),
false);
}
*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
hrxq_idx);
*hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
&hrxq_idx);
}
return hrxq_idx;
}
@@ -10332,7 +10310,6 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct mlx5_hrxq *hrxq = NULL;
uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
(dev, flow, dev_flow, &hrxq);
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
@@ -10954,21 +10931,24 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
struct mlx5_shared_action_rss *action,
struct rte_flow_error *error)
{
struct mlx5_flow_rss_desc rss_desc = { 0 };
size_t i;
int err;
memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
rss_desc.const_q = action->origin.queue;
rss_desc.queue_num = action->origin.queue_num;
rss_desc.standalone = true;
for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
uint32_t hrxq_idx;
uint64_t hash_fields = mlx5_rss_hash_fields[i];
int tunnel;
for (tunnel = 0; tunnel < 2; tunnel++) {
hrxq_idx = mlx5_hrxq_new(dev, action->origin.key,
MLX5_RSS_HASH_KEY_LEN,
hash_fields,
action->origin.queue,
action->origin.queue_num,
tunnel, true);
rss_desc.tunnel = tunnel;
rss_desc.hash_fields = hash_fields;
hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
if (!hrxq_idx) {
rte_flow_error_set
(error, rte_errno,


@@ -1968,21 +1968,12 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
&wks->rss_desc[!!wks->flow_nested_idx];
MLX5_ASSERT(rss_desc->queue_num);
hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
rss_desc->queue,
rss_desc->queue_num);
if (!hrxq_idx)
hrxq_idx = mlx5_hrxq_new
(dev, rss_desc->key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
rss_desc->queue,
rss_desc->queue_num,
!!(handle->layers &
MLX5_FLOW_LAYER_TUNNEL),
false);
rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
rss_desc->hash_fields = dev_flow->hash_fields;
rss_desc->tunnel = !!(handle->layers &
MLX5_FLOW_LAYER_TUNNEL);
rss_desc->standalone = false;
hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
hrxq_idx);
if (!hrxq) {


@@ -2047,54 +2047,38 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
}
/**
* Get an Rx Hash queue.
* Match an Rx Hash queue.
*
* @param dev
* Pointer to Ethernet device.
* @param rss_conf
* RSS configuration for the Rx hash queue.
* @param queues
* Queues entering in hash queue. In case of empty hash_fields only the
* first queue index will be taken for the indirection table.
* @param queues_n
* Number of queues.
* @param list
* Cache list pointer.
* @param entry
* Hash queue entry pointer.
* @param cb_ctx
* Context of the callback function.
*
* @return
* A hash Rx queue index on success.
* 0 if match, non-zero if not match.
*/
uint32_t
mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n)
int
mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
struct mlx5_cache_entry *entry,
void *cb_ctx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
uint32_t idx;
struct rte_eth_dev *dev = list->ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_rss_desc *rss_desc = ctx->data;
struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
struct mlx5_ind_table_obj *ind_tbl;
queues_n = hash_fields ? queues_n : 1;
ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
hrxq, next) {
struct mlx5_ind_table_obj *ind_tbl;
if (hrxq->rss_key_len != rss_key_len)
continue;
if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
continue;
if (hrxq->hash_fields != hash_fields)
continue;
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
continue;
if (ind_tbl != hrxq->ind_table) {
mlx5_ind_table_obj_release(dev, ind_tbl,
hrxq->standalone);
continue;
}
__atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
return idx;
}
return 0;
if (hrxq->rss_key_len != rss_desc->key_len ||
memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
hrxq->hash_fields != rss_desc->hash_fields)
return 1;
ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue,
rss_desc->queue_num);
if (ind_tbl)
mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
return ind_tbl != hrxq->ind_table;
}
/**
@@ -2179,6 +2163,19 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
return -rte_errno;
}
static void
__mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
mlx5_glue->destroy_flow_action(hrxq->action);
#endif
priv->obj_ops.hrxq_destroy(hrxq);
mlx5_ind_table_obj_release(dev, hrxq->ind_table, hrxq->standalone);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}
/**
* Release the hash Rx queue.
*
@@ -2187,106 +2184,142 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
* @param hrxq
* Index to Hash Rx queue to release.
*
* @return
* 1 while a reference on it exists, 0 when freed.
* @param list
* Cache list pointer.
* @param entry
* Hash queue entry pointer.
*/
int
mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
void
mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
struct mlx5_cache_entry *entry)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
struct rte_eth_dev *dev = list->ctx;
struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
if (!hrxq)
return 0;
if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
mlx5_glue->destroy_flow_action(hrxq->action);
#endif
priv->obj_ops.hrxq_destroy(hrxq);
mlx5_ind_table_obj_release(dev, hrxq->ind_table,
hrxq->standalone);
if (!hrxq->standalone)
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ],
&priv->hrxqs, hrxq_idx, hrxq, next);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
return 0;
}
claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table,
hrxq->standalone));
return 1;
__mlx5_hrxq_remove(dev, hrxq);
}
/**
* Create an Rx Hash queue.
*
* @param dev
* Pointer to Ethernet device.
* @param rss_key
* RSS key for the Rx hash queue.
* @param rss_key_len
* RSS key length.
* @param hash_fields
* Verbs protocol hash field to make the RSS on.
* @param queues
* Queues entering in hash queue. In case of empty hash_fields only the
* first queue index will be taken for the indirection table.
* @param queues_n
* Number of queues.
* @param tunnel
* Tunnel type.
* @param standalone
* Object of Rx Hash queue will be used in standalone shared action or not.
*
* @return
* The DevX object initialized index, 0 otherwise and rte_errno is set.
*/
uint32_t
mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
int tunnel, bool standalone)
static struct mlx5_hrxq *
__mlx5_hrxq_create(struct rte_eth_dev *dev,
struct mlx5_flow_rss_desc *rss_desc)
{
struct mlx5_priv *priv = dev->data->dev_private;
const uint8_t *rss_key = rss_desc->key;
uint32_t rss_key_len = rss_desc->key_len;
const uint16_t *queues =
rss_desc->standalone ? rss_desc->const_q : rss_desc->queue;
uint32_t queues_n = rss_desc->queue_num;
struct mlx5_hrxq *hrxq = NULL;
uint32_t hrxq_idx = 0;
struct mlx5_ind_table_obj *ind_tbl;
int ret;
queues_n = hash_fields ? queues_n : 1;
queues_n = rss_desc->hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
standalone);
if (!ind_tbl) {
rte_errno = ENOMEM;
return 0;
}
rss_desc->standalone);
if (!ind_tbl)
return NULL;
hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
if (!hrxq)
goto error;
hrxq->standalone = !!standalone;
hrxq->standalone = rss_desc->standalone;
hrxq->idx = hrxq_idx;
hrxq->ind_table = ind_tbl;
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = hash_fields;
hrxq->hash_fields = rss_desc->hash_fields;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
ret = priv->obj_ops.hrxq_new(dev, hrxq, tunnel);
if (ret < 0) {
rte_errno = errno;
ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
if (ret < 0)
goto error;
}
__atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
if (!hrxq->standalone)
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
hrxq_idx, hrxq, next);
return hrxq_idx;
return hrxq;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
mlx5_ind_table_obj_release(dev, ind_tbl, rss_desc->standalone);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
rte_errno = ret; /* Restore rte_errno. */
return NULL;
}
/**
* Create an Rx Hash queue.
*
* @param list
* Cache list pointer.
* @param entry
* Hash queue entry pointer.
* @param cb_ctx
* Context of the callback function.
*
* @return
* queue entry on success, NULL otherwise.
*/
struct mlx5_cache_entry *
mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
struct mlx5_cache_entry *entry __rte_unused,
void *cb_ctx)
{
struct rte_eth_dev *dev = list->ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_rss_desc *rss_desc = ctx->data;
struct mlx5_hrxq *hrxq;
hrxq = __mlx5_hrxq_create(dev, rss_desc);
return hrxq ? &hrxq->entry : NULL;
}
/**
* Get an Rx Hash queue.
*
* @param dev
* Pointer to Ethernet device.
* @param rss_desc
* RSS configuration for the Rx hash queue.
*
* @return
* A hash Rx queue index on success.
*/
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
struct mlx5_flow_rss_desc *rss_desc)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
struct mlx5_cache_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.data = rss_desc,
};
if (rss_desc->standalone) {
hrxq = __mlx5_hrxq_create(dev, rss_desc);
} else {
entry = mlx5_cache_register(&priv->hrxqs, &ctx);
if (!entry)
return 0;
hrxq = container_of(entry, typeof(*hrxq), entry);
}
return hrxq->idx;
}
/**
* Release the hash Rx queue.
*
* @param dev
* Pointer to Ethernet device.
* @param hrxq_idx
* Index to Hash Rx queue to release.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
if (!hrxq->standalone)
return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
__mlx5_hrxq_remove(dev, hrxq);
return 0;
}
@@ -2372,22 +2405,12 @@ mlx5_drop_action_destroy(struct rte_eth_dev *dev)
* @return
* The number of object not released.
*/
int
uint32_t
mlx5_hrxq_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
uint32_t idx;
int ret = 0;
ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
hrxq, next) {
DRV_LOG(DEBUG,
"port %u hash Rx queue %p still referenced",
dev->data->port_id, (void *)hrxq);
++ret;
}
return ret;
return mlx5_cache_list_get_entry_num(&priv->hrxqs);
}
/**


@@ -351,17 +351,19 @@ struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
bool standalone);
uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
int tunnel, bool standalone);
struct mlx5_cache_entry *mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
struct mlx5_cache_entry *entry __rte_unused, void *cb_ctx);
int mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
struct mlx5_cache_entry *entry,
void *cb_ctx);
void mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
struct mlx5_cache_entry *entry);
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n);
struct mlx5_flow_rss_desc *rss_desc);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
int mlx5_hrxq_verify(struct rte_eth_dev *dev);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf
(struct rte_eth_dev *dev, uint16_t idx);