net/mlx5: fix Rx queue reference count for indirect RSS

mlx5_ind_table_obj_modify() was not changing the reference counters
of either the new set of RxQs or the old set of RxQs.
On the other hand, creation of the RSS incremented the RxQ refcnt.
If an RxQ was present in both the initial and the modified set,
its reference counter was incremented one extra time
compared to the queues that were only present in the new set.
This prevented releasing said RxQ resources on port stop:

    flow indirect_action 0 create action_id 1 \
        action rss queues 0 1 end / end
    flow indirect_action 0 update 1 \
        action rss queues 2 3 end / end
    quit
    ...
    mlx5_net: mlx5.c:1622: mlx5_dev_close():
        port 0 some Rx queue objects still remain
    mlx5_net: mlx5.c:1626: mlx5_dev_close():
        port 0 some Rx queues still remain

Increment reference counters for the new set of RxQs
and decrement them for the old set of RxQs when needed.
Remove explicit referencing of RxQ from mlx5_ind_table_obj_attach()
because it reuses mlx5_ind_table_obj_modify() code doing this.

Fixes: ec4e11d41d ("net/mlx5: preserve indirect actions on restart")
Cc: stable@dpdk.org

Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Reviewed-by: Matan Azrad <matan@nvidia.com>
This commit is contained in:
Dmitry Kozlyuk 2021-11-24 11:40:30 +02:00 committed by Raslan Darawsheh
parent c65d684497
commit ec9b812b6c
3 changed files with 34 additions and 19 deletions

View File

@ -15073,6 +15073,7 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
void *queue = NULL;
uint16_t *queue_old = NULL;
uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
bool dev_started = !!dev->data->dev_started;
if (!shared_rss)
return rte_flow_error_set(error, EINVAL,
@ -15095,7 +15096,10 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
rte_spinlock_lock(&shared_rss->action_rss_sl);
queue_old = shared_rss->ind_tbl->queues;
ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
queue, action_conf->queue_num, true);
queue, action_conf->queue_num,
true /* standalone */,
dev_started /* ref_new_qs */,
dev_started /* deref_old_qs */);
if (ret) {
mlx5_free(queue);
ret = rte_flow_error_set(error, rte_errno,

View File

@ -233,7 +233,8 @@ int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
uint16_t *queues, const uint32_t queues_n,
bool standalone);
bool standalone,
bool ref_new_qs, bool deref_old_qs);
int mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl);
int mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,

View File

@ -2387,6 +2387,10 @@ mlx5_ind_table_obj_check_standalone(struct rte_eth_dev *dev __rte_unused,
* Number of queues in the array.
* @param standalone
* Indirection table for Standalone queue.
* @param ref_new_qs
* Whether to increment new RxQ set reference counters.
* @param deref_old_qs
* Whether to decrement old RxQ set reference counters.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
@ -2395,10 +2399,10 @@ int
mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
uint16_t *queues, const uint32_t queues_n,
bool standalone)
bool standalone, bool ref_new_qs, bool deref_old_qs)
{
struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
unsigned int i = 0, j;
int ret = 0, err;
const unsigned int n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
@ -2408,22 +2412,30 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
RTE_SET_USED(standalone);
if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)
return -rte_errno;
for (i = 0; i != queues_n; ++i) {
if (!mlx5_rxq_get(dev, queues[i])) {
ret = -rte_errno;
goto error;
if (ref_new_qs)
for (i = 0; i != queues_n; ++i) {
if (!mlx5_rxq_ref(dev, queues[i])) {
ret = -rte_errno;
goto error;
}
}
}
MLX5_ASSERT(priv->obj_ops.ind_table_modify);
ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
if (ret)
goto error;
if (deref_old_qs)
for (i = 0; i < ind_tbl->queues_n; i++)
claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
ind_tbl->queues_n = queues_n;
ind_tbl->queues = queues;
return 0;
error:
err = rte_errno;
rte_errno = err;
if (ref_new_qs) {
err = rte_errno;
for (j = 0; j < i; j++)
mlx5_rxq_deref(dev, queues[j]);
rte_errno = err;
}
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
dev->data->port_id);
return ret;
@ -2444,19 +2456,17 @@ int
mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl)
{
unsigned int i;
int ret;
ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,
ind_tbl->queues_n, true);
if (ret != 0) {
ind_tbl->queues_n,
true /* standalone */,
true /* ref_new_qs */,
false /* deref_old_qs */);
if (ret != 0)
DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
dev->data->port_id, (void *)ind_tbl);
return ret;
}
for (i = 0; i < ind_tbl->queues_n; i++)
mlx5_rxq_ref(dev, ind_tbl->queues[i]);
return 0;
return ret;
}
/**