net/mlx5: fix metering cleanup on stop
A meter may hold Rx queue references in its sub-policies.
In the stop operation, all the Rx queues are released.
Wrongly, the meter references were not released before
destroying the Rx queues, which caused an error in stop.
Release the meter Rx queue references in the stop operation.
Fixes: fc6ce56bba ("net/mlx5: prepare sub-policy for flow with meter")
Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
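
The ordering issue described above can be shown with a minimal stand-alone sketch. The toy_* names below are hypothetical and only model the reference counting; they are not the driver's data structures. In the driver, the reference held by a meter sub-policy is an hrxq object released by mlx5_flow_meter_rxq_flush().

/*
 * Minimal stand-alone sketch of the ordering problem (not driver code).
 * All toy_* names are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

struct toy_rxq {
    int refcnt;              /* Rx queue reference counter */
};

struct toy_sub_policy {
    struct toy_rxq *rxq;     /* Rx queue reference held by the meter policy */
};

/* Drop the meter policy's queue reference, mirroring the rxq-flush step. */
static void toy_policy_rxq_flush(struct toy_sub_policy *sp)
{
    if (sp->rxq != NULL) {
        sp->rxq->refcnt--;
        sp->rxq = NULL;
    }
}

/* Releasing a queue that still has outstanding references fails: this is
 * the error previously seen in the stop operation. */
static int toy_rxq_release(struct toy_rxq *rxq)
{
    if (rxq->refcnt != 1)
        return -1;
    rxq->refcnt = 0;
    return 0;
}

int main(void)
{
    struct toy_rxq rxq = { .refcnt = 1 };
    struct toy_sub_policy sp = { .rxq = &rxq };

    rxq.refcnt++;                 /* the meter policy takes a reference */
    toy_policy_rxq_flush(&sp);    /* the fix: drop it before queue release */
    assert(toy_rxq_release(&rxq) == 0);
    printf("Rx queues released cleanly\n");
    return 0;
}
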
@@ -668,6 +668,8 @@ struct mlx5_meter_policy_action_container {
        /* Index to port ID action resource. */
        void *dr_jump_action[MLX5_MTR_DOMAIN_MAX];
        /* Jump/drop action per color. */
        uint16_t queue;
        /* Queue action configuration. */
    };
};
@@ -681,6 +683,8 @@ struct mlx5_flow_meter_policy {
    /* Rule applies to egress domain. */
    uint32_t transfer:1;
    /* Rule applies to transfer domain. */
    uint32_t is_queue:1;
    /* Is queue action in policy table. */
    rte_spinlock_t sl;
    uint32_t ref_cnt;
    /* Use count. */
@@ -1655,6 +1659,7 @@ struct mlx5_flow_meter_policy *mlx5_flow_meter_policy_find
        uint32_t *policy_idx);
int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
        struct rte_mtr_error *error);
void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);

/* mlx5_os.c */
struct rte_pci_driver;
@@ -4567,7 +4567,9 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
                    "Failed to find Meter Policy.");
        goto exit;
    }
    if (policy->is_rss) {
    if (policy->is_rss ||
        (policy->is_queue &&
         !policy->sub_policys[MLX5_MTR_DOMAIN_INGRESS][0]->rix_hrxq[0])) {
        struct mlx5_flow_workspace *wks =
            mlx5_flow_get_thread_workspace();
        struct mlx5_flow_rss_desc rss_desc_v[MLX5_MTR_RTE_COLORS];
@@ -4583,34 +4585,49 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
        for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
            struct mlx5_flow dev_flow = {0};
            struct mlx5_flow_handle dev_handle = { {0} };
            const void *rss_act = policy->act_cnt[i].rss->conf;
            struct rte_flow_action rss_actions[2] = {
                [0] = {

            rss_desc_v[i] = wks->rss_desc;
            if (policy->is_rss) {
                const void *rss_act =
                    policy->act_cnt[i].rss->conf;
                struct rte_flow_action rss_actions[2] = {
                    [0] = {
                        .type = RTE_FLOW_ACTION_TYPE_RSS,
                        .conf = rss_act
                    },
                    [1] = {
                    },
                    [1] = {
                        .type = RTE_FLOW_ACTION_TYPE_END,
                        .conf = NULL
                    }
                };
            }
                };

            dev_flow.handle = &dev_handle;
            dev_flow.ingress = attr->ingress;
            dev_flow.flow = flow;
            dev_flow.external = 0;
            dev_flow.handle = &dev_handle;
            dev_flow.ingress = attr->ingress;
            dev_flow.flow = flow;
            dev_flow.external = 0;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
            dev_flow.dv.transfer = attr->transfer;
            dev_flow.dv.transfer = attr->transfer;
#endif
            /* Translate RSS action to get rss hash fields. */
            if (flow_drv_translate(dev, &dev_flow, attr,
                /**
                 * Translate RSS action to get rss hash fields.
                 */
                if (flow_drv_translate(dev, &dev_flow, attr,
                        items, rss_actions, error))
                    goto exit;
                rss_desc_v[i] = wks->rss_desc;
                rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN;
                rss_desc_v[i].hash_fields = dev_flow.hash_fields;
                rss_desc_v[i].queue_num = rss_desc_v[i].hash_fields ?
                    rss_desc_v[i].queue_num : 1;
                    goto exit;
                rss_desc_v[i].key_len = MLX5_RSS_HASH_KEY_LEN;
                rss_desc_v[i].hash_fields =
                    dev_flow.hash_fields;
                rss_desc_v[i].queue_num =
                    rss_desc_v[i].hash_fields ?
                    rss_desc_v[i].queue_num : 1;
            } else {
                /* This is queue action. */
                rss_desc_v[i].key_len = 0;
                rss_desc_v[i].hash_fields = 0;
                rss_desc_v[i].queue =
                    &policy->act_cnt[i].queue;
                rss_desc_v[i].queue_num = 1;
            }
            rss_desc[i] = &rss_desc_v[i];
        }
        sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
@@ -7223,6 +7240,24 @@ mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
    fops->destroy_mtr_drop_tbls(dev);
}

/**
 * Destroy the sub policy table with RX queue.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Pointer to meter policy table.
 */
void
mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
        struct mlx5_flow_meter_policy *mtr_policy)
{
    const struct mlx5_flow_driver_ops *fops;

    fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
    fops->destroy_sub_policy_with_rxq(dev, mtr_policy);
}

/**
 * Allocate the needed aso flow meter id.
 *
@@ -1156,6 +1156,9 @@ typedef struct mlx5_flow_meter_sub_policy *
    (struct rte_eth_dev *dev,
    struct mlx5_flow_meter_policy *mtr_policy,
    struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t)
    (struct rte_eth_dev *dev,
    struct mlx5_flow_meter_policy *mtr_policy);
typedef uint32_t (*mlx5_flow_mtr_alloc_t)
    (struct rte_eth_dev *dev);
typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
@@ -1249,6 +1252,7 @@ struct mlx5_flow_driver_ops {
    mlx5_flow_create_def_policy_t create_def_policy;
    mlx5_flow_destroy_def_policy_t destroy_def_policy;
    mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
    mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq;
    mlx5_flow_counter_alloc_t counter_alloc;
    mlx5_flow_counter_free_t counter_free;
    mlx5_flow_counter_query_t counter_query;
@@ -1562,6 +1566,8 @@ struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
        (struct rte_eth_dev *dev,
        struct mlx5_flow_meter_policy *mtr_policy,
        struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
        struct mlx5_flow_meter_policy *mtr_policy);
int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
int mlx5_action_handle_flush(struct rte_eth_dev *dev);
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
@@ -14707,12 +14707,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
                MLX5_ASSERT(dev_flow.dv.tag_resource);
                act_cnt->rix_mark =
                    dev_flow.handle->dvh.rix_tag;
                if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
                    dev_flow.handle->rix_hrxq =
                        mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
                    flow_drv_rxq_flags_set(dev,
                        dev_flow.handle);
                }
                action_flags |= MLX5_FLOW_ACTION_MARK;
                break;
            }
@@ -14760,12 +14754,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
                    "set tag action");
                act_cnt->modify_hdr =
                    dev_flow.handle->dvh.modify_hdr;
                if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
                    dev_flow.handle->rix_hrxq =
                        mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
                    flow_drv_rxq_flags_set(dev,
                        dev_flow.handle);
                }
                action_flags |= MLX5_FLOW_ACTION_SET_TAG;
                break;
            }
@@ -14809,41 +14797,20 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
            }
            case RTE_FLOW_ACTION_TYPE_QUEUE:
            {
                struct mlx5_hrxq *hrxq;
                uint32_t hrxq_idx;
                struct mlx5_flow_rss_desc rss_desc;
                struct mlx5_flow_meter_sub_policy *sub_policy =
                    mtr_policy->sub_policys[domain][0];

                if (i >= MLX5_MTR_RTE_COLORS)
                    return -rte_mtr_error_set(error,
                        ENOTSUP,
                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                        NULL, "cannot create policy "
                        "fate queue for this color");
                memset(&rss_desc, 0,
                    sizeof(struct mlx5_flow_rss_desc));
                rss_desc.queue_num = 1;
                rss_desc.const_q = act->conf;
                hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
                    &rss_desc, &hrxq_idx);
                if (!hrxq)
                    return -rte_mtr_error_set(error,
                        ENOTSUP,
                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                        NULL,
                        "cannot create policy fate queue");
                sub_policy->rix_hrxq[i] = hrxq_idx;
                act_cnt->queue =
                    ((const struct rte_flow_action_queue *)
                    (act->conf))->index;
                act_cnt->fate_action =
                    MLX5_FLOW_FATE_QUEUE;
                dev_flow.handle->fate_action =
                    MLX5_FLOW_FATE_QUEUE;
                if (action_flags & MLX5_FLOW_ACTION_MARK ||
                    action_flags & MLX5_FLOW_ACTION_SET_TAG) {
                    dev_flow.handle->rix_hrxq = hrxq_idx;
                    flow_drv_rxq_flags_set(dev,
                        dev_flow.handle);
                }
                mtr_policy->is_queue = 1;
                action_flags |= MLX5_FLOW_ACTION_QUEUE;
                break;
            }
@@ -16057,6 +16024,73 @@ flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
    return NULL;
}

/**
 * Destroy the sub policy table with RX queue.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Pointer to meter policy table.
 */
static void
flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
        struct mlx5_flow_meter_policy *mtr_policy)
{
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
    uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
    uint32_t i, j;
    uint16_t sub_policy_num, new_policy_num;

    rte_spinlock_lock(&mtr_policy->sl);
    for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
        switch (mtr_policy->act_cnt[i].fate_action) {
        case MLX5_FLOW_FATE_SHARED_RSS:
            sub_policy_num = (mtr_policy->sub_policy_num >>
                (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
                MLX5_MTR_SUB_POLICY_NUM_MASK;
            new_policy_num = sub_policy_num;
            for (j = 0; j < sub_policy_num; j++) {
                sub_policy =
                    mtr_policy->sub_policys[domain][j];
                if (sub_policy) {
                    __flow_dv_destroy_sub_policy_rules(dev,
                        sub_policy);
                    if (sub_policy !=
                        mtr_policy->sub_policys[domain][0]) {
                        mtr_policy->sub_policys[domain][j] =
                            NULL;
                        mlx5_ipool_free
                            (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
                            sub_policy->idx);
                        new_policy_num--;
                    }
                }
            }
            if (new_policy_num != sub_policy_num) {
                mtr_policy->sub_policy_num &=
                    ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
                    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
                mtr_policy->sub_policy_num |=
                    (new_policy_num &
                    MLX5_MTR_SUB_POLICY_NUM_MASK) <<
                    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
            }
            break;
        case MLX5_FLOW_FATE_QUEUE:
            sub_policy = mtr_policy->sub_policys[domain][0];
            __flow_dv_destroy_sub_policy_rules(dev,
                sub_policy);
            break;
        default:
            /* Other actions without queue and do nothing */
            break;
        }
    }
    rte_spinlock_unlock(&mtr_policy->sl);
}

/**
 * Validate the batch counter support in root table.
 *
@@ -16666,6 +16700,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
    .create_def_policy = flow_dv_create_def_policy,
    .destroy_def_policy = flow_dv_destroy_def_policy,
    .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
    .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
    .counter_alloc = flow_dv_counter_allocate,
    .counter_free = flow_dv_counter_free,
    .counter_query = flow_dv_counter_query,
@@ -748,7 +748,7 @@ mlx5_flow_meter_policy_add(struct rte_eth_dev *dev,
            policy->actions, error);
    if (ret)
        goto policy_add_err;
    if (!is_rss) {
    if (!is_rss && !mtr_policy->is_queue) {
        /* Create policy rules in HW. */
        ret = mlx5_flow_create_policy_rules(dev, mtr_policy);
        if (ret)
@@ -1808,6 +1808,40 @@ mlx5_flow_meter_detach(struct mlx5_priv *priv,
#endif
}

/**
 * Flush meter with Rx queue configuration.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */
void
mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev)
{
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_flow_meter_sub_policy *sub_policy;
    struct mlx5_flow_meter_policy *mtr_policy;
    void *entry;
    uint32_t i, policy_idx;

    if (!priv->mtr_en)
        return;
    if (priv->sh->mtrmng->policy_idx_tbl && priv->sh->refcnt == 1) {
        MLX5_L3T_FOREACH(priv->sh->mtrmng->policy_idx_tbl,
                i, entry) {
            policy_idx = *(uint32_t *)entry;
            sub_policy = mlx5_ipool_get
                (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
                policy_idx);
            if (!sub_policy || !sub_policy->main_policy)
                continue;
            mtr_policy = sub_policy->main_policy;
            if (mtr_policy->is_queue || mtr_policy->is_rss)
                mlx5_flow_destroy_sub_policy_with_rxq(dev,
                    mtr_policy);
        }
    }
}

/**
 * Flush meter configuration.
 *
@@ -1180,7 +1180,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
    mlx5_traffic_disable(dev);
    /* All RX queue flags will be cleared in the flush interface. */
    mlx5_flow_list_flush(dev, &priv->flows, true);
    mlx5_flow_meter_flush(dev, NULL);
    mlx5_flow_meter_rxq_flush(dev);
    mlx5_rx_intr_vec_disable(dev);
    priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
    priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
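
For context, a hypothetical application-side sequence that exercises this path is sketched below, assuming the rte_mtr meter-policy API (rte_mtr_meter_policy_add()) available since DPDK 21.05. The helper name, port/policy identifiers, and trimmed error handling are illustrative only; with the fix, rte_eth_dev_stop() reaches mlx5_flow_meter_rxq_flush(), so a queue-fate policy no longer leaves dangling Rx queue references behind.

#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_mtr.h>

/* Hypothetical helper: create a meter policy whose green-color fate is a
 * queue action, then stop the port.  Colors other than green are left
 * unset for brevity. */
static int
stop_port_with_queue_policy(uint16_t port_id, uint32_t policy_id)
{
    struct rte_flow_action_queue green_queue = { .index = 0 };
    const struct rte_flow_action green_acts[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &green_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_mtr_meter_policy_params params = {
        .actions = { [RTE_COLOR_GREEN] = green_acts },
    };
    struct rte_mtr_error mtr_err;

    if (rte_mtr_meter_policy_add(port_id, policy_id, &params, &mtr_err))
        return -1;
    /*
     * With this fix, the stop path calls mlx5_flow_meter_rxq_flush(),
     * which drops the policy's Rx queue (hrxq) references before the
     * Rx queues themselves are released.
     */
    return rte_eth_dev_stop(port_id);
}
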