net/mlx5: fix marks on Rx packets
If HW Steering is enabled, Rx queues were configured to receive MARKs
when a table with MARK actions was created. After stopping the port,
Rx queue configuration is released, but during starting the port
the mark flag was not updated in the Rx queue configuration.
This patch introduces a reference count on the MARK action; it is
incremented and decremented on template table create and destroy.
When the port is stopped, Rx queue configuration is not cleared if
reference count is not zero.
Fixes: 3a2f674b6a ("net/mlx5: add queue and RSS HW steering action")
Cc: stable@dpdk.org
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
This commit is contained in:
parent
13c5c09390
commit
f64a79464c
@ -2010,7 +2010,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
|
||||
* If default mreg copy action is removed at the stop stage,
|
||||
* the search will return none and nothing will be done anymore.
|
||||
*/
|
||||
mlx5_flow_stop_default(dev);
|
||||
if (priv->sh->config.dv_flow_en != 2)
|
||||
mlx5_flow_stop_default(dev);
|
||||
mlx5_traffic_disable(dev);
|
||||
/*
|
||||
* If all the flows are already flushed in the device stop stage,
|
||||
|
@ -1747,6 +1747,7 @@ struct mlx5_priv {
|
||||
uint32_t flex_item_map; /* Map of allocated flex item elements. */
|
||||
uint32_t nb_queue; /* HW steering queue number. */
|
||||
struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
|
||||
uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */
|
||||
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
|
||||
/* Item template list. */
|
||||
LIST_HEAD(flow_hw_itt, rte_flow_pattern_template) flow_hw_itt;
|
||||
|
@ -7499,6 +7499,34 @@ mlx5_flow_stop_default(struct rte_eth_dev *dev)
|
||||
flow_rxq_flags_clear(dev);
|
||||
}
|
||||
|
||||
/**
 * Set the Rx queue "mark" flag on all Rx queues of the port.
 *
 * Propagates @p enable to every currently configured Rx queue control
 * structure and records the resulting state in priv->mark_enabled.
 * Does nothing when the requested state is already in effect.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] enable
 *   Flag to enable or not.
 */
void
flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	/* Equivalent to (priv->mark_enabled == enable): state unchanged, no-op. */
	if ((!priv->mark_enabled && !enable) ||
	    (priv->mark_enabled && enable))
		return;
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);

		/* With RXQ start/stop feature, RXQ might be stopped. */
		if (!rxq_ctrl)
			continue;
		rxq_ctrl->rxq.mark = enable;
	}
	priv->mark_enabled = enable;
}
|
||||
|
||||
/**
|
||||
* Start all default actions for flows.
|
||||
*
|
||||
|
@ -2516,6 +2516,8 @@ mlx5_get_tof(const struct rte_flow_item *items,
|
||||
enum mlx5_tof_rule_type *rule_type);
|
||||
void
|
||||
flow_hw_resource_release(struct rte_eth_dev *dev);
|
||||
void
|
||||
flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable);
|
||||
int flow_dv_action_validate(struct rte_eth_dev *dev,
|
||||
const struct rte_flow_indir_action_conf *conf,
|
||||
const struct rte_flow_action *action,
|
||||
|
@ -155,34 +155,6 @@ static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {
|
||||
.type = 0,
|
||||
};
|
||||
|
||||
/**
 * Set the Rx queue "mark" flag on all Rx queues of the port.
 *
 * File-local variant: propagates @p enable to every currently configured
 * Rx queue control structure and records the resulting state in
 * priv->mark_enabled. Does nothing when the requested state is already
 * in effect.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] enable
 *   Flag to enable or not.
 */
static void
flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	/* Equivalent to (priv->mark_enabled == enable): state unchanged, no-op. */
	if ((!priv->mark_enabled && !enable) ||
	    (priv->mark_enabled && enable))
		return;
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);

		/* With RXQ start/stop feature, RXQ might be stopped. */
		if (!rxq_ctrl)
			continue;
		rxq_ctrl->rxq.mark = enable;
	}
	priv->mark_enabled = enable;
}
|
||||
|
||||
/**
|
||||
* Set the hash fields according to the @p rss_desc information.
|
||||
*
|
||||
@ -462,6 +434,10 @@ __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
|
||||
mlx5_ipool_free(priv->acts_ipool, data->idx);
|
||||
}
|
||||
|
||||
if (acts->mark)
|
||||
if (!__atomic_sub_fetch(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED))
|
||||
flow_hw_rxq_flag_set(dev, false);
|
||||
|
||||
if (acts->jump) {
|
||||
struct mlx5_flow_group *grp;
|
||||
|
||||
@ -484,6 +460,7 @@ __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
|
||||
if (acts->mhdr->action)
|
||||
mlx5dr_action_destroy(acts->mhdr->action);
|
||||
mlx5_free(acts->mhdr);
|
||||
acts->mhdr = NULL;
|
||||
}
|
||||
if (mlx5_hws_cnt_id_valid(acts->cnt_id)) {
|
||||
mlx5_hws_cnt_shared_put(priv->hws_cpool, &acts->cnt_id);
|
||||
@ -1422,6 +1399,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
|
||||
goto err;
|
||||
acts->rule_acts[action_pos].action =
|
||||
priv->hw_tag[!!attr->group];
|
||||
__atomic_add_fetch(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
|
||||
flow_hw_rxq_flag_set(dev, true);
|
||||
break;
|
||||
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
|
||||
|
@ -1424,7 +1424,12 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
|
||||
mlx5_mp_os_req_stop_rxtx(dev);
|
||||
rte_delay_us_sleep(1000 * priv->rxqs_n);
|
||||
DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
|
||||
mlx5_flow_stop_default(dev);
|
||||
if (priv->sh->config.dv_flow_en == 2) {
|
||||
if (!__atomic_load_n(&priv->hws_mark_refcnt, __ATOMIC_RELAXED))
|
||||
flow_hw_rxq_flag_set(dev, false);
|
||||
} else {
|
||||
mlx5_flow_stop_default(dev);
|
||||
}
|
||||
/* Control flows for default traffic can be removed firstly. */
|
||||
mlx5_traffic_disable(dev);
|
||||
/* All RX queue flags will be cleared in the flush interface. */
|
||||
|
Loading…
Reference in New Issue
Block a user