net/mlx5: change operations for non-cached flows
When stopping a mlx5 device, all the inserted flows will be flushed, since they are in non-cached mode, and no further action will be taken for these flows in the device closing stage. If the device is restarted after being stopped, no flow in non-cached mode will be re-inserted. The flush operation through the rte interface remains the same, and all the flows will be flushed actively. Signed-off-by: Bing Zhao <bingz@mellanox.com> Acked-by: Matan Azrad <matan@mellanox.com>
This commit is contained in:
parent
dac98e8780
commit
8db7e3b698
@ -1234,8 +1234,17 @@ mlx5_dev_close(struct rte_eth_dev *dev)
|
||||
/* In case mlx5_dev_stop() has not been called. */
|
||||
mlx5_dev_interrupt_handler_uninstall(dev);
|
||||
mlx5_dev_interrupt_handler_devx_uninstall(dev);
|
||||
/*
|
||||
* If default mreg copy action is removed at the stop stage,
|
||||
* the search will return none and nothing will be done anymore.
|
||||
*/
|
||||
mlx5_flow_stop_default(dev);
|
||||
mlx5_traffic_disable(dev);
|
||||
mlx5_flow_flush(dev, NULL);
|
||||
/*
|
||||
* If all the flows are already flushed in the device stop stage,
|
||||
* then this will return directly without any action.
|
||||
*/
|
||||
mlx5_flow_list_flush(dev, &priv->flows, true);
|
||||
mlx5_flow_meter_flush(dev, NULL);
|
||||
/* Prevent crashes when queues are still in use. */
|
||||
dev->rx_pkt_burst = removed_rx_burst;
|
||||
|
@ -712,7 +712,8 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
|
||||
struct rte_flow_error *error);
|
||||
int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
|
||||
struct rte_flow_error *error);
|
||||
void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list);
|
||||
void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list,
|
||||
bool active);
|
||||
int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
|
||||
int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
|
||||
const struct rte_flow_action *action, void *data,
|
||||
@ -725,6 +726,8 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
|
||||
void *arg);
|
||||
int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
|
||||
void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
|
||||
int mlx5_flow_start_default(struct rte_eth_dev *dev);
|
||||
void mlx5_flow_stop_default(struct rte_eth_dev *dev);
|
||||
int mlx5_flow_verify(struct rte_eth_dev *dev);
|
||||
int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
|
||||
int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <stdalign.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <stdbool.h>
|
||||
|
||||
/* Verbs header. */
|
||||
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
|
||||
@ -4449,15 +4450,25 @@ flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
|
||||
* Pointer to Ethernet device.
|
||||
* @param list
|
||||
* Pointer to a TAILQ flow list.
|
||||
* @param active
|
||||
* If flushing is called actively.
|
||||
*/
|
||||
void
|
||||
mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
|
||||
mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list,
|
||||
bool active)
|
||||
{
|
||||
uint32_t num_flushed = 0;
|
||||
|
||||
while (!TAILQ_EMPTY(list)) {
|
||||
struct rte_flow *flow;
|
||||
|
||||
flow = TAILQ_FIRST(list);
|
||||
flow_list_destroy(dev, list, flow);
|
||||
num_flushed++;
|
||||
}
|
||||
if (active) {
|
||||
DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
|
||||
dev->data->port_id, num_flushed);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4522,6 +4533,37 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
|
||||
return -rte_errno;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop all default actions for flows.
|
||||
*
|
||||
* @param dev
|
||||
* Pointer to Ethernet device.
|
||||
* @param list
|
||||
* Pointer to a TAILQ flow list.
|
||||
*/
|
||||
void
|
||||
mlx5_flow_stop_default(struct rte_eth_dev *dev)
|
||||
{
|
||||
flow_mreg_del_default_copy_action(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* Start all default actions for flows.
|
||||
*
|
||||
* @param dev
|
||||
* Pointer to Ethernet device.
|
||||
* @return
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_flow_start_default(struct rte_eth_dev *dev)
|
||||
{
|
||||
struct rte_flow_error error;
|
||||
|
||||
/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
|
||||
return flow_mreg_add_default_copy_action(dev, &error);
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify the flow list is empty
|
||||
*
|
||||
@ -4737,7 +4779,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
|
||||
{
|
||||
struct mlx5_priv *priv = dev->data->dev_private;
|
||||
|
||||
mlx5_flow_list_flush(dev, &priv->flows);
|
||||
mlx5_flow_list_flush(dev, &priv->flows, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -5179,7 +5221,7 @@ flow_fdir_filter_flush(struct rte_eth_dev *dev)
|
||||
{
|
||||
struct mlx5_priv *priv = dev->data->dev_private;
|
||||
|
||||
mlx5_flow_list_flush(dev, &priv->flows);
|
||||
mlx5_flow_list_flush(dev, &priv->flows, false);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -269,7 +269,6 @@ mlx5_hairpin_bind(struct rte_eth_dev *dev)
|
||||
int
|
||||
mlx5_dev_start(struct rte_eth_dev *dev)
|
||||
{
|
||||
struct mlx5_priv *priv = dev->data->dev_private;
|
||||
int ret;
|
||||
int fine_inline;
|
||||
|
||||
@ -318,14 +317,19 @@ mlx5_dev_start(struct rte_eth_dev *dev)
|
||||
mlx5_stats_init(dev);
|
||||
ret = mlx5_traffic_enable(dev);
|
||||
if (ret) {
|
||||
DRV_LOG(DEBUG, "port %u failed to set defaults flows",
|
||||
DRV_LOG(ERR, "port %u failed to set defaults flows",
|
||||
dev->data->port_id);
|
||||
goto error;
|
||||
}
|
||||
ret = mlx5_flow_start(dev, &priv->flows);
|
||||
/*
|
||||
* In non-cached mode, it only needs to start the default mreg copy
|
||||
* action and no flow created by application exists anymore.
|
||||
* But it is worth wrapping the interface for further usage.
|
||||
*/
|
||||
ret = mlx5_flow_start_default(dev);
|
||||
if (ret) {
|
||||
DRV_LOG(DEBUG, "port %u failed to set flows",
|
||||
dev->data->port_id);
|
||||
DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
|
||||
dev->data->port_id, strerror(rte_errno));
|
||||
goto error;
|
||||
}
|
||||
rte_wmb();
|
||||
@ -339,7 +343,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
|
||||
ret = rte_errno; /* Save rte_errno before cleanup. */
|
||||
/* Rollback. */
|
||||
dev->data->dev_started = 0;
|
||||
mlx5_flow_stop(dev, &priv->flows);
|
||||
mlx5_flow_stop_default(dev);
|
||||
mlx5_traffic_disable(dev);
|
||||
mlx5_txq_stop(dev);
|
||||
mlx5_rxq_stop(dev);
|
||||
@ -369,8 +373,11 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
|
||||
mlx5_mp_req_stop_rxtx(dev);
|
||||
usleep(1000 * priv->rxqs_n);
|
||||
DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
|
||||
mlx5_flow_stop(dev, &priv->flows);
|
||||
mlx5_flow_stop_default(dev);
|
||||
/* Control flows for default traffic can be removed firstly. */
|
||||
mlx5_traffic_disable(dev);
|
||||
/* All RX queue flags will be cleared in the flush interface. */
|
||||
mlx5_flow_list_flush(dev, &priv->flows, true);
|
||||
mlx5_rx_intr_vec_disable(dev);
|
||||
mlx5_dev_interrupt_handler_uninstall(dev);
|
||||
mlx5_txq_stop(dev);
|
||||
@ -529,7 +536,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
|
||||
return 0;
|
||||
error:
|
||||
ret = rte_errno; /* Save rte_errno before cleanup. */
|
||||
mlx5_flow_list_flush(dev, &priv->ctrl_flows);
|
||||
mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
|
||||
rte_errno = ret; /* Restore rte_errno. */
|
||||
return -rte_errno;
|
||||
}
|
||||
@ -546,7 +553,7 @@ mlx5_traffic_disable(struct rte_eth_dev *dev)
|
||||
{
|
||||
struct mlx5_priv *priv = dev->data->dev_private;
|
||||
|
||||
mlx5_flow_list_flush(dev, &priv->ctrl_flows);
|
||||
mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
|
||||
}
|
||||
|
||||
/**
|
||||
|
Loading…
Reference in New Issue
Block a user