net/mlx5: use flow to enable promiscuous mode

RSS hash configuration is currently ignored by the PMD; this commit
removes the RSS feature from promiscuous mode.

This functionality will be added in a later commit.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
Nélio Laranjeiro, 2017-10-09 16:44:53 +02:00; committed by Ferruh Yigit
parent 35a010ad48
commit 1b37f5d898
7 changed files with 166 additions and 73 deletions
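
For reference, the catch-all rule that the new mlx5_ctrl_flow() helper programs for promiscuous mode corresponds to the rte_flow rule sketched below using the public API. This is an illustration only: the promisc_flow_sketch() wrapper, its name and the port_id parameter are not part of the patch, and an application would rely on the default priority rather than the PMD-internal MLX5_CTRL_FLOW_PRIORITY. The authoritative code is the mlx5_flow.c hunk further down.

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Illustrative sketch only: match every ingress Ethernet frame (zeroed
 * spec and mask) and steer it to Rx queue 0, mirroring the pattern and
 * action pair built by mlx5_ctrl_flow() in this patch. */
static struct rte_flow *
promisc_flow_sketch(uint16_t port_id, struct rte_flow_error *error)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item_eth eth = {
                .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                .type = 0,
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth, .mask = &eth },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, error);
}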

drivers/net/mlx5/mlx5.c

@@ -201,7 +201,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
         priv_special_flow_disable_all(priv);
         priv_mac_addrs_disable(priv);
         priv_destroy_hash_rxqs(priv);
+        priv_flow_flush(priv, &priv->flows);
         /* Prevent crashes when queues are still in use. */
         dev->rx_pkt_burst = removed_rx_burst;
         dev->tx_pkt_burst = removed_tx_burst;
@@ -884,6 +884,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
         priv->dev = eth_dev;
         eth_dev->dev_ops = &mlx5_dev_ops;
         TAILQ_INIT(&priv->flows);
+        TAILQ_INIT(&priv->ctrl_flows);
         /* Hint libmlx5 to use PMD allocator for data plane resources */
         struct mlx5dv_ctx_allocators alctr = {

drivers/net/mlx5/mlx5.h

@@ -39,6 +39,7 @@
 #include <limits.h>
 #include <net/if.h>
 #include <netinet/in.h>
+#include <sys/queue.h>
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -86,6 +87,9 @@ struct mlx5_xstats_ctrl {
         uint64_t base[MLX5_MAX_XSTATS];
 };
+/* Flow list . */
+TAILQ_HEAD(mlx5_flows, rte_flow);
 struct priv {
         struct rte_eth_dev *dev; /* Ethernet device of master process. */
         struct ibv_context *ctx; /* Verbs context. */
@@ -104,7 +108,6 @@ struct priv {
         /* Device properties. */
         uint16_t mtu; /* Configured MTU. */
         uint8_t port; /* Physical port number. */
-        unsigned int promisc_req:1; /* Promiscuous mode requested. */
         unsigned int allmulti_req:1; /* All multicast mode requested. */
         unsigned int hw_csum:1; /* Checksum offload is supported. */
         unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
@@ -145,7 +148,8 @@ struct priv {
         unsigned int (*reta_idx)[]; /* RETA index table. */
         unsigned int reta_idx_n; /* RETA index size. */
         struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
-        TAILQ_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
+        struct mlx5_flows flows; /* RTE Flow rules. */
+        struct mlx5_flows ctrl_flows; /* Control flow rules. */
         LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
         LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
         LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
@@ -293,11 +297,14 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
                                   struct rte_flow_error *);
 int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
                       struct rte_flow_error *);
+void priv_flow_flush(struct priv *, struct mlx5_flows *);
 int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
 int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
-int priv_flow_start(struct priv *);
-void priv_flow_stop(struct priv *);
+int priv_flow_start(struct priv *, struct mlx5_flows *);
+void priv_flow_stop(struct priv *, struct mlx5_flows *);
 int priv_flow_verify(struct priv *);
+int mlx5_ctrl_flow(struct rte_eth_dev *, struct rte_flow_item_eth *,
+                   struct rte_flow_item_eth *, unsigned int);
 /* mlx5_socket.c */

drivers/net/mlx5/mlx5_flow.c

@@ -52,6 +52,9 @@
 #include "mlx5.h"
 #include "mlx5_prm.h"
+/* Define minimal priority for control plane flows. */
+#define MLX5_CTRL_FLOW_PRIORITY 4
 static int
 mlx5_flow_create_eth(const struct rte_flow_item *item,
                      const void *default_mask,
@@ -451,7 +454,7 @@ priv_flow_validate(struct priv *priv,
                                    "groups are not supported");
                 return -rte_errno;
         }
-        if (attr->priority) {
+        if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
                 rte_flow_error_set(error, ENOTSUP,
                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                    NULL,
@@ -1169,6 +1172,8 @@ priv_flow_create_action_queue(struct priv *priv,
  *
  * @param priv
  *   Pointer to private structure.
+ * @param list
+ *   Pointer to a TAILQ flow list.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] pattern
@@ -1183,6 +1188,7 @@ priv_flow_create_action_queue(struct priv *priv,
  */
 static struct rte_flow *
 priv_flow_create(struct priv *priv,
+                 struct mlx5_flows *list,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
@@ -1232,6 +1238,10 @@ priv_flow_create(struct priv *priv,
         rte_flow = priv_flow_create_action_queue(priv, &flow, error);
         if (!rte_flow)
                 goto exit;
+        if (rte_flow) {
+                TAILQ_INSERT_TAIL(list, rte_flow, next);
+                DEBUG("Flow created %p", (void *)rte_flow);
+        }
         return rte_flow;
 exit:
         rte_free(flow.ibv_attr);
@@ -1255,11 +1265,8 @@ mlx5_flow_create(struct rte_eth_dev *dev,
         struct rte_flow *flow;
         priv_lock(priv);
-        flow = priv_flow_create(priv, attr, items, actions, error);
-        if (flow) {
-                TAILQ_INSERT_TAIL(&priv->flows, flow, next);
-                DEBUG("Flow created %p", (void *)flow);
-        }
+        flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
+                                error);
         priv_unlock(priv);
         return flow;
 }
@@ -1269,11 +1276,14 @@ mlx5_flow_create(struct rte_eth_dev *dev,
  *
  * @param priv
  *   Pointer to private structure.
+ * @param list
+ *   Pointer to a TAILQ flow list.
  * @param[in] flow
  *   Flow to destroy.
  */
 static void
 priv_flow_destroy(struct priv *priv,
+                  struct mlx5_flows *list,
                   struct rte_flow *flow)
 {
         unsigned int i;
@@ -1293,7 +1303,7 @@ priv_flow_destroy(struct priv *priv,
                  * To remove the mark from the queue, the queue must not be
                  * present in any other marked flow (RSS or not).
                  */
-                TAILQ_FOREACH(tmp, &priv->flows, next) {
+                TAILQ_FOREACH(tmp, list, next) {
                         unsigned int j;
                         if (!tmp->mark)
@@ -1313,7 +1323,7 @@ priv_flow_destroy(struct priv *priv,
         claim_zero(ibv_destroy_flow(flow->ibv_flow));
         if (!flow->drop)
                 mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
-        TAILQ_REMOVE(&priv->flows, flow, next);
+        TAILQ_REMOVE(list, flow, next);
         rte_free(flow->ibv_attr);
         DEBUG("Flow destroyed %p", (void *)flow);
         rte_free(flow);
@@ -1334,7 +1344,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
         (void)error;
         priv_lock(priv);
-        priv_flow_destroy(priv, flow);
+        priv_flow_destroy(priv, &priv->flows, flow);
         priv_unlock(priv);
         return 0;
 }
@@ -1344,15 +1354,17 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
  *
  * @param priv
  *   Pointer to private structure.
+ * @param list
+ *   Pointer to a TAILQ flow list.
  */
-static void
-priv_flow_flush(struct priv *priv)
+void
+priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
 {
-        while (!TAILQ_EMPTY(&priv->flows)) {
+        while (!TAILQ_EMPTY(list)) {
                 struct rte_flow *flow;
-                flow = TAILQ_FIRST(&priv->flows);
-                priv_flow_destroy(priv, flow);
+                flow = TAILQ_FIRST(list);
+                priv_flow_destroy(priv, list, flow);
         }
 }
@@ -1370,7 +1382,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
         (void)error;
         priv_lock(priv);
-        priv_flow_flush(priv);
+        priv_flow_flush(priv, &priv->flows);
         priv_unlock(priv);
         return 0;
 }
@@ -1493,13 +1505,15 @@ priv_flow_delete_drop_queue(struct priv *priv)
  *
  * @param priv
  *   Pointer to private structure.
+ * @param list
+ *   Pointer to a TAILQ flow list.
  */
 void
-priv_flow_stop(struct priv *priv)
+priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
 {
         struct rte_flow *flow;
-        TAILQ_FOREACH_REVERSE(flow, &priv->flows, mlx5_flows, next) {
+        TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
                 claim_zero(ibv_destroy_flow(flow->ibv_flow));
                 flow->ibv_flow = NULL;
                 mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
@@ -1522,12 +1536,14 @@ priv_flow_stop(struct priv *priv)
  *
  * @param priv
  *   Pointer to private structure.
+ * @param list
+ *   Pointer to a TAILQ flow list.
  *
  * @return
  *   0 on success, a errno value otherwise and rte_errno is set.
  */
 int
-priv_flow_start(struct priv *priv)
+priv_flow_start(struct priv *priv, struct mlx5_flows *list)
 {
         int ret;
         struct rte_flow *flow;
@@ -1535,7 +1551,7 @@ priv_flow_start(struct priv *priv)
         ret = priv_flow_create_drop_queue(priv);
         if (ret)
                 return -1;
-        TAILQ_FOREACH(flow, &priv->flows, next) {
+        TAILQ_FOREACH(flow, list, next) {
                 if (flow->frxq.hrxq)
                         goto flow_create;
                 flow->frxq.hrxq =
@@ -1630,3 +1646,90 @@ priv_flow_verify(struct priv *priv)
         }
         return ret;
 }
+
+/**
+ * Enable/disable a control flow configured from the control plane.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param spec
+ *   An Ethernet flow spec to apply.
+ * @param mask
+ *   An Ethernet flow mask to apply.
+ * @param enable
+ *   Enable/disable the flow.
+ *
+ * @return
+ *   0 on success.
+ */
+int
+mlx5_ctrl_flow(struct rte_eth_dev *dev,
+               struct rte_flow_item_eth *spec,
+               struct rte_flow_item_eth *mask,
+               unsigned int enable)
+{
+        struct priv *priv = dev->data->dev_private;
+        const struct rte_flow_attr attr = {
+                .ingress = 1,
+                .priority = MLX5_CTRL_FLOW_PRIORITY,
+        };
+        struct rte_flow_item items[] = {
+                {
+                        .type = RTE_FLOW_ITEM_TYPE_ETH,
+                        .spec = spec,
+                        .last = NULL,
+                        .mask = mask,
+                },
+                {
+                        .type = RTE_FLOW_ITEM_TYPE_END,
+                },
+        };
+        struct rte_flow_action actions[] = {
+                {
+                        .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+                        .conf = &(struct rte_flow_action_queue){
+                                .index = 0,
+                        },
+                },
+                {
+                        .type = RTE_FLOW_ACTION_TYPE_END,
+                },
+        };
+        struct rte_flow *flow;
+        struct rte_flow_error error;
+
+        if (enable) {
+                flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items,
+                                        actions, &error);
+                if (!flow)
+                        return 1;
+        } else {
+                struct spec {
+                        struct ibv_flow_attr ibv_attr;
+                        struct ibv_flow_spec_eth eth;
+                } spec;
+                struct mlx5_flow_parse parser = {
+                        .ibv_attr = &spec.ibv_attr,
+                        .offset = sizeof(struct ibv_flow_attr),
+                };
+                struct ibv_flow_spec_eth *eth;
+                const unsigned int attr_size = sizeof(struct ibv_flow_attr);
+
+                claim_zero(mlx5_flow_create_eth(&items[0], NULL, &parser));
+                TAILQ_FOREACH(flow, &priv->ctrl_flows, next) {
+                        eth = (void *)((uintptr_t)flow->ibv_attr + attr_size);
+                        assert(eth->type == IBV_FLOW_SPEC_ETH);
+                        if (!memcmp(eth, &spec.eth, sizeof(*eth)))
+                                break;
+                }
+                if (flow) {
+                        claim_zero(ibv_destroy_flow(flow->ibv_flow));
+                        mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
+                        rte_free(flow->ibv_attr);
+                        DEBUG("Control flow destroyed %p", (void *)flow);
+                        TAILQ_REMOVE(&priv->ctrl_flows, flow, next);
+                        rte_free(flow);
+                }
+        }
+        return 0;
+}

drivers/net/mlx5/mlx5_rxmode.c

@@ -53,20 +53,6 @@
 /* Initialization data for special flows. */
 static const struct special_flow_init special_flow_init[] = {
-        [HASH_RXQ_FLOW_TYPE_PROMISC] = {
-                .dst_mac_val = "\x00\x00\x00\x00\x00\x00",
-                .dst_mac_mask = "\x00\x00\x00\x00\x00\x00",
-                .hash_types =
-                        1 << HASH_RXQ_TCPV4 |
-                        1 << HASH_RXQ_UDPV4 |
-                        1 << HASH_RXQ_IPV4 |
-                        1 << HASH_RXQ_TCPV6 |
-                        1 << HASH_RXQ_UDPV6 |
-                        1 << HASH_RXQ_IPV6 |
-                        1 << HASH_RXQ_ETH |
-                        0,
-                .per_vlan = 0,
-        },
         [HASH_RXQ_FLOW_TYPE_ALLMULTI] = {
                 .dst_mac_val = "\x01\x00\x00\x00\x00\x00",
                 .dst_mac_mask = "\x01\x00\x00\x00\x00\x00",
@@ -346,7 +332,7 @@ priv_special_flow_enable_all(struct priv *priv)
         if (priv->isolated)
                 return 0;
-        for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
+        for (flow_type = HASH_RXQ_FLOW_TYPE_ALLMULTI;
              flow_type != HASH_RXQ_FLOW_TYPE_MAC;
              ++flow_type) {
                 int ret;
@@ -373,7 +359,7 @@ priv_special_flow_disable_all(struct priv *priv)
 {
         enum hash_rxq_flow_type flow_type;
-        for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
+        for (flow_type = HASH_RXQ_FLOW_TYPE_ALLMULTI;
              flow_type != HASH_RXQ_FLOW_TYPE_MAC;
              ++flow_type)
                 priv_special_flow_disable(priv, flow_type);
@@ -388,19 +374,16 @@ priv_special_flow_disable_all(struct priv *priv)
 void
 mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 {
-        struct priv *priv = dev->data->dev_private;
-        int ret;
+        struct rte_flow_item_eth eth = {
+                .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+                .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+                .type = 0,
+        };
         if (mlx5_is_secondary())
                 return;
-        priv_lock(priv);
-        priv->promisc_req = 1;
-        ret = priv_rehash_flows(priv);
-        if (ret)
-                ERROR("error while enabling promiscuous mode: %s",
-                      strerror(ret));
-        priv_unlock(priv);
+        dev->data->promiscuous = 1;
+        claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 1));
 }
 /**
@@ -412,19 +395,16 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 void
 mlx5_promiscuous_disable(struct rte_eth_dev *dev)
 {
-        struct priv *priv = dev->data->dev_private;
-        int ret;
+        struct rte_flow_item_eth eth = {
+                .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+                .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+                .type = 0,
+        };
         if (mlx5_is_secondary())
                 return;
-        priv_lock(priv);
-        priv->promisc_req = 0;
-        ret = priv_rehash_flows(priv);
-        if (ret)
-                ERROR("error while disabling promiscuous mode: %s",
-                      strerror(ret));
-        priv_unlock(priv);
+        dev->data->promiscuous = 0;
+        claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 0));
 }
 /**

drivers/net/mlx5/mlx5_rxq.c

@@ -571,13 +571,7 @@ priv_destroy_hash_rxqs(struct priv *priv)
 int
 priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
 {
-        /* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
-         * has been requested. */
-        if (priv->promisc_req)
-                return type == HASH_RXQ_FLOW_TYPE_PROMISC;
         switch (type) {
-        case HASH_RXQ_FLOW_TYPE_PROMISC:
-                return !!priv->promisc_req;
         case HASH_RXQ_FLOW_TYPE_ALLMULTI:
                 return !!priv->allmulti_req;
         case HASH_RXQ_FLOW_TYPE_BROADCAST:

drivers/net/mlx5/mlx5_rxtx.h

@@ -237,7 +237,6 @@ struct special_flow_init {
 };
 enum hash_rxq_flow_type {
-        HASH_RXQ_FLOW_TYPE_PROMISC,
         HASH_RXQ_FLOW_TYPE_ALLMULTI,
         HASH_RXQ_FLOW_TYPE_BROADCAST,
         HASH_RXQ_FLOW_TYPE_IPV6MULTI,
@@ -249,8 +248,6 @@ static inline const char *
 hash_rxq_flow_type_str(enum hash_rxq_flow_type flow_type)
 {
         switch (flow_type) {
-        case HASH_RXQ_FLOW_TYPE_PROMISC:
-                return "promiscuous";
         case HASH_RXQ_FLOW_TYPE_ALLMULTI:
                 return "allmulticast";
         case HASH_RXQ_FLOW_TYPE_BROADCAST:

drivers/net/mlx5/mlx5_trigger.c

@@ -163,7 +163,16 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                       (void *)priv, strerror(err));
                 goto error;
         }
-        err = priv_flow_start(priv);
+        if (dev->data->promiscuous)
+                mlx5_promiscuous_enable(dev);
+        err = priv_flow_start(priv, &priv->ctrl_flows);
+        if (err) {
+                ERROR("%p: an error occurred while configuring control flows:"
+                      " %s",
+                      (void *)priv, strerror(err));
+                goto error;
+        }
+        err = priv_flow_start(priv, &priv->flows);
         if (err) {
                 ERROR("%p: an error occurred while configuring flows:"
                       " %s",
@@ -187,7 +196,8 @@ mlx5_dev_start(struct rte_eth_dev *dev)
         priv_special_flow_disable_all(priv);
         priv_mac_addrs_disable(priv);
         priv_destroy_hash_rxqs(priv);
-        priv_flow_stop(priv);
+        priv_flow_stop(priv, &priv->flows);
+        priv_flow_flush(priv, &priv->ctrl_flows);
         priv_rxq_stop(priv);
         priv_txq_stop(priv);
         priv_unlock(priv);
@@ -222,13 +232,14 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
         priv_special_flow_disable_all(priv);
         priv_mac_addrs_disable(priv);
         priv_destroy_hash_rxqs(priv);
-        priv_flow_stop(priv);
+        priv_flow_stop(priv, &priv->flows);
+        priv_flow_flush(priv, &priv->ctrl_flows);
         priv_rx_intr_vec_disable(priv);
-        priv_dev_interrupt_handler_uninstall(priv, dev);
         priv_txq_stop(priv);
         priv_rxq_stop(priv);
         LIST_FOREACH(mr, &priv->mr, next) {
                 priv_mr_release(priv, mr);
         }
+        priv_dev_interrupt_handler_uninstall(priv, dev);
         priv_unlock(priv);
 }