mlx5: refactor special flows handling
Merge redundant code by adding a static initialization table to manage promiscuous and allmulticast (special) flows.

New function priv_rehash_flows() implements the logic to enable/disable relevant flows in one place from any context.

Signed-off-by: Yaacov Hazan <yaacovh@mellanox.com>
Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
parent a94fda95ed
commit 083c2dd317
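The refactor below boils down to one pattern: each DPDK callback records the requested state in the private structure and defers to a single priv_rehash_flows() call, which walks the special flows and installs or removes each one as allowed. The following standalone sketch illustrates that control flow only; the struct fields, the simplified priv_allow_flow_type() policy, and the printf() stand-ins are illustrative assumptions, not the real mlx5 definitions (those are in the diff itself).

/*
 * Standalone sketch of the pattern introduced by this commit: requested
 * modes are recorded as flags and a single priv_rehash_flows() reconciles
 * them with the installed flows.  All types below are simplified stand-ins
 * for illustration, not the real mlx5 structures.
 */
#include <stdbool.h>
#include <stdio.h>

/* Special flow types; MAC handling is kept separate, as in the patch. */
enum hash_rxq_flow_type {
        HASH_RXQ_FLOW_TYPE_PROMISC,
        HASH_RXQ_FLOW_TYPE_ALLMULTI,
        HASH_RXQ_FLOW_TYPE_MAC,
};

#define SPECIAL_FLOWS_N 2 /* promiscuous and allmulticast */

/* Simplified stand-in for struct priv. */
struct priv {
        bool promisc_req;                      /* promiscuous mode requested */
        bool allmulti_req;                     /* allmulticast mode requested */
        bool special_flow_on[SPECIAL_FLOWS_N]; /* flows currently installed */
        bool mac_flows_on;                     /* MAC flows currently installed */
};

/* Illustrative policy: decide whether a flow type should be installed. */
static bool
priv_allow_flow_type(const struct priv *priv, enum hash_rxq_flow_type type)
{
        switch (type) {
        case HASH_RXQ_FLOW_TYPE_PROMISC:
                return priv->promisc_req;
        case HASH_RXQ_FLOW_TYPE_ALLMULTI:
                return priv->allmulti_req;
        case HASH_RXQ_FLOW_TYPE_MAC:
                /* MAC filtering is pointless while promiscuous is on. */
                return !priv->promisc_req;
        }
        return false;
}

/* Single entry point reconciling requested state with installed flows. */
static int
priv_rehash_flows(struct priv *priv)
{
        unsigned int i;

        for (i = 0; i != SPECIAL_FLOWS_N; ++i) {
                bool allow = priv_allow_flow_type(priv,
                                                  (enum hash_rxq_flow_type)i);

                if (allow == priv->special_flow_on[i])
                        continue;
                printf("%s special flow %u\n",
                       allow ? "enabling" : "disabling", i);
                priv->special_flow_on[i] = allow;
        }
        priv->mac_flows_on = priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC);
        return 0;
}

int
main(void)
{
        struct priv priv = { .mac_flows_on = true };

        /* A DPDK callback now only flips a request flag and rehashes. */
        priv.promisc_req = true;   /* think mlx5_promiscuous_enable() */
        priv_rehash_flows(&priv);
        priv.promisc_req = false;  /* think mlx5_promiscuous_disable() */
        priv_rehash_flows(&priv);
        return 0;
}

With this shape, mlx5_promiscuous_enable()/mlx5_promiscuous_disable() and mlx5_allmulticast_enable()/mlx5_allmulticast_disable() in the diff reduce to flipping a request flag and calling priv_rehash_flows(); mlx5_dev_start() reuses the same entry point, while mlx5_dev_stop() and mlx5_dev_close() tear the special flows down explicitly via priv_special_flow_disable().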
@@ -88,8 +88,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
((priv->ctx != NULL) ? priv->ctx->device->name : ""));
/* In case mlx5_dev_stop() has not been called. */
priv_dev_interrupt_handler_uninstall(priv, dev);
priv_allmulticast_disable(priv);
priv_promiscuous_disable(priv);
priv_special_flow_disable(priv, HASH_RXQ_FLOW_TYPE_ALLMULTI);
priv_special_flow_disable(priv, HASH_RXQ_FLOW_TYPE_PROMISC);
priv_mac_addrs_disable(priv);
priv_destroy_hash_rxqs(priv);
/* Prevent crashes when queues are still in use. */
@@ -195,13 +195,11 @@ int mlx5_dev_rss_reta_update(struct rte_eth_dev *,

/* mlx5_rxmode.c */

int priv_promiscuous_enable(struct priv *);
int priv_special_flow_enable(struct priv *, enum hash_rxq_flow_type);
void priv_special_flow_disable(struct priv *, enum hash_rxq_flow_type);
void mlx5_promiscuous_enable(struct rte_eth_dev *);
void priv_promiscuous_disable(struct priv *);
void mlx5_promiscuous_disable(struct rte_eth_dev *);
int priv_allmulticast_enable(struct priv *);
void mlx5_allmulticast_enable(struct rte_eth_dev *);
void priv_allmulticast_disable(struct priv *);
void mlx5_allmulticast_disable(struct rte_eth_dev *);

/* mlx5_stats.c */
@@ -43,6 +43,9 @@
/* Maximum number of simultaneous VLAN filters. */
#define MLX5_MAX_VLAN_IDS 128

/* Maximum number of special flows. */
#define MLX5_MAX_SPECIAL_FLOWS 2

/* Request send completion once in every 64 sends, might be less. */
#define MLX5_PMD_TX_PER_COMP_REQ 64
@@ -58,31 +58,96 @@
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

static void hash_rxq_promiscuous_disable(struct hash_rxq *);
static void hash_rxq_allmulticast_disable(struct hash_rxq *);
/* Initialization data for special flows. */
static const struct special_flow_init special_flow_init[] = {
[HASH_RXQ_FLOW_TYPE_PROMISC] = {
.dst_mac_val = "\x00\x00\x00\x00\x00\x00",
.dst_mac_mask = "\x00\x00\x00\x00\x00\x00",
.hash_types =
1 << HASH_RXQ_TCPV4 |
1 << HASH_RXQ_UDPV4 |
1 << HASH_RXQ_IPV4 |
#ifdef HAVE_FLOW_SPEC_IPV6
1 << HASH_RXQ_TCPV6 |
1 << HASH_RXQ_UDPV6 |
1 << HASH_RXQ_IPV6 |
#endif /* HAVE_FLOW_SPEC_IPV6 */
1 << HASH_RXQ_ETH |
0,
},
[HASH_RXQ_FLOW_TYPE_ALLMULTI] = {
.dst_mac_val = "\x01\x00\x00\x00\x00\x00",
.dst_mac_mask = "\x01\x00\x00\x00\x00\x00",
.hash_types =
1 << HASH_RXQ_UDPV4 |
1 << HASH_RXQ_IPV4 |
#ifdef HAVE_FLOW_SPEC_IPV6
1 << HASH_RXQ_UDPV6 |
1 << HASH_RXQ_IPV6 |
#endif /* HAVE_FLOW_SPEC_IPV6 */
1 << HASH_RXQ_ETH |
0,
},
};

/**
* Enable promiscuous mode in a hash RX queue.
* Enable a special flow in a hash RX queue.
*
* @param hash_rxq
* Pointer to hash RX queue structure.
* @param flow_type
* Special flow type.
*
* @return
* 0 on success, errno value on failure.
*/
static int
hash_rxq_promiscuous_enable(struct hash_rxq *hash_rxq)
hash_rxq_special_flow_enable(struct hash_rxq *hash_rxq,
enum hash_rxq_flow_type flow_type)
{
struct ibv_exp_flow *flow;
FLOW_ATTR_SPEC_ETH(data, hash_rxq_flow_attr(hash_rxq, NULL, 0));
struct ibv_exp_flow_attr *attr = &data->attr;
struct ibv_exp_flow_spec_eth *spec = &data->spec;
const uint8_t *mac;
const uint8_t *mask;

if (hash_rxq->promisc_flow != NULL)
/* Check if flow is relevant for this hash_rxq. */
if (!(special_flow_init[flow_type].hash_types & (1 << hash_rxq->type)))
return 0;
DEBUG("%p: enabling promiscuous mode", (void *)hash_rxq);
/* Promiscuous flows only differ from normal flows by not filtering
* on specific MAC addresses. */
/* Check if flow already exists. */
if (hash_rxq->special_flow[flow_type] != NULL)
return 0;

/*
* No padding must be inserted by the compiler between attr and spec.
* This layout is expected by libibverbs.
*/
assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
hash_rxq_flow_attr(hash_rxq, attr, sizeof(data));
/* The first specification must be Ethernet. */
assert(spec->type == IBV_EXP_FLOW_SPEC_ETH);
assert(spec->size == sizeof(*spec));

mac = special_flow_init[flow_type].dst_mac_val;
mask = special_flow_init[flow_type].dst_mac_mask;
*spec = (struct ibv_exp_flow_spec_eth){
.type = IBV_EXP_FLOW_SPEC_ETH,
.size = sizeof(*spec),
.val = {
.dst_mac = {
mac[0], mac[1], mac[2],
mac[3], mac[4], mac[5],
},
},
.mask = {
.dst_mac = {
mask[0], mask[1], mask[2],
mask[3], mask[4], mask[5],
},
},
};

errno = 0;
flow = ibv_exp_create_flow(hash_rxq->qp, attr);
if (flow == NULL) {
@@ -94,44 +159,89 @@ hash_rxq_promiscuous_enable(struct hash_rxq *hash_rxq)
return errno;
return EINVAL;
}
hash_rxq->promisc_flow = flow;
DEBUG("%p: promiscuous mode enabled", (void *)hash_rxq);
hash_rxq->special_flow[flow_type] = flow;
DEBUG("%p: enabling special flow %s (%d)",
(void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type);
return 0;
}

/**
* Enable promiscuous mode in all hash RX queues.
* Disable a special flow in a hash RX queue.
*
* @param hash_rxq
* Pointer to hash RX queue structure.
* @param flow_type
* Special flow type.
*/
static void
hash_rxq_special_flow_disable(struct hash_rxq *hash_rxq,
enum hash_rxq_flow_type flow_type)
{
if (hash_rxq->special_flow[flow_type] == NULL)
return;
DEBUG("%p: disabling special flow %s (%d)",
(void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type);
claim_zero(ibv_exp_destroy_flow(hash_rxq->special_flow[flow_type]));
hash_rxq->special_flow[flow_type] = NULL;
DEBUG("%p: special flow %s (%d) disabled",
(void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type);
}

/**
* Enable a special flow in all hash RX queues.
*
* @param priv
* Private structure.
* @param flow_type
* Special flow type.
*
* @return
* 0 on success, errno value on failure.
*/
int
priv_promiscuous_enable(struct priv *priv)
priv_special_flow_enable(struct priv *priv, enum hash_rxq_flow_type flow_type)
{
unsigned int i;

if (!priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_PROMISC))
if (!priv_allow_flow_type(priv, flow_type))
return 0;
for (i = 0; (i != priv->hash_rxqs_n); ++i) {
struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
int ret;

ret = hash_rxq_promiscuous_enable(hash_rxq);
ret = hash_rxq_special_flow_enable(hash_rxq, flow_type);
if (!ret)
continue;
/* Failure, rollback. */
while (i != 0) {
hash_rxq = &(*priv->hash_rxqs)[--i];
hash_rxq_promiscuous_disable(hash_rxq);
hash_rxq_special_flow_disable(hash_rxq, flow_type);
}
return ret;
}
return 0;
}

/**
* Disable a special flow in all hash RX queues.
*
* @param priv
* Private structure.
* @param flow_type
* Special flow type.
*/
void
priv_special_flow_disable(struct priv *priv, enum hash_rxq_flow_type flow_type)
{
unsigned int i;

for (i = 0; (i != priv->hash_rxqs_n); ++i) {
struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];

hash_rxq_special_flow_disable(hash_rxq, flow_type);
}
}

/**
* DPDK callback to enable promiscuous mode.
*
@@ -146,48 +256,13 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
priv_lock(priv);
priv->promisc_req = 1;
ret = priv_promiscuous_enable(priv);
ret = priv_rehash_flows(priv);
if (ret)
ERROR("cannot enable promiscuous mode: %s", strerror(ret));
else {
priv_mac_addrs_disable(priv);
priv_allmulticast_disable(priv);
}
ERROR("error while enabling promiscuous mode: %s",
strerror(ret));
priv_unlock(priv);
}

/**
* Disable promiscuous mode in a hash RX queue.
*
* @param hash_rxq
* Pointer to hash RX queue structure.
*/
static void
hash_rxq_promiscuous_disable(struct hash_rxq *hash_rxq)
{
if (hash_rxq->promisc_flow == NULL)
return;
DEBUG("%p: disabling promiscuous mode", (void *)hash_rxq);
claim_zero(ibv_exp_destroy_flow(hash_rxq->promisc_flow));
hash_rxq->promisc_flow = NULL;
DEBUG("%p: promiscuous mode disabled", (void *)hash_rxq);
}

/**
* Disable promiscuous mode in all hash RX queues.
*
* @param priv
* Private structure.
*/
void
priv_promiscuous_disable(struct priv *priv)
{
unsigned int i;

for (i = 0; (i != priv->hash_rxqs_n); ++i)
hash_rxq_promiscuous_disable(&(*priv->hash_rxqs)[i]);
}

/**
* DPDK callback to disable promiscuous mode.
*
@@ -198,104 +273,17 @@ void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
int ret;

priv_lock(priv);
priv->promisc_req = 0;
priv_promiscuous_disable(priv);
priv_mac_addrs_enable(priv);
priv_allmulticast_enable(priv);
ret = priv_rehash_flows(priv);
if (ret)
ERROR("error while disabling promiscuous mode: %s",
strerror(ret));
priv_unlock(priv);
}

/**
* Enable allmulti mode in a hash RX queue.
*
* @param hash_rxq
* Pointer to hash RX queue structure.
*
* @return
* 0 on success, errno value on failure.
*/
static int
hash_rxq_allmulticast_enable(struct hash_rxq *hash_rxq)
{
struct ibv_exp_flow *flow;
FLOW_ATTR_SPEC_ETH(data, hash_rxq_flow_attr(hash_rxq, NULL, 0));
struct ibv_exp_flow_attr *attr = &data->attr;
struct ibv_exp_flow_spec_eth *spec = &data->spec;

if (hash_rxq->allmulti_flow != NULL)
return 0;
DEBUG("%p: enabling allmulticast mode", (void *)hash_rxq);
/*
* No padding must be inserted by the compiler between attr and spec.
* This layout is expected by libibverbs.
*/
assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
hash_rxq_flow_attr(hash_rxq, attr, sizeof(data));
*spec = (struct ibv_exp_flow_spec_eth){
.type = IBV_EXP_FLOW_SPEC_ETH,
.size = sizeof(*spec),
.val = {
.dst_mac = "\x01\x00\x00\x00\x00\x00",
},
.mask = {
.dst_mac = "\x01\x00\x00\x00\x00\x00",
},
};
errno = 0;
flow = ibv_exp_create_flow(hash_rxq->qp, attr);
if (flow == NULL) {
/* It's not clear whether errno is always set in this case. */
ERROR("%p: flow configuration failed, errno=%d: %s",
(void *)hash_rxq, errno,
(errno ? strerror(errno) : "Unknown error"));
if (errno)
return errno;
return EINVAL;
}
hash_rxq->allmulti_flow = flow;
DEBUG("%p: allmulticast mode enabled", (void *)hash_rxq);
return 0;
}

/**
* Enable allmulti mode in most hash RX queues.
* TCP queues are exempted to save resources.
*
* @param priv
* Private structure.
*
* @return
* 0 on success, errno value on failure.
*/
int
priv_allmulticast_enable(struct priv *priv)
{
unsigned int i;

if (!priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_ALLMULTI))
return 0;
for (i = 0; (i != priv->hash_rxqs_n); ++i) {
struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
int ret;

/* allmulticast not relevant for TCP. */
if (hash_rxq->type == HASH_RXQ_TCPV4)
continue;
ret = hash_rxq_allmulticast_enable(hash_rxq);
if (!ret)
continue;
/* Failure, rollback. */
while (i != 0) {
hash_rxq = &(*priv->hash_rxqs)[--i];
hash_rxq_allmulticast_disable(hash_rxq);
}
return ret;
}
return 0;
}

/**
* DPDK callback to enable allmulti mode.
*
@@ -310,44 +298,13 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
priv_lock(priv);
priv->allmulti_req = 1;
ret = priv_allmulticast_enable(priv);
ret = priv_rehash_flows(priv);
if (ret)
ERROR("cannot enable allmulticast mode: %s", strerror(ret));
ERROR("error while enabling allmulticast mode: %s",
strerror(ret));
priv_unlock(priv);
}

/**
* Disable allmulti mode in a hash RX queue.
*
* @param hash_rxq
* Pointer to hash RX queue structure.
*/
static void
hash_rxq_allmulticast_disable(struct hash_rxq *hash_rxq)
{
if (hash_rxq->allmulti_flow == NULL)
return;
DEBUG("%p: disabling allmulticast mode", (void *)hash_rxq);
claim_zero(ibv_exp_destroy_flow(hash_rxq->allmulti_flow));
hash_rxq->allmulti_flow = NULL;
DEBUG("%p: allmulticast mode disabled", (void *)hash_rxq);
}

/**
* Disable allmulti mode in all hash RX queues.
*
* @param priv
* Private structure.
*/
void
priv_allmulticast_disable(struct priv *priv)
{
unsigned int i;

for (i = 0; (i != priv->hash_rxqs_n); ++i)
hash_rxq_allmulticast_disable(&(*priv->hash_rxqs)[i]);
}

/**
* DPDK callback to disable allmulti mode.
*
@@ -358,9 +315,13 @@ void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
int ret;

priv_lock(priv);
priv->allmulti_req = 0;
priv_allmulticast_disable(priv);
ret = priv_rehash_flows(priv);
if (ret)
ERROR("error while disabling allmulticast mode: %s",
strerror(ret));
priv_unlock(priv);
}
@@ -534,8 +534,8 @@ priv_destroy_hash_rxqs(struct priv *priv)
assert(hash_rxq->priv == priv);
assert(hash_rxq->qp != NULL);
/* Also check that there are no remaining flows. */
assert(hash_rxq->allmulti_flow == NULL);
assert(hash_rxq->promisc_flow == NULL);
for (j = 0; (j != RTE_DIM(hash_rxq->special_flow)); ++j)
assert(hash_rxq->special_flow[j] == NULL);
for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j)
for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k)
assert(hash_rxq->mac_flow[j][k] == NULL);
@@ -585,6 +585,35 @@ priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
return 0;
}

/**
* Automatically enable/disable flows according to configuration.
*
* @param priv
* Private structure.
*
* @return
* 0 on success, errno value on failure.
*/
int
priv_rehash_flows(struct priv *priv)
{
unsigned int i;

for (i = 0; (i != RTE_DIM((*priv->hash_rxqs)[0].special_flow)); ++i)
if (!priv_allow_flow_type(priv, i)) {
priv_special_flow_disable(priv, i);
} else {
int ret = priv_special_flow_enable(priv, i);

if (ret)
return ret;
}
if (priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
return priv_mac_addrs_enable(priv);
priv_mac_addrs_disable(priv);
return 0;
}

/**
* Allocate RX queue elements with scattered packets support.
*
@@ -176,20 +176,42 @@ struct ind_table_init {
unsigned int hash_types_n;
};

/* Initialization data for special flows. */
struct special_flow_init {
uint8_t dst_mac_val[6];
uint8_t dst_mac_mask[6];
unsigned int hash_types;
};

enum hash_rxq_flow_type {
HASH_RXQ_FLOW_TYPE_MAC,
HASH_RXQ_FLOW_TYPE_PROMISC,
HASH_RXQ_FLOW_TYPE_ALLMULTI,
HASH_RXQ_FLOW_TYPE_MAC,
};

#ifndef NDEBUG
static inline const char *
hash_rxq_flow_type_str(enum hash_rxq_flow_type flow_type)
{
switch (flow_type) {
case HASH_RXQ_FLOW_TYPE_PROMISC:
return "promiscuous";
case HASH_RXQ_FLOW_TYPE_ALLMULTI:
return "allmulticast";
case HASH_RXQ_FLOW_TYPE_MAC:
return "MAC";
}
return NULL;
}
#endif /* NDEBUG */

struct hash_rxq {
struct priv *priv; /* Back pointer to private data. */
struct ibv_qp *qp; /* Hash RX QP. */
enum hash_rxq_type type; /* Hash RX queue type. */
/* MAC flow steering rules, one per VLAN ID. */
struct ibv_exp_flow *mac_flow[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
struct ibv_exp_flow *promisc_flow; /* Promiscuous flow. */
struct ibv_exp_flow *allmulti_flow; /* Multicast flow. */
struct ibv_exp_flow *special_flow[MLX5_MAX_SPECIAL_FLOWS];
};

/* TX element. */
@@ -247,6 +269,7 @@ size_t hash_rxq_flow_attr(const struct hash_rxq *, struct ibv_exp_flow_attr *,
int priv_create_hash_rxqs(struct priv *);
void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
void rxq_cleanup(struct rxq *);
int rxq_rehash(struct rte_eth_dev *, struct rxq *);
int rxq_setup(struct rte_eth_dev *, struct rxq *, uint16_t, unsigned int,
@@ -72,11 +72,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
err = priv_create_hash_rxqs(priv);
if (!err)
err = priv_promiscuous_enable(priv);
if (!err)
err = priv_mac_addrs_enable(priv);
if (!err)
err = priv_allmulticast_enable(priv);
err = priv_rehash_flows(priv);
if (!err)
priv->started = 1;
else {
@@ -84,8 +80,8 @@ mlx5_dev_start(struct rte_eth_dev *dev)
" %s",
(void *)priv, strerror(err));
/* Rollback. */
priv_allmulticast_disable(priv);
priv_promiscuous_disable(priv);
priv_special_flow_disable(priv, HASH_RXQ_FLOW_TYPE_ALLMULTI);
priv_special_flow_disable(priv, HASH_RXQ_FLOW_TYPE_PROMISC);
priv_mac_addrs_disable(priv);
priv_destroy_hash_rxqs(priv);
}
@@ -113,8 +109,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
return;
}
DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
priv_allmulticast_disable(priv);
priv_promiscuous_disable(priv);
priv_special_flow_disable(priv, HASH_RXQ_FLOW_TYPE_ALLMULTI);
priv_special_flow_disable(priv, HASH_RXQ_FLOW_TYPE_PROMISC);
priv_mac_addrs_disable(priv);
priv_destroy_hash_rxqs(priv);
priv_dev_interrupt_handler_uninstall(priv, dev);