net/mlx5: remove hash Rx queues support

Starting from this commit, RSS support becomes unavailable until it is
replaced by the generic flow implementation.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
parent 29c1d8bb3e
commit 29957ec421
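The "generic flow implementation" mentioned in the commit message is the rte_flow based RSS path that later patches of this series introduce. Purely as an illustration (not part of this commit), a hypothetical application-level helper that spreads ingress traffic over several Rx queues through an RTE_FLOW_ACTION_TYPE_RSS rule could look like the sketch below; the helper name, its parameters, and the field layout of struct rte_flow_action_rss follow the DPDK 17.11-era API and are assumptions, not code from this patch.

#include <errno.h>
#include <stdlib.h>

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hypothetical helper: spread all ingress traffic of "port_id" over its
 * first "nb_queues" Rx queues with a single RSS flow rule. */
static int
setup_rss_flow(uint16_t port_id, uint16_t nb_queues)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH }, /* Match every packet. */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS }, /* .conf set below. */
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL, /* Keep the PMD default (Toeplitz) key. */
		.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
	};
	struct rte_flow_action_rss *rss;
	struct rte_flow_error error;
	struct rte_flow *flow;
	uint16_t i;
	int ret = 0;

	/* 17.11-era layout: the structure ends with a flexible queue[]. */
	rss = malloc(sizeof(*rss) + nb_queues * sizeof(rss->queue[0]));
	if (rss == NULL)
		return -ENOMEM;
	rss->rss_conf = &rss_conf;
	rss->num = nb_queues;
	for (i = 0; i != nb_queues; ++i)
		rss->queue[i] = i;
	actions[0].conf = rss;
	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
	if (flow == NULL)
		ret = -rte_errno;
	/* A created rule is not affected by later changes to these buffers. */
	free(rss);
	return ret;
}

Per the rte_flow API contract, the rule keeps its own copy of the pattern and action data, so the temporary rte_flow_action_rss buffer can be freed once rte_flow_create() returns.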
@@ -198,7 +198,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
 	/* In case mlx5_dev_stop() has not been called. */
 	priv_dev_interrupt_handler_uninstall(priv, dev);
-	priv_destroy_hash_rxqs(priv);
 	priv_dev_traffic_disable(priv, dev);
 	/* Prevent crashes when queues are still in use. */
 	dev->rx_pkt_burst = removed_rx_burst;
@@ -127,13 +127,7 @@ struct priv {
 	unsigned int txqs_n; /* TX queues array size. */
 	struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
 	struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
-	/* Indirection tables referencing all RX WQs. */
-	struct ibv_rwq_ind_table *(*ind_tables)[];
-	unsigned int ind_tables_n; /* Number of indirection tables. */
 	unsigned int ind_table_max_size; /* Maximum indirection table size. */
-	/* Hash RX QPs feeding the indirection table. */
-	struct hash_rxq (*hash_rxqs)[];
-	unsigned int hash_rxqs_n; /* Hash RX QPs array size. */
 	struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
 	struct rte_intr_handle intr_handle; /* Interrupt handler. */
 	unsigned int (*reta_idx)[]; /* RETA index table. */
@@ -64,122 +64,6 @@
 #include "mlx5_autoconf.h"
 #include "mlx5_defs.h"
 
-/* Initialization data for hash RX queues. */
-const struct hash_rxq_init hash_rxq_init[] = {
-	[HASH_RXQ_TCPV4] = {
-		.hash_fields = (IBV_RX_HASH_SRC_IPV4 |
-				IBV_RX_HASH_DST_IPV4 |
-				IBV_RX_HASH_SRC_PORT_TCP |
-				IBV_RX_HASH_DST_PORT_TCP),
-		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
-		.flow_priority = 0,
-		.flow_spec.tcp_udp = {
-			.type = IBV_FLOW_SPEC_TCP,
-			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
-		},
-		.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
-	},
-	[HASH_RXQ_UDPV4] = {
-		.hash_fields = (IBV_RX_HASH_SRC_IPV4 |
-				IBV_RX_HASH_DST_IPV4 |
-				IBV_RX_HASH_SRC_PORT_UDP |
-				IBV_RX_HASH_DST_PORT_UDP),
-		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
-		.flow_priority = 0,
-		.flow_spec.tcp_udp = {
-			.type = IBV_FLOW_SPEC_UDP,
-			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
-		},
-		.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
-	},
-	[HASH_RXQ_IPV4] = {
-		.hash_fields = (IBV_RX_HASH_SRC_IPV4 |
-				IBV_RX_HASH_DST_IPV4),
-		.dpdk_rss_hf = (ETH_RSS_IPV4 |
-				ETH_RSS_FRAG_IPV4),
-		.flow_priority = 1,
-		.flow_spec.ipv4 = {
-			.type = IBV_FLOW_SPEC_IPV4,
-			.size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
-		},
-		.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
-	},
-	[HASH_RXQ_TCPV6] = {
-		.hash_fields = (IBV_RX_HASH_SRC_IPV6 |
-				IBV_RX_HASH_DST_IPV6 |
-				IBV_RX_HASH_SRC_PORT_TCP |
-				IBV_RX_HASH_DST_PORT_TCP),
-		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
-		.flow_priority = 0,
-		.flow_spec.tcp_udp = {
-			.type = IBV_FLOW_SPEC_TCP,
-			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
-		},
-		.underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
-	},
-	[HASH_RXQ_UDPV6] = {
-		.hash_fields = (IBV_RX_HASH_SRC_IPV6 |
-				IBV_RX_HASH_DST_IPV6 |
-				IBV_RX_HASH_SRC_PORT_UDP |
-				IBV_RX_HASH_DST_PORT_UDP),
-		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
-		.flow_priority = 0,
-		.flow_spec.tcp_udp = {
-			.type = IBV_FLOW_SPEC_UDP,
-			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
-		},
-		.underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
-	},
-	[HASH_RXQ_IPV6] = {
-		.hash_fields = (IBV_RX_HASH_SRC_IPV6 |
-				IBV_RX_HASH_DST_IPV6),
-		.dpdk_rss_hf = (ETH_RSS_IPV6 |
-				ETH_RSS_FRAG_IPV6),
-		.flow_priority = 1,
-		.flow_spec.ipv6 = {
-			.type = IBV_FLOW_SPEC_IPV6,
-			.size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
-		},
-		.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
-	},
-	[HASH_RXQ_ETH] = {
-		.hash_fields = 0,
-		.dpdk_rss_hf = 0,
-		.flow_priority = 2,
-		.flow_spec.eth = {
-			.type = IBV_FLOW_SPEC_ETH,
-			.size = sizeof(hash_rxq_init[0].flow_spec.eth),
-		},
-		.underlayer = NULL,
-	},
-};
-
-/* Number of entries in hash_rxq_init[]. */
-const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
-
-/* Initialization data for hash RX queue indirection tables. */
-static const struct ind_table_init ind_table_init[] = {
-	{
-		.max_size = -1u, /* Superseded by HW limitations. */
-		.hash_types =
-			1 << HASH_RXQ_TCPV4 |
-			1 << HASH_RXQ_UDPV4 |
-			1 << HASH_RXQ_IPV4 |
-			1 << HASH_RXQ_TCPV6 |
-			1 << HASH_RXQ_UDPV6 |
-			1 << HASH_RXQ_IPV6 |
-			0,
-		.hash_types_n = 6,
-	},
-	{
-		.max_size = 1,
-		.hash_types = 1 << HASH_RXQ_ETH,
-		.hash_types_n = 1,
-	},
-};
-
-#define IND_TABLE_INIT_N RTE_DIM(ind_table_init)
-
 /* Default RSS hash key also used for ConnectX-3. */
 uint8_t rss_hash_default_key[] = {
 	0x2c, 0xc6, 0x81, 0xd1,
@@ -197,359 +81,6 @@ uint8_t rss_hash_default_key[] = {
 /* Length of the default RSS hash key. */
 const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
 
-/**
- * Populate flow steering rule for a given hash RX queue type using
- * information from hash_rxq_init[]. Nothing is written to flow_attr when
- * flow_attr_size is not large enough, but the required size is still returned.
- *
- * @param priv
- *   Pointer to private structure.
- * @param[out] flow_attr
- *   Pointer to flow attribute structure to fill. Note that the allocated
- *   area must be larger and large enough to hold all flow specifications.
- * @param flow_attr_size
- *   Entire size of flow_attr and trailing room for flow specifications.
- * @param type
- *   Hash RX queue type to use for flow steering rule.
- *
- * @return
- *   Total size of the flow attribute buffer. No errors are defined.
- */
-size_t
-priv_flow_attr(struct priv *priv, struct ibv_flow_attr *flow_attr,
-	       size_t flow_attr_size, enum hash_rxq_type type)
-{
-	size_t offset = sizeof(*flow_attr);
-	const struct hash_rxq_init *init = &hash_rxq_init[type];
-
-	assert(priv != NULL);
-	assert((size_t)type < RTE_DIM(hash_rxq_init));
-	do {
-		offset += init->flow_spec.hdr.size;
-		init = init->underlayer;
-	} while (init != NULL);
-	if (offset > flow_attr_size)
-		return offset;
-	flow_attr_size = offset;
-	init = &hash_rxq_init[type];
-	*flow_attr = (struct ibv_flow_attr){
-		.type = IBV_FLOW_ATTR_NORMAL,
-		/* Priorities < 3 are reserved for flow director. */
-		.priority = init->flow_priority + 3,
-		.num_of_specs = 0,
-		.port = priv->port,
-		.flags = 0,
-	};
-	do {
-		offset -= init->flow_spec.hdr.size;
-		memcpy((void *)((uintptr_t)flow_attr + offset),
-		       &init->flow_spec,
-		       init->flow_spec.hdr.size);
-		++flow_attr->num_of_specs;
-		init = init->underlayer;
-	} while (init != NULL);
-	return flow_attr_size;
-}
-
-/**
- * Convert hash type position in indirection table initializer to
- * hash RX queue type.
- *
- * @param table
- *   Indirection table initializer.
- * @param pos
- *   Hash type position.
- *
- * @return
- *   Hash RX queue type.
- */
-static enum hash_rxq_type
-hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
-{
-	enum hash_rxq_type type = HASH_RXQ_TCPV4;
-
-	assert(pos < table->hash_types_n);
-	do {
-		if ((table->hash_types & (1 << type)) && (pos-- == 0))
-			break;
-		++type;
-	} while (1);
-	return type;
-}
-
-/**
- * Filter out disabled hash RX queue types from ind_table_init[].
- *
- * @param priv
- *   Pointer to private structure.
- * @param[out] table
- *   Output table.
- *
- * @return
- *   Number of table entries.
- */
-static unsigned int
-priv_make_ind_table_init(struct priv *priv,
-			 struct ind_table_init (*table)[IND_TABLE_INIT_N])
-{
-	uint64_t rss_hf;
-	unsigned int i;
-	unsigned int j;
-	unsigned int table_n = 0;
-	/* Mandatory to receive frames not handled by normal hash RX queues. */
-	unsigned int hash_types_sup = 1 << HASH_RXQ_ETH;
-
-	rss_hf = priv->rss_conf.rss_hf;
-	/* Process other protocols only if more than one queue. */
-	if (priv->rxqs_n > 1)
-		for (i = 0; (i != hash_rxq_init_n); ++i)
-			if (rss_hf & hash_rxq_init[i].dpdk_rss_hf)
-				hash_types_sup |= (1 << i);
-
-	/* Filter out entries whose protocols are not in the set. */
-	for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) {
-		unsigned int nb;
-		unsigned int h;
-
-		/* j is increased only if the table has valid protocols. */
-		assert(j <= i);
-		(*table)[j] = ind_table_init[i];
-		(*table)[j].hash_types &= hash_types_sup;
-		for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h)
-			if (((*table)[j].hash_types >> h) & 0x1)
-				++nb;
-		(*table)[i].hash_types_n = nb;
-		if (nb) {
-			++table_n;
-			++j;
-		}
-	}
-	return table_n;
-}
-
-/**
- * Initialize hash RX queues and indirection table.
- *
- * @param priv
- *   Pointer to private structure.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-int
-priv_create_hash_rxqs(struct priv *priv)
-{
-	struct ibv_wq *wqs[priv->reta_idx_n];
-	struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
-	unsigned int ind_tables_n =
-		priv_make_ind_table_init(priv, &ind_table_init);
-	unsigned int hash_rxqs_n = 0;
-	struct hash_rxq (*hash_rxqs)[] = NULL;
-	struct ibv_rwq_ind_table *(*ind_tables)[] = NULL;
-	unsigned int i;
-	unsigned int j;
-	unsigned int k;
-	int err = 0;
-
-	assert(priv->ind_tables == NULL);
-	assert(priv->ind_tables_n == 0);
-	assert(priv->hash_rxqs == NULL);
-	assert(priv->hash_rxqs_n == 0);
-	assert(priv->pd != NULL);
-	assert(priv->ctx != NULL);
-	if (priv->isolated)
-		return 0;
-	if (priv->rxqs_n == 0)
-		return EINVAL;
-	assert(priv->rxqs != NULL);
-	if (ind_tables_n == 0) {
-		ERROR("all hash RX queue types have been filtered out,"
-		      " indirection table cannot be created");
-		return EINVAL;
-	}
-	if (priv->rxqs_n & (priv->rxqs_n - 1)) {
-		INFO("%u RX queues are configured, consider rounding this"
-		     " number to the next power of two for better balancing",
-		     priv->rxqs_n);
-		DEBUG("indirection table extended to assume %u WQs",
-		      priv->reta_idx_n);
-	}
-	for (i = 0; (i != priv->reta_idx_n); ++i) {
-		struct mlx5_rxq_ctrl *rxq_ctrl;
-
-		rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]],
-					struct mlx5_rxq_ctrl, rxq);
-		wqs[i] = rxq_ctrl->ibv->wq;
-	}
-	/* Get number of hash RX queues to configure. */
-	for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
-		hash_rxqs_n += ind_table_init[i].hash_types_n;
-	DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables",
-	      hash_rxqs_n, priv->rxqs_n, ind_tables_n);
-	/* Create indirection tables. */
-	ind_tables = rte_calloc(__func__, ind_tables_n,
-				sizeof((*ind_tables)[0]), 0);
-	if (ind_tables == NULL) {
-		err = ENOMEM;
-		ERROR("cannot allocate indirection tables container: %s",
-		      strerror(err));
-		goto error;
-	}
-	for (i = 0; (i != ind_tables_n); ++i) {
-		struct ibv_rwq_ind_table_init_attr ind_init_attr = {
-			.log_ind_tbl_size = 0, /* Set below. */
-			.ind_tbl = wqs,
-			.comp_mask = 0,
-		};
-		unsigned int ind_tbl_size = ind_table_init[i].max_size;
-		struct ibv_rwq_ind_table *ind_table;
-
-		if (priv->reta_idx_n < ind_tbl_size)
-			ind_tbl_size = priv->reta_idx_n;
-		ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);
-		errno = 0;
-		ind_table = ibv_create_rwq_ind_table(priv->ctx,
-						     &ind_init_attr);
-		if (ind_table != NULL) {
-			(*ind_tables)[i] = ind_table;
-			continue;
-		}
-		/* Not clear whether errno is set. */
-		err = (errno ? errno : EINVAL);
-		ERROR("RX indirection table creation failed with error %d: %s",
-		      err, strerror(err));
-		goto error;
-	}
-	/* Allocate array that holds hash RX queues and related data. */
-	hash_rxqs = rte_calloc(__func__, hash_rxqs_n,
-			       sizeof((*hash_rxqs)[0]), 0);
-	if (hash_rxqs == NULL) {
-		err = ENOMEM;
-		ERROR("cannot allocate hash RX queues container: %s",
-		      strerror(err));
-		goto error;
-	}
-	for (i = 0, j = 0, k = 0;
-	     ((i != hash_rxqs_n) && (j != ind_tables_n));
-	     ++i) {
-		struct hash_rxq *hash_rxq = &(*hash_rxqs)[i];
-		enum hash_rxq_type type =
-			hash_rxq_type_from_pos(&ind_table_init[j], k);
-		struct rte_eth_rss_conf *priv_rss_conf = &priv->rss_conf;
-		struct ibv_rx_hash_conf hash_conf = {
-			.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
-			.rx_hash_key_len = (priv_rss_conf ?
-					    priv_rss_conf->rss_key_len :
-					    rss_hash_default_key_len),
-			.rx_hash_key = (priv_rss_conf ?
-					priv_rss_conf->rss_key :
-					rss_hash_default_key),
-			.rx_hash_fields_mask = hash_rxq_init[type].hash_fields,
-		};
-		struct ibv_qp_init_attr_ex qp_init_attr = {
-			.qp_type = IBV_QPT_RAW_PACKET,
-			.comp_mask = (IBV_QP_INIT_ATTR_PD |
-				      IBV_QP_INIT_ATTR_IND_TABLE |
-				      IBV_QP_INIT_ATTR_RX_HASH),
-			.rx_hash_conf = hash_conf,
-			.rwq_ind_tbl = (*ind_tables)[j],
-			.pd = priv->pd,
-		};
-
-		DEBUG("using indirection table %u for hash RX queue %u type %d",
-		      j, i, type);
-		*hash_rxq = (struct hash_rxq){
-			.priv = priv,
-			.qp = ibv_create_qp_ex(priv->ctx, &qp_init_attr),
-			.type = type,
-		};
-		if (hash_rxq->qp == NULL) {
-			err = (errno ? errno : EINVAL);
-			ERROR("Hash RX QP creation failure: %s",
-			      strerror(err));
-			goto error;
-		}
-		if (++k < ind_table_init[j].hash_types_n)
-			continue;
-		/* Switch to the next indirection table and reset hash RX
-		 * queue type array index. */
-		++j;
-		k = 0;
-	}
-	priv->ind_tables = ind_tables;
-	priv->ind_tables_n = ind_tables_n;
-	priv->hash_rxqs = hash_rxqs;
-	priv->hash_rxqs_n = hash_rxqs_n;
-	assert(err == 0);
-	return 0;
-error:
-	if (hash_rxqs != NULL) {
-		for (i = 0; (i != hash_rxqs_n); ++i) {
-			struct ibv_qp *qp = (*hash_rxqs)[i].qp;
-
-			if (qp == NULL)
-				continue;
-			claim_zero(ibv_destroy_qp(qp));
-		}
-		rte_free(hash_rxqs);
-	}
-	if (ind_tables != NULL) {
-		for (j = 0; (j != ind_tables_n); ++j) {
-			struct ibv_rwq_ind_table *ind_table =
-				(*ind_tables)[j];
-
-			if (ind_table == NULL)
-				continue;
-			claim_zero(ibv_destroy_rwq_ind_table(ind_table));
-		}
-		rte_free(ind_tables);
-	}
-	return err;
-}
-
-/**
- * Clean up hash RX queues and indirection table.
- *
- * @param priv
- *   Pointer to private structure.
- */
-void
-priv_destroy_hash_rxqs(struct priv *priv)
-{
-	unsigned int i;
-
-	DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n);
-	if (priv->hash_rxqs_n == 0) {
-		assert(priv->hash_rxqs == NULL);
-		assert(priv->ind_tables == NULL);
-		return;
-	}
-	for (i = 0; (i != priv->hash_rxqs_n); ++i) {
-		struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
-		unsigned int j, k;
-
-		assert(hash_rxq->priv == priv);
-		assert(hash_rxq->qp != NULL);
-		for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j)
-			for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k)
-				assert(hash_rxq->mac_flow[j][k] == NULL);
-		claim_zero(ibv_destroy_qp(hash_rxq->qp));
-	}
-	priv->hash_rxqs_n = 0;
-	rte_free(priv->hash_rxqs);
-	priv->hash_rxqs = NULL;
-	for (i = 0; (i != priv->ind_tables_n); ++i) {
-		struct ibv_rwq_ind_table *ind_table =
-			(*priv->ind_tables)[i];
-
-		assert(ind_table != NULL);
-		claim_zero(ibv_destroy_rwq_ind_table(ind_table));
-	}
-	priv->ind_tables_n = 0;
-	rte_free(priv->ind_tables);
-	priv->ind_tables = NULL;
-}
-
 /**
 * Allocate RX queue elements.
 *
@@ -176,75 +176,6 @@ struct mlx5_hrxq {
 	uint8_t rss_key[]; /* Hash key. */
 };
 
-/* Hash RX queue types. */
-enum hash_rxq_type {
-	HASH_RXQ_TCPV4,
-	HASH_RXQ_UDPV4,
-	HASH_RXQ_IPV4,
-	HASH_RXQ_TCPV6,
-	HASH_RXQ_UDPV6,
-	HASH_RXQ_IPV6,
-	HASH_RXQ_ETH,
-};
-
-/* Flow structure with Ethernet specification. It is packed to prevent padding
- * between attr and spec as this layout is expected by libibverbs. */
-struct flow_attr_spec_eth {
-	struct ibv_flow_attr attr;
-	struct ibv_flow_spec_eth spec;
-} __attribute__((packed));
-
-/* Define a struct flow_attr_spec_eth object as an array of at least
- * "size" bytes. Room after the first index is normally used to store
- * extra flow specifications. */
-#define FLOW_ATTR_SPEC_ETH(name, size) \
-	struct flow_attr_spec_eth name \
-		[((size) / sizeof(struct flow_attr_spec_eth)) + \
-		 !!((size) % sizeof(struct flow_attr_spec_eth))]
-
-/* Initialization data for hash RX queue. */
-struct hash_rxq_init {
-	uint64_t hash_fields; /* Fields that participate in the hash. */
-	uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
-	unsigned int flow_priority; /* Flow priority to use. */
-	union {
-		struct {
-			enum ibv_flow_spec_type type;
-			uint16_t size;
-		} hdr;
-		struct ibv_flow_spec_tcp_udp tcp_udp;
-		struct ibv_flow_spec_ipv4 ipv4;
-		struct ibv_flow_spec_ipv6 ipv6;
-		struct ibv_flow_spec_eth eth;
-	} flow_spec; /* Flow specification template. */
-	const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
-};
-
-/* Initialization data for indirection table. */
-struct ind_table_init {
-	unsigned int max_size; /* Maximum number of WQs. */
-	/* Hash RX queues using this table. */
-	unsigned int hash_types;
-	unsigned int hash_types_n;
-};
-
-/* Initialization data for special flows. */
-struct special_flow_init {
-	uint8_t dst_mac_val[6];
-	uint8_t dst_mac_mask[6];
-	unsigned int hash_types;
-	unsigned int per_vlan:1;
-};
-
-struct hash_rxq {
-	struct priv *priv; /* Back pointer to private data. */
-	struct ibv_qp *qp; /* Hash RX QP. */
-	enum hash_rxq_type type; /* Hash RX queue type. */
-	/* MAC flow steering rules, one per VLAN ID. */
-	struct ibv_flow *mac_flow
-		[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
-};
-
 /* TX queue descriptor. */
 __extension__
 struct mlx5_txq_data {
@@ -302,16 +233,9 @@ struct mlx5_txq_ctrl {
 
 /* mlx5_rxq.c */
 
-extern const struct hash_rxq_init hash_rxq_init[];
-extern const unsigned int hash_rxq_init_n;
-
 extern uint8_t rss_hash_default_key[];
 extern const size_t rss_hash_default_key_len;
 
-size_t priv_flow_attr(struct priv *, struct ibv_flow_attr *,
-		      size_t, enum hash_rxq_type);
-int priv_create_hash_rxqs(struct priv *);
-void priv_destroy_hash_rxqs(struct priv *);
 void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *);
 int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 			const struct rte_eth_rxconf *, struct rte_mempool *);
@@ -161,9 +161,9 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	}
 	/* Update receive callback. */
 	priv_dev_select_rx_function(priv, dev);
-	err = priv_create_hash_rxqs(priv);
+	err = priv_dev_traffic_enable(priv, dev);
 	if (err) {
-		ERROR("%p: an error occurred while configuring hash RX queues:"
+		ERROR("%p: an error occurred while configuring control flows:"
 		      " %s",
 		      (void *)priv, strerror(err));
 		goto error;
@@ -190,8 +190,8 @@ error:
 	dev->data->dev_started = 0;
 	LIST_FOREACH(mr, &priv->mr, next)
 		priv_mr_release(priv, mr);
-	priv_destroy_hash_rxqs(priv);
 	priv_flow_stop(priv, &priv->flows);
+	priv_dev_traffic_disable(priv, dev);
 	priv_txq_stop(priv);
 	priv_rxq_stop(priv);
 	priv_flow_delete_drop_queue(priv);
@@ -224,9 +224,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	rte_wmb();
 	usleep(1000 * priv->rxqs_n);
-	DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
-	priv_destroy_hash_rxqs(priv);
 	priv_flow_stop(priv, &priv->flows);
+	priv_flow_flush(priv, &priv->ctrl_flows);
 	priv_dev_traffic_disable(priv, dev);
 	priv_rx_intr_vec_disable(priv);
 	priv_dev_interrupt_handler_uninstall(priv, dev);
 	priv_txq_stop(priv);