mlx5: adapt indirection table size depending on Rx queues number
Use the maximum size of the indirection table when the number of requested RX queues is not a power of two; this helps to improve RSS balancing. A message informs users that balancing is not optimal in such cases. Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
This commit is contained in:
parent
a76133214d
commit
95e16ef325
@ -299,7 +299,9 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
|
|||||||
struct ether_addr mac;
|
struct ether_addr mac;
|
||||||
|
|
||||||
#ifdef HAVE_EXP_QUERY_DEVICE
|
#ifdef HAVE_EXP_QUERY_DEVICE
|
||||||
exp_device_attr.comp_mask = IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS;
|
exp_device_attr.comp_mask =
|
||||||
|
IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |
|
||||||
|
IBV_EXP_DEVICE_ATTR_RX_HASH;
|
||||||
#endif /* HAVE_EXP_QUERY_DEVICE */
|
#endif /* HAVE_EXP_QUERY_DEVICE */
|
||||||
|
|
||||||
DEBUG("using port %u (%08" PRIx32 ")", port, test);
|
DEBUG("using port %u (%08" PRIx32 ")", port, test);
|
||||||
@ -363,6 +365,12 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
|
|||||||
DEBUG("L2 tunnel checksum offloads are %ssupported",
|
DEBUG("L2 tunnel checksum offloads are %ssupported",
|
||||||
(priv->hw_csum_l2tun ? "" : "not "));
|
(priv->hw_csum_l2tun ? "" : "not "));
|
||||||
|
|
||||||
|
priv->ind_table_max_size = exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size;
|
||||||
|
DEBUG("maximum RX indirection table size is %u",
|
||||||
|
priv->ind_table_max_size);
|
||||||
|
|
||||||
|
#else /* HAVE_EXP_QUERY_DEVICE */
|
||||||
|
priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
|
||||||
#endif /* HAVE_EXP_QUERY_DEVICE */
|
#endif /* HAVE_EXP_QUERY_DEVICE */
|
||||||
|
|
||||||
priv->vf = vf;
|
priv->vf = vf;
|
||||||
|
@ -109,6 +109,7 @@ struct priv {
|
|||||||
/* Indirection tables referencing all RX WQs. */
|
/* Indirection tables referencing all RX WQs. */
|
||||||
struct ibv_exp_rwq_ind_table *(*ind_tables)[];
|
struct ibv_exp_rwq_ind_table *(*ind_tables)[];
|
||||||
unsigned int ind_tables_n; /* Number of indirection tables. */
|
unsigned int ind_tables_n; /* Number of indirection tables. */
|
||||||
|
unsigned int ind_table_max_size; /* Maximum indirection table size. */
|
||||||
/* Hash RX QPs feeding the indirection table. */
|
/* Hash RX QPs feeding the indirection table. */
|
||||||
struct hash_rxq (*hash_rxqs)[];
|
struct hash_rxq (*hash_rxqs)[];
|
||||||
unsigned int hash_rxqs_n; /* Hash RX QPs array size. */
|
unsigned int hash_rxqs_n; /* Hash RX QPs array size. */
|
||||||
|
@ -46,6 +46,9 @@
|
|||||||
/* Request send completion once in every 64 sends, might be less. */
|
/* Request send completion once in every 64 sends, might be less. */
|
||||||
#define MLX5_PMD_TX_PER_COMP_REQ 64
|
#define MLX5_PMD_TX_PER_COMP_REQ 64
|
||||||
|
|
||||||
|
/* RSS Indirection table size. */
|
||||||
|
#define RSS_INDIRECTION_TABLE_SIZE 128
|
||||||
|
|
||||||
/* Maximum number of Scatter/Gather Elements per Work Request. */
|
/* Maximum number of Scatter/Gather Elements per Work Request. */
|
||||||
#ifndef MLX5_PMD_SGE_WR_N
|
#ifndef MLX5_PMD_SGE_WR_N
|
||||||
#define MLX5_PMD_SGE_WR_N 4
|
#define MLX5_PMD_SGE_WR_N 4
|
||||||
|
@ -224,7 +224,13 @@ priv_make_ind_table_init(struct priv *priv,
|
|||||||
int
|
int
|
||||||
priv_create_hash_rxqs(struct priv *priv)
|
priv_create_hash_rxqs(struct priv *priv)
|
||||||
{
|
{
|
||||||
unsigned int wqs_n = (1 << log2above(priv->rxqs_n));
|
/* If the requested number of WQs is not a power of two, use the
|
||||||
|
* maximum indirection table size for better balancing.
|
||||||
|
* The result is always rounded to the next power of two. */
|
||||||
|
unsigned int wqs_n =
|
||||||
|
(1 << log2above((priv->rxqs_n & (priv->rxqs_n - 1)) ?
|
||||||
|
priv->ind_table_max_size :
|
||||||
|
priv->rxqs_n));
|
||||||
struct ibv_exp_wq *wqs[wqs_n];
|
struct ibv_exp_wq *wqs[wqs_n];
|
||||||
struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
|
struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
|
||||||
unsigned int ind_tables_n =
|
unsigned int ind_tables_n =
|
||||||
@ -251,16 +257,17 @@ priv_create_hash_rxqs(struct priv *priv)
|
|||||||
" indirection table cannot be created");
|
" indirection table cannot be created");
|
||||||
return EINVAL;
|
return EINVAL;
|
||||||
}
|
}
|
||||||
if (wqs_n < priv->rxqs_n) {
|
if ((wqs_n < priv->rxqs_n) || (wqs_n > priv->ind_table_max_size)) {
|
||||||
ERROR("cannot handle this many RX queues (%u)", priv->rxqs_n);
|
ERROR("cannot handle this many RX queues (%u)", priv->rxqs_n);
|
||||||
err = ERANGE;
|
err = ERANGE;
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
if (wqs_n != priv->rxqs_n)
|
if (wqs_n != priv->rxqs_n) {
|
||||||
WARN("%u RX queues are configured, consider rounding this"
|
INFO("%u RX queues are configured, consider rounding this"
|
||||||
" number to the next power of two (%u) for optimal"
|
" number to the next power of two for better balancing",
|
||||||
" performance",
|
priv->rxqs_n);
|
||||||
priv->rxqs_n, wqs_n);
|
DEBUG("indirection table extended to assume %u WQs", wqs_n);
|
||||||
|
}
|
||||||
/* When the number of RX queues is not a power of two, the remaining
|
/* When the number of RX queues is not a power of two, the remaining
|
||||||
* table entries are padded with reused WQs and hashes are not spread
|
* table entries are padded with reused WQs and hashes are not spread
|
||||||
* uniformly. */
|
* uniformly. */
|
||||||
|
Loading…
Reference in New Issue
Block a user