net/mlx5: share Rx hash queue code

Move the Rx hash queue object resource allocations that are duplicated
between the DevX and Verbs modules to a shared location.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Michael Baum, 2020-09-03 10:13:47 +00:00 (committed by Ferruh Yigit)
parent 25ae7f1a5d
commit 5a959cbfa6
7 changed files with 110 additions and 111 deletions
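For orientation, below is a condensed sketch of the consolidated creation path this patch introduces: the shared routine allocates the hash queue from the indexed pool, resolves the indirection table, fills the RSS parameters, and only then calls the per-backend priv->obj_ops->hrxq_new() callback for the Verbs/DevX object creation. The name hrxq_new_sketch is illustrative and error handling is trimmed; the real function is mlx5_hrxq_new() as shown in the diff below.

	/* Simplified sketch of the shared creation flow, not the verbatim driver code. */
	static uint32_t
	hrxq_new_sketch(struct rte_eth_dev *dev, const uint8_t *rss_key,
			uint32_t rss_key_len, uint64_t hash_fields,
			const uint16_t *queues, uint32_t queues_n, int tunnel)
	{
		struct mlx5_priv *priv = dev->data->dev_private;
		struct mlx5_ind_table_obj *ind_tbl;
		struct mlx5_hrxq *hrxq;
		uint32_t hrxq_idx = 0;

		/* Without hash fields only the first queue feeds the indirection table. */
		queues_n = hash_fields ? queues_n : 1;
		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
		if (!ind_tbl)
			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
		if (!ind_tbl)
			return 0; /* rte_errno already set. */
		/* Shared part: allocate the hash queue and fill the RSS parameters. */
		hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
		if (!hrxq)
			goto error;
		hrxq->ind_table = ind_tbl;
		hrxq->rss_key_len = rss_key_len;
		hrxq->hash_fields = hash_fields;
		memcpy(hrxq->rss_key, rss_key, rss_key_len);
		/* Backend-specific part: Verbs QP or DevX TIR creation. */
		if (priv->obj_ops->hrxq_new(dev, hrxq, tunnel) < 0)
			goto error;
		rte_atomic32_inc(&hrxq->refcnt);
		return hrxq_idx;
	error:
		mlx5_ind_table_obj_release(dev, ind_tbl);
		if (hrxq)
			mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
		return 0;
	}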

@@ -502,45 +502,24 @@ mlx5_ibv_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
*
* @param dev
* Pointer to Ethernet device.
* @param rss_key
* RSS key for the Rx hash queue.
* @param rss_key_len
* RSS key length.
* @param hash_fields
* Verbs protocol hash field to make the RSS on.
* @param queues
* Queues entering in hash queue. In case of empty hash_fields only the
* first queue index will be taken for the indirection table.
* @param queues_n
* Number of queues.
* @param hrxq
* Pointer to Rx Hash queue.
* @param tunnel
* Tunnel type.
*
* @return
* The Verbs object initialized index, 0 otherwise and rte_errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static uint32_t
mlx5_ibv_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
static int
mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
int tunnel __rte_unused)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq = NULL;
uint32_t hrxq_idx = 0;
struct ibv_qp *qp = NULL;
struct mlx5_ind_table_obj *ind_tbl;
struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
const uint8_t *rss_key = hrxq->rss_key;
uint64_t hash_fields = hrxq->hash_fields;
int err;
queues_n = hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
if (!ind_tbl) {
rte_errno = ENOMEM;
return 0;
}
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
struct mlx5dv_qp_init_attr qp_init_attr;
@@ -570,7 +549,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev,
.rx_hash_conf = (struct ibv_rx_hash_conf){
.rx_hash_function =
IBV_RX_HASH_FUNC_TOEPLITZ,
.rx_hash_key_len = rss_key_len,
.rx_hash_key_len = hrxq->rss_key_len,
.rx_hash_key =
(void *)(uintptr_t)rss_key,
.rx_hash_fields_mask = hash_fields,
@@ -591,7 +570,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev,
.rx_hash_conf = (struct ibv_rx_hash_conf){
.rx_hash_function =
IBV_RX_HASH_FUNC_TOEPLITZ,
.rx_hash_key_len = rss_key_len,
.rx_hash_key_len = hrxq->rss_key_len,
.rx_hash_key =
(void *)(uintptr_t)rss_key,
.rx_hash_fields_mask = hash_fields,
@@ -604,10 +583,6 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev,
rte_errno = errno;
goto error;
}
hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
if (!hrxq)
goto error;
hrxq->ind_table = ind_tbl;
hrxq->qp = qp;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
@@ -616,22 +591,13 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev,
goto error;
}
#endif
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = hash_fields;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
hrxq, next);
return hrxq_idx;
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
mlx5_ind_table_obj_release(dev, ind_tbl);
if (qp)
claim_zero(mlx5_glue->destroy_qp(qp));
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
rte_errno = err; /* Restore rte_errno. */
return 0;
return -rte_errno;
}
/**

@@ -745,9 +745,7 @@ struct mlx5_obj_ops {
int (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,
struct mlx5_ind_table_obj *ind_tbl);
void (*ind_table_destroy)(struct mlx5_ind_table_obj *ind_tbl);
uint32_t (*hrxq_new)(struct rte_eth_dev *dev, const uint8_t *rss_key,
uint32_t rss_key_len, uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
int (*hrxq_new)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
int tunnel __rte_unused);
void (*hrxq_destroy)(struct mlx5_hrxq *hrxq);
};
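With the slimmer callback signature, each backend only wires its own constructor into the ops table. A trimmed, illustrative initializer follows; mlx5_devx_hrxq_new and mlx5_devx_ind_table_destroy are names taken from this diff, while the other two member names are assumptions for illustration, and the real tables in the Verbs/DevX modules set the full callback set.

	/* Trimmed example: only the members touched by this patch. */
	struct mlx5_obj_ops devx_obj_ops_sketch = {
		.ind_table_new = mlx5_devx_ind_table_new,		/* assumed name */
		.ind_table_destroy = mlx5_devx_ind_table_destroy,
		.hrxq_new = mlx5_devx_hrxq_new,			/* new signature: (dev, hrxq, tunnel) */
		.hrxq_destroy = mlx5_devx_hrxq_destroy,		/* assumed name */
	};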

@@ -679,54 +679,33 @@ mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
*
* @param dev
* Pointer to Ethernet device.
* @param rss_key
* RSS key for the Rx hash queue.
* @param rss_key_len
* RSS key length.
* @param hash_fields
* Verbs protocol hash field to make the RSS on.
* @param queues
* Queues entering in hash queue. In case of empty hash_fields only the
* first queue index will be taken for the indirection table.
* @param queues_n
* Number of queues.
* @param hrxq
* Pointer to Rx Hash queue.
* @param tunnel
* Tunnel type.
*
* @return
* The DevX object initialized index, 0 otherwise and rte_errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static uint32_t
mlx5_devx_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
int tunnel __rte_unused)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq = NULL;
uint32_t hrxq_idx = 0;
struct mlx5_ind_table_obj *ind_tbl;
struct mlx5_devx_obj *tir = NULL;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct mlx5_devx_tir_attr tir_attr;
int err;
uint32_t i;
const uint8_t *rss_key = hrxq->rss_key;
uint64_t hash_fields = hrxq->hash_fields;
bool lro = true;
uint32_t i;
int err;
queues_n = hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
if (!ind_tbl) {
rte_errno = ENOMEM;
return 0;
}
/* Enable TIR LRO only if all the queues were configured for. */
for (i = 0; i < queues_n; ++i) {
if (!(*priv->rxqs)[queues[i]]->lro) {
for (i = 0; i < ind_tbl->queues_n; ++i) {
if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
lro = false;
break;
}
@@ -776,18 +755,13 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev,
tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
}
tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
if (!tir) {
hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
if (!hrxq->tir) {
DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
dev->data->port_id);
rte_errno = errno;
goto error;
}
hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
if (!hrxq)
goto error;
hrxq->ind_table = ind_tbl;
hrxq->tir = tir;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
(hrxq->tir->obj);
@@ -796,22 +770,13 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev,
goto error;
}
#endif
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = hash_fields;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
hrxq, next);
return hrxq_idx;
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
mlx5_ind_table_obj_release(dev, ind_tbl);
if (tir)
claim_zero(mlx5_devx_cmd_destroy(tir));
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
if (hrxq->tir)
claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
rte_errno = err; /* Restore rte_errno. */
return 0;
return -rte_errno;
}
/**

@@ -8949,7 +8949,7 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
rss_desc->queue,
rss_desc->queue_num);
if (!hrxq_idx) {
hrxq_idx = priv->obj_ops->hrxq_new
hrxq_idx = mlx5_hrxq_new
(dev, rss_desc->key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,

@@ -1986,7 +1986,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
rss_desc->queue,
rss_desc->queue_num);
if (!hrxq_idx)
hrxq_idx = priv->obj_ops->hrxq_new
hrxq_idx = mlx5_hrxq_new
(dev, rss_desc->key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,

@@ -1811,7 +1811,7 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
* @return
* The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
*/
struct mlx5_ind_table_obj *
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
@@ -1937,6 +1937,74 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
return 1;
}
/**
* Create an Rx Hash queue.
*
* @param dev
* Pointer to Ethernet device.
* @param rss_key
* RSS key for the Rx hash queue.
* @param rss_key_len
* RSS key length.
* @param hash_fields
* Verbs protocol hash field to make the RSS on.
* @param queues
* Queues entering in hash queue. In case of empty hash_fields only the
* first queue index will be taken for the indirection table.
* @param queues_n
* Number of queues.
* @param tunnel
* Tunnel type.
*
* @return
* The DevX object initialized index, 0 otherwise and rte_errno is set.
*/
uint32_t
mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
int tunnel __rte_unused)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq = NULL;
uint32_t hrxq_idx = 0;
struct mlx5_ind_table_obj *ind_tbl;
int ret;
queues_n = hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
if (!ind_tbl) {
rte_errno = ENOMEM;
return 0;
}
hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
if (!hrxq)
goto error;
hrxq->ind_table = ind_tbl;
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = hash_fields;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
ret = priv->obj_ops->hrxq_new(dev, hrxq, tunnel);
if (ret < 0) {
rte_errno = errno;
goto error;
}
rte_atomic32_inc(&hrxq->refcnt);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
hrxq, next);
return hrxq_idx;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
mlx5_ind_table_obj_release(dev, ind_tbl);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
rte_errno = ret; /* Restore rte_errno. */
return 0;
}
/**
* Verify the Rx Queue list is empty
*

@@ -365,14 +365,16 @@ int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_new(struct rte_eth_dev *dev,
const uint16_t *queues,
uint32_t queues_n);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
const uint16_t *queues,
uint32_t queues_n);
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl);
uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
int tunnel __rte_unused);
uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,