common/mlx5: add MR control initialization

Add a function for MR control structure initialization.
This function performs:
 - btree initialization.
 - dev_gen_ptr initialization.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
commit 85c7005e84 (parent 05fa53d6a0)
Author: Michael Baum <michaelba@nvidia.com>
Date: 2021-10-19 23:55:57 +03:00
Committed by: Thomas Monjalon
8 changed files with 42 additions and 19 deletions
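
The driver-side change is mechanical: the open-coded pair of an
mlx5_mr_btree_init() call plus a manual dev_gen_ptr assignment is collapsed
into one call of the new helper. A minimal before/after sketch of the caller
pattern, using the qp/priv field names that appear in the hunks below:

	/* Before: two steps every queue-setup path had to remember. */
	if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
			       socket))
		goto error;
	/* Save pointer of global generation number to check memory event. */
	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;

	/* After: one helper does both and sets rte_errno on failure. */
	if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen, socket))
		goto error;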

drivers/common/mlx5/mlx5_common_mr.c

@@ -271,6 +271,34 @@ mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
 #endif
 }
 
+/**
+ * Initialize per-queue MR control descriptor.
+ *
+ * @param mr_ctrl
+ *   Pointer to MR control structure.
+ * @param dev_gen_ptr
+ *   Pointer to generation number of global cache.
+ * @param socket
+ *   NUMA socket on which memory must be allocated.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
+		  int socket)
+{
+	if (mr_ctrl == NULL) {
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	/* Save pointer of global generation number to check memory event. */
+	mr_ctrl->dev_gen_ptr = dev_gen_ptr;
+	/* Initialize B-tree and allocate memory for bottom-half cache table. */
+	return mlx5_mr_btree_init(&mr_ctrl->cache_bh, MLX5_MR_BTREE_CACHE_N,
+				  socket);
+}
+
 /**
  * Find virtually contiguous memory chunk in a given MR.
  *
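
Note the helper's return convention: 0 on success, -rte_errno on failure with
rte_errno set (EINVAL for a NULL mr_ctrl, otherwise whatever
mlx5_mr_btree_init() reports). A caller may therefore test the result or
propagate it directly; a minimal sketch:

	int ret;

	ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,
				rte_socket_id());
	if (ret)
		return ret; /* rte_errno was already set by the helper. */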

drivers/common/mlx5/mlx5_common_mr.h

@@ -124,6 +124,9 @@ mlx5_mr_lookup_lkey(struct mr_cache_entry *lkp_tbl, uint16_t *cached_idx,
 	return UINT32_MAX;
 }
 
+__rte_internal
+int mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
+		      int socket);
 __rte_internal
 int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
 __rte_internal

drivers/common/mlx5/version.map

@@ -111,6 +111,7 @@ INTERNAL {
 	mlx5_mr_btree_free;
 	mlx5_mr_btree_init;
 	mlx5_mr_create_primary;
+	mlx5_mr_ctrl_init;
 	mlx5_mr_dump_cache;
 	mlx5_mr_flush_local_cache;
 	mlx5_mr_free;
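
A side note on this hunk and the previous one: exporting the new helper across
the mlx5 driver libraries takes both pieces, the __rte_internal tag on the
declaration and a matching entry in the INTERNAL section of version.map; to my
understanding, DPDK's symbol-map checks flag a mismatch between the two.
Reduced to this one symbol, the pattern is:

	/* mlx5_common_mr.h: tag the prototype as internal-only (not stable ABI). */
	__rte_internal
	int mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
			      int socket);

	/*
	 * version.map: list the same name in the INTERNAL block, keeping the
	 * alphabetical order used there, as the hunk above does.
	 */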

drivers/compress/mlx5/mlx5_compress.c

@@ -206,8 +206,8 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 		return -rte_errno;
 	}
 	dev->data->queue_pairs[qp_id] = qp;
-	if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
-			       priv->dev_config.socket_id)) {
+	if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,
+			      priv->dev_config.socket_id)) {
 		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
 			(uint32_t)qp_id);
 		rte_errno = ENOMEM;
@@ -258,8 +258,6 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 	ret = mlx5_devx_qp2rts(&qp->qp, 0);
 	if (ret)
 		goto err;
-	/* Save pointer of global generation number to check memory event. */
-	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
 	DRV_LOG(INFO, "QP %u: SQN=0x%X CQN=0x%X entries num = %u",
 		(uint32_t)qp_id, qp->qp.qp->id, qp->cq.cq->id, qp->entries_n);
 	return 0;

drivers/crypto/mlx5/mlx5_crypto.c

@@ -677,14 +677,13 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		DRV_LOG(ERR, "Failed to create QP.");
 		goto error;
 	}
-	if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
-			       priv->dev_config.socket_id) != 0) {
+	if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,
+			      priv->dev_config.socket_id) != 0) {
 		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
 			(uint32_t)qp_id);
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
 	/*
 	 * In Order to configure self loopback, when calling devx qp2rts the
 	 * remote QP id that is used is the id of the same QP.

drivers/net/mlx5/mlx5_rxq.c

@@ -1449,13 +1449,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
-	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
-			       MLX5_MR_BTREE_CACHE_N, socket)) {
+	if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
+			      &priv->sh->share_cache.dev_gen, socket)) {
 		/* rte_errno is already set. */
 		goto error;
 	}
-	/* Rx queues don't use this pointer, but we want a valid structure. */
-	tmpl->rxq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
 	tmpl->socket = socket;
 	if (dev->data->dev_conf.intr_conf.rxq)
 		tmpl->irq = 1;

drivers/net/mlx5/mlx5_txq.c

@@ -1134,13 +1134,11 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = ENOMEM;
 		return NULL;
 	}
-	if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
-			       MLX5_MR_BTREE_CACHE_N, socket)) {
+	if (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl,
+			      &priv->sh->share_cache.dev_gen, socket)) {
 		/* rte_errno is already set. */
 		goto error;
 	}
-	/* Save pointer of global generation number to check memory event. */
-	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
 	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
 	tmpl->txq.offloads = conf->offloads |
 		dev->data->dev_conf.txmode.offloads;

drivers/regex/mlx5/mlx5_regex_control.c

@@ -242,10 +242,8 @@ mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
 		nb_sq_config++;
 	}
 
-	/* Save pointer of global generation number to check memory event. */
-	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
-	ret = mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
-				 rte_socket_id());
+	ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,
+				rte_socket_id());
 	if (ret) {
 		DRV_LOG(ERR, "Error setting up mr btree");
 		goto err_btree;