regex/mlx5: move DevX CQ creation to common

Use the common function for DevX CQ creation instead of open-coding the DevX CQ setup in the regex driver.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Author: Michael Baum <michaelba@nvidia.com>
Date: 2021-01-06 08:19:27 +00:00
Committed-by: Ferruh Yigit
Parent: 9dab4d62b4
Commit: 3ddf57069b
4 changed files with 20 additions and 90 deletions
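For context, the shared wrapper adopted here looks roughly as follows. This is a sketch inferred from how the diff below uses it (cq_obj.cq->id, cq_obj.cqes, cq_obj.db_rec); the authoritative definitions live in drivers/common/mlx5/mlx5_common_devx.h, where the CQE ring and the raw buffer pointer actually share a union.

	/* Sketch of the common DevX CQ wrapper (simplified). */
	struct mlx5_devx_cq {
		struct mlx5_devx_obj *cq;	/* The CQ DevX object. */
		void *umem_obj;			/* The CQ umem object. */
		volatile struct mlx5_cqe *cqes;	/* The CQ ring buffer. */
		volatile uint32_t *db_rec;	/* The CQ doorbell record. */
	};

	/* One call allocates the CQE ring, registers its umem, sets up the
	 * doorbell record, resolves the EQ number, and creates the DevX CQ.
	 */
	int mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj,
				uint16_t log_desc_n,
				struct mlx5_devx_cq_attr *attr, int socket);

	/* Releases everything mlx5_devx_cq_create() allocated. */
	void mlx5_devx_cq_destroy(struct mlx5_devx_cq *cq_obj);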

drivers/regex/mlx5/mlx5_regex.c

@@ -170,12 +170,6 @@ mlx5_regex_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		rte_errno = rte_errno ? rte_errno : EINVAL;
 		goto error;
 	}
-	ret = mlx5_glue->devx_query_eqn(ctx, 0, &priv->eqn);
-	if (ret) {
-		DRV_LOG(ERR, "can't query event queue number.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
 	/*
 	 * This PMD always claims the write memory barrier on UAR
 	 * registers writings, it is safe to allocate UAR with any
drivers/regex/mlx5/mlx5_regex.h

@@ -12,6 +12,7 @@
 #include <mlx5_common.h>
 #include <mlx5_common_mr.h>
+#include <mlx5_common_devx.h>
 #include "mlx5_rxp.h"
@@ -30,13 +31,8 @@ struct mlx5_regex_sq {
 struct mlx5_regex_cq {
 	uint32_t log_nb_desc; /* Log 2 number of desc for this object. */
-	struct mlx5_devx_obj *obj; /* The CQ DevX object. */
-	int64_t dbr_offset; /* Door bell record offset. */
-	uint32_t dbr_umem; /* Door bell record umem id. */
-	volatile struct mlx5_cqe *cqe; /* The CQ ring buffer. */
-	struct mlx5dv_devx_umem *cqe_umem; /* CQ buffer umem. */
+	struct mlx5_devx_cq cq_obj; /* The CQ DevX object. */
 	size_t ci;
-	uint32_t *dbr;
 };
 
 struct mlx5_regex_qp {
@@ -75,7 +71,6 @@ struct mlx5_regex_priv {
 	struct mlx5_regex_db db[MLX5_RXP_MAX_ENGINES +
 				MLX5_RXP_EM_COUNT];
 	uint32_t nb_engines; /* Number of RegEx engines. */
-	uint32_t eqn; /* EQ number. */
 	struct mlx5dv_devx_uar *uar; /* UAR object. */
 	struct ibv_pd *pd;
 	struct mlx5_dbr_page_list dbrpgs; /* Door-bell pages. */

drivers/regex/mlx5/mlx5_regex_control.c

@@ -6,6 +6,7 @@
 #include <rte_log.h>
 #include <rte_errno.h>
 #include <rte_memory.h>
+#include <rte_malloc.h>
 #include <rte_regexdev.h>
 #include <rte_regexdev_core.h>
@@ -17,6 +18,7 @@
 #include <mlx5_devx_cmds.h>
 #include <mlx5_prm.h>
 #include <mlx5_common_os.h>
+#include <mlx5_common_devx.h>
 
 #include "mlx5_regex.h"
 #include "mlx5_regex_utils.h"
@@ -44,8 +46,6 @@ regex_ctrl_get_nb_obj(uint16_t nb_desc)
 /**
  * destroy CQ.
  *
- * @param priv
- *   Pointer to the priv object.
 * @param cp
 *   Pointer to the CQ to be destroyed.
 *
@@ -53,24 +53,10 @@ regex_ctrl_get_nb_obj(uint16_t nb_desc)
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-regex_ctrl_destroy_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
+regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)
 {
-	if (cq->cqe_umem) {
-		mlx5_glue->devx_umem_dereg(cq->cqe_umem);
-		cq->cqe_umem = NULL;
-	}
-	if (cq->cqe) {
-		rte_free((void *)(uintptr_t)cq->cqe);
-		cq->cqe = NULL;
-	}
-	if (cq->dbr_offset) {
-		mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset);
-		cq->dbr_offset = -1;
-	}
-	if (cq->obj) {
-		mlx5_devx_cmd_destroy(cq->obj);
-		cq->obj = NULL;
-	}
+	mlx5_devx_cq_destroy(&cq->cq_obj);
+	memset(cq, 0, sizeof(*cq));
 	return 0;
 }
@@ -89,65 +75,20 @@ static int
 regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
 {
 	struct mlx5_devx_cq_attr attr = {
-		.q_umem_valid = 1,
-		.db_umem_valid = 1,
-		.eqn = priv->eqn,
+		.uar_page_id = priv->uar->page_id,
 	};
-	struct mlx5_devx_dbr_page *dbr_page = NULL;
-	void *buf = NULL;
-	size_t pgsize = sysconf(_SC_PAGESIZE);
-	uint32_t cq_size = 1 << cq->log_nb_desc;
-	uint32_t i;
+	int ret;
 
-	cq->dbr_offset = mlx5_get_dbr(priv->ctx, &priv->dbrpgs, &dbr_page);
-	if (cq->dbr_offset < 0) {
-		DRV_LOG(ERR, "Can't allocate cq door bell record.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	cq->dbr_umem = mlx5_os_get_umem_id(dbr_page->umem);
-	cq->dbr = (uint32_t *)((uintptr_t)dbr_page->dbrs +
-			       (uintptr_t)cq->dbr_offset);
-	buf = rte_calloc(NULL, 1, sizeof(struct mlx5_cqe) * cq_size, 4096);
-	if (!buf) {
-		DRV_LOG(ERR, "Can't allocate cqe buffer.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	cq->cqe = buf;
-	for (i = 0; i < cq_size; i++)
-		cq->cqe[i].op_own = 0xff;
-	cq->cqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf,
-						sizeof(struct mlx5_cqe) *
-						cq_size, 7);
-	cq->ci = 0;
-	if (!cq->cqe_umem) {
-		DRV_LOG(ERR, "Can't register cqe mem.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	attr.db_umem_offset = cq->dbr_offset;
-	attr.db_umem_id = cq->dbr_umem;
-	attr.q_umem_id = mlx5_os_get_umem_id(cq->cqe_umem);
-	attr.log_cq_size = cq->log_nb_desc;
-	attr.uar_page_id = priv->uar->page_id;
-	attr.log_page_size = rte_log2_u32(pgsize);
-	cq->obj = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
-	if (!cq->obj) {
-		DRV_LOG(ERR, "Can't create cq object.");
-		rte_errno = ENOMEM;
-		goto error;
+	ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, cq->log_nb_desc,
+				  &attr, SOCKET_ID_ANY);
+	if (ret) {
+		DRV_LOG(ERR, "Can't create CQ object.");
+		memset(cq, 0, sizeof(*cq));
+		rte_errno = ENOMEM;
+		return -rte_errno;
 	}
 	return 0;
-error:
-	if (cq->cqe_umem)
-		mlx5_glue->devx_umem_dereg(cq->cqe_umem);
-	if (buf)
-		rte_free(buf);
-	if (cq->dbr_offset)
-		mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset);
-	return -rte_errno;
 }
 
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
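Reassembled from the added lines above, the whole creation path after this patch reduces to one helper call:

	static int
	regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
	{
		struct mlx5_devx_cq_attr attr = {
			.uar_page_id = priv->uar->page_id,
		};
		int ret;

		/* The helper owns CQE buffer, umem, and doorbell setup. */
		ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, cq->log_nb_desc,
					  &attr, SOCKET_ID_ANY);
		if (ret) {
			DRV_LOG(ERR, "Can't create CQ object.");
			memset(cq, 0, sizeof(*cq));
			rte_errno = ENOMEM;
			return -rte_errno;
		}
		return 0;
	}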
@@ -232,7 +173,7 @@ regex_ctrl_create_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
 	attr.tis_lst_sz = 0;
 	attr.tis_num = 0;
 	attr.user_index = q_ind;
-	attr.cqn = qp->cq.obj->id;
+	attr.cqn = qp->cq.cq_obj.cq->id;
 	wq_attr->uar_page = priv->uar->page_id;
 	regex_get_pdn(priv->pd, &pd_num);
 	wq_attr->pd = pd_num;
@@ -389,7 +330,7 @@ err_fp:
 err_btree:
 	for (i = 0; i < nb_sq_config; i++)
 		regex_ctrl_destroy_sq(priv, qp, i);
-	regex_ctrl_destroy_cq(priv, &qp->cq);
+	regex_ctrl_destroy_cq(&qp->cq);
 err_cq:
 	rte_free(qp->sqs);
 	return ret;

drivers/regex/mlx5/mlx5_regex_fastpath.c

@@ -245,7 +245,7 @@ poll_one(struct mlx5_regex_cq *cq)
 	size_t next_cqe_offset;
 
 	next_cqe_offset = (cq->ci & (cq_size_get(cq) - 1));
-	cqe = (volatile struct mlx5_cqe *)(cq->cqe + next_cqe_offset);
+	cqe = (volatile struct mlx5_cqe *)(cq->cq_obj.cqes + next_cqe_offset);
 	rte_io_wmb();
 
 	int ret = check_cqe(cqe, cq_size_get(cq), cq->ci);
@@ -306,7 +306,7 @@ mlx5_regexdev_dequeue(struct rte_regexdev *dev, uint16_t qp_id,
 		}
 		cq->ci = (cq->ci + 1) & 0xffffff;
 		rte_wmb();
-		cq->dbr[0] = rte_cpu_to_be_32(cq->ci);
+		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->ci);
 		queue->free_sqs |= (1 << sqid);
 	}
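The fast-path edits are mechanical: the CQE ring and the doorbell record move into cq_obj while the ordering contract stays the same. In sketch form (names from the diff; check_cqe() and the 24-bit consumer index are the driver's own conventions, and the status constant is assumed from mlx5_common.h):

	/* Consume one CQE, then publish the new consumer index to HW. */
	size_t next = cq->ci & (cq_size_get(cq) - 1);	/* ring wrap */
	volatile struct mlx5_cqe *cqe = cq->cq_obj.cqes + next;

	if (check_cqe(cqe, cq_size_get(cq), cq->ci) == MLX5_CQE_STATUS_SW_OWN) {
		/* ... process the completion ... */
		cq->ci = (cq->ci + 1) & 0xffffff;	/* 24-bit CI wrap */
		rte_wmb();	/* order CQE reads before the doorbell write */
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->ci);
	}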