common/mlx5: share DevX queue pair operations

Currently, drivers that use a QP (vDPA, crypto and compress, soon regex)
each manage the QP's memory, creation, modification and destruction
in almost identical code.
Move QP memory management, creation and destruction to common.
Add common function to change QP state to RTS.
Add user_index attribute to QP creation.
It's for better code maintenance and reuse.

Signed-off-by: Raja Zidane <rzidane@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
This commit is contained in:
Raja Zidane 2021-10-05 12:27:29 +00:00 committed by Thomas Monjalon
parent cb3dd14c77
commit f9213ab12c
9 changed files with 217 additions and 126 deletions

View File

@ -271,6 +271,115 @@ mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
return -rte_errno;
}
/**
 * Destroy DevX Queue Pair.
 *
 * Releases the QP DevX object first, then deregisters the umem and
 * finally frees the umem buffer. Each resource is released only if its
 * pointer is non-NULL, so this is safe to call on a partially created
 * QP (e.g. from an error path in mlx5_devx_qp_create()).
 *
 * @param[in] qp
 *   DevX QP to destroy.
 */
void
mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp)
{
	if (qp->qp)
		claim_zero(mlx5_devx_cmd_destroy(qp->qp));
	if (qp->umem_obj)
		claim_zero(mlx5_os_umem_dereg(qp->umem_obj));
	if (qp->umem_buf)
		mlx5_free((void *)(uintptr_t)qp->umem_buf);
}
/**
 * Create Queue Pair using DevX API.
 *
 * Gets a pointer to a partially initialized attributes structure and
 * updates the following fields:
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_address
 *   log_page_size
 * All other fields are updated by caller.
 *
 * On failure all intermediate resources (buffer, umem registration) are
 * released before returning; qp_obj is only written on success.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] qp_obj
 *   Pointer to QP to create.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to QP attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj, uint16_t log_wqbb_n,
		    struct mlx5_devx_qp_attr *attr, int socket)
{
	struct mlx5_devx_obj *qp = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	/*
	 * NOTE(review): qp_size is uint16_t, so log_wqbb_n >= 16 would
	 * silently truncate the ring size — confirm callers bound it.
	 */
	uint16_t qp_size = 1 << log_wqbb_n;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = MLX5_WQE_SIZE * qp_size;
	/* Doorbell record is placed right after the WQE ring, DBR-aligned. */
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for QP.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for QP.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for QP object creation. */
	attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_umem_offset = 0;
	attr->dbr_umem_valid = 1;
	/* WQE ring and doorbell record share the single registered umem. */
	attr->dbr_umem_id = attr->wq_umem_id;
	attr->dbr_address = umem_dbrec;
	attr->log_page_size = MLX5_LOG_PAGE_SIZE;
	/* Create queue pair object with DevX. */
	qp = mlx5_devx_cmd_create_qp(ctx, attr);
	if (!qp) {
		DRV_LOG(ERR, "Can't create DevX QP object.");
		rte_errno = ENOMEM;
		goto error;
	}
	qp_obj->umem_buf = umem_buf;
	qp_obj->umem_obj = umem_obj;
	qp_obj->qp = qp;
	qp_obj->db_rec = RTE_PTR_ADD(qp_obj->umem_buf, umem_dbrec);
	return 0;
error:
	/* Preserve rte_errno across the cleanup calls below. */
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
/**
* Destroy DevX Receive Queue.
*
@ -385,3 +494,38 @@ mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj, uint32_t wqe_size,
return -rte_errno;
}
/**
 * Change QP state to RTS.
 *
 * Walks the QP through the required RST -> INIT -> RTR -> RTS state
 * transitions, logging and bailing out on the first failed step.
 *
 * @param[in] qp
 *   DevX QP to change.
 * @param[in] remote_qp_id
 *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_qp2rts(struct mlx5_devx_qp *qp, uint32_t remote_qp_id)
{
	struct mlx5_devx_obj *devx_qp = qp->qp;

	/* RST -> INIT. */
	if (mlx5_devx_cmd_modify_qp_state(devx_qp, MLX5_CMD_OP_RST2INIT_QP,
					  remote_qp_id) != 0) {
		DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	/* INIT -> RTR, connecting to the given remote QP. */
	if (mlx5_devx_cmd_modify_qp_state(devx_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  remote_qp_id) != 0) {
		DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	/* RTR -> RTS, QP is ready to send. */
	if (mlx5_devx_cmd_modify_qp_state(devx_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  remote_qp_id) != 0) {
		DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}

View File

@ -33,6 +33,18 @@ struct mlx5_devx_sq {
volatile uint32_t *db_rec; /* The SQ doorbell record. */
};
/* DevX Queue Pair structure. */
struct mlx5_devx_qp {
	struct mlx5_devx_obj *qp; /* The QP DevX object. */
	void *umem_obj; /* The QP umem object (registered WQE/DBR memory). */
	union {
		void *umem_buf; /* Raw umem buffer backing the ring. */
		struct mlx5_wqe *wqes; /* The QP ring buffer. */
		struct mlx5_aso_wqe *aso_wqes; /* Same ring viewed as ASO WQEs. */
	};
	volatile uint32_t *db_rec; /* The QP doorbell record. */
};
/* DevX Receive Queue structure. */
struct mlx5_devx_rq {
struct mlx5_devx_obj *rq; /* The RQ DevX object. */
@ -59,6 +71,14 @@ int mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj,
uint16_t log_wqbb_n,
struct mlx5_devx_create_sq_attr *attr, int socket);
__rte_internal
void mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp);
__rte_internal
int mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj,
uint16_t log_wqbb_n,
struct mlx5_devx_qp_attr *attr, int socket);
__rte_internal
void mlx5_devx_rq_destroy(struct mlx5_devx_rq *rq);
@ -67,4 +87,7 @@ int mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj,
uint32_t wqe_size, uint16_t log_wqbb_n,
struct mlx5_devx_create_rq_attr *attr, int socket);
__rte_internal
int mlx5_devx_qp2rts(struct mlx5_devx_qp *qp, uint32_t remote_qp_id);
#endif /* RTE_PMD_MLX5_COMMON_DEVX_H_ */

View File

@ -2021,6 +2021,7 @@ mlx5_devx_cmd_create_qp(void *ctx,
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pd, attr->pd);
MLX5_SET(qpc, qpc, ts_format, attr->ts_format);
MLX5_SET(qpc, qpc, user_index, attr->user_index);
if (attr->uar_index) {
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
MLX5_SET(qpc, qpc, uar_page, attr->uar_index);

View File

@ -397,6 +397,7 @@ struct mlx5_devx_qp_attr {
uint64_t dbr_address;
uint32_t wq_umem_id;
uint64_t wq_umem_offset;
uint32_t user_index:24;
};
struct mlx5_devx_virtio_q_couners_attr {

View File

@ -67,6 +67,9 @@ INTERNAL {
mlx5_devx_get_out_command_status;
mlx5_devx_qp2rts;
mlx5_devx_qp_create;
mlx5_devx_qp_destroy;
mlx5_devx_rq_create;
mlx5_devx_rq_destroy;
mlx5_devx_sq_create;

View File

@ -267,12 +267,7 @@ mlx5_crypto_qp_release(struct mlx5_crypto_qp *qp)
{
if (qp == NULL)
return;
if (qp->qp_obj != NULL)
claim_zero(mlx5_devx_cmd_destroy(qp->qp_obj));
if (qp->umem_obj != NULL)
claim_zero(mlx5_glue->devx_umem_dereg(qp->umem_obj));
if (qp->umem_buf != NULL)
rte_free(qp->umem_buf);
mlx5_devx_qp_destroy(&qp->qp_obj);
mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
mlx5_devx_cq_destroy(&qp->cq_obj);
rte_free(qp);
@ -289,34 +284,6 @@ mlx5_crypto_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
return 0;
}
static int
mlx5_crypto_qp2rts(struct mlx5_crypto_qp *qp)
{
/*
* In Order to configure self loopback, when calling these functions the
* remote QP id that is used is the id of the same QP.
*/
if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RST2INIT_QP,
qp->qp_obj->id)) {
DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
rte_errno);
return -1;
}
if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_INIT2RTR_QP,
qp->qp_obj->id)) {
DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
rte_errno);
return -1;
}
if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RTR2RTS_QP,
qp->qp_obj->id)) {
DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
rte_errno);
return -1;
}
return 0;
}
static __rte_noinline uint32_t
mlx5_crypto_get_block_size(struct rte_crypto_op *op)
{
@ -471,7 +438,7 @@ mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv,
memcpy(klms, &umr->kseg[0], sizeof(*klms) * klm_n);
}
ds = 2 + klm_n;
cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds);
cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);
cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
MLX5_OPCODE_RDMA_WRITE);
ds = RTE_ALIGN(ds, 4);
@ -480,7 +447,7 @@ mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv,
if (priv->max_rdmar_ds > ds) {
cseg += ds;
ds = priv->max_rdmar_ds - ds;
cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds);
cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);
cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
MLX5_OPCODE_NOP);
qp->db_pi += ds >> 2; /* Here, DS is 4 aligned for sure. */
@ -524,7 +491,8 @@ mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
do {
idx = qp->pi & mask;
op = *ops++;
umr = RTE_PTR_ADD(qp->umem_buf, priv->wqe_set_size * idx);
umr = RTE_PTR_ADD(qp->qp_obj.umem_buf,
priv->wqe_set_size * idx);
if (unlikely(mlx5_crypto_wqe_set(priv, qp, op, umr) == 0)) {
qp->stats.enqueue_err_count++;
if (remain != nb_ops) {
@ -538,7 +506,7 @@ mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
} while (--remain);
qp->stats.enqueued_count += nb_ops;
rte_io_wmb();
qp->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi);
qp->qp_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi);
rte_wmb();
mlx5_crypto_uar_write(*(volatile uint64_t *)qp->wqe, qp->priv);
rte_wmb();
@ -604,8 +572,8 @@ mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)
uint32_t i;
for (i = 0 ; i < qp->entries_n; i++) {
struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->umem_buf, i *
priv->wqe_set_size);
struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->qp_obj.umem_buf,
i * priv->wqe_set_size);
struct mlx5_wqe_umr_cseg *ucseg = (struct mlx5_wqe_umr_cseg *)
(cseg + 1);
struct mlx5_wqe_umr_bsf_seg *bsf =
@ -614,7 +582,7 @@ mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)
struct mlx5_wqe_rseg *rseg;
/* Init UMR WQE. */
cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) |
cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) |
(priv->umr_wqe_size / MLX5_WSEG_SIZE));
cseg->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
MLX5_COMP_MODE_OFFSET);
@ -649,7 +617,7 @@ mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv,
.klm_num = RTE_ALIGN(priv->max_segs_num, 4),
};
for (umr = (struct mlx5_umr_wqe *)qp->umem_buf, i = 0;
for (umr = (struct mlx5_umr_wqe *)qp->qp_obj.umem_buf, i = 0;
i < qp->entries_n; i++, umr = RTE_PTR_ADD(umr, priv->wqe_set_size)) {
attr.klm_array = (struct mlx5_klm *)&umr->kseg[0];
qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->ctx, &attr);
@ -672,9 +640,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
struct mlx5_devx_qp_attr attr = {0};
struct mlx5_crypto_qp *qp;
uint16_t log_nb_desc = rte_log2_u32(qp_conf->nb_descriptors);
uint32_t umem_size = RTE_BIT32(log_nb_desc) *
priv->wqe_set_size +
sizeof(*qp->db_rec) * 2;
uint32_t ret;
uint32_t alloc_size = sizeof(*qp);
struct mlx5_devx_cq_attr cq_attr = {
.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
@ -698,18 +664,16 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
DRV_LOG(ERR, "Failed to create CQ.");
goto error;
}
qp->umem_buf = rte_zmalloc_socket(__func__, umem_size, 4096, socket_id);
if (qp->umem_buf == NULL) {
DRV_LOG(ERR, "Failed to allocate QP umem.");
rte_errno = ENOMEM;
goto error;
}
qp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
(void *)(uintptr_t)qp->umem_buf,
umem_size,
IBV_ACCESS_LOCAL_WRITE);
if (qp->umem_obj == NULL) {
DRV_LOG(ERR, "Failed to register QP umem.");
attr.pd = priv->pdn;
attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
attr.cqn = qp->cq_obj.cq->id;
attr.rq_size = 0;
attr.sq_size = RTE_BIT32(log_nb_desc);
attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
ret = mlx5_devx_qp_create(priv->ctx, &qp->qp_obj, log_nb_desc, &attr,
socket_id);
if (ret) {
DRV_LOG(ERR, "Failed to create QP.");
goto error;
}
if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
@ -720,25 +684,11 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
goto error;
}
qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
attr.pd = priv->pdn;
attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
attr.cqn = qp->cq_obj.cq->id;
attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
attr.rq_size = 0;
attr.sq_size = RTE_BIT32(log_nb_desc);
attr.dbr_umem_valid = 1;
attr.wq_umem_id = qp->umem_obj->umem_id;
attr.wq_umem_offset = 0;
attr.dbr_umem_id = qp->umem_obj->umem_id;
attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
attr.dbr_address = RTE_BIT64(log_nb_desc) * priv->wqe_set_size;
qp->qp_obj = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
if (qp->qp_obj == NULL) {
DRV_LOG(ERR, "Failed to create QP(%u).", rte_errno);
goto error;
}
qp->db_rec = RTE_PTR_ADD(qp->umem_buf, (uintptr_t)attr.dbr_address);
if (mlx5_crypto_qp2rts(qp))
/*
* In Order to configure self loopback, when calling devx qp2rts the
* remote QP id that is used is the id of the same QP.
*/
if (mlx5_devx_qp2rts(&qp->qp_obj, qp->qp_obj.qp->id))
goto error;
qp->mkey = (struct mlx5_devx_obj **)RTE_ALIGN((uintptr_t)(qp + 1),
RTE_CACHE_LINE_SIZE);

View File

@ -44,11 +44,8 @@ struct mlx5_crypto_priv {
struct mlx5_crypto_qp {
struct mlx5_crypto_priv *priv;
struct mlx5_devx_cq cq_obj;
struct mlx5_devx_obj *qp_obj;
struct mlx5_devx_qp qp_obj;
struct rte_cryptodev_stats stats;
struct mlx5dv_devx_umem *umem_obj;
void *umem_buf;
volatile uint32_t *db_rec;
struct rte_crypto_op **ops;
struct mlx5_devx_obj **mkey; /* WQE's indirect mekys. */
struct mlx5_mr_ctrl mr_ctrl;

View File

@ -54,10 +54,7 @@ struct mlx5_vdpa_cq {
struct mlx5_vdpa_event_qp {
struct mlx5_vdpa_cq cq;
struct mlx5_devx_obj *fw_qp;
struct mlx5_devx_obj *sw_qp;
struct mlx5dv_devx_umem *umem_obj;
void *umem_buf;
volatile uint32_t *db_rec;
struct mlx5_devx_qp sw_qp;
};
struct mlx5_vdpa_query_mr {

View File

@ -179,7 +179,7 @@ mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
rte_io_wmb();
/* Ring SW QP doorbell record. */
eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
}
return comp;
}
@ -531,12 +531,7 @@ mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
if (eqp->sw_qp)
claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
if (eqp->umem_obj)
claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
if (eqp->umem_buf)
rte_free(eqp->umem_buf);
mlx5_devx_qp_destroy(&eqp->sw_qp);
if (eqp->fw_qp)
claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
mlx5_vdpa_cq_destroy(&eqp->cq);
@ -547,36 +542,36 @@ static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
eqp->sw_qp->id)) {
eqp->sw_qp.qp->id)) {
DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
rte_errno);
return -1;
}
if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
eqp->fw_qp->id)) {
if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
MLX5_CMD_OP_RST2INIT_QP, eqp->fw_qp->id)) {
DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
rte_errno);
return -1;
}
if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
eqp->sw_qp->id)) {
eqp->sw_qp.qp->id)) {
DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
rte_errno);
return -1;
}
if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
eqp->fw_qp->id)) {
if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
MLX5_CMD_OP_INIT2RTR_QP, eqp->fw_qp->id)) {
DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
rte_errno);
return -1;
}
if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
eqp->sw_qp->id)) {
eqp->sw_qp.qp->id)) {
DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
rte_errno);
return -1;
}
if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
eqp->fw_qp->id)) {
DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
rte_errno);
@ -591,8 +586,7 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
{
struct mlx5_devx_qp_attr attr = {0};
uint16_t log_desc_n = rte_log2_u32(desc_n);
uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
sizeof(*eqp->db_rec) * 2;
uint32_t ret;
if (mlx5_vdpa_event_qp_global_prepare(priv))
return -1;
@ -605,42 +599,23 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
goto error;
}
eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
if (!eqp->umem_buf) {
DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
rte_errno = ENOMEM;
goto error;
}
eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
(void *)(uintptr_t)eqp->umem_buf,
umem_size,
IBV_ACCESS_LOCAL_WRITE);
if (!eqp->umem_obj) {
DRV_LOG(ERR, "Failed to register umem for SW QP.");
goto error;
}
attr.uar_index = priv->uar->page_id;
attr.cqn = eqp->cq.cq_obj.cq->id;
attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
attr.rq_size = 1 << log_desc_n;
attr.rq_size = RTE_BIT32(log_desc_n);
attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
attr.sq_size = 0; /* No need SQ. */
attr.dbr_umem_valid = 1;
attr.wq_umem_id = eqp->umem_obj->umem_id;
attr.wq_umem_offset = 0;
attr.dbr_umem_id = eqp->umem_obj->umem_id;
attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
attr.dbr_address = RTE_BIT64(log_desc_n) * MLX5_WSEG_SIZE;
eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
if (!eqp->sw_qp) {
ret = mlx5_devx_qp_create(priv->ctx, &(eqp->sw_qp), log_desc_n, &attr,
SOCKET_ID_ANY);
if (ret) {
DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
goto error;
}
eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
if (mlx5_vdpa_qps2rts(eqp))
goto error;
/* First ringing. */
rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
&eqp->sw_qp.db_rec[0]);
return 0;
error:
mlx5_vdpa_event_qp_destroy(eqp);