common/mlx5: share DevX CQ creation

The CQ object in DevX is created in several places and in several
different drivers.
In all of these places, nearly all of the details are the same — in
particular the allocation of the required resources.

Add a structure that contains all the resources, and provide creation
and release functions for it.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
This commit is contained in:
Michael Baum 2021-01-06 08:19:26 +00:00 committed by Ferruh Yigit
parent 0e8273176e
commit 9dab4d62b4
6 changed files with 194 additions and 0 deletions

View File

@ -16,6 +16,7 @@ sources += files(
'mlx5_common_mr.c',
'mlx5_malloc.c',
'mlx5_common_pci.c',
'mlx5_common_devx.c',
)
cflags_options = [

View File

@ -0,0 +1,155 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2020 Mellanox Technologies, Ltd
*/
#include <stdint.h>
#include <rte_errno.h>
#include <rte_common.h>
#include <rte_eal_paging.h>
#include <mlx5_glue.h>
#include <mlx5_common_os.h>
#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_utils.h"
#include "mlx5_malloc.h"
#include "mlx5_common.h"
#include "mlx5_common_devx.h"
/**
* Destroy DevX Completion Queue.
*
* @param[in] cq
* DevX CQ to destroy.
*/
void
mlx5_devx_cq_destroy(struct mlx5_devx_cq *cq)
{
	/*
	 * Release in reverse order of creation: destroy the DevX CQ object
	 * first, then deregister the umem, then free the backing buffer.
	 * Each resource may be NULL if creation failed partway, so every
	 * step is guarded individually.
	 */
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		/* Cast drops volatile before handing the buffer to mlx5_free. */
		mlx5_free((void *)(uintptr_t)cq->umem_buf);
	/* NOTE(review): fields are not reset to NULL here — caller must not
	 * call this twice on the same object.
	 */
}
/* Mark all CQEs initially as invalid. */
/*
 * Initialize the CQE ring so that every entry reads as invalid and
 * hardware-owned before the CQ is used.
 */
static void
mlx5_cq_init(struct mlx5_devx_cq *cq_obj, uint16_t cq_size)
{
	uint16_t idx;

	for (idx = 0; idx < cq_size; ++idx)
		cq_obj->cqes[idx].op_own = (MLX5_CQE_INVALID << 4) |
					   MLX5_CQE_OWNER_MASK;
}
/**
* Create Completion Queue using DevX API.
*
* Get a pointer to partially initialized attributes structure, and updates the
* following fields:
* q_umem_valid
* q_umem_id
* q_umem_offset
* db_umem_valid
* db_umem_id
* db_umem_offset
* eqn
* log_cq_size
* log_page_size
* All other fields are updated by caller.
*
* @param[in] ctx
* Context returned from mlx5 open_device() glue function.
* @param[in/out] cq_obj
* Pointer to CQ to create.
* @param[in] log_desc_n
* Log of number of descriptors in queue.
* @param[in] attr
* Pointer to CQ attributes structure.
* @param[in] socket
* Socket to use for allocation.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj, uint16_t log_desc_n,
		    struct mlx5_devx_cq_attr *attr, int socket)
{
	struct mlx5_devx_obj *cq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t page_size = rte_mem_page_size();
	size_t alignment = MLX5_CQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint32_t eqn;
	uint16_t cq_size = 1 << log_desc_n;
	int ret;

	/* rte_mem_page_size() returns (size_t)-1 on failure. */
	if (page_size == (size_t)-1 || alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Query first EQN. */
	ret = mlx5_glue->devx_query_eqn(ctx, 0, &eqn);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query event queue number.");
		return -rte_errno;
	}
	/*
	 * Allocate one memory buffer holding both the CQE ring and the
	 * doorbell record. The doorbell record is placed right after the
	 * ring, aligned to MLX5_DBR_SIZE; its offset (umem_dbrec) is
	 * reported to the device via db_umem_offset below.
	 */
	umem_size = sizeof(struct mlx5_cqe) * cq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		rte_errno = errno;
		goto error;
	}
	/*
	 * Fill the resource-related attributes for CQ object creation;
	 * all other attr fields were set by the caller (see the function
	 * header comment for the exact list updated here).
	 */
	attr->q_umem_valid = 1;
	attr->q_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->q_umem_offset = 0;
	attr->db_umem_valid = 1;
	/* Doorbell record lives in the same umem as the CQE ring. */
	attr->db_umem_id = attr->q_umem_id;
	attr->db_umem_offset = umem_dbrec;
	attr->eqn = eqn;
	attr->log_cq_size = log_desc_n;
	attr->log_page_size = rte_log2_u32(page_size);
	/* Create completion queue object with DevX. */
	cq = mlx5_devx_cmd_create_cq(ctx, attr);
	if (!cq) {
		DRV_LOG(ERR, "Can't create DevX CQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	/* On success the cq_obj takes ownership of all three resources. */
	cq_obj->umem_buf = umem_buf;
	cq_obj->umem_obj = umem_obj;
	cq_obj->cq = cq;
	cq_obj->db_rec = RTE_PTR_ADD(cq_obj->umem_buf, umem_dbrec);
	/* Mark all CQEs initially as invalid. */
	mlx5_cq_init(cq_obj, cq_size);
	return 0;
error:
	/* Save rte_errno across the cleanup calls, which may clobber it. */
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}

View File

@ -0,0 +1,31 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2020 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_COMMON_DEVX_H_
#define RTE_PMD_MLX5_COMMON_DEVX_H_
#include "mlx5_devx_cmds.h"
/* DevX Completion Queue structure. */
/* DevX Completion Queue structure.
 * Bundles the DevX CQ object with the umem registration and the backing
 * buffer so that creation/destruction can be shared across drivers.
 */
struct mlx5_devx_cq {
	struct mlx5_devx_obj *cq; /* The CQ DevX object. */
	void *umem_obj; /* The CQ umem object (registration handle). */
	union {
		/* Two views of the same memory: raw buffer vs. CQE ring. */
		volatile void *umem_buf;
		volatile struct mlx5_cqe *cqes; /* The CQ ring buffer. */
	};
	volatile uint32_t *db_rec; /* The CQ doorbell record. */
};
/* mlx5_common_devx.c */
__rte_internal
void mlx5_devx_cq_destroy(struct mlx5_devx_cq *cq);
__rte_internal
int mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj,
uint16_t log_desc_n, struct mlx5_devx_cq_attr *attr,
int socket);
#endif /* RTE_PMD_MLX5_COMMON_DEVX_H_ */

View File

@ -35,6 +35,9 @@ EXPORTS
mlx5_devx_get_out_command_status
mlx5_devx_cmd_create_flow_hit_aso_obj
mlx5_devx_cq_create
mlx5_devx_cq_destroy
mlx5_get_dbr
mlx5_glue

View File

@ -43,6 +43,9 @@ INTERNAL {
mlx5_devx_get_out_command_status;
mlx5_devx_alloc_uar;
mlx5_devx_cq_create;
mlx5_devx_cq_destroy;
mlx5_get_ifname_sysfs;
mlx5_get_dbr;

View File

@ -9,6 +9,7 @@
extern "C" {
#endif
#include "mlx5_prm.h"
#include "mlx5devx.h"
typedef struct mlx5_context {