common/mlx5: share device context object
Create a shared device context in the common area and add it as a field of
the common device. Use this context in all drivers and remove the ctx field
from their private structures.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
commit ca1418ce39 (parent 5bc38358b5)
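The net effect of the patch: the Verbs/DevX context that each class driver used to open for itself now lives once on the shared common device. A minimal sketch of the resulting layout, with names taken from the mlx5_common.h hunk below and the remaining fields abbreviated:

	#include <stdint.h>

	struct rte_device;	/* EAL generic device (declared in rte_dev.h) */

	/* Abbreviated sketch of the shared object; the full definition is in
	 * the mlx5_common.h hunk below.
	 */
	struct mlx5_common_device {
		struct rte_device *dev;
		uint32_t classes_loaded;
		void *ctx;	/* Verbs/DV/DevX context, opened once by the common layer */
		/* ... TAILQ linkage and device configuration omitted ... */
	};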
@@ -424,8 +424,13 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr)
 			ibv_match = ibv_list[n];
 			break;
 		}
-	if (ibv_match == NULL)
+	if (ibv_match == NULL) {
+		DRV_LOG(WARNING,
+			"No Verbs device matches PCI device " PCI_PRI_FMT ","
+			" are kernel drivers loaded?",
+			addr->domain, addr->bus, addr->devid, addr->function);
 		rte_errno = ENOENT;
+	}
 	mlx5_glue->free_device_list(ibv_list);
 	return ibv_match;
 }
@@ -465,14 +470,14 @@ mlx5_restore_doorbell_mapping_env(int value)
  *
  * @param cdev
  *   Pointer to the mlx5 device.
- * @param ctx_ptr
- *   Pointer to fill inside pointer to device context.
+ * @param classes
+ *   Chosen classes come from device arguments.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_os_open_device(struct mlx5_common_device *cdev, void **ctx_ptr)
+mlx5_os_open_device(struct mlx5_common_device *cdev, uint32_t classes)
 {
 	struct ibv_device *ibv;
 	struct ibv_context *ctx = NULL;
@@ -494,18 +499,20 @@ mlx5_os_open_device(struct mlx5_common_device *cdev, void **ctx_ptr)
 	if (ctx) {
 		cdev->config.devx = 1;
 		DRV_LOG(DEBUG, "DevX is supported.");
-	} else {
+	} else if (classes == MLX5_CLASS_ETH) {
 		/* The environment variable is still configured. */
 		ctx = mlx5_glue->open_device(ibv);
 		if (ctx == NULL)
 			goto error;
 		DRV_LOG(DEBUG, "DevX is NOT supported.");
+	} else {
+		goto error;
 	}
 	/* The device is created, no need for environment. */
 	mlx5_restore_doorbell_mapping_env(dbmap_env);
 	/* Hint libmlx5 to use PMD allocator for data plane resources */
 	mlx5_set_context_attr(cdev->dev, ctx);
-	*ctx_ptr = (void *)ctx;
+	cdev->ctx = ctx;
 	return 0;
 error:
 	rte_errno = errno ? errno : ENODEV;
@@ -308,17 +308,60 @@ mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
 #endif
 }
 
+/**
+ * Uninitialize all HW global of device context.
+ *
+ * @param cdev
+ *   Pointer to mlx5 device structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static void
+mlx5_dev_hw_global_release(struct mlx5_common_device *cdev)
+{
+	if (cdev->ctx != NULL) {
+		claim_zero(mlx5_glue->close_device(cdev->ctx));
+		cdev->ctx = NULL;
+	}
+}
+
+/**
+ * Initialize all HW global of device context.
+ *
+ * @param cdev
+ *   Pointer to mlx5 device structure.
+ * @param classes
+ *   Chosen classes come from user device arguments.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_dev_hw_global_prepare(struct mlx5_common_device *cdev, uint32_t classes)
+{
+	int ret;
+
+	/* Create context device */
+	ret = mlx5_os_open_device(cdev, classes);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
 static void
 mlx5_common_dev_release(struct mlx5_common_device *cdev)
 {
 	pthread_mutex_lock(&devices_list_lock);
 	TAILQ_REMOVE(&devices_list, cdev, next);
 	pthread_mutex_unlock(&devices_list_lock);
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		mlx5_dev_hw_global_release(cdev);
 	rte_free(cdev);
 }
 
 static struct mlx5_common_device *
-mlx5_common_dev_create(struct rte_device *eal_dev)
+mlx5_common_dev_create(struct rte_device *eal_dev, uint32_t classes)
 {
 	struct mlx5_common_device *cdev;
 	int ret;
@@ -341,6 +384,13 @@ mlx5_common_dev_create(struct rte_device *eal_dev)
 		return NULL;
 	}
 	mlx5_malloc_mem_select(cdev->config.sys_mem_en);
+	/* Initialize all HW global of device context. */
+	ret = mlx5_dev_hw_global_prepare(cdev, classes);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to initialize device context.");
+		rte_free(cdev);
+		return NULL;
+	}
 exit:
 	pthread_mutex_lock(&devices_list_lock);
 	TAILQ_INSERT_HEAD(&devices_list, cdev, next);
@@ -433,7 +483,7 @@ mlx5_common_dev_probe(struct rte_device *eal_dev)
 		classes = MLX5_CLASS_ETH;
 	cdev = to_mlx5_device(eal_dev);
 	if (!cdev) {
-		cdev = mlx5_common_dev_create(eal_dev);
+		cdev = mlx5_common_dev_create(eal_dev, classes);
 		if (!cdev)
 			return -ENOMEM;
 		new_device = true;
@@ -346,6 +346,7 @@ struct mlx5_common_device {
 	struct rte_device *dev;
 	TAILQ_ENTRY(mlx5_common_device) next;
 	uint32_t classes_loaded;
+	void *ctx; /* Verbs/DV/DevX context. */
 	struct mlx5_common_dev_config config; /* Device configuration. */
 };
 
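Each driver swaps its private `struct ibv_context *ctx` member for a back pointer to this shared device, so a context dereference gains one hop. A hedged before/after sketch (the struct names here are illustrative; the real ones are the per-driver priv structs changed in the hunks below):

	struct ibv_context;		/* rdma-core device context */
	struct mlx5_common_device;	/* shared device from mlx5_common.h */

	struct driver_priv_before {	/* illustrative name */
		struct ibv_context *ctx;	/* driver-owned context */
	};

	struct driver_priv_after {	/* illustrative name */
		struct mlx5_common_device *cdev;	/* backend mlx5 device */
	};

	/* Call sites change accordingly, e.g. from
	 *	mlx5_devx_cq_create(priv->ctx, ...);
	 * to
	 *	mlx5_devx_cq_create(priv->cdev->ctx, ...);
	 */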
@@ -446,7 +447,6 @@ mlx5_dev_is_pci(const struct rte_device *dev);
 
 /* mlx5_common_os.c */
 
-__rte_internal
-int mlx5_os_open_device(struct mlx5_common_device *cdev, void **ctx);
+int mlx5_os_open_device(struct mlx5_common_device *cdev, uint32_t classes);
 
 #endif /* RTE_PMD_MLX5_COMMON_H_ */

@@ -144,7 +144,6 @@ INTERNAL {
 	mlx5_os_dealloc_pd;
 	mlx5_os_dereg_mr;
 	mlx5_os_get_ibv_dev; # WINDOWS_NO_EXPORT
-	mlx5_os_open_device;
 	mlx5_os_reg_mr;
 	mlx5_os_umem_dereg;
 	mlx5_os_umem_reg;
@@ -7,6 +7,7 @@
 #include <stdio.h>
 
 #include <rte_mempool.h>
+#include <rte_bus_pci.h>
 #include <rte_malloc.h>
 #include <rte_errno.h>
 
@@ -17,7 +18,7 @@
 #include "mlx5_malloc.h"
 
 /**
- * Initialization routine for run-time dependency on external lib
+ * Initialization routine for run-time dependency on external lib.
  */
 void
 mlx5_glue_constructor(void)
@@ -25,7 +26,7 @@ mlx5_glue_constructor(void)
 }
 
 /**
- * Allocate PD. Given a devx context object
+ * Allocate PD. Given a DevX context object
 * return an mlx5-pd object.
 *
 * @param[in] ctx
@@ -37,8 +38,8 @@ mlx5_glue_constructor(void)
 void *
 mlx5_os_alloc_pd(void *ctx)
 {
-	struct mlx5_pd *ppd = mlx5_malloc(MLX5_MEM_ZERO,
-		sizeof(struct mlx5_pd), 0, SOCKET_ID_ANY);
+	struct mlx5_pd *ppd = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_pd),
+					  0, SOCKET_ID_ANY);
 	if (!ppd)
 		return NULL;
 
@@ -60,7 +61,7 @@ mlx5_os_alloc_pd(void *ctx)
 *   Pointer to mlx5_pd.
 *
 * @return
 *   Zero if pd is released successfully, negative number otherwise.
 */
 int
 mlx5_os_dealloc_pd(void *pd)
@@ -184,22 +185,28 @@ mlx5_os_get_devx_device(struct rte_device *dev,
 *
 * This function calls the Windows glue APIs to open a device.
 *
- * @param dev
+ * @param cdev
 *   Pointer to mlx5 device structure.
- * @param ctx
- *   Pointer to fill inside pointer to device context.
+ * @param classes
+ *   Chosen classes come from user device arguments.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 int
-mlx5_os_open_device(struct mlx5_common_device *cdev, void **ctx)
+mlx5_os_open_device(struct mlx5_common_device *cdev, uint32_t classes)
 {
 	struct devx_device_bdf *devx_bdf_dev = NULL;
 	struct devx_device_bdf *devx_list;
 	struct mlx5_context *mlx5_ctx = NULL;
 	int n;
 
+	if (classes != MLX5_CLASS_ETH) {
+		DRV_LOG(ERR,
+			"The chosen classes are not supported on Windows.");
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
 	errno = 0;
 	devx_list = mlx5_glue->get_device_list(&n);
 	if (devx_list == NULL) {
@@ -223,7 +230,7 @@ mlx5_os_open_device(struct mlx5_common_device *cdev, void **ctx)
 		goto error;
 	}
 	cdev->config.devx = 1;
-	*ctx = (void *)mlx5_ctx;
+	cdev->ctx = mlx5_ctx;
 	mlx5_glue->free_device_list(devx_list);
 	return 0;
 error:
@@ -35,8 +35,8 @@ struct mlx5_compress_xform {
 
 struct mlx5_compress_priv {
 	TAILQ_ENTRY(mlx5_compress_priv) next;
-	struct ibv_context *ctx; /* Device context. */
 	struct rte_compressdev *compressdev;
+	struct mlx5_common_device *cdev; /* Backend mlx5 device. */
 	void *uar;
 	uint32_t pdn; /* Protection Domain number. */
 	uint8_t min_block_size;
@@ -238,7 +238,7 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 		rte_errno = ENOMEM;
 		goto err;
 	}
-	ret = mlx5_devx_cq_create(priv->ctx, &qp->cq, log_ops_n, &cq_attr,
+	ret = mlx5_devx_cq_create(priv->cdev->ctx, &qp->cq, log_ops_n, &cq_attr,
 				  socket_id);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Failed to create CQ.");
@@ -250,7 +250,7 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 	qp_attr.sq_size = RTE_BIT32(log_ops_n);
 	qp_attr.mmo = priv->mmo_decomp_qp && priv->mmo_comp_qp
 		      && priv->mmo_dma_qp;
-	ret = mlx5_devx_qp_create(priv->ctx, &qp->qp, log_ops_n, &qp_attr,
+	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp, log_ops_n, &qp_attr,
 				  socket_id);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Failed to create QP.");
@@ -711,7 +711,7 @@ mlx5_compress_pd_create(struct mlx5_compress_priv *priv)
 	struct mlx5dv_pd pd_info;
 	int ret;
 
-	priv->pd = mlx5_glue->alloc_pd(priv->ctx);
+	priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
 	if (priv->pd == NULL) {
 		DRV_LOG(ERR, "Failed to allocate PD.");
 		return errno ? -errno : -ENOMEM;
@@ -739,7 +739,7 @@ mlx5_compress_hw_global_prepare(struct mlx5_compress_priv *priv)
 {
 	if (mlx5_compress_pd_create(priv) != 0)
 		return -1;
-	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
+	priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
 	if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) ==
 	    NULL) {
 		rte_errno = errno;
@@ -779,7 +779,8 @@ mlx5_compress_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 		/* Iterate all the existing mlx5 devices. */
 		TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
 			mlx5_free_mr_by_addr(&priv->mr_scache,
-					     priv->ctx->device->name,
+					     mlx5_os_get_ctx_device_name
+							(priv->cdev->ctx),
 					     addr, len);
 		pthread_mutex_unlock(&priv_list_lock);
 		break;
@@ -792,49 +793,37 @@ mlx5_compress_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 static int
 mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
 {
-	struct ibv_device *ibv;
 	struct rte_compressdev *compressdev;
-	struct ibv_context *ctx;
 	struct mlx5_compress_priv *priv;
 	struct mlx5_hca_attr att = { 0 };
 	struct rte_compressdev_pmd_init_params init_params = {
 		.name = "",
 		.socket_id = cdev->dev->numa_node,
 	};
+	const char *ibdev_name = mlx5_os_get_ctx_device_name(cdev->ctx);
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		DRV_LOG(ERR, "Non-primary process type is not supported.");
 		rte_errno = ENOTSUP;
 		return -rte_errno;
 	}
-	ibv = mlx5_os_get_ibv_dev(cdev->dev);
-	if (ibv == NULL)
-		return -rte_errno;
-	ctx = mlx5_glue->dv_open_device(ibv);
-	if (ctx == NULL) {
-		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
-		rte_errno = ENODEV;
-		return -rte_errno;
-	}
-	if (mlx5_devx_cmd_query_hca_attr(ctx, &att) != 0 ||
+	if (mlx5_devx_cmd_query_hca_attr(cdev->ctx, &att) != 0 ||
 	    ((att.mmo_compress_sq_en == 0 || att.mmo_decompress_sq_en == 0 ||
 	       att.mmo_dma_sq_en == 0) && (att.mmo_compress_qp_en == 0 ||
 	       att.mmo_decompress_qp_en == 0 || att.mmo_dma_qp_en == 0))) {
 		DRV_LOG(ERR, "Not enough capabilities to support compress "
 			"operations, maybe old FW/OFED version?");
-		claim_zero(mlx5_glue->close_device(ctx));
 		rte_errno = ENOTSUP;
 		return -ENOTSUP;
 	}
-	compressdev = rte_compressdev_pmd_create(ibv->name, cdev->dev,
+	compressdev = rte_compressdev_pmd_create(ibdev_name, cdev->dev,
 						 sizeof(*priv), &init_params);
 	if (compressdev == NULL) {
-		DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name);
-		claim_zero(mlx5_glue->close_device(ctx));
+		DRV_LOG(ERR, "Failed to create device \"%s\".", ibdev_name);
 		return -ENODEV;
 	}
 	DRV_LOG(INFO,
-		"Compress device %s was created successfully.", ibv->name);
+		"Compress device %s was created successfully.", ibdev_name);
 	compressdev->dev_ops = &mlx5_compress_ops;
 	compressdev->dequeue_burst = mlx5_compress_dequeue_burst;
 	compressdev->enqueue_burst = mlx5_compress_enqueue_burst;
@@ -846,13 +835,12 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
 	priv->mmo_comp_qp = att.mmo_compress_qp_en;
 	priv->mmo_dma_sq = att.mmo_dma_sq_en;
 	priv->mmo_dma_qp = att.mmo_dma_qp_en;
-	priv->ctx = ctx;
+	priv->cdev = cdev;
 	priv->compressdev = compressdev;
 	priv->min_block_size = att.compress_min_block_size;
 	priv->qp_ts_format = att.qp_ts_format;
 	if (mlx5_compress_hw_global_prepare(priv) != 0) {
 		rte_compressdev_pmd_destroy(priv->compressdev);
-		claim_zero(mlx5_glue->close_device(priv->ctx));
 		return -1;
 	}
 	if (mlx5_mr_btree_init(&priv->mr_scache.cache,
@@ -860,7 +848,6 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
 		DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
 		mlx5_compress_hw_global_release(priv);
 		rte_compressdev_pmd_destroy(priv->compressdev);
-		claim_zero(mlx5_glue->close_device(priv->ctx));
 		rte_errno = ENOMEM;
 		return -rte_errno;
 	}
@@ -896,7 +883,6 @@ mlx5_compress_dev_remove(struct mlx5_common_device *cdev)
 		mlx5_mr_release_cache(&priv->mr_scache);
 		mlx5_compress_hw_global_release(priv);
 		rte_compressdev_pmd_destroy(priv->compressdev);
-		claim_zero(mlx5_glue->close_device(priv->ctx));
 	}
 	return 0;
 }
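Because the common layer now owns the context, the compress probe above loses its open/close boilerplate: every `claim_zero(mlx5_glue->close_device(...))` on an error path disappears, since the context outlives the driver and is closed only by `mlx5_dev_hw_global_release()`. A condensed sketch of the resulting ownership rule (simplified, not the verbatim driver code; `query_capabilities()` is a hypothetical helper):

	int class_driver_probe(struct mlx5_common_device *cdev)
	{
		void *ctx = cdev->ctx;	/* borrowed; already opened by the common layer */

		if (query_capabilities(ctx) != 0)	/* hypothetical helper */
			return -1;	/* plain return, no close_device() needed */
		/* ... create driver resources against ctx ... */
		return 0;
	}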
@@ -620,7 +620,7 @@ mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv,
 	for (umr = (struct mlx5_umr_wqe *)qp->qp_obj.umem_buf, i = 0;
 	   i < qp->entries_n; i++, umr = RTE_PTR_ADD(umr, priv->wqe_set_size)) {
 		attr.klm_array = (struct mlx5_klm *)&umr->kseg[0];
-		qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->ctx, &attr);
+		qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->cdev->ctx, &attr);
 		if (!qp->mkey[i])
 			goto error;
 	}
@@ -659,7 +659,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		rte_errno = ENOMEM;
 		return -rte_errno;
 	}
-	if (mlx5_devx_cq_create(priv->ctx, &qp->cq_obj, log_nb_desc,
+	if (mlx5_devx_cq_create(priv->cdev->ctx, &qp->cq_obj, log_nb_desc,
 				&cq_attr, socket_id) != 0) {
 		DRV_LOG(ERR, "Failed to create CQ.");
 		goto error;
@@ -670,8 +670,8 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	attr.rq_size = 0;
 	attr.sq_size = RTE_BIT32(log_nb_desc);
 	attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
-	ret = mlx5_devx_qp_create(priv->ctx, &qp->qp_obj, log_nb_desc, &attr,
-				  socket_id);
+	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp_obj, log_nb_desc,
+				  &attr, socket_id);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to create QP.");
 		goto error;
@@ -774,7 +774,7 @@ mlx5_crypto_pd_create(struct mlx5_crypto_priv *priv)
 	struct mlx5dv_pd pd_info;
 	int ret;
 
-	priv->pd = mlx5_glue->alloc_pd(priv->ctx);
+	priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
 	if (priv->pd == NULL) {
 		DRV_LOG(ERR, "Failed to allocate PD.");
 		return errno ? -errno : -ENOMEM;
@@ -802,7 +802,7 @@ mlx5_crypto_hw_global_prepare(struct mlx5_crypto_priv *priv)
 {
 	if (mlx5_crypto_pd_create(priv) != 0)
 		return -1;
-	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
+	priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
 	if (priv->uar)
 		priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
 	if (priv->uar == NULL || priv->uar_addr == NULL) {
@@ -940,7 +940,8 @@ mlx5_crypto_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 		/* Iterate all the existing mlx5 devices. */
 		TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next)
 			mlx5_free_mr_by_addr(&priv->mr_scache,
-					     priv->ctx->device->name,
+					     mlx5_os_get_ctx_device_name
							(priv->cdev->ctx),
 					     addr, len);
 		pthread_mutex_unlock(&priv_list_lock);
 		break;
@@ -953,9 +954,7 @@ mlx5_crypto_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 static int
 mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
 {
-	struct ibv_device *ibv;
 	struct rte_cryptodev *crypto_dev;
-	struct ibv_context *ctx;
 	struct mlx5_devx_obj *login;
 	struct mlx5_crypto_priv *priv;
 	struct mlx5_crypto_devarg_params devarg_prms = { 0 };
@@ -967,6 +966,7 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
 		.max_nb_queue_pairs =
 				RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
 	};
+	const char *ibdev_name = mlx5_os_get_ctx_device_name(cdev->ctx);
 	uint16_t rdmw_wqe_size;
 	int ret;
 
@@ -975,58 +975,44 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
 		rte_errno = ENOTSUP;
 		return -rte_errno;
 	}
-	ibv = mlx5_os_get_ibv_dev(cdev->dev);
-	if (ibv == NULL)
-		return -rte_errno;
-	ctx = mlx5_glue->dv_open_device(ibv);
-	if (ctx == NULL) {
-		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
-		rte_errno = ENODEV;
-		return -rte_errno;
-	}
-	if (mlx5_devx_cmd_query_hca_attr(ctx, &attr) != 0 ||
+	if (mlx5_devx_cmd_query_hca_attr(cdev->ctx, &attr) != 0 ||
 	    attr.crypto == 0 || attr.aes_xts == 0) {
 		DRV_LOG(ERR, "Not enough capabilities to support crypto "
 			"operations, maybe old FW/OFED version?");
-		claim_zero(mlx5_glue->close_device(ctx));
 		rte_errno = ENOTSUP;
 		return -ENOTSUP;
 	}
 	ret = mlx5_crypto_parse_devargs(cdev->dev->devargs, &devarg_prms);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to parse devargs.");
-		claim_zero(mlx5_glue->close_device(ctx));
 		return -rte_errno;
 	}
-	login = mlx5_devx_cmd_create_crypto_login_obj(ctx,
+	login = mlx5_devx_cmd_create_crypto_login_obj(cdev->ctx,
 						      &devarg_prms.login_attr);
 	if (login == NULL) {
 		DRV_LOG(ERR, "Failed to configure login.");
-		claim_zero(mlx5_glue->close_device(ctx));
 		return -rte_errno;
 	}
-	crypto_dev = rte_cryptodev_pmd_create(ibv->name, cdev->dev,
+	crypto_dev = rte_cryptodev_pmd_create(ibdev_name, cdev->dev,
 					      &init_params);
 	if (crypto_dev == NULL) {
-		DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name);
-		claim_zero(mlx5_glue->close_device(ctx));
+		DRV_LOG(ERR, "Failed to create device \"%s\".", ibdev_name);
 		return -ENODEV;
 	}
 	DRV_LOG(INFO,
-		"Crypto device %s was created successfully.", ibv->name);
+		"Crypto device %s was created successfully.", ibdev_name);
 	crypto_dev->dev_ops = &mlx5_crypto_ops;
 	crypto_dev->dequeue_burst = mlx5_crypto_dequeue_burst;
 	crypto_dev->enqueue_burst = mlx5_crypto_enqueue_burst;
 	crypto_dev->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS;
 	crypto_dev->driver_id = mlx5_crypto_driver_id;
 	priv = crypto_dev->data->dev_private;
-	priv->ctx = ctx;
+	priv->cdev = cdev;
 	priv->login_obj = login;
 	priv->crypto_dev = crypto_dev;
 	priv->qp_ts_format = attr.qp_ts_format;
 	if (mlx5_crypto_hw_global_prepare(priv) != 0) {
 		rte_cryptodev_pmd_destroy(priv->crypto_dev);
-		claim_zero(mlx5_glue->close_device(priv->ctx));
 		return -1;
 	}
 	if (mlx5_mr_btree_init(&priv->mr_scache.cache,
@@ -1034,7 +1020,6 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
 		DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
 		mlx5_crypto_hw_global_release(priv);
 		rte_cryptodev_pmd_destroy(priv->crypto_dev);
-		claim_zero(mlx5_glue->close_device(priv->ctx));
 		rte_errno = ENOMEM;
 		return -rte_errno;
 	}
@@ -1084,7 +1069,6 @@ mlx5_crypto_dev_remove(struct mlx5_common_device *cdev)
 		mlx5_crypto_hw_global_release(priv);
 		rte_cryptodev_pmd_destroy(priv->crypto_dev);
 		claim_zero(mlx5_devx_cmd_destroy(priv->login_obj));
-		claim_zero(mlx5_glue->close_device(priv->ctx));
 	}
 	return 0;
 }
@@ -19,7 +19,7 @@
 
 struct mlx5_crypto_priv {
 	TAILQ_ENTRY(mlx5_crypto_priv) next;
-	struct ibv_context *ctx; /* Device context. */
+	struct mlx5_common_device *cdev; /* Backend mlx5 device. */
 	struct rte_cryptodev *crypto_dev;
 	void *uar; /* User Access Region. */
 	volatile uint64_t *uar_addr;
@@ -117,7 +117,8 @@ mlx5_crypto_dek_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
 		return NULL;
 	}
 	memcpy(&dek_attr.key, cipher_ctx->key.data, cipher_ctx->key.length);
-	dek->obj = mlx5_devx_cmd_create_dek_obj(ctx->priv->ctx, &dek_attr);
+	dek->obj = mlx5_devx_cmd_create_dek_obj(ctx->priv->cdev->ctx,
+						&dek_attr);
 	if (dek->obj == NULL) {
 		rte_free(dek);
 		return NULL;
|
|||||||
mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
|
mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
|
||||||
{
|
{
|
||||||
struct mlx5_priv *priv = dev->data->dev_private;
|
struct mlx5_priv *priv = dev->data->dev_private;
|
||||||
struct ibv_context *ctx = priv->sh->ctx;
|
struct ibv_context *ctx = priv->sh->cdev->ctx;
|
||||||
struct ibv_values_ex values;
|
struct ibv_values_ex values;
|
||||||
int err = 0;
|
int err = 0;
|
||||||
|
|
||||||
@ -778,7 +778,7 @@ mlx5_dev_interrupt_handler(void *cb_arg)
|
|||||||
struct rte_eth_dev *dev;
|
struct rte_eth_dev *dev;
|
||||||
uint32_t tmp;
|
uint32_t tmp;
|
||||||
|
|
||||||
if (mlx5_glue->get_async_event(sh->ctx, &event))
|
if (mlx5_glue->get_async_event(sh->cdev->ctx, &event))
|
||||||
break;
|
break;
|
||||||
/* Retrieve and check IB port index. */
|
/* Retrieve and check IB port index. */
|
||||||
tmp = (uint32_t)event.element.port_num;
|
tmp = (uint32_t)event.element.port_num;
|
||||||
@ -990,7 +990,7 @@ mlx5_is_removed(struct rte_eth_dev *dev)
|
|||||||
struct ibv_device_attr device_attr;
|
struct ibv_device_attr device_attr;
|
||||||
struct mlx5_priv *priv = dev->data->dev_private;
|
struct mlx5_priv *priv = dev->data->dev_private;
|
||||||
|
|
||||||
if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
|
if (mlx5_glue->query_device(priv->sh->cdev->ctx, &device_attr) == EIO)
|
||||||
return 1;
|
return 1;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -101,7 +101,7 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
|
|||||||
case MLX5_MP_REQ_VERBS_CMD_FD:
|
case MLX5_MP_REQ_VERBS_CMD_FD:
|
||||||
mp_init_msg(&priv->mp_id, &mp_res, param->type);
|
mp_init_msg(&priv->mp_id, &mp_res, param->type);
|
||||||
mp_res.num_fds = 1;
|
mp_res.num_fds = 1;
|
||||||
mp_res.fds[0] = ((struct ibv_context *)priv->sh->ctx)->cmd_fd;
|
mp_res.fds[0] = ((struct ibv_context *)cdev->ctx)->cmd_fd;
|
||||||
res->result = 0;
|
res->result = 0;
|
||||||
ret = rte_mp_reply(&mp_res, peer);
|
ret = rte_mp_reply(&mp_res, peer);
|
||||||
break;
|
break;
|
||||||
@ -248,7 +248,8 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx5_mp_req_type type)
|
|||||||
mp_init_msg(&priv->mp_id, &mp_req, type);
|
mp_init_msg(&priv->mp_id, &mp_req, type);
|
||||||
if (type == MLX5_MP_REQ_START_RXTX) {
|
if (type == MLX5_MP_REQ_START_RXTX) {
|
||||||
mp_req.num_fds = 1;
|
mp_req.num_fds = 1;
|
||||||
mp_req.fds[0] = ((struct ibv_context *)priv->sh->ctx)->cmd_fd;
|
mp_req.fds[0] =
|
||||||
|
((struct ibv_context *)priv->sh->cdev->ctx)->cmd_fd;
|
||||||
}
|
}
|
||||||
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
|
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
|
@@ -257,7 +257,7 @@ __mlx5_discovery_misc5_cap(struct mlx5_priv *priv)
 						   metadata_reg_c_0, 0xffff);
 	}
 #endif
-	matcher = mlx5_glue->dv_create_flow_matcher(priv->sh->ctx,
+	matcher = mlx5_glue->dv_create_flow_matcher(priv->sh->cdev->ctx,
 						    &dv_attr, tbl);
 	if (matcher) {
 		priv->sh->misc5_cap = 1;
@@ -341,7 +341,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 	void *domain;
 
 	/* Reference counter is zero, we should initialize structures. */
-	domain = mlx5_glue->dr_create_domain(sh->ctx,
+	domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
 					     MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
 	if (!domain) {
 		DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
@@ -349,7 +349,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 		goto error;
 	}
 	sh->rx_domain = domain;
-	domain = mlx5_glue->dr_create_domain(sh->ctx,
+	domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
 					     MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
 	if (!domain) {
 		DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
@@ -359,8 +359,8 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 	sh->tx_domain = domain;
 #ifdef HAVE_MLX5DV_DR_ESWITCH
 	if (priv->config.dv_esw_en) {
-		domain = mlx5_glue->dr_create_domain
-			(sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
+		domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
+						     MLX5DV_DR_DOMAIN_TYPE_FDB);
 		if (!domain) {
 			DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
 			err = errno;
@@ -768,7 +768,7 @@ static void
 mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	void *ctx = priv->sh->ctx;
+	void *ctx = priv->sh->cdev->ctx;
 
 	priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
 	if (!priv->q_counters) {
@@ -1040,7 +1040,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
 	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
 #endif
-	mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
+	mlx5_glue->dv_query_device(sh->cdev->ctx, &dv_attr);
 	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
 		if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
 			DRV_LOG(DEBUG, "enhanced MPW is supported");
@@ -1125,7 +1125,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 #endif
 	config->mpls_en = mpls_en;
 	/* Check port status. */
-	err = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr);
+	err = mlx5_glue->query_port(sh->cdev->ctx, spawn->phys_port,
+				    &port_attr);
 	if (err) {
 		DRV_LOG(ERR, "port query failed: %s", strerror(err));
 		goto error;
@@ -1175,7 +1176,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * register is defined by mask.
 	 */
 	if (switch_info->representor || switch_info->master) {
-		err = mlx5_glue->devx_port_query(sh->ctx,
+		err = mlx5_glue->devx_port_query(sh->cdev->ctx,
 						 spawn->phys_port,
 						 &vport_info);
 		if (err) {
@@ -1332,7 +1333,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		config->mps == MLX5_MPW ? "legacy " : "",
 		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
 	if (sh->devx) {
-		err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
+		err = mlx5_devx_cmd_query_hca_attr(sh->cdev->ctx,
+						   &config->hca_attr);
 		if (err) {
 			err = -err;
 			goto error;
@@ -1555,7 +1557,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 
 		err = config->hca_attr.access_register_user ?
 			mlx5_devx_cmd_register_read
-				(sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
+				(sh->cdev->ctx, MLX5_REGISTER_ID_MTUTC, 0,
 				reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
 		if (!err) {
 			uint32_t ts_mode;
@@ -1934,14 +1936,14 @@ mlx5_dev_spawn_data_cmp(const void *a, const void *b)
 /**
 * Match PCI information for possible slaves of bonding device.
 *
- * @param[in] ibv_dev
- *   Pointer to Infiniband device structure.
+ * @param[in] ibdev_name
+ *   Name of Infiniband device.
 * @param[in] pci_dev
 *   Pointer to primary PCI address structure to match.
 * @param[in] nl_rdma
 *   Netlink RDMA group socket handle.
 * @param[in] owner
- *   Rerepsentor owner PF index.
+ *   Representor owner PF index.
 * @param[out] bond_info
 *   Pointer to bonding information.
 *
@@ -1950,7 +1952,7 @@ mlx5_dev_spawn_data_cmp(const void *a, const void *b)
 *   positive index of slave PF in bonding.
 */
 static int
-mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
+mlx5_device_bond_pci_match(const char *ibdev_name,
 			   const struct rte_pci_addr *pci_dev,
 			   int nl_rdma, uint16_t owner,
 			   struct mlx5_bond_info *bond_info)
@@ -1963,27 +1965,25 @@ mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
 	int ret;
 
 	/*
-	 * Try to get master device name. If something goes
-	 * wrong suppose the lack of kernel support and no
-	 * bonding devices.
+	 * Try to get master device name. If something goes wrong suppose
+	 * the lack of kernel support and no bonding devices.
 	 */
 	memset(bond_info, 0, sizeof(*bond_info));
 	if (nl_rdma < 0)
 		return -1;
-	if (!strstr(ibv_dev->name, "bond"))
+	if (!strstr(ibdev_name, "bond"))
 		return -1;
-	np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
+	np = mlx5_nl_portnum(nl_rdma, ibdev_name);
 	if (!np)
 		return -1;
 	/*
-	 * The Master device might not be on the predefined
-	 * port (not on port index 1, it is not garanted),
-	 * we have to scan all Infiniband device port and
-	 * find master.
+	 * The master device might not be on the predefined port(not on port
+	 * index 1, it is not guaranteed), we have to scan all Infiniband
+	 * device ports and find master.
	 */
 	for (i = 1; i <= np; ++i) {
 		/* Check whether Infiniband port is populated. */
-		ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
+		ifindex = mlx5_nl_ifindex(nl_rdma, ibdev_name, i);
 		if (!ifindex)
 			continue;
 		if (!if_indextoname(ifindex, ifname))
@@ -2008,8 +2008,9 @@ mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
 			snprintf(tmp_str, sizeof(tmp_str),
 				 "/sys/class/net/%s", ifname);
 			if (mlx5_get_pci_addr(tmp_str, &pci_addr)) {
-				DRV_LOG(WARNING, "can not get PCI address"
-					" for netdev \"%s\"", ifname);
+				DRV_LOG(WARNING,
+					"Cannot get PCI address for netdev \"%s\".",
+					ifname);
 				continue;
 			}
 			/* Slave interface PCI address match found. */
@@ -2110,7 +2111,7 @@ mlx5_os_config_default(struct mlx5_dev_config *config)
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 static int
-mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, void *ctx,
+mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
 		     struct rte_eth_devargs *req_eth_da,
 		     uint16_t owner_id)
 {
@@ -2168,9 +2169,8 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, void *ctx,
 		struct rte_pci_addr pci_addr;
 
 		DRV_LOG(DEBUG, "Checking device \"%s\"", ibv_list[ret]->name);
-		bd = mlx5_device_bond_pci_match
-				(ibv_list[ret], &owner_pci, nl_rdma, owner_id,
-				 &bond_info);
+		bd = mlx5_device_bond_pci_match(ibv_list[ret]->name, &owner_pci,
+						nl_rdma, owner_id, &bond_info);
 		if (bd >= 0) {
 			/*
 			 * Bonding device detected. Only one match is allowed,
@@ -2190,9 +2190,9 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, void *ctx,
 			/* Amend owner pci address if owner PF ID specified. */
 			if (eth_da.nb_representor_ports)
 				owner_pci.function += owner_id;
-			DRV_LOG(INFO, "PCI information matches for"
-				" slave %d bonding device \"%s\"",
+			DRV_LOG(INFO,
+				"PCI information matches for slave %d bonding device \"%s\"",
 				bd, ibv_list[ret]->name);
 			ibv_match[nd++] = ibv_list[ret];
 			break;
 		} else {
@@ -2266,7 +2266,6 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, void *ctx,
 		list[ns].max_port = np;
 		list[ns].phys_port = i;
 		list[ns].phys_dev_name = ibv_match[0]->name;
-		list[ns].ctx = ctx;
 		list[ns].eth_dev = NULL;
 		list[ns].pci_dev = pci_dev;
 		list[ns].cdev = cdev;
@@ -2362,7 +2361,6 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, void *ctx,
 			list[ns].max_port = 1;
 			list[ns].phys_port = 1;
 			list[ns].phys_dev_name = ibv_match[i]->name;
-			list[ns].ctx = ctx;
 			list[ns].eth_dev = NULL;
 			list[ns].pci_dev = pci_dev;
 			list[ns].cdev = cdev;
@@ -2411,10 +2409,9 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, void *ctx,
 			}
 			ret = -1;
 			if (nl_route >= 0)
-				ret = mlx5_nl_switch_info
-					       (nl_route,
-						list[ns].ifindex,
-						&list[ns].info);
+				ret = mlx5_nl_switch_info(nl_route,
							  list[ns].ifindex,
							  &list[ns].info);
 			if (ret || (!list[ns].info.representor &&
 				    !list[ns].info.master)) {
 				/*
@@ -2451,10 +2448,9 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, void *ctx,
 			}
 			/*
 			 * New kernels may add the switch_id attribute for the case
-			 * there is no E-Switch and we wrongly recognized the
-			 * only device as master. Override this if there is the
-			 * single device with single port and new device name
-			 * format present.
+			 * there is no E-Switch and we wrongly recognized the only
+			 * device as master. Override this if there is the single
+			 * device with single port and new device name format present.
			 */
 			if (nd == 1 &&
 			    list[0].info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
@@ -2627,7 +2623,7 @@ mlx5_os_parse_eth_devargs(struct rte_device *dev,
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 static int
-mlx5_os_pci_probe(struct mlx5_common_device *cdev, void *ctx)
+mlx5_os_pci_probe(struct mlx5_common_device *cdev)
 {
 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
 	struct rte_eth_devargs eth_da = { .nb_ports = 0 };
@@ -2641,7 +2637,7 @@ mlx5_os_pci_probe(struct mlx5_common_device *cdev, void *ctx)
 	if (eth_da.nb_ports > 0) {
 		/* Iterate all port if devargs pf is range: "pf[0-1]vf[...]". */
 		for (p = 0; p < eth_da.nb_ports; p++) {
-			ret = mlx5_os_pci_probe_pf(cdev, ctx, &eth_da,
+			ret = mlx5_os_pci_probe_pf(cdev, &eth_da,
 						   eth_da.ports[p]);
 			if (ret)
 				break;
@@ -2655,14 +2651,14 @@ mlx5_os_pci_probe(struct mlx5_common_device *cdev, void *ctx)
 			mlx5_net_remove(cdev);
 		}
 	} else {
-		ret = mlx5_os_pci_probe_pf(cdev, ctx, &eth_da, 0);
+		ret = mlx5_os_pci_probe_pf(cdev, &eth_da, 0);
 	}
 	return ret;
 }
 
 /* Probe a single SF device on auxiliary bus, no representor support. */
 static int
-mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev, void *ctx)
+mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
 {
 	struct rte_eth_devargs eth_da = { .nb_ports = 0 };
 	struct mlx5_dev_config config;
@@ -2682,8 +2678,7 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev, void *ctx)
 	/* Init spawn data. */
 	spawn.max_port = 1;
 	spawn.phys_port = 1;
-	spawn.ctx = ctx;
-	spawn.phys_dev_name = mlx5_os_get_ctx_device_name(ctx);
+	spawn.phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx);
 	ret = mlx5_auxiliary_get_ifindex(dev->name);
 	if (ret < 0) {
 		DRV_LOG(ERR, "failed to get ethdev ifindex: %s", dev->name);
@@ -2721,28 +2716,19 @@ int
 mlx5_os_net_probe(struct mlx5_common_device *cdev)
 {
 	int ret;
-	void *ctx = NULL;
 
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		ret = mlx5_os_open_device(cdev, &ctx);
-		if (ret) {
-			DRV_LOG(ERR, "Fail to open device %s", cdev->dev->name);
-			return -rte_errno;
-		}
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 		mlx5_pmd_socket_init();
-	}
 	ret = mlx5_init_once();
 	if (ret) {
 		DRV_LOG(ERR, "Unable to init PMD global data: %s",
 			strerror(rte_errno));
-		if (ctx != NULL)
-			claim_zero(mlx5_glue->close_device(ctx));
 		return -rte_errno;
 	}
 	if (mlx5_dev_is_pci(cdev->dev))
-		return mlx5_os_pci_probe(cdev, ctx);
+		return mlx5_os_pci_probe(cdev);
 	else
-		return mlx5_os_auxiliary_probe(cdev, ctx);
+		return mlx5_os_auxiliary_probe(cdev);
 }
 
 /**
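The simplification of `mlx5_os_net_probe()` above works because, by the time it runs, the common bus probe has already created the device and opened its context. The assumed ordering after this patch, reconstructed from the hunks above (a sketch, not verbatim code):

	/*
	 * mlx5_common_dev_probe(eal_dev)
	 *   -> mlx5_common_dev_create(eal_dev, classes)
	 *        -> mlx5_dev_hw_global_prepare(cdev, classes)
	 *             -> mlx5_os_open_device(cdev, classes)   fills cdev->ctx
	 *   -> per-class probe, e.g. mlx5_os_net_probe(cdev)  uses cdev->ctx
	 */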
@ -2793,16 +2779,16 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
|
|||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
int flags;
|
int flags;
|
||||||
|
struct ibv_context *ctx = sh->cdev->ctx;
|
||||||
|
|
||||||
sh->intr_handle.fd = -1;
|
sh->intr_handle.fd = -1;
|
||||||
flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL);
|
flags = fcntl(ctx->async_fd, F_GETFL);
|
||||||
ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd,
|
ret = fcntl(ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
|
||||||
F_SETFL, flags | O_NONBLOCK);
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
DRV_LOG(INFO, "failed to change file descriptor async event"
|
DRV_LOG(INFO, "failed to change file descriptor async event"
|
||||||
" queue");
|
" queue");
|
||||||
} else {
|
} else {
|
||||||
sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd;
|
sh->intr_handle.fd = ctx->async_fd;
|
||||||
sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
|
sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
|
||||||
if (rte_intr_callback_register(&sh->intr_handle,
|
if (rte_intr_callback_register(&sh->intr_handle,
|
||||||
mlx5_dev_interrupt_handler, sh)) {
|
mlx5_dev_interrupt_handler, sh)) {
|
||||||
@ -2813,8 +2799,7 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
|
|||||||
if (sh->devx) {
|
if (sh->devx) {
|
||||||
#ifdef HAVE_IBV_DEVX_ASYNC
|
#ifdef HAVE_IBV_DEVX_ASYNC
|
||||||
sh->intr_handle_devx.fd = -1;
|
sh->intr_handle_devx.fd = -1;
|
||||||
sh->devx_comp =
|
sh->devx_comp = (void *)mlx5_glue->devx_create_cmd_comp(ctx);
|
||||||
(void *)mlx5_glue->devx_create_cmd_comp(sh->ctx);
|
|
||||||
struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
|
struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
|
||||||
if (!devx_comp) {
|
if (!devx_comp) {
|
||||||
DRV_LOG(INFO, "failed to allocate devx_comp.");
|
DRV_LOG(INFO, "failed to allocate devx_comp.");
|
||||||
|
@ -249,9 +249,10 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
|
|||||||
cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
|
cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
|
return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq
|
||||||
&cq_attr.ibv,
|
(priv->sh->cdev->ctx,
|
||||||
&cq_attr.mlx5));
|
&cq_attr.ibv,
|
||||||
|
&cq_attr.mlx5));
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -323,10 +324,10 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
 			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
 		};
 	}
-	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
+	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->cdev->ctx, &wq_attr.ibv,
 					      &wq_attr.mlx5);
 #else
-	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
+	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->cdev->ctx, &wq_attr.ibv);
 #endif
 	if (rxq_obj->wq) {
 		/*
@@ -379,7 +380,7 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	tmpl->rxq_ctrl = rxq_ctrl;
 	if (rxq_ctrl->irq) {
 		tmpl->ibv_channel =
-			mlx5_glue->create_comp_channel(priv->sh->ctx);
+			mlx5_glue->create_comp_channel(priv->sh->cdev->ctx);
 		if (!tmpl->ibv_channel) {
 			DRV_LOG(ERR, "Port %u: comp channel creation failure.",
 				dev->data->port_id);
@@ -542,12 +543,13 @@ mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
 	/* Finalise indirection table. */
 	for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
 		wq[i] = wq[j];
-	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table(priv->sh->ctx,
-					&(struct ibv_rwq_ind_table_init_attr){
-						.log_ind_tbl_size = log_n,
-						.ind_tbl = wq,
-						.comp_mask = 0,
-					});
+	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
+					(priv->sh->cdev->ctx,
+					 &(struct ibv_rwq_ind_table_init_attr){
+						.log_ind_tbl_size = log_n,
+						.ind_tbl = wq,
+						.comp_mask = 0,
+					 });
 	if (!ind_tbl->ind_table) {
 		rte_errno = errno;
 		return -rte_errno;
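The indirection-table call above builds its attribute struct as a C99 compound literal at the call site, so the one-shot attributes never need a named variable. A small self-contained illustration of that style; the attribute struct and consumer function here are invented for the example, not Verbs API types.

#include <stdint.h>
#include <stdio.h>

struct init_attr { uint32_t log_size; void **tbl; uint32_t comp_mask; };

static void consume(void *ctx, const struct init_attr *attr)
{
	(void)ctx;
	printf("log_size=%u\n", attr->log_size);
}

int main(void)
{
	void *wq[1] = { NULL };

	/* One-shot attribute block built in place at the call site. */
	consume(NULL, &(struct init_attr){
		.log_size = 4,
		.tbl = wq,
		.comp_mask = 0,
	});
	return 0;
}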
@@ -609,7 +611,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
 	}
 #endif
 	qp = mlx5_glue->dv_create_qp
-		(priv->sh->ctx,
+		(priv->sh->cdev->ctx,
 		 &(struct ibv_qp_init_attr_ex){
 			.qp_type = IBV_QPT_RAW_PACKET,
 			.comp_mask =
@@ -630,7 +632,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
 		 &qp_init_attr);
 #else
 	qp = mlx5_glue->create_qp_ex
-		(priv->sh->ctx,
+		(priv->sh->cdev->ctx,
 		 &(struct ibv_qp_init_attr_ex){
 			.qp_type = IBV_QPT_RAW_PACKET,
 			.comp_mask =
@@ -715,7 +717,7 @@ static int
 mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct ibv_context *ctx = priv->sh->ctx;
+	struct ibv_context *ctx = priv->sh->cdev->ctx;
 	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
 
 	if (rxq)
@@ -779,7 +781,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
 		goto error;
 	rxq = priv->drop_queue.rxq;
 	ind_tbl = mlx5_glue->create_rwq_ind_table
-				(priv->sh->ctx,
+				(priv->sh->cdev->ctx,
 				 &(struct ibv_rwq_ind_table_init_attr){
 					.log_ind_tbl_size = 0,
 					.ind_tbl = (struct ibv_wq **)&rxq->wq,
@@ -792,7 +794,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
 		rte_errno = errno;
 		goto error;
 	}
-	hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
+	hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx,
 		&(struct ibv_qp_init_attr_ex){
 			.qp_type = IBV_QPT_RAW_PACKET,
 			.comp_mask = IBV_QP_INIT_ATTR_PD |
@@ -901,7 +903,7 @@ mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
 		qp_attr.max_tso_header = txq_ctrl->max_tso_header;
 		qp_attr.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
 	}
-	qp_obj = mlx5_glue->create_qp_ex(priv->sh->ctx, &qp_attr);
+	qp_obj = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx, &qp_attr);
 	if (qp_obj == NULL) {
 		DRV_LOG(ERR, "Port %u Tx queue %u QP creation failure.",
 			dev->data->port_id, idx);
@@ -947,7 +949,8 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	}
 	cqe_n = desc / MLX5_TX_COMP_THRESH +
 		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
-	txq_obj->cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
+	txq_obj->cq = mlx5_glue->create_cq(priv->sh->cdev->ctx, cqe_n,
+					   NULL, NULL, 0);
 	if (txq_obj->cq == NULL) {
 		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
 			dev->data->port_id, idx);
@@ -1070,7 +1073,7 @@ mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
 #if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct ibv_context *ctx = sh->ctx;
+	struct ibv_context *ctx = sh->cdev->ctx;
 	struct mlx5dv_qp_init_attr qp_init_attr = {0};
 	struct {
 		struct ibv_cq_init_attr_ex ibv;
@@ -916,7 +916,7 @@ mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
 	 * start after the common header that with the length of a DW(u32).
 	 */
 	node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
-	prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->ctx, &node);
+	prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->cdev->ctx, &node);
 	if (!prf->obj) {
 		DRV_LOG(ERR, "Failed to create flex parser node object.");
 		return (rte_errno == 0) ? -ENODEV : -rte_errno;
@@ -1021,7 +1021,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
 		 */
 		uar_mapping = 0;
 #endif
-		sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, uar_mapping);
+		sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
+						       uar_mapping);
 #ifdef MLX5DV_UAR_ALLOC_TYPE_NC
 		if (!sh->tx_uar &&
 		    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
@@ -1039,8 +1040,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
 			 */
 			DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (BF)");
 			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
-			sh->tx_uar = mlx5_glue->devx_alloc_uar
-						  (sh->ctx, uar_mapping);
+			sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
+							       uar_mapping);
 		} else if (!sh->tx_uar &&
 			   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
 			if (config->dbnc == MLX5_TXDB_NCACHED)
@@ -1052,8 +1053,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
 			 */
 			DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (NC)");
 			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
-			sh->tx_uar = mlx5_glue->devx_alloc_uar
-						  (sh->ctx, uar_mapping);
+			sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
+							       uar_mapping);
 		}
 #endif
 		if (!sh->tx_uar) {
@@ -1080,8 +1081,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
 	}
 	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
 		uar_mapping = 0;
-		sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
-						  (sh->ctx, uar_mapping);
+		sh->devx_rx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
+							    uar_mapping);
 #ifdef MLX5DV_UAR_ALLOC_TYPE_NC
 		if (!sh->devx_rx_uar &&
 		    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
@@ -1093,7 +1094,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
 			DRV_LOG(DEBUG, "Failed to allocate Rx DevX UAR (BF)");
 			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
 			sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
-						  (sh->ctx, uar_mapping);
+						  (sh->cdev->ctx, uar_mapping);
 		}
 #endif
 		if (!sh->devx_rx_uar) {
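The UAR hunks only change where the context comes from; they deliberately preserve the BlueFlame/non-cached mapping fallback around each allocation. A sketch of that toggle logic, with a stub allocator that fails for one mapping type so the fallback path is exercised; constants and names here are illustrative only, not DevX API values.

#include <stddef.h>
#include <stdio.h>

#define MAP_BF 0
#define MAP_NC 1
#define RETRIES 2

static void *stub_alloc_uar(int mapping)
{
	static int bf_broken = 1;

	if (mapping == MAP_BF && bf_broken)
		return NULL;
	return (void *)&bf_broken; /* any non-NULL token */
}

int main(void)
{
	void *uar = NULL;
	int mapping = MAP_BF;
	int retry;

	for (retry = 0; retry < RETRIES && uar == NULL; ++retry) {
		uar = stub_alloc_uar(mapping);
		if (uar == NULL) {
			/* Flip BlueFlame <-> non-cached and try again. */
			mapping = (mapping == MAP_BF) ? MAP_NC : MAP_BF;
		}
	}
	printf("mapping=%d uar=%p\n", mapping, uar);
	return uar != NULL ? 0 : 1;
}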
@@ -1278,7 +1279,7 @@ mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
  */
 struct mlx5_dev_ctx_shared *
 mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 			  const struct mlx5_dev_config *config)
 {
 	struct mlx5_dev_ctx_shared *sh;
 	int err = 0;
@@ -1291,8 +1292,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
 	/* Search for IB context by device name. */
 	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
-		if (!strcmp(sh->ibdev_name,
-			    mlx5_os_get_ctx_device_name(spawn->ctx))) {
+		if (!strcmp(sh->ibdev_name, spawn->phys_dev_name)) {
 			sh->refcnt++;
 			goto exit;
 		}
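With the spawn data no longer carrying its own context pointer, the shared-context lookup above keys on the physical device name the spawn already holds. A sketch of that name-keyed reuse pattern, using a plain array in place of the driver's LIST_FOREACH; structure and field names are simplified assumptions.

#include <stdio.h>
#include <string.h>

struct shared_ctx { char ibdev_name[64]; int refcnt; };

static struct shared_ctx *
lookup_shared(struct shared_ctx *list, size_t n, const char *phys_dev_name)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (strcmp(list[i].ibdev_name, phys_dev_name) == 0) {
			list[i].refcnt++; /* reuse the existing context */
			return &list[i];
		}
	}
	return NULL; /* caller allocates a fresh shared context */
}

int main(void)
{
	struct shared_ctx pool[2] = {
		{ "mlx5_0", 1 }, { "mlx5_1", 1 },
	};
	struct shared_ctx *sh = lookup_shared(pool, 2, "mlx5_1");

	printf("%s refcnt=%d\n", sh ? sh->ibdev_name : "none",
	       sh ? sh->refcnt : 0);
	return 0;
}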
@@ -1313,10 +1313,9 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	sh->numa_node = spawn->cdev->dev->numa_node;
 	sh->cdev = spawn->cdev;
 	sh->devx = sh->cdev->config.devx;
-	sh->ctx = spawn->ctx;
 	if (spawn->bond_info)
 		sh->bond = *spawn->bond_info;
-	err = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);
+	err = mlx5_os_get_dev_attr(sh->cdev->ctx, &sh->device_attr);
 	if (err) {
 		DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
 		goto error;
@@ -1324,9 +1323,9 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	sh->refcnt = 1;
 	sh->max_port = spawn->max_port;
 	sh->reclaim_mode = config->reclaim_mode;
-	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),
+	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->cdev->ctx),
 		sizeof(sh->ibdev_name) - 1);
-	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->ctx),
+	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
 		sizeof(sh->ibdev_path) - 1);
 	/*
 	 * Setting port_id to max unallowed value means
@@ -1337,7 +1336,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
 		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
 	}
-	sh->pd = mlx5_os_alloc_pd(sh->ctx);
+	sh->pd = mlx5_os_alloc_pd(sh->cdev->ctx);
 	if (sh->pd == NULL) {
 		DRV_LOG(ERR, "PD allocation failure");
 		err = ENOMEM;
@@ -1349,14 +1348,14 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 			DRV_LOG(ERR, "Fail to extract pdn from PD");
 			goto error;
 		}
-		sh->td = mlx5_devx_cmd_create_td(sh->ctx);
+		sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
 		if (!sh->td) {
 			DRV_LOG(ERR, "TD allocation failure");
 			err = ENOMEM;
 			goto error;
 		}
 		tis_attr.transport_domain = sh->td->id;
-		sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
+		sh->tis = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
 		if (!sh->tis) {
 			DRV_LOG(ERR, "TIS allocation failure");
 			err = ENOMEM;
@@ -1431,8 +1430,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		mlx5_glue->devx_free_uar(sh->tx_uar);
 	if (sh->pd)
 		claim_zero(mlx5_os_dealloc_pd(sh->pd));
-	if (sh->ctx)
-		claim_zero(mlx5_glue->close_device(sh->ctx));
 	mlx5_free(sh);
 	MLX5_ASSERT(err > 0);
 	rte_errno = err;
@@ -1517,8 +1514,6 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
 	if (sh->devx_rx_uar)
 		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
-	if (sh->ctx)
-		claim_zero(mlx5_glue->close_device(sh->ctx));
 	MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
 	pthread_mutex_destroy(&sh->txpp.mutex);
 	mlx5_free(sh);
@@ -1724,8 +1719,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 		return 0;
 	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
 		dev->data->port_id,
-		((priv->sh->ctx != NULL) ?
-		mlx5_os_get_ctx_device_name(priv->sh->ctx) : ""));
+		((priv->sh->cdev->ctx != NULL) ?
+		mlx5_os_get_ctx_device_name(priv->sh->cdev->ctx) : ""));
 	/*
 	 * If default mreg copy action is removed at the stop stage,
 	 * the search will return none and nothing will be done anymore.
@@ -137,7 +137,6 @@ struct mlx5_dev_spawn_data {
 	int pf_bond; /**< bonding device PF index. < 0 - no bonding */
 	struct mlx5_switch_info info; /**< Switch information. */
 	const char *phys_dev_name; /**< Name of physical device. */
-	void *ctx; /**< Associated physical device context. */
 	struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
 	struct rte_pci_device *pci_dev; /**< Backend PCI device. */
 	struct mlx5_common_device *cdev; /**< Backend common device. */
@@ -1134,7 +1133,6 @@ struct mlx5_dev_ctx_shared {
 	uint32_t max_port; /* Maximal IB device port index. */
 	struct mlx5_bond_info bond; /* Bonding information. */
 	struct mlx5_common_device *cdev; /* Backend mlx5 device. */
-	void *ctx; /* Verbs/DV/DevX context. */
 	void *pd; /* Protection Domain. */
 	uint32_t pdn; /* Protection Domain number. */
 	uint32_t tdn; /* Transport Domain number. */
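The two header hunks above encode the ownership move the whole commit is about: the context field leaves both the spawn data and the shared device context, and the common device becomes its single owner, reached through the cdev back-pointer. A minimal sketch of the resulting layout; the types are stand-ins, not the driver's full structures.

#include <stdio.h>

struct device_context { const char *name; };

struct mlx5_common_device {
	struct device_context *ctx; /* single owner of the context */
};

struct mlx5_dev_ctx_shared {
	struct mlx5_common_device *cdev; /* backend mlx5 device */
	/* no private "void *ctx" copy any more */
};

int main(void)
{
	struct device_context dc = { "mlx5_0" };
	struct mlx5_common_device cdev = { &dc };
	struct mlx5_dev_ctx_shared sh = { &cdev };

	/* Old access was sh.ctx; the new access path is: */
	printf("%s\n", sh.cdev->ctx->name);
	return 0;
}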
@@ -279,7 +279,7 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
 	rq_attr.wq_attr.pd = priv->sh->pdn;
 	rq_attr.counter_set_id = priv->counter_set_id;
 	/* Create RQ using DevX API. */
-	return mlx5_devx_rq_create(priv->sh->ctx, &rxq_ctrl->obj->rq_obj,
+	return mlx5_devx_rq_create(priv->sh->cdev->ctx, &rxq_ctrl->obj->rq_obj,
 				   wqe_size, log_desc_n, &rq_attr,
 				   rxq_ctrl->socket);
 }
@@ -365,8 +365,8 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
 	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
 	log_cqe_n = log2above(cqe_n);
 	/* Create CQ using DevX API. */
-	ret = mlx5_devx_cq_create(sh->ctx, &rxq_ctrl->obj->cq_obj, log_cqe_n,
-				  &cq_attr, sh->numa_node);
+	ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj,
+				  log_cqe_n, &cq_attr, sh->numa_node);
 	if (ret)
 		return ret;
 	cq_obj = &rxq_ctrl->obj->cq_obj;
@@ -442,7 +442,7 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
 			attr.wq_attr.log_hairpin_data_sz -
 			MLX5_HAIRPIN_QUEUE_STRIDE;
 	attr.counter_set_id = priv->counter_set_id;
-	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
+	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr,
 					   rxq_ctrl->socket);
 	if (!tmpl->rq) {
 		DRV_LOG(ERR,
@@ -486,8 +486,8 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
 
 		tmpl->devx_channel = mlx5_os_devx_create_event_channel
-							(priv->sh->ctx,
+							(priv->sh->cdev->ctx,
 							 devx_ev_flag);
 		if (!tmpl->devx_channel) {
 			rte_errno = errno;
 			DRV_LOG(ERR, "Failed to create event channel %d.",
@@ -602,7 +602,7 @@ mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
 						      ind_tbl->queues_n);
 	if (!rqt_attr)
 		return -rte_errno;
-	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
+	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->cdev->ctx, rqt_attr);
 	mlx5_free(rqt_attr);
 	if (!ind_tbl->rqt) {
 		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
@@ -770,7 +770,7 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
 
 	mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
 			       hrxq->ind_table, tunnel, &tir_attr);
-	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
+	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->cdev->ctx, &tir_attr);
 	if (!hrxq->tir) {
 		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
 			dev->data->port_id);
@@ -936,7 +936,7 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
 			attr.wq_attr.log_hairpin_data_sz -
 			MLX5_HAIRPIN_QUEUE_STRIDE;
 	attr.tis_num = priv->sh->tis->id;
-	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
+	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &attr);
 	if (!tmpl->sq) {
 		DRV_LOG(ERR,
 			"Port %u tx hairpin queue %u can't create SQ object.",
@@ -1001,8 +1001,8 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
 		.ts_format = mlx5_ts_format_conv(priv->sh->sq_ts_format),
 	};
 	/* Create Send Queue object with DevX. */
-	return mlx5_devx_sq_create(priv->sh->ctx, &txq_obj->sq_obj, log_desc_n,
-				   &sq_attr, priv->sh->numa_node);
+	return mlx5_devx_sq_create(priv->sh->cdev->ctx, &txq_obj->sq_obj,
+				   log_desc_n, &sq_attr, priv->sh->numa_node);
 }
 #endif
 
@@ -1058,7 +1058,7 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 		return 0;
 	}
 	/* Create completion queue object with DevX. */
-	ret = mlx5_devx_cq_create(sh->ctx, &txq_obj->cq_obj, log_desc_n,
+	ret = mlx5_devx_cq_create(sh->cdev->ctx, &txq_obj->cq_obj, log_desc_n,
 				  &cq_attr, priv->sh->numa_node);
 	if (ret) {
 		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
@@ -7651,7 +7651,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
 	}
 	mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
 	size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
-	mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
+	mem_mng->umem = mlx5_os_umem_reg(sh->cdev->ctx, mem, size,
 					 IBV_ACCESS_LOCAL_WRITE);
 	if (!mem_mng->umem) {
 		rte_errno = errno;
@@ -7665,7 +7665,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
 	mkey_attr.pd = sh->pdn;
 	mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
 	mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
-	mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
+	mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->cdev->ctx, &mkey_attr);
 	if (!mem_mng->dm) {
 		mlx5_os_umem_dereg(mem_mng->umem);
 		rte_errno = errno;
@@ -309,13 +309,14 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
 		    enum mlx5_access_aso_opc_mod aso_opc_mod)
 {
 	uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;
+	struct mlx5_common_device *cdev = sh->cdev;
 
 	switch (aso_opc_mod) {
 	case ASO_OPC_MOD_FLOW_HIT:
 		if (mlx5_aso_reg_mr(sh, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
 				    sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
 			return -1;
-		if (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
+		if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,
 				       sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
 				       sh->sq_ts_format)) {
 			mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
@@ -324,7 +325,7 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
 		mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
 		break;
 	case ASO_OPC_MOD_POLICER:
-		if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->pools_mng.sq, 0,
+		if (mlx5_aso_sq_create(cdev->ctx, &sh->mtrmng->pools_mng.sq, 0,
 				       sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
 				       sh->sq_ts_format))
 			return -1;
@@ -335,7 +336,7 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
 		if (mlx5_aso_reg_mr(sh, 64 * sq_desc_n,
 				    &sh->ct_mng->aso_sq.mr, 0))
 			return -1;
-		if (mlx5_aso_sq_create(sh->ctx, &sh->ct_mng->aso_sq, 0,
+		if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,
 				       sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
 				       sh->sq_ts_format)) {
 			mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
@@ -3618,8 +3618,8 @@ flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
 	}
 	*resource = *ctx_resource;
 	resource->idx = idx;
-	ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,
-							      resource,
+	ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
+							      domain, resource,
 							      &resource->action);
 	if (ret) {
 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
@@ -5434,7 +5434,7 @@ flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
 	else
 		ns = sh->rx_domain;
 	ret = mlx5_flow_os_create_flow_action_modify_header
-					(sh->ctx, ns, entry,
+					(sh->cdev->ctx, ns, entry,
 					 data_len, &entry->action);
 	if (ret) {
 		mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
@@ -6056,7 +6056,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 
 	if (fallback) {
 		/* bulk_bitmap must be 0 for single counter allocation. */
-		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
+		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
 		if (!dcs)
 			return NULL;
 		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
@@ -6074,7 +6074,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 		*cnt_free = cnt;
 		return pool;
 	}
-	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
 	if (!dcs) {
 		rte_errno = ENODATA;
 		return NULL;
@@ -6369,7 +6369,7 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
 	uint32_t log_obj_size;
 
 	log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
-	dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
+	dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
 						      priv->sh->pdn, log_obj_size);
 	if (!dcs) {
 		rte_errno = ENODATA;
@@ -9067,7 +9067,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
 		}
 	} else {
 		/* Create a GENEVE TLV object and resource. */
-		obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
+		obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
 				geneve_opt_v->option_class,
 				geneve_opt_v->option_type,
 				geneve_opt_v->option_len);
@@ -10427,7 +10427,8 @@ flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
 	dv_attr.priority = ref->priority;
 	if (tbl->is_egress)
 		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
-	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
+	ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
+					       tbl->tbl.obj,
 					       &resource->matcher_object);
 	if (ret) {
 		mlx5_free(resource);
@@ -11864,7 +11865,7 @@ flow_dv_age_pool_create(struct rte_eth_dev *dev,
 	struct mlx5_devx_obj *obj = NULL;
 	uint32_t i;
 
-	obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
+	obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
 						    priv->sh->pdn);
 	if (!obj) {
 		rte_errno = ENODATA;
@@ -12292,7 +12293,7 @@ flow_dv_ct_pool_create(struct rte_eth_dev *dev,
 	uint32_t i;
 	uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
 
-	obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
+	obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
 							  priv->sh->pdn, log_obj_size);
 	if (!obj) {
 		rte_errno = ENODATA;
@@ -17099,8 +17100,8 @@ mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
 		goto err;
 	dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
 	__flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
-	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
-					       &matcher);
+	ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
+					       tbl->obj, &matcher);
 	if (ret)
 		goto err;
 	__flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
@@ -17168,7 +17169,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
 					  0, 0, 0, NULL);
 	if (!tbl)
 		goto err;
-	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
 	if (!dcs)
 		goto err;
 	ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
@@ -17177,8 +17178,8 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
 		goto err;
 	dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
 	__flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
-	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
-					       &matcher);
+	ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
+					       tbl->obj, &matcher);
 	if (ret)
 		goto err;
 	__flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
@@ -198,7 +198,7 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
 {
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct ibv_context *ctx = priv->sh->ctx;
+	struct ibv_context *ctx = priv->sh->cdev->ctx;
 	struct ibv_counter_set_init_attr init = {
 			 .counter_set_id = counter->shared_info.id};
 
@@ -210,7 +210,7 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
 	return 0;
 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct ibv_context *ctx = priv->sh->ctx;
+	struct ibv_context *ctx = priv->sh->cdev->ctx;
 	struct ibv_counters_init_attr init = {0};
 	struct ibv_counter_attach_attr attach;
 	int ret;
@@ -49,7 +49,7 @@ static int
 mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
 {
 	MLX5_ASSERT(!sh->txpp.echan);
-	sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->ctx,
+	sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->cdev->ctx,
 			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
 	if (!sh->txpp.echan) {
 		rte_errno = errno;
@@ -104,7 +104,7 @@ mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
 	MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
 		 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
 	sh->txpp.pp = mlx5_glue->dv_alloc_pp
-				(sh->ctx, sizeof(pp), &pp,
+				(sh->cdev->ctx, sizeof(pp), &pp,
 				 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
 	if (sh->txpp.pp == NULL) {
 		DRV_LOG(ERR, "Failed to allocate packet pacing index.");
@@ -245,7 +245,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
 	int ret;
 
 	/* Create completion queue object for Rearm Queue. */
-	ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
+	ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
 				  log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,
 				  sh->numa_node);
 	if (ret) {
@@ -259,7 +259,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
 	/* Create send queue object for Rearm Queue. */
 	sq_attr.cqn = wq->cq_obj.cq->id;
 	/* There should be no WQE leftovers in the cyclic queue. */
-	ret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj,
+	ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
 				  log2above(MLX5_TXPP_REARM_SQ_SIZE), &sq_attr,
 				  sh->numa_node);
 	if (ret) {
@@ -409,7 +409,7 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
 	sh->txpp.ts_p = 0;
 	sh->txpp.ts_n = 0;
 	/* Create completion queue object for Clock Queue. */
-	ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
+	ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
 				  log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,
 				  sh->numa_node);
 	if (ret) {
@@ -446,7 +446,8 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
 	sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
 	sq_attr.wq_attr.pd = sh->pdn;
 	sq_attr.ts_format = mlx5_ts_format_conv(sh->sq_ts_format);
-	ret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj, log2above(wq->sq_size),
+	ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
+				  log2above(wq->sq_size),
 				  &sq_attr, sh->numa_node);
 	if (ret) {
 		rte_errno = errno;
@@ -38,7 +38,7 @@ mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN])
 		return -rte_errno;
 	}
 	priv = dev->data->dev_private;
-	context_obj = (mlx5_context_st *)priv->sh->ctx;
+	context_obj = (mlx5_context_st *)priv->sh->cdev->ctx;
 	memcpy(mac, context_obj->mlx5_dev.eth_mac, RTE_ETHER_ADDR_LEN);
 	return 0;
 }
@@ -66,7 +66,7 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[MLX5_NAMESIZE])
 		return -rte_errno;
 	}
 	priv = dev->data->dev_private;
-	context_obj = (mlx5_context_st *)priv->sh->ctx;
+	context_obj = (mlx5_context_st *)priv->sh->cdev->ctx;
 	strncpy(*ifname, context_obj->mlx5_dev.name, MLX5_NAMESIZE);
 	return 0;
 }
@@ -93,7 +93,7 @@ mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
 		return -rte_errno;
 	}
 	priv = dev->data->dev_private;
-	context_obj = (mlx5_context_st *)priv->sh->ctx;
+	context_obj = (mlx5_context_st *)priv->sh->cdev->ctx;
 	*mtu = context_obj->mlx5_dev.mtu_bytes;
 	return 0;
 }
@@ -253,7 +253,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		return -rte_errno;
 	}
 	priv = dev->data->dev_private;
-	context_obj = (mlx5_context_st *)priv->sh->ctx;
+	context_obj = (mlx5_context_st *)priv->sh->cdev->ctx;
 	dev_link.link_speed = context_obj->mlx5_dev.link_speed / (1000 * 1000);
 	dev_link.link_status =
 		(context_obj->mlx5_dev.link_state == 1 && !mlx5_is_removed(dev))
@@ -359,7 +359,7 @@ mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
 	int err;
 	struct mlx5_devx_clock mlx5_clock;
 	struct mlx5_priv *priv = dev->data->dev_private;
-	mlx5_context_st *context_obj = (mlx5_context_st *)priv->sh->ctx;
+	mlx5_context_st *context_obj = (mlx5_context_st *)priv->sh->cdev->ctx;
 
 	err = mlx5_glue->query_rt_values(context_obj, &mlx5_clock);
 	if (err != 0) {
@@ -383,7 +383,7 @@ int
 mlx5_is_removed(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	mlx5_context_st *context_obj = (mlx5_context_st *)priv->sh->ctx;
+	mlx5_context_st *context_obj = (mlx5_context_st *)priv->sh->cdev->ctx;
 
 	if (*context_obj->shutdown_event_obj.p_flag)
 		return 1;
@@ -357,14 +357,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	/* Initialize the shutdown event in mlx5_dev_spawn to
 	 * support mlx5_is_removed for Windows.
 	 */
-	err = mlx5_glue->devx_init_showdown_event(sh->ctx);
+	err = mlx5_glue->devx_init_showdown_event(sh->cdev->ctx);
 	if (err) {
 		DRV_LOG(ERR, "failed to init showdown event: %s",
 			strerror(errno));
 		goto error;
 	}
 	DRV_LOG(DEBUG, "MPW isn't supported");
-	mlx5_os_get_dev_attr(sh->ctx, &device_attr);
+	mlx5_os_get_dev_attr(sh->cdev->ctx, &device_attr);
 	config->swp = device_attr.sw_parsing_offloads &
 		(MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
 		 MLX5_SW_PARSING_TSO_CAP);
@@ -472,7 +472,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		config->cqe_comp = 0;
 	}
 	if (sh->devx) {
-		err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
+		err = mlx5_devx_cmd_query_hca_attr(sh->cdev->ctx,
+						   &config->hca_attr);
 		if (err) {
 			err = -err;
 			goto error;
@@ -499,7 +500,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 
 		err = config->hca_attr.access_register_user ?
 			mlx5_devx_cmd_register_read
-				(sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
+				(sh->cdev->ctx, MLX5_REGISTER_ID_MTUTC, 0,
 				reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
 		if (!err) {
 			uint32_t ts_mode;
@@ -924,6 +925,7 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
 		.pf_bond = -1,
 		.max_port = 1,
 		.phys_port = 1,
+		.phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx),
 		.pci_dev = pci_dev,
 		.cdev = cdev,
 		.ifindex = -1, /* Spawn will assign */
@@ -944,7 +946,6 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
 		.dv_flow_en = 1,
 		.log_hp_size = MLX5_ARG_UNSET,
 	};
-	void *ctx;
 	int ret;
 	uint32_t restore;
 
@@ -952,20 +953,12 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
 		DRV_LOG(ERR, "Secondary process is not supported on Windows.");
 		return -ENOTSUP;
 	}
-	ret = mlx5_os_open_device(cdev, &ctx);
-	if (ret) {
-		DRV_LOG(ERR, "Fail to open DevX device %s", cdev->dev->name);
-		return -rte_errno;
-	}
 	ret = mlx5_init_once();
 	if (ret) {
 		DRV_LOG(ERR, "unable to init PMD global data: %s",
			strerror(rte_errno));
-		claim_zero(mlx5_glue->close_device(ctx));
 		return -rte_errno;
 	}
-	spawn.ctx = ctx;
-	spawn.phys_dev_name = mlx5_os_get_ctx_device_name(ctx);
 	/* Device specific configuration. */
 	switch (pci_dev->id.device_id) {
 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
@@ -982,10 +975,8 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
 		break;
 	}
 	spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, &dev_config);
-	if (!spawn.eth_dev) {
-		claim_zero(mlx5_glue->close_device(ctx));
+	if (!spawn.eth_dev)
 		return -rte_errno;
-	}
 	restore = spawn.eth_dev->data->dev_flags;
 	rte_eth_copy_pci_info(spawn.eth_dev, pci_dev);
 	/* Restore non-PCI flags cleared by the above call. */
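The probe hunks above show the lifetime consequence of the shared context: the common layer opens the device before any class driver probes and closes it after they are removed, so the driver's failure paths simply return instead of closing a context they no longer own. A stub sketch of that ownership split; every function and field here is illustrative, not the DPDK API.

#include <errno.h>
#include <stdio.h>

struct common_device { void *ctx; };

static int driver_probe(struct common_device *cdev)
{
	if (cdev->ctx == NULL) {
		/* Before: the driver closed the context it had opened.
		 * After: nothing to undo here; the owner cleans up. */
		return -ENODEV;
	}
	return 0;
}

int main(void)
{
	struct common_device good = { .ctx = (void *)1 };
	struct common_device bad = { .ctx = NULL };

	printf("good=%d bad=%d\n", driver_probe(&good), driver_probe(&bad));
	return 0;
}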
@@ -110,7 +110,8 @@ mlx5_regex_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 		/* Iterate all the existing mlx5 devices. */
 		TAILQ_FOREACH(priv, &mlx5_mem_event_list, mem_event_cb)
 			mlx5_free_mr_by_addr(&priv->mr_scache,
-					     priv->ctx->device->name,
+					     mlx5_os_get_ctx_device_name
+							     (priv->cdev->ctx),
 					     addr, len);
 		pthread_mutex_unlock(&mem_event_list_lock);
 		break;
@@ -123,54 +124,42 @@ mlx5_regex_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 static int
 mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
 {
-	struct ibv_device *ibv;
 	struct mlx5_regex_priv *priv = NULL;
-	struct ibv_context *ctx = NULL;
 	struct mlx5_hca_attr attr;
 	char name[RTE_REGEXDEV_NAME_MAX_LEN];
 	int ret;
 	uint32_t val;
 
-	ibv = mlx5_os_get_ibv_dev(cdev->dev);
-	if (ibv == NULL)
-		return -rte_errno;
-	DRV_LOG(INFO, "Probe device \"%s\".", ibv->name);
-	ctx = mlx5_glue->dv_open_device(ibv);
-	if (!ctx) {
-		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
-		rte_errno = ENODEV;
-		return -rte_errno;
-	}
-	ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
+	ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &attr);
 	if (ret) {
 		DRV_LOG(ERR, "Unable to read HCA capabilities.");
 		rte_errno = ENOTSUP;
-		goto dev_error;
+		return -rte_errno;
 	} else if (((!attr.regex) && (!attr.mmo_regex_sq_en) &&
 		(!attr.mmo_regex_qp_en)) || attr.regexp_num_of_engines == 0) {
 		DRV_LOG(ERR, "Not enough capabilities to support RegEx, maybe "
 			"old FW/OFED version?");
 		rte_errno = ENOTSUP;
-		goto dev_error;
+		return -rte_errno;
 	}
-	if (mlx5_regex_engines_status(ctx, 2)) {
+	if (mlx5_regex_engines_status(cdev->ctx, 2)) {
 		DRV_LOG(ERR, "RegEx engine error.");
 		rte_errno = ENOMEM;
-		goto dev_error;
+		return -rte_errno;
 	}
 	priv = rte_zmalloc("mlx5 regex device private", sizeof(*priv),
 			   RTE_CACHE_LINE_SIZE);
 	if (!priv) {
 		DRV_LOG(ERR, "Failed to allocate private memory.");
 		rte_errno = ENOMEM;
-		goto dev_error;
+		return -rte_errno;
 	}
 	priv->mmo_regex_qp_cap = attr.mmo_regex_qp_en;
 	priv->mmo_regex_sq_cap = attr.mmo_regex_sq_en;
 	priv->qp_ts_format = attr.qp_ts_format;
-	priv->ctx = ctx;
+	priv->cdev = cdev;
 	priv->nb_engines = 2; /* attr.regexp_num_of_engines */
-	ret = mlx5_devx_regex_register_read(priv->ctx, 0,
+	ret = mlx5_devx_regex_register_read(priv->cdev->ctx, 0,
 					    MLX5_RXP_CSR_IDENTIFIER, &val);
 	if (ret) {
 		DRV_LOG(ERR, "CSR read failed!");
@@ -185,20 +174,20 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
 	if (priv->regexdev == NULL) {
 		DRV_LOG(ERR, "Failed to register RegEx device.");
 		rte_errno = rte_errno ? rte_errno : EINVAL;
-		goto error;
+		goto dev_error;
 	}
 	/*
 	 * This PMD always claims the write memory barrier on UAR
 	 * registers writings, it is safe to allocate UAR with any
 	 * memory mapping type.
 	 */
-	priv->uar = mlx5_devx_alloc_uar(ctx, -1);
+	priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
 	if (!priv->uar) {
 		DRV_LOG(ERR, "can't allocate uar.");
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	priv->pd = mlx5_glue->alloc_pd(ctx);
+	priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
 	if (!priv->pd) {
 		DRV_LOG(ERR, "can't allocate pd.");
 		rte_errno = ENOMEM;
@@ -248,8 +237,6 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
 	if (priv->regexdev)
 		rte_regexdev_unregister(priv->regexdev);
 dev_error:
-	if (ctx)
-		mlx5_glue->close_device(ctx);
 	if (priv)
 		rte_free(priv);
 	return -rte_errno;
@@ -283,8 +270,6 @@ mlx5_regex_dev_remove(struct mlx5_common_device *cdev)
 			mlx5_glue->devx_free_uar(priv->uar);
 		if (priv->regexdev)
 			rte_regexdev_unregister(priv->regexdev);
-		if (priv->ctx)
-			mlx5_glue->close_device(priv->ctx);
 		rte_free(priv);
 	}
 	return 0;
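With the open/close calls gone, the RegEx probe above settles into a common C control-flow shape: plain early returns while nothing has been allocated, and a single unwind label once resources exist. A compact sketch of that shape with stub allocators standing in for the real calls; the parameters and return codes are illustrative only.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int probe(int caps_ok, int alloc_ok)
{
	char *priv;

	if (!caps_ok)
		return -ENOTSUP; /* nothing allocated yet: just return */
	priv = malloc(16);
	if (priv == NULL)
		return -ENOMEM;
	if (!alloc_ok)
		goto dev_error; /* from here on, unwind via the label */
	free(priv);
	return 0;
dev_error:
	free(priv);
	return -EINVAL;
}

int main(void)
{
	printf("%d %d %d\n", probe(0, 1), probe(1, 0), probe(1, 1));
	return 0;
}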
@@ -58,7 +58,7 @@ struct mlx5_regex_db {
 
 struct mlx5_regex_priv {
 	TAILQ_ENTRY(mlx5_regex_priv) next;
-	struct ibv_context *ctx; /* Device context. */
+	struct mlx5_common_device *cdev; /* Backend mlx5 device. */
 	struct rte_regexdev *regexdev; /* Pointer to the RegEx dev. */
 	uint16_t nb_queues; /* Number of queues. */
 	struct mlx5_regex_qp *qps; /* Pointer to the qp array. */
@ -83,7 +83,7 @@ regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
|
|||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
cq->ci = 0;
|
cq->ci = 0;
|
||||||
ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, cq->log_nb_desc,
|
ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, cq->log_nb_desc,
|
||||||
&attr, SOCKET_ID_ANY);
|
&attr, SOCKET_ID_ANY);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
DRV_LOG(ERR, "Can't create CQ object.");
|
DRV_LOG(ERR, "Can't create CQ object.");
|
||||||
@@ -157,7 +157,7 @@ regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
 	attr.sq_size = RTE_BIT32(MLX5_REGEX_WQE_LOG_NUM(priv->has_umr,
 			log_nb_desc));
 	attr.mmo = priv->mmo_regex_qp_cap;
-	ret = mlx5_devx_qp_create(priv->ctx, &qp_obj->qp_obj,
+	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp_obj->qp_obj,
 			MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_nb_desc),
 			&attr, SOCKET_ID_ANY);
 	if (ret) {
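Both DevX constructors already take the target context as their first parameter, so these conversions touch only the call sites; the helper signatures are unchanged. A condensed usage sketch on the shared context (attribute setup elided, call shapes as in the hunks above):

	struct mlx5_devx_cq_attr cq_attr = {0};	/* fill UAR/EQN as needed */
	struct mlx5_devx_qp_attr qp_attr = {0};
	int ret;

	ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj,
				  cq->log_nb_desc, &cq_attr, SOCKET_ID_ANY);
	if (ret == 0)
		ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp_obj->qp_obj,
					  log_nb_desc, &qp_attr, SOCKET_ID_ANY);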
@@ -756,8 +756,8 @@ mlx5_regexdev_setup_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id)
 	for (i = 0; i < qp->nb_desc; i++) {
 		attr.klm_num = MLX5_REGEX_MAX_KLM_NUM;
 		attr.klm_array = qp->jobs[i].imkey_array;
-		qp->jobs[i].imkey = mlx5_devx_cmd_mkey_create(priv->ctx,
-							      &attr);
+		qp->jobs[i].imkey = mlx5_devx_cmd_mkey_create
+						(priv->cdev->ctx, &attr);
 		if (!qp->jobs[i].imkey) {
 			err = -rte_errno;
 			DRV_LOG(ERR, "Failed to allocate imkey.");
@@ -167,7 +167,7 @@ rxp_init_rtru(struct mlx5_regex_priv *priv, uint8_t id, uint32_t init_bits)
 	uint32_t poll_value;
 	uint32_t expected_value;
 	uint32_t expected_mask;
-	struct ibv_context *ctx = priv->ctx;
+	struct ibv_context *ctx = priv->cdev->ctx;
 	int ret = 0;
 
 	/* Read the rtru ctrl CSR. */
@@ -313,7 +313,7 @@ rxp_program_rof(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
 		tmp_addr = rxp_get_reg_address(address);
 		if (tmp_addr == UINT32_MAX)
 			goto parse_error;
-		ret = mlx5_devx_regex_register_read(priv->ctx, id,
+		ret = mlx5_devx_regex_register_read(priv->cdev->ctx, id,
 						    tmp_addr, &reg_val);
 		if (ret)
 			goto parse_error;
@@ -337,7 +337,7 @@ rxp_program_rof(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
 		tmp_addr = rxp_get_reg_address(address);
 		if (tmp_addr == UINT32_MAX)
 			goto parse_error;
-		ret = mlx5_devx_regex_register_read(priv->ctx, id,
+		ret = mlx5_devx_regex_register_read(priv->cdev->ctx, id,
 						    tmp_addr, &reg_val);
 		if (ret)
 			goto parse_error;
@@ -359,7 +359,7 @@ rxp_program_rof(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
 		tmp_addr = rxp_get_reg_address(address);
 		if (tmp_addr == UINT32_MAX)
 			goto parse_error;
-		ret = mlx5_devx_regex_register_read(priv->ctx, id,
+		ret = mlx5_devx_regex_register_read(priv->cdev->ctx, id,
 						    tmp_addr, &reg_val);
 		if (ret)
 			goto parse_error;
@@ -395,7 +395,7 @@ rxp_program_rof(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
 		if (tmp_addr == UINT32_MAX)
 			goto parse_error;
 
-		ret = mlx5_devx_regex_register_read(priv->ctx, id,
+		ret = mlx5_devx_regex_register_read(priv->cdev->ctx, id,
 						    tmp_addr, &reg_val);
 		if (ret) {
 			DRV_LOG(ERR, "RXP CSR read failed!");
@@ -418,17 +418,17 @@ rxp_program_rof(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
 			 */
 			temp = val;
 			ret |= mlx5_devx_regex_register_write
-					(priv->ctx, id,
+					(priv->cdev->ctx, id,
 					 MLX5_RXP_RTRU_CSR_DATA_0, temp);
 			temp = (uint32_t)(val >> 32);
 			ret |= mlx5_devx_regex_register_write
-					(priv->ctx, id,
+					(priv->cdev->ctx, id,
 					 MLX5_RXP_RTRU_CSR_DATA_0 +
 					 MLX5_RXP_CSR_WIDTH, temp);
 			temp = address;
 			ret |= mlx5_devx_regex_register_write
-					(priv->ctx, id, MLX5_RXP_RTRU_CSR_ADDR,
-					 temp);
+					(priv->cdev->ctx, id,
+					 MLX5_RXP_RTRU_CSR_ADDR, temp);
 			if (ret) {
 				DRV_LOG(ERR,
 					"Failed to copy instructions to RXP.");
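The three accumulated writes above are one logical operation: a 64-bit ROF instruction word goes out through two adjacent 32-bit data CSRs, then an address CSR write latches it, and `ret |=` defers the error check until the triplet completes. Spelled out with the same helper (sketch; `val` and `address` stand for the loop variables above):

	ret = 0;
	ret |= mlx5_devx_regex_register_write(priv->cdev->ctx, id,
			MLX5_RXP_RTRU_CSR_DATA_0, (uint32_t)val);	/* low half */
	ret |= mlx5_devx_regex_register_write(priv->cdev->ctx, id,
			MLX5_RXP_RTRU_CSR_DATA_0 + MLX5_RXP_CSR_WIDTH,
			(uint32_t)(val >> 32));				/* high half */
	ret |= mlx5_devx_regex_register_write(priv->cdev->ctx, id,
			MLX5_RXP_RTRU_CSR_ADDR, address);		/* latch */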
@@ -506,13 +506,13 @@ mlnx_set_database(struct mlx5_regex_priv *priv, uint8_t id, uint8_t db_to_use)
 	int ret;
 	uint32_t umem_id;
 
-	ret = mlx5_devx_regex_database_stop(priv->ctx, id);
+	ret = mlx5_devx_regex_database_stop(priv->cdev->ctx, id);
 	if (ret < 0) {
 		DRV_LOG(ERR, "stop engine failed!");
 		return ret;
 	}
 	umem_id = mlx5_os_get_umem_id(priv->db[db_to_use].umem.umem);
-	ret = mlx5_devx_regex_database_program(priv->ctx, id, umem_id, 0);
+	ret = mlx5_devx_regex_database_program(priv->cdev->ctx, id, umem_id, 0);
 	if (ret < 0) {
 		DRV_LOG(ERR, "program db failed!");
 		return ret;
@@ -523,7 +523,7 @@ mlnx_set_database(struct mlx5_regex_priv *priv, uint8_t id, uint8_t db_to_use)
 static int
 mlnx_resume_database(struct mlx5_regex_priv *priv, uint8_t id)
 {
-	mlx5_devx_regex_database_resume(priv->ctx, id);
+	mlx5_devx_regex_database_resume(priv->cdev->ctx, id);
 	return 0;
 }
 
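Across these two hunks the engine/database protocol is untouched; only the context argument changes. The required ordering, stitched together from the calls above (sketch):

	/* Swap the rule database under engine `id`: stop, repoint, resume. */
	ret = mlx5_devx_regex_database_stop(priv->cdev->ctx, id);
	if (ret < 0)
		return ret;
	umem_id = mlx5_os_get_umem_id(priv->db[db_to_use].umem.umem);
	ret = mlx5_devx_regex_database_program(priv->cdev->ctx, id, umem_id, 0);
	if (ret < 0)
		return ret;
	mlx5_devx_regex_database_resume(priv->cdev->ctx, id);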
@@ -588,13 +588,13 @@ program_rxp_rules(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
 {
 	int ret;
 	uint32_t val;
+	struct ibv_context *ctx = priv->cdev->ctx;
 
 	ret = rxp_init_eng(priv, id);
 	if (ret < 0)
 		return ret;
 	/* Confirm the RXP is initialised. */
-	if (mlx5_devx_regex_register_read(priv->ctx, id,
-					  MLX5_RXP_CSR_STATUS, &val)) {
+	if (mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_STATUS, &val)) {
 		DRV_LOG(ERR, "Failed to read from RXP!");
 		return -ENODEV;
 	}
@@ -602,14 +602,14 @@ program_rxp_rules(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
 		DRV_LOG(ERR, "RXP not initialised...");
 		return -EBUSY;
 	}
-	ret = mlx5_devx_regex_register_read(priv->ctx, id,
+	ret = mlx5_devx_regex_register_read(ctx, id,
 					    MLX5_RXP_RTRU_CSR_CTRL, &val);
 	if (ret) {
 		DRV_LOG(ERR, "CSR read failed!");
 		return -1;
 	}
 	val |= MLX5_RXP_RTRU_CSR_CTRL_GO;
-	ret = mlx5_devx_regex_register_write(priv->ctx, id,
+	ret = mlx5_devx_regex_register_write(ctx, id,
 					     MLX5_RXP_RTRU_CSR_CTRL, val);
 	if (ret) {
 		DRV_LOG(ERR, "Can't program rof file!");
@@ -622,7 +622,7 @@ program_rxp_rules(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
 	}
 	if (priv->is_bf2) {
 		ret = rxp_poll_csr_for_value
-			(priv->ctx, &val, MLX5_RXP_RTRU_CSR_STATUS,
+			(ctx, &val, MLX5_RXP_RTRU_CSR_STATUS,
 			 MLX5_RXP_RTRU_CSR_STATUS_UPDATE_DONE,
 			 MLX5_RXP_RTRU_CSR_STATUS_UPDATE_DONE,
 			 MLX5_RXP_POLL_CSR_FOR_VALUE_TIMEOUT, id);
@@ -632,30 +632,27 @@ program_rxp_rules(struct mlx5_regex_priv *priv, const char *buf, uint32_t len,
 		}
 		DRV_LOG(DEBUG, "Rules update took %d cycles", ret);
 	}
-	if (mlx5_devx_regex_register_read(priv->ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
+	if (mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
 					  &val)) {
 		DRV_LOG(ERR, "CSR read failed!");
 		return -1;
 	}
 	val &= ~(MLX5_RXP_RTRU_CSR_CTRL_GO);
-	if (mlx5_devx_regex_register_write(priv->ctx, id,
+	if (mlx5_devx_regex_register_write(ctx, id,
 					   MLX5_RXP_RTRU_CSR_CTRL, val)) {
 		DRV_LOG(ERR, "CSR write failed!");
 		return -1;
 	}
-	ret = mlx5_devx_regex_register_read(priv->ctx, id, MLX5_RXP_CSR_CTRL,
-					    &val);
+	ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &val);
 	if (ret)
 		return ret;
 	val &= ~MLX5_RXP_CSR_CTRL_INIT;
-	ret = mlx5_devx_regex_register_write(priv->ctx, id, MLX5_RXP_CSR_CTRL,
-					     val);
+	ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL, val);
 	if (ret)
 		return ret;
 	rxp_init_rtru(priv, id, MLX5_RXP_RTRU_CSR_CTRL_INIT_MODE_L1_L2);
 	if (priv->is_bf2) {
-		ret = rxp_poll_csr_for_value(priv->ctx, &val,
-					     MLX5_RXP_CSR_STATUS,
+		ret = rxp_poll_csr_for_value(ctx, &val, MLX5_RXP_CSR_STATUS,
 					     MLX5_RXP_CSR_STATUS_INIT_DONE,
 					     MLX5_RXP_CSR_STATUS_INIT_DONE,
 					     MLX5_RXP_CSR_STATUS_TRIAL_TIMEOUT,
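A readability change rides along in `program_rxp_rules()`: `priv->cdev->ctx` is longer than `priv->ctx`, so the function fetches it once into a local, and several previously wrapped calls collapse back to a single line:

	struct ibv_context *ctx = priv->cdev->ctx;	/* fetched once */

	ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &val);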
@@ -680,7 +677,7 @@ rxp_init_eng(struct mlx5_regex_priv *priv, uint8_t id)
 {
 	uint32_t ctrl;
 	uint32_t reg;
-	struct ibv_context *ctx = priv->ctx;
+	struct ibv_context *ctx = priv->cdev->ctx;
 	int ret;
 
 	ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &ctrl);
@@ -758,9 +755,10 @@ rxp_db_setup(struct mlx5_regex_priv *priv)
 			goto tidyup_error;
 		}
 		/* Register the memory. */
-		priv->db[i].umem.umem = mlx5_glue->devx_umem_reg(priv->ctx,
-							priv->db[i].ptr,
-							MLX5_MAX_DB_SIZE, 7);
+		priv->db[i].umem.umem = mlx5_glue->devx_umem_reg
+							(priv->cdev->ctx,
+							 priv->db[i].ptr,
+							 MLX5_MAX_DB_SIZE, 7);
 		if (!priv->db[i].umem.umem) {
 			DRV_LOG(ERR, "Failed to register memory!");
 			ret = ENODEV;
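`devx_umem_reg()` keeps its shape as well; only the context argument moves. The final `7` is the umem access-flags value; reading it as local-write | remote-write | remote-read follows the usual IBV access bit layout, but that interpretation is an assumption, not something this patch states:

	/* Sketch: register the database memory so the device can DMA it. */
	priv->db[i].umem.umem = mlx5_glue->devx_umem_reg(priv->cdev->ctx,
							 priv->db[i].ptr,
							 MLX5_MAX_DB_SIZE,
							 7 /* access flags */);
	if (priv->db[i].umem.umem == NULL)
		ret = ENODEV;	/* matches the error path above */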
@@ -804,14 +802,14 @@ mlx5_regex_rules_db_import(struct rte_regexdev *dev,
 	}
 	if (rule_db_len == 0)
 		return -EINVAL;
-	if (mlx5_devx_regex_register_read(priv->ctx, 0,
+	if (mlx5_devx_regex_register_read(priv->cdev->ctx, 0,
 					  MLX5_RXP_CSR_BASE_ADDRESS, &ver)) {
 		DRV_LOG(ERR, "Failed to read Main CSRs Engine 0!");
 		return -1;
 	}
 	/* Need to ensure RXP not busy before stop! */
 	for (id = 0; id < priv->nb_engines; id++) {
-		ret = rxp_stop_engine(priv->ctx, id);
+		ret = rxp_stop_engine(priv->cdev->ctx, id);
 		if (ret) {
 			DRV_LOG(ERR, "Can't stop engine.");
 			ret = -ENODEV;
@@ -823,7 +821,7 @@ mlx5_regex_rules_db_import(struct rte_regexdev *dev,
 			ret = -ENODEV;
 			goto tidyup_error;
 		}
-		ret = rxp_start_engine(priv->ctx, id);
+		ret = rxp_start_engine(priv->cdev->ctx, id);
 		if (ret) {
 			DRV_LOG(ERR, "Can't start engine.");
 			ret = -ENODEV;