common/mlx5: share MR management
Add global shared MR cache as a field of common device structure.
Move MR management to use this global cache for all drivers.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
commit 9f1d636f3e (parent fb690f71bd)
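The driver-side effect of this commit is easiest to see outside the diff. Below is a minimal sketch, not code from this commit: the queue structure and function names (`my_qp`, `my_qp_setup`, `my_mbuf_lkey`) are hypothetical, while `mlx5_mr_ctrl_init()`, `mlx5_mr_mb2mr()` and the `cdev->mr_scache` field are the interfaces this patch introduces or reworks. Per-queue MR caches now bind to the per-device shared cache in `struct mlx5_common_device`, so class drivers no longer carry their own `mlx5_mr_share_cache` nor pass one on every lookup.

#include <rte_mbuf.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>

/* Hypothetical driver queue; only mr_ctrl matters for this sketch. */
struct my_qp {
	struct mlx5_mr_ctrl mr_ctrl; /* Per-queue MR lookup cache. */
};

static int
my_qp_setup(struct mlx5_common_device *cdev, struct my_qp *qp, int socket)
{
	/* Bind the per-queue cache to the common device's generation
	 * counter, so a global cache flush invalidates it as well. */
	return mlx5_mr_ctrl_init(&qp->mr_ctrl, &cdev->mr_scache.dev_gen,
				 socket);
}

static uint32_t
my_mbuf_lkey(struct mlx5_common_device *cdev, struct my_qp *qp,
	     struct rte_mbuf *mbuf)
{
	/* The share-cache argument is gone; on a miss the bottom half
	 * falls back to cdev->mr_scache internally. Passing 0 for the
	 * mp_id pointer follows the compress/crypto/RegEx call sites
	 * in this diff. */
	return mlx5_mr_mb2mr(cdev, 0, &qp->mr_ctrl, mbuf);
}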
@@ -308,6 +308,41 @@ mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
 #endif
 }
 
+/**
+ * Callback for memory event.
+ *
+ * @param event_type
+ *   Memory event type.
+ * @param addr
+ *   Address of memory.
+ * @param len
+ *   Size of memory.
+ */
+static void
+mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+		     size_t len, void *arg __rte_unused)
+{
+	struct mlx5_common_device *cdev;
+
+	/* Must be called from the primary process. */
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	switch (event_type) {
+	case RTE_MEM_EVENT_FREE:
+		pthread_mutex_lock(&devices_list_lock);
+		/* Iterate all the existing mlx5 devices. */
+		TAILQ_FOREACH(cdev, &devices_list, next)
+			mlx5_free_mr_by_addr(&cdev->mr_scache,
+					     mlx5_os_get_ctx_device_name
+							      (cdev->ctx),
+					     addr, len);
+		pthread_mutex_unlock(&devices_list_lock);
+		break;
+	case RTE_MEM_EVENT_ALLOC:
+	default:
+		break;
+	}
+}
+
 /**
  * Uninitialize all HW global of device context.
  *
@@ -376,8 +411,13 @@ mlx5_common_dev_release(struct mlx5_common_device *cdev)
 	pthread_mutex_lock(&devices_list_lock);
 	TAILQ_REMOVE(&devices_list, cdev, next);
 	pthread_mutex_unlock(&devices_list_lock);
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (TAILQ_EMPTY(&devices_list))
+			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
+							  NULL);
+		mlx5_mr_release_cache(&cdev->mr_scache);
 		mlx5_dev_hw_global_release(cdev);
+	}
 	rte_free(cdev);
 }
 
@@ -412,6 +452,18 @@ mlx5_common_dev_create(struct rte_device *eal_dev, uint32_t classes)
 		rte_free(cdev);
 		return NULL;
 	}
+	/* Initialize global MR cache resources and update its functions. */
+	ret = mlx5_mr_create_cache(&cdev->mr_scache, eal_dev->numa_node);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to initialize global MR share cache.");
+		mlx5_dev_hw_global_release(cdev);
+		rte_free(cdev);
+		return NULL;
+	}
+	/* Register callback function for global shared MR cache management. */
+	if (TAILQ_EMPTY(&devices_list))
+		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+						mlx5_mr_mem_event_cb, NULL);
 exit:
 	pthread_mutex_lock(&devices_list_lock);
 	TAILQ_INSERT_HEAD(&devices_list, cdev, next);
@@ -350,6 +350,7 @@ struct mlx5_common_device {
 	void *ctx; /* Verbs/DV/DevX context. */
 	void *pd; /* Protection Domain. */
 	uint32_t pdn; /* Protection Domain Number. */
+	struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
 	struct mlx5_common_dev_config config; /* Device configuration. */
 };
 
@@ -453,8 +454,7 @@ mlx5_dev_is_pci(const struct rte_device *dev);
 __rte_internal
 uint32_t
 mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
-	      struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf,
-	      struct mlx5_mr_share_cache *share_cache);
+	      struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf);
 
 /* mlx5_common_os.c */
 
@@ -1848,16 +1848,13 @@ mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
  *   Pointer to per-queue MR control structure.
  * @param mbuf
  *   Pointer to mbuf.
- * @param share_cache
- *   Pointer to a global shared MR cache.
  *
  * @return
  *   Searched LKey on success, UINT32_MAX on no match.
  */
 uint32_t
 mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
-	      struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf,
-	      struct mlx5_mr_share_cache *share_cache)
+	      struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf)
 {
 	uint32_t lkey;
 	uintptr_t addr = (uintptr_t)mbuf->buf_addr;
@@ -1871,6 +1868,6 @@ mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
 	if (likely(lkey != UINT32_MAX))
 		return lkey;
 	/* Take slower bottom-half on miss. */
-	return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, share_cache, mr_ctrl,
+	return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, &cdev->mr_scache, mr_ctrl,
 				  addr, cdev->config.mr_ext_memseg_en);
 }
@@ -140,9 +140,7 @@
 uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
 			       struct mlx5_mr_ctrl *mr_ctrl,
 			       struct rte_mempool *mp, uintptr_t addr);
-__rte_internal
 void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
-__rte_internal
 int mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket);
 __rte_internal
 void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
@@ -150,7 +148,6 @@
 void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
 __rte_internal
 void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
-__rte_internal
 void mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
 			  const char *ibdev_name, const void *addr, size_t len);
 __rte_internal
@@ -183,7 +180,6 @@
 void
 mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);
 
-__rte_internal
 void
 mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb);
 
@@ -109,7 +109,6 @@ INTERNAL {
	mlx5_mr_addr2mr_bh;
	mlx5_mr_btree_dump;
	mlx5_mr_btree_free;
-	mlx5_mr_create_cache;
	mlx5_mr_create_primary;
	mlx5_mr_ctrl_init;
	mlx5_mr_dump_cache;
@@ -119,9 +118,7 @@ INTERNAL {
	mlx5_mr_lookup_cache;
	mlx5_mr_lookup_list;
	mlx5_mr_mb2mr;
-	mlx5_free_mr_by_addr;
	mlx5_mr_rebuild_cache;
-	mlx5_mr_release_cache;
 
	mlx5_nl_allmulti; # WINDOWS_NO_EXPORT
	mlx5_nl_ifindex; # WINDOWS_NO_EXPORT
@@ -139,7 +136,6 @@ INTERNAL {
 
	mlx5_os_umem_dereg;
	mlx5_os_umem_reg;
-	mlx5_os_set_reg_mr_cb;
 
	mlx5_realloc;
 
@@ -43,7 +43,6 @@ struct mlx5_compress_priv {
 	struct rte_compressdev_config dev_config;
 	LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
 	rte_spinlock_t xform_sl;
-	struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
 	volatile uint64_t *uar_addr;
 	/* HCA caps*/
 	uint32_t mmo_decomp_sq:1;
@@ -206,7 +205,7 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 		return -rte_errno;
 	}
 	dev->data->queue_pairs[qp_id] = qp;
-	if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,
+	if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
 			      priv->dev_config.socket_id)) {
 		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
 			(uint32_t)qp_id);
@@ -444,8 +443,7 @@ mlx5_compress_dseg_set(struct mlx5_compress_qp *qp,
 	uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);
 
 	dseg->bcount = rte_cpu_to_be_32(len);
-	dseg->lkey = mlx5_mr_mb2mr(qp->priv->cdev, 0, &qp->mr_ctrl, mbuf,
-				   &qp->priv->mr_scache);
+	dseg->lkey = mlx5_mr_mb2mr(qp->priv->cdev, 0, &qp->mr_ctrl, mbuf);
 	dseg->pbuf = rte_cpu_to_be_64(addr);
 	return dseg->lkey;
 }
@@ -679,41 +677,6 @@ mlx5_compress_uar_prepare(struct mlx5_compress_priv *priv)
 	return 0;
 }
 
-/**
- * Callback for memory event.
- *
- * @param event_type
- *   Memory event type.
- * @param addr
- *   Address of memory.
- * @param len
- *   Size of memory.
- */
-static void
-mlx5_compress_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
-			      size_t len, void *arg __rte_unused)
-{
-	struct mlx5_compress_priv *priv;
-
-	/* Must be called from the primary process. */
-	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
-	switch (event_type) {
-	case RTE_MEM_EVENT_FREE:
-		pthread_mutex_lock(&priv_list_lock);
-		/* Iterate all the existing mlx5 devices. */
-		TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
-			mlx5_free_mr_by_addr(&priv->mr_scache,
-					     mlx5_os_get_ctx_device_name
-							(priv->cdev->ctx),
-					     addr, len);
-		pthread_mutex_unlock(&priv_list_lock);
-		break;
-	case RTE_MEM_EVENT_ALLOC:
-	default:
-		break;
-	}
-}
-
 static int
 mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
 {
@@ -765,18 +728,6 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
 		rte_compressdev_pmd_destroy(priv->compressdev);
 		return -1;
 	}
-	if (mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()) != 0) {
-		DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
-		mlx5_compress_uar_release(priv);
-		rte_compressdev_pmd_destroy(priv->compressdev);
-		rte_errno = ENOMEM;
-		return -rte_errno;
-	}
-	/* Register callback function for global shared MR cache management. */
-	if (TAILQ_EMPTY(&mlx5_compress_priv_list))
-		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
-						mlx5_compress_mr_mem_event_cb,
-						NULL);
 	pthread_mutex_lock(&priv_list_lock);
 	TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);
 	pthread_mutex_unlock(&priv_list_lock);
@@ -796,10 +747,6 @@ mlx5_compress_dev_remove(struct mlx5_common_device *cdev)
 	TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
 	pthread_mutex_unlock(&priv_list_lock);
 	if (priv) {
-		if (TAILQ_EMPTY(&mlx5_compress_priv_list))
-			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
-							  NULL);
-		mlx5_mr_release_cache(&priv->mr_scache);
 		mlx5_compress_uar_release(priv);
 		rte_compressdev_pmd_destroy(priv->compressdev);
 	}
@@ -316,8 +316,7 @@ mlx5_crypto_klm_set(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp,
 	*remain -= data_len;
 	klm->bcount = rte_cpu_to_be_32(data_len);
 	klm->pbuf = rte_cpu_to_be_64(addr);
-	klm->lkey = mlx5_mr_mb2mr(priv->cdev, 0, &qp->mr_ctrl, mbuf,
-				  &priv->mr_scache);
+	klm->lkey = mlx5_mr_mb2mr(priv->cdev, 0, &qp->mr_ctrl, mbuf);
 	return klm->lkey;
 
 }
@@ -643,7 +642,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		DRV_LOG(ERR, "Failed to create QP.");
 		goto error;
 	}
-	if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,
+	if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
 			      priv->dev_config.socket_id) != 0) {
 		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
 			(uint32_t)qp_id);
@@ -844,41 +843,6 @@ mlx5_crypto_parse_devargs(struct rte_devargs *devargs,
 	return 0;
 }
 
-/**
- * Callback for memory event.
- *
- * @param event_type
- *   Memory event type.
- * @param addr
- *   Address of memory.
- * @param len
- *   Size of memory.
- */
-static void
-mlx5_crypto_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
-			    size_t len, void *arg __rte_unused)
-{
-	struct mlx5_crypto_priv *priv;
-
-	/* Must be called from the primary process. */
-	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
-	switch (event_type) {
-	case RTE_MEM_EVENT_FREE:
-		pthread_mutex_lock(&priv_list_lock);
-		/* Iterate all the existing mlx5 devices. */
-		TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next)
-			mlx5_free_mr_by_addr(&priv->mr_scache,
-					     mlx5_os_get_ctx_device_name
-							(priv->cdev->ctx),
-					     addr, len);
-		pthread_mutex_unlock(&priv_list_lock);
-		break;
-	case RTE_MEM_EVENT_ALLOC:
-	default:
-		break;
-	}
-}
-
 static int
 mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
 {
@@ -940,13 +904,6 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
 		rte_cryptodev_pmd_destroy(priv->crypto_dev);
 		return -1;
 	}
-	if (mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()) != 0) {
-		DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
-		mlx5_crypto_uar_release(priv);
-		rte_cryptodev_pmd_destroy(priv->crypto_dev);
-		rte_errno = ENOMEM;
-		return -rte_errno;
-	}
 	priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);
 	priv->max_segs_num = devarg_prms.max_segs_num;
 	priv->umr_wqe_size = sizeof(struct mlx5_wqe_umr_bsf_seg) +
|
|||||||
priv->wqe_set_size = priv->umr_wqe_size + rdmw_wqe_size;
|
priv->wqe_set_size = priv->umr_wqe_size + rdmw_wqe_size;
|
||||||
priv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB;
|
priv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB;
|
||||||
priv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg);
|
priv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg);
|
||||||
/* Register callback function for global shared MR cache management. */
|
|
||||||
if (TAILQ_EMPTY(&mlx5_crypto_priv_list))
|
|
||||||
rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
|
|
||||||
mlx5_crypto_mr_mem_event_cb,
|
|
||||||
NULL);
|
|
||||||
pthread_mutex_lock(&priv_list_lock);
|
pthread_mutex_lock(&priv_list_lock);
|
||||||
TAILQ_INSERT_TAIL(&mlx5_crypto_priv_list, priv, next);
|
TAILQ_INSERT_TAIL(&mlx5_crypto_priv_list, priv, next);
|
||||||
pthread_mutex_unlock(&priv_list_lock);
|
pthread_mutex_unlock(&priv_list_lock);
|
||||||
@@ -984,10 +936,6 @@ mlx5_crypto_dev_remove(struct mlx5_common_device *cdev)
 	TAILQ_REMOVE(&mlx5_crypto_priv_list, priv, next);
 	pthread_mutex_unlock(&priv_list_lock);
 	if (priv) {
-		if (TAILQ_EMPTY(&mlx5_crypto_priv_list))
-			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
-							  NULL);
-		mlx5_mr_release_cache(&priv->mr_scache);
 		mlx5_crypto_uar_release(priv);
 		rte_cryptodev_pmd_destroy(priv->crypto_dev);
 		claim_zero(mlx5_devx_cmd_destroy(priv->login_obj));
@@ -26,7 +26,6 @@ struct mlx5_crypto_priv {
 	uint32_t max_segs_num; /* Maximum supported data segs. */
 	struct mlx5_hlist *dek_hlist; /* Dek hash list. */
 	struct rte_cryptodev_config dev_config;
-	struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
 	struct mlx5_devx_obj *login_obj;
 	uint64_t keytag;
 	uint16_t wqe_set_size;
@@ -91,7 +91,7 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
 	case MLX5_MP_REQ_CREATE_MR:
 		mp_init_msg(&priv->mp_id, &mp_res, param->type);
 		lkey = mlx5_mr_create_primary(cdev->pd,
-					      &priv->sh->share_cache,
+					      &priv->sh->cdev->mr_scache,
 					      &entry, param->args.addr,
 					      cdev->config.mr_ext_memseg_en);
 		if (lkey == UINT32_MAX)
@@ -44,7 +44,6 @@
 #include "mlx5_rx.h"
 #include "mlx5_tx.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
 #include "mlx5_flow.h"
 #include "rte_pmd_mlx5.h"
 #include "mlx5_verbs.h"
@@ -623,10 +622,6 @@ mlx5_init_once(void)
 	case RTE_PROC_PRIMARY:
 		if (sd->init_done)
 			break;
-		LIST_INIT(&sd->mem_event_cb_list);
-		rte_rwlock_init(&sd->mem_event_rwlock);
-		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
-						mlx5_mr_mem_event_cb, NULL);
 		ret = mlx5_mp_init_primary(MLX5_MP_NAME,
 					   mlx5_mp_os_primary_handle);
 		if (ret)
@@ -36,7 +36,6 @@
 #include "mlx5_rx.h"
 #include "mlx5_tx.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
 #include "mlx5_flow.h"
 #include "mlx5_flow_os.h"
 #include "rte_pmd_mlx5.h"
@@ -1142,7 +1141,7 @@ mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
 	struct mlx5_mp_id mp_id;
 
 	mlx5_mp_id_init(&mp_id, 0);
-	if (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0)
+	if (mlx5_mr_mempool_unregister(&sh->cdev->mr_scache, mp, &mp_id) < 0)
 		DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
 			mp->name, sh->cdev->pd, rte_strerror(rte_errno));
 }
@@ -1164,7 +1163,7 @@ mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
 	int ret;
 
 	mlx5_mp_id_init(&mp_id, 0);
-	ret = mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,
+	ret = mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd, mp,
 				       &mp_id);
 	if (ret < 0 && rte_errno != EEXIST)
 		DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
@@ -1207,8 +1206,8 @@ mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,
 	switch (event) {
 	case RTE_MEMPOOL_EVENT_READY:
 		mlx5_mp_id_init(&mp_id, 0);
-		if (mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,
-					     &mp_id) < 0)
+		if (mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd,
+					     mp, &mp_id) < 0)
 			DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
 				mp->name, sh->cdev->pd,
 				rte_strerror(rte_errno));
@@ -1372,20 +1371,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
 		rte_spinlock_init(&sh->uar_lock[i]);
 #endif
-	/*
-	 * Once the device is added to the list of memory event
-	 * callback, its global MR cache table cannot be expanded
-	 * on the fly because of deadlock. If it overflows, lookup
-	 * should be done by searching MR list linearly, which is slow.
-	 *
-	 * At this point the device is not added to the memory
-	 * event list yet, context is just being created.
-	 */
-	err = mlx5_mr_create_cache(&sh->share_cache, sh->numa_node);
-	if (err) {
-		err = rte_errno;
-		goto error;
-	}
 	mlx5_os_dev_shared_handler_install(sh);
 	if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
 		err = mlx5_flow_os_init_workspace_once();
@@ -1395,11 +1380,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		mlx5_flow_aging_init(sh);
 		mlx5_flow_counters_mng_init(sh);
 		mlx5_flow_ipool_create(sh, config);
-		/* Add device to memory callback list. */
-		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-		LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
-				 sh, mem_event_cb);
-		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
 		/* Add context to the global device list. */
 		LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
 		rte_spinlock_init(&sh->geneve_tlv_opt_sl);
@@ -1410,8 +1390,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	pthread_mutex_destroy(&sh->txpp.mutex);
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	MLX5_ASSERT(sh);
-	if (sh->share_cache.cache.table)
-		mlx5_mr_btree_free(&sh->share_cache.cache);
 	if (sh->tis)
 		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
 	if (sh->td)
@@ -1467,12 +1445,6 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	if (ret == 0)
 		rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb,
 				 sh);
-	/* Remove from memory callback device list. */
-	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-	LIST_REMOVE(sh, mem_event_cb);
-	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
-	/* Release created Memory Regions. */
-	mlx5_mr_release_cache(&sh->share_cache);
 	/* Remove context from the global device list. */
 	LIST_REMOVE(sh, next);
 	/* Release flow workspaces objects on the last device. */
@@ -1135,9 +1135,6 @@ struct mlx5_dev_ctx_shared {
 	char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
 	struct mlx5_dev_attr device_attr; /* Device properties. */
 	int numa_node; /* Numa node of backing physical device. */
-	LIST_ENTRY(mlx5_dev_ctx_shared) mem_event_cb;
-	/**< Called by memory event callback. */
-	struct mlx5_mr_share_cache share_cache;
 	/* Packet pacing related structure. */
 	struct mlx5_dev_txpp txpp;
 	/* Shared DV/DR flow data section. */
@@ -60,17 +60,17 @@ mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
 /**
  * Free MR resources.
  *
- * @param[in] sh
- *   Pointer to shared device context.
+ * @param[in] cdev
+ *   Pointer to the mlx5 common device.
  * @param[in] mr
  *   MR to free.
  */
 static void
-mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr)
+mlx5_aso_dereg_mr(struct mlx5_common_device *cdev, struct mlx5_pmd_mr *mr)
 {
 	void *addr = mr->addr;
 
-	sh->share_cache.dereg_mr_cb(mr);
+	cdev->mr_scache.dereg_mr_cb(mr);
 	mlx5_free(addr);
 	memset(mr, 0, sizeof(*mr));
 }
@@ -78,8 +78,8 @@ mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr)
 /**
  * Register Memory Region.
  *
- * @param[in] sh
- *   Pointer to shared device context.
+ * @param[in] cdev
+ *   Pointer to the mlx5 common device.
  * @param[in] length
  *   Size of MR buffer.
  * @param[in/out] mr
@@ -91,7 +91,7 @@ mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr)
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,
+mlx5_aso_reg_mr(struct mlx5_common_device *cdev, size_t length,
 		struct mlx5_pmd_mr *mr, int socket)
 {
 
@@ -103,7 +103,7 @@ mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,
 		DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
 		return -1;
 	}
-	ret = sh->share_cache.reg_mr_cb(sh->cdev->pd, mr->addr, length, mr);
+	ret = cdev->mr_scache.reg_mr_cb(cdev->pd, mr->addr, length, mr);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to create direct Mkey.");
 		mlx5_free(mr->addr);
@@ -313,14 +313,14 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
 
 	switch (aso_opc_mod) {
 	case ASO_OPC_MOD_FLOW_HIT:
-		if (mlx5_aso_reg_mr(sh, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
+		if (mlx5_aso_reg_mr(cdev, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
 				    sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
 			return -1;
 		if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,
 				       sh->tx_uar, cdev->pdn,
 				       MLX5_ASO_QUEUE_LOG_DESC,
 				       cdev->config.hca_attr.sq_ts_format)) {
-			mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
+			mlx5_aso_dereg_mr(cdev, &sh->aso_age_mng->aso_sq.mr);
 			return -1;
 		}
 		mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
@@ -335,14 +335,14 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
 		break;
 	case ASO_OPC_MOD_CONNECTION_TRACKING:
 		/* 64B per object for query. */
-		if (mlx5_aso_reg_mr(sh, 64 * sq_desc_n,
+		if (mlx5_aso_reg_mr(cdev, 64 * sq_desc_n,
 				    &sh->ct_mng->aso_sq.mr, 0))
 			return -1;
 		if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,
 				       sh->tx_uar, cdev->pdn,
 				       MLX5_ASO_QUEUE_LOG_DESC,
 				       cdev->config.hca_attr.sq_ts_format)) {
-			mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
+			mlx5_aso_dereg_mr(cdev, &sh->ct_mng->aso_sq.mr);
 			return -1;
 		}
 		mlx5_aso_ct_init_sq(&sh->ct_mng->aso_sq);
@@ -370,14 +370,14 @@ mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
 
 	switch (aso_opc_mod) {
 	case ASO_OPC_MOD_FLOW_HIT:
-		mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
+		mlx5_aso_dereg_mr(sh->cdev, &sh->aso_age_mng->aso_sq.mr);
 		sq = &sh->aso_age_mng->aso_sq;
 		break;
 	case ASO_OPC_MOD_POLICER:
 		sq = &sh->mtrmng->pools_mng.sq;
 		break;
 	case ASO_OPC_MOD_CONNECTION_TRACKING:
-		mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
+		mlx5_aso_dereg_mr(sh->cdev, &sh->ct_mng->aso_sq.mr);
 		sq = &sh->ct_mng->aso_sq;
 		break;
 	default:
@@ -12,46 +12,10 @@
 #include <mlx5_common_mr.h>
 
 #include "mlx5.h"
-#include "mlx5_mr.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_rx.h"
 #include "mlx5_tx.h"
 
-/**
- * Callback for memory event. This can be called from both primary and secondary
- * process.
- *
- * @param event_type
- *   Memory event type.
- * @param addr
- *   Address of memory.
- * @param len
- *   Size of memory.
- */
-void
-mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
-		     size_t len, void *arg __rte_unused)
-{
-	struct mlx5_dev_ctx_shared *sh;
-	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
-
-	/* Must be called from the primary process. */
-	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
-	switch (event_type) {
-	case RTE_MEM_EVENT_FREE:
-		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-		/* Iterate all the existing mlx5 devices. */
-		LIST_FOREACH(sh, dev_list, mem_event_cb)
-			mlx5_free_mr_by_addr(&sh->share_cache,
-					     sh->ibdev_name, addr, len);
-		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
-		break;
-	case RTE_MEM_EVENT_ALLOC:
-	default:
-		break;
-	}
-}
-
 /**
  * Bottom-half of LKey search on Tx.
  *
@@ -72,7 +36,7 @@ mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
 	struct mlx5_priv *priv = txq_ctrl->priv;
 
 	return mlx5_mr_addr2mr_bh(priv->sh->cdev->pd, &priv->mp_id,
-				  &priv->sh->share_cache, mr_ctrl, addr,
+				  &priv->sh->cdev->mr_scache, mr_ctrl, addr,
 				  priv->sh->cdev->config.mr_ext_memseg_en);
 }
 
@@ -110,7 +74,7 @@ mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
 		mp = buf->mp;
 	}
 	if (mp != NULL) {
-		lkey = mlx5_mr_mempool2mr_bh(&priv->sh->share_cache,
+		lkey = mlx5_mr_mempool2mr_bh(&priv->sh->cdev->mr_scache,
 					     mr_ctrl, mp, addr);
 		/*
 		 * Lookup can only fail on invalid input, e.g. "addr"
@@ -169,7 +133,7 @@ mlx5_net_dma_map(struct rte_device *rte_dev, void *addr,
 	struct rte_eth_dev *dev;
 	struct mlx5_mr *mr;
 	struct mlx5_priv *priv;
-	struct mlx5_dev_ctx_shared *sh;
+	struct mlx5_common_device *cdev;
 
 	dev = dev_to_eth_dev(rte_dev);
 	if (!dev) {
@@ -179,20 +143,20 @@ mlx5_net_dma_map(struct rte_device *rte_dev, void *addr,
 		return -1;
 	}
 	priv = dev->data->dev_private;
-	sh = priv->sh;
-	mr = mlx5_create_mr_ext(sh->cdev->pd, (uintptr_t)addr, len,
-				SOCKET_ID_ANY, sh->share_cache.reg_mr_cb);
+	cdev = priv->sh->cdev;
+	mr = mlx5_create_mr_ext(cdev->pd, (uintptr_t)addr, len,
+				SOCKET_ID_ANY, cdev->mr_scache.reg_mr_cb);
 	if (!mr) {
 		DRV_LOG(WARNING,
 			"port %u unable to dma map", dev->data->port_id);
 		rte_errno = EINVAL;
 		return -1;
 	}
-	rte_rwlock_write_lock(&sh->share_cache.rwlock);
-	LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
+	rte_rwlock_write_lock(&cdev->mr_scache.rwlock);
+	LIST_INSERT_HEAD(&cdev->mr_scache.mr_list, mr, mr);
 	/* Insert to the global cache table. */
-	mlx5_mr_insert_cache(&sh->share_cache, mr);
-	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+	mlx5_mr_insert_cache(&cdev->mr_scache, mr);
+	rte_rwlock_write_unlock(&cdev->mr_scache.rwlock);
 	return 0;
 }
 
@@ -217,7 +181,7 @@ mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr,
 {
 	struct rte_eth_dev *dev;
 	struct mlx5_priv *priv;
-	struct mlx5_dev_ctx_shared *sh;
+	struct mlx5_common_device *cdev;
 	struct mlx5_mr *mr;
 	struct mr_cache_entry entry;
 
@@ -229,11 +193,11 @@ mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr,
 		return -1;
 	}
 	priv = dev->data->dev_private;
-	sh = priv->sh;
-	rte_rwlock_write_lock(&sh->share_cache.rwlock);
-	mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr);
+	cdev = priv->sh->cdev;
+	rte_rwlock_write_lock(&cdev->mr_scache.rwlock);
+	mr = mlx5_mr_lookup_list(&cdev->mr_scache, &entry, (uintptr_t)addr);
 	if (!mr) {
-		rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+		rte_rwlock_write_unlock(&cdev->mr_scache.rwlock);
 		DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered to device %s",
 			(uintptr_t)addr, rte_dev->name);
 		rte_errno = EINVAL;
@@ -242,16 +206,16 @@ mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr,
 	LIST_REMOVE(mr, mr);
 	DRV_LOG(DEBUG, "port %u remove MR(%p) from list", dev->data->port_id,
 		(void *)mr);
-	mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);
-	mlx5_mr_rebuild_cache(&sh->share_cache);
+	mlx5_mr_free(mr, cdev->mr_scache.dereg_mr_cb);
+	mlx5_mr_rebuild_cache(&cdev->mr_scache);
 	/*
 	 * No explicit wmb is needed after updating dev_gen due to
 	 * store-release ordering in unlock that provides the
 	 * implicit barrier at the software visible level.
 	 */
-	++sh->share_cache.dev_gen;
+	++cdev->mr_scache.dev_gen;
 	DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
-		sh->share_cache.dev_gen);
-	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+		cdev->mr_scache.dev_gen);
+	rte_rwlock_write_unlock(&cdev->mr_scache.rwlock);
 	return 0;
 }
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox Technologies, Ltd
- */
-
-#ifndef RTE_PMD_MLX5_MR_H_
-#define RTE_PMD_MLX5_MR_H_
-
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/queue.h>
-
-#include <rte_ethdev.h>
-#include <rte_rwlock.h>
-#include <rte_bitmap.h>
-#include <rte_memory.h>
-
-#include <mlx5_common_mr.h>
-
-/* First entry must be NULL for comparison. */
-#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
-
-void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
-			  size_t len, void *arg);
-
-#endif /* RTE_PMD_MLX5_MR_H_ */
@@ -22,7 +22,6 @@
 #include "mlx5_autoconf.h"
 #include "mlx5_defs.h"
 #include "mlx5.h"
-#include "mlx5_mr.h"
 #include "mlx5_utils.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_rx.h"
@@ -18,11 +18,13 @@
 
 #include "mlx5.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
 
 /* Support tunnel matching. */
 #define MLX5_FLOW_TUNNEL 10
 
+/* First entry must be NULL for comparison. */
+#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
+
 struct mlx5_rxq_stats {
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	uint64_t ipackets; /**< Total of successfully received packets. */
@@ -309,7 +311,7 @@ mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
 	 */
 	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
-	return mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->share_cache,
+	return mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->cdev->mr_scache,
 				     mr_ctrl, mp, addr);
 }
 
@@ -1241,7 +1241,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 		rte_errno = ENOMEM;
 		return -rte_errno;
 	}
-	ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
+	ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
 				       priv->sh->cdev->pd, mp, &priv->mp_id);
 	if (ret < 0 && rte_errno != EEXIST) {
 		ret = rte_errno;
@@ -1450,7 +1450,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
 	if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
-			      &priv->sh->share_cache.dev_gen, socket)) {
+			      &priv->sh->cdev->mr_scache.dev_gen, socket)) {
 		/* rte_errno is already set. */
 		goto error;
 	}
@@ -22,7 +22,6 @@
 #include "mlx5_autoconf.h"
 #include "mlx5_defs.h"
 #include "mlx5.h"
-#include "mlx5_mr.h"
 #include "mlx5_utils.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_rx.h"
@@ -24,7 +24,6 @@
 #include "mlx5_utils.h"
 #include "mlx5.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
 
 struct mlx5_priv;
 
@@ -12,7 +12,6 @@
 #include <mlx5_prm.h>
 
 #include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
 
 /* HW checksum offload capabilities of vectorized Tx. */
 #define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
@@ -14,7 +14,6 @@
 #include <mlx5_malloc.h>
 
 #include "mlx5.h"
-#include "mlx5_mr.h"
 #include "mlx5_rx.h"
 #include "mlx5_tx.h"
 #include "mlx5_utils.h"
@@ -148,7 +147,7 @@ mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
 	}
 	for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
 		mp = rxq_ctrl->rxq.rxseg[s].mp;
-		ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
+		ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
 					       priv->sh->cdev->pd, mp,
 					       &priv->mp_id);
 		if (ret < 0 && rte_errno != EEXIST)
@@ -22,7 +22,6 @@
 #include "mlx5_autoconf.h"
 #include "mlx5_defs.h"
 #include "mlx5.h"
-#include "mlx5_mr.h"
 #include "mlx5_utils.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_tx.h"
@@ -18,7 +18,6 @@
 
 #include "mlx5.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
 
 /* TX burst subroutines return codes. */
 enum mlx5_txcmp_code {
@@ -1135,7 +1135,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		return NULL;
 	}
 	if (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl,
-			      &priv->sh->share_cache.dev_gen, socket)) {
+			      &priv->sh->cdev->mr_scache.dev_gen, socket)) {
 		/* rte_errno is already set. */
 		goto error;
 	}
@@ -26,7 +26,6 @@
 #include "mlx5_rx.h"
 #include "mlx5_tx.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
 #include "mlx5_flow.h"
 #include "mlx5_devx.h"
 
@@ -122,21 +121,8 @@ mlx5_init_shared_data(void)
 static int
 mlx5_init_once(void)
 {
-	struct mlx5_shared_data *sd;
-
 	if (mlx5_init_shared_data())
 		return -rte_errno;
-	sd = mlx5_shared_data;
-	rte_spinlock_lock(&sd->lock);
-	MLX5_ASSERT(sd);
-	if (!sd->init_done) {
-		LIST_INIT(&sd->mem_event_cb_list);
-		rte_rwlock_init(&sd->mem_event_rwlock);
-		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
-						mlx5_mr_mem_event_cb, NULL);
-		sd->init_done = true;
-	}
-	rte_spinlock_unlock(&sd->lock);
 	return 0;
 }
 
@@ -25,10 +25,6 @@
 
 int mlx5_regex_logtype;
 
-TAILQ_HEAD(regex_mem_event, mlx5_regex_priv) mlx5_mem_event_list =
-				TAILQ_HEAD_INITIALIZER(mlx5_mem_event_list);
-static pthread_mutex_t mem_event_list_lock = PTHREAD_MUTEX_INITIALIZER;
-
 const struct rte_regexdev_ops mlx5_regexdev_ops = {
 	.dev_info_get = mlx5_regex_info_get,
 	.dev_configure = mlx5_regex_configure,
@@ -86,41 +82,6 @@ mlx5_regex_get_name(char *name, struct rte_device *dev)
 	sprintf(name, "mlx5_regex_%s", dev->name);
 }
 
-/**
- * Callback for memory event.
- *
- * @param event_type
- *   Memory event type.
- * @param addr
- *   Address of memory.
- * @param len
- *   Size of memory.
- */
-static void
-mlx5_regex_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
-			   size_t len, void *arg __rte_unused)
-{
-	struct mlx5_regex_priv *priv;
-
-	/* Must be called from the primary process. */
-	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
-	switch (event_type) {
-	case RTE_MEM_EVENT_FREE:
-		pthread_mutex_lock(&mem_event_list_lock);
-		/* Iterate all the existing mlx5 devices. */
-		TAILQ_FOREACH(priv, &mlx5_mem_event_list, mem_event_cb)
-			mlx5_free_mr_by_addr(&priv->mr_scache,
-					     mlx5_os_get_ctx_device_name
-							(priv->cdev->ctx),
-					     addr, len);
-		pthread_mutex_unlock(&mem_event_list_lock);
-		break;
-	case RTE_MEM_EVENT_ALLOC:
-	default:
-		break;
-	}
-}
-
 static int
 mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
 {
@@ -194,21 +155,6 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
 	priv->regexdev->device = cdev->dev;
 	priv->regexdev->data->dev_private = priv;
 	priv->regexdev->state = RTE_REGEXDEV_READY;
-	ret = mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id());
-	if (ret) {
-		DRV_LOG(ERR, "MR init tree failed.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	/* Register callback function for global shared MR cache management. */
-	if (TAILQ_EMPTY(&mlx5_mem_event_list))
-		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
-						mlx5_regex_mr_mem_event_cb,
-						NULL);
-	/* Add device to memory callback list. */
-	pthread_mutex_lock(&mem_event_list_lock);
-	TAILQ_INSERT_TAIL(&mlx5_mem_event_list, priv, mem_event_cb);
-	pthread_mutex_unlock(&mem_event_list_lock);
 	DRV_LOG(INFO, "RegEx GGA is %s.",
 		priv->has_umr ? "supported" : "unsupported");
 	return 0;
@@ -237,15 +183,6 @@ mlx5_regex_dev_remove(struct mlx5_common_device *cdev)
 		return 0;
 	priv = dev->data->dev_private;
 	if (priv) {
-		/* Remove from memory callback device list. */
-		pthread_mutex_lock(&mem_event_list_lock);
-		TAILQ_REMOVE(&mlx5_mem_event_list, priv, mem_event_cb);
-		pthread_mutex_unlock(&mem_event_list_lock);
-		if (TAILQ_EMPTY(&mlx5_mem_event_list))
-			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
-							  NULL);
-		if (priv->mr_scache.cache.table)
-			mlx5_mr_release_cache(&priv->mr_scache);
 		if (priv->uar)
 			mlx5_glue->devx_free_uar(priv->uar);
 		if (priv->regexdev)
@@ -68,9 +68,6 @@ struct mlx5_regex_priv {
 					MLX5_RXP_EM_COUNT];
 	uint32_t nb_engines; /* Number of RegEx engines. */
 	struct mlx5dv_devx_uar *uar; /* UAR object. */
-	TAILQ_ENTRY(mlx5_regex_priv) mem_event_cb;
-	/**< Called by memory event callback. */
-	struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
 	uint8_t is_bf2; /* The device is BF2 device. */
 	uint8_t has_umr; /* The device supports UMR. */
 	uint32_t mmo_regex_qp_cap:1;
@@ -242,7 +242,7 @@ mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
 		nb_sq_config++;
 	}
 
-	ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,
+	ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
 				rte_socket_id());
 	if (ret) {
 		DRV_LOG(ERR, "Error setting up mr btree");
@@ -126,7 +126,7 @@ static inline uint32_t
 mlx5_regex_mb2mr(struct mlx5_regex_priv *priv, struct mlx5_mr_ctrl *mr_ctrl,
 		 struct rte_mbuf *mbuf)
 {
-	return mlx5_mr_mb2mr(priv->cdev, 0, mr_ctrl, mbuf, &priv->mr_scache);
+	return mlx5_mr_mb2mr(priv->cdev, 0, mr_ctrl, mbuf);
 }
 
 static inline void