common/mlx5: share MR mempool registration

Expand the use of mempool registration to MR management for other drivers.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>

parent a5d06c9006
commit fc59a1ec55
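With the registration code now shared in the common layer, a driver opts in by calling mlx5_dev_mempool_subscribe() from its start hook, exactly as the compress, crypto, and regex changes below do. A minimal sketch of such a hook for a hypothetical driver (struct toy_priv and toy_dev_start() are illustrative names, not part of this patch):

    #include <mlx5_common.h>	/* mlx5_dev_mempool_subscribe() */

    /* Hypothetical driver private data; only the common device matters here. */
    struct toy_priv {
    	struct mlx5_common_device *cdev;
    };

    /* Start hook: subscribing is idempotent because the shared MR cache
     * remembers whether the mempool event callback is already installed. */
    static int
    toy_dev_start(struct toy_priv *priv)
    {
    	return mlx5_dev_mempool_subscribe(priv->cdev);
    }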
@@ -13,6 +13,7 @@
 #include "mlx5_common.h"
 #include "mlx5_common_os.h"
+#include "mlx5_common_mp.h"
 #include "mlx5_common_log.h"
 #include "mlx5_common_defs.h"
 #include "mlx5_common_private.h"
@@ -302,6 +303,152 @@ mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
 #endif
 }
 
+/**
+ * Register the mempool for the protection domain.
+ *
+ * @param cdev
+ *   Pointer to the mlx5 common device.
+ * @param mp
+ *   Mempool being registered.
+ *
+ * @return
+ *   0 on success, (-1) on failure and rte_errno is set.
+ */
+static int
+mlx5_dev_mempool_register(struct mlx5_common_device *cdev,
+			  struct rte_mempool *mp)
+{
+	struct mlx5_mp_id mp_id;
+
+	mlx5_mp_id_init(&mp_id, 0);
+	return mlx5_mr_mempool_register(&cdev->mr_scache, cdev->pd, mp, &mp_id);
+}
+
+/**
+ * Unregister the mempool from the protection domain.
+ *
+ * @param cdev
+ *   Pointer to the mlx5 common device.
+ * @param mp
+ *   Mempool being unregistered.
+ */
+void
+mlx5_dev_mempool_unregister(struct mlx5_common_device *cdev,
+			    struct rte_mempool *mp)
+{
+	struct mlx5_mp_id mp_id;
+
+	mlx5_mp_id_init(&mp_id, 0);
+	if (mlx5_mr_mempool_unregister(&cdev->mr_scache, mp, &mp_id) < 0)
+		DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
+			mp->name, cdev->pd, rte_strerror(rte_errno));
+}
+
+/**
+ * rte_mempool_walk() callback to register mempools for the protection domain.
+ *
+ * @param mp
+ *   The mempool being walked.
+ * @param arg
+ *   Pointer to the device shared context.
+ */
+static void
+mlx5_dev_mempool_register_cb(struct rte_mempool *mp, void *arg)
+{
+	struct mlx5_common_device *cdev = arg;
+	int ret;
+
+	ret = mlx5_dev_mempool_register(cdev, mp);
+	if (ret < 0 && rte_errno != EEXIST)
+		DRV_LOG(ERR,
+			"Failed to register existing mempool %s for PD %p: %s",
+			mp->name, cdev->pd, rte_strerror(rte_errno));
+}
+
+/**
+ * rte_mempool_walk() callback to unregister mempools
+ * from the protection domain.
+ *
+ * @param mp
+ *   The mempool being walked.
+ * @param arg
+ *   Pointer to the device shared context.
+ */
+static void
+mlx5_dev_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
+{
+	mlx5_dev_mempool_unregister((struct mlx5_common_device *)arg, mp);
+}
+
+/**
+ * Mempool life cycle callback for mlx5 common devices.
+ *
+ * @param event
+ *   Mempool life cycle event.
+ * @param mp
+ *   Associated mempool.
+ * @param arg
+ *   Pointer to a device shared context.
+ */
+static void
+mlx5_dev_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
+			  void *arg)
+{
+	struct mlx5_common_device *cdev = arg;
+
+	switch (event) {
+	case RTE_MEMPOOL_EVENT_READY:
+		if (mlx5_dev_mempool_register(cdev, mp) < 0)
+			DRV_LOG(ERR,
+				"Failed to register new mempool %s for PD %p: %s",
+				mp->name, cdev->pd, rte_strerror(rte_errno));
+		break;
+	case RTE_MEMPOOL_EVENT_DESTROY:
+		mlx5_dev_mempool_unregister(cdev, mp);
+		break;
+	}
+}
+
+int
+mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev)
+{
+	int ret = 0;
+
+	if (!cdev->config.mr_mempool_reg_en)
+		return 0;
+	rte_rwlock_write_lock(&cdev->mr_scache.mprwlock);
+	if (cdev->mr_scache.mp_cb_registered)
+		goto exit;
+	/* Callback for this device may be already registered. */
+	ret = rte_mempool_event_callback_register(mlx5_dev_mempool_event_cb,
+						  cdev);
+	if (ret != 0 && rte_errno != EEXIST)
+		goto exit;
+	/* Register mempools only once for this device. */
+	if (ret == 0)
+		rte_mempool_walk(mlx5_dev_mempool_register_cb, cdev);
+	ret = 0;
+	cdev->mr_scache.mp_cb_registered = 1;
+exit:
+	rte_rwlock_write_unlock(&cdev->mr_scache.mprwlock);
+	return ret;
+}
+
+static void
+mlx5_dev_mempool_unsubscribe(struct mlx5_common_device *cdev)
+{
+	int ret;
+
+	if (!cdev->mr_scache.mp_cb_registered ||
+	    !cdev->config.mr_mempool_reg_en)
+		return;
+	/* Stop watching for mempool events and unregister all mempools. */
+	ret = rte_mempool_event_callback_unregister(mlx5_dev_mempool_event_cb,
+						    cdev);
+	if (ret == 0)
+		rte_mempool_walk(mlx5_dev_mempool_unregister_cb, cdev);
+}
+
 /**
  * Callback for memory event.
  *
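The subscribe path above combines two rte_mempool facilities: an event callback that fires for pools reaching RTE_MEMPOOL_EVENT_READY (or being destroyed) after subscription, and rte_mempool_walk() to sweep pools that already existed. A self-contained sketch of the same pattern, with a toy register function standing in for mlx5_dev_mempool_register():

    #include <stdio.h>
    #include <rte_errno.h>
    #include <rte_mempool.h>

    /* Toy registration; rte_mempool_walk() invokes it for each existing pool. */
    static void
    toy_register_cb(struct rte_mempool *mp, void *arg)
    {
    	printf("register mempool %s for device %p\n", mp->name, arg);
    }

    /* Invoked by the mempool library for pools that become ready later. */
    static void
    toy_event_cb(enum rte_mempool_event event, struct rte_mempool *mp, void *arg)
    {
    	if (event == RTE_MEMPOOL_EVENT_READY)
    		toy_register_cb(mp, arg);
    	/* RTE_MEMPOOL_EVENT_DESTROY would trigger unregistration. */
    }

    static int
    toy_subscribe(void *dev)
    {
    	int ret;

    	/* rte_errno == EEXIST: this (callback, arg) pair is already installed. */
    	ret = rte_mempool_event_callback_register(toy_event_cb, dev);
    	if (ret != 0 && rte_errno != EEXIST)
    		return ret;
    	/* Catch up on mempools populated before the subscription. */
    	if (ret == 0)
    		rte_mempool_walk(toy_register_cb, dev);
    	return 0;
    }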
@@ -409,6 +556,7 @@ mlx5_common_dev_release(struct mlx5_common_device *cdev)
 	if (TAILQ_EMPTY(&devices_list))
 		rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
 						  NULL);
+	mlx5_dev_mempool_unsubscribe(cdev);
 	mlx5_mr_release_cache(&cdev->mr_scache);
 	mlx5_dev_hw_global_release(cdev);
 }
@@ -408,6 +408,15 @@ __rte_internal
 bool
 mlx5_dev_is_pci(const struct rte_device *dev);
 
+__rte_internal
+int
+mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev);
+
+__rte_internal
+void
+mlx5_dev_mempool_unregister(struct mlx5_common_device *cdev,
+			    struct rte_mempool *mp);
+
 /* mlx5_common_mr.c */
 
 __rte_internal
@@ -64,6 +64,17 @@ struct mlx5_mp_id {
 	uint16_t port_id;
 };
 
+/** Key string for IPC. */
+#define MLX5_MP_NAME "common_mlx5_mp"
+
+/** Initialize a multi-process ID. */
+static inline void
+mlx5_mp_id_init(struct mlx5_mp_id *mp_id, uint16_t port_id)
+{
+	mp_id->port_id = port_id;
+	strlcpy(mp_id->name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
+}
+
 /** Request timeout for IPC. */
 #define MLX5_MP_REQ_TIMEOUT_SEC 5
 
@@ -12,8 +12,10 @@
 #include <rte_rwlock.h>
 
 #include "mlx5_glue.h"
 #include "mlx5_common.h"
+#include "mlx5_common_mp.h"
 #include "mlx5_common_mr.h"
+#include "mlx5_common_os.h"
 #include "mlx5_common_log.h"
 #include "mlx5_malloc.h"
 
@@ -47,6 +49,20 @@ struct mlx5_mempool_reg {
 	unsigned int mrs_n;
 };
 
+void
+mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
+{
+	struct mlx5_mprq_buf *buf = opaque;
+
+	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
+		rte_mempool_put(buf->mp, buf);
+	} else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
+					       __ATOMIC_RELAXED) == 0)) {
+		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+		rte_mempool_put(buf->mp, buf);
+	}
+}
+
 /**
  * Expand B-tree table to a given size. Can't be called with holding
  * memory_hotplug_lock or share_cache.rwlock due to rte_realloc().
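The free callback moves here verbatim from net/mlx5. Its refcount discipline: buffers leave the mempool with refcnt == 1, so a lone owner returns them without an atomic read-modify-write, and whichever thread decrements the count to zero re-arms it to 1 before the put, preserving the invariant for the next user. A standalone sketch of the idiom with the same GCC atomic builtins (toy types, not the mlx5 structures):

    #include <stdint.h>
    #include <stdio.h>

    struct toy_buf {
    	uint16_t refcnt; /* Invariant: pooled buffers hold refcnt == 1. */
    };

    static void
    toy_pool_put(struct toy_buf *buf)
    {
    	printf("buffer %p returned to pool\n", (void *)buf);
    }

    static void
    toy_buf_free(struct toy_buf *buf)
    {
    	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
    		/* Sole owner: no read-modify-write needed. */
    		toy_pool_put(buf);
    	} else if (__atomic_sub_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED) == 0) {
    		/* Hit zero: restore the pool invariant, then return it. */
    		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
    		toy_pool_put(buf);
    	}
    }

    int
    main(void)
    {
    	struct toy_buf b = { .refcnt = 2 };

    	toy_buf_free(&b); /* 2 -> 1: another owner remains, no put. */
    	toy_buf_free(&b); /* refcnt == 1: returned to the pool. */
    	return 0;
    }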
@@ -600,6 +616,10 @@ mlx5_mr_create_secondary(void *pd __rte_unused,
 {
 	int ret;
 
+	if (mp_id == NULL) {
+		rte_errno = EINVAL;
+		return UINT32_MAX;
+	}
 	DRV_LOG(DEBUG, "port %u requesting MR creation for address (%p)",
 		mp_id->port_id, (void *)addr);
 	ret = mlx5_mp_req_mr_create(mp_id, addr);
@@ -995,10 +1015,11 @@ mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
  * @return
  *   Searched LKey on success, UINT32_MAX on no match.
  */
-uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
-			    struct mlx5_mr_share_cache *share_cache,
-			    struct mlx5_mr_ctrl *mr_ctrl,
-			    uintptr_t addr, unsigned int mr_ext_memseg_en)
+static uint32_t
+mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
+		   struct mlx5_mr_share_cache *share_cache,
+		   struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr,
+		   unsigned int mr_ext_memseg_en)
 {
 	uint32_t lkey;
 	uint16_t bh_idx = 0;
@@ -1029,7 +1050,7 @@ uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
 }
 
 /**
- * Release all the created MRs and resources on global MR cache of a device.
+ * Release all the created MRs and resources on global MR cache of a device
+ * list.
  *
  * @param share_cache
@@ -1076,6 +1097,8 @@ mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket)
 	mlx5_os_set_reg_mr_cb(&share_cache->reg_mr_cb,
 			      &share_cache->dereg_mr_cb);
 	rte_rwlock_init(&share_cache->rwlock);
+	rte_rwlock_init(&share_cache->mprwlock);
+	share_cache->mp_cb_registered = 0;
 	/* Initialize B-tree and allocate memory for global MR cache table. */
 	return mlx5_mr_btree_init(&share_cache->cache,
 				  MLX5_MR_BTREE_CACHE_N * 2, socket);
@@ -1245,8 +1268,8 @@ mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
 /**
  * Dump all the created MRs and the global cache entries.
  *
- * @param sh
- *   Pointer to Ethernet device shared context.
+ * @param share_cache
+ *   Pointer to a global shared MR cache.
  */
 void
 mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
@@ -1581,8 +1604,7 @@ mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
 	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
 	if (mpr == NULL) {
 		mlx5_mempool_reg_attach(new_mpr);
-		LIST_INSERT_HEAD(&share_cache->mempool_reg_list,
-				 new_mpr, next);
+		LIST_INSERT_HEAD(&share_cache->mempool_reg_list, new_mpr, next);
 		ret = 0;
 	}
 	rte_rwlock_write_unlock(&share_cache->rwlock);
@@ -1837,6 +1859,56 @@ mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
 	return lkey;
 }
 
+/**
+ * Bottom-half of LKey search. If supported, look up the address in
+ * the mempool. Otherwise, search in the old mechanism's caches.
+ *
+ * @param cdev
+ *   Pointer to mlx5 device.
+ * @param mp_id
+ *   Multi-process identifier, may be NULL for the primary process.
+ * @param mr_ctrl
+ *   Pointer to per-queue MR control structure.
+ * @param mb
+ *   Pointer to mbuf.
+ *
+ * @return
+ *   Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx5_mr_mb2mr_bh(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
+		 struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
+{
+	uint32_t lkey;
+	uintptr_t addr = (uintptr_t)mb->buf_addr;
+
+	if (cdev->config.mr_mempool_reg_en) {
+		struct rte_mempool *mp = NULL;
+		struct mlx5_mprq_buf *buf;
+
+		if (!RTE_MBUF_HAS_EXTBUF(mb)) {
+			mp = mlx5_mb2mp(mb);
+		} else if (mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
+			/* Recover MPRQ mempool. */
+			buf = mb->shinfo->fcb_opaque;
+			mp = buf->mp;
+		}
+		if (mp != NULL) {
+			lkey = mlx5_mr_mempool2mr_bh(&cdev->mr_scache,
+						     mr_ctrl, mp, addr);
+			/*
+			 * Lookup can only fail on invalid input, e.g. "addr"
+			 * is not from "mp" or "mp" has MEMPOOL_F_NON_IO set.
+			 */
+			if (lkey != UINT32_MAX)
+				return lkey;
+		}
+		/* Fallback for generic mechanism in corner cases. */
+	}
+	return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, &cdev->mr_scache, mr_ctrl,
+				  addr, cdev->config.mr_ext_memseg_en);
+}
+
 /**
  * Query LKey from a packet buffer.
  *
@@ -1857,7 +1929,6 @@ mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
 	      struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf)
 {
 	uint32_t lkey;
-	uintptr_t addr = (uintptr_t)mbuf->buf_addr;
 
 	/* Check generation bit to see if there's any change on existing MRs. */
 	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
@@ -1868,6 +1939,5 @@ mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
 	if (likely(lkey != UINT32_MAX))
 		return lkey;
 	/* Take slower bottom-half on miss. */
-	return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, &cdev->mr_scache, mr_ctrl,
-				  addr, cdev->config.mr_ext_memseg_en);
+	return mlx5_mr_mb2mr_bh(cdev, mp_id, mr_ctrl, mbuf);
 }
@@ -79,6 +79,8 @@ LIST_HEAD(mlx5_mempool_reg_list, mlx5_mempool_reg);
 struct mlx5_mr_share_cache {
 	uint32_t dev_gen; /* Generation number to flush local caches. */
 	rte_rwlock_t rwlock; /* MR cache Lock. */
+	rte_rwlock_t mprwlock; /* Mempool Registration Lock. */
+	uint8_t mp_cb_registered; /* Mempools are registered. */
 	struct mlx5_mr_btree cache; /* Global MR cache table. */
 	struct mlx5_mr_list mr_list; /* Registered MR list. */
 	struct mlx5_mr_list mr_free_list; /* Freed MR list. */
@@ -87,6 +89,40 @@ struct mlx5_mr_share_cache {
 	mlx5_dereg_mr_t dereg_mr_cb; /* Callback to dereg_mr func */
 } __rte_packed;
 
+/* Multi-Packet RQ buffer header. */
+struct mlx5_mprq_buf {
+	struct rte_mempool *mp;
+	uint16_t refcnt; /* Atomically accessed refcnt. */
+	uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
+	struct rte_mbuf_ext_shared_info shinfos[];
+	/*
+	 * Shared information per stride.
+	 * More memory will be allocated for the first stride head-room and for
+	 * the strides data.
+	 */
+} __rte_cache_aligned;
+
+__rte_internal
+void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
+
+/**
+ * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
+ * cloned mbuf is allocated is returned instead.
+ *
+ * @param buf
+ *   Pointer to mbuf.
+ *
+ * @return
+ *   Memory pool where data is located for given mbuf.
+ */
+static inline struct rte_mempool *
+mlx5_mb2mp(struct rte_mbuf *buf)
+{
+	if (unlikely(RTE_MBUF_CLONED(buf)))
+		return rte_mbuf_from_indirect(buf)->pool;
+	return buf->pool;
+}
+
 /**
  * Look up LKey from given lookup table by linear search. Firstly look up the
  * last-hit entry. If miss, the entire array is searched. If found, update the
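mlx5_mb2mp() moves into this header because MR lookup must resolve the pool that owns the mbuf data: for a cloned (indirect) mbuf, mb->pool is the clone's own pool, while the payload still lives in the original pool whose registration covers the buffer address. A short usage sketch (toy wrapper; assumes an initialized mbuf):

    #include <rte_mbuf.h>
    #include <mlx5_common_mr.h>

    /* Returns the mempool holding the data of 'mb': mb->pool for a direct
     * mbuf, rte_mbuf_from_indirect(mb)->pool for a clone. */
    static struct rte_mempool *
    toy_data_pool(struct rte_mbuf *mb)
    {
    	return mlx5_mb2mp(mb);
    }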
@@ -133,11 +169,6 @@ __rte_internal
 void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
 void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
 __rte_internal
-uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
-			    struct mlx5_mr_share_cache *share_cache,
-			    struct mlx5_mr_ctrl *mr_ctrl,
-			    uintptr_t addr, unsigned int mr_ext_memseg_en);
-__rte_internal
 uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
 			       struct mlx5_mr_ctrl *mr_ctrl,
 			       struct rte_mempool *mp, uintptr_t addr);
@@ -13,6 +13,8 @@ INTERNAL {
 	mlx5_common_verbs_dereg_mr; # WINDOWS_NO_EXPORT
 
 	mlx5_dev_is_pci;
+	mlx5_dev_mempool_unregister;
+	mlx5_dev_mempool_subscribe;
 
 	mlx5_devx_alloc_uar; # WINDOWS_NO_EXPORT
 
@@ -104,7 +106,7 @@ INTERNAL {
 	mlx5_mp_uninit_primary; # WINDOWS_NO_EXPORT
 	mlx5_mp_uninit_secondary; # WINDOWS_NO_EXPORT
 
-	mlx5_mr_addr2mr_bh;
+	mlx5_mprq_buf_free_cb;
 	mlx5_mr_btree_free;
 	mlx5_mr_create_primary;
 	mlx5_mr_ctrl_init;
@@ -386,8 +386,9 @@ mlx5_compress_dev_stop(struct rte_compressdev *dev)
 static int
 mlx5_compress_dev_start(struct rte_compressdev *dev)
 {
-	RTE_SET_USED(dev);
-	return 0;
+	struct mlx5_compress_priv *priv = dev->data->dev_private;
+
+	return mlx5_dev_mempool_subscribe(priv->cdev);
 }
 
 static void
@@ -142,8 +142,9 @@ mlx5_crypto_dev_stop(struct rte_cryptodev *dev)
 static int
 mlx5_crypto_dev_start(struct rte_cryptodev *dev)
 {
-	RTE_SET_USED(dev);
-	return 0;
+	struct mlx5_crypto_priv *priv = dev->data->dev_private;
+
+	return mlx5_dev_mempool_subscribe(priv->cdev);
 }
 
 static int
@@ -90,8 +90,7 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
 	switch (param->type) {
 	case MLX5_MP_REQ_CREATE_MR:
 		mp_init_msg(&priv->mp_id, &mp_res, param->type);
-		lkey = mlx5_mr_create_primary(cdev->pd,
-					      &priv->sh->cdev->mr_scache,
+		lkey = mlx5_mr_create_primary(cdev->pd, &cdev->mr_scache,
 					      &entry, param->args.addr,
 					      cdev->config.mr_ext_memseg_en);
 		if (lkey == UINT32_MAX)
@@ -18,7 +18,6 @@ sources = files(
         'mlx5_flow_dv.c',
         'mlx5_flow_aso.c',
         'mlx5_mac.c',
-        'mlx5_mr.c',
         'mlx5_rss.c',
         'mlx5_rx.c',
         'mlx5_rxmode.c',
@@ -1127,28 +1127,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
 }
 
-/**
- * Unregister the mempool from the protection domain.
- *
- * @param sh
- *   Pointer to the device shared context.
- * @param mp
- *   Mempool being unregistered.
- */
-static void
-mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
-				       struct rte_mempool *mp)
-{
-	struct mlx5_mp_id mp_id;
-
-	mlx5_mp_id_init(&mp_id, 0);
-	if (mlx5_mr_mempool_unregister(&sh->cdev->mr_scache, mp, &mp_id) < 0)
-		DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
-			mp->name, sh->cdev->pd, rte_strerror(rte_errno));
-}
-
 /**
- * rte_mempool_walk() callback to register mempools
- * for the protection domain.
+ * rte_mempool_walk() callback to unregister Rx mempools.
+ * It is used when implicit mempool registration is disabled.
  *
  * @param mp
  *   The mempool being walked.
@@ -1156,66 +1136,11 @@ mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
  *   Pointer to the device shared context.
  */
 static void
-mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
+mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
 {
 	struct mlx5_dev_ctx_shared *sh = arg;
-	struct mlx5_mp_id mp_id;
-	int ret;
 
-	mlx5_mp_id_init(&mp_id, 0);
-	ret = mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd, mp,
-				       &mp_id);
-	if (ret < 0 && rte_errno != EEXIST)
-		DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
-			mp->name, sh->cdev->pd, rte_strerror(rte_errno));
-}
-
-/**
- * rte_mempool_walk() callback to unregister mempools
- * from the protection domain.
- *
- * @param mp
- *   The mempool being walked.
- * @param arg
- *   Pointer to the device shared context.
- */
-static void
-mlx5_dev_ctx_shared_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
-{
-	mlx5_dev_ctx_shared_mempool_unregister
-		((struct mlx5_dev_ctx_shared *)arg, mp);
-}
-
-/**
- * Mempool life cycle callback for Ethernet devices.
- *
- * @param event
- *   Mempool life cycle event.
- * @param mp
- *   Associated mempool.
- * @param arg
- *   Pointer to a device shared context.
- */
-static void
-mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,
-				     struct rte_mempool *mp, void *arg)
-{
-	struct mlx5_dev_ctx_shared *sh = arg;
-	struct mlx5_mp_id mp_id;
-
-	switch (event) {
-	case RTE_MEMPOOL_EVENT_READY:
-		mlx5_mp_id_init(&mp_id, 0);
-		if (mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd,
-					     mp, &mp_id) < 0)
-			DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
-				mp->name, sh->cdev->pd,
-				rte_strerror(rte_errno));
-		break;
-	case RTE_MEMPOOL_EVENT_DESTROY:
-		mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
-		break;
-	}
+	mlx5_dev_mempool_unregister(sh->cdev, mp);
 }
 
 /**
@@ -1236,7 +1161,7 @@ mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event,
 	struct mlx5_dev_ctx_shared *sh = arg;
 
 	if (event == RTE_MEMPOOL_EVENT_DESTROY)
-		mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
+		mlx5_dev_mempool_unregister(sh->cdev, mp);
 }
 
 int
@@ -1252,15 +1177,7 @@ mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
 				(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
 		return ret == 0 || rte_errno == EEXIST ? 0 : ret;
 	}
-	/* Callback for this shared context may be already registered. */
-	ret = rte_mempool_event_callback_register
-			(mlx5_dev_ctx_shared_mempool_event_cb, sh);
-	if (ret != 0 && rte_errno != EEXIST)
-		return ret;
-	/* Register mempools only once for this shared context. */
-	if (ret == 0)
-		rte_mempool_walk(mlx5_dev_ctx_shared_mempool_register_cb, sh);
-	return 0;
+	return mlx5_dev_mempool_subscribe(sh->cdev);
 }
 
 /**
@@ -1437,14 +1354,13 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	if (--sh->refcnt)
 		goto exit;
 	/* Stop watching for mempool events and unregister all mempools. */
-	ret = rte_mempool_event_callback_unregister
-			(mlx5_dev_ctx_shared_mempool_event_cb, sh);
-	if (ret < 0 && rte_errno == ENOENT)
+	if (!sh->cdev->config.mr_mempool_reg_en) {
 		ret = rte_mempool_event_callback_unregister
 			(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
-	if (ret == 0)
-		rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb,
-				 sh);
+		if (ret == 0)
+			rte_mempool_walk
+				(mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh);
+	}
 	/* Remove context from the global device list. */
 	LIST_REMOVE(sh, next);
 	/* Release flow workspaces objects on the last device. */
@@ -153,17 +153,6 @@ struct mlx5_flow_dump_ack {
 	int rc; /**< Return code. */
 };
 
-/** Key string for IPC. */
-#define MLX5_MP_NAME "net_mlx5_mp"
-
-/** Initialize a multi-process ID. */
-static inline void
-mlx5_mp_id_init(struct mlx5_mp_id *mp_id, uint16_t port_id)
-{
-	mp_id->port_id = port_id;
-	strlcpy(mp_id->name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
-}
-
 LIST_HEAD(mlx5_dev_list, mlx5_dev_ctx_shared);
 
 /* Shared data between primary and secondary processes. */
@@ -172,8 +161,6 @@ struct mlx5_shared_data {
 	/* Global spinlock for primary and secondary processes. */
 	int init_done; /* Whether primary has done initialization. */
 	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
-	struct mlx5_dev_list mem_event_cb_list;
-	rte_rwlock_t mem_event_rwlock;
 };
 
 /* Per-process data structure, not visible to other processes. */
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox Technologies, Ltd
- */
-
-#include <rte_eal_memconfig.h>
-#include <rte_mempool.h>
-#include <rte_malloc.h>
-#include <rte_rwlock.h>
-
-#include <mlx5_common_mp.h>
-#include <mlx5_common_mr.h>
-
-#include "mlx5.h"
-#include "mlx5_rxtx.h"
-#include "mlx5_rx.h"
-#include "mlx5_tx.h"
-
-/**
- * Bottom-half of LKey search on Tx.
- *
- * @param txq
- *   Pointer to Tx queue structure.
- * @param addr
- *   Search key.
- *
- * @return
- *   Searched LKey on success, UINT32_MAX on no match.
- */
-static uint32_t
-mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
-{
-	struct mlx5_txq_ctrl *txq_ctrl =
-		container_of(txq, struct mlx5_txq_ctrl, txq);
-	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
-	struct mlx5_priv *priv = txq_ctrl->priv;
-
-	return mlx5_mr_addr2mr_bh(priv->sh->cdev->pd, &priv->mp_id,
-				  &priv->sh->cdev->mr_scache, mr_ctrl, addr,
-				  priv->sh->cdev->config.mr_ext_memseg_en);
-}
-
-/**
- * Bottom-half of LKey search on Tx. If it can't be searched in the memseg
- * list, register the mempool of the mbuf as externally allocated memory.
- *
- * @param txq
- *   Pointer to Tx queue structure.
- * @param mb
- *   Pointer to mbuf.
- *
- * @return
- *   Searched LKey on success, UINT32_MAX on no match.
- */
-uint32_t
-mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
-{
-	struct mlx5_txq_ctrl *txq_ctrl =
-		container_of(txq, struct mlx5_txq_ctrl, txq);
-	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
-	struct mlx5_priv *priv = txq_ctrl->priv;
-	uintptr_t addr = (uintptr_t)mb->buf_addr;
-	uint32_t lkey;
-
-	if (priv->sh->cdev->config.mr_mempool_reg_en) {
-		struct rte_mempool *mp = NULL;
-		struct mlx5_mprq_buf *buf;
-
-		if (!RTE_MBUF_HAS_EXTBUF(mb)) {
-			mp = mlx5_mb2mp(mb);
-		} else if (mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
-			/* Recover MPRQ mempool. */
-			buf = mb->shinfo->fcb_opaque;
-			mp = buf->mp;
-		}
-		if (mp != NULL) {
-			lkey = mlx5_mr_mempool2mr_bh(&priv->sh->cdev->mr_scache,
-						     mr_ctrl, mp, addr);
-			/*
-			 * Lookup can only fail on invalid input, e.g. "addr"
-			 * is not from "mp" or "mp" has RTE_MEMPOOL_F_NON_IO set.
-			 */
-			if (lkey != UINT32_MAX)
-				return lkey;
-		}
-		/* Fallback for generic mechanism in corner cases. */
-	}
-	return mlx5_tx_addr2mr_bh(txq, addr);
-}
@@ -18,6 +18,7 @@
 
 #include <mlx5_prm.h>
 #include <mlx5_common.h>
+#include <mlx5_common_mr.h>
 
 #include "mlx5_autoconf.h"
 #include "mlx5_defs.h"
@@ -1027,20 +1028,6 @@ mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
 	mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
 }
 
-void
-mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
-{
-	struct mlx5_mprq_buf *buf = opaque;
-
-	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
-		rte_mempool_put(buf->mp, buf);
-	} else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
-					       __ATOMIC_RELAXED) == 0)) {
-		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
-		rte_mempool_put(buf->mp, buf);
-	}
-}
-
 void
 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
 {
@@ -43,19 +43,6 @@ struct rxq_zip {
 	uint32_t cqe_cnt; /* Number of CQEs. */
 };
 
-/* Multi-Packet RQ buffer header. */
-struct mlx5_mprq_buf {
-	struct rte_mempool *mp;
-	uint16_t refcnt; /* Atomically accessed refcnt. */
-	uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
-	struct rte_mbuf_ext_shared_info shinfos[];
-	/*
-	 * Shared information per stride.
-	 * More memory will be allocated for the first stride head-room and for
-	 * the strides data.
-	 */
-} __rte_cache_aligned;
-
 /* Get pointer to the first stride. */
 #define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
 		sizeof(struct mlx5_mprq_buf) + \
@@ -255,7 +242,6 @@ int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx,
 uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
 void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
 __rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
-void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
 void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
 uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
 			    uint16_t pkts_n);
@@ -21,6 +21,7 @@
 
 #include <mlx5_glue.h>
 #include <mlx5_malloc.h>
+#include <mlx5_common_mr.h>
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
@@ -43,30 +43,4 @@ int mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
 int mlx5_queue_state_modify(struct rte_eth_dev *dev,
 			    struct mlx5_mp_arg_queue_state_modify *sm);
 
-/* mlx5_mr.c */
-
-void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
-int mlx5_net_dma_map(struct rte_device *rte_dev, void *addr, uint64_t iova,
-		     size_t len);
-int mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr, uint64_t iova,
-		       size_t len);
-
-/**
- * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
- * cloned mbuf is allocated is returned instead.
- *
- * @param buf
- *   Pointer to mbuf.
- *
- * @return
- *   Memory pool where data is located for given mbuf.
- */
-static inline struct rte_mempool *
-mlx5_mb2mp(struct rte_mbuf *buf)
-{
-	if (unlikely(RTE_MBUF_CLONED(buf)))
-		return rte_mbuf_from_indirect(buf)->pool;
-	return buf->pool;
-}
-
 #endif /* RTE_PMD_MLX5_RXTX_H_ */
@@ -235,10 +235,6 @@ void mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 			   struct rte_eth_burst_mode *mode);
 
-/* mlx5_mr.c */
-
-uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
-
 /* mlx5_tx_empw.c */
 
 MLX5_TXOFF_PRE_DECL(full_empw);
@@ -356,12 +352,12 @@ __mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
 #endif
 
 /**
- * Query LKey from a packet buffer for Tx. If not found, add the mempool.
+ * Query LKey from a packet buffer for Tx.
  *
  * @param txq
  *   Pointer to Tx queue structure.
- * @param addr
- *   Address to search.
+ * @param mb
+ *   Pointer to mbuf.
  *
  * @return
  *   Searched LKey on success, UINT32_MAX on no match.
@@ -370,19 +366,12 @@ static __rte_always_inline uint32_t
 mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
 {
 	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
-	uintptr_t addr = (uintptr_t)mb->buf_addr;
-	uint32_t lkey;
+	struct mlx5_txq_ctrl *txq_ctrl =
+		container_of(txq, struct mlx5_txq_ctrl, txq);
+	struct mlx5_priv *priv = txq_ctrl->priv;
 
-	/* Check generation bit to see if there's any change on existing MRs. */
-	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
-		mlx5_mr_flush_local_cache(mr_ctrl);
-	/* Linear search on MR cache array. */
-	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
-				   MLX5_MR_CACHE_N, addr);
-	if (likely(lkey != UINT32_MAX))
-		return lkey;
-	/* Take slower bottom-half on miss. */
-	return mlx5_tx_mb2mr_bh(txq, mb);
+	return mlx5_mr_mb2mr(priv->sh->cdev, &priv->mp_id, mr_ctrl, mb);
 }
 
 /**
@@ -36,9 +36,11 @@ const struct rte_regexdev_ops mlx5_regexdev_ops = {
 };
 
 int
-mlx5_regex_start(struct rte_regexdev *dev __rte_unused)
+mlx5_regex_start(struct rte_regexdev *dev)
 {
-	return 0;
+	struct mlx5_regex_priv *priv = dev->data->dev_private;
+
+	return mlx5_dev_mempool_subscribe(priv->cdev);
 }
 
 int