common/mlx5: fix multi-process mempool registration

The `mp_cb_registered` flag shared between all processes
was used to ensure that for any IB device (MLX5 common device)
mempool event callback was registered only once
and mempools that existed before the device start
were traversed only once to register them.
Since mempool callback registrations have become process-private,
callback registration must be done by every process.
The flag can no longer reflect the state for any single process.
Replace it with a registration counter to track
when no more callbacks are registered for the device in any process.
It is sufficient to only register pre-existing mempools
in the primary process because it is the one that starts the device.

Fixes: 690b2a88c2 ("common/mlx5: add mempool registration facilities")
Cc: stable@dpdk.org

Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
This commit is contained in:
Dmitry Kozlyuk 2022-08-08 12:42:36 +03:00 committed by Thomas Monjalon
parent 03b3cdf9c2
commit 8ad97e4b32
3 changed files with 11 additions and 8 deletions

View File

@ -583,18 +583,17 @@ mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev)
if (!cdev->config.mr_mempool_reg_en)
return 0;
rte_rwlock_write_lock(&cdev->mr_scache.mprwlock);
if (cdev->mr_scache.mp_cb_registered)
goto exit;
/* Callback for this device may be already registered. */
ret = rte_mempool_event_callback_register(mlx5_dev_mempool_event_cb,
cdev);
if (ret != 0 && rte_errno != EEXIST)
goto exit;
__atomic_add_fetch(&cdev->mr_scache.mempool_cb_reg_n, 1,
__ATOMIC_ACQUIRE);
/* Register mempools only once for this device. */
if (ret == 0)
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
rte_mempool_walk(mlx5_dev_mempool_register_cb, cdev);
ret = 0;
cdev->mr_scache.mp_cb_registered = 1;
exit:
rte_rwlock_write_unlock(&cdev->mr_scache.mprwlock);
return ret;
@ -603,10 +602,14 @@ mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev)
static void
mlx5_dev_mempool_unsubscribe(struct mlx5_common_device *cdev)
{
uint32_t mempool_cb_reg_n;
int ret;
if (!cdev->mr_scache.mp_cb_registered ||
!cdev->config.mr_mempool_reg_en)
if (!cdev->config.mr_mempool_reg_en)
return;
mempool_cb_reg_n = __atomic_sub_fetch(&cdev->mr_scache.mempool_cb_reg_n,
1, __ATOMIC_RELEASE);
if (mempool_cb_reg_n > 0)
return;
/* Stop watching for mempool events and unregister all mempools. */
ret = rte_mempool_event_callback_unregister(mlx5_dev_mempool_event_cb,

View File

@ -1138,7 +1138,7 @@ mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket)
&share_cache->dereg_mr_cb);
rte_rwlock_init(&share_cache->rwlock);
rte_rwlock_init(&share_cache->mprwlock);
share_cache->mp_cb_registered = 0;
share_cache->mempool_cb_reg_n = 0;
/* Initialize B-tree and allocate memory for global MR cache table. */
return mlx5_mr_btree_init(&share_cache->cache,
MLX5_MR_BTREE_CACHE_N * 2, socket);

View File

@ -81,7 +81,7 @@ struct mlx5_mr_share_cache {
uint32_t dev_gen; /* Generation number to flush local caches. */
rte_rwlock_t rwlock; /* MR cache Lock. */
rte_rwlock_t mprwlock; /* Mempool Registration Lock. */
uint8_t mp_cb_registered; /* Mempool are Registered. */
uint32_t mempool_cb_reg_n; /* Mempool event callback registrants. */
struct mlx5_mr_btree cache; /* Global MR cache table. */
struct mlx5_mr_list mr_list; /* Registered MR list. */
struct mlx5_mr_list mr_free_list; /* Freed MR list. */