From 03332379f06d89571f6ff82b9af400df623f1dfb Mon Sep 17 00:00:00 2001
From: Shuhei Matsumoto
Date: Wed, 7 Jul 2021 22:15:36 +0900
Subject: [PATCH] bdev/nvme: Hold mutex while traversing global ctrlr list

nvme_bdev_first_ctrlr() and nvme_bdev_next_ctrlr() could not hold the
mutex correctly, and nvme_ctrlr_get() and nvme_ctrlr_get_by_name() did
not hold the mutex at all. nvme_bdev_first_ctrlr() and
nvme_bdev_next_ctrlr() were replaced by nvme_ctrlr_for_each() in the
previous patch.

This patch adds the mutex to the three helper functions
nvme_ctrlr_get(), nvme_ctrlr_get_by_name(), and nvme_ctrlr_for_each().

The mutex is also added to nvme_ctrlr_create(), but that locking will
be removed in the following patches because nvme_ctrlr will be added to
a per-nvme_bdev_ctrlr ctrlr list instead of the global ctrlr list.

Signed-off-by: Shuhei Matsumoto
Change-Id: Ife27066d2dcac82db0616b0afeaf68e5705d7da1
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8722
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins
Reviewed-by: Changpeng Liu
Reviewed-by: Aleksey Marchuk
---
 module/bdev/nvme/bdev_nvme.c |  2 ++
 module/bdev/nvme/common.c    | 14 ++++++++++----
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/module/bdev/nvme/bdev_nvme.c b/module/bdev/nvme/bdev_nvme.c
index 1f5439404c..746d097ba5 100644
--- a/module/bdev/nvme/bdev_nvme.c
+++ b/module/bdev/nvme/bdev_nvme.c
@@ -1897,7 +1897,9 @@ nvme_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr,
 	nvme_ctrlr->adminq_timer_poller = SPDK_POLLER_REGISTER(bdev_nvme_poll_adminq, nvme_ctrlr,
 			g_opts.nvme_adminq_poll_period_us);
 
+	pthread_mutex_lock(&g_bdev_nvme_mutex);
 	TAILQ_INSERT_TAIL(&g_nvme_ctrlrs, nvme_ctrlr, tailq);
+	pthread_mutex_unlock(&g_bdev_nvme_mutex);
 
 	if (g_opts.timeout_us > 0) {
 		spdk_nvme_ctrlr_register_timeout_callback(ctrlr, g_opts.timeout_us,
diff --git a/module/bdev/nvme/common.c b/module/bdev/nvme/common.c
index 634bc7e026..fc7db2d519 100644
--- a/module/bdev/nvme/common.c
+++ b/module/bdev/nvme/common.c
@@ -44,13 +44,15 @@ nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid)
 {
 	struct nvme_ctrlr *nvme_ctrlr;
 
+	pthread_mutex_lock(&g_bdev_nvme_mutex);
 	TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
 		if (spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid) == 0) {
-			return nvme_ctrlr;
+			break;
 		}
 	}
+	pthread_mutex_unlock(&g_bdev_nvme_mutex);
 
-	return NULL;
+	return nvme_ctrlr;
 }
 
 struct nvme_ctrlr *
@@ -62,13 +64,15 @@ nvme_ctrlr_get_by_name(const char *name)
 		return NULL;
 	}
 
+	pthread_mutex_lock(&g_bdev_nvme_mutex);
 	TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
 		if (strcmp(name, nvme_ctrlr->name) == 0) {
-			return nvme_ctrlr;
+			break;
 		}
 	}
+	pthread_mutex_unlock(&g_bdev_nvme_mutex);
 
-	return NULL;
+	return nvme_ctrlr;
 }
 
 void
@@ -76,9 +80,11 @@ nvme_ctrlr_for_each(nvme_ctrlr_for_each_fn fn, void *ctx)
 {
 	struct nvme_ctrlr *nvme_ctrlr;
 
+	pthread_mutex_lock(&g_bdev_nvme_mutex);
 	TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
 		fn(nvme_ctrlr, ctx);
 	}
+	pthread_mutex_unlock(&g_bdev_nvme_mutex);
 }
 
 void
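
For readers unfamiliar with the idiom, below is a minimal standalone sketch
of the locking pattern the hunks above apply: take the global mutex before
walking the TAILQ, break out of the loop on a match instead of returning
while the lock is held, and unlock on the single exit path. The names and
types here (struct item, g_items, item_find) are illustrative stand-ins,
not SPDK API.

/*
 * Minimal standalone sketch (not SPDK code): a global TAILQ protected by a
 * global mutex, searched the same way the patched helpers search
 * g_nvme_ctrlrs under g_bdev_nvme_mutex.
 */
#include <pthread.h>
#include <string.h>
#include <sys/queue.h>

struct item {
	char name[32];
	TAILQ_ENTRY(item) tailq;
};

static TAILQ_HEAD(, item) g_items = TAILQ_HEAD_INITIALIZER(g_items);
static pthread_mutex_t g_items_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Same shape as the patched nvme_ctrlr_get_by_name(): traverse under the
 * mutex, break on a match rather than returning with the lock held, and
 * unlock once before returning. TAILQ_FOREACH leaves the iterator NULL when
 * the loop completes without a match, so the final return covers both the
 * found and not-found cases.
 */
static struct item *
item_find(const char *name)
{
	struct item *it;

	pthread_mutex_lock(&g_items_mutex);
	TAILQ_FOREACH(it, &g_items, tailq) {
		if (strcmp(name, it->name) == 0) {
			break;
		}
	}
	pthread_mutex_unlock(&g_items_mutex);

	return it;
}

One caveat of this pattern with a plain (non-recursive) mutex: a callback
invoked under the lock, as in the for-each style helper, must not try to
take the same mutex again; whether that constraint matters for particular
g_bdev_nvme_mutex callers is outside the scope of this sketch.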