bdev/nvme: Hold mutex while traversing global ctrlr list

It was not possible for nvme_bdev_first_ctrlr() and nvme_bdev_next_ctrlr()
to hold the mutex correctly, and nvme_ctrlr_get() and nvme_ctrlr_get_by_name()
did not hold the mutex at all.

nvme_bdev_first_ctrlr() and nvme_bdev_next_ctrlr() were replaced by
nvme_ctrlr_for_each() in the previous patch.

In this patch, add the mutex to the three remaining helper functions:
nvme_ctrlr_get(), nvme_ctrlr_get_by_name(), and nvme_ctrlr_for_each().

Also add the mutex to nvme_ctrlr_create(), but it will be removed in the
following patches because nvme_ctrlr will be added to a per-nvme_bdev_ctrlr
ctrlr list rather than the global ctrlr list.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ife27066d2dcac82db0616b0afeaf68e5705d7da1
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8722
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
This commit is contained in:
Shuhei Matsumoto 2021-07-07 22:15:36 +09:00 committed by Tomasz Zawadzki
parent ca4dfff9e1
commit 03332379f0
2 changed files with 12 additions and 4 deletions

View File

@@ -1897,7 +1897,9 @@ nvme_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr,
 	nvme_ctrlr->adminq_timer_poller = SPDK_POLLER_REGISTER(bdev_nvme_poll_adminq, nvme_ctrlr,
 					  g_opts.nvme_adminq_poll_period_us);
 
+	pthread_mutex_lock(&g_bdev_nvme_mutex);
 	TAILQ_INSERT_TAIL(&g_nvme_ctrlrs, nvme_ctrlr, tailq);
+	pthread_mutex_unlock(&g_bdev_nvme_mutex);
 
 	if (g_opts.timeout_us > 0) {
 		spdk_nvme_ctrlr_register_timeout_callback(ctrlr, g_opts.timeout_us,

View File

@@ -44,13 +44,15 @@ nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid)
 {
 	struct nvme_ctrlr *nvme_ctrlr;
 
+	pthread_mutex_lock(&g_bdev_nvme_mutex);
 	TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
 		if (spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid) == 0) {
-			return nvme_ctrlr;
+			break;
 		}
 	}
+	pthread_mutex_unlock(&g_bdev_nvme_mutex);
 
-	return NULL;
+	return nvme_ctrlr;
 }
 
 struct nvme_ctrlr *
@@ -62,13 +64,15 @@ nvme_ctrlr_get_by_name(const char *name)
 		return NULL;
 	}
 
+	pthread_mutex_lock(&g_bdev_nvme_mutex);
 	TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
 		if (strcmp(name, nvme_ctrlr->name) == 0) {
-			return nvme_ctrlr;
+			break;
 		}
 	}
+	pthread_mutex_unlock(&g_bdev_nvme_mutex);
 
-	return NULL;
+	return nvme_ctrlr;
 }
 
 void
@@ -76,9 +80,11 @@ nvme_ctrlr_for_each(nvme_ctrlr_for_each_fn fn, void *ctx)
 {
 	struct nvme_ctrlr *nvme_ctrlr;
 
+	pthread_mutex_lock(&g_bdev_nvme_mutex);
 	TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
 		fn(nvme_ctrlr, ctx);
 	}
+	pthread_mutex_unlock(&g_bdev_nvme_mutex);
 }
 
 void