nvmf_tgt: Detect completion of poll group destroy at shutdown

Previously, the NVMe-oF target requested each thread to destroy its
poll group but moved to the next shutdown state without waiting for
the destruction to complete.

To create and destroy poll group threads dynamically, the NVMe-oF
target has to know when each poll group destroy completes.

The reason is as follows.

spdk_thread_exit() requires that each poll group thread unregister
its pollers and release its I/O channels, but both poller
unregistration and I/O channel release complete asynchronously.
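
For illustration, a minimal sketch of the constraint (the context
struct and function are hypothetical; only the spdk_*() calls are
real SPDK APIs):

    #include "spdk/thread.h"

    struct pg_thread_ctx {
            struct spdk_poller *poller;
            struct spdk_io_channel *ch;
    };

    static void
    pg_thread_teardown(struct pg_thread_ctx *ctx)
    {
            /* Both calls below only schedule the release; the poller
             * and the channel are still live when they return, so the
             * thread cannot be treated as finished here and must
             * report completion back to a coordinating thread later.
             */
            spdk_poller_unregister(&ctx->poller);
            spdk_put_io_channel(ctx->ch);
    }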

This patch does the following.

To avoid using a mutex, do all completion bookkeeping on a single
designated thread, the fini thread (g_tgt_fini_thread); see the
sketch after these steps.

spdk_for_each_thread() requires a completion callback, but that
callback is usable only if each message handler is synchronous, and
spdk_nvmf_poll_group_destroy() is now asynchronous. So replace
spdk_for_each_thread() with iterating over g_poll_groups and sending
a message to each corresponding thread.
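
For example (a sketch; the handler bodies are placeholders, while
spdk_for_each_thread() and its signature are the real SPDK API):

    #include "spdk/thread.h"

    static void
    destroy_on_thread(void *ctx)
    {
            /* starts asynchronous destruction and returns at once */
    }

    static void
    all_threads_done(void *ctx)
    {
            /* Runs once every thread has returned from
             * destroy_on_thread(), not once the asynchronous
             * destruction has finished, i.e. too early.
             */
    }

    static void
    broken_shutdown(void)
    {
            spdk_for_each_thread(destroy_on_thread, NULL, all_threads_done);
    }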

Then pass the completion callback nvmf_tgt_destroy_poll_group_done()
to spdk_nvmf_poll_group_destroy().

nvmf_tgt_destroy_poll_group_done() sends a message to the fini
thread.

The fini thread executes _nvmf_tgt_destroy_poll_group_done(), which
decrements g_num_poll_groups and moves to the next state when it
reaches zero.
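
Put together, the no-mutex completion pattern looks roughly like this
(a condensed sketch of the steps above, not the literal patch; the
function names here are illustrative):

    #include "spdk/stdinc.h"
    #include "spdk/thread.h"

    static uint32_t g_num_poll_groups;
    static struct spdk_thread *g_tgt_fini_thread;

    static void
    _destroy_done(void *ctx)
    {
            /* always runs on g_tgt_fini_thread, so g_num_poll_groups
             * is only ever touched from one thread and needs no lock */
            if (--g_num_poll_groups == 0) {
                    /* advance to the next shutdown state */
            }
    }

    /* completion callback, runs on whichever thread destroyed its group */
    static void
    destroy_done(void *cb_arg, int status)
    {
            spdk_thread_send_msg(g_tgt_fini_thread, _destroy_done, NULL);
    }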

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I97eb90b9dbff29d7702b20cf7ac233d39d7216b6
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/496
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>

@@ -77,6 +77,8 @@ struct spdk_nvmf_tgt *g_spdk_nvmf_tgt = NULL;
 
 static enum nvmf_tgt_state g_tgt_state;
 
+static struct spdk_thread *g_tgt_fini_thread = NULL;
+
 /* Round-Robin/IP-based tracking of threads to poll group assignment */
 static struct nvmf_tgt_poll_group *g_next_poll_group = NULL;
@@ -306,30 +308,45 @@ acceptor_poll(void *arg)
 }
 
 static void
-nvmf_tgt_destroy_poll_group_done(void *ctx)
+_nvmf_tgt_destroy_poll_group_done(void *ctx)
 {
-	g_tgt_state = NVMF_TGT_FINI_STOP_ACCEPTOR;
-	assert(g_num_poll_groups == 0);
-	nvmf_tgt_advance_state();
+	assert(g_num_poll_groups > 0);
+
+	if (--g_num_poll_groups == 0) {
+		g_tgt_state = NVMF_TGT_FINI_STOP_ACCEPTOR;
+		nvmf_tgt_advance_state();
+	}
+}
+
+static void
+nvmf_tgt_destroy_poll_group_done(void *cb_arg, int status)
+{
+	struct nvmf_tgt_poll_group *pg = cb_arg;
+
+	free(pg);
+
+	spdk_thread_send_msg(g_tgt_fini_thread, _nvmf_tgt_destroy_poll_group_done, NULL);
 }
 
 static void
 nvmf_tgt_destroy_poll_group(void *ctx)
 {
-	struct nvmf_tgt_poll_group *pg, *tpg;
-	struct spdk_thread *thread;
+	struct nvmf_tgt_poll_group *pg = ctx;
 
-	thread = spdk_get_thread();
+	spdk_nvmf_poll_group_destroy(pg->group, nvmf_tgt_destroy_poll_group_done, pg);
+}
+
+static void
+nvmf_tgt_destroy_poll_groups(void)
+{
+	struct nvmf_tgt_poll_group *pg, *tpg;
+
+	g_tgt_fini_thread = spdk_get_thread();
+	assert(g_tgt_fini_thread != NULL);
 
 	TAILQ_FOREACH_SAFE(pg, &g_poll_groups, link, tpg) {
-		if (pg->thread == thread) {
-			TAILQ_REMOVE(&g_poll_groups, pg, link);
-			spdk_nvmf_poll_group_destroy(pg->group, NULL, NULL);
-			free(pg);
-			assert(g_num_poll_groups > 0);
-			g_num_poll_groups--;
-			return;
-		}
+		TAILQ_REMOVE(&g_poll_groups, pg, link);
+		spdk_thread_send_msg(pg->thread, nvmf_tgt_destroy_poll_group, pg);
 	}
 }
@@ -567,9 +584,7 @@ nvmf_tgt_advance_state(void)
 		}
 	case NVMF_TGT_FINI_DESTROY_POLL_GROUPS:
 		/* Send a message to each thread and destroy the poll group */
-		spdk_for_each_thread(nvmf_tgt_destroy_poll_group,
-				     NULL,
-				     nvmf_tgt_destroy_poll_group_done);
+		nvmf_tgt_destroy_poll_groups();
 		break;
 	case NVMF_TGT_FINI_STOP_ACCEPTOR:
 		spdk_poller_unregister(&g_acceptor_poller);