nvmf: relocate nvmf_tgt_destroy_poll_group()

There's no need to forward-declare this function when we can just place it
before its consumers; this will also help follow-up fixes.
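
As an illustration of the pattern (a minimal sketch with hypothetical names,
not SPDK code): defining a static function above its first caller satisfies
C's declare-before-use rule on its own, so no separate prototype is required.

	#include <stdio.h>

	/* Defined before its consumer, so no "static void helper(void);"
	 * forward declaration is needed. */
	static void
	helper(void)
	{
		printf("helper\n");
	}

	static void
	consumer(void)
	{
		helper();	/* already visible at this point */
	}

	int
	main(void)
	{
		consumer();
		return 0;
	}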

Signed-off-by: John Levon <john.levon@nutanix.com>
Change-Id: I201bd966371db76a3b789473041799bf55b13c95
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11437
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>

@@ -53,7 +53,6 @@ SPDK_LOG_REGISTER_COMPONENT(nvmf)
 static TAILQ_HEAD(, spdk_nvmf_tgt) g_nvmf_tgts = TAILQ_HEAD_INITIALIZER(g_nvmf_tgts);
 
 typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status);
-static void nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf);
 
 /* supplied to a single call to nvmf_qpair_disconnect */
 struct nvmf_qpair_disconnect_ctx {
@@ -107,6 +106,48 @@ nvmf_poll_group_poll(void *ctx)
 	return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
 }
 
+static void
+nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
+{
+	struct spdk_nvmf_tgt *tgt = io_device;
+	struct spdk_nvmf_poll_group *group = ctx_buf;
+	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
+	struct spdk_nvmf_subsystem_poll_group *sgroup;
+	uint32_t sid, nsid;
+
+	SPDK_DTRACE_PROBE1(nvmf_destroy_poll_group, spdk_thread_get_id(group->thread));
+
+	pthread_mutex_lock(&tgt->mutex);
+	TAILQ_REMOVE(&tgt->poll_groups, group, link);
+	pthread_mutex_unlock(&tgt->mutex);
+
+	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
+		TAILQ_REMOVE(&group->tgroups, tgroup, link);
+		nvmf_transport_poll_group_destroy(tgroup);
+	}
+
+	for (sid = 0; sid < group->num_sgroups; sid++) {
+		sgroup = &group->sgroups[sid];
+
+		for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
+			if (sgroup->ns_info[nsid].channel) {
+				spdk_put_io_channel(sgroup->ns_info[nsid].channel);
+				sgroup->ns_info[nsid].channel = NULL;
+			}
+		}
+
+		free(sgroup->ns_info);
+	}
+
+	free(group->sgroups);
+
+	spdk_poller_unregister(&group->poller);
+
+	if (group->destroy_cb_fn) {
+		group->destroy_cb_fn(group->destroy_cb_arg, 0);
+	}
+}
+
 static int
 nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
 {
@@ -159,48 +200,6 @@ nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
 	return 0;
 }
 
-static void
-nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
-{
-	struct spdk_nvmf_tgt *tgt = io_device;
-	struct spdk_nvmf_poll_group *group = ctx_buf;
-	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
-	struct spdk_nvmf_subsystem_poll_group *sgroup;
-	uint32_t sid, nsid;
-
-	SPDK_DTRACE_PROBE1(nvmf_destroy_poll_group, spdk_thread_get_id(group->thread));
-
-	pthread_mutex_lock(&tgt->mutex);
-	TAILQ_REMOVE(&tgt->poll_groups, group, link);
-	pthread_mutex_unlock(&tgt->mutex);
-
-	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
-		TAILQ_REMOVE(&group->tgroups, tgroup, link);
-		nvmf_transport_poll_group_destroy(tgroup);
-	}
-
-	for (sid = 0; sid < group->num_sgroups; sid++) {
-		sgroup = &group->sgroups[sid];
-
-		for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
-			if (sgroup->ns_info[nsid].channel) {
-				spdk_put_io_channel(sgroup->ns_info[nsid].channel);
-				sgroup->ns_info[nsid].channel = NULL;
-			}
-		}
-
-		free(sgroup->ns_info);
-	}
-
-	free(group->sgroups);
-
-	spdk_poller_unregister(&group->poller);
-
-	if (group->destroy_cb_fn) {
-		group->destroy_cb_fn(group->destroy_cb_arg, 0);
-	}
-}
-
 static void
 _nvmf_tgt_disconnect_next_qpair(void *ctx)
 {