nvmf: Queue incoming requests to a paused subsystem

Subsystems enter the paused state when their internal
data representation is changing (i.e. namespaces are
being added, etc.). Queue incoming requests while
in this state.

Change-Id: I51e0c687b5b0f98351faa20dffa57110eb4a9df4
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/406449
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Ben Walker 2018-04-04 09:16:39 -07:00
parent 2b79861deb
commit fe54959b62
3 changed files with 21 additions and 9 deletions

View File

@@ -549,6 +549,7 @@ spdk_nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
sgroup = &group->sgroups[subsystem->id];
sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
TAILQ_INIT(&sgroup->queued);
return 0;
}
@@ -603,20 +604,30 @@ int
spdk_nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_subsystem *subsystem)
{
struct spdk_nvmf_request *req, *tmp;
struct spdk_nvmf_subsystem_poll_group *sgroup;
int rc;
if (subsystem->id >= group->num_sgroups) {
return -1;
}
assert(group->sgroups[subsystem->id].state == SPDK_NVMF_SUBSYSTEM_PAUSED);
sgroup = &group->sgroups[subsystem->id];
assert(sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED);
rc = poll_group_update_subsystem(group, subsystem);
if (rc) {
return rc;
}
group->sgroups[subsystem->id].state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
/* Release all queued requests */
TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
TAILQ_REMOVE(&sgroup->queued, req, link);
spdk_nvmf_request_exec(req);
}
return 0;
}

View File

@@ -91,6 +91,8 @@ struct spdk_nvmf_subsystem_poll_group {
uint32_t num_channels;
enum spdk_nvmf_subsystem_state state;
TAILQ_HEAD(, spdk_nvmf_request) queued;
};
struct spdk_nvmf_poll_group {
@@ -132,6 +134,8 @@ struct spdk_nvmf_request {
void *data;
union nvmf_h2c_msg *cmd;
union nvmf_c2h_msg *rsp;
TAILQ_ENTRY(spdk_nvmf_request) link;
};
struct spdk_nvmf_ns {

View File

@@ -120,13 +120,10 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
/* Check if the subsystem is paused (if there is a subsystem) */
if (qpair->ctrlr) {
if (qpair->group->sgroups[qpair->ctrlr->subsys->id].state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
/* TODO: Queue requests here instead of failing */
rsp->status.sct = SPDK_NVME_SCT_GENERIC;
rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
spdk_nvmf_request_complete(req);
struct spdk_nvmf_subsystem_poll_group *sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
if (sgroup->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
/* The subsystem is not currently active. Queue this request. */
TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
return;
}
}