nvmf: Queue Connect command when subsystem is paused

Currently SPDK rejects the Connect command when the subsystem is not
active. This change allows the Connect command to be queued and
executed when the subsystem returns to the active state. To queue the
command we need to know the subsystem_poll_group; in the current
implementation this poll_group is known only once the controller has
already been created. To get the poll_group for the Connect command we
can retrieve the subsystem subnqn, find the subsystem, and look up the
poll_group by subsystem->id.

Increment subsystem_poll_group->io_outstanding even for the Connect
command in order to prevent the subsystem from changing state during
the connection process. Update spdk_nvmf_request_complete to decrement
io_outstanding for the Connect command as well.

Fixes #1256

Change-Id: I724abb911696d7234a9c9d27458eba24739b26fd
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1273
Reviewed-by: Jacek Kalwas <jacek.kalwas@intel.com>
Reviewed-by: <dongx.yi@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Alexey Marchuk 2020-03-15 13:51:13 +03:00 committed by Tomasz Zawadzki
parent d643b47d9b
commit 1c5444d623
2 changed files with 61 additions and 6 deletions

View File

@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -2809,14 +2809,41 @@ spdk_nvmf_request_free(struct spdk_nvmf_request *req)
return 0;
}
/* Return true if this request carries a Fabrics Connect command
 * (opcode SPDK_NVME_OPC_FABRIC with fctype CONNECT).
 */
static inline bool
nvmf_request_is_fabric_connect(struct spdk_nvmf_request *req)
{
	if (req->cmd->nvmf_cmd.opcode != SPDK_NVME_OPC_FABRIC) {
		return false;
	}

	return req->cmd->nvmf_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT;
}
/* Resolve the subsystem poll group for a not-yet-connected qpair by
 * parsing the subnqn out of the Fabrics Connect data and looking the
 * subsystem up in the transport's target.
 *
 * Returns NULL when no subsystem with the given subnqn exists.
 * Must only be called on a Connect request whose qpair has no
 * controller yet (both preconditions are asserted).
 */
static struct spdk_nvmf_subsystem_poll_group *
nvmf_subsystem_pg_from_connect_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_data *connect_data;
	struct spdk_nvmf_subsystem *subsystem;

	assert(nvmf_request_is_fabric_connect(req));
	assert(req->qpair->ctrlr == NULL);

	connect_data = req->data;
	subsystem = spdk_nvmf_tgt_find_subsystem(req->qpair->transport->tgt,
						 connect_data->subnqn);
	if (subsystem == NULL) {
		return NULL;
	}

	/* Poll groups are indexed by subsystem id within the qpair's group. */
	return &req->qpair->group->sgroups[subsystem->id];
}
int
spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
{
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
struct spdk_nvmf_qpair *qpair;
struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
bool is_connect = req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC &&
req->cmd->nvmf_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT;
bool is_aer = false;
rsp->sqid = 0;
rsp->status.p = 0;
@ -2825,6 +2852,9 @@ spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
qpair = req->qpair;
if (qpair->ctrlr) {
sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
is_aer = qpair->ctrlr->aer_req == req;
} else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) {
sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
}
SPDK_DEBUGLOG(SPDK_LOG_NVMF,
@ -2836,8 +2866,8 @@ spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
SPDK_ERRLOG("Transport request completion error!\n");
}
/* AER cmd and fabric connect are exceptions */
if (sgroup != NULL && qpair->ctrlr->aer_req != req && !is_connect) {
/* AER cmd is an exception */
if (sgroup && !is_aer) {
assert(sgroup->io_outstanding > 0);
sgroup->io_outstanding--;
if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSING &&
@ -2948,6 +2978,8 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
if (qpair->ctrlr) {
sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
} else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) {
sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
}
if (qpair->state != SPDK_NVMF_QPAIR_ACTIVE) {

View File

@ -361,7 +361,6 @@ test_connect(void)
snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
sgroups[subsystem.id].io_outstanding = 5;
group.sgroups = sgroups;
memset(&cmd, 0, sizeof(cmd));
@ -387,12 +386,14 @@ test_connect(void)
/* Valid admin connect command */
memset(&rsp, 0, sizeof(rsp));
sgroups[subsystem.id].io_outstanding++;
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
rc = spdk_nvmf_ctrlr_cmd_connect(&req);
poll_threads();
CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
CU_ASSERT(qpair.ctrlr != NULL);
CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
spdk_nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
free(qpair.ctrlr);
@ -401,12 +402,14 @@ test_connect(void)
/* Valid admin connect command with kato = 0 */
cmd.connect_cmd.kato = 0;
memset(&rsp, 0, sizeof(rsp));
sgroups[subsystem.id].io_outstanding++;
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
rc = spdk_nvmf_ctrlr_cmd_connect(&req);
poll_threads();
CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
free(qpair.ctrlr);
qpair.ctrlr = NULL;
@ -542,18 +545,21 @@ test_connect(void)
MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr);
cmd.connect_cmd.qid = 1;
cmd.connect_cmd.sqsize = 63;
sgroups[subsystem.id].io_outstanding++;
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
rc = spdk_nvmf_ctrlr_cmd_connect(&req);
poll_threads();
CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
CU_ASSERT(qpair.ctrlr == &ctrlr);
CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
qpair.ctrlr = NULL;
cmd.connect_cmd.sqsize = 31;
/* Non-existent controller */
memset(&rsp, 0, sizeof(rsp));
MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, NULL);
sgroups[subsystem.id].io_outstanding++;
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
rc = spdk_nvmf_ctrlr_cmd_connect(&req);
poll_threads();
@ -563,12 +569,14 @@ test_connect(void)
CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
CU_ASSERT(qpair.ctrlr == NULL);
CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr);
/* I/O connect to discovery controller */
memset(&rsp, 0, sizeof(rsp));
subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
sgroups[subsystem.id].io_outstanding++;
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
rc = spdk_nvmf_ctrlr_cmd_connect(&req);
poll_threads();
@ -578,6 +586,7 @@ test_connect(void)
CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
CU_ASSERT(qpair.ctrlr == NULL);
CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
/* I/O connect to discovery controller with keep-alive-timeout != 0 */
cmd.connect_cmd.qid = 0;
@ -585,6 +594,7 @@ test_connect(void)
memset(&rsp, 0, sizeof(rsp));
subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
sgroups[subsystem.id].io_outstanding++;
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
rc = spdk_nvmf_ctrlr_cmd_connect(&req);
poll_threads();
@ -592,6 +602,7 @@ test_connect(void)
CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
CU_ASSERT(qpair.ctrlr != NULL);
CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
spdk_nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
free(qpair.ctrlr);
@ -604,6 +615,7 @@ test_connect(void)
memset(&rsp, 0, sizeof(rsp));
subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
sgroups[subsystem.id].io_outstanding++;
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
rc = spdk_nvmf_ctrlr_cmd_connect(&req);
poll_threads();
@ -611,6 +623,7 @@ test_connect(void)
CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
CU_ASSERT(qpair.ctrlr != NULL);
CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
spdk_nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
free(qpair.ctrlr);
@ -622,6 +635,7 @@ test_connect(void)
/* I/O connect to disabled controller */
memset(&rsp, 0, sizeof(rsp));
ctrlr.vcprop.cc.bits.en = 0;
sgroups[subsystem.id].io_outstanding++;
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
rc = spdk_nvmf_ctrlr_cmd_connect(&req);
poll_threads();
@ -631,11 +645,13 @@ test_connect(void)
CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
CU_ASSERT(qpair.ctrlr == NULL);
CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
ctrlr.vcprop.cc.bits.en = 1;
/* I/O connect with invalid IOSQES */
memset(&rsp, 0, sizeof(rsp));
ctrlr.vcprop.cc.bits.iosqes = 3;
sgroups[subsystem.id].io_outstanding++;
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
rc = spdk_nvmf_ctrlr_cmd_connect(&req);
poll_threads();
@ -645,11 +661,13 @@ test_connect(void)
CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
CU_ASSERT(qpair.ctrlr == NULL);
CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
ctrlr.vcprop.cc.bits.iosqes = 6;
/* I/O connect with invalid IOCQES */
memset(&rsp, 0, sizeof(rsp));
ctrlr.vcprop.cc.bits.iocqes = 3;
sgroups[subsystem.id].io_outstanding++;
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
rc = spdk_nvmf_ctrlr_cmd_connect(&req);
poll_threads();
@ -659,6 +677,7 @@ test_connect(void)
CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
CU_ASSERT(qpair.ctrlr == NULL);
CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
ctrlr.vcprop.cc.bits.iocqes = 4;
/* I/O connect with too many existing qpairs */
@ -666,6 +685,7 @@ test_connect(void)
spdk_bit_array_set(ctrlr.qpair_mask, 0);
spdk_bit_array_set(ctrlr.qpair_mask, 1);
spdk_bit_array_set(ctrlr.qpair_mask, 2);
sgroups[subsystem.id].io_outstanding++;
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
rc = spdk_nvmf_ctrlr_cmd_connect(&req);
poll_threads();
@ -673,6 +693,7 @@ test_connect(void)
CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
CU_ASSERT(qpair.ctrlr == NULL);
CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
spdk_bit_array_clear(ctrlr.qpair_mask, 0);
spdk_bit_array_clear(ctrlr.qpair_mask, 1);
spdk_bit_array_clear(ctrlr.qpair_mask, 2);
@ -684,6 +705,7 @@ test_connect(void)
qpair2.qid = 1;
spdk_bit_array_set(ctrlr.qpair_mask, 1);
cmd.connect_cmd.qid = 1;
sgroups[subsystem.id].io_outstanding++;
TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
rc = spdk_nvmf_ctrlr_cmd_connect(&req);
poll_threads();
@ -691,6 +713,7 @@ test_connect(void)
CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
CU_ASSERT(qpair.ctrlr == NULL);
CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
/* Clean up globals */
MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);