nvmf: Subsystem pauses only pause admin queues
Additionally, the user can specify a namespace to also pause during the
operation. This allows hosts and listeners to be managed, and namespaces to
be added, while I/O to other namespaces continues. Pausing a specific
namespace also allows that namespace to be removed without impacting I/O to
the other namespaces in the subsystem.

Change-Id: I364336df16df92fe2069114674cb7a68076de6fb
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Signed-off-by: Jim Harris <james.r.harris@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4997
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
parent 7665710cd4
commit 312a9d603d
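For context, a minimal sketch of the flow this commit enables, mirroring what the `nvmf_subsystem_remove_ns` RPC handler below now does: pause only the target namespace (plus admin queues), remove it, then resume. The context struct and function names are illustrative, and the sketch assumes the `spdk_nvmf_subsystem_state_change_done` callback signature of `(subsystem, cb_arg, status)`:

#include <errno.h>
#include <stdlib.h>

#include "spdk/nvmf.h"

/* Illustrative context; not part of this commit. */
struct remove_ns_ctx {
	uint32_t nsid;
};

static void
resume_done(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	free(cb_arg);
}

static void
pause_done(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	struct remove_ns_ctx *ctx = cb_arg;

	if (status == 0) {
		/* Admin queues and namespace `nsid` are quiesced; I/O to all
		 * other namespaces is still running. */
		spdk_nvmf_subsystem_remove_ns(subsystem, ctx->nsid);
	}

	spdk_nvmf_subsystem_resume(subsystem, resume_done, ctx);
}

static int
remove_ns_without_full_pause(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	struct remove_ns_ctx *ctx = calloc(1, sizeof(*ctx));

	if (ctx == NULL) {
		return -ENOMEM;
	}
	ctx->nsid = nsid;

	/* New in this commit: the nsid argument scopes the pause. */
	return spdk_nvmf_subsystem_pause(subsystem, nsid, pause_done, ctx);
}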
@@ -154,6 +154,13 @@ that allowed transport specific options within subsystem.
 
 The `trsvcid` in `nvmf_subsystem_add_listener` RPC is now optional.
 
+Pausing a subsystem now only pauses all admin queues. This allows for the
+management of hosts and listeners, plus the addition of namespaces without a
+full subsystem pause. Additionally, the target now allows for pausing
+individual namespaces within a subsystem. To remove a namespace from a
+subsystem, only the specific namespace must be paused. I/O will continue to
+other namespaces while these operations execute.
+
 ### rpc
 
 A new optional parameter `wait` was added to the RPC `iscsi_create_portal_group`,
@@ -447,7 +447,12 @@ int spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem,
 
 /**
  * Transition an NVMe-oF subsystem from Active to Paused state.
  *
+ * In a paused state, all admin queues are frozen across the whole subsystem. If
+ * a namespace ID is provided, all commands to that namespace are quiesced and incoming
+ * commands for that namespace are queued until the subsystem is resumed.
+ *
  * \param subsystem The NVMe-oF subsystem.
+ * \param nsid The namespace to pause. If 0, pause no namespaces.
  * \param cb_fn A function that will be called once the subsystem has changed state.
  * \param cb_arg Argument passed to cb_fn.
 *
@@ -455,12 +460,15 @@ int spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem,
  * be called on success.
  */
 int spdk_nvmf_subsystem_pause(struct spdk_nvmf_subsystem *subsystem,
+			      uint32_t nsid,
 			      spdk_nvmf_subsystem_state_change_done cb_fn,
 			      void *cb_arg);
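Since only one state change can be in flight per subsystem, `spdk_nvmf_subsystem_pause()` can return `-EBUSY`; the `nvmf_ns_change_msg()` hunk further down retries in exactly this way. A hedged sketch of that retry pattern (the context struct and function names here are illustrative, not part of the API):

#include <errno.h>
#include <stdlib.h>

#include "spdk/nvmf.h"
#include "spdk/thread.h"

/* Illustrative retry context; not part of the public API. */
struct pause_retry_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	uint32_t nsid;
	spdk_nvmf_subsystem_state_change_done cb_fn;
	void *cb_arg;
};

static void
pause_retry_msg(void *arg)
{
	struct pause_retry_ctx *ctx = arg;
	int rc;

	rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->nsid, ctx->cb_fn, ctx->cb_arg);
	if (rc == -EBUSY) {
		/* Another state change is still in progress; this is not a
		 * permanent situation, so try again from the same thread. */
		spdk_thread_send_msg(spdk_get_thread(), pause_retry_msg, arg);
		return;
	}

	free(ctx);
}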
 
 /**
  * Transition an NVMe-oF subsystem from Paused to Active state.
  *
+ * This resumes the entire subsystem, including any paused namespaces.
+ *
  * \param subsystem The NVMe-oF subsystem.
  * \param cb_fn A function that will be called once the subsystem has changed state.
  * \param cb_arg Argument passed to cb_fn.
@@ -613,6 +621,7 @@ const char *spdk_nvmf_host_get_nqn(const struct spdk_nvmf_host *host);
  * This does not start the listener. Use spdk_nvmf_tgt_listen() for that.
  *
  * May only be performed on subsystems in the PAUSED or INACTIVE states.
+ * No namespaces are required to be paused.
  *
  * \param subsystem Subsystem to add listener to.
  * \param trid The address to accept connections from.
@@ -632,6 +641,7 @@ void spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem,
  * spdk_nvmf_tgt_stop_listen().
  *
  * May only be performed on subsystems in the PAUSED or INACTIVE states.
+ * No namespaces are required to be paused.
  *
  * \param subsystem Subsystem to remove listener from.
  * \param trid The address to no longer accept connections from.
@@ -801,6 +811,7 @@ uint32_t spdk_nvmf_subsystem_add_ns_ext(struct spdk_nvmf_subsystem *subsystem,
  * Remove a namespace from a subsystem.
  *
  * May only be performed on subsystems in the PAUSED or INACTIVE states.
+ * Additionally, the namespace must be paused.
  *
  * \param subsystem Subsystem the namespace belongs to.
  * \param nsid Namespace ID to be removed.
107 lib/nvmf/ctrlr.c
@@ -771,7 +771,7 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
 		goto out;
 	}
 
-	sgroup->io_outstanding++;
+	sgroup->mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
 
 	status = _nvmf_ctrlr_connect(req);
@@ -1672,7 +1672,7 @@ nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req)
 	/* AER cmd is an exception */
 	sgroup = &req->qpair->group->sgroups[ctrlr->subsys->id];
 	assert(sgroup != NULL);
-	sgroup->io_outstanding--;
+	sgroup->mgmt_io_outstanding--;
 
 	/* Four asynchronous events are supported for now */
 	if (ctrlr->nr_aer_reqs >= NVMF_MAX_ASYNC_EVENTS) {
@@ -3463,7 +3463,10 @@ _nvmf_request_complete(void *ctx)
 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
 	struct spdk_nvmf_qpair *qpair;
 	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
+	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
 	bool is_aer = false;
+	uint32_t nsid;
+	bool paused;
 
 	rsp->sqid = 0;
 	rsp->status.p = 0;
@@ -3489,15 +3492,40 @@ _nvmf_request_complete(void *ctx)
 
 	/* AER cmd is an exception */
 	if (sgroup && !is_aer) {
-		assert(sgroup->io_outstanding > 0);
-		sgroup->io_outstanding--;
-		if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSING &&
-		    sgroup->io_outstanding == 0) {
-			sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
-			sgroup->cb_fn(sgroup->cb_arg, 0);
-			sgroup->cb_fn = NULL;
-			sgroup->cb_arg = NULL;
+		if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC ||
+				  nvmf_qpair_is_admin_queue(qpair))) {
+			assert(sgroup->mgmt_io_outstanding > 0);
+			sgroup->mgmt_io_outstanding--;
+		} else {
+			nsid = req->cmd->nvme_cmd.nsid;
+
+			/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
+			if (spdk_likely(nsid - 1 < sgroup->num_ns)) {
+				sgroup->ns_info[nsid - 1].io_outstanding--;
+			}
+		}
+
+		if (spdk_unlikely(sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSING &&
+				  sgroup->mgmt_io_outstanding == 0)) {
+			paused = true;
+			for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
+				ns_info = &sgroup->ns_info[nsid];
+
+				if (ns_info->state == SPDK_NVMF_SUBSYSTEM_PAUSING &&
+				    ns_info->io_outstanding > 0) {
+					paused = false;
+					break;
+				}
+			}
+
+			if (paused) {
+				sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
+				sgroup->cb_fn(sgroup->cb_arg, 0);
+				sgroup->cb_fn = NULL;
+				sgroup->cb_arg = NULL;
+			}
 		}
+
 	}
 
 	nvmf_qpair_request_cleanup(qpair);
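As an aside, the bounds check in the NOTE above also works standalone; a compilable illustration (not SPDK code):

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t num_ns = 4;
	uint32_t nsid;

	/* For unsigned nsid, `nsid - 1 < num_ns` rejects both nsid == 0
	 * (0 - 1 wraps to UINT32_MAX) and nsid > num_ns with a single
	 * comparison. */
	nsid = 0;
	assert(!(nsid - 1 < num_ns)); /* wraps to UINT32_MAX, rejected */
	nsid = 1;
	assert(nsid - 1 < num_ns);    /* maps to ns_info[0] */
	nsid = 4;
	assert(nsid - 1 < num_ns);    /* maps to ns_info[3] */
	nsid = 5;
	assert(!(nsid - 1 < num_ns)); /* out of range, rejected */

	return 0;
}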
@@ -3532,7 +3560,7 @@ spdk_nvmf_request_exec_fabrics(struct spdk_nvmf_request *req)
 	}
 
 	assert(sgroup != NULL);
-	sgroup->io_outstanding++;
+	sgroup->mgmt_io_outstanding++;
 
 	/* Place the request on the outstanding list so we can keep track of it */
 	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
@@ -3550,7 +3578,9 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
 {
 	struct spdk_nvmf_qpair *qpair = req->qpair;
 	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
+	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
 	enum spdk_nvmf_request_exec_status status;
+	uint32_t nsid;
 
 	if (qpair->ctrlr) {
 		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
@@ -3561,22 +3591,55 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
 
 	/* Check if the subsystem is paused (if there is a subsystem) */
 	if (sgroup != NULL) {
-		if (sgroup->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
-			/* The subsystem is not currently active. Queue this request. */
-			TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
-			return;
+		if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC ||
+				  nvmf_qpair_is_admin_queue(qpair))) {
+			if (sgroup->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
+				/* The subsystem is not currently active. Queue this request. */
+				TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
+				return;
+			}
+			sgroup->mgmt_io_outstanding++;
+		} else {
+			nsid = req->cmd->nvme_cmd.nsid;
+
+			/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
+			if (spdk_unlikely(nsid - 1 >= sgroup->num_ns)) {
+				req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+				req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
+				req->rsp->nvme_cpl.status.dnr = 1;
+				TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
+				_nvmf_request_complete(req);
+				return;
+			}
+
+			ns_info = &sgroup->ns_info[nsid - 1];
+			if (ns_info->channel == NULL) {
+				/* This can happen if host sends I/O to a namespace that is
+				 * in the process of being added, but before the full addition
+				 * process is complete. Report invalid namespace in that case.
+				 */
+				req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+				req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
+				req->rsp->nvme_cpl.status.dnr = 1;
+				TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
+				ns_info->io_outstanding++;
+				_nvmf_request_complete(req);
+				return;
+			}
+
+			if (ns_info->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
+				/* The namespace is not currently active. Queue this request. */
+				TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
+				return;
+			}
+			ns_info->io_outstanding++;
 		}
 	}
 
 	if (qpair->state != SPDK_NVMF_QPAIR_ACTIVE) {
 		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
 		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
 		/* Place the request on the outstanding list so we can keep track of it */
 		TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
 		/* Still increment io_outstanding because request_complete decrements it */
 		if (sgroup != NULL) {
 			sgroup->io_outstanding++;
 		}
 		_nvmf_request_complete(req);
 		return;
 	}
@@ -3585,10 +3648,6 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
 		spdk_nvme_print_command(qpair->qid, &req->cmd->nvme_cmd);
 	}
 
-	if (sgroup) {
-		sgroup->io_outstanding++;
-	}
-
 	/* Place the request on the outstanding list so we can keep track of it */
 	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
 
@@ -3151,6 +3151,7 @@ nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
 				    ctx->trid.traddr);
 			free(ctx);
 		} else if (spdk_nvmf_subsystem_pause(subsystem,
+						     0,
 						     nvmf_fc_adm_subsystem_paused_cb,
 						     ctx)) {
 			SPDK_ERRLOG("Failed to pause subsystem: %s\n",
@@ -1308,6 +1308,7 @@ nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
 {
 	int rc = 0;
 	struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id];
+	uint32_t i;
 
 	TAILQ_INIT(&sgroup->queued);
 
@@ -1318,6 +1319,11 @@ nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
 	}
 
 	sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
 
+	for (i = 0; i < sgroup->num_ns; i++) {
+		sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+	}
+
 fini:
 	if (cb_fn) {
 		cb_fn(cb_arg, rc);
@@ -1401,6 +1407,7 @@ nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
 	struct spdk_nvmf_subsystem_poll_group *sgroup;
 	struct nvmf_qpair_disconnect_many_ctx *ctx;
 	int rc = 0;
+	uint32_t i;
 
 	ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));
 
@@ -1417,6 +1424,10 @@ nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
 	sgroup = &group->sgroups[subsystem->id];
 	sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
 
+	for (i = 0; i < sgroup->num_ns; i++) {
+		sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
+	}
+
 	TAILQ_FOREACH(qpair, &group->qpairs, link) {
 		if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) {
 			break;
@@ -1445,9 +1456,11 @@ fini:
 void
 nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
 				struct spdk_nvmf_subsystem *subsystem,
+				uint32_t nsid,
 				spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
 {
 	struct spdk_nvmf_subsystem_poll_group *sgroup;
+	struct spdk_nvmf_subsystem_pg_ns_info *ns_info = NULL;
 	int rc = 0;
 
 	if (subsystem->id >= group->num_sgroups) {
@@ -1466,7 +1479,13 @@ nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
 	}
 	sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSING;
 
-	if (sgroup->io_outstanding > 0) {
+	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
+	if (nsid - 1 < sgroup->num_ns) {
+		ns_info = &sgroup->ns_info[nsid - 1];
+		ns_info->state = SPDK_NVMF_SUBSYSTEM_PAUSING;
+	}
+
+	if (sgroup->mgmt_io_outstanding > 0) {
 		assert(sgroup->cb_fn == NULL);
 		sgroup->cb_fn = cb_fn;
 		assert(sgroup->cb_arg == NULL);
@@ -1474,7 +1493,15 @@ nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
 		return;
 	}
 
-	assert(sgroup->io_outstanding == 0);
+	if (ns_info != NULL && ns_info->io_outstanding > 0) {
+		assert(sgroup->cb_fn == NULL);
+		sgroup->cb_fn = cb_fn;
+		assert(sgroup->cb_arg == NULL);
+		sgroup->cb_arg = cb_arg;
+		return;
+	}
+
+	assert(sgroup->mgmt_io_outstanding == 0);
 	sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
 fini:
 	if (cb_fn) {
@@ -1490,6 +1517,7 @@ nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
 	struct spdk_nvmf_request *req, *tmp;
 	struct spdk_nvmf_subsystem_poll_group *sgroup;
 	int rc = 0;
+	uint32_t i;
 
 	if (subsystem->id >= group->num_sgroups) {
 		rc = -1;
@@ -1507,6 +1535,10 @@ nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
 		goto fini;
 	}
 
+	for (i = 0; i < sgroup->num_ns; i++) {
+		sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+	}
+
 	sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
 
 	/* Release all queued requests */
@@ -132,6 +132,10 @@ struct spdk_nvmf_subsystem_pg_ns_info {
 	/* Host ID for the registrants with the namespace */
 	struct spdk_uuid reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
 	uint64_t num_blocks;
+
+	/* I/O outstanding to this namespace */
+	uint64_t io_outstanding;
+	enum spdk_nvmf_subsystem_state state;
 };
 
 typedef void(*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);
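The split into two counters drives the pause state machine: a poll group finishes pausing only once admin/fabrics traffic has drained and any namespace being paused has no I/O left. Restated as a standalone predicate (illustrative, not SPDK code) that mirrors the completion check in the _nvmf_request_complete() hunk above:

#include <stdbool.h>
#include <stdint.h>

enum pg_state { PG_ACTIVE, PG_PAUSING, PG_PAUSED };

struct pg_ns {
	uint64_t io_outstanding;
	enum pg_state state;
};

/* Management commands must drain, and any namespace still PAUSING must
 * drain too, before the poll group reports PAUSED. */
static bool
poll_group_pause_complete(uint64_t mgmt_io_outstanding,
			  const struct pg_ns *ns_info, uint32_t num_ns)
{
	uint32_t i;

	if (mgmt_io_outstanding > 0) {
		return false;
	}

	for (i = 0; i < num_ns; i++) {
		if (ns_info[i].state == PG_PAUSING && ns_info[i].io_outstanding > 0) {
			return false;
		}
	}

	return true;
}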
@@ -141,7 +145,8 @@ struct spdk_nvmf_subsystem_poll_group {
 	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
 	uint32_t num_ns;
 
 	uint64_t io_outstanding;
+	/* Number of ADMIN and FABRICS requests outstanding */
+	uint64_t mgmt_io_outstanding;
 	spdk_nvmf_poll_group_mod_done cb_fn;
 	void *cb_arg;
 
@@ -309,7 +314,9 @@ int nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
 void nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
 		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
 void nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
-		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
+		struct spdk_nvmf_subsystem *subsystem,
+		uint32_t nsid,
+		spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
 void nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
 		struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
 
@@ -858,7 +858,7 @@ rpc_nvmf_subsystem_add_listener(struct spdk_jsonrpc_request *request,
 	spdk_nvmf_listen_opts_init(&ctx->opts, sizeof(ctx->opts));
 	ctx->opts.transport_specific = params;
 
-	rc = spdk_nvmf_subsystem_pause(subsystem, nvmf_rpc_listen_paused, ctx);
+	rc = spdk_nvmf_subsystem_pause(subsystem, 0, nvmf_rpc_listen_paused, ctx);
 	if (rc != 0) {
 		if (rc == -EBUSY) {
 			spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
@@ -937,7 +937,7 @@ rpc_nvmf_subsystem_remove_listener(struct spdk_jsonrpc_request *request,
 
 	ctx->op = NVMF_RPC_LISTEN_REMOVE;
 
-	rc = spdk_nvmf_subsystem_pause(subsystem, nvmf_rpc_listen_paused, ctx);
+	rc = spdk_nvmf_subsystem_pause(subsystem, 0, nvmf_rpc_listen_paused, ctx);
 	if (rc != 0) {
 		if (rc == -EBUSY) {
 			spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
@@ -1044,7 +1044,7 @@ rpc_nvmf_subsystem_listener_set_ana_state(struct spdk_jsonrpc_request *request,
 
 	ctx->op = NVMF_RPC_LISTEN_SET_ANA_STATE;
 
-	if (spdk_nvmf_subsystem_pause(subsystem, nvmf_rpc_listen_paused, ctx)) {
+	if (spdk_nvmf_subsystem_pause(subsystem, 0, nvmf_rpc_listen_paused, ctx)) {
 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
 						 "Internal error");
 		nvmf_rpc_listener_ctx_free(ctx);
@@ -1248,7 +1248,7 @@ rpc_nvmf_subsystem_add_ns(struct spdk_jsonrpc_request *request,
 		return;
 	}
 
-	rc = spdk_nvmf_subsystem_pause(subsystem, nvmf_rpc_ns_paused, ctx);
+	rc = spdk_nvmf_subsystem_pause(subsystem, ctx->ns_params.nsid, nvmf_rpc_ns_paused, ctx);
 	if (rc != 0) {
 		if (rc == -EBUSY) {
 			spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
@@ -1368,7 +1368,7 @@ rpc_nvmf_subsystem_remove_ns(struct spdk_jsonrpc_request *request,
 		return;
 	}
 
-	rc = spdk_nvmf_subsystem_pause(subsystem, nvmf_rpc_remove_ns_paused, ctx);
+	rc = spdk_nvmf_subsystem_pause(subsystem, ctx->nsid, nvmf_rpc_remove_ns_paused, ctx);
 	if (rc != 0) {
 		if (rc == -EBUSY) {
 			spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
@@ -2444,7 +2444,7 @@ _rpc_nvmf_subsystem_query(struct spdk_jsonrpc_request *request,
 
 	ctx->subsystem = subsystem;
 
-	if (spdk_nvmf_subsystem_pause(subsystem, cb_fn, ctx)) {
+	if (spdk_nvmf_subsystem_pause(subsystem, 0, cb_fn, ctx)) {
 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
 						 "Internal error");
 		free_rpc_subsystem_query_ctx(ctx);
@@ -471,14 +471,14 @@ nvmf_subsystem_set_state(struct spdk_nvmf_subsystem *subsystem,
 }
 
 struct subsystem_state_change_ctx {
 	struct spdk_nvmf_subsystem *subsystem;
+	uint16_t nsid;
 
 	enum spdk_nvmf_subsystem_state original_state;
 	enum spdk_nvmf_subsystem_state requested_state;
 
 	spdk_nvmf_subsystem_state_change_done cb_fn;
 	void *cb_arg;
 };
 
 static void
@@ -566,7 +566,8 @@ subsystem_state_change_on_pg(struct spdk_io_channel_iter *i)
 		}
 		break;
 	case SPDK_NVMF_SUBSYSTEM_PAUSED:
-		nvmf_poll_group_pause_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
+		nvmf_poll_group_pause_subsystem(group, ctx->subsystem, ctx->nsid, subsystem_state_change_continue,
+						i);
 		break;
 	default:
 		assert(false);
@@ -576,6 +577,7 @@ subsystem_state_change_on_pg(struct spdk_io_channel_iter *i)
 
 static int
 nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem,
+			    uint32_t nsid,
 			    enum spdk_nvmf_subsystem_state requested_state,
 			    spdk_nvmf_subsystem_state_change_done cb_fn,
 			    void *cb_arg)
@@ -615,6 +617,7 @@ nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem,
 	}
 
 	ctx->subsystem = subsystem;
+	ctx->nsid = nsid;
 	ctx->requested_state = requested_state;
 	ctx->cb_fn = cb_fn;
 	ctx->cb_arg = cb_arg;
@@ -632,7 +635,7 @@ spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem,
 			  spdk_nvmf_subsystem_state_change_done cb_fn,
 			  void *cb_arg)
 {
-	return nvmf_subsystem_state_change(subsystem, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
+	return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
 }
 
 int
@@ -640,15 +643,16 @@ spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem,
 			 spdk_nvmf_subsystem_state_change_done cb_fn,
 			 void *cb_arg)
 {
-	return nvmf_subsystem_state_change(subsystem, SPDK_NVMF_SUBSYSTEM_INACTIVE, cb_fn, cb_arg);
+	return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_INACTIVE, cb_fn, cb_arg);
 }
 
 int
 spdk_nvmf_subsystem_pause(struct spdk_nvmf_subsystem *subsystem,
+			  uint32_t nsid,
 			  spdk_nvmf_subsystem_state_change_done cb_fn,
 			  void *cb_arg)
 {
-	return nvmf_subsystem_state_change(subsystem, SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn, cb_arg);
+	return nvmf_subsystem_state_change(subsystem, nsid, SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn, cb_arg);
 }
 
 int
@@ -656,7 +660,7 @@ spdk_nvmf_subsystem_resume(struct spdk_nvmf_subsystem *subsystem,
 			   spdk_nvmf_subsystem_state_change_done cb_fn,
 			   void *cb_arg)
 {
-	return nvmf_subsystem_state_change(subsystem, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
+	return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
 }
 
 struct spdk_nvmf_subsystem *
@@ -1219,7 +1223,7 @@ nvmf_ns_change_msg(void *ns_ctx)
 	struct subsystem_ns_change_ctx *ctx = ns_ctx;
 	int rc;
 
-	rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->cb_fn, ctx);
+	rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->nsid, ctx->cb_fn, ctx);
 	if (rc) {
 		if (rc == -EBUSY) {
 			/* Try again, this is not a permanent situation. */
@@ -1251,7 +1255,7 @@ nvmf_ns_hot_remove(void *remove_ctx)
 	ns_ctx->nsid = ns->opts.nsid;
 	ns_ctx->cb_fn = _nvmf_ns_hot_remove;
 
-	rc = spdk_nvmf_subsystem_pause(ns->subsystem, _nvmf_ns_hot_remove, ns_ctx);
+	rc = spdk_nvmf_subsystem_pause(ns->subsystem, ns_ctx->nsid, _nvmf_ns_hot_remove, ns_ctx);
 	if (rc) {
 		if (rc == -EBUSY) {
 			/* Try again, this is not a permanent situation. */
@@ -1294,7 +1298,7 @@ nvmf_ns_resize(void *event_ctx)
 	ns_ctx->nsid = ns->opts.nsid;
 	ns_ctx->cb_fn = _nvmf_ns_resize;
 
-	rc = spdk_nvmf_subsystem_pause(ns->subsystem, _nvmf_ns_resize, ns_ctx);
+	rc = spdk_nvmf_subsystem_pause(ns->subsystem, ns_ctx->nsid, _nvmf_ns_resize, ns_ctx);
 	if (rc) {
 		if (rc == -EBUSY) {
 			/* Try again, this is not a permanent situation. */
@@ -39,6 +39,7 @@ run_test "nvmf_fio" test/nvmf/target/fio.sh "${TEST_ARGS[@]}"
 run_test "nvmf_bdevio" test/nvmf/target/bdevio.sh "${TEST_ARGS[@]}"
 run_test "nvmf_invalid" test/nvmf/target/invalid.sh "${TEST_ARGS[@]}"
 run_test "nvmf_abort" test/nvmf/target/abort.sh "${TEST_ARGS[@]}"
+run_test "nvmf_ns_hotplug_stress" test/nvmf/target/ns_hotplug_stress.sh "${TEST_ARGS[@]}"
 
 if grep -q '#define SPDK_CONFIG_VFIO_USER 1' $rootdir/include/spdk/config.h; then
 	run_test "nvmf_vfio_user" test/nvmf/target/nvmf_vfio_user.sh "${TEST_ARGS[@]}"
38 test/nvmf/target/ns_hotplug_stress.sh (new executable file)
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/test/common/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+rpc_py="$rootdir/scripts/rpc.py"
+
+nvmftestinit
+nvmfappstart -m 0xE
+
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 -m 10
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+$rpc_py bdev_malloc_create 32 512 -b Malloc0
+$rpc_py bdev_delay_create -b Malloc0 -d Delay0 -r 1000000 -t 1000000 -w 1000000 -n 1000000
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Delay0
+$rpc_py bdev_null_create NULL1 1000 512
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 NULL1
+
+# Note: use -Q option to rate limit the error messages that perf will spew due to the
+# namespace hotplugs
+$SPDK_EXAMPLE_DIR/perf -c 0x1 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" \
+	-t 30 -q 128 -w randread -o 512 -Q 1000 &
+PERF_PID=$!
+
+while kill -0 $PERF_PID; do
+	$rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode1 1
+	$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Delay0
+done
+
+wait $PERF_PID
+
+trap - SIGINT SIGTERM EXIT
+
+nvmftestfini
@@ -437,14 +437,14 @@ test_connect(void)
 
 	/* Valid admin connect command */
 	memset(&rsp, 0, sizeof(rsp));
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
 	CU_ASSERT(qpair.ctrlr != NULL);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
 	free(qpair.ctrlr);
@@ -453,14 +453,14 @@ test_connect(void)
 	/* Valid admin connect command with kato = 0 */
 	cmd.connect_cmd.kato = 0;
 	memset(&rsp, 0, sizeof(rsp));
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
 	CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
 	free(qpair.ctrlr);
 	qpair.ctrlr = NULL;
@@ -596,21 +596,21 @@ test_connect(void)
 	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
 	cmd.connect_cmd.qid = 1;
 	cmd.connect_cmd.sqsize = 63;
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
 	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
 	CU_ASSERT(qpair.ctrlr == &ctrlr);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 	qpair.ctrlr = NULL;
 	cmd.connect_cmd.sqsize = 31;
 
 	/* Non-existent controller */
 	memset(&rsp, 0, sizeof(rsp));
 	MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
@@ -620,14 +620,14 @@ test_connect(void)
 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
 	CU_ASSERT(qpair.ctrlr == NULL);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 	MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
 
 	/* I/O connect to discovery controller */
 	memset(&rsp, 0, sizeof(rsp));
 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
@@ -637,7 +637,7 @@ test_connect(void)
 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
 	CU_ASSERT(qpair.ctrlr == NULL);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 
 	/* I/O connect to discovery controller with keep-alive-timeout != 0 */
 	cmd.connect_cmd.qid = 0;
@@ -645,7 +645,7 @@ test_connect(void)
 	memset(&rsp, 0, sizeof(rsp));
 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
@@ -653,7 +653,7 @@ test_connect(void)
 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
 	CU_ASSERT(qpair.ctrlr != NULL);
 	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
 	free(qpair.ctrlr);
@@ -666,7 +666,7 @@ test_connect(void)
 	memset(&rsp, 0, sizeof(rsp));
 	subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
 	subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
@@ -674,7 +674,7 @@ test_connect(void)
 	CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
 	CU_ASSERT(qpair.ctrlr != NULL);
 	CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 	nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
 	spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
 	free(qpair.ctrlr);
@@ -686,7 +686,7 @@ test_connect(void)
 	/* I/O connect to disabled controller */
 	memset(&rsp, 0, sizeof(rsp));
 	ctrlr.vcprop.cc.bits.en = 0;
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
@@ -696,13 +696,13 @@ test_connect(void)
 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
 	CU_ASSERT(qpair.ctrlr == NULL);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 	ctrlr.vcprop.cc.bits.en = 1;
 
 	/* I/O connect with invalid IOSQES */
 	memset(&rsp, 0, sizeof(rsp));
 	ctrlr.vcprop.cc.bits.iosqes = 3;
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
@@ -712,13 +712,13 @@ test_connect(void)
 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
 	CU_ASSERT(qpair.ctrlr == NULL);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 	ctrlr.vcprop.cc.bits.iosqes = 6;
 
 	/* I/O connect with invalid IOCQES */
 	memset(&rsp, 0, sizeof(rsp));
 	ctrlr.vcprop.cc.bits.iocqes = 3;
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
@@ -728,7 +728,7 @@ test_connect(void)
 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
 	CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
 	CU_ASSERT(qpair.ctrlr == NULL);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 	ctrlr.vcprop.cc.bits.iocqes = 4;
 
 	/* I/O connect with too many existing qpairs */
@@ -736,7 +736,7 @@ test_connect(void)
 	spdk_bit_array_set(ctrlr.qpair_mask, 0);
 	spdk_bit_array_set(ctrlr.qpair_mask, 1);
 	spdk_bit_array_set(ctrlr.qpair_mask, 2);
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
@@ -744,7 +744,7 @@ test_connect(void)
 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
 	CU_ASSERT(qpair.ctrlr == NULL);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 	spdk_bit_array_clear(ctrlr.qpair_mask, 0);
 	spdk_bit_array_clear(ctrlr.qpair_mask, 1);
 	spdk_bit_array_clear(ctrlr.qpair_mask, 2);
@@ -756,7 +756,7 @@ test_connect(void)
 	qpair2.qid = 1;
 	spdk_bit_array_set(ctrlr.qpair_mask, 1);
 	cmd.connect_cmd.qid = 1;
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
@@ -764,20 +764,20 @@ test_connect(void)
 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
 	CU_ASSERT(qpair.ctrlr == NULL);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 
 	/* I/O connect when admin qpair is being destroyed */
 	admin_qpair.group = NULL;
 	admin_qpair.state = SPDK_NVMF_QPAIR_DEACTIVATING;
 	memset(&rsp, 0, sizeof(rsp));
-	sgroups[subsystem.id].io_outstanding++;
+	sgroups[subsystem.id].mgmt_io_outstanding++;
 	TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
 	rc = nvmf_ctrlr_cmd_connect(&req);
 	poll_threads();
 	CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
 	CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
 	CU_ASSERT(qpair.ctrlr == NULL);
-	CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+	CU_ASSERT(sgroups[subsystem.id].mgmt_io_outstanding == 0);
 	admin_qpair.group = &group;
 	admin_qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
 
@@ -1615,6 +1615,7 @@ test_fused_compare_and_write(void)
 	struct spdk_nvmf_poll_group group = {};
 	struct spdk_nvmf_subsystem_poll_group sgroups = {};
+	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
 	struct spdk_io_channel io_ch = {};
 
 	ns.bdev = &bdev;
 
@@ -1633,6 +1634,8 @@ test_fused_compare_and_write(void)
 	group.num_sgroups = 1;
 	sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+	sgroups.num_ns = 1;
+	ns_info.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+	ns_info.channel = &io_ch;
+	sgroups.ns_info = &ns_info;
 	TAILQ_INIT(&sgroups.queued);
 	group.sgroups = &sgroups;
@@ -1740,12 +1743,12 @@ test_multi_async_event_reqs(void)
 	}
 
 	/* Target can store NVMF_MAX_ASYNC_EVENTS reqs */
-	sgroups.io_outstanding = NVMF_MAX_ASYNC_EVENTS;
+	sgroups.mgmt_io_outstanding = NVMF_MAX_ASYNC_EVENTS;
 	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
 		CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
 		CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
 	}
-	CU_ASSERT(sgroups.io_outstanding == 0);
+	CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
 
 	/* Exceeding the NVMF_MAX_ASYNC_EVENTS reports error */
 	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
@@ -1881,14 +1884,14 @@ test_multi_async_events(void)
 
 		TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
 
-		sgroups.io_outstanding = 1;
+		sgroups.mgmt_io_outstanding = 1;
 		if (i < 3) {
 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
-			CU_ASSERT(sgroups.io_outstanding == 0);
+			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
			CU_ASSERT(ctrlr.nr_aer_reqs == 0);
 		} else {
 			CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
-			CU_ASSERT(sgroups.io_outstanding == 0);
+			CU_ASSERT(sgroups.mgmt_io_outstanding == 0);
 			CU_ASSERT(ctrlr.nr_aer_reqs == 1);
 		}
 	}
@@ -225,6 +225,7 @@ nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
 void
 nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
 				struct spdk_nvmf_subsystem *subsystem,
+				uint32_t nsid,
 				spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
 {
 }
@@ -165,6 +165,7 @@ nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
 void
 nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
 				struct spdk_nvmf_subsystem *subsystem,
+				uint32_t nsid,
 				spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
 {
 }