nvmf: Send all fabrics and admin commands to master core

This prepares us to fan out I/O qpairs to other cores

Change-Id: I3e9a60226cddf3ccd26b7ad121775ee36f07e6a9
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/375480
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
commit 0ab300f872
parent 3c3c9da4ea
Authored by Ben Walker on 2017-08-23 11:32:09 -07:00; committed by Jim Harris
5 changed files with 105 additions and 20 deletions
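For context, the mechanism behind both halves of this change is SPDK's thread message passing: spdk_nvmf_request_exec() forwards fabrics and admin requests to the target's master thread with spdk_thread_send_msg(), and spdk_nvmf_request_complete() sends the completion back to the thread recorded on the qpair at CONNECT time. Below is a minimal sketch of that round trip, outside the nvmf code; the my_* types and functions are hypothetical stand-ins, not SPDK APIs — only spdk_get_thread() and spdk_thread_send_msg() from spdk/io_channel.h are real calls, as used in the diffs that follow.

	#include <assert.h>

	#include "spdk/io_channel.h"

	/* Hypothetical request/qpair types for illustration only. */
	struct my_qpair {
		struct spdk_thread	*thread;	/* thread that polls this qpair */
	};

	struct my_request {
		struct my_qpair		*qpair;
		struct spdk_thread	*master_thread;	/* thread the target was created on */
	};

	static void
	my_complete_on_qpair(void *ctx)
	{
		struct my_request *req = ctx;

		/* Runs on req->qpair->thread: safe to post the completion
		 * on the transport, since only this thread touches the qpair. */
		(void)req;
	}

	static void
	my_exec_on_master(void *ctx)
	{
		struct my_request *req = ctx;

		/* Runs on the master thread: all fabrics/admin processing is
		 * serialized here, so controller state needs no locking. */

		/* Bounce the completion back to the qpair's owning thread. */
		spdk_thread_send_msg(req->qpair->thread, my_complete_on_qpair, req);
	}

	/* May be called from any core that polls the qpair. */
	static void
	my_exec(struct my_request *req)
	{
		assert(req->qpair->thread == spdk_get_thread());

		/* Funnel the request to the master thread; it completes
		 * asynchronously via a message back to the qpair's thread. */
		spdk_thread_send_msg(req->master_thread, my_exec_on_master, req);
	}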

@@ -39,6 +39,7 @@
 #include "subsystem.h"
 #include "transport.h"
 
+#include "spdk/io_channel.h"
 #include "spdk/trace.h"
 #include "spdk/nvme_spec.h"
 #include "spdk/string.h"
@@ -194,6 +195,9 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_qpair *qpair,
 	SPDK_DEBUGLOG(SPDK_TRACE_NVMF, "  subnqn: \"%s\"\n", data->subnqn);
 	SPDK_DEBUGLOG(SPDK_TRACE_NVMF, "  hostnqn: \"%s\"\n", data->hostnqn);
 
+	assert(qpair->thread == NULL);
+	qpair->thread = spdk_get_thread();
+
 	tgt = qpair->transport->tgt;
 	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn);

@@ -57,6 +57,8 @@ struct spdk_nvmf_qpair {
 	struct spdk_nvmf_ctrlr		*ctrlr;
 	enum spdk_nvmf_qpair_type	type;
 
+	struct spdk_thread		*thread;
+
 	uint16_t			qid;
 	uint16_t			sq_head;
 	uint16_t			sq_head_max;

@@ -34,6 +34,7 @@
 #include "spdk/stdinc.h"
 
 #include "spdk/conf.h"
+#include "spdk/io_channel.h"
 #include "spdk/nvmf.h"
 #include "spdk/trace.h"
@@ -77,6 +78,13 @@ spdk_nvmf_tgt_create(struct spdk_nvmf_tgt_opts *opts)
 		tgt->opts = *opts;
 	}
 
+	tgt->master_thread = spdk_get_thread();
+	if (!tgt->master_thread) {
+		free(tgt);
+		SPDK_ERRLOG("Call spdk_allocate_thread() prior to calling spdk_nvmf_tgt_create()\n");
+		return NULL;
+	}
+
 	tgt->discovery_genctr = 0;
 	tgt->discovery_log_page = NULL;
 	tgt->discovery_log_page_size = 0;

@@ -48,6 +48,8 @@
 struct spdk_nvmf_tgt {
 	struct spdk_nvmf_tgt_opts	opts;
 
+	struct spdk_thread		*master_thread;
+
 	uint16_t			next_cntlid;
 	uint64_t			discovery_genctr;
 	TAILQ_HEAD(, spdk_nvmf_subsystem)	subsystems;

@@ -39,29 +39,47 @@
 #include "subsystem.h"
 #include "transport.h"
 
+#include "spdk/io_channel.h"
 #include "spdk/nvme.h"
 #include "spdk/nvmf_spec.h"
 #include "spdk/trace.h"
+#include "spdk_internal/assert.h"
 #include "spdk_internal/log.h"
 
+static void
+spdk_nvmf_request_complete_on_qpair(void *ctx)
+{
+	struct spdk_nvmf_request *req = ctx;
+	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
+
+	rsp->sqid = 0;
+	rsp->status.p = 0;
+	rsp->cid = req->cmd->nvme_cmd.cid;
+
+	SPDK_DEBUGLOG(SPDK_TRACE_NVMF,
+		      "cpl: cid=%u cdw0=0x%08x rsvd1=%u status=0x%04x\n",
+		      rsp->cid, rsp->cdw0, rsp->rsvd1,
+		      *(uint16_t *)&rsp->status);
+
+	if (spdk_nvmf_transport_req_complete(req)) {
+		SPDK_ERRLOG("Transport request completion error!\n");
+	}
+}
+
 int
 spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
 
-	response->sqid = 0;
-	response->status.p = 0;
-	response->cid = req->cmd->nvme_cmd.cid;
-
-	SPDK_DEBUGLOG(SPDK_TRACE_NVMF,
-		      "cpl: cid=%u cdw0=0x%08x rsvd1=%u status=0x%04x\n",
-		      response->cid, response->cdw0, response->rsvd1,
-		      *(uint16_t *)&response->status);
-
-	if (spdk_nvmf_transport_req_complete(req)) {
-		SPDK_ERRLOG("Transport request completion error!\n");
-		return -1;
+	if (cmd->opc == SPDK_NVME_OPC_FABRIC ||
+	    req->qpair->type == QPAIR_TYPE_AQ) {
+		/* Pass a message back to the originating thread. */
+		spdk_thread_send_msg(req->qpair->thread,
+				     spdk_nvmf_request_complete_on_qpair,
+				     req);
+	} else {
+		spdk_nvmf_request_complete_on_qpair(req);
 	}
 
 	return 0;
@@ -269,16 +287,15 @@ nvmf_trace_command(union nvmf_h2c_msg *h2c_msg, enum spdk_nvmf_qpair_type qpair_type)
 	}
 }
 
-int
-spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
+static void
+spdk_nvmf_request_exec_on_master(void *ctx)
 {
+	struct spdk_nvmf_request *req = ctx;
 	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
 	spdk_nvmf_request_exec_status status;
 
-	nvmf_trace_command(req->cmd, req->qpair->type);
-
 	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
 		status = nvmf_process_fabrics_command(req);
 	} else if (ctrlr == NULL || !ctrlr->vcprop.cc.bits.en) {
@@ -295,8 +312,61 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
 		if (subsystem->is_removed) {
 			rsp->status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
 			status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
-		} else if (req->qpair->type == QPAIR_TYPE_AQ) {
+		} else {
 			status = spdk_nvmf_ctrlr_process_admin_cmd(req);
+		}
+	}
+
+	switch (status) {
+	case SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE:
+		spdk_nvmf_request_complete(req);
+		break;
+	case SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS:
+		break;
+	default:
+		SPDK_UNREACHABLE();
+	}
+}
+
+int
+spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
+{
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
+	spdk_nvmf_request_exec_status status;
+
+	nvmf_trace_command(req->cmd, req->qpair->type);
+
+	if (cmd->opc == SPDK_NVME_OPC_FABRIC ||
+	    req->qpair->type == QPAIR_TYPE_AQ) {
+		/* Fabric and admin commands are sent
+		 * to the master core for synchronization
+		 * reasons.
+		 */
+		spdk_thread_send_msg(req->qpair->transport->tgt->master_thread,
+				     spdk_nvmf_request_exec_on_master,
+				     req);
+		status = SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
+	} else if (ctrlr == NULL ||
+		   !ctrlr->vcprop.cc.bits.en) {
+		/* TODO: The EN bit is modified by the master thread. This needs
+		 * stronger synchronization.
+		 */
+		SPDK_ERRLOG("Non-Fabric command sent to disabled controller\n");
+		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
+		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	} else {
+		struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
+
+		assert(subsystem != NULL);
+
+		/* TODO: subsystem->is_removed is touched by multiple threads.
+		 * This needs stronger synchronization.
+		 */
+		if (subsystem->is_removed) {
+			rsp->status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
+			status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 		} else {
 			status = spdk_nvmf_ctrlr_process_io_cmd(req);
 		}
@@ -308,8 +378,7 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
 	case SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS:
 		return 0;
 	default:
-		SPDK_ERRLOG("Unknown request exec status: 0x%x\n", status);
-		return -1;
+		SPDK_UNREACHABLE();
 	}
 
 	return 0;
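
One consequence of the new master_thread field is visible in the spdk_nvmf_tgt_create() hunk above: target creation now fails fast if the calling thread was never registered with the io_channel layer, rather than storing a NULL thread and crashing on the first spdk_thread_send_msg(). A caller-side sketch of that contract follows; create_tgt_checked() is a hypothetical wrapper, while spdk_nvmf_tgt_create() and spdk_get_thread() are the real calls from this diff.

	#include "spdk/io_channel.h"
	#include "spdk/nvmf.h"

	/* Hypothetical wrapper showing the precondition the new error path
	 * enforces: the creating thread becomes the master thread, so it
	 * must already be registered (spdk_allocate_thread() has been
	 * called on it, typically by the application framework). */
	static struct spdk_nvmf_tgt *
	create_tgt_checked(struct spdk_nvmf_tgt_opts *opts)
	{
		if (spdk_get_thread() == NULL) {
			/* Not an SPDK thread; spdk_nvmf_tgt_create()
			 * would log an error and return NULL anyway. */
			return NULL;
		}

		return spdk_nvmf_tgt_create(opts);
	}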