nvmf: Move fabric command handlers to request.c

Now all request processing is in request.c
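
As a rough orientation (not part of the diff below; handle_capsule() and its error handling are illustrative placeholders, not code from this patch), a transport receive path is expected to drive the request.c entry points like this, following the prep_data return contract of 0 = data ready, 1 = host-to-controller transfer posted, negative = error:

#include "request.h"

static int
handle_capsule(struct nvmf_request *req)
{
	int rc;

	/* Map the command's SGL into the bounce buffer and, for
	 * host-to-controller data, post the RDMA read. */
	rc = spdk_nvmf_request_prep_data(req);
	if (rc < 0) {
		return -1;	/* invalid SGL or bounce buffer overrun */
	}
	if (rc == 1) {
		return 0;	/* RDMA read posted; execute once the transfer completes */
	}

	/* Fabrics, admin, and I/O commands are all dispatched here;
	 * each handler sends its own completion via spdk_nvmf_request_complete(). */
	return spdk_nvmf_request_exec(req);
}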

Change-Id: I27db190f566a4134e5c09566f87c3d0922b3f569
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Ben Walker 2016-06-24 11:19:24 -07:00
parent 744dd009c0
commit 68ba9e1f86
4 changed files with 333 additions and 335 deletions


@@ -365,60 +365,6 @@ void spdk_shutdown_nvmf_conns(void)
rte_get_master_lcore(), spdk_nvmf_conn_check_shutdown, NULL);
}
static int
nvmf_process_property_get(struct nvmf_request *req)
{
struct spdk_nvmf_fabric_prop_get_rsp *response;
struct spdk_nvmf_fabric_prop_get_cmd *cmd;
int ret;
cmd = &req->cmd->prop_get_cmd;
response = &req->rsp->prop_get_rsp;
nvmf_property_get(req->conn->sess, cmd, response);
/* send the nvmf response if setup by NVMf library */
SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send property get capsule response\n");
ret = spdk_nvmf_request_complete(req);
if (ret) {
SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
return -1;
}
return 0;
}
static int
nvmf_process_property_set(struct nvmf_request *req)
{
struct spdk_nvmf_fabric_prop_set_rsp *response;
struct spdk_nvmf_fabric_prop_set_cmd *cmd;
bool shutdown = false;
int ret;
cmd = &req->cmd->prop_set_cmd;
response = &req->rsp->prop_set_rsp;
nvmf_property_set(req->conn->sess, cmd, response, &shutdown);
/* TODO: This is not right. It should shut down the whole session.
if (shutdown == true) {
SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Call to set properties has indicated shutdown\n");
conn->state = CONN_STATE_FABRIC_DISCONNECT;
}
*/
/* send the nvmf response if setup by NVMf library */
SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send property set capsule response\n");
ret = spdk_nvmf_request_complete(req);
if (ret) {
SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
return -1;
}
return 0;
}
/* Check the nvmf message received */
static void nvmf_trace_command(struct spdk_nvmf_capsule_cmd *cap_hdr, enum conn_type conn_type)
{
@@ -469,48 +415,7 @@ static void nvmf_trace_command(struct spdk_nvmf_capsule_cmd *cap_hdr, enum conn_
}
}
static int
nvmf_process_io_command(struct nvmf_request *req)
{
int ret;
/* send to NVMf library for backend NVMe processing */
ret = nvmf_process_io_cmd(req);
if (ret) {
/* library failed the request and should have
updated the response */
SPDK_TRACELOG(SPDK_TRACE_RDMA, "send nvme io cmd capsule error response\n");
ret = spdk_nvmf_request_complete(req);
if (ret) {
SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
return -1;
}
}
return 0;
}
static int
nvmf_process_admin_command(struct nvmf_request *req)
{
int ret;
ret = nvmf_process_admin_cmd(req);
if (ret) {
/* library failed the request and should have
updated the response */
SPDK_TRACELOG(SPDK_TRACE_NVMF, "send nvme admin cmd capsule sync response\n");
ret = spdk_nvmf_request_complete(req);
if (ret) {
SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
return -1;
}
}
return 0;
}
static void
void
nvmf_init_conn_properites(struct spdk_nvmf_conn *conn,
struct nvmf_session *session,
struct spdk_nvmf_fabric_connect_rsp *response)
@@ -547,234 +452,6 @@ nvmf_init_conn_properites(struct spdk_nvmf_conn *conn,
}
static int
nvmf_process_connect(struct nvmf_request *req)
{
struct spdk_nvmf_fabric_connect_cmd *connect;
struct spdk_nvmf_fabric_connect_data *connect_data;
struct spdk_nvmf_fabric_connect_rsp *response;
struct spdk_nvmf_conn *conn = req->conn;
struct nvmf_session *session;
int ret;
if (req->length < sizeof(struct spdk_nvmf_fabric_connect_data)) {
SPDK_ERRLOG("Connect command data length 0x%x too small\n", req->length);
return -1;
}
connect = &req->cmd->connect_cmd;
connect_data = (struct spdk_nvmf_fabric_connect_data *)req->data;
RTE_VERIFY(connect_data != NULL);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** Connect Capsule *** %p\n", connect);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** cid = %x ***\n", connect->cid);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** recfmt = %x ***\n", connect->recfmt);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** qid = %x ***\n", connect->qid);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** sqsize = %x ***\n", connect->sqsize);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** Connect Capsule Data *** %p\n", connect_data);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** cntlid = %x ***\n", connect_data->cntlid);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** hostid = %04x%04x-%04x-%04x-%04x-%04x%04x%04x ***\n",
htons(*(unsigned short *) &connect_data->hostid[0]),
htons(*(unsigned short *) &connect_data->hostid[2]),
htons(*(unsigned short *) &connect_data->hostid[4]),
htons(*(unsigned short *) &connect_data->hostid[6]),
htons(*(unsigned short *) &connect_data->hostid[8]),
htons(*(unsigned short *) &connect_data->hostid[10]),
htons(*(unsigned short *) &connect_data->hostid[12]),
htons(*(unsigned short *) &connect_data->hostid[14]));
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** subsiqn = %s ***\n", (char *)&connect_data->subnqn[0]);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** hostiqn = %s ***\n", (char *)&connect_data->hostnqn[0]);
response = &req->rsp->connect_rsp;
session = nvmf_connect((void *)conn, connect, connect_data, response);
if (session != NULL) {
conn->sess = session;
conn->qid = connect->qid;
if (connect->qid > 0) {
conn->type = CONN_TYPE_IOQ; /* I/O Connection */
} else {
/* When session first created, set some attributes */
nvmf_init_conn_properites(conn, session, response);
}
}
/* synchronous call, nvmf library expected to init
response status.
*/
SPDK_TRACELOG(SPDK_TRACE_NVMF, "send connect capsule response\n");
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** cntlid = %x ***\n",
response->status_code_specific.success.cntlid);
ret = spdk_nvmf_request_complete(req);
if (ret) {
SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
return ret;
}
return 0;
}
static int
nvmf_process_fabrics_command(struct nvmf_request *req)
{
struct spdk_nvmf_capsule_cmd *cap_hdr;
cap_hdr = &req->cmd->nvmf_cmd;
switch (cap_hdr->fctype) {
case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET:
return nvmf_process_property_set(req);
case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET:
return nvmf_process_property_get(req);
case SPDK_NVMF_FABRIC_COMMAND_CONNECT:
return nvmf_process_connect(req);
default:
SPDK_TRACELOG(SPDK_TRACE_DEBUG, "recv capsule header type invalid [%x]!\n",
cap_hdr->fctype);
return 1; /* skip, do nothing */
}
}
/*
* Prepare the nvmf_request data and length fields.
*
* A data transfer will be initiated if required by the request.
*
* \return 0 on success with data immediately available (in-capsule data or controller to host),
* 1 if a host to controller transfer was initiated (command will be issued pending completion
* of transfer), or negative on error.
*/
static int
spdk_nvmf_request_prep_data(struct nvmf_request *req)
{
struct nvme_qp_tx_desc *tx_desc = req->tx_desc;
struct nvme_qp_rx_desc *rx_desc = req->rx_desc;
struct spdk_nvmf_conn *conn = tx_desc->conn;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
enum spdk_nvme_data_transfer xfer;
int ret;
if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
xfer = spdk_nvme_opc_get_data_transfer(req->cmd->nvmf_cmd.fctype);
} else {
xfer = spdk_nvme_opc_get_data_transfer(cmd->opc);
}
if (xfer != SPDK_NVME_DATA_NONE) {
struct spdk_nvme_sgl_descriptor *sgl = (struct spdk_nvme_sgl_descriptor *)&cmd->dptr.sgl1;
if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
(sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Keyed data block: raddr 0x%" PRIx64 ", rkey 0x%x, length 0x%x\n",
sgl->address, sgl->keyed.key, sgl->keyed.length);
if (sgl->keyed.length > rx_desc->bb_sgl.length) {
SPDK_ERRLOG("SGL length 0x%x exceeds BB length 0x%x\n",
sgl->keyed.length, rx_desc->bb_sgl.length);
return -1;
}
req->data = rx_desc->bb;
req->remote_addr = sgl->address;
req->rkey = sgl->keyed.key;
req->length = sgl->keyed.length;
} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
uint64_t offset = sgl->address;
uint32_t max_len = rx_desc->bb_sgl.length;
SPDK_TRACELOG(SPDK_TRACE_RDMA, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n",
offset, sgl->unkeyed.length);
if (conn->type == CONN_TYPE_AQ) {
SPDK_ERRLOG("In-capsule data not allowed for admin queue\n");
return -1;
}
if (offset > max_len) {
SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " exceeds capsule length 0x%x\n",
offset, max_len);
return -1;
}
max_len -= (uint32_t)offset;
if (sgl->unkeyed.length > max_len) {
SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n",
sgl->unkeyed.length, max_len);
return -1;
}
req->data = rx_desc->bb + offset;
req->length = sgl->unkeyed.length;
} else {
SPDK_ERRLOG("Invalid NVMf I/O Command SGL: Type 0x%x, Subtype 0x%x\n",
sgl->generic.type, sgl->generic.subtype);
return -1;
}
if (req->length == 0) {
xfer = SPDK_NVME_DATA_NONE;
req->data = NULL;
}
req->xfer = xfer;
/*
* For any I/O that requires data to be
* pulled into target BB before processing by
* the backend NVMe device
*/
if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Issuing RDMA Read to get host data\n");
/* temporarily adjust SGE to only copy what the host is prepared to send. */
rx_desc->bb_sgl.length = req->length;
ret = nvmf_post_rdma_read(conn, tx_desc);
if (ret) {
SPDK_ERRLOG("Unable to post rdma read tx descriptor\n");
return -1;
}
/* Wait for transfer to complete before executing command. */
return 1;
}
}
}
if (xfer == SPDK_NVME_DATA_NONE) {
SPDK_TRACELOG(SPDK_TRACE_RDMA, "No data to transfer\n");
RTE_VERIFY(req->data == NULL);
RTE_VERIFY(req->length == 0);
} else {
RTE_VERIFY(req->data != NULL);
RTE_VERIFY(req->length != 0);
SPDK_TRACELOG(SPDK_TRACE_RDMA, "%s data ready\n",
xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER ? "Host to Controller" :
"Controller to Host");
}
return 0;
}
static int
spdk_nvmf_request_exec(struct nvmf_request *req)
{
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
return nvmf_process_fabrics_command(req);
} else if (req->conn->type == CONN_TYPE_AQ) {
return nvmf_process_admin_command(req);
} else {
return nvmf_process_io_command(req);
}
}
static int nvmf_recv(struct spdk_nvmf_conn *conn, struct ibv_wc *wc)
{
struct nvme_qp_rx_desc *rx_desc;


@@ -100,6 +100,11 @@ void spdk_shutdown_nvmf_conns(void);
struct spdk_nvmf_conn *
spdk_nvmf_allocate_conn(void);
void
nvmf_init_conn_properites(struct spdk_nvmf_conn *conn,
struct nvmf_session *session,
struct spdk_nvmf_fabric_connect_rsp *response);
int spdk_nvmf_startup_conn(struct spdk_nvmf_conn *conn);
void


@@ -31,6 +31,11 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <arpa/inet.h>
#include <rte_config.h>
#include <rte_debug.h>
#include "conn.h"
#include "rdma.h"
#include "request.h"
@@ -93,7 +98,7 @@ command_fail:
return ret;
}
int
static int
nvmf_process_admin_cmd(struct nvmf_request *req)
{
struct nvmf_session *session = req->conn->sess;
@@ -370,7 +375,27 @@ passthrough:
return rc;
}
int
static int
nvmf_process_admin_command(struct nvmf_request *req)
{
int ret;
ret = nvmf_process_admin_cmd(req);
if (ret) {
/* library failed the request and should have
updated the response */
SPDK_TRACELOG(SPDK_TRACE_NVMF, "send nvme admin cmd capsule sync response\n");
ret = spdk_nvmf_request_complete(req);
if (ret) {
SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
return -1;
}
}
return 0;
}
static int
nvmf_process_io_cmd(struct nvmf_request *req)
{
struct nvmf_session *session = req->conn->sess;
@@ -467,3 +492,297 @@ nvmf_process_io_cmd(struct nvmf_request *req)
}
return rc;
}
static int
nvmf_process_io_command(struct nvmf_request *req)
{
int ret;
/* send to NVMf library for backend NVMe processing */
ret = nvmf_process_io_cmd(req);
if (ret) {
/* library failed the request and should have
updated the response */
SPDK_TRACELOG(SPDK_TRACE_RDMA, "send nvme io cmd capsule error response\n");
ret = spdk_nvmf_request_complete(req);
if (ret) {
SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
return -1;
}
}
return 0;
}
static int
nvmf_process_property_get(struct nvmf_request *req)
{
struct spdk_nvmf_fabric_prop_get_rsp *response;
struct spdk_nvmf_fabric_prop_get_cmd *cmd;
int ret;
cmd = &req->cmd->prop_get_cmd;
response = &req->rsp->prop_get_rsp;
nvmf_property_get(req->conn->sess, cmd, response);
/* send the nvmf response if setup by NVMf library */
SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send property get capsule response\n");
ret = spdk_nvmf_request_complete(req);
if (ret) {
SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
return -1;
}
return 0;
}
static int
nvmf_process_property_set(struct nvmf_request *req)
{
struct spdk_nvmf_fabric_prop_set_rsp *response;
struct spdk_nvmf_fabric_prop_set_cmd *cmd;
bool shutdown = false;
int ret;
cmd = &req->cmd->prop_set_cmd;
response = &req->rsp->prop_set_rsp;
nvmf_property_set(req->conn->sess, cmd, response, &shutdown);
/* TODO: This is not right. It should shut down the whole session.
if (shutdown == true) {
SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Call to set properties has indicated shutdown\n");
conn->state = CONN_STATE_FABRIC_DISCONNECT;
}
*/
/* send the nvmf response if setup by NVMf library */
SPDK_TRACELOG(SPDK_TRACE_DEBUG, "send property set capsule response\n");
ret = spdk_nvmf_request_complete(req);
if (ret) {
SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
return -1;
}
return 0;
}
static int
nvmf_process_connect(struct nvmf_request *req)
{
struct spdk_nvmf_fabric_connect_cmd *connect;
struct spdk_nvmf_fabric_connect_data *connect_data;
struct spdk_nvmf_fabric_connect_rsp *response;
struct spdk_nvmf_conn *conn = req->conn;
struct nvmf_session *session;
int ret;
if (req->length < sizeof(struct spdk_nvmf_fabric_connect_data)) {
SPDK_ERRLOG("Connect command data length 0x%x too small\n", req->length);
return -1;
}
connect = &req->cmd->connect_cmd;
connect_data = (struct spdk_nvmf_fabric_connect_data *)req->data;
RTE_VERIFY(connect_data != NULL);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** Connect Capsule *** %p\n", connect);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** cid = %x ***\n", connect->cid);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** recfmt = %x ***\n", connect->recfmt);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** qid = %x ***\n", connect->qid);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** sqsize = %x ***\n", connect->sqsize);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** Connect Capsule Data *** %p\n", connect_data);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** cntlid = %x ***\n", connect_data->cntlid);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** hostid = %04x%04x-%04x-%04x-%04x-%04x%04x%04x ***\n",
htons(*(unsigned short *) &connect_data->hostid[0]),
htons(*(unsigned short *) &connect_data->hostid[2]),
htons(*(unsigned short *) &connect_data->hostid[4]),
htons(*(unsigned short *) &connect_data->hostid[6]),
htons(*(unsigned short *) &connect_data->hostid[8]),
htons(*(unsigned short *) &connect_data->hostid[10]),
htons(*(unsigned short *) &connect_data->hostid[12]),
htons(*(unsigned short *) &connect_data->hostid[14]));
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** subsiqn = %s ***\n", (char *)&connect_data->subnqn[0]);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** hostiqn = %s ***\n", (char *)&connect_data->hostnqn[0]);
response = &req->rsp->connect_rsp;
session = nvmf_connect((void *)conn, connect, connect_data, response);
if (session != NULL) {
conn->sess = session;
conn->qid = connect->qid;
if (connect->qid > 0) {
conn->type = CONN_TYPE_IOQ; /* I/O Connection */
} else {
/* When session first created, set some attributes */
nvmf_init_conn_properites(conn, session, response);
}
}
/* synchronous call, nvmf library expected to init
response status.
*/
SPDK_TRACELOG(SPDK_TRACE_NVMF, "send connect capsule response\n");
SPDK_TRACELOG(SPDK_TRACE_NVMF, " *** cntlid = %x ***\n",
response->status_code_specific.success.cntlid);
ret = spdk_nvmf_request_complete(req);
if (ret) {
SPDK_ERRLOG("Unable to send aq qp tx descriptor\n");
return ret;
}
return 0;
}
static int
nvmf_process_fabrics_command(struct nvmf_request *req)
{
struct spdk_nvmf_capsule_cmd *cap_hdr;
cap_hdr = &req->cmd->nvmf_cmd;
switch (cap_hdr->fctype) {
case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET:
return nvmf_process_property_set(req);
case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET:
return nvmf_process_property_get(req);
case SPDK_NVMF_FABRIC_COMMAND_CONNECT:
return nvmf_process_connect(req);
default:
SPDK_TRACELOG(SPDK_TRACE_DEBUG, "recv capsule header type invalid [%x]!\n",
cap_hdr->fctype);
return 1; /* skip, do nothing */
}
}
int
spdk_nvmf_request_prep_data(struct nvmf_request *req)
{
struct nvme_qp_tx_desc *tx_desc = req->tx_desc;
struct nvme_qp_rx_desc *rx_desc = req->rx_desc;
struct spdk_nvmf_conn *conn = tx_desc->conn;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
enum spdk_nvme_data_transfer xfer;
int ret;
if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
xfer = spdk_nvme_opc_get_data_transfer(req->cmd->nvmf_cmd.fctype);
} else {
xfer = spdk_nvme_opc_get_data_transfer(cmd->opc);
}
if (xfer != SPDK_NVME_DATA_NONE) {
struct spdk_nvme_sgl_descriptor *sgl = (struct spdk_nvme_sgl_descriptor *)&cmd->dptr.sgl1;
if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
(sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Keyed data block: raddr 0x%" PRIx64 ", rkey 0x%x, length 0x%x\n",
sgl->address, sgl->keyed.key, sgl->keyed.length);
if (sgl->keyed.length > rx_desc->bb_sgl.length) {
SPDK_ERRLOG("SGL length 0x%x exceeds BB length 0x%x\n",
sgl->keyed.length, rx_desc->bb_sgl.length);
return -1;
}
req->data = rx_desc->bb;
req->remote_addr = sgl->address;
req->rkey = sgl->keyed.key;
req->length = sgl->keyed.length;
} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
uint64_t offset = sgl->address;
uint32_t max_len = rx_desc->bb_sgl.length;
SPDK_TRACELOG(SPDK_TRACE_RDMA, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n",
offset, sgl->unkeyed.length);
if (conn->type == CONN_TYPE_AQ) {
SPDK_ERRLOG("In-capsule data not allowed for admin queue\n");
return -1;
}
if (offset > max_len) {
SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " exceeds capsule length 0x%x\n",
offset, max_len);
return -1;
}
max_len -= (uint32_t)offset;
if (sgl->unkeyed.length > max_len) {
SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n",
sgl->unkeyed.length, max_len);
return -1;
}
req->data = rx_desc->bb + offset;
req->length = sgl->unkeyed.length;
} else {
SPDK_ERRLOG("Invalid NVMf I/O Command SGL: Type 0x%x, Subtype 0x%x\n",
sgl->generic.type, sgl->generic.subtype);
return -1;
}
if (req->length == 0) {
xfer = SPDK_NVME_DATA_NONE;
req->data = NULL;
}
req->xfer = xfer;
/*
* For any I/O that requires data to be
* pulled into target BB before processing by
* the backend NVMe device
*/
if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Issuing RDMA Read to get host data\n");
/* temporarily adjust SGE to only copy what the host is prepared to send. */
rx_desc->bb_sgl.length = req->length;
ret = nvmf_post_rdma_read(conn, tx_desc);
if (ret) {
SPDK_ERRLOG("Unable to post rdma read tx descriptor\n");
return -1;
}
/* Wait for transfer to complete before executing command. */
return 1;
}
}
}
if (xfer == SPDK_NVME_DATA_NONE) {
SPDK_TRACELOG(SPDK_TRACE_RDMA, "No data to transfer\n");
RTE_VERIFY(req->data == NULL);
RTE_VERIFY(req->length == 0);
} else {
RTE_VERIFY(req->data != NULL);
RTE_VERIFY(req->length != 0);
SPDK_TRACELOG(SPDK_TRACE_RDMA, "%s data ready\n",
xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER ? "Host to Controller" :
"Controller to Host");
}
return 0;
}
int
spdk_nvmf_request_exec(struct nvmf_request *req)
{
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
return nvmf_process_fabrics_command(req);
} else if (req->conn->type == CONN_TYPE_AQ) {
return nvmf_process_admin_command(req);
} else {
return nvmf_process_io_command(req);
}
}


@@ -74,15 +74,12 @@ struct nvmf_request {
TAILQ_ENTRY(nvmf_request) entries;
};
/**
* Send the response and transfer data from controller to host if required.
*/
int
spdk_nvmf_request_prep_data(struct nvmf_request *req);
int
spdk_nvmf_request_exec(struct nvmf_request *req);
int spdk_nvmf_request_complete(struct nvmf_request *req);
int
nvmf_process_admin_cmd(struct nvmf_request *req);
int
nvmf_process_io_cmd(struct nvmf_request *req);
#endif