nvmf: Add check for bidirectional xfer type

The RDMA target can't handle the bidirectional xfer type; in a debug build it
triggers an assert in the nvmf_rdma_setup_wr function. The NVMF controller
performs checks of opcodes, but the failure happens before this check.
Add the same validation to the TCP transport.

Change-Id: I14400b9c301295c0ae1d35a4330189d38aeee723
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3436
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
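For background on the failure mode: the assert mentioned in the commit message fires because the RDMA data path only knows how to map host-to-controller and controller-to-host transfers onto RDMA READ/WRITE work requests. A simplified sketch of that dispatch, illustrative only and not the exact body of nvmf_rdma_setup_wr():

#include <assert.h>
#include <infiniband/verbs.h>

#include "spdk/nvme_spec.h"

/* Illustrative only: map an NVMe data transfer direction onto an RDMA work
 * request opcode. BIDIRECTIONAL has no mapping, which is the case the
 * debug-build assert catches. */
static void
setup_wr_sketch(struct ibv_send_wr *wr, enum spdk_nvme_data_transfer xfer)
{
        if (xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
                wr->opcode = IBV_WR_RDMA_WRITE;   /* controller pushes read data to host memory */
        } else if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
                wr->opcode = IBV_WR_RDMA_READ;    /* controller pulls write data from host memory */
        } else {
                assert(0);                        /* BIDIRECTIONAL (or NONE) should never reach WR setup */
        }
}

With the checks added below, such a command is failed with Invalid Opcode before it can reach the WR setup path.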


@@ -2001,6 +2001,14 @@ nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
         /* The next state transition depends on the data transfer needs of this request. */
         rdma_req->req.xfer = spdk_nvmf_req_get_xfer(&rdma_req->req);
 
+        if (spdk_unlikely(rdma_req->req.xfer == SPDK_NVME_DATA_BIDIRECTIONAL)) {
+                rsp->status.sct = SPDK_NVME_SCT_GENERIC;
+                rsp->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+                rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
+                SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Request %p: invalid xfer type (BIDIRECTIONAL)\n", rdma_req);
+                break;
+        }
+
         /* If no data to transfer, ready to execute. */
         if (rdma_req->req.xfer == SPDK_NVME_DATA_NONE) {
                 rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
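For context on where the BIDIRECTIONAL value comes from: the NVMe specification encodes the data transfer direction in bits 1:0 of the opcode, and spdk_nvmf_req_get_xfer() derives the direction from the opcode (with additional handling in SPDK for fabrics commands and zero-length SGLs). A minimal standalone illustration, assuming the spdk_nvme_opc_get_data_transfer() helper from spdk/nvme_spec.h; this is also the construction the unit test below relies on:

#include <assert.h>
#include <stdint.h>

#include "spdk/nvme_spec.h"

int
main(void)
{
        /* Any opcode with both direction bits (bits 1:0) set decodes as BIDIRECTIONAL;
         * 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL is the value used in the unit test below. */
        uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;

        assert(spdk_nvme_opc_get_data_transfer(opc) == SPDK_NVME_DATA_BIDIRECTIONAL);
        return 0;
}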


@@ -2121,6 +2121,14 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
         /* The next state transition depends on the data transfer needs of this request. */
         tcp_req->req.xfer = spdk_nvmf_req_get_xfer(&tcp_req->req);
 
+        if (spdk_unlikely(tcp_req->req.xfer == SPDK_NVME_DATA_BIDIRECTIONAL)) {
+                tcp_req->req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+                tcp_req->req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+                nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
+                SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Request %p: invalid xfer type (BIDIRECTIONAL)\n", tcp_req);
+                break;
+        }
+
         /* If no data to transfer, ready to execute. */
         if (tcp_req->req.xfer == SPDK_NVME_DATA_NONE) {
                 /* Reset the tqpair receiving pdu state */


@@ -757,6 +757,35 @@ test_spdk_nvmf_rdma_request_process(void)
                 qpair_reset(&rqpair, &poller, &device, &resources);
         }
 
+        /* Test 4, invalid command, check xfer type */
+        {
+                struct spdk_nvmf_rdma_recv *rdma_recv_inv;
+                struct spdk_nvmf_rdma_request *rdma_req_inv;
+                /* construct an opcode that specifies BIDIRECTIONAL transfer */
+                uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
+
+                rdma_recv_inv = create_recv(&rqpair, opc);
+                rdma_req_inv = create_req(&rqpair, rdma_recv_inv);
+
+                /* NEW -> RDMA_REQUEST_STATE_COMPLETING */
+                rqpair.current_recv_depth = 1;
+                progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
+                CU_ASSERT(progress == true);
+                CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
+                CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+                CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
+
+                /* RDMA_REQUEST_STATE_COMPLETED -> FREE */
+                rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
+                nvmf_rdma_request_process(&rtransport, rdma_req_inv);
+                CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);
+
+                free_recv(rdma_recv_inv);
+                free_req(rdma_req_inv);
+                poller_reset(&poller, &group);
+                qpair_reset(&rqpair, &poller, &device, &resources);
+        }
+
         spdk_mempool_free(rtransport.transport.data_buf_pool);
         spdk_mempool_free(rtransport.data_wr_pool);
 }
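Note how the test drives the whole error path with a single call: starting from NEW, nvmf_rdma_request_process() loops through the state machine, so the new check moves the request to READY_TO_COMPLETE and the same call carries it on to COMPLETING with the expected SCT/SC values set. The second call, after manually setting COMPLETED, confirms that the failed request is still recycled back to FREE.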