nvmf: Return cdw0 over tcp transport for retrieve cmd

The TCP transport doesn't send a response capsule when
c2h_success is enabled, even if cdw0 or cdw1 are non-zero,
so those values are never returned to the host. Only set
the C2H success flag when both cdw0 and cdw1 are zero.

Signed-off-by: Ed rodriguez <edwinr@netapp.com>
Signed-off-by: John Meneghini <johnm@netapp.com>
Change-Id: Ieba81fcc50342a2009f7931526e6f8392e26b6a5
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6808
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Ed rodriguez 2021-03-04 08:58:36 -05:00 committed by Tomasz Zawadzki
parent a8d700419d
commit b1144ce304
2 changed files with 28 additions and 4 deletions
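
For context: when the c2h_success option is on, the target sets the SUCCESS flag in the last C2H Data PDU and skips the response capsule entirely, so any command-specific value in cdw0/cdw1 (such as the retrieve command mentioned in the subject) never reaches the host. The patch only takes that shortcut when both dwords are zero. Below is a minimal standalone sketch of that decision; only the cdw0/cdw1 fields and the LAST_PDU/SUCCESS flag idea come from the diff, while the struct, helper, and flag values are illustrative, not SPDK API.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative flag values, not the spec-defined ones. */
    #define FAKE_C2H_FLAG_LAST_PDU 0x4
    #define FAKE_C2H_FLAG_SUCCESS  0x8

    struct fake_cpl {
            uint32_t cdw0;
            uint32_t cdw1;
    };

    /* Hypothetical helper: compute the flags for the last C2H Data PDU.
     * SUCCESS (skip the response capsule) is only safe when the completion
     * carries no command-specific data, i.e. cdw0 and cdw1 are both zero. */
    static uint8_t
    last_c2h_flags(bool c2h_success_opt, const struct fake_cpl *cpl)
    {
            uint8_t flags = FAKE_C2H_FLAG_LAST_PDU;

            if (c2h_success_opt && cpl->cdw0 == 0 && cpl->cdw1 == 0) {
                    flags |= FAKE_C2H_FLAG_SUCCESS;
            }
            return flags;
    }

    int
    main(void)
    {
            struct fake_cpl plain_read = { .cdw0 = 0, .cdw1 = 0 };
            struct fake_cpl retrieve   = { .cdw0 = 1, .cdw1 = 0 };

            /* Plain read: SUCCESS set, no response capsule needed. */
            printf("plain read flags: 0x%x\n", (unsigned)last_c2h_flags(true, &plain_read));
            /* Non-zero cdw0: SUCCESS omitted, a response capsule follows. */
            printf("retrieve flags:   0x%x\n", (unsigned)last_c2h_flags(true, &retrieve));
            return 0;
    }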

@@ -1509,8 +1509,6 @@ nvmf_tcp_pdu_c2h_data_complete(void *cb_arg)
 	struct spdk_nvmf_tcp_req *tcp_req = cb_arg;
 	struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair,
 					     struct spdk_nvmf_tcp_qpair, qpair);
-	struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(
-				tcp_req->req.qpair->transport, struct spdk_nvmf_tcp_transport, transport);
 	assert(tqpair != NULL);
@@ -1521,7 +1519,7 @@ nvmf_tcp_pdu_c2h_data_complete(void *cb_arg)
 		return;
 	}
-	if (ttransport->tcp_opts.c2h_success) {
+	if (tcp_req->pdu->hdr.c2h_data.common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) {
 		nvmf_tcp_request_free(tcp_req);
 	} else {
 		nvmf_tcp_req_pdu_fini(tcp_req);
@@ -2272,7 +2270,9 @@ _nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
 		c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
-		if (ttransport->tcp_opts.c2h_success) {
+		/* Need to send the capsule response if response is not all 0 */
+		if (ttransport->tcp_opts.c2h_success &&
+		    tcp_req->rsp.cdw0 == 0 && tcp_req->rsp.cdw1 == 0) {
 			c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS;
 		}
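
Taken together, the hunks above turn the "skip the response capsule" decision from a per-transport option into a per-request one: the send path sets SUCCESS only when cdw0 and cdw1 are zero, and the PDU-completion callback then checks the flag actually carried by the PDU (instead of ttransport->tcp_opts.c2h_success) to decide whether the request is finished or a response capsule still has to be queued. A rough sketch of that completion-side branch, with hypothetical names (only the flag check mirrors the diff; the request struct and callbacks are placeholders):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative flag value, not the spec-defined one. */
    #define FAKE_C2H_FLAG_SUCCESS 0x8

    /* Placeholder request remembering the flags put on its last C2H Data PDU. */
    struct fake_req {
            uint8_t last_pdu_flags;
    };

    static void
    fake_free_request(struct fake_req *req)
    {
            (void)req;
            printf("request freed, no capsule\n");
    }

    static void
    fake_send_rsp_capsule(struct fake_req *req)
    {
            (void)req;
            printf("response capsule queued\n");
    }

    /* Hypothetical completion callback for the last C2H Data PDU: the flag on
     * the PDU itself (not a global option) says whether a capsule follows. */
    static void
    on_last_c2h_complete(struct fake_req *req)
    {
            if (req->last_pdu_flags & FAKE_C2H_FLAG_SUCCESS) {
                    fake_free_request(req);
            } else {
                    fake_send_rsp_capsule(req);
            }
    }

    int
    main(void)
    {
            struct fake_req with_success = { .last_pdu_flags = FAKE_C2H_FLAG_SUCCESS };
            struct fake_req needs_capsule = { .last_pdu_flags = 0 };

            on_last_c2h_complete(&with_success);
            on_last_c2h_complete(&needs_capsule);
            return 0;
    }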

@@ -552,6 +552,7 @@ test_nvmf_tcp_send_c2h_data(void)
 	struct nvme_tcp_pdu pdu = {};
 	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
+	ttransport.tcp_opts.c2h_success = true;
 	thread = spdk_thread_create(NULL, NULL);
 	SPDK_CU_ASSERT_FATAL(thread != NULL);
 	spdk_set_thread(thread);
@@ -583,6 +584,7 @@ test_nvmf_tcp_send_c2h_data(void)
 	CU_ASSERT(c2h_data->datal = 300);
 	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
 	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
+	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);
 	CU_ASSERT(pdu.data_iovcnt == 3);
 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
@@ -592,6 +594,28 @@ test_nvmf_tcp_send_c2h_data(void)
 	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
 	CU_ASSERT(pdu.data_iov[2].iov_len == 99);
+	tcp_req.pdu_in_use = false;
+	tcp_req.rsp.cdw0 = 1;
+	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
+	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
+	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);
+	ttransport.tcp_opts.c2h_success = false;
+	tcp_req.pdu_in_use = false;
+	tcp_req.rsp.cdw0 = 0;
+	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
+	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
+	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);
+	tcp_req.pdu_in_use = false;
+	tcp_req.rsp.cdw0 = 1;
+	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
+	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
+	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);
 	spdk_thread_exit(thread);
 	while (!spdk_thread_is_exited(thread)) {
 		spdk_thread_poll(thread, 0, 0);