nvme/tcp: Fix the C2H data PDU receiving logic.

Multiple C2H data PDUs may be received,
so we should use the following steps:

1. Use the SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU flag
to check whether the PDU is the last one.
If it is not, we will not clean up tcp_req, i.e.,
tcp_req->datao will not be reset.

2. Use the SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS flag
to check whether the controller will send a resp PDU
or not.

Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Change-Id: I9dccf2579aadd18f31361444e25bd4b3b76f06c5
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/9192
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Ziye Yang 2021-08-17 20:48:07 +08:00 committed by Tomasz Zawadzki
parent f76f52c057
commit 0b51da144f
2 changed files with 19 additions and 7 deletions

View File

@ -970,7 +970,7 @@ nvme_tcp_c2h_data_payload_handle(struct nvme_tcp_qpair *tqpair,
tcp_req->datao += pdu->data_len;
flags = c2h_data->common.flags;
if (flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) {
if (flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU) {
if (tcp_req->datao == tcp_req->req->payload_size) {
tcp_req->rsp.status.p = 0;
} else {
@ -979,10 +979,11 @@ nvme_tcp_c2h_data_payload_handle(struct nvme_tcp_qpair *tqpair,
tcp_req->rsp.cid = tcp_req->cid;
tcp_req->rsp.sqid = tqpair->qpair.id;
tcp_req->ordering.bits.data_recv = 1;
if (nvme_tcp_req_complete_safe(tcp_req)) {
(*reaped)++;
if (flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) {
tcp_req->ordering.bits.data_recv = 1;
if (nvme_tcp_req_complete_safe(tcp_req)) {
(*reaped)++;
}
}
}
}
@ -1292,6 +1293,7 @@ nvme_tcp_c2h_data_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu
struct spdk_nvme_tcp_c2h_data_hdr *c2h_data = &pdu->hdr.c2h_data;
uint32_t error_offset = 0;
enum spdk_nvme_tcp_term_req_fes fes;
int flags = c2h_data->common.flags;
SPDK_DEBUGLOG(nvme, "enter\n");
SPDK_DEBUGLOG(nvme, "c2h_data info on tqpair(%p): datao=%u, datal=%u, cccid=%d\n",
@ -1308,6 +1310,14 @@ nvme_tcp_c2h_data_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu
SPDK_DEBUGLOG(nvme, "tcp_req(%p) on tqpair(%p): datao=%u, payload_size=%u\n",
tcp_req, tqpair, tcp_req->datao, tcp_req->req->payload_size);
if (spdk_unlikely((flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) &&
!(flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU))) {
SPDK_ERRLOG("Invalid flag flags=%d in c2h_data=%p\n", flags, c2h_data);
fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
error_offset = offsetof(struct spdk_nvme_tcp_c2h_data_hdr, common);
goto end;
}
if (c2h_data->datal > tcp_req->req->payload_size) {
SPDK_ERRLOG("Invalid datal for tcp_req(%p), datal(%u) exceeds payload_size(%u)\n",
tcp_req, c2h_data->datal, tcp_req->req->payload_size);

View File

@ -1157,7 +1157,8 @@ test_nvme_tcp_c2h_payload_handle(void)
TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
pdu.req = &tcp_req;
pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS;
pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS |
SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
pdu.data_len = 1024;
tqpair.qpair.id = 1;
@ -1303,7 +1304,8 @@ test_nvme_tcp_pdu_payload_handle(void)
tqpair.qpair.id = 1;
recv_pdu.ddgst_enable = false;
recv_pdu.req = &tcp_req;
recv_pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS;
recv_pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS |
SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
recv_pdu.data_len = 1024;
tcp_req.ordering.bits.data_recv = 0;
tcp_req.req->cb_fn = ut_nvme_complete_request;