nvme/tcp: Correctly handle the data digest error

According to the NVMe-oF 1.1 spec, a data digest error is not a fatal error,
so per Figure 126 of the NVMe Base specification the command should be
completed with the "Transient Transport Error" status code instead of
terminating the connection.
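
Data digests are only generated and checked when the host asks for them at
connect time. A minimal host-side sketch using the public SPDK API
(spdk_env setup and error handling omitted; the transport ID string is an
illustrative placeholder):

    #include "spdk/nvme.h"

    static struct spdk_nvme_ctrlr *
    connect_with_data_digest(void)
    {
    	struct spdk_nvme_transport_id trid = {};
    	struct spdk_nvme_ctrlr_opts opts;

    	/* Placeholder address/subsystem; replace with real values. */
    	spdk_nvme_transport_id_parse(&trid,
    		"trtype:TCP adrfam:IPv4 traddr:127.0.0.1 trsvcid:4420 subnqn:nqn.2016-06.io.spdk:cnode1");

    	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
    	opts.data_digest = true;	/* append and verify DDGST on data PDUs */

    	/* With this change, a payload that fails the DDGST check completes
    	 * the command with "Transient Transport Error" instead of
    	 * terminating the connection. */
    	return spdk_nvme_connect(&trid, &opts, sizeof(opts));
    }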

Change-Id: I601304ae2bb24508882fb1ec8c7e53ec587ab515
Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7795
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Author: Ziye Yang, 2021-05-07 21:33:40 +08:00 (committed by Tomasz Zawadzki)
parent 860a61f6eb
commit 252430a053

4 changed files with 37 additions and 14 deletions

CHANGELOG.md

@@ -24,6 +24,9 @@ Added `min_cntlid` and `max_cntlid` to `nvmf_create_subsystem` to limit the cont
 Added a new function `spdk_nvme_ns_cmd_copy` to submit a Simple Copy Command to a Namespace.
 
+Updated the `spdk_nvme_generic_command_status_code` structure with new status codes
+according to the definition in the NVMe 1.4 spec.
+
 ### rpc
 
 New RPC `bdev_rbd_register_cluster` and `bdev_rbd_unregister_cluster` was added, it allows to create

include/spdk/nvme_spec.h

@@ -1401,6 +1401,9 @@ enum spdk_nvme_generic_command_status_code {
 	SPDK_NVME_SC_SANITIZE_IN_PROGRESS = 0x1d,
 	SPDK_NVME_SC_SGL_DATA_BLOCK_GRANULARITY_INVALID = 0x1e,
 	SPDK_NVME_SC_COMMAND_INVALID_IN_CMB = 0x1f,
+	SPDK_NVME_SC_COMMAND_NAMESPACE_IS_PROTECTED = 0x20,
+	SPDK_NVME_SC_COMMAND_INTERRUPTED = 0x21,
+	SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR = 0x22,
 	SPDK_NVME_SC_LBA_OUT_OF_RANGE = 0x80,
 	SPDK_NVME_SC_CAPACITY_EXCEEDED = 0x81,
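
The new code sits in the generic command status set (SCT 0x0), so a host can
classify it with a simple check. A sketch; the helper name is illustrative,
not part of the SPDK API:

    #include <stdbool.h>
    #include "spdk/nvme_spec.h"

    /* Illustrative helper: a completion carrying this status is retryable. */
    static bool
    cpl_is_transient_transport_error(const struct spdk_nvme_cpl *cpl)
    {
    	return cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
    	       cpl->status.sc == SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
    }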

lib/nvme/nvme_tcp.c

@@ -846,7 +846,6 @@ nvme_tcp_qpair_send_h2c_term_req(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_
 	h2c_term_req->common.plen = h2c_term_req->common.hlen + copy_len;
 
-	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
 	nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_send_h2c_term_req_complete, tqpair);
 }
 
 static void
@@ -1046,8 +1045,8 @@ nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
 {
 	int rc = 0;
 	struct nvme_tcp_pdu *pdu;
-	uint32_t crc32c, error_offset = 0;
-	enum spdk_nvme_tcp_term_req_fes fes;
+	uint32_t crc32c;
+	struct nvme_tcp_req *tcp_req;
 
 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
 	pdu = tqpair->recv_pdu;
@@ -1060,9 +1059,9 @@ nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
 		rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c);
 		if (rc == 0) {
 			SPDK_ERRLOG("data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
-			fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
-			nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
-			return;
+			tcp_req = pdu->req;
+			assert(tcp_req != NULL);
+			tcp_req->rsp.status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
 		}
 	}
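
For reference, the digest being checked here is a CRC-32C over the PDU
payload. A simplified sketch of what nvme_tcp_pdu_calc_data_digest() and
MATCH_DIGEST_WORD amount to, assuming a contiguous payload buffer (the real
code walks the PDU iovecs and handles padding; 0xffffffff mirrors the
internal SPDK_CRC32C_XOR constant):

    #include <stdbool.h>
    #include <stdint.h>
    #include "spdk/crc32.h"

    static bool
    data_digest_matches(const void *payload, uint32_t len, uint32_t received_ddgst)
    {
    	/* CRC-32C seeded with all ones, then inverted, per NVMe/TCP. */
    	uint32_t crc32c = spdk_crc32c_update(payload, len, 0xffffffffu) ^ 0xffffffffu;

    	return crc32c == received_ddgst;
    }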

lib/nvmf/tcp.c

@@ -1360,10 +1360,12 @@ nvmf_tcp_capsule_cmd_payload_handle(struct spdk_nvmf_tcp_transport *ttransport,
 	struct spdk_nvme_tcp_cmd *capsule_cmd;
 	uint32_t error_offset = 0;
 	enum spdk_nvme_tcp_term_req_fes fes;
+	struct spdk_nvme_cpl *rsp;
 
 	capsule_cmd = &pdu->hdr.capsule_cmd;
 	tcp_req = pdu->req;
 	assert(tcp_req != NULL);
 
 	if (capsule_cmd->common.pdo > SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET) {
 		SPDK_ERRLOG("Expected ICReq capsule_cmd pdu offset <= %d, got %c\n",
 			    SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET, capsule_cmd->common.pdo);
@@ -1373,7 +1375,14 @@ nvmf_tcp_capsule_cmd_payload_handle(struct spdk_nvmf_tcp_transport *ttransport,
 	}
 
 	nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
-	nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
+
+	rsp = &tcp_req->req.rsp->nvme_cpl;
+	if (spdk_unlikely(rsp->status.sc == SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR)) {
+		nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
+	} else {
+		nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
+	}
+
 	nvmf_tcp_req_process(ttransport, tcp_req);
 
 	return;
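
The same two-way branch is repeated below in nvmf_tcp_h2c_data_payload_handle().
As a sketch, it could be factored into a helper local to lib/nvmf/tcp.c
(hypothetical refactor, not part of this commit; types are the file's own):

    /* A request whose completion status was pre-set to the transient
     * transport error skips execution and goes straight to completion
     * with that status. */
    static enum spdk_nvmf_tcp_req_state
    tcp_req_state_after_payload(const struct spdk_nvme_cpl *rsp)
    {
    	if (spdk_unlikely(rsp->status.sc == SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR)) {
    		return TCP_REQUEST_STATE_READY_TO_COMPLETE;
    	}
    	return TCP_REQUEST_STATE_READY_TO_EXECUTE;
    }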
@@ -1582,6 +1591,7 @@ nvmf_tcp_h2c_data_payload_handle(struct spdk_nvmf_tcp_transport *ttransport,
 				 struct nvme_tcp_pdu *pdu)
 {
 	struct spdk_nvmf_tcp_req *tcp_req;
+	struct spdk_nvme_cpl *rsp;
 
 	tcp_req = pdu->req;
 	assert(tcp_req != NULL);
@@ -1596,7 +1606,14 @@ nvmf_tcp_h2c_data_payload_handle(struct spdk_nvmf_tcp_transport *ttransport,
 	 * acknowledged before moving on. */
 	if (tcp_req->h2c_offset == tcp_req->req.length &&
 	    tcp_req->state == TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER) {
-		nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
+		/* After receiving all the H2C data, check whether a transient
+		 * transport error was recorded for this request. */
+		rsp = &tcp_req->req.rsp->nvme_cpl;
+		if (spdk_unlikely(rsp->status.sc == SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR)) {
+			nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
+		} else {
+			nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
+		}
 		nvmf_tcp_req_process(ttransport, tcp_req);
 	}
 }
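
Note that even when the digest check has already failed, the target still
drains the entire H2C transfer and only then completes the request with the
error status; the connection stays usable for other commands, which is the
point of treating the error as transient. The branch itself matches the
sketch after the capsule-command hunk above.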
@@ -1678,8 +1695,9 @@ nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair,
 {
 	int rc = 0;
 	struct nvme_tcp_pdu *pdu;
-	uint32_t crc32c, error_offset = 0;
-	enum spdk_nvme_tcp_term_req_fes fes;
+	uint32_t crc32c;
+	struct spdk_nvmf_tcp_req *tcp_req;
+	struct spdk_nvme_cpl *rsp;
 
 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
 	pdu = tqpair->pdu_in_progress;
@@ -1691,10 +1709,10 @@ nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair,
 		rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c);
 		if (rc == 0) {
 			SPDK_ERRLOG("Data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
-			fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
-			nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
-			return;
+			tcp_req = pdu->req;
+			assert(tcp_req != NULL);
+			rsp = &tcp_req->req.rsp->nvme_cpl;
+			rsp->status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
 		}
 	}
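
End to end, the host now observes a normal completion carrying the transient
status rather than a dropped qpair. A host-side sketch of acting on it; the
callback, context struct, and requeue helpers are hypothetical, only the SPDK
types and status checks are real:

    #include "spdk/nvme.h"

    struct my_io;				/* hypothetical per-I/O context */
    void my_resubmit(struct my_io *io);		/* hypothetical requeue helper */
    void my_complete(struct my_io *io, bool failed);

    static void
    io_done(void *arg, const struct spdk_nvme_cpl *cpl)
    {
    	struct my_io *io = arg;

    	if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
    	    cpl->status.sc == SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR) {
    		/* Not fatal per NVMe-oF 1.1: the qpair is still alive, so
    		 * the same command can simply be resubmitted. */
    		my_resubmit(io);
    		return;
    	}

    	my_complete(io, spdk_nvme_cpl_is_error(cpl));
    }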