nvmf/tcp: Pass extended LBA based length as I/O length to NVMf controller

When DIF is inserted or stripped:
- in the TCP transport layer, the LBA based length can be used throughout, but
- in the NVMf controller layer and the BDEV layer, the extended LBA based
  length must be used, and the NVMf controller reads that length from
  tcp_req->req.length.

Hence, by adding two variables, elba_length and orig_length, to
struct spdk_nvmf_tcp_req, set the extended LBA based length in
tcp_req->req.length before calling spdk_nvmf_request_exec(), and then
restore the original LBA based length to tcp_req->req.length after
spdk_nvmf_tcp_req_complete() has been called.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I9309b8923c6386644c4fd8ef3ee83a19f5d21ce5
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/458926
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Shuhei Matsumoto 2019-06-21 13:43:15 +09:00 committed by Darek Stojaczyk
parent 51b643648c
commit 7bfbc388d7

View File

@ -190,6 +190,8 @@ struct spdk_nvmf_tcp_req {
struct spdk_dif_ctx dif_ctx;
bool dif_insert_or_strip;
uint32_t elba_length;
uint32_t orig_length;
TAILQ_ENTRY(spdk_nvmf_tcp_req) link;
TAILQ_ENTRY(spdk_nvmf_tcp_req) state_link;
@ -2150,6 +2152,7 @@ spdk_nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_transport *ttransport,
if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
length = spdk_dif_get_length_with_md(length, &tcp_req->dif_ctx);
tcp_req->elba_length = length;
}
if (spdk_nvmf_tcp_req_fill_iovs(ttransport, tcp_req, length) < 0) {
@ -2197,6 +2200,7 @@ spdk_nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_transport *ttransport,
if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
length = spdk_dif_get_length_with_md(length, &tcp_req->dif_ctx);
tcp_req->elba_length = length;
}
tcp_req->req.iov[0].iov_base = tcp_req->req.data;
@ -2520,6 +2524,13 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
break;
case TCP_REQUEST_STATE_READY_TO_EXECUTE:
spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, 0, 0, (uintptr_t)tcp_req, 0);
if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
assert(tcp_req->elba_length >= tcp_req->req.length);
tcp_req->orig_length = tcp_req->req.length;
tcp_req->req.length = tcp_req->elba_length;
}
spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTING);
spdk_nvmf_request_exec(&tcp_req->req);
break;
@ -2530,6 +2541,11 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
break;
case TCP_REQUEST_STATE_EXECUTED:
spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTED, 0, 0, (uintptr_t)tcp_req, 0);
if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
tcp_req->req.length = tcp_req->orig_length;
}
spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
break;
case TCP_REQUEST_STATE_READY_TO_COMPLETE: