nvmf/tcp: Properly handle multiple iovecs in processing H2C and C2H

NVMe/TCP target had assumed the size of each iovec was io_unit_size.
Using nvme_tcp_pdu_set_data_buf() instead removes the assumption
and supports any alignment transparently.

Hence this patch moves nvme_tcp_pdu_set_data_buf() to
include/spdk_internal/nvme_tcp.h and replaces the current code to use it.

Additionally, this patch simplifies spdk_nvmf_tcp_calc_c2h_data_pdu_num()
because the sum of the iov_len values of the iovecs now equals the request length.

We cannot separate the code movement (lib/nvme/nvme_tcp.c to include/
spdk_internal/nvme_tcp.h) from the code replacement (lib/nvmf/tcp.c)
because the moved functions are static and the compiler emits a warning
if they are not referenced in lib/nvmf/tcp.c.

The next patch will add UT code.

Change-Id: Iaece5639c6d9a41bd35ee4eb2b75220682dcecd1
Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/455625
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
This commit is contained in:
Shuhei Matsumoto 2019-05-29 17:57:50 +09:00 committed by Darek Stojaczyk
parent b09bd95ad3
commit 3184884f9d
3 changed files with 75 additions and 87 deletions

View File

@ -200,6 +200,32 @@ _nvme_tcp_sgl_init(struct _nvme_tcp_sgl *s, struct iovec *iov, int iovcnt,
s->total_size = 0;
}
/* Move the SGL cursor forward by step bytes, skipping over any iovec
 * elements that the new offset fully passes. After the call, iov_offset
 * is a valid offset inside the current element (or the list is exhausted
 * and iovcnt is 0).
 */
static inline void
_nvme_tcp_sgl_advance(struct _nvme_tcp_sgl *s, uint32_t step)
{
	s->iov_offset += step;

	/* Consume fully-covered elements until the offset lands inside
	 * the current iovec or no elements remain.
	 */
	while (s->iovcnt > 0 && s->iov_offset >= s->iov->iov_len) {
		s->iov_offset -= s->iov->iov_len;
		s->iov++;
		s->iovcnt--;
	}
}
/* Report the buffer address and the number of bytes remaining in the
 * current iovec at the SGL cursor position. Either out-parameter may be
 * NULL when the caller does not need that value.
 */
static inline void
_nvme_tcp_sgl_get_buf(struct _nvme_tcp_sgl *s, void **_buf, uint32_t *_buf_len)
{
	if (_buf) {
		*_buf = s->iov->iov_base + s->iov_offset;
	}

	if (_buf_len) {
		*_buf_len = s->iov->iov_len - s->iov_offset;
	}
}
static inline bool
_nvme_tcp_sgl_append(struct _nvme_tcp_sgl *s, uint8_t *data, uint32_t data_len)
{
@ -419,4 +445,44 @@ nvme_tcp_pdu_set_data(struct nvme_tcp_pdu *pdu, void *data, uint32_t data_len)
pdu->data_iovcnt = 1;
}
/* Point a PDU's data SGL at the sub-range [data_offset, data_offset + data_len)
 * of the caller's iovec array, splitting across source iovecs as needed.
 * Supports any iovec alignment; no io_unit_size assumption is made.
 */
static void
nvme_tcp_pdu_set_data_buf(struct nvme_tcp_pdu *pdu,
struct iovec *iov, int iovcnt,
uint32_t data_offset, uint32_t data_len)
{
uint32_t remain_len, len;
uint8_t *buf;
struct _nvme_tcp_sgl *pdu_sgl, buf_sgl;
/* Fast path: a single iovec means the requested range is contiguous. */
if (iovcnt == 1) {
nvme_tcp_pdu_set_data(pdu, (void *)((uint64_t)iov[0].iov_base + data_offset), data_len);
} else {
/* pdu_sgl accumulates output entries into pdu->data_iov;
 * buf_sgl walks the source iovecs starting at data_offset. */
pdu_sgl = &pdu->sgl;
_nvme_tcp_sgl_init(pdu_sgl, pdu->data_iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0);
_nvme_tcp_sgl_init(&buf_sgl, iov, iovcnt, 0);
_nvme_tcp_sgl_advance(&buf_sgl, data_offset);
remain_len = data_len;
while (remain_len > 0) {
/* Take the largest contiguous chunk available at the current
 * source position, capped by what is still needed. */
_nvme_tcp_sgl_get_buf(&buf_sgl, (void *)&buf, &len);
len = spdk_min(len, remain_len);
_nvme_tcp_sgl_advance(&buf_sgl, len);
remain_len -= len;
/* Append fails only if pdu->data_iov runs out of entries. */
if (!_nvme_tcp_sgl_append(pdu_sgl, buf, len)) {
break;
}
}
/* The entire requested range must have been mapped. */
assert(remain_len == 0);
assert(pdu_sgl->total_size == data_len);
/* Number of descriptors consumed from the fixed-size pool. */
pdu->data_iovcnt = NVME_TCP_MAX_SGL_DESCRIPTORS - pdu_sgl->iovcnt;
pdu->data_len = data_len;
}
}
#endif /* SPDK_INTERNAL_NVME_TCP_H */

View File

@ -619,72 +619,6 @@ nvme_tcp_qpair_cmd_send_complete(void *cb_arg)
{
}
/* Advance the SGL cursor by step bytes; any iovec elements the new
 * offset completely crosses are skipped so that iov_offset always
 * refers into the current element (or iovcnt reaches 0).
 */
static inline void
_nvme_tcp_sgl_advance(struct _nvme_tcp_sgl *s, uint32_t step)
{
	s->iov_offset += step;

	while (s->iovcnt > 0 && s->iov_offset >= s->iov->iov_len) {
		s->iov_offset -= s->iov->iov_len;
		s->iov++;
		s->iovcnt--;
	}
}
/* Fetch the address and remaining byte count of the iovec element at
 * the SGL cursor. NULL out-parameters are skipped.
 */
static inline void
_nvme_tcp_sgl_get_buf(struct _nvme_tcp_sgl *s, void **_buf, uint32_t *_buf_len)
{
	if (_buf) {
		*_buf = s->iov->iov_base + s->iov_offset;
	}

	if (_buf_len) {
		*_buf_len = s->iov->iov_len - s->iov_offset;
	}
}
/* Map the sub-range [data_offset, data_offset + data_len) of the given
 * iovec array into the PDU's data SGL, splitting across source iovecs
 * as needed and making no assumption about iovec sizes or alignment.
 */
static void
nvme_tcp_pdu_set_data_buf(struct nvme_tcp_pdu *pdu,
struct iovec *iov, int iovcnt,
uint32_t data_offset, uint32_t data_len)
{
uint32_t remain_len, len;
uint8_t *buf;
struct _nvme_tcp_sgl *pdu_sgl, buf_sgl;
/* Single-iovec fast path: the range is contiguous. */
if (iovcnt == 1) {
nvme_tcp_pdu_set_data(pdu, (void *)((uint64_t)iov[0].iov_base + data_offset), data_len);
} else {
/* pdu_sgl writes output entries to pdu->data_iov; buf_sgl walks
 * the source iovecs starting at data_offset. */
pdu_sgl = &pdu->sgl;
_nvme_tcp_sgl_init(pdu_sgl, pdu->data_iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0);
_nvme_tcp_sgl_init(&buf_sgl, iov, iovcnt, 0);
_nvme_tcp_sgl_advance(&buf_sgl, data_offset);
remain_len = data_len;
while (remain_len > 0) {
/* Largest contiguous chunk at the current source position,
 * capped by the bytes still required. */
_nvme_tcp_sgl_get_buf(&buf_sgl, (void *)&buf, &len);
len = spdk_min(len, remain_len);
_nvme_tcp_sgl_advance(&buf_sgl, len);
remain_len -= len;
/* Append only fails when pdu->data_iov is exhausted. */
if (!_nvme_tcp_sgl_append(pdu_sgl, buf, len)) {
break;
}
}
/* The whole range must have been consumed. */
assert(remain_len == 0);
assert(pdu_sgl->total_size == data_len);
/* Descriptors consumed from the fixed-size pool. */
pdu->data_iovcnt = NVME_TCP_MAX_SGL_DESCRIPTORS - pdu_sgl->iovcnt;
pdu->data_len = data_len;
}
}
static int
nvme_tcp_qpair_capsule_cmd_send(struct nvme_tcp_qpair *tqpair,
struct nvme_tcp_req *tcp_req)

View File

@ -1348,7 +1348,6 @@ spdk_nvmf_tcp_h2c_data_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport,
uint32_t error_offset = 0;
enum spdk_nvme_tcp_term_req_fes fes = 0;
struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
uint32_t iov_index;
bool ttag_offset_error = false;
h2c_data = &pdu->hdr.h2c_data;
@ -1403,9 +1402,8 @@ spdk_nvmf_tcp_h2c_data_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport,
}
pdu->ctx = tcp_req;
iov_index = pdu->hdr.h2c_data.datao / ttransport->transport.opts.io_unit_size;
nvme_tcp_pdu_set_data(pdu, tcp_req->req.iov[iov_index].iov_base + (pdu->hdr.h2c_data.datao %
ttransport->transport.opts.io_unit_size), h2c_data->datal);
nvme_tcp_pdu_set_data_buf(pdu, tcp_req->req.iov, tcp_req->req.iovcnt,
h2c_data->datao, h2c_data->datal);
spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
return;
@ -2185,14 +2183,10 @@ spdk_nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
{
struct nvme_tcp_pdu *rsp_pdu;
struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
uint32_t plen, pdo, alignment, offset, iov_index;
uint32_t plen, pdo, alignment;
SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "enter\n");
/* always use the first iov_len, which is correct */
iov_index = tcp_req->c2h_data_offset / tcp_req->req.iov[0].iov_len;
offset = tcp_req->c2h_data_offset % tcp_req->req.iov[0].iov_len;
rsp_pdu = spdk_nvmf_tcp_pdu_get(tqpair);
assert(rsp_pdu != NULL);
@ -2208,7 +2202,7 @@ spdk_nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
/* set the psh */
c2h_data->cccid = tcp_req->req.cmd->nvme_cmd.cid;
c2h_data->datal = spdk_min(NVMF_TCP_PDU_MAX_C2H_DATA_SIZE,
(tcp_req->req.iov[iov_index].iov_len - offset));
tcp_req->req.length - tcp_req->c2h_data_offset);
c2h_data->datao = tcp_req->c2h_data_offset;
/* set the padding */
@ -2231,10 +2225,11 @@ spdk_nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
c2h_data->common.plen = plen;
nvme_tcp_pdu_set_data(rsp_pdu, tcp_req->req.iov[iov_index].iov_base + offset, c2h_data->datal);
nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->req.iov, tcp_req->req.iovcnt,
c2h_data->datao, c2h_data->datal);
tcp_req->c2h_data_offset += c2h_data->datal;
if (iov_index == (tcp_req->req.iovcnt - 1) && (tcp_req->c2h_data_offset == tcp_req->req.length)) {
if (tcp_req->c2h_data_offset == tcp_req->req.length) {
SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Last pdu for tcp_req=%p on tqpair=%p\n", tcp_req, tqpair);
c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
if (tqpair->qpair.transport->opts.c2h_success) {
@ -2250,17 +2245,10 @@ spdk_nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
/* Number of C2H DATA PDUs needed to carry the whole request payload:
 * ceiling division of the total request length by the per-PDU maximum.
 * (The per-iovec loop is gone — iov_len totals now equal req.length.)
 */
static int
spdk_nvmf_tcp_calc_c2h_data_pdu_num(struct spdk_nvmf_tcp_req *tcp_req)
{
	return (tcp_req->req.length + NVMF_TCP_PDU_MAX_C2H_DATA_SIZE - 1) /
	       NVMF_TCP_PDU_MAX_C2H_DATA_SIZE;
}
static void
spdk_nvmf_tcp_handle_pending_c2h_data_queue(struct spdk_nvmf_tcp_qpair *tqpair)
{