nvmf: Merge each transport's fill_buffers() into spdk_nvmf_request_get_buffers()

This patch is close to the end of the effort to unify buffer allocation
among NVMe-oF transports.

Merge each transport's fill_buffers() into common
spdk_nvmf_request_get_buffers() of the generic NVMe-oF transport.

One noticeable change is to set req->data_from_pool to true not in
each specific transport but in the generic transport.

The next patch will add spdk_nvmf_request_get_multi_buffers() for
multi SGL case of RDMA transport.

This relatively long patch series is a preparation to support
zcopy APIs in NVMe-oF target.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Icb04e3a1fa4f5a360b1b26d2ab7c67606ca7c9a0
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/469205
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Shuhei Matsumoto 2019-09-26 18:37:19 +09:00 committed by Jim Harris
parent 16365fd802
commit c0ee8ef7d5
5 changed files with 24 additions and 72 deletions

View File

@@ -1274,26 +1274,6 @@ complete:
(void *)fc_req);
}
/*
 * Populate req->iov[] from the data buffers already allocated in
 * req->buffers[]: each buffer address is rounded up to the next
 * NVMF_DATA_BUFFER boundary, and each iovec is capped at the transport's
 * io_unit_size, until `length` bytes have been described.  Marks the
 * request's data as pool-backed.
 * (Removed by this patch: folded into spdk_nvmf_request_get_buffers().)
 */
static void
nvmf_fc_request_fill_buffers(struct spdk_nvmf_request *req,
struct spdk_nvmf_transport *transport, uint32_t length)
{
uint32_t i;
req->iovcnt = 0;
while (length) {
i = req->iovcnt;
/* Align the pool buffer up to the data-buffer boundary. */
req->iov[i].iov_base = (void *)((uintptr_t)((char *)req->buffers[i] +
NVMF_DATA_BUFFER_MASK) &
~NVMF_DATA_BUFFER_MASK);
/* Each iovec covers at most one I/O unit. */
req->iov[i].iov_len = spdk_min(length, transport->opts.io_unit_size);
req->iovcnt++;
length -= req->iov[i].iov_len;
}
/* Buffers came from the transport's shared pool, so they must be
 * returned to it on completion. */
req->data_from_pool = true;
}
static int
nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
{
@@ -1306,8 +1286,6 @@ nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
return -ENOMEM;
}
nvmf_fc_request_fill_buffers(&fc_req->req, transport, length);
return 0;
}

View File

@@ -1659,22 +1659,6 @@ nvmf_rdma_fill_wr_sgl(struct spdk_nvmf_rdma_poll_group *rgroup,
return 0;
}
/*
 * Fill req->iov[] from req->buffers[], aligning each pool buffer up to the
 * NVMF_DATA_BUFFER boundary and capping each iovec at io_unit_size, until
 * `length` bytes are covered.  Unlike the FC/TCP variants, this does NOT
 * reset req->iovcnt or set req->data_from_pool — the caller
 * (spdk_nvmf_rdma_request_fill_iovs) presumably does both; note it may be
 * invoked repeatedly for multi-SGL requests, appending to req->iov[].
 * (Removed by this patch: folded into spdk_nvmf_request_get_buffers().)
 */
static void
nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
struct spdk_nvmf_request *req,
uint32_t length)
{
while (length) {
/* Align the pool buffer up to the data-buffer boundary. */
req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] +
NVMF_DATA_BUFFER_MASK) &
~NVMF_DATA_BUFFER_MASK);
req->iov[req->iovcnt].iov_len = spdk_min(length,
rtransport->transport.opts.io_unit_size);
length -= req->iov[req->iovcnt].iov_len;
req->iovcnt++;
}
}
static int
spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
struct spdk_nvmf_rdma_device *device,
@@ -1689,26 +1673,22 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
rqpair = SPDK_CONTAINEROF(req->qpair, struct spdk_nvmf_rdma_qpair, qpair);
rgroup = rqpair->poller->group;
req->iovcnt = 0;
if (spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport,
length)) {
return -ENOMEM;
}
req->iovcnt = 0;
rdma_req->iovpos = 0;
assert(req->iovcnt <= rqpair->max_send_sge);
nvmf_rdma_fill_buffers(rtransport, req, length);
rdma_req->iovpos = 0;
rc = nvmf_rdma_fill_wr_sgl(rgroup, device, rdma_req, wr, length);
if (rc != 0) {
goto err_exit;
}
assert(req->iovcnt <= rqpair->max_send_sge);
req->data_from_pool = true;
return rc;
err_exit:
@@ -1771,7 +1751,6 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transpor
current_wr->num_sge = 0;
nvmf_rdma_fill_buffers(rtransport, req, desc->keyed.length);
rc = nvmf_rdma_fill_wr_sgl(rgroup, device, rdma_req, current_wr, desc->keyed.length);
if (rc != 0) {
rc = -ENOMEM;
@@ -1797,7 +1776,6 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transpor
#endif
rdma_req->num_outstanding_data_wr = num_sgl_descriptors;
req->data_from_pool = true;
return 0;

View File

@@ -2172,28 +2172,6 @@ spdk_nvmf_tcp_req_get_xfer(struct spdk_nvmf_tcp_req *tcp_req) {
return xfer;
}
/*
 * Populate req->iov[] from the pool buffers in req->buffers[]: each buffer
 * address is aligned up to the NVMF_DATA_BUFFER boundary and each iovec is
 * capped at the transport's io_unit_size, until `length` bytes are covered.
 * Asserts the resulting iovec count fits the fixed req->iov[] array, then
 * marks the request's data as pool-backed.
 * (Removed by this patch: folded into spdk_nvmf_request_get_buffers().)
 */
static void
spdk_nvmf_tcp_req_fill_buffers(struct spdk_nvmf_request *req,
struct spdk_nvmf_transport *transport,
uint32_t length)
{
uint32_t i = 0;
req->iovcnt = 0;
while (length) {
i = req->iovcnt;
/* Align the pool buffer up to the data-buffer boundary. */
req->iov[i].iov_base = (void *)((uintptr_t)(req->buffers[i] +
NVMF_DATA_BUFFER_MASK) &
~NVMF_DATA_BUFFER_MASK);
req->iov[i].iov_len = spdk_min(length, transport->opts.io_unit_size);
req->iovcnt++;
length -= req->iov[i].iov_len;
}
/* req->iov[] is a fixed-size array; the allocator must not hand out
 * more buffers than it can describe. */
assert(req->iovcnt <= SPDK_NVMF_MAX_SGL_ENTRIES);
/* Buffers came from the transport's shared pool. */
req->data_from_pool = true;
}
static int
spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
struct spdk_nvmf_tcp_req *tcp_req, uint32_t length)
@@ -2203,14 +2181,12 @@ spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
group = &tqpair->group->group;
tcp_req->req.iovcnt = 0;
if (spdk_nvmf_request_get_buffers(&tcp_req->req, group, &ttransport->transport, length)) {
tcp_req->req.iovcnt = 0;
return -ENOMEM;
}
spdk_nvmf_tcp_req_fill_buffers(&tcp_req->req, &ttransport->transport, length);
return 0;
}

View File

@@ -425,6 +425,16 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
}
}
while (length) {
req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] +
NVMF_DATA_BUFFER_MASK) &
~NVMF_DATA_BUFFER_MASK);
req->iov[req->iovcnt].iov_len = spdk_min(length, transport->opts.io_unit_size);
length -= req->iov[req->iovcnt].iov_len;
req->iovcnt++;
}
req->data_from_pool = true;
return 0;
err_exit:

View File

@@ -138,6 +138,16 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
}
}
while (length) {
req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] +
NVMF_DATA_BUFFER_MASK) &
~NVMF_DATA_BUFFER_MASK);
req->iov[req->iovcnt].iov_len = spdk_min(length, transport->opts.io_unit_size);
length -= req->iov[req->iovcnt].iov_len;
req->iovcnt++;
}
req->data_from_pool = true;
return 0;
err_exit: