nvmf/rdma: Call spdk_nvmf_request_get_buffers() not once but per WR

This is a preparation to unify getting buffers, filling iovecs, and
filling WRs into a single API in the RDMA transport, and then to unify
that API among the RDMA, TCP, and FC transports.
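
In short, instead of reserving buffers for all SGL descriptors up front,
each data WR now reserves only the buffers it needs. A condensed sketch
of the two flows, simplified from the hunks below (declarations, SGL
validation, WR chaining, and error unwinding are omitted):

    /* Old flow: count buffers for every descriptor, then one get_buffers() call. */
    num_buffers = 0;
    desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
    for (i = 0; i < num_sgl_descriptors; i++) {
            num_buffers += SPDK_CEIL_DIV(desc->keyed.length, rtransport->transport.opts.io_unit_size);
            desc++;
    }
    rc = spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport, num_buffers);

    /* New flow: each WR gets only the buffers needed for its own descriptor,
     * so getting buffers, filling iovecs, and filling the WR happen per WR.
     */
    desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
    for (i = 0; i < num_sgl_descriptors; i++) {
            num_buffers = SPDK_CEIL_DIV(desc->keyed.length, rtransport->transport.opts.io_unit_size);
            rc = spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport, num_buffers);
            if (rc != 0) {
                    goto err_exit;
            }
            /* ... fill iovecs and SGEs for this WR, then advance to the next descriptor ... */
            desc++;
    }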

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ia69d4409c8cccaf8d7298706d61cd4e2d35e4406
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/468944
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
Author: Shuhei Matsumoto, 2019-09-20 14:24:42 +09:00
Committed by: Jim Harris
Parent: 22cd4fe2ce
Commit: aa6964e585


@@ -1748,19 +1748,6 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtranspor
 		return -ENOMEM;
 	}
 
-	desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
-	for (i = 0; i < num_sgl_descriptors; i++) {
-		num_buffers += SPDK_CEIL_DIV(desc->keyed.length, rtransport->transport.opts.io_unit_size);
-		desc++;
-	}
-
-	rc = spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport,
-					   num_buffers);
-	if (rc != 0) {
-		nvmf_rdma_request_free_data(rdma_req, rtransport);
-		return rc;
-	}
-
 	/* The first WR must always be the embedded data WR. This is how we unwind them later. */
 	current_wr = &rdma_req->data.wr;
 	assert(current_wr != NULL);
@@ -1776,6 +1763,13 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtranspor
 			goto err_exit;
 		}
 
+		num_buffers = SPDK_CEIL_DIV(desc->keyed.length, rtransport->transport.opts.io_unit_size);
+		rc = spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport,
+						   num_buffers);
+		if (rc != 0) {
+			goto err_exit;
+		}
+
 		current_wr->num_sge = 0;
 
 		rc = nvmf_rdma_fill_buffers(rtransport, rgroup, device, req, current_wr,