nvmf/rdma: Factor out WR SGE setup in fill_buffers() into fill_wr_sge()

Factor out the WR SGE setup logic from nvmf_rdma_fill_buffers() into a
new function, nvmf_rdma_fill_wr_sge().

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I813f156b83b6e1773ea76d0d1ed8684b1e267691
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/468945
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
This commit is contained in:
Shuhei Matsumoto 2019-09-23 07:22:10 +09:00 committed by Jim Harris
parent 7c7a0c0a68
commit 89a28bfd1b

View File

@@ -1618,6 +1618,35 @@ nvmf_rdma_fill_buffers_with_md_interleave(struct spdk_nvmf_rdma_transport *rtran
return 0;
}
static bool
nvmf_rdma_fill_wr_sge(struct spdk_nvmf_rdma_device *device,
struct spdk_nvmf_request *req, struct ibv_send_wr *wr)
{
uint64_t translation_len;
translation_len = req->iov[req->iovcnt].iov_len;
if (!g_nvmf_hooks.get_rkey) {
wr->sg_list[wr->num_sge].lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
(uint64_t)req->iov[req->iovcnt].iov_base, &translation_len))->lkey;
} else {
wr->sg_list[wr->num_sge].lkey = spdk_mem_map_translate(device->map,
(uint64_t)req->iov[req->iovcnt].iov_base, &translation_len);
}
if (spdk_unlikely(translation_len < req->iov[req->iovcnt].iov_len)) {
/* This is a very rare case that can occur when using DPDK version < 19.05 */
SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions. Removing it from circulation.\n");
return false;
}
wr->sg_list[wr->num_sge].addr = (uintptr_t)(req->iov[req->iovcnt].iov_base);
wr->sg_list[wr->num_sge].length = req->iov[req->iovcnt].iov_len;
wr->num_sge++;
return true;
}
static int
nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
struct spdk_nvmf_rdma_poll_group *rgroup,
@@ -1626,8 +1655,6 @@ nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
struct ibv_send_wr *wr,
uint32_t length)
{
uint64_t translation_len;
wr->num_sge = 0;
while (length) {
req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] +
@@ -1635,19 +1662,7 @@ nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
~NVMF_DATA_BUFFER_MASK);
req->iov[req->iovcnt].iov_len = spdk_min(length,
rtransport->transport.opts.io_unit_size);
translation_len = req->iov[req->iovcnt].iov_len;
if (!g_nvmf_hooks.get_rkey) {
wr->sg_list[wr->num_sge].lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
(uint64_t)req->iov[req->iovcnt].iov_base, &translation_len))->lkey;
} else {
wr->sg_list[wr->num_sge].lkey = spdk_mem_map_translate(device->map,
(uint64_t)req->iov[req->iovcnt].iov_base, &translation_len);
}
/* This is a very rare case that can occur when using DPDK version < 19.05 */
if (spdk_unlikely(translation_len < req->iov[req->iovcnt].iov_len)) {
SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions. Removing it from circulation.\n");
if (spdk_unlikely(!nvmf_rdma_fill_wr_sge(device, req, wr))) {
if (nvmf_rdma_replace_buffer(rgroup, &req->buffers[req->iovcnt]) == -ENOMEM) {
return -ENOMEM;
}
@@ -1655,10 +1670,7 @@ nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
}
length -= req->iov[req->iovcnt].iov_len;
wr->sg_list[wr->num_sge].addr = (uintptr_t)(req->iov[req->iovcnt].iov_base);
wr->sg_list[wr->num_sge].length = req->iov[req->iovcnt].iov_len;
req->iovcnt++;
wr->num_sge++;
}
return 0;