nvmf/rdma: Reorder allocation of WRs and buffers in multi SGL case

This patch moves WR allocation ahead of buffer allocation in the
multi SGL case, so that parsing an SGL follows the same ordering as
the single SGL case. The failure paths then only need to unwind the
WRs with nvmf_rdma_request_free_data() instead of releasing buffers
first.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Iea026b48e8957e140b71db7afaf8aca88634dc33
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/468941
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Shuhei Matsumoto authored on 2019-09-20 13:53:26 +09:00; committed by Jim Harris
parent 5e298147b8
commit 410455e40b

@@ -1743,6 +1743,11 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtranspor
 	num_sgl_descriptors = inline_segment->unkeyed.length / sizeof(struct spdk_nvme_sgl_descriptor);
 	assert(num_sgl_descriptors <= SPDK_NVMF_MAX_SGL_ENTRIES);
 
+	if (nvmf_request_alloc_wrs(rtransport, rdma_req, num_sgl_descriptors - 1) != 0) {
+		return -ENOMEM;
+	}
+
 	desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
 	for (i = 0; i < num_sgl_descriptors; i++) {
@@ -1751,15 +1756,12 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtranspor
 	}
 
 	/* If the number of buffers is too large, then we know the I/O is larger than allowed. Fail it. */
 	if (num_buffers > NVMF_REQ_MAX_BUFFERS) {
+		nvmf_rdma_request_free_data(rdma_req, rtransport);
 		return -EINVAL;
 	}
 
 	if (spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport,
 					  num_buffers) != 0) {
-		return -ENOMEM;
-	}
-
-	if (nvmf_request_alloc_wrs(rtransport, rdma_req, num_sgl_descriptors - 1) != 0) {
-		spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport, num_buffers);
+		nvmf_rdma_request_free_data(rdma_req, rtransport);
 		return -ENOMEM;
 	}
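
For readers outside the SPDK tree, the following is a minimal compilable
sketch of the reordered pattern, not SPDK's actual code: alloc_wrs,
free_wrs, get_buffers, and MAX_BUFFERS are hypothetical stand-ins for
nvmf_request_alloc_wrs(), nvmf_rdma_request_free_data(),
spdk_nvmf_request_get_buffers(), and NVMF_REQ_MAX_BUFFERS.

/*
 * Sketch only: models the new ordering where WRs are allocated before
 * data buffers, so every later failure path unwinds only the WRs.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_BUFFERS 16 /* stand-in for NVMF_REQ_MAX_BUFFERS */

/* ~ nvmf_request_alloc_wrs(): succeeds for any non-negative count here */
static int alloc_wrs(int num_wrs) { return num_wrs >= 0 ? 0 : -1; }

/* ~ nvmf_rdma_request_free_data(): unwinds the WRs allocated above */
static void free_wrs(void) { printf("unwound WRs\n"); }

/* ~ spdk_nvmf_request_get_buffers(): this stub always succeeds within the
 * cap; the real call can also fail when the buffer pool is exhausted */
static int get_buffers(int n) { return n <= MAX_BUFFERS ? 0 : -1; }

static int fill_iovs_multi_sgl(int num_sgl_descriptors, int num_buffers)
{
	/* Step 1: allocate WRs first (matches the single SGL ordering). */
	if (alloc_wrs(num_sgl_descriptors - 1) != 0) {
		return -ENOMEM;
	}

	/* Step 2: validate and acquire buffers; on any failure, the only
	 * resource to release is the WRs. */
	if (num_buffers > MAX_BUFFERS) {
		free_wrs();
		return -EINVAL;
	}
	if (get_buffers(num_buffers) != 0) {
		free_wrs();
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	printf("ok path: %d\n", fill_iovs_multi_sgl(4, 8));  /* 0 */
	printf("too big: %d\n", fill_iovs_multi_sgl(4, 64)); /* -EINVAL after unwinding WRs */
	return 0;
}

Compared with the old ordering, the WR-allocation failure branch no longer
has to return buffers first, which is the simplification the diff above
captures.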