nvme_rdma: don't send split sgl requests inline.

In order to truly support multi-SGL inline requests in the RDMA
transport, we would have to grow the spdk_nvme_rdma_req object
dramatically: it would need enough embedded ibv_sge entries to cover
the maximum number of SGEs the target supports (up to 16 for the SPDK
target). Instead of doing that, or creating a new pool of shared
ibv_sge objects for this case, send split multi-SGL requests through
the regular SGL path rather than inline.
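
To put a rough number on "dramatically", here is a back-of-the-envelope
sketch (illustration only, not SPDK code): the 16-SGE maximum comes from
the message above, the current two-element send SGL from the diff below,
and the per-qpair request count is an assumed example value.

/* Sketch: how much each spdk_nvme_rdma_req would grow if it embedded
 * enough ibv_sge entries for a 16-SGE inline payload instead of the
 * current two (one for the command, one for the data buffer).
 * reqs_per_qpair is an assumed example, not an SPDK constant. */
#include <stdio.h>
#include <infiniband/verbs.h>	/* struct ibv_sge (typically 16 bytes) */

int main(void)
{
	const size_t current_sges = 2;		/* command + single data SGE */
	const size_t needed_sges = 1 + 16;	/* command + up to 16 data SGEs */
	const size_t reqs_per_qpair = 128;	/* assumed queue depth, for scale */

	size_t extra_per_req = (needed_sges - current_sges) * sizeof(struct ibv_sge);

	printf("sizeof(struct ibv_sge)  = %zu bytes\n", sizeof(struct ibv_sge));
	printf("extra per request       = %zu bytes\n", extra_per_req);
	printf("extra per qpair (x%zu)  = %zu bytes\n",
	       reqs_per_qpair, extra_per_req * reqs_per_qpair);
	return 0;
}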

Change-Id: I78313bd88f3ed1cea3b772d9476a00087f49a4dd
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/452266
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Author:     Seth Howell <seth.howell@intel.com>
AuthorDate: 2019-04-26 13:17:33 -07:00
Committer:  Ben Walker <benjamin.walker@intel.com>
Parent:     47097a3fe4
Commit:     eb6006c242


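The hunks below read more easily with the new control flow in mind. A
distilled sketch of that decision follows; the type, helper name, and
example sizes are stand-ins for illustration only (the real function
also translates the buffer through the qpair's memory-region map and
fills in rdma_req->send_sgl[]).

/* Distilled sketch of the decision the new nvme_rdma_build_sgl_inline_request()
 * makes; names and values here are stand-ins, not SPDK definitions. */
#include <stdint.h>
#include <stdio.h>

enum build_path { BUILD_INLINE, BUILD_REGULAR_SGL };

/* first_sge_len: length returned by the payload's next_sge_fn for the
 * first element; payload_size: total request payload size. */
static enum build_path
choose_build_path(uint32_t first_sge_len, uint32_t payload_size)
{
	if (first_sge_len < payload_size) {
		/* Payload is split across multiple SGEs: don't send it
		 * inline, fall back to the regular SGL path. */
		return BUILD_REGULAR_SGL;
	}
	/* A single SGE covers the whole payload: send it inline with two
	 * send SGEs (SGE 0 = command, SGE 1 = data, capped to payload_size). */
	return BUILD_INLINE;
}

int main(void)
{
	printf("4KiB payload, first SGE 4KiB -> %s\n",
	       choose_build_path(4096, 4096) == BUILD_INLINE ? "inline" : "regular SGL");
	printf("4KiB payload, first SGE 2KiB -> %s\n",
	       choose_build_path(2048, 4096) == BUILD_INLINE ? "inline" : "regular SGL");
	return 0;
}
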
@@ -1137,7 +1136,6 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
 	struct ibv_mr *mr;
 	uint32_t length;
 	uint64_t requested_size;
-	uint32_t remaining_payload;
 	void *virt_addr;
 	int rc, i;
@@ -1147,48 +1146,42 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
 	assert(req->payload.next_sge_fn != NULL);
 	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
-	remaining_payload = req->payload_size;
-	rdma_req->send_wr.num_sge = 1;
-	do {
-		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
-		if (rc) {
-			return -1;
-		}
-		if (length > remaining_payload) {
-			length = remaining_payload;
-		}
-		requested_size = length;
-		mr = (struct ibv_mr *)spdk_mem_map_translate(rqpair->mr_map->map, (uint64_t)virt_addr,
-				&requested_size);
-		if (mr == NULL || requested_size < length) {
-			for (i = 1; i < rdma_req->send_wr.num_sge; i++) {
-				rdma_req->send_sgl[i].addr = 0;
-				rdma_req->send_sgl[i].length = 0;
-				rdma_req->send_sgl[i].lkey = 0;
-			}
-			if (mr) {
-				SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
-			}
-			return -1;
-		}
-		rdma_req->send_sgl[rdma_req->send_wr.num_sge].addr = (uint64_t)virt_addr;
-		rdma_req->send_sgl[rdma_req->send_wr.num_sge].length = length;
-		rdma_req->send_sgl[rdma_req->send_wr.num_sge].lkey = mr->lkey;
-		rdma_req->send_wr.num_sge++;
-		remaining_payload -= length;
-	} while (remaining_payload && rdma_req->send_wr.num_sge < (int64_t)rqpair->max_send_sge);
-	if (remaining_payload) {
-		SPDK_ERRLOG("Unable to prepare request. Too many SGL elements\n");
+	rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
+	if (rc) {
 		return -1;
 	}
+	if (length < req->payload_size) {
+		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Inline SGL request split so sending separately.\n");
+		return nvme_rdma_build_sgl_request(rqpair, rdma_req);
+	}
+	if (length > req->payload_size) {
+		length = req->payload_size;
+	}
+	requested_size = length;
+	mr = (struct ibv_mr *)spdk_mem_map_translate(rqpair->mr_map->map, (uint64_t)virt_addr,
+			&requested_size);
+	if (mr == NULL || requested_size < length) {
+		for (i = 1; i < rdma_req->send_wr.num_sge; i++) {
+			rdma_req->send_sgl[i].addr = 0;
+			rdma_req->send_sgl[i].length = 0;
+			rdma_req->send_sgl[i].lkey = 0;
+		}
+		if (mr) {
+			SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
+		}
+		return -1;
+	}
+	rdma_req->send_sgl[1].addr = (uint64_t)virt_addr;
+	rdma_req->send_sgl[1].length = length;
+	rdma_req->send_sgl[1].lkey = mr->lkey;
+	rdma_req->send_wr.num_sge = 2;
 	/* The first element of this SGL is pointing at an
 	 * spdk_nvmf_cmd object. For this particular command,
 	 * we only need the first 64 bytes corresponding to