NVMe-oF: Add explicit reports for MR-split buffers

This is a failsafe for detecting and reporting data buffers that span
multiple RDMA Memory Regions. These errors should never be triggered in
practice, but detecting and reporting them explicitly will make any such
case far easier to debug.

Change-Id: I3c61e3cc510f5a36039fc1815ff0de45fce794d5
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/436054 (master)
Reviewed-on: https://review.gerrithub.io/437016
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
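Every check in this commit follows the same pattern: request a translation for the entire payload from the memory map, then compare the number of bytes the returned Memory Region can actually cover against the payload size. spdk_mem_map_translate() shrinks its in/out size argument to what a single translation covers, so a returned size smaller than the request is exactly the split-buffer condition the new SPDK_ERRLOG lines report. Below is a minimal, self-contained sketch of that pattern; the toy region table, toy_translate(), and check_buffer() are illustrative stand-ins for SPDK's page-granular mem map and spdk_mem_map_translate(), not SPDK APIs.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for SPDK's registered-memory map: a fixed list of
 * separately registered regions (illustrative only). */
struct toy_region {
        uint64_t start;
        uint64_t len;
};

static const struct toy_region g_regions[] = {
        { 0x1000, 0x1000 },     /* one 4 KiB registration */
        { 0x2000, 0x1000 },     /* an adjacent but separate registration */
};

/* Mimics the spdk_mem_map_translate() contract used in the diff: find the
 * region covering vaddr and shrink *size to the number of bytes that one
 * region can translate contiguously. */
static const struct toy_region *
toy_translate(uint64_t vaddr, uint64_t *size)
{
        for (size_t i = 0; i < sizeof(g_regions) / sizeof(g_regions[0]); i++) {
                const struct toy_region *r = &g_regions[i];

                if (vaddr >= r->start && vaddr < r->start + r->len) {
                        uint64_t avail = r->start + r->len - vaddr;

                        if (*size > avail) {
                                *size = avail;
                        }
                        return r;
                }
        }
        return NULL;
}

/* The failsafe added at each call site: a non-NULL translation whose usable
 * length is shorter than the payload means the buffer is split across
 * multiple Memory Regions. */
static bool
check_buffer(uint64_t payload, uint64_t payload_size)
{
        uint64_t requested_size = payload_size;
        const struct toy_region *mr = toy_translate(payload, &requested_size);

        if (mr == NULL || requested_size < payload_size) {
                if (mr) {
                        fprintf(stderr, "Data buffer split over multiple RDMA Memory Regions\n");
                }
                return false;
        }
        return true;
}

int
main(void)
{
        check_buffer(0x1800, 0x400);    /* fits within one region: ok */
        check_buffer(0x1800, 0x1000);   /* spans both regions: reported */
        return 0;
}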

lib/nvme/nvme_rdma.c

@@ -880,6 +880,9 @@ nvme_rdma_build_contig_inline_request(struct nvme_rdma_qpair *rqpair,
                                              (uint64_t)payload, &requested_size);
         if (mr == NULL || requested_size < req->payload_size) {
+               if (mr) {
+                       SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
+               }
                 return -EINVAL;
         }
@@ -927,7 +930,11 @@ nvme_rdma_build_contig_request(struct nvme_rdma_qpair *rqpair,
         requested_size = req->payload_size;
         mr = (struct ibv_mr *)spdk_mem_map_translate(rqpair->mr_map->map, (uint64_t)payload,
                                                      &requested_size);
         if (mr == NULL || requested_size < req->payload_size) {
+               if (mr) {
+                       SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
+               }
                 return -1;
         }
@@ -988,6 +995,9 @@ nvme_rdma_build_sgl_request(struct nvme_rdma_qpair *rqpair,
                                              &mr_length);
         if (mr == NULL || mr_length < sge_length) {
+               if (mr) {
+                       SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
+               }
                 return -1;
         }
@@ -1081,6 +1091,9 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
         mr = (struct ibv_mr *)spdk_mem_map_translate(rqpair->mr_map->map, (uint64_t)virt_addr,
                                                      &requested_size);
         if (mr == NULL || requested_size < req->payload_size) {
+               if (mr) {
+                       SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
+               }
                 return -1;
         }

lib/nvmf/rdma.c

@@ -1139,13 +1139,16 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 {
         void *buf = NULL;
         uint32_t length = rdma_req->req.length;
+       uint64_t translation_len;
         uint32_t i = 0;
+       int rc = 0;

         rdma_req->req.iovcnt = 0;
         while (length) {
                 buf = spdk_mempool_get(rtransport->data_buf_pool);
                 if (!buf) {
-                       goto nomem;
+                       rc = -ENOMEM;
+                       goto err_exit;
                 }

                 rdma_req->req.iov[i].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
@@ -1155,18 +1158,24 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
                 rdma_req->data.buffers[i] = buf;
                 rdma_req->data.wr.sg_list[i].addr = (uintptr_t)(rdma_req->req.iov[i].iov_base);
                 rdma_req->data.wr.sg_list[i].length = rdma_req->req.iov[i].iov_len;
+               translation_len = rdma_req->req.iov[i].iov_len;
                 rdma_req->data.wr.sg_list[i].lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
-                               (uint64_t)buf, NULL))->lkey;
+                               (uint64_t)buf, &translation_len))->lkey;
                 length -= rdma_req->req.iov[i].iov_len;

+               if (translation_len < rdma_req->req.iov[i].iov_len) {
+                       SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
+                       rc = -EINVAL;
+                       goto err_exit;
+               }
                 i++;
         }

         rdma_req->data_from_pool = true;

-       return 0;
+       return rc;

-nomem:
+err_exit:
         while (i) {
                 i--;
                 spdk_mempool_put(rtransport->data_buf_pool, rdma_req->req.iov[i].iov_base);
@@ -1178,7 +1187,7 @@ nomem:
                 rdma_req->data.wr.sg_list[i].lkey = 0;
         }
         rdma_req->req.iovcnt = 0;
-       return -ENOMEM;
+       return rc;
 }

 static int
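As the diff above shows, the target-side change also reworks the error path of spdk_nvmf_rdma_request_fill_iovs(): rc is set at the point of failure, and the old nomem label becomes a generic err_exit that returns every buffer taken from the pool so far, letting the new -EINVAL case share cleanup with the existing -ENOMEM case. A minimal sketch of that unwind pattern follows, with a toy LIFO pool standing in for spdk_mempool_get()/spdk_mempool_put(); fill_iovs(), pool_get(), and pool_put() here are illustrative, not SPDK APIs.

#include <errno.h>
#include <stdio.h>

#define NBUFS 4

/* Toy LIFO buffer pool; stands in for spdk_mempool_get()/spdk_mempool_put(). */
static char g_bufs[NBUFS][4096];
static int g_next;

static void *
pool_get(void)
{
        return g_next < NBUFS ? g_bufs[g_next++] : NULL;
}

static void
pool_put(void *buf)
{
        (void)buf;      /* LIFO toy: callers release in reverse order */
        g_next--;
}

/* Mirrors the reworked error path: rc is set where the failure happens, and
 * the single err_exit label returns every buffer acquired so far before
 * propagating rc to the caller. */
static int
fill_iovs(void *iovs[], int needed)
{
        int i = 0, rc = 0;

        while (i < needed) {
                iovs[i] = pool_get();
                if (iovs[i] == NULL) {
                        rc = -ENOMEM;
                        goto err_exit;
                }
                i++;
        }
        return rc;

err_exit:
        while (i) {
                i--;
                pool_put(iovs[i]);
                iovs[i] = NULL;
        }
        return rc;
}

int
main(void)
{
        void *iovs[8] = { NULL };

        printf("fill 3 -> %d\n", fill_iovs(iovs, 3));   /* 0: success */
        printf("fill 8 -> %d\n", fill_iovs(iovs, 8));   /* -ENOMEM after unwind */
        return 0;
}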