rdma: optimize and move buffers variable.

The buffers are really specific to the request, not to the wr or data
object. In the case of multiple WRs per request, the maximum number of
buffers per request is twice the number of SGEs in the NVMe-oF request
(SPDK_NVMF_MAX_SGL_ENTRIES * 2).

Change-Id: Ic59498bfed461d180adb2fb9a481ac5b11fa9252
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/449108
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Seth Howell, 2019-03-25 16:38:37 -07:00
Committed by: Jim Harris
parent e590f607e6
commit f4adbc79ce
2 changed files with 17 additions and 14 deletions
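
As the commit message notes, the buffers array moves from the per-WR data
object up to the request itself and is resized for the multi-WR case. A
minimal sketch of the resulting layout, condensed from the hunks below
(surrounding fields elided, indentation simplified):

    #define NVMF_REQ_MAX_BUFFERS (SPDK_NVMF_MAX_SGL_ENTRIES * 2)

    struct spdk_nvmf_rdma_request_data {
            struct spdk_nvmf_rdma_wr rdma_wr;
            struct ibv_send_wr       wr;
            struct ibv_sge           sgl[SPDK_NVMF_MAX_SGL_ENTRIES];
            /* buffers[] no longer lives here */
    };

    struct spdk_nvmf_rdma_request {
            /* ... */
            struct spdk_nvmf_rdma_request_data data;
            void     *buffers[NVMF_REQ_MAX_BUFFERS]; /* moved here */
            uint32_t num_outstanding_data_wr;
            /* ... */
    };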

File: lib/nvmf/rdma.c

@@ -65,7 +65,10 @@ struct spdk_nvme_rdma_hooks g_nvmf_hooks = {};
#define MAX_WR_PER_QP(queue_depth) (queue_depth * 3 + 2)
/* Timeout for destroying defunct rqpairs */
#define NVMF_RDMA_QPAIR_DESTROY_TIMEOUT_US 4000000
+/* The maximum number of buffers per request */
+#define NVMF_REQ_MAX_BUFFERS (SPDK_NVMF_MAX_SGL_ENTRIES * 2)
enum spdk_nvmf_rdma_request_state {
/* The request is not currently in use */
@@ -228,7 +231,6 @@ struct spdk_nvmf_rdma_request_data {
struct spdk_nvmf_rdma_wr rdma_wr;
struct ibv_send_wr wr;
struct ibv_sge sgl[SPDK_NVMF_MAX_SGL_ENTRIES];
-void *buffers[SPDK_NVMF_MAX_SGL_ENTRIES];
};
struct spdk_nvmf_rdma_request {
@@ -246,6 +248,7 @@ struct spdk_nvmf_rdma_request {
} rsp;
struct spdk_nvmf_rdma_request_data data;
+void *buffers[NVMF_REQ_MAX_BUFFERS];
uint32_t num_outstanding_data_wr;
@@ -1291,13 +1294,13 @@ spdk_nvmf_rdma_request_free_buffers(struct spdk_nvmf_rdma_request *rdma_req,
for (uint32_t i = 0; i < rdma_req->req.iovcnt; i++) {
if (group->buf_cache_count < group->buf_cache_size) {
STAILQ_INSERT_HEAD(&group->buf_cache,
-(struct spdk_nvmf_transport_pg_cache_buf *)rdma_req->data.buffers[i], link);
+(struct spdk_nvmf_transport_pg_cache_buf *)rdma_req->buffers[i], link);
group->buf_cache_count++;
} else {
-spdk_mempool_put(transport->data_buf_pool, rdma_req->data.buffers[i]);
+spdk_mempool_put(transport->data_buf_pool, rdma_req->buffers[i]);
}
rdma_req->req.iov[i].iov_base = NULL;
-rdma_req->data.buffers[i] = NULL;
+rdma_req->buffers[i] = NULL;
rdma_req->req.iov[i].iov_len = 0;
}
@@ -1440,7 +1443,7 @@ nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
rdma_req->req.iov[iovcnt].iov_len = spdk_min(remaining_length,
rtransport->transport.opts.io_unit_size);
rdma_req->req.iovcnt++;
-rdma_req->data.buffers[iovcnt] = buf;
+rdma_req->buffers[iovcnt] = buf;
wr->sg_list[i].addr = (uintptr_t)(rdma_req->req.iov[iovcnt].iov_base);
wr->sg_list[i].length = rdma_req->req.iov[iovcnt].iov_len;
translation_len = rdma_req->req.iov[iovcnt].iov_len;
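
Condensing the two hunks above: the fill path records each acquired buffer
at the request level, and the free path returns buffers to the poll-group
cache until it is full, then to the mempool. A minimal sketch, with error
handling and the memory-key translation elided:

    /* fill path (nvmf_rdma_fill_buffers): track the buffer per request */
    rdma_req->req.iov[iovcnt].iov_len = spdk_min(remaining_length,
                                                 rtransport->transport.opts.io_unit_size);
    rdma_req->buffers[iovcnt] = buf;   /* was rdma_req->data.buffers[iovcnt] */

    /* free path (spdk_nvmf_rdma_request_free_buffers) */
    for (uint32_t i = 0; i < rdma_req->req.iovcnt; i++) {
            if (group->buf_cache_count < group->buf_cache_size) {
                    /* recycle into the poll-group cache */
                    STAILQ_INSERT_HEAD(&group->buf_cache,
                                       (struct spdk_nvmf_transport_pg_cache_buf *)rdma_req->buffers[i],
                                       link);
                    group->buf_cache_count++;
            } else {
                    /* cache full; return to the shared pool */
                    spdk_mempool_put(transport->data_buf_pool, rdma_req->buffers[i]);
            }
            rdma_req->req.iov[i].iov_base = NULL;
            rdma_req->buffers[i] = NULL;
            rdma_req->req.iov[i].iov_len = 0;
    }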

File: test/unit/lib/nvmf/rdma.c/rdma_ut.c

@@ -100,7 +100,7 @@ static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
rdma_req->req.iov[i].iov_base = 0;
rdma_req->req.iov[i].iov_len = 0;
-rdma_req->data.buffers[i] = 0;
+rdma_req->buffers[i] = 0;
rdma_req->data.wr.sg_list[i].addr = 0;
rdma_req->data.wr.sg_list[i].length = 0;
rdma_req->data.wr.sg_list[i].lkey = 0;
@@ -165,7 +165,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
CU_ASSERT(rdma_req.data.wr.num_sge == 1);
CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
-CU_ASSERT((uint64_t)rdma_req.data.buffers[0] == 0x2000);
+CU_ASSERT((uint64_t)rdma_req.buffers[0] == 0x2000);
CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
@@ -182,7 +182,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
-CU_ASSERT((uint64_t)rdma_req.data.buffers[i] == 0x2000);
+CU_ASSERT((uint64_t)rdma_req.buffers[i] == 0x2000);
CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
@@ -205,7 +205,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
CU_ASSERT(rdma_req.data_from_pool == false);
CU_ASSERT(rdma_req.req.data == NULL);
CU_ASSERT(rdma_req.data.wr.num_sge == 0);
-CU_ASSERT(rdma_req.data.buffers[0] == NULL);
+CU_ASSERT(rdma_req.buffers[0] == NULL);
CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);
@@ -274,7 +274,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
CU_ASSERT(group.group.buf_cache_count == 0);
CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
for (i = 0; i < 4; i++) {
-CU_ASSERT((uint64_t)rdma_req.data.buffers[i] == (uint64_t)&bufs[i]);
+CU_ASSERT((uint64_t)rdma_req.buffers[i] == (uint64_t)&bufs[i]);
CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
~NVMF_DATA_BUFFER_MASK));
CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
@@ -294,7 +294,7 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
CU_ASSERT(group.group.buf_cache_count == 0);
CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
for (i = 0; i < 4; i++) {
-CU_ASSERT((uint64_t)rdma_req.data.buffers[i] == 0x2000);
+CU_ASSERT((uint64_t)rdma_req.buffers[i] == 0x2000);
CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
CU_ASSERT(group.group.buf_cache_count == 0);
@@ -319,13 +319,13 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
CU_ASSERT(group.group.buf_cache_count == 0);
for (i = 0; i < 2; i++) {
-CU_ASSERT((uint64_t)rdma_req.data.buffers[i] == (uint64_t)&bufs[i]);
+CU_ASSERT((uint64_t)rdma_req.buffers[i] == (uint64_t)&bufs[i]);
CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
~NVMF_DATA_BUFFER_MASK));
CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
}
for (i = 2; i < 4; i++) {
-CU_ASSERT((uint64_t)rdma_req.data.buffers[i] == 0x2000);
+CU_ASSERT((uint64_t)rdma_req.buffers[i] == 0x2000);
CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
}