nvmf/rdma: Remove handling of buffers split over MR
Now that the minimum supported DPDK version is 19.11, we can remove the handling of buffers that may be split over several Memory Regions; that case was specific to DPDK < 19.05.

Change-Id: I61a79c80b864e3bdde134e8bff6622025ea578a7
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/5937
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
parent: 1ddef952f7
commit: 6715cc2a07
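Background, as the commit message states: with DPDK older than 19.05, the memory backing a data buffer could end up registered as more than one RDMA Memory Region, so a buffer taken from the pool occasionally straddled an MR boundary and could not be posted as a single ibv_sge. The code below is a minimal sketch of the detection idea using SPDK's public spdk_mem_map_translate(); the helper name buffer_fits_in_one_mr is hypothetical, not part of SPDK.

#include <stdbool.h>
#include <stdint.h>
#include "spdk/env.h"

/* Hypothetical helper: does one registered Memory Region cover the whole
 * buffer? spdk_mem_map_translate() takes the requested length in *size and
 * stores back how many bytes the matching registration actually covers. */
static bool
buffer_fits_in_one_mr(struct spdk_mem_map *map, void *buf, size_t len)
{
	uint64_t translation_len = len;

	(void)spdk_mem_map_translate(map, (uint64_t)buf, &translation_len);

	/* If the registration ends before the buffer does, it is split. */
	return translation_len >= len;
}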
@@ -445,11 +445,6 @@ struct spdk_nvmf_rdma_poll_group {
 	struct spdk_nvmf_rdma_poll_group_stat		stat;
 	TAILQ_HEAD(, spdk_nvmf_rdma_poller)		pollers;
 	TAILQ_ENTRY(spdk_nvmf_rdma_poll_group)		link;
-	/*
-	 * buffers which are split across multiple RDMA
-	 * memory regions cannot be used by this transport.
-	 */
-	STAILQ_HEAD(, spdk_nvmf_transport_pg_cache_buf)	retired_bufs;
 };
 
 struct spdk_nvmf_rdma_conn_sched {
@@ -1412,34 +1407,6 @@ nvmf_rdma_update_remote_addr(struct spdk_nvmf_rdma_request *rdma_req, uint32_t n
 	}
 }
 
-/* This function is used in the rare case that we have a buffer split over multiple memory regions. */
-static int
-nvmf_rdma_replace_buffer(struct spdk_nvmf_rdma_poll_group *rgroup, void **buf)
-{
-	struct spdk_nvmf_transport_poll_group	*group = &rgroup->group;
-	struct spdk_nvmf_transport		*transport = group->transport;
-	struct spdk_nvmf_transport_pg_cache_buf	*old_buf;
-	void					*new_buf;
-
-	if (!(STAILQ_EMPTY(&group->buf_cache))) {
-		group->buf_cache_count--;
-		new_buf = STAILQ_FIRST(&group->buf_cache);
-		STAILQ_REMOVE_HEAD(&group->buf_cache, link);
-		assert(*buf != NULL);
-	} else {
-		new_buf = spdk_mempool_get(transport->data_buf_pool);
-	}
-
-	if (*buf == NULL) {
-		return -ENOMEM;
-	}
-
-	old_buf = *buf;
-	STAILQ_INSERT_HEAD(&rgroup->retired_bufs, old_buf, link);
-	*buf = new_buf;
-	return 0;
-}
-
 static bool
 nvmf_rdma_fill_wr_sge(struct spdk_nvmf_rdma_device *device,
 		      struct iovec *iov, struct ibv_send_wr **_wr,
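A note on the function removed above: it preferred the poll group's buffer cache over the shared mempool, and it parked the old split buffer on retired_bufs instead of returning it to the pool, so the unusable buffer could not be handed right back out; retired buffers were only given back to the mempool in nvmf_rdma_poll_group_destroy (see the later hunk). The acquisition half of that pattern, as a simplified sketch (pg_buf_get is a hypothetical name, not an SPDK API):

#include "spdk/env.h"
#include "spdk/nvmf_transport.h"

/* Simplified sketch of the cache-then-mempool acquisition used above:
 * prefer the per-poll-group cache (no cross-thread contention), fall
 * back to the transport-wide mempool. May return NULL on exhaustion. */
static void *
pg_buf_get(struct spdk_nvmf_transport_poll_group *group)
{
	struct spdk_nvmf_transport_pg_cache_buf *cache_buf;

	if (!STAILQ_EMPTY(&group->buf_cache)) {
		cache_buf = STAILQ_FIRST(&group->buf_cache);
		STAILQ_REMOVE_HEAD(&group->buf_cache, link);
		group->buf_cache_count--;
		return cache_buf;
	}

	return spdk_mempool_get(group->transport->data_buf_pool);
}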
@@ -1534,14 +1501,9 @@ nvmf_rdma_fill_wr_sgl(struct spdk_nvmf_rdma_poll_group *rgroup,
 	wr->num_sge = 0;
 
 	while (length && (num_extra_wrs || wr->num_sge < SPDK_NVMF_MAX_SGL_ENTRIES)) {
-		while (spdk_unlikely(!nvmf_rdma_fill_wr_sge(device, &req->iov[rdma_req->iovpos], &wr,
-				     &remaining_data_block, &offset, &num_extra_wrs, dif_ctx))) {
-			if (nvmf_rdma_replace_buffer(rgroup, &req->buffers[rdma_req->iovpos]) == -ENOMEM) {
-				return -ENOMEM;
-			}
-			req->iov[rdma_req->iovpos].iov_base = (void *)((uintptr_t)(req->buffers[rdma_req->iovpos] +
-							       NVMF_DATA_BUFFER_MASK) &
-							       ~NVMF_DATA_BUFFER_MASK);
-		}
+		if (spdk_unlikely(!nvmf_rdma_fill_wr_sge(device, &req->iov[rdma_req->iovpos], &wr,
+				  &remaining_data_block, &offset, &num_extra_wrs, dif_ctx))) {
+			return -EINVAL;
+		}
 
 		length -= req->iov[rdma_req->iovpos].iov_len;
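The pointer arithmetic in the removed retry, (p + NVMF_DATA_BUFFER_MASK) & ~NVMF_DATA_BUFFER_MASK, rounds an address up to the next NVMF_DATA_BUFFER_ALIGNMENT boundary so the I/O lands at the aligned start of the replacement buffer. A standalone illustration follows; the 4 KiB value is assumed here, and SPDK defines the real constants in nvmf_transport.h.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values; SPDK's real definitions live in nvmf_transport.h. */
#define NVMF_DATA_BUFFER_ALIGNMENT	0x1000u			/* 4 KiB */
#define NVMF_DATA_BUFFER_MASK		(NVMF_DATA_BUFFER_ALIGNMENT - 1)

int
main(void)
{
	uintptr_t raw = 0x12345;
	/* Round up: adding the mask and then clearing the low bits lands on
	 * the next aligned address (or the same one if already aligned). */
	uintptr_t aligned = (raw + NVMF_DATA_BUFFER_MASK) & ~(uintptr_t)NVMF_DATA_BUFFER_MASK;

	printf("raw=0x%jx aligned=0x%jx\n", (uintmax_t)raw, (uintmax_t)aligned);	/* 0x12345 -> 0x13000 */
	return 0;
}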
@@ -3259,7 +3221,6 @@ nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
 	}
 
 	TAILQ_INIT(&rgroup->pollers);
-	STAILQ_INIT(&rgroup->retired_bufs);
 
 	pthread_mutex_lock(&rtransport->lock);
 	TAILQ_FOREACH(device, &rtransport->devices, link) {
@@ -3384,7 +3345,6 @@ nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
 	struct spdk_nvmf_rdma_poll_group	*rgroup, *next_rgroup;
 	struct spdk_nvmf_rdma_poller		*poller, *tmp;
 	struct spdk_nvmf_rdma_qpair		*qpair, *tmp_qpair;
-	struct spdk_nvmf_transport_pg_cache_buf	*buf, *tmp_buf;
 	struct spdk_nvmf_rdma_transport		*rtransport;
 
 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
@@ -3392,13 +3352,6 @@ nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
 		return;
 	}
 
-	/* free all retired buffers back to the transport so we don't short the mempool. */
-	STAILQ_FOREACH_SAFE(buf, &rgroup->retired_bufs, link, tmp_buf) {
-		STAILQ_REMOVE(&rgroup->retired_bufs, buf, spdk_nvmf_transport_pg_cache_buf, link);
-		assert(group->transport != NULL);
-		spdk_mempool_put(group->transport->data_buf_pool, buf);
-	}
-
 	TAILQ_FOREACH_SAFE(poller, &rgroup->pollers, link, tmp) {
 		TAILQ_REMOVE(&rgroup->pollers, poller, link);
 
@@ -161,8 +161,6 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
 	struct spdk_nvmf_transport_pg_cache_buf	bufs[4];
 	struct spdk_nvme_sgl_descriptor		sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
 	struct spdk_nvmf_rdma_request_data	data;
-	struct spdk_nvmf_transport_pg_cache_buf	buffer;
-	struct spdk_nvmf_transport_pg_cache_buf	*buffer_ptr;
 	int rc, i;
 
 	data.wr.sg_list = data.sgl;
@@ -170,7 +168,6 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
 	group.group.buf_cache_size = 0;
 	group.group.buf_cache_count = 0;
 	group.group.transport = &rtransport.transport;
-	STAILQ_INIT(&group.retired_bufs);
 	poller.group = &group;
 	rqpair.poller = &poller;
 	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
@@ -467,36 +464,6 @@ test_spdk_nvmf_rdma_request_parse_sgl(void)
 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
 	}
 
-	reset_nvmf_rdma_request(&rdma_req);
-	/* Test 5 dealing with a buffer split over two Memory Regions */
-	MOCK_SET(spdk_mempool_get, (void *)&buffer);
-	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
-	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
-	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
-	g_mr_size = rtransport.transport.opts.io_unit_size / 4;
-	g_mr_next_size = rtransport.transport.opts.io_unit_size / 2;
-
-	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
-	SPDK_CU_ASSERT_FATAL(rc == 0);
-	CU_ASSERT(rdma_req.req.data_from_pool == true);
-	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
-	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
-			~NVMF_DATA_BUFFER_MASK));
-	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
-	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
-	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
-	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
-	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
-			~NVMF_DATA_BUFFER_MASK));
-	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
-	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
-	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
-	CU_ASSERT(buffer_ptr == &buffer);
-	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
-	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
-	g_mr_size = 0;
-	g_mr_next_size = 0;
-
 	reset_nvmf_rdma_request(&rdma_req);
 }
 
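The deleted Test 5 above drove the replacement path through the g_mr_size/g_mr_next_size knobs: the first translation claims to cover only io_unit_size / 4 bytes of the io_unit_size / 2 request, so nvmf_rdma_fill_wr_sge() fails and the buffer is replaced, after which a translation covering g_mr_next_size = io_unit_size / 2 bytes succeeds. The unit test's translation mock presumably looked roughly like the sketch below; the real one lives in rdma_ut.c and may differ in detail.

#include <infiniband/verbs.h>
#include "spdk/env.h"
#include "spdk/util.h"

static struct ibv_mr g_rdma_mr;	/* stand-in for a registered MR */
uint64_t g_mr_size;
uint64_t g_mr_next_size;

/* Link-time override of the real symbol in the unit-test binary. When
 * g_mr_size is set, pretend the MR covering vaddr ends after g_mr_size
 * bytes; subsequent lookups see g_mr_next_size instead, modeling the
 * replacement buffer's (sufficiently large) MR. */
uint64_t
spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
{
	if (g_mr_size != 0) {
		*size = spdk_min(*size, g_mr_size);
		if (g_mr_next_size != 0) {
			g_mr_size = g_mr_next_size;
		}
	}

	return (uint64_t)&g_rdma_mr;
}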
@@ -872,8 +839,6 @@ test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
 	struct spdk_nvmf_rdma_request_data	data;
 	char					data2_buffer[8192];
 	struct spdk_nvmf_rdma_request_data	*data2 = (struct spdk_nvmf_rdma_request_data *)data2_buffer;
-	struct spdk_nvmf_transport_pg_cache_buf	buffer;
-	struct spdk_nvmf_transport_pg_cache_buf	*buffer_ptr;
 	const uint32_t				data_bs = 512;
 	const uint32_t				md_size = 8;
 	int					rc, i;
@@ -884,7 +849,6 @@ test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
 	group.group.buf_cache_size = 0;
 	group.group.buf_cache_count = 0;
 	group.group.transport = &rtransport.transport;
-	STAILQ_INIT(&group.retired_bufs);
 	poller.group = &group;
 	rqpair.poller = &poller;
 	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
@@ -1184,41 +1148,6 @@ test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
 	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
 	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == RDMA_UT_LKEY);
 
-	/* Test 9 dealing with a buffer split over two Memory Regions */
-	MOCK_SET(spdk_mempool_get, (void *)&buffer);
-	reset_nvmf_rdma_request(&rdma_req);
-	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
-			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
-			  0, 0, 0, 0, 0);
-	rdma_req.req.dif.dif_insert_or_strip = true;
-	rtransport.transport.opts.io_unit_size = data_bs * 4;
-	sgl->keyed.length = data_bs * 2;
-	g_mr_size = data_bs;
-	g_mr_next_size = rtransport.transport.opts.io_unit_size;
-
-	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
-	SPDK_CU_ASSERT_FATAL(rc == 0);
-	CU_ASSERT(rdma_req.req.data_from_pool == true);
-	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
-	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
-			~NVMF_DATA_BUFFER_MASK));
-	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
-	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
-	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
-	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
-	for (i = 0; i < 2; i++) {
-		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uint64_t)rdma_req.req.data + i *
-			  (data_bs + md_size));
-		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
-		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
-	}
-	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
-	CU_ASSERT(buffer_ptr == &buffer);
-	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
-	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
-	g_mr_size = 0;
-	g_mr_next_size = 0;
-
 	/* Test 2: Multi SGL */
 	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
 	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;