nvmf/rdma: add RPC parameter to specify CQ size with RDMA transport

Add an nvmf_create_transport RPC parameter to configure the CQ size. This helps
when the user knows the required CQ size up front, since iWARP does not support
CQ resize.
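
For illustration, here is a minimal sketch of setting the new option over JSON-RPC from Python, assuming SPDK's in-tree scripts/rpc client package is importable and the target listens on the default /var/tmp/spdk.sock socket; the socket path and the example value 4096 are assumptions, not part of this change:

```python
# Rough usage sketch (not part of this commit): create the RDMA transport
# with an explicit CQ size. num_cqe only takes effect together with
# no_srq=true, as the rdma.c hunks below show.
from rpc.client import JSONRPCClient  # SPDK scripts/rpc/client.py

client = JSONRPCClient('/var/tmp/spdk.sock')
client.call('nvmf_create_transport', {
    'trtype': 'RDMA',
    'no_srq': True,    # disable shared receive queues so num_cqe applies
    'num_cqe': 4096,   # requested completion queue entries per poller CQ
})
```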

Fixes issue #1747

Signed-off-by: Monica Kenguva <monica.kenguva@intel.com>
Change-Id: Ia9ba2b5f612993be27ebfa3455fb4fefd80ae738
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6495
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Monica Kenguva authored 2021-02-10 23:38:36 +00:00, committed by Tomasz Zawadzki
parent 7ecf76e02e
commit 97ef870100
4 changed files with 20 additions and 2 deletions

doc/jsonrpc.md

@@ -5066,6 +5066,7 @@ io_unit_size | Optional | number | I/O unit size (bytes)
max_aq_depth | Optional | number | Max number of admin cmds per AQ
num_shared_buffers | Optional | number | The number of pooled data buffers available to the transport
buf_cache_size | Optional | number | The number of shared buffers to reserve for each poll group
num_cqe | Optional | number | The number of CQ entries. Only used when no_srq=true (RDMA only)
max_srq_depth | Optional | number | The number of elements in a per-thread shared receive queue (RDMA only)
no_srq | Optional | boolean | Disable shared receive queue even for devices that support it. (RDMA only)
c2h_success | Optional | boolean | Disable C2H success optimization (TCP only)

lib/nvmf/rdma.c

@@ -465,6 +465,7 @@ struct spdk_nvmf_rdma_port {
};
struct rdma_transport_opts {
int num_cqe;
uint32_t max_srq_depth;
bool no_srq;
bool no_wr_batching;
@@ -493,6 +494,10 @@ struct spdk_nvmf_rdma_transport {
};
static const struct spdk_json_object_decoder rdma_transport_opts_decoder[] = {
{
"num_cqe", offsetof(struct rdma_transport_opts, num_cqe),
spdk_json_decode_int32, true
},
{
"max_srq_depth", offsetof(struct rdma_transport_opts, max_srq_depth),
spdk_json_decode_uint32, true
@@ -2232,6 +2237,7 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
TAILQ_INIT(&rtransport->poll_groups);
rtransport->transport.ops = &spdk_nvmf_transport_rdma;
rtransport->rdma_opts.num_cqe = DEFAULT_NVMF_RDMA_CQ_SIZE;
rtransport->rdma_opts.max_srq_depth = SPDK_NVMF_RDMA_DEFAULT_SRQ_DEPTH;
rtransport->rdma_opts.no_srq = SPDK_NVMF_RDMA_DEFAULT_NO_SRQ;
rtransport->rdma_opts.acceptor_backlog = SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG;
@@ -2249,7 +2255,7 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
" Transport opts: max_ioq_depth=%d, max_io_size=%d,\n"
" max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
" in_capsule_data_size=%d, max_aq_depth=%d,\n"
" num_shared_buffers=%d, max_srq_depth=%d, no_srq=%d,"
" num_shared_buffers=%d, num_cqe=%d, max_srq_depth=%d, no_srq=%d,"
" acceptor_backlog=%d, no_wr_batching=%d abort_timeout_sec=%d\n",
opts->max_queue_depth,
opts->max_io_size,
@@ -2258,6 +2264,7 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
opts->in_capsule_data_size,
opts->max_aq_depth,
opts->num_shared_buffers,
rtransport->rdma_opts.num_cqe,
rtransport->rdma_opts.max_srq_depth,
rtransport->rdma_opts.no_srq,
rtransport->rdma_opts.acceptor_backlog,
@@ -2460,6 +2467,9 @@ nvmf_rdma_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_writ
rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
spdk_json_write_named_uint32(w, "max_srq_depth", rtransport->rdma_opts.max_srq_depth);
spdk_json_write_named_bool(w, "no_srq", rtransport->rdma_opts.no_srq);
if (rtransport->rdma_opts.no_srq == true) {
spdk_json_write_named_int32(w, "num_cqe", rtransport->rdma_opts.num_cqe);
}
spdk_json_write_named_int32(w, "acceptor_backlog", rtransport->rdma_opts.acceptor_backlog);
spdk_json_write_named_bool(w, "no_wr_batching", rtransport->rdma_opts.no_wr_batching);
}
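
Because the dump hook above only emits num_cqe when SRQ is disabled, one way to confirm the setting took effect is to read the transport options back. A rough sketch, assuming the existing nvmf_get_transports RPC reports the options serialized here; the RPC name and client setup are outside this diff:

```python
# Hypothetical read-back check: num_cqe should be present in the RDMA
# transport options only when no_srq is true, mirroring nvmf_rdma_dump_opts().
from rpc.client import JSONRPCClient

client = JSONRPCClient('/var/tmp/spdk.sock')
for t in client.call('nvmf_get_transports'):
    if t.get('trtype') == 'RDMA' and t.get('no_srq'):
        print('RDMA num_cqe:', t.get('num_cqe'))
```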
@@ -3280,7 +3290,7 @@ nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
if (poller->srq) {
num_cqe = poller->max_srq_depth * 3;
} else {
num_cqe = DEFAULT_NVMF_RDMA_CQ_SIZE;
num_cqe = rtransport->rdma_opts.num_cqe;
}
poller->cq = ibv_create_cq(device->context, num_cqe, poller, NULL, 0);

scripts/rpc.py

@@ -1861,6 +1861,7 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
max_aq_depth=args.max_aq_depth,
num_shared_buffers=args.num_shared_buffers,
buf_cache_size=args.buf_cache_size,
num_cqe=args.num_cqe,
max_srq_depth=args.max_srq_depth,
no_srq=args.no_srq,
c2h_success=args.c2h_success,
@@ -1884,6 +1885,8 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
p.add_argument('-a', '--max-aq-depth', help='Max number of admin cmds per AQ', type=int)
p.add_argument('-n', '--num-shared-buffers', help='The number of pooled data buffers available to the transport', type=int)
p.add_argument('-b', '--buf-cache-size', help='The number of shared buffers to reserve for each poll group', type=int)
p.add_argument('-d', '--num_cqe', help="""The number of CQ entries. Only used when no_srq=true.
Relevant only for RDMA transport""", type=int)
p.add_argument('-s', '--max-srq-depth', help='Max number of outstanding I/O per SRQ. Relevant only for RDMA transport', type=int)
p.add_argument('-r', '--no-srq', action='store_true', help='Disable per-thread shared receive queue. Relevant only for RDMA transport')
p.add_argument('-o', '--c2h-success', action='store_false', help='Disable C2H success optimization. Relevant only for TCP transport')

scripts/rpc/nvmf.py

@@ -103,6 +103,7 @@ def nvmf_create_transport(client,
max_aq_depth=None,
num_shared_buffers=None,
buf_cache_size=None,
num_cqe=None,
max_srq_depth=None,
no_srq=False,
c2h_success=True,
@@ -125,6 +126,7 @@ def nvmf_create_transport(client,
max_aq_depth: Max size admin queue per controller (optional)
num_shared_buffers: The number of pooled data buffers available to the transport (optional)
buf_cache_size: The number of shared buffers to reserve for each poll group (optional)
num_cqe: The number of CQ entries used to size the completion queue. Only used when no_srq=true - RDMA specific (optional)
max_srq_depth: Max number of outstanding I/O per shared receive queue - RDMA specific (optional)
no_srq: Boolean flag to disable SRQ even for devices that support it - RDMA specific (optional)
c2h_success: Boolean flag to disable the C2H success optimization - TCP specific (optional)
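
For completeness, a short sketch of calling this Python helper directly with the new keyword; the trtype argument name and the client setup are assumptions, while the other keywords match the signature shown above:

```python
# Hypothetical direct use of the helper extended above. Only num_cqe and
# no_srq come from this commit; trtype and the socket path are assumed.
from rpc.client import JSONRPCClient
from rpc.nvmf import nvmf_create_transport

client = JSONRPCClient('/var/tmp/spdk.sock')
nvmf_create_transport(client, trtype='RDMA', no_srq=True, num_cqe=4096)
```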
@@ -160,6 +162,8 @@ def nvmf_create_transport(client,
params['num_shared_buffers'] = num_shared_buffers
if buf_cache_size is not None:
params['buf_cache_size'] = buf_cache_size
if num_cqe:
params['num_cqe'] = num_cqe
if max_srq_depth:
params['max_srq_depth'] = max_srq_depth
if no_srq: