lib/nvmf: add an acceptor_backlog transport opt.
This allows users to configure the number of connection requests outstanding to an RDMA port at once. RPC included.

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: I8a2bb86b2fb7565cb10288088d39af763b778703
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3097
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Commit 3b830202ca (parent 37bf49b1d5)
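For reference, a minimal sketch (not part of the patch) of how the new option might be supplied over JSON-RPC with the in-tree Python client; the socket path, backlog value, and import style (running from the scripts/ directory) are assumptions:

```python
# Illustrative sketch only: set the new acceptor_backlog option when creating
# the RDMA transport, using SPDK's in-tree JSON-RPC client.
from rpc.client import JSONRPCClient

# /var/tmp/spdk.sock is SPDK's default RPC listen address; adjust as needed.
client = JSONRPCClient('/var/tmp/spdk.sock')

# "acceptor_backlog" is decoded as an int32 by the nvmf_create_transport RPC;
# values <= 0 are replaced with the RDMA default (100) in nvmf_rdma_create().
client.call('nvmf_create_transport', {
    'trtype': 'RDMA',
    'acceptor_backlog': 64,   # illustrative value
})
```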
@@ -4119,6 +4119,7 @@ no_srq | Optional | boolean | Disable shared receive queue
 c2h_success | Optional | boolean | Disable C2H success optimization (TCP only)
 dif_insert_or_strip | Optional | boolean | Enable DIF insert for write I/O and DIF strip for read I/O
 sock_priority | Optional | number | The socket priority of the connection owned by this transport (TCP only)
+acceptor_backlog | Optional | number | The number of pending connections allowed in backlog before failing new connection attempts (RDMA only)
 
 ### Example
@@ -84,6 +84,7 @@ struct spdk_nvmf_transport_opts {
 	bool		c2h_success;
 	bool		dif_insert_or_strip;
 	uint32_t	sock_priority;
+	int		acceptor_backlog;
 };
 
 struct spdk_nvmf_poll_group_stat {
@@ -1646,6 +1646,10 @@ static const struct spdk_json_object_decoder nvmf_rpc_create_transport_decoder[]
 		"sock_priority", offsetof(struct nvmf_rpc_create_transport_ctx, opts.sock_priority),
 		spdk_json_decode_uint32, true
 	},
+	{
+		"acceptor_backlog", offsetof(struct nvmf_rpc_create_transport_ctx, opts.acceptor_backlog),
+		spdk_json_decode_int32, true
+	},
 	{
 		"tgt_name", offsetof(struct nvmf_rpc_create_transport_ctx, tgt_name),
 		spdk_json_decode_string, true
@@ -1793,6 +1797,7 @@ dump_nvmf_transport(struct spdk_json_write_ctx *w, struct spdk_nvmf_transport *t
 	if (type == SPDK_NVME_TRANSPORT_RDMA) {
 		spdk_json_write_named_uint32(w, "max_srq_depth", opts->max_srq_depth);
 		spdk_json_write_named_bool(w, "no_srq", opts->no_srq);
+		spdk_json_write_named_int32(w, "acceptor_backlog", opts->acceptor_backlog);
 	} else if (type == SPDK_NVME_TRANSPORT_TCP) {
 		spdk_json_write_named_bool(w, "c2h_success", opts->c2h_success);
 		spdk_json_write_named_uint32(w, "sock_priority", opts->sock_priority);
@@ -2219,6 +2219,7 @@ nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 #define SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE 32
 #define SPDK_NVMF_RDMA_DEFAULT_NO_SRQ false
 #define SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP false
+#define SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG 100
 
 static void
 nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts)
@@ -2234,6 +2235,7 @@ nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts)
 	opts->max_srq_depth = SPDK_NVMF_RDMA_DEFAULT_SRQ_DEPTH;
 	opts->no_srq = SPDK_NVMF_RDMA_DEFAULT_NO_SRQ;
 	opts->dif_insert_or_strip = SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP;
+	opts->acceptor_backlog = SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG;
 }
 
 const struct spdk_mem_map_ops g_nvmf_rdma_map_ops = {
@@ -2294,7 +2296,8 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 		     " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n"
 		     " max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
 		     " in_capsule_data_size=%d, max_aq_depth=%d,\n"
-		     " num_shared_buffers=%d, max_srq_depth=%d, no_srq=%d\n",
+		     " num_shared_buffers=%d, max_srq_depth=%d, no_srq=%d,"
+		     " acceptor_backlog=%d\n",
 		     opts->max_queue_depth,
 		     opts->max_io_size,
 		     opts->max_qpairs_per_ctrlr - 1,
@@ -2303,13 +2306,20 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 		     opts->max_aq_depth,
 		     opts->num_shared_buffers,
 		     opts->max_srq_depth,
-		     opts->no_srq);
+		     opts->no_srq,
+		     opts->acceptor_backlog);
 
 	/* I/O unit size cannot be larger than max I/O size */
 	if (opts->io_unit_size > opts->max_io_size) {
 		opts->io_unit_size = opts->max_io_size;
 	}
 
+	if (opts->acceptor_backlog <= 0) {
+		SPDK_ERRLOG("The acceptor backlog cannot be less than 1, setting to the default value of (%d).\n",
+			    SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG);
+		opts->acceptor_backlog = SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG;
+	}
+
 	if (opts->num_shared_buffers < (SPDK_NVMF_MAX_SGL_ENTRIES * 2)) {
 		SPDK_ERRLOG("The number of shared data buffers (%d) is less than"
 			    "the minimum number required to guarantee that forward progress can be made (%d)\n",
@@ -2625,7 +2635,7 @@ nvmf_rdma_listen(struct spdk_nvmf_transport *transport,
 		return -1;
 	}
 
-	rc = rdma_listen(port->id, 100); /* 100 = backlog */
+	rc = rdma_listen(port->id, transport->opts.acceptor_backlog);
 	if (rc < 0) {
 		SPDK_ERRLOG("rdma_listen() failed\n");
 		rdma_destroy_id(port->id);
@@ -1726,7 +1726,8 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
                                       no_srq=args.no_srq,
                                       c2h_success=args.c2h_success,
                                       dif_insert_or_strip=args.dif_insert_or_strip,
-                                      sock_priority=args.sock_priority)
+                                      sock_priority=args.sock_priority,
+                                      acceptor_backlog=args.acceptor_backlog)
 
     p = subparsers.add_parser('nvmf_create_transport', help='Create NVMf transport')
     p.add_argument('-t', '--trtype', help='Transport type (ex. RDMA)', type=str, required=True)
@@ -1746,6 +1747,7 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
     p.add_argument('-o', '--c2h-success', action='store_false', help='Disable C2H success optimization. Relevant only for TCP transport')
     p.add_argument('-f', '--dif-insert-or-strip', action='store_true', help='Enable DIF insert/strip. Relevant only for TCP transport')
     p.add_argument('-y', '--sock-priority', help='The sock priority of the tcp connection. Relevant only for TCP transport', type=int)
+    p.add_argument('-l', '--acceptor_backlog', help='Pending connections allowed at one time. Relevant only for RDMA transport', type=int)
     p.set_defaults(func=nvmf_create_transport)
 
 def nvmf_get_transports(args):
@@ -107,7 +107,8 @@ def nvmf_create_transport(client,
                           no_srq=False,
                           c2h_success=True,
                           dif_insert_or_strip=None,
-                          sock_priority=None):
+                          sock_priority=None,
+                          acceptor_backlog=None):
     """NVMf Transport Create options.
 
     Args:
@@ -125,6 +126,7 @@ def nvmf_create_transport(client,
         no_srq: Boolean flag to disable SRQ even for devices that support it - RDMA specific (optional)
         c2h_success: Boolean flag to disable the C2H success optimization - TCP specific (optional)
         dif_insert_or_strip: Boolean flag to enable DIF insert/strip for I/O - TCP specific (optional)
+        acceptor_backlog: Pending connections allowed at one time - RDMA specific (optional)
 
     Returns:
         True or False
@@ -163,6 +165,8 @@ def nvmf_create_transport(client,
         params['dif_insert_or_strip'] = dif_insert_or_strip
     if sock_priority:
         params['sock_priority'] = sock_priority
+    if acceptor_backlog is not None:
+        params['acceptor_backlog'] = acceptor_backlog
     return client.call('nvmf_create_transport', params)
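A hedged usage sketch of the updated helper; the client setup and the backlog value below are assumptions, not part of the patch:

```python
# Sketch: create an RDMA transport whose listener keeps up to 64 connection
# requests pending before rdma_listen() starts rejecting new ones.
from rpc.client import JSONRPCClient
from rpc.nvmf import nvmf_create_transport

client = JSONRPCClient('/var/tmp/spdk.sock')  # assumed default RPC socket
nvmf_create_transport(client, trtype='RDMA', acceptor_backlog=64)
```

Note that the helper forwards acceptor_backlog with an explicit `is not None` check rather than a truthiness test, so a caller passing 0 still reaches the target, where nvmf_rdma_create() resets it to the default of 100.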