rpc: Deprecate max_qpairs_per_ctrlr parameter

This parameter describes the total number of admin and IO
qpairs, but the admin qpair always exists and should not
be configured explicitly.
Introduce a new parameter `max_io_qpairs_per_ctrlr`
which configures the number of IO qpairs.
The internal structure of the NVMF transport is not changed;
both RPC parameters configure the same NVMF transport parameter.
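The relationship between the two parameters can be illustrated as
follows (a minimal Python sketch; the helper names are hypothetical,
not part of this commit):

    # opts.max_qpairs_per_ctrlr internally still counts the admin qpair,
    # so the old and new RPC parameters always differ by exactly one.
    def io_qpairs_to_internal(max_io_qpairs_per_ctrlr):
        return max_io_qpairs_per_ctrlr + 1  # add the admin qpair

    def internal_to_io_qpairs(max_qpairs_per_ctrlr):
        return max_qpairs_per_ctrlr - 1  # strip the admin qpair

    # e.g. old "max_qpairs_per_ctrlr=4" equals new "max_io_qpairs_per_ctrlr=3"
    assert io_qpairs_to_internal(3) == 4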

Deprecate max_qpairs_per_ctrlr in spdkcli as well.

Side change: update the dif_insert_or_strip description -
it can be used by both TCP and RDMA transports.

Config file parsing is not changed since config files are deprecated.

Fixes #1378

Change-Id: I8403ee6fcf090bb5e86a32e4868fea5924daed23
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2279
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Anil Veerabhadrappa <anil.veerabhadrappa@broadcom.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Community-CI: Broadcom CI
Alexey Marchuk 2020-05-07 16:10:26 +03:00 committed by Tomasz Zawadzki
parent 074cdb4d06
commit 1551197db5
11 changed files with 82 additions and 20 deletions


@@ -26,6 +26,14 @@ Two providers are available - verbs (used by default when RDMA is enabled or ena
using --with-rdma=verbs) and mlx5 Direct Verbs aka DV (enabled by --with-rdma=mlx5_dv).
Using mlx5_dv requires libmlx5 installed on the system.
### rpc
Parameter `-p` or `--max-qpairs-per-ctrlr` of the `nvmf_create_transport` RPC command accepted by the
rpc.py script is deprecated; a new parameter `-m` or `--max-io-qpairs-per-ctrlr` has been added.
Parameter `max_qpairs_per_ctrlr` of the `nvmf_create_transport` RPC command accepted by the NVMF target
is deprecated; a new parameter `max_io_qpairs_per_ctrlr` has been added.
### sock
Added `spdk_sock_impl_get_opts` and `spdk_sock_impl_set_opts` functions to set/get socket layer configuration


@@ -4021,7 +4021,8 @@ Name | Optional | Type | Description
trtype | Required | string | Transport type (ex. RDMA)
tgt_name | Optional | string | Parent NVMe-oF target name.
max_queue_depth | Optional | number | Max number of outstanding I/O per queue
max_qpairs_per_ctrlr | Optional | number | Max number of SQ and CQ per controller
max_qpairs_per_ctrlr | Optional | number | Max number of SQ and CQ per controller (deprecated, use max_io_qpairs_per_ctrlr)
max_io_qpairs_per_ctrlr | Optional | number | Max number of IO qpairs per controller
in_capsule_data_size | Optional | number | Max in-capsule data size
max_io_size | Optional | number | Max I/O size (bytes)
io_unit_size | Optional | number | I/O unit size (bytes)
@@ -4031,7 +4032,7 @@ buf_cache_size | Optional | number | The number of shared buffers
max_srq_depth | Optional | number | The number of elements in a per-thread shared receive queue (RDMA only)
no_srq | Optional | boolean | Disable shared receive queue even for devices that support it. (RDMA only)
c2h_success | Optional | boolean | Disable C2H success optimization (TCP only)
dif_insert_or_strip | Optional | boolean | Enable DIF insert for write I/O and DIF strip for read I/O DIF (TCP only)
dif_insert_or_strip | Optional | boolean | Enable DIF insert for write I/O and DIF strip for read I/O DIF
sock_priority | Optional | number | The socket priority of the connection owned by this transport (TCP only)
### Example
@@ -4565,7 +4566,7 @@ Example response:
{
  "type": "RDMA",
  "max_queue_depth": 128,
  "max_qpairs_per_ctrlr": 64,
  "max_io_qpairs_per_ctrlr": 64,
  "in_capsule_data_size": 4096,
  "max_io_size": 131072,
  "io_unit_size": 131072


@@ -1821,11 +1821,11 @@ nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
	SPDK_INFOLOG(SPDK_LOG_NVMF_FC, "*** FC Transport Init ***\n"
		     " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n"
		     " max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     " max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     " max_aq_depth=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr,
		     opts->max_qpairs_per_ctrlr - 1,
		     opts->io_unit_size,
		     opts->max_aq_depth);


@@ -525,7 +525,8 @@ spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "trtype", spdk_nvme_transport_id_trtype_str(transport->ops->type));
	spdk_json_write_named_uint32(w, "max_queue_depth", transport->opts.max_queue_depth);
	spdk_json_write_named_uint32(w, "max_qpairs_per_ctrlr", transport->opts.max_qpairs_per_ctrlr);
	spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr",
				     transport->opts.max_qpairs_per_ctrlr - 1);
	spdk_json_write_named_uint32(w, "in_capsule_data_size", transport->opts.in_capsule_data_size);
	spdk_json_write_named_uint32(w, "max_io_size", transport->opts.max_io_size);
	spdk_json_write_named_uint32(w, "io_unit_size", transport->opts.io_unit_size);


@@ -1559,6 +1559,46 @@ struct nvmf_rpc_create_transport_ctx {
	struct spdk_jsonrpc_request *request;
};
/**
 * `max_qpairs_per_ctrlr` represents both admin and IO qpairs, which confuses
 * users when they configure a transport using RPC, so the
 * `max_qpairs_per_ctrlr` RPC parameter is deprecated in favor of
 * `max_io_qpairs_per_ctrlr`. The internal logic remains unchanged: SPDK
 * still expects spdk_nvmf_transport_opts::max_qpairs_per_ctrlr to include
 * the admin qpair. This function parses the number of IO qpairs and adds 1
 * for the admin qpair.
 */
static int
nvmf_rpc_decode_max_io_qpairs(const struct spdk_json_val *val, void *out)
{
	uint16_t *i = out;
	int rc;

	rc = spdk_json_number_to_uint16(val, i);
	if (rc == 0) {
		(*i)++;
	}

	return rc;
}

/**
 * This function parses the deprecated `max_qpairs_per_ctrlr` parameter and
 * warns the user to use the new `max_io_qpairs_per_ctrlr` parameter instead.
 */
static int
nvmf_rpc_decode_max_qpairs(const struct spdk_json_val *val, void *out)
{
	uint16_t *i = out;
	int rc;

	rc = spdk_json_number_to_uint16(val, i);
	if (rc == 0) {
		SPDK_WARNLOG("Parameter max_qpairs_per_ctrlr is deprecated, use max_io_qpairs_per_ctrlr instead.\n");
	}

	return rc;
}
static const struct spdk_json_object_decoder nvmf_rpc_create_transport_decoder[] = {
	{ "trtype", offsetof(struct nvmf_rpc_create_transport_ctx, trtype), spdk_json_decode_string},
	{
@@ -1567,7 +1607,11 @@ static const struct spdk_json_object_decoder nvmf_rpc_create_transport_decoder[]
	},
	{
		"max_qpairs_per_ctrlr", offsetof(struct nvmf_rpc_create_transport_ctx, opts.max_qpairs_per_ctrlr),
		spdk_json_decode_uint16, true
		nvmf_rpc_decode_max_qpairs, true
	},
	{
		"max_io_qpairs_per_ctrlr", offsetof(struct nvmf_rpc_create_transport_ctx, opts.max_qpairs_per_ctrlr),
		nvmf_rpc_decode_max_io_qpairs, true
	},
	{
		"in_capsule_data_size", offsetof(struct nvmf_rpc_create_transport_ctx, opts.in_capsule_data_size),
@@ -1749,7 +1793,7 @@ dump_nvmf_transport(struct spdk_json_write_ctx *w, struct spdk_nvmf_transport *t
	spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(transport));
	spdk_json_write_named_uint32(w, "max_queue_depth", opts->max_queue_depth);
	spdk_json_write_named_uint32(w, "max_qpairs_per_ctrlr", opts->max_qpairs_per_ctrlr);
	spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr", opts->max_qpairs_per_ctrlr - 1);
	spdk_json_write_named_uint32(w, "in_capsule_data_size", opts->in_capsule_data_size);
	spdk_json_write_named_uint32(w, "max_io_size", opts->max_io_size);
	spdk_json_write_named_uint32(w, "io_unit_size", opts->io_unit_size);


@@ -2289,12 +2289,12 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
	SPDK_INFOLOG(SPDK_LOG_RDMA, "*** RDMA Transport Init ***\n"
		     " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n"
		     " max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     " max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     " in_capsule_data_size=%d, max_aq_depth=%d,\n"
		     " num_shared_buffers=%d, max_srq_depth=%d, no_srq=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr,
		     opts->max_qpairs_per_ctrlr - 1,
		     opts->io_unit_size,
		     opts->in_capsule_data_size,
		     opts->max_aq_depth,


@@ -469,13 +469,13 @@ nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
	SPDK_INFOLOG(SPDK_LOG_NVMF_TCP, "*** TCP Transport Init ***\n"
		     " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n"
		     " max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     " max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     " in_capsule_data_size=%d, max_aq_depth=%d\n"
		     " num_shared_buffers=%d, c2h_success=%d,\n"
		     " dif_insert_or_strip=%d, sock_priority=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr,
		     opts->max_qpairs_per_ctrlr - 1,
		     opts->io_unit_size,
		     opts->in_capsule_data_size,
		     opts->max_aq_depth,


@@ -1715,6 +1715,7 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
                                     tgt_name=args.tgt_name,
                                     max_queue_depth=args.max_queue_depth,
                                     max_qpairs_per_ctrlr=args.max_qpairs_per_ctrlr,
                                     max_io_qpairs_per_ctrlr=args.max_io_qpairs_per_ctrlr,
                                     in_capsule_data_size=args.in_capsule_data_size,
                                     max_io_size=args.max_io_size,
                                     io_unit_size=args.io_unit_size,
@@ -1731,7 +1732,9 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
    p.add_argument('-t', '--trtype', help='Transport type (ex. RDMA)', type=str, required=True)
    p.add_argument('-g', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
    p.add_argument('-q', '--max-queue-depth', help='Max number of outstanding I/O per queue', type=int)
    p.add_argument('-p', '--max-qpairs-per-ctrlr', help='Max number of SQ and CQ per controller', type=int)
    p.add_argument('-p', '--max-qpairs-per-ctrlr', help="""Max number of SQ and CQ per controller.
    Deprecated, use max-io-qpairs-per-ctrlr""", type=int)
    p.add_argument('-m', '--max-io-qpairs-per-ctrlr', help='Max number of IO qpairs per controller', type=int)
    p.add_argument('-c', '--in-capsule-data-size', help='Max in-capsule data size', type=int)
    p.add_argument('-i', '--max-io-size', help='Max I/O size (bytes)', type=int)
    p.add_argument('-u', '--io-unit-size', help='I/O unit size (bytes)', type=int)


@@ -96,6 +96,7 @@ def nvmf_create_transport(client,
                          tgt_name=None,
                          max_queue_depth=None,
                          max_qpairs_per_ctrlr=None,
                          max_io_qpairs_per_ctrlr=None,
                          in_capsule_data_size=None,
                          max_io_size=None,
                          io_unit_size=None,
@@ -112,7 +113,8 @@ def nvmf_create_transport(client,
    Args:
        trtype: Transport type (ex. RDMA)
        max_queue_depth: Max number of outstanding I/O per queue (optional)
        max_qpairs_per_ctrlr: Max number of SQ and CQ per controller (optional, deprecated, use max_io_qpairs_per_ctrlr)
        max_io_qpairs_per_ctrlr: Max number of IO qpairs per controller (optional)
        in_capsule_data_size: Maximum in-capsule data size in bytes (optional)
        max_io_size: Maximum I/O data size in bytes (optional)
        io_unit_size: I/O unit size in bytes (optional)
@@ -135,7 +137,10 @@ def nvmf_create_transport(client,
    if max_queue_depth:
        params['max_queue_depth'] = max_queue_depth
    if max_qpairs_per_ctrlr:
        print("WARNING: max_qpairs_per_ctrlr is deprecated, please use max_io_qpairs_per_ctrlr.")
        params['max_qpairs_per_ctrlr'] = max_qpairs_per_ctrlr
    if max_io_qpairs_per_ctrlr:
        params['max_io_qpairs_per_ctrlr'] = max_io_qpairs_per_ctrlr
    if in_capsule_data_size:
        params['in_capsule_data_size'] = in_capsule_data_size
    if max_io_size:
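For reference, a minimal usage sketch of the new parameter through the
Python client (assuming SPDK's scripts/ directory is on the import path
and the target listens on the default RPC socket):

    # Equivalent to: ./scripts/rpc.py nvmf_create_transport -t TCP -m 4
    from rpc.client import JSONRPCClient
    import rpc.nvmf

    client = JSONRPCClient('/var/tmp/spdk.sock')
    # Request 4 IO qpairs; the target adds one internally for the admin qpair.
    rpc.nvmf.nvmf_create_transport(client, trtype='TCP', max_io_qpairs_per_ctrlr=4)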


@@ -23,21 +23,21 @@ class UINVMfTransports(UINode):
        for transport in self.get_root().nvmf_get_transports():
            UINVMfTransport(transport, self)

    def ui_command_create(self, trtype, max_queue_depth=None, max_qpairs_per_ctrlr=None, in_capsule_data_size=None,
                          max_io_size=None, io_unit_size=None, max_aq_depth=None):
    def ui_command_create(self, trtype, max_queue_depth=None, max_io_qpairs_per_ctrlr=None,
                          in_capsule_data_size=None, max_io_size=None, io_unit_size=None, max_aq_depth=None):
        """Create a transport with given parameters

        Arguments:
        trtype - Example: 'RDMA'.
        max_queue_depth - Optional parameter. Integer, max value 65535.
        max_qpairs_per_ctrlr - Optional parameter. 16 bit Integer, max value 65535.
        max_io_qpairs_per_ctrlr - Optional parameter. 16 bit Integer, max value 65535.
        in_capsule_data_size - Optional parameter. 32 bit Integer, max value 4294967295
        max_io_size - Optional parameter. 32 bit integer, max value 4294967295
        io_unit_size - Optional parameter. 32 bit integer, max value 4294967295
        max_aq_depth - Optional parameter. 32 bit integer, max value 4294967295
        """
        max_queue_depth = self.ui_eval_param(max_queue_depth, "number", None)
        max_qpairs_per_ctrlr = self.ui_eval_param(max_qpairs_per_ctrlr, "number", None)
        max_io_qpairs_per_ctrlr = self.ui_eval_param(max_io_qpairs_per_ctrlr, "number", None)
        in_capsule_data_size = self.ui_eval_param(in_capsule_data_size, "number", None)
        max_io_size = self.ui_eval_param(max_io_size, "number", None)
        io_unit_size = self.ui_eval_param(io_unit_size, "number", None)
@@ -45,7 +45,7 @@ class UINVMfTransports(UINode):
        self.get_root().create_nvmf_transport(trtype=trtype,
                                              max_queue_depth=max_queue_depth,
                                              max_qpairs_per_ctrlr=max_qpairs_per_ctrlr,
                                              max_io_qpairs_per_ctrlr=max_io_qpairs_per_ctrlr,
                                              in_capsule_data_size=in_capsule_data_size,
                                              max_io_size=max_io_size,
                                              io_unit_size=io_unit_size,


@@ -26,7 +26,7 @@ $spdkcli_job "'/bdevs/malloc create 32 512 Malloc1' 'Malloc1' True
'/bdevs/malloc create 32 512 Malloc4' 'Malloc4' True
'/bdevs/malloc create 32 512 Malloc5' 'Malloc5' True
'/bdevs/malloc create 32 512 Malloc6' 'Malloc6' True
'nvmf/transport create RDMA max_qpairs_per_ctrlr=4 io_unit_size=8192' '' True
'nvmf/transport create RDMA max_io_qpairs_per_ctrlr=4 io_unit_size=8192' '' True
'/nvmf/subsystem create nqn.2014-08.org.spdk:cnode1 N37SXV509SRW\
max_namespaces=4 allow_any_host=True' 'nqn.2014-08.org.spdk:cnode1' True
'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/namespaces create Malloc3 1' 'Malloc3' True