nvmf: remove deprecated max_qpairs_per_ctrlr parameter
This was a parameter on the nvmf_create_transport RPC; it was
replaced with max_io_qpairs_per_ctrlr to remove confusion about
whether this number included the admin queue or not.
The nvmf_vhost test was still using this deprecated parameter;
change it to use -m (max_io_qpairs_per_ctrlr) instead. '-p 4'
would have been evaluated as 1 admin queue + 3 I/O queues, but
the intent was likely 4 I/O queues. This is a perfect example of
why the parameter was deprecated.
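
To make the old arithmetic concrete, here is a minimal Python
sketch (illustrative only, not SPDK code):

# Minimal sketch of the deprecated parameter's semantics: the value
# counted the admin queue, so the usable I/O qpairs were one fewer.
def io_qpairs_from_deprecated(max_qpairs_per_ctrlr: int) -> int:
    return max_qpairs_per_ctrlr - 1  # 1 admin queue + (n - 1) I/O queues

assert io_qpairs_from_deprecated(4) == 3  # '-p 4' meant only 3 I/O queues
# The replacement flag is explicit: '-m 4' requests exactly 4 I/O qpairs.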
For reference, this was deprecated in June 2020 in commit 1551197db.
Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I4364fc0a76c9993b376932b6eea243d7cefca9cd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11543
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
@@ -7,6 +7,11 @@
 Added `bdev_nvme_add_error_injection` and `bdev_nvme_remove_error_injection` RPCs to add and
 remove NVMe error injections.
 
+### nvmf
+
+Removed deprecated max_qpairs_per_ctrlr parameter from nvmf_create_transport RPC. Use
+max_io_qpairs_per_ctrlr instead.
+
 ## v22.01
 
 ### accel
@@ -6240,7 +6240,6 @@ Name | Optional | Type | Description
 trtype | Required | string | Transport type (ex. RDMA)
 tgt_name | Optional | string | Parent NVMe-oF target name.
 max_queue_depth | Optional | number | Max number of outstanding I/O per queue
-max_qpairs_per_ctrlr | Optional | number | Max number of SQ and CQ per controller (deprecated, use max_io_qpairs_per_ctrlr)
 max_io_qpairs_per_ctrlr | Optional | number | Max number of IO qpairs per controller
 in_capsule_data_size | Optional | number | Max number of in-capsule data size
 max_io_size | Optional | number | Max I/O size (bytes)
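
For illustration, a request built with only the surviving parameters
could look like the following sketch (a Python dict standing in for
the JSON-RPC body; the values are examples, not defaults):

# Illustrative nvmf_create_transport request after this change.
request = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "nvmf_create_transport",
    "params": {
        "trtype": "RDMA",              # required
        "max_io_qpairs_per_ctrlr": 4,  # replaces max_qpairs_per_ctrlr
        "max_io_size": 131072,         # bytes; example value
    },
}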
@@ -1864,34 +1864,12 @@ nvmf_rpc_decode_max_io_qpairs(const struct spdk_json_val *val, void *out)
 	return rc;
 }
 
-/**
- * This function parses deprecated `max_qpairs_per_ctrlr` and warns the user to use
- * the new parameter `max_io_qpairs_per_ctrlr`
- */
-static int
-nvmf_rpc_decode_max_qpairs(const struct spdk_json_val *val, void *out)
-{
-	uint16_t *i = out;
-	int rc;
-
-	rc = spdk_json_number_to_uint16(val, i);
-	if (rc == 0) {
-		SPDK_WARNLOG("Parameter max_qpairs_per_ctrlr is deprecated, use max_io_qpairs_per_ctrlr instead.\n");
-	}
-
-	return rc;
-}
-
 static const struct spdk_json_object_decoder nvmf_rpc_create_transport_decoder[] = {
 	{ "trtype", offsetof(struct nvmf_rpc_create_transport_ctx, trtype), spdk_json_decode_string},
 	{
 		"max_queue_depth", offsetof(struct nvmf_rpc_create_transport_ctx, opts.max_queue_depth),
 		spdk_json_decode_uint16, true
 	},
-	{
-		"max_qpairs_per_ctrlr", offsetof(struct nvmf_rpc_create_transport_ctx, opts.max_qpairs_per_ctrlr),
-		nvmf_rpc_decode_max_qpairs, true
-	},
 	{
 		"max_io_qpairs_per_ctrlr", offsetof(struct nvmf_rpc_create_transport_ctx, opts.max_qpairs_per_ctrlr),
 		nvmf_rpc_decode_max_io_qpairs, true
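
Note how both JSON keys decode into the same opts.max_qpairs_per_ctrlr
offset; the removed decoder differed only in emitting a warning. A
minimal Python sketch of that decode-and-warn aliasing pattern (names
are illustrative, not SPDK API):

import logging

def decode_max_io_qpairs(val, opts):
    # New-style key: store the I/O qpair count.
    opts["max_qpairs_per_ctrlr"] = int(val)

def decode_max_qpairs_deprecated(val, opts):
    # Deprecated key: same destination field, plus a warning.
    logging.warning("max_qpairs_per_ctrlr is deprecated, "
                    "use max_io_qpairs_per_ctrlr instead")
    opts["max_qpairs_per_ctrlr"] = int(val)

# Analogue of nvmf_rpc_create_transport_decoder[]: two JSON keys
# routed to decoders that write the same option.
DECODERS = {
    "max_qpairs_per_ctrlr": decode_max_qpairs_deprecated,  # removed by this commit
    "max_io_qpairs_per_ctrlr": decode_max_io_qpairs,
}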
@@ -2035,8 +2035,6 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
     p.add_argument('-t', '--trtype', help='Transport type (ex. RDMA)', type=str, required=True)
     p.add_argument('-g', '--tgt-name', help='The name of the parent NVMe-oF target (optional)', type=str)
     p.add_argument('-q', '--max-queue-depth', help='Max number of outstanding I/O per queue', type=int)
-    p.add_argument('-p', '--max-qpairs-per-ctrlr', help="""Max number of SQ and CQ per controller.
-    Deprecated, use max-io-qpairs-per-ctrlr""", type=int)
     p.add_argument('-m', '--max-io-qpairs-per-ctrlr', help='Max number of IO qpairs per controller', type=int)
     p.add_argument('-c', '--in-capsule-data-size', help='Max number of in-capsule data size', type=int)
     p.add_argument('-i', '--max-io-size', help='Max I/O size (bytes)', type=int)
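
With '-p' gone, only '-m' remains on the CLI. A hedged example of
driving it from Python (the script path, transport, and value are
assumptions, and a running SPDK target is presumed):

import subprocess

# Create an RDMA transport with 4 I/O qpairs per controller using the
# surviving flag; '-p' would now be rejected as an unknown argument.
subprocess.run(
    ["./scripts/rpc.py", "nvmf_create_transport",
     "-t", "RDMA",  # --trtype
     "-m", "4"],    # --max-io-qpairs-per-ctrlr
    check=True,
)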
@@ -106,7 +106,6 @@ def nvmf_create_transport(client, **params):
     Args:
         trtype: Transport type (ex. RDMA)
         max_queue_depth: Max number of outstanding I/O per queue (optional)
-        max_qpairs_per_ctrlr: Max number of SQ and CQ per controller (optional, deprecated, use max_io_qpairs_per_ctrlr)
         max_io_qpairs_per_ctrlr: Max number of IO qpairs per controller (optional)
         in_capsule_data_size: Maximum in-capsule data size in bytes (optional)
         max_io_size: Maximum I/O data size in bytes (optional)
@@ -134,9 +133,6 @@ def nvmf_create_transport(client, **params):
     apply_defaults(params, no_srq=False, c2h_success=True)
     remove_null(params)
 
-    if 'max_qpairs_per_ctrlr' in params:
-        print("WARNING: max_qpairs_per_ctrlr is deprecated, please use max_io_qpairs_per_ctrlr.")
-
     return client.call('nvmf_create_transport', params)
 
 
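
The Python helper now forwards params to the target unmodified, so a
stale max_qpairs_per_ctrlr key would be rejected server-side rather
than warned about. A usage sketch (the import path and socket path are
assumptions that vary across SPDK versions):

from spdk.rpc.client import JSONRPCClient  # module path varies by SPDK version
from spdk.rpc.nvmf import nvmf_create_transport

# Connect to the target's RPC socket (assumed default path).
client = JSONRPCClient("/var/tmp/spdk.sock")

# Only max_io_qpairs_per_ctrlr is accepted now; the deprecated
# max_qpairs_per_ctrlr key would fail to decode on the target.
nvmf_create_transport(client, trtype="RDMA", max_io_qpairs_per_ctrlr=4)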
@@ -41,7 +41,7 @@ trap 'process_shm --id $NVMF_APP_SHM_ID; killprocess $vhostpid nvmftestfini; exi
 # Configure NVMF tgt on host machine
 malloc_bdev="$($NVMF_RPC bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
 
-$NVMF_RPC nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -p 4
+$NVMF_RPC nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -m 4
 $NVMF_RPC nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
 $NVMF_RPC nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev"
 $NVMF_RPC nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT