2018-09-11 13:26:14 +00:00
|
|
|
#!/usr/bin/env python3
|
2016-08-03 21:37:16 +00:00
|
|
|
|
2019-06-25 18:23:46 +00:00
|
|
|
from rpc.client import print_dict, print_json, JSONRPCException
|
2019-05-03 21:27:09 +00:00
|
|
|
from rpc.helpers import deprecated_aliases
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2018-12-06 15:54:35 +00:00
|
|
|
import logging
|
2016-08-03 21:37:16 +00:00
|
|
|
import argparse
|
2020-05-08 08:05:17 +00:00
|
|
|
import importlib
|
2017-06-06 21:22:03 +00:00
|
|
|
import rpc
|
2018-10-09 18:00:56 +00:00
|
|
|
import sys
|
2019-05-08 10:01:16 +00:00
|
|
|
import shlex
|
2019-06-06 17:41:10 +00:00
|
|
|
import json
|
2017-06-06 21:22:03 +00:00
|
|
|
|
2018-03-19 23:53:16 +00:00
|
|
|
try:
|
|
|
|
from shlex import quote
|
|
|
|
except ImportError:
|
|
|
|
from pipes import quote
|
|
|
|
|
|
|
|
|
|
|
|
def print_array(a):
    """Print *a* as a single line of shell-safe, space-separated tokens."""
    quoted = [quote(item) for item in a]
    print(" ".join(quoted))
|
2018-03-19 23:53:16 +00:00
|
|
|
|
|
|
|
|
2017-06-06 21:22:03 +00:00
|
|
|
if __name__ == "__main__":
|
|
|
|
parser = argparse.ArgumentParser(
|
2020-04-30 11:21:58 +00:00
|
|
|
description='SPDK RPC command line interface', usage='%(prog)s [options]')
|
2017-06-06 21:22:03 +00:00
|
|
|
parser.add_argument('-s', dest='server_addr',
|
2019-02-22 11:18:57 +00:00
|
|
|
help='RPC domain socket path or IP address', default='/var/tmp/spdk.sock')
|
2017-06-06 21:22:03 +00:00
|
|
|
parser.add_argument('-p', dest='port',
|
|
|
|
help='RPC port number (if server_addr is IP address)',
|
|
|
|
default=5260, type=int)
|
2018-03-09 15:20:34 +00:00
|
|
|
parser.add_argument('-t', dest='timeout',
|
2019-02-22 11:18:57 +00:00
|
|
|
help='Timeout as a floating point number expressed in seconds waiting for response. Default: 60.0',
|
2018-03-09 15:20:34 +00:00
|
|
|
default=60.0, type=float)
|
2020-06-02 12:02:16 +00:00
|
|
|
parser.add_argument('-r', dest='conn_retries',
|
|
|
|
help='Retry connecting to the RPC server N times with 0.2s interval. Default: 0',
|
|
|
|
default=0, type=int)
|
2018-12-06 15:54:35 +00:00
|
|
|
parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
|
|
|
|
help='Set verbose mode to INFO', default="ERROR")
|
|
|
|
parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'],
|
|
|
|
help="""Set verbose level. """)
|
2019-06-06 17:41:10 +00:00
|
|
|
parser.add_argument('--dry_run', dest='dry_run', action='store_true', help="Display request and exit")
|
|
|
|
parser.set_defaults(dry_run=False)
|
scripts/rpc.py: add daemon mode
Add rpc_cmd() bash command that sends rpc command to an
rpc.py instance permanently running in background.
This makes sending RPC commands even 17 times faster.
We make use of bash coprocesses - a builtin bash feature
that allow starting background processes with stdin and
stdout connected to pipes. rpc.py will block trying to
read stdin, effectively being always "ready" to read
an RPC command.
The background rpc.py is started with a new --server flag
that's described as:
> Start listening on stdin, parse each line as a regular
> rpc.py execution and create a separate connection for each command.
> Each command's output ends with either **STATUS=0 if the
> command succeeded or **STATUS=1 if it failed.
> --server is meant to be used in conjunction with bash
> coproc, where stdin and stdout are named pipes and can be
> used as a faster way to send RPC commands.
As a part of this patch I'm attaching a sample test
that runs the following rpc commands first with the regular
rpc.py, then the new rpc_cmd() function.
```
time {
bdevs=$($rpc bdev_get_bdevs)
[ "$(jq length <<< "$bdevs")" == "0" ]
malloc=$($rpc bdev_malloc_create 8 512)
bdevs=$($rpc bdev_get_bdevs)
[ "$(jq length <<< "$bdevs")" == "1" ]
$rpc bdev_passthru_create -b "$malloc" -p Passthru0
bdevs=$($rpc bdev_get_bdevs)
[ "$(jq length <<< "$bdevs")" == "2" ]
$rpc bdev_passthru_delete Passthru0
$rpc bdev_malloc_delete $malloc
bdevs=$($rpc bdev_get_bdevs)
[ "$(jq length <<< "$bdevs")" == "0" ]
}
```
Regular rpc.py:
```
real 0m1.477s
user 0m1.289s
sys 0m0.139s
```
rpc_cmd():
```
real 0m0.085s
user 0m0.025s
sys 0m0.006s
```
autotest_common.sh will now spawn an rpc.py daemon if
it's not running yet, and it will offer rpc_cmd() function
to quickly send RPC commands. If the command is invalid or
SPDK returns with error, the bash function will return
a non-zero code and may trigger ERR trap just like a regular
rpc.py instance.
Pipes have a major advantage over e.g. unix domain sockets - the pipes
will be automatically closed once the owner process exits.
This means we can create a named pipe in autotest_common.sh,
open it, then start rpc.py in background and never worry
about it again - it will be closed automatically once the
test exits. It doesn't even matter if the test is executed
manually in isolation, or as a part of the entire autotest.
(check_so_deps.sh needs to be modified not to wait for *all*
background processes to finish, but just the ones it started)
Change-Id: If0ded961b7fef3af3837b44532300dee8b5b4663
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Signed-off-by: Pawel Kaminski <pawelx.kaminski@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/621
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2019-06-21 07:38:47 +00:00
|
|
|
parser.add_argument('--server', dest='is_server', action='store_true',
|
|
|
|
help="Start listening on stdin, parse each line as a regular rpc.py execution and create \
|
|
|
|
a separate connection for each command. Each command's output ends with either \
|
|
|
|
**STATUS=0 if the command succeeded or **STATUS=1 if it failed. --server is meant \
|
|
|
|
to be used in conjunction with bash coproc, where stdin and stdout are connected to \
|
|
|
|
pipes and can be used as a faster way to send RPC commands. If enabled, rpc.py \
|
|
|
|
must be executed without any other parameters.")
|
|
|
|
parser.set_defaults(is_server=False)
|
2020-05-08 08:05:17 +00:00
|
|
|
parser.add_argument('--plugin', dest='rpc_plugin', help='Module name of plugin with additional RPC commands')
|
2020-04-30 10:31:43 +00:00
|
|
|
subparsers = parser.add_subparsers(help='RPC methods', dest='called_rpc_name', metavar='')
|
2017-06-06 21:22:03 +00:00
|
|
|
|
2019-09-11 13:30:14 +00:00
|
|
|
def framework_start_init(args):
    # Kick off subsystem initialization on the target.
    rpc.framework_start_init(args.client)

p = subparsers.add_parser(
    'framework_start_init',
    aliases=['start_subsystem_init'],
    help='Start initialization of subsystems')
p.set_defaults(func=framework_start_init)
|
2018-05-02 04:50:39 +00:00
|
|
|
|
2019-09-11 13:35:41 +00:00
|
|
|
def framework_wait_init(args):
    # Block the caller until the target reports subsystems initialized.
    rpc.framework_wait_init(args.client)

p = subparsers.add_parser(
    'framework_wait_init',
    aliases=['wait_subsystem_init'],
    help='Block until subsystems have been initialized')
p.set_defaults(func=framework_wait_init)
|
2018-11-08 20:24:50 +00:00
|
|
|
|
2019-05-03 20:59:01 +00:00
|
|
|
def rpc_get_methods(args):
    # Query and pretty-print the RPC methods the target exposes.
    print_dict(rpc.rpc_get_methods(args.client, current=args.current,
                                   include_aliases=args.include_aliases))

p = subparsers.add_parser(
    'rpc_get_methods',
    aliases=['get_rpc_methods'],
    help='Get list of supported RPC methods')
p.add_argument('-c', '--current', help='Get list of RPC methods only callable in the current state.', action='store_true')
p.add_argument('-i', '--include-aliases', help='include RPC aliases', action='store_true')
p.set_defaults(func=rpc_get_methods)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-19 21:32:01 +00:00
|
|
|
def spdk_get_version(args):
    # Print the target's SPDK version as JSON.
    print_json(rpc.spdk_get_version(args.client))

p = subparsers.add_parser(
    'spdk_get_version',
    aliases=['get_spdk_version'],
    help='Get SPDK version')
p.set_defaults(func=spdk_get_version)
|
2019-03-21 22:02:04 +00:00
|
|
|
|
2018-03-19 22:24:32 +00:00
|
|
|
def save_config(args):
    # Dump the live configuration of all subsystems to stdout as JSON.
    rpc.save_config(args.client, sys.stdout, indent=args.indent)

p = subparsers.add_parser('save_config', help="""Write current (live) configuration of SPDK subsystems and targets to stdout.
""")
p.add_argument('-i', '--indent', help="""Indent level. Value less than 0 mean compact mode. Default indent level is 2.
""", type=int, default=2)
p.set_defaults(func=save_config)
|
|
|
|
|
|
|
|
def load_config(args):
    # Apply a JSON configuration (file or stdin) to the target.
    rpc.load_config(args.client, args.json_conf, include_aliases=args.include_aliases)

p = subparsers.add_parser('load_config', help="""Configure SPDK subsystems and targets using JSON RPC.""")
p.add_argument('-i', '--include-aliases', help='include RPC aliases', action='store_true')
p.add_argument('-j', '--json_conf', help='Valid JSON configuration', default=sys.stdin)
p.set_defaults(func=load_config)
|
2018-03-09 20:19:34 +00:00
|
|
|
|
2018-06-21 23:44:54 +00:00
|
|
|
def save_subsystem_config(args):
    # Dump the live configuration of one named subsystem to stdout.
    rpc.save_subsystem_config(args.client, sys.stdout, indent=args.indent, name=args.name)

p = subparsers.add_parser('save_subsystem_config', help="""Write current (live) configuration of SPDK subsystem to stdout.
""")
p.add_argument('-i', '--indent', help="""Indent level. Value less than 0 mean compact mode. Default indent level is 2.
""", type=int, default=2)
p.add_argument('-n', '--name', help='Name of subsystem', required=True)
p.set_defaults(func=save_subsystem_config)
|
|
|
|
|
2018-05-16 19:45:39 +00:00
|
|
|
def load_subsystem_config(args):
    # Apply a JSON configuration (file or stdin) to a single subsystem.
    rpc.load_subsystem_config(args.client, args.json_conf)

p = subparsers.add_parser('load_subsystem_config', help="""Configure SPDK subsystem using JSON RPC.""")
p.add_argument('-j', '--json_conf', help='Valid JSON configuration', default=sys.stdin)
p.set_defaults(func=load_subsystem_config)
|
|
|
|
|
2017-06-06 21:22:03 +00:00
|
|
|
# app
|
2019-09-19 21:13:36 +00:00
|
|
|
def spdk_kill_instance(args):
    # Deliver the named POSIX signal to the running SPDK instance.
    rpc.app.spdk_kill_instance(args.client, sig_name=args.sig_name)

p = subparsers.add_parser(
    'spdk_kill_instance',
    aliases=['kill_instance'],
    help='Send signal to instance')
p.add_argument('sig_name', help='signal will be sent to server.')
p.set_defaults(func=spdk_kill_instance)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-18 08:12:48 +00:00
|
|
|
def framework_monitor_context_switch(args):
    # Map -e/-d flags onto a tri-state: -d wins over -e; neither means "just query".
    enabled = False if args.disable else (True if args.enable else None)
    print_dict(rpc.app.framework_monitor_context_switch(args.client, enabled=enabled))

p = subparsers.add_parser(
    'framework_monitor_context_switch',
    aliases=['context_switch_monitor'],
    help='Control whether the context switch monitor is enabled')
p.add_argument('-e', '--enable', action='store_true', help='Enable context switch monitoring')
p.add_argument('-d', '--disable', action='store_true', help='Disable context switch monitoring')
p.set_defaults(func=framework_monitor_context_switch)
|
2017-06-06 21:22:03 +00:00
|
|
|
|
2019-12-17 22:32:51 +00:00
|
|
|
def framework_get_reactors(args):
    # Fetch and pretty-print the target's reactor (event loop) list.
    print_dict(rpc.app.framework_get_reactors(args.client))

p = subparsers.add_parser('framework_get_reactors',
                          help='Display list of all reactors')
p.set_defaults(func=framework_get_reactors)
|
|
|
|
|
2017-06-06 21:22:03 +00:00
|
|
|
# bdev
|
2019-09-20 09:04:58 +00:00
|
|
|
def bdev_set_options(args):
    # Forward bdev-subsystem tunables straight from the parsed arguments.
    rpc.bdev.bdev_set_options(
        args.client,
        bdev_io_pool_size=args.bdev_io_pool_size,
        bdev_io_cache_size=args.bdev_io_cache_size,
        bdev_auto_examine=args.bdev_auto_examine)

p = subparsers.add_parser(
    'bdev_set_options',
    aliases=['set_bdev_options'],
    help="""Set options of bdev subsystem""")
p.add_argument('-p', '--bdev-io-pool-size', help='Number of bdev_io structures in shared buffer pool', type=int)
p.add_argument('-c', '--bdev-io-cache-size', help='Maximum number of bdev_io structures cached per thread', type=int)
group = p.add_mutually_exclusive_group()
group.add_argument('-e', '--enable-auto-examine', dest='bdev_auto_examine', help='Allow to auto examine', action='store_true')
group.add_argument('-d', '--disable-auto-examine', dest='bdev_auto_examine', help='Not allow to auto examine', action='store_false')
p.set_defaults(bdev_auto_examine=True)
p.set_defaults(func=bdev_set_options)
|
2018-06-11 15:58:15 +00:00
|
|
|
|
2020-07-06 03:59:40 +00:00
|
|
|
def bdev_examine(args):
    # Request (possibly deferred) examination of the named bdev.
    rpc.bdev.bdev_examine(args.client, name=args.name)

p = subparsers.add_parser(
    'bdev_examine',
    help="""examine a bdev if it exists, or will examine it after it is created""")
p.add_argument('-b', '--name', help='Name or alias of the bdev')
p.set_defaults(func=bdev_examine)
|
|
|
|
|
2019-08-13 11:18:24 +00:00
|
|
|
def bdev_compress_create(args):
    # Create a compression vbdev on top of a base bdev and print its name.
    print_json(rpc.bdev.bdev_compress_create(args.client,
                                             base_bdev_name=args.base_bdev_name,
                                             pm_path=args.pm_path, lb_size=args.lb_size))

p = subparsers.add_parser(
    'bdev_compress_create',
    aliases=['construct_compress_bdev'],
    help='Add a compress vbdev')
p.add_argument('-b', '--base_bdev_name', help="Name of the base bdev")
p.add_argument('-p', '--pm_path', help="Path to persistent memory")
p.add_argument('-l', '--lb_size', help="Compressed vol logical block size (optional, if used must be 512 or 4096)", type=int, default=0)
p.set_defaults(func=bdev_compress_create)
|
2018-12-01 15:09:46 +00:00
|
|
|
|
2019-08-13 11:18:24 +00:00
|
|
|
def bdev_compress_delete(args):
    # Remove the named compress vbdev.
    rpc.bdev.bdev_compress_delete(args.client, name=args.name)

p = subparsers.add_parser(
    'bdev_compress_delete',
    aliases=['delete_compress_bdev'],
    help='Delete a compress disk')
p.add_argument('name', help='compress bdev name')
p.set_defaults(func=bdev_compress_delete)
|
2018-12-01 15:09:46 +00:00
|
|
|
|
2020-01-23 17:48:01 +00:00
|
|
|
def compress_set_pmd(args):
    # Select which polled-mode driver the compress bdev module uses.
    rpc.bdev.compress_set_pmd(args.client, pmd=args.pmd)

p = subparsers.add_parser(
    'compress_set_pmd',
    aliases=['set_compress_pmd'],
    help='Set pmd option for a compress disk')
p.add_argument('-p', '--pmd', type=int, help='0 = auto-select, 1= QAT only, 2 = ISAL only')
p.set_defaults(func=compress_set_pmd)
|
2019-05-30 00:05:38 +00:00
|
|
|
|
2019-08-20 17:12:59 +00:00
|
|
|
def bdev_compress_get_orphans(args):
    # List compress bdevs whose base device disappeared.
    print_dict(rpc.bdev.bdev_compress_get_orphans(args.client, name=args.name))

p = subparsers.add_parser('bdev_compress_get_orphans',
                          help='Display list of orphaned compress bdevs.')
p.add_argument('-b', '--name', help="Name of a comp bdev. Example: COMP_Nvme0n1", required=False)
p.set_defaults(func=bdev_compress_get_orphans)
|
|
|
|
|
2019-08-13 12:34:24 +00:00
|
|
|
def bdev_crypto_create(args):
    # Create an encryption vbdev on top of a base bdev and print its name.
    print_json(rpc.bdev.bdev_crypto_create(args.client,
                                           base_bdev_name=args.base_bdev_name,
                                           name=args.name,
                                           crypto_pmd=args.crypto_pmd,
                                           key=args.key,
                                           cipher=args.cipher,
                                           key2=args.key2))

p = subparsers.add_parser('bdev_crypto_create', aliases=['construct_crypto_bdev'],
                          help='Add a crypto vbdev')
p.add_argument('base_bdev_name', help="Name of the base bdev")
p.add_argument('name', help="Name of the crypto vbdev")
p.add_argument('crypto_pmd', help="Name of the crypto device driver")
p.add_argument('key', help="Key")
p.add_argument('-c', '--cipher', help="cipher to use, AES_CBC or AES_XTS (QAT only)", default="AES_CBC")
# Fixed typo in the help text: the cipher is AES_XTS, not "AET_XTS".
p.add_argument('-k2', '--key2', help="2nd key for cipher AES_XTS", default=None)
p.set_defaults(func=bdev_crypto_create)
|
2018-03-07 23:44:06 +00:00
|
|
|
|
2019-08-13 12:34:24 +00:00
|
|
|
def bdev_crypto_delete(args):
    # Remove the named crypto vbdev.
    rpc.bdev.bdev_crypto_delete(args.client, name=args.name)

p = subparsers.add_parser(
    'bdev_crypto_delete',
    aliases=['delete_crypto_bdev'],
    help='Delete a crypto disk')
p.add_argument('name', help='crypto bdev name')
p.set_defaults(func=bdev_crypto_delete)
|
2018-03-07 23:44:06 +00:00
|
|
|
|
2019-08-28 08:30:48 +00:00
|
|
|
def bdev_ocf_create(args):
    # Build an OCF cache vbdev pairing a cache bdev with a core bdev.
    print_json(rpc.bdev.bdev_ocf_create(args.client,
                                        name=args.name,
                                        mode=args.mode,
                                        cache_line_size=args.cache_line_size,
                                        cache_bdev_name=args.cache_bdev_name,
                                        core_bdev_name=args.core_bdev_name))

p = subparsers.add_parser('bdev_ocf_create', aliases=['construct_ocf_bdev'],
                          help='Add an OCF block device')
p.add_argument('name', help='Name of resulting OCF bdev')
p.add_argument('mode', help='OCF cache mode', choices=['wb', 'wt', 'pt', 'wa', 'wi', 'wo'])
p.add_argument(
    '--cache-line-size',
    help='OCF cache line size. The unit is KiB',
    type=int,
    choices=[4, 8, 16, 32, 64],
    required=False,
    default=0,
)
p.add_argument('cache_bdev_name', help='Name of underlying cache bdev')
# Fixed typo in the help text: "unerlying" -> "underlying".
p.add_argument('core_bdev_name', help='Name of underlying core bdev')
p.set_defaults(func=bdev_ocf_create)
|
2018-10-29 20:23:51 +00:00
|
|
|
|
2019-08-28 08:30:48 +00:00
|
|
|
def bdev_ocf_delete(args):
    # Remove the named OCF vbdev.
    rpc.bdev.bdev_ocf_delete(args.client, name=args.name)

p = subparsers.add_parser(
    'bdev_ocf_delete',
    aliases=['delete_ocf_bdev'],
    help='Delete an OCF block device')
p.add_argument('name', help='Name of OCF bdev')
p.set_defaults(func=bdev_ocf_delete)
|
2018-10-29 20:23:51 +00:00
|
|
|
|
2019-08-28 08:43:22 +00:00
|
|
|
def bdev_ocf_get_stats(args):
    # Fetch and pretty-print statistics for one OCF bdev.
    print_dict(rpc.bdev.bdev_ocf_get_stats(args.client, name=args.name))

p = subparsers.add_parser(
    'bdev_ocf_get_stats',
    aliases=['get_ocf_stats'],
    help='Get statistics of chosen OCF block device')
p.add_argument('name', help='Name of OCF bdev')
p.set_defaults(func=bdev_ocf_get_stats)
|
2018-12-28 18:59:40 +00:00
|
|
|
|
2019-08-28 08:43:22 +00:00
|
|
|
def bdev_ocf_get_bdevs(args):
    # List OCF devices, optionally filtered by vbdev/cache/core name.
    print_dict(rpc.bdev.bdev_ocf_get_bdevs(args.client, name=args.name))

p = subparsers.add_parser(
    'bdev_ocf_get_bdevs',
    aliases=['get_ocf_bdevs'],
    help='Get list of OCF devices including unregistered ones')
p.add_argument('name', nargs='?', default=None, help='name of OCF vbdev or name of cache device or name of core device (optional)')
p.set_defaults(func=bdev_ocf_get_bdevs)
|
2019-01-22 21:11:37 +00:00
|
|
|
|
2019-08-09 11:15:35 +00:00
|
|
|
def bdev_malloc_create(args):
    # Convert the requested size in MB into a whole number of blocks.
    num_blocks = (args.total_size * 1024 * 1024) // args.block_size
    print_json(rpc.bdev.bdev_malloc_create(args.client,
                                           num_blocks=int(num_blocks),
                                           block_size=args.block_size,
                                           name=args.name, uuid=args.uuid))

p = subparsers.add_parser(
    'bdev_malloc_create',
    aliases=['construct_malloc_bdev'],
    help='Create a bdev with malloc backend')
p.add_argument('-b', '--name', help="Name of the bdev")
p.add_argument('-u', '--uuid', help="UUID of the bdev")
p.add_argument('total_size', help='Size of malloc bdev in MB (float > 0)', type=float)
p.add_argument('block_size', help='Block size for this bdev', type=int)
p.set_defaults(func=bdev_malloc_create)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-19 08:19:29 +00:00
|
|
|
def bdev_malloc_delete(args):
    # Remove the named malloc bdev.
    rpc.bdev.bdev_malloc_delete(args.client, name=args.name)

p = subparsers.add_parser(
    'bdev_malloc_delete',
    aliases=['delete_malloc_bdev'],
    help='Delete a malloc disk')
p.add_argument('name', help='malloc bdev name')
p.set_defaults(func=bdev_malloc_delete)
|
2018-06-15 07:58:57 +00:00
|
|
|
|
2019-08-20 13:06:22 +00:00
|
|
|
def bdev_null_create(args):
    # Convert the requested size in MB into a block count, then create the bdev.
    num_blocks = (args.total_size * 1024 * 1024) // args.block_size
    print_json(rpc.bdev.bdev_null_create(args.client,
                                         num_blocks=num_blocks,
                                         block_size=args.block_size,
                                         name=args.name,
                                         uuid=args.uuid,
                                         md_size=args.md_size,
                                         dif_type=args.dif_type,
                                         dif_is_head_of_md=args.dif_is_head_of_md))

p = subparsers.add_parser(
    'bdev_null_create',
    aliases=['construct_null_bdev'],
    help='Add a bdev with null backend')
p.add_argument('name', help='Block device name')
p.add_argument('-u', '--uuid', help='UUID of the bdev')
p.add_argument('total_size', help='Size of null bdev in MB (int > 0)', type=int)
p.add_argument('block_size', help='Block size for this bdev', type=int)
p.add_argument('-m', '--md-size', type=int,
               help='Metadata size for this bdev. Default 0')
p.add_argument('-t', '--dif-type', type=int, choices=[0, 1, 2, 3],
               help='Protection information type. Default: 0 - no protection')
p.add_argument('-d', '--dif-is-head-of-md', action='store_true',
               help='Protection information is in the first 8 bytes of metadata. Default: in the last 8 bytes')
p.set_defaults(func=bdev_null_create)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-20 13:06:22 +00:00
|
|
|
def bdev_null_delete(args):
    # Remove the named null bdev.
    rpc.bdev.bdev_null_delete(args.client, name=args.name)

p = subparsers.add_parser(
    'bdev_null_delete',
    aliases=['delete_null_bdev'],
    help='Delete a null bdev')
p.add_argument('name', help='null bdev name')
p.set_defaults(func=bdev_null_delete)
|
2018-06-22 07:42:57 +00:00
|
|
|
|
2019-08-13 09:38:42 +00:00
|
|
|
def bdev_aio_create(args):
    # Create a Linux AIO bdev backed by a file or block device and print its name.
    print_json(rpc.bdev.bdev_aio_create(args.client,
                                        filename=args.filename,
                                        name=args.name,
                                        block_size=args.block_size))

p = subparsers.add_parser(
    'bdev_aio_create',
    aliases=['construct_aio_bdev'],
    help='Add a bdev with aio backend')
p.add_argument('filename', help='Path to device or file (ex: /dev/sda)')
p.add_argument('name', help='Block device name')
p.add_argument('block_size', help='Block size for this bdev', type=int, nargs='?', default=0)
p.set_defaults(func=bdev_aio_create)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-13 09:38:42 +00:00
|
|
|
def bdev_aio_delete(args):
    # Remove the named AIO bdev.
    rpc.bdev.bdev_aio_delete(args.client, name=args.name)

p = subparsers.add_parser(
    'bdev_aio_delete',
    aliases=['delete_aio_bdev'],
    help='Delete an aio disk')
p.add_argument('name', help='aio bdev name')
p.set_defaults(func=bdev_aio_delete)
|
2018-06-14 14:15:36 +00:00
|
|
|
|
2019-10-07 23:04:23 +00:00
|
|
|
def bdev_uring_create(args):
    # Create an io_uring-backed bdev and print its name.
    print_json(rpc.bdev.bdev_uring_create(args.client,
                                          filename=args.filename,
                                          name=args.name,
                                          block_size=args.block_size))

p = subparsers.add_parser('bdev_uring_create', help='Create a bdev with io_uring backend')
p.add_argument('filename', help='Path to device or file (ex: /dev/nvme0n1)')
p.add_argument('name', help='bdev name')
p.add_argument('block_size', help='Block size for this bdev', type=int, nargs='?', default=0)
p.set_defaults(func=bdev_uring_create)
|
|
|
|
|
|
|
|
def bdev_uring_delete(args):
    # Remove the named io_uring bdev.
    rpc.bdev.bdev_uring_delete(args.client, name=args.name)

p = subparsers.add_parser('bdev_uring_delete', help='Delete a uring bdev')
p.add_argument('name', help='uring bdev name')
p.set_defaults(func=bdev_uring_delete)
|
|
|
|
|
2019-08-22 12:44:02 +00:00
|
|
|
def bdev_nvme_set_options(args):
    # Forward every NVMe bdev-driver tunable from parsed args; unset options
    # are passed as None and left at their target-side defaults.
    rpc.bdev.bdev_nvme_set_options(args.client,
                                   action_on_timeout=args.action_on_timeout,
                                   timeout_us=args.timeout_us,
                                   retry_count=args.retry_count,
                                   arbitration_burst=args.arbitration_burst,
                                   low_priority_weight=args.low_priority_weight,
                                   medium_priority_weight=args.medium_priority_weight,
                                   high_priority_weight=args.high_priority_weight,
                                   nvme_adminq_poll_period_us=args.nvme_adminq_poll_period_us,
                                   nvme_ioq_poll_period_us=args.nvme_ioq_poll_period_us,
                                   io_queue_requests=args.io_queue_requests,
                                   delay_cmd_submit=args.delay_cmd_submit)

p = subparsers.add_parser('bdev_nvme_set_options', aliases=['set_bdev_nvme_options'],
                          help='Set options for the bdev nvme type. This is startup command.')
# Fixed typo in the help text: "valies" -> "values".
p.add_argument('-a', '--action-on-timeout',
               help="Action to take on command time out. Valid values are: none, reset, abort")
p.add_argument('-t', '--timeout-us',
               help="Timeout for each command, in microseconds. If 0, don't track timeouts.", type=int)
p.add_argument('-n', '--retry-count',
               help='the number of attempts per I/O when an I/O fails', type=int)
p.add_argument('--arbitration-burst',
               help='the value is expressed as a power of two', type=int)
p.add_argument('--low-priority-weight',
               help='the maximum number of commands that the controller may launch at one time from a low priority queue', type=int)
p.add_argument('--medium-priority-weight',
               help='the maximum number of commands that the controller may launch at one time from a medium priority queue', type=int)
p.add_argument('--high-priority-weight',
               help='the maximum number of commands that the controller may launch at one time from a high priority queue', type=int)
p.add_argument('-p', '--nvme-adminq-poll-period-us',
               help='How often the admin queue is polled for asynchronous events', type=int)
p.add_argument('-i', '--nvme-ioq-poll-period-us',
               help='How often to poll I/O queues for completions', type=int)
p.add_argument('-s', '--io-queue-requests',
               help='The number of requests allocated for each NVMe I/O queue. Default: 512', type=int)
p.add_argument('-d', '--disable-delay-cmd-submit',
               help='Disable delaying NVMe command submission, i.e. no batching of multiple commands',
               action='store_false', dest='delay_cmd_submit', default=True)
p.set_defaults(func=bdev_nvme_set_options)
|
2018-07-09 21:04:33 +00:00
|
|
|
|
2019-08-22 13:04:52 +00:00
|
|
|
def bdev_nvme_set_hotplug(args):
    # Toggle NVMe hotplug monitoring and set its polling period.
    rpc.bdev.bdev_nvme_set_hotplug(args.client, enable=args.enable, period_us=args.period_us)

p = subparsers.add_parser(
    'bdev_nvme_set_hotplug',
    aliases=['set_bdev_nvme_hotplug'],
    help='Set hotplug options for bdev nvme type.')
p.add_argument('-d', '--disable', dest='enable', default=False, action='store_false', help="Disable hotplug (default)")
p.add_argument('-e', '--enable', dest='enable', action='store_true', help="Enable hotplug")
p.add_argument('-r', '--period-us',
               help='How often the hotplug is processed for insert and remove events', type=int)
p.set_defaults(func=bdev_nvme_set_hotplug)
|
2018-07-12 12:26:19 +00:00
|
|
|
|
2019-08-23 13:50:51 +00:00
|
|
|
def bdev_nvme_attach_controller(args):
    """Attach an NVMe controller and print the names of the created bdevs."""
    created_bdevs = rpc.bdev.bdev_nvme_attach_controller(args.client,
                                                         name=args.name,
                                                         trtype=args.trtype,
                                                         traddr=args.traddr,
                                                         adrfam=args.adrfam,
                                                         trsvcid=args.trsvcid,
                                                         priority=args.priority,
                                                         subnqn=args.subnqn,
                                                         hostnqn=args.hostnqn,
                                                         hostaddr=args.hostaddr,
                                                         hostsvcid=args.hostsvcid,
                                                         prchk_reftag=args.prchk_reftag,
                                                         prchk_guard=args.prchk_guard)
    print_array(created_bdevs)

p = subparsers.add_parser('bdev_nvme_attach_controller', aliases=['construct_nvme_bdev'],
                          help='Add bdevs with nvme backend')
p.add_argument('-b', '--name', required=True,
               help="Name of the NVMe controller, prefix for each bdev name")
p.add_argument('-t', '--trtype', required=True,
               help='NVMe-oF target trtype: e.g., rdma, pcie')
p.add_argument('-a', '--traddr', required=True,
               help='NVMe-oF target address: e.g., an ip address or BDF')
p.add_argument('-f', '--adrfam',
               help='NVMe-oF target adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
p.add_argument('-s', '--trsvcid',
               help='NVMe-oF target trsvcid: e.g., a port number')
p.add_argument('-p', '--priority',
               help='NVMe-oF connection priority: e.g., a priority number')
p.add_argument('-n', '--subnqn', help='NVMe-oF target subnqn')
p.add_argument('-q', '--hostnqn', help='NVMe-oF host subnqn')
p.add_argument('-i', '--hostaddr',
               help='NVMe-oF host address: e.g., an ip address')
p.add_argument('-c', '--hostsvcid',
               help='NVMe-oF host svcid: e.g., a port number')
p.add_argument('-r', '--prchk-reftag', action='store_true',
               help='Enable checking of PI reference tag for I/O processing.')
p.add_argument('-g', '--prchk-guard', action='store_true',
               help='Enable checking of PI guard for I/O processing.')
p.set_defaults(func=bdev_nvme_attach_controller)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-27 19:49:10 +00:00
|
|
|
def bdev_nvme_get_controllers(args):
    """Print the list of NVMe controllers (or one controller when --name is given)."""
    # NOTE: this RPC lives in the rpc.nvme module, unlike most bdev calls here.
    print_dict(rpc.nvme.bdev_nvme_get_controllers(args.client, name=args.name))

p = subparsers.add_parser(
    'bdev_nvme_get_controllers', aliases=['get_nvme_controllers'],
    help='Display current NVMe controllers list or required NVMe controller')
p.add_argument('-n', '--name', required=False,
               help="Name of the NVMe controller. Example: Nvme0")
p.set_defaults(func=bdev_nvme_get_controllers)
|
2018-07-12 12:42:44 +00:00
|
|
|
|
2019-08-23 15:12:13 +00:00
|
|
|
def bdev_nvme_detach_controller(args):
    """Detach an NVMe controller, identified by name plus optional transport info."""
    rpc.bdev.bdev_nvme_detach_controller(args.client,
                                         name=args.name,
                                         trtype=args.trtype,
                                         traddr=args.traddr,
                                         adrfam=args.adrfam,
                                         trsvcid=args.trsvcid,
                                         subnqn=args.subnqn)

p = subparsers.add_parser('bdev_nvme_detach_controller', aliases=['delete_nvme_controller'],
                          help='Detach an NVMe controller and delete any associated bdevs')
p.add_argument('name', help="Name of the controller")
p.add_argument('-t', '--trtype',
               help='NVMe-oF target trtype: e.g., rdma, pcie')
p.add_argument('-a', '--traddr',
               help='NVMe-oF target address: e.g., an ip address or BDF')
p.add_argument('-f', '--adrfam',
               help='NVMe-oF target adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
p.add_argument('-s', '--trsvcid',
               help='NVMe-oF target trsvcid: e.g., a port number')
p.add_argument('-n', '--subnqn', help='NVMe-oF target subnqn')
p.set_defaults(func=bdev_nvme_detach_controller)
|
2018-07-26 12:54:20 +00:00
|
|
|
|
2019-10-24 18:09:47 +00:00
|
|
|
def bdev_nvme_cuse_register(args):
    """Register CUSE character devices for the named NVMe controller."""
    rpc.bdev.bdev_nvme_cuse_register(args.client, name=args.name)

p = subparsers.add_parser('bdev_nvme_cuse_register',
                          help='Register CUSE devices on NVMe controller')
p.add_argument('-n', '--name', required=True,
               help='Name of the NVMe controller. Example: Nvme0')
p.set_defaults(func=bdev_nvme_cuse_register)
|
|
|
|
|
|
|
|
def bdev_nvme_cuse_unregister(args):
    """Remove the CUSE character devices of the named NVMe controller."""
    rpc.bdev.bdev_nvme_cuse_unregister(args.client, name=args.name)

p = subparsers.add_parser('bdev_nvme_cuse_unregister',
                          help='Unregister CUSE devices on NVMe controller')
p.add_argument('-n', '--name', required=True,
               help='Name of the NVMe controller. Example: Nvme0')
p.set_defaults(func=bdev_nvme_cuse_unregister)
|
|
|
|
|
2019-08-19 11:36:42 +00:00
|
|
|
def bdev_zone_block_create(args):
    """Create a zoned bdev on top of a regular bdev and print the result."""
    result = rpc.bdev.bdev_zone_block_create(args.client,
                                             name=args.name,
                                             base_bdev=args.base_bdev,
                                             zone_capacity=args.zone_capacity,
                                             optimal_open_zones=args.optimal_open_zones)
    print_json(result)

p = subparsers.add_parser('bdev_zone_block_create',
                          help='Create virtual zone namespace device with block device backend')
p.add_argument('-b', '--name', required=True, help="Name of the zone device")
p.add_argument('-n', '--base-bdev', required=True,
               help='Name of underlying, non-zoned bdev')
p.add_argument('-z', '--zone-capacity', type=int, required=True,
               help='Surfaced zone capacity in blocks')
p.add_argument('-o', '--optimal-open-zones', type=int, required=True,
               help='Number of zones required to reach optimal write speed')
p.set_defaults(func=bdev_zone_block_create)
|
|
|
|
|
|
|
|
def bdev_zone_block_delete(args):
    """Delete the named virtual zoned bdev."""
    rpc.bdev.bdev_zone_block_delete(args.client, name=args.name)

p = subparsers.add_parser('bdev_zone_block_delete',
                          help='Delete a virtual zone namespace device')
p.add_argument('name', help='Virtual zone bdev name')
p.set_defaults(func=bdev_zone_block_delete)
|
|
|
|
|
2019-09-11 06:27:20 +00:00
|
|
|
def bdev_rbd_create(args):
    """Create a Ceph RBD bdev; optional --config entries become a rados_conf_set dict."""
    config = None
    if args.config:
        config = {}
        for entry in args.config:
            # Each entry must look like "key=value"; only the first '=' splits.
            key, sep, value = entry.partition('=')
            if not sep:
                raise Exception('--config %s not in key=value form' % entry)
            config[key] = value
    print_json(rpc.bdev.bdev_rbd_create(args.client,
                                        name=args.name,
                                        user=args.user,
                                        config=config,
                                        pool_name=args.pool_name,
                                        rbd_name=args.rbd_name,
                                        block_size=args.block_size))

p = subparsers.add_parser('bdev_rbd_create', aliases=['construct_rbd_bdev'],
                          help='Add a bdev with ceph rbd backend')
p.add_argument('-b', '--name', required=False, help="Name of the bdev")
p.add_argument('--user', required=False,
               help="Ceph user name (i.e. admin, not client.admin)")
p.add_argument('--config', action='append', metavar='key=value',
               help="adds a key=value configuration option for rados_conf_set (default: rely on config file)")
p.add_argument('pool_name', help='rbd pool name')
p.add_argument('rbd_name', help='rbd image name')
p.add_argument('block_size', help='rbd block size', type=int)
p.set_defaults(func=bdev_rbd_create)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-11 08:00:10 +00:00
|
|
|
def bdev_rbd_delete(args):
    """Delete the named rbd bdev."""
    rpc.bdev.bdev_rbd_delete(args.client, name=args.name)

p = subparsers.add_parser('bdev_rbd_delete', aliases=['delete_rbd_bdev'],
                          help='Delete a rbd bdev')
p.add_argument('name', help='rbd bdev name')
p.set_defaults(func=bdev_rbd_delete)
|
2018-06-22 11:51:35 +00:00
|
|
|
|
2020-04-08 06:45:55 +00:00
|
|
|
def bdev_rbd_resize(args):
    """Resize the named rbd bdev to ``new_size`` MiB and print the JSON result.

    Bug fix: the resize RPC was previously issued twice — once inside
    print_json() and then a second, duplicated bare call — so a single CLI
    invocation sent two resize requests to the server. The duplicate call
    has been removed; the RPC is now sent exactly once.
    """
    print_json(rpc.bdev.bdev_rbd_resize(args.client,
                                        name=args.name,
                                        new_size=int(args.new_size)))

p = subparsers.add_parser('bdev_rbd_resize',
                          help='Resize a rbd bdev')
p.add_argument('name', help='rbd bdev name')
p.add_argument('new_size', help='new bdev size for resize operation. The unit is MiB')
p.set_defaults(func=bdev_rbd_resize)
|
|
|
|
|
2019-05-07 22:43:11 +00:00
|
|
|
def bdev_delay_create(args):
    """Create a delay bdev layered on an existing bdev and print the result."""
    result = rpc.bdev.bdev_delay_create(args.client,
                                        base_bdev_name=args.base_bdev_name,
                                        name=args.name,
                                        avg_read_latency=args.avg_read_latency,
                                        p99_read_latency=args.nine_nine_read_latency,
                                        avg_write_latency=args.avg_write_latency,
                                        p99_write_latency=args.nine_nine_write_latency)
    print_json(result)

p = subparsers.add_parser('bdev_delay_create',
                          help='Add a delay bdev on existing bdev')
p.add_argument('-b', '--base-bdev-name', required=True,
               help="Name of the existing bdev")
p.add_argument('-d', '--name', required=True, help="Name of the delay bdev")
p.add_argument('-r', '--avg-read-latency', required=True, type=int,
               help="Average latency to apply before completing read ops (in microseconds)")
p.add_argument('-t', '--nine-nine-read-latency', required=True, type=int,
               help="latency to apply to 1 in 100 read ops (in microseconds)")
p.add_argument('-w', '--avg-write-latency', required=True, type=int,
               help="Average latency to apply before completing write ops (in microseconds)")
p.add_argument('-n', '--nine-nine-write-latency', required=True, type=int,
               help="latency to apply to 1 in 100 write ops (in microseconds)")
p.set_defaults(func=bdev_delay_create)
|
|
|
|
|
|
|
|
def bdev_delay_delete(args):
    """Delete the named delay bdev."""
    rpc.bdev.bdev_delay_delete(args.client, name=args.name)

p = subparsers.add_parser('bdev_delay_delete', help='Delete a delay bdev')
p.add_argument('name', help='delay bdev name')
p.set_defaults(func=bdev_delay_delete)
|
|
|
|
|
2019-08-07 21:11:19 +00:00
|
|
|
def bdev_delay_update_latency(args):
    """Update one latency value of a delay bdev and print the result."""
    print_json(rpc.bdev.bdev_delay_update_latency(args.client,
                                                  delay_bdev_name=args.delay_bdev_name,
                                                  latency_type=args.latency_type,
                                                  latency_us=args.latency_us))

p = subparsers.add_parser('bdev_delay_update_latency',
                          help='Update one of the latency values for a given delay bdev')
p.add_argument('delay_bdev_name', help='The name of the given delay bdev')
p.add_argument('latency_type',
               help='one of: avg_read, avg_write, p99_read, p99_write. No other values accepted.')
p.add_argument('latency_us', type=int,
               help='new latency value in microseconds.')
p.set_defaults(func=bdev_delay_update_latency)
|
|
|
|
|
2019-08-16 09:58:53 +00:00
|
|
|
def bdev_error_create(args):
    """Create an error-injection bdev on the given base bdev and print the result."""
    print_json(rpc.bdev.bdev_error_create(args.client, base_name=args.base_name))

p = subparsers.add_parser('bdev_error_create', aliases=['construct_error_bdev'],
                          help='Add bdev with error injection backend')
p.add_argument('base_name', help='base bdev name')
p.set_defaults(func=bdev_error_create)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-16 09:58:53 +00:00
|
|
|
def bdev_error_delete(args):
    """Delete the named error-injection bdev."""
    rpc.bdev.bdev_error_delete(args.client, name=args.name)

p = subparsers.add_parser('bdev_error_delete', aliases=['delete_error_bdev'],
                          help='Delete an error bdev')
p.add_argument('name', help='error bdev name')
p.set_defaults(func=bdev_error_delete)
|
2018-06-15 14:06:38 +00:00
|
|
|
|
2019-08-22 10:51:25 +00:00
|
|
|
def bdev_iscsi_create(args):
    """Create a bdev backed by an iSCSI LUN and print the result."""
    print_json(rpc.bdev.bdev_iscsi_create(args.client,
                                          name=args.name,
                                          url=args.url,
                                          initiator_iqn=args.initiator_iqn))

p = subparsers.add_parser('bdev_iscsi_create', aliases=['construct_iscsi_bdev'],
                          help='Add bdev with iSCSI initiator backend')
p.add_argument('-b', '--name', required=True, help="Name of the bdev")
p.add_argument('-i', '--initiator-iqn', required=True, help="Initiator IQN")
p.add_argument('--url', required=True, help="iSCSI Lun URL")
p.set_defaults(func=bdev_iscsi_create)
|
2018-04-05 21:41:19 +00:00
|
|
|
|
2019-08-22 10:51:25 +00:00
|
|
|
def bdev_iscsi_delete(args):
    """Delete the named iSCSI bdev."""
    rpc.bdev.bdev_iscsi_delete(args.client, name=args.name)

p = subparsers.add_parser('bdev_iscsi_delete', aliases=['delete_iscsi_bdev'],
                          help='Delete an iSCSI bdev')
p.add_argument('name', help='iSCSI bdev name')
p.set_defaults(func=bdev_iscsi_delete)
|
2018-06-18 08:07:33 +00:00
|
|
|
|
2019-08-29 09:54:36 +00:00
|
|
|
def bdev_pmem_create(args):
    """Create a bdev backed by a pmemblk pool file and print the result."""
    print_json(rpc.bdev.bdev_pmem_create(args.client,
                                         pmem_file=args.pmem_file,
                                         name=args.name))

p = subparsers.add_parser('bdev_pmem_create', aliases=['construct_pmem_bdev'],
                          help='Add a bdev with pmem backend')
p.add_argument('pmem_file', help='Path to pmemblk pool file')
p.add_argument('-n', '--name', required=True, help='Block device name')
p.set_defaults(func=bdev_pmem_create)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-29 10:19:03 +00:00
|
|
|
def bdev_pmem_delete(args):
    """Delete the named pmem bdev."""
    rpc.bdev.bdev_pmem_delete(args.client, name=args.name)

p = subparsers.add_parser('bdev_pmem_delete', aliases=['delete_pmem_bdev'],
                          help='Delete a pmem bdev')
p.add_argument('name', help='pmem bdev name')
p.set_defaults(func=bdev_pmem_delete)
|
2018-06-22 09:48:31 +00:00
|
|
|
|
2019-08-28 08:56:12 +00:00
|
|
|
def bdev_passthru_create(args):
    """Create a pass-through bdev on an existing bdev and print the result."""
    print_json(rpc.bdev.bdev_passthru_create(args.client,
                                             base_bdev_name=args.base_bdev_name,
                                             name=args.name))

p = subparsers.add_parser('bdev_passthru_create', aliases=['construct_passthru_bdev'],
                          help='Add a pass through bdev on existing bdev')
p.add_argument('-b', '--base-bdev-name', required=True,
               help="Name of the existing bdev")
p.add_argument('-p', '--name', required=True,
               help="Name of the pass through bdev")
p.set_defaults(func=bdev_passthru_create)
|
2018-04-08 01:53:31 +00:00
|
|
|
|
2019-08-28 08:56:12 +00:00
|
|
|
def bdev_passthru_delete(args):
    """Delete the named pass-through bdev."""
    rpc.bdev.bdev_passthru_delete(args.client, name=args.name)

p = subparsers.add_parser('bdev_passthru_delete', aliases=['delete_passthru_bdev'],
                          help='Delete a pass through bdev')
p.add_argument('name', help='pass through bdev name')
p.set_defaults(func=bdev_passthru_delete)
|
2018-06-22 12:30:19 +00:00
|
|
|
|
2019-09-11 09:29:55 +00:00
|
|
|
def bdev_get_bdevs(args):
    """Print all bdevs, or just the one selected with --name."""
    print_dict(rpc.bdev.bdev_get_bdevs(args.client, name=args.name))

p = subparsers.add_parser('bdev_get_bdevs', aliases=['get_bdevs'],
                          help='Display current blockdev list or required blockdev')
p.add_argument('-b', '--name', required=False,
               help="Name of the Blockdev. Example: Nvme0n1")
p.set_defaults(func=bdev_get_bdevs)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-11 08:28:07 +00:00
|
|
|
def bdev_get_iostat(args):
    """Print I/O statistics for all bdevs, or just the one selected with --name."""
    print_dict(rpc.bdev.bdev_get_iostat(args.client, name=args.name))

p = subparsers.add_parser('bdev_get_iostat', aliases=['get_bdevs_iostat'],
                          help='Display current I/O statistics of all the blockdevs or required blockdev.')
p.add_argument('-b', '--name', required=False,
               help="Name of the Blockdev. Example: Nvme0n1")
p.set_defaults(func=bdev_get_iostat)
|
2017-12-28 09:03:17 +00:00
|
|
|
|
2019-09-11 10:39:52 +00:00
|
|
|
def bdev_enable_histogram(args):
    """Toggle latency-histogram collection on the named bdev."""
    rpc.bdev.bdev_enable_histogram(args.client, name=args.name, enable=args.enable)

p = subparsers.add_parser('bdev_enable_histogram', aliases=['enable_bdev_histogram'],
                          help='Enable or disable histogram for specified bdev')
# -e and -d share dest='enable'; the default is True (enabled).
p.add_argument('-e', '--enable', default=True, dest='enable', action='store_true',
               help='Enable histograms on specified device')
p.add_argument('-d', '--disable', dest='enable', action='store_false',
               help='Disable histograms on specified device')
p.add_argument('name', help='bdev name')
p.set_defaults(func=bdev_enable_histogram)
|
2019-01-18 13:00:04 +00:00
|
|
|
|
2019-09-11 10:54:13 +00:00
|
|
|
def bdev_get_histogram(args):
    """Print the collected latency histogram of the named bdev."""
    print_dict(rpc.bdev.bdev_get_histogram(args.client, name=args.name))

p = subparsers.add_parser('bdev_get_histogram', aliases=['get_bdev_histogram'],
                          help='Get histogram for specified bdev')
p.add_argument('name', help='bdev name')
p.set_defaults(func=bdev_get_histogram)
|
2019-01-18 13:00:04 +00:00
|
|
|
|
2019-09-11 10:24:12 +00:00
|
|
|
def bdev_set_qd_sampling_period(args):
    """Set the queue-depth polling period for a bdev (0 disables polling)."""
    rpc.bdev.bdev_set_qd_sampling_period(args.client,
                                         name=args.name,
                                         period=args.period)

p = subparsers.add_parser('bdev_set_qd_sampling_period', aliases=['set_bdev_qd_sampling_period'],
                          help="Enable or disable tracking of a bdev's queue depth.")
p.add_argument('name', help='Blockdev name. Example: Malloc0')
p.add_argument('period', type=int,
               help='Period with which to poll the block device queue depth in microseconds.'
                    ' If set to 0, polling will be disabled.')
p.set_defaults(func=bdev_set_qd_sampling_period)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-11 10:32:34 +00:00
|
|
|
def bdev_set_qos_limit(args):
    """Apply QoS rate limits (IOPS and/or bandwidth) to the named bdev."""
    rpc.bdev.bdev_set_qos_limit(args.client,
                                name=args.name,
                                rw_ios_per_sec=args.rw_ios_per_sec,
                                rw_mbytes_per_sec=args.rw_mbytes_per_sec,
                                r_mbytes_per_sec=args.r_mbytes_per_sec,
                                w_mbytes_per_sec=args.w_mbytes_per_sec)

p = subparsers.add_parser('bdev_set_qos_limit', aliases=['set_bdev_qos_limit'],
                          help='Set QoS rate limit on a blockdev')
p.add_argument('name', help='Blockdev name to set QoS. Example: Malloc0')
p.add_argument('--rw_ios_per_sec', type=int, required=False,
               help='R/W IOs per second limit (>=10000, example: 20000). 0 means unlimited.')
p.add_argument('--rw_mbytes_per_sec', type=int, required=False,
               help="R/W megabytes per second limit (>=10, example: 100). 0 means unlimited.")
p.add_argument('--r_mbytes_per_sec', type=int, required=False,
               help="Read megabytes per second limit (>=10, example: 100). 0 means unlimited.")
p.add_argument('--w_mbytes_per_sec', type=int, required=False,
               help="Write megabytes per second limit (>=10, example: 100). 0 means unlimited.")
p.set_defaults(func=bdev_set_qos_limit)
|
2017-12-29 08:02:08 +00:00
|
|
|
|
2019-08-16 09:58:53 +00:00
|
|
|
def bdev_error_inject_error(args):
    """Inject errors of the given type into the named error bdev."""
    rpc.bdev.bdev_error_inject_error(args.client,
                                     name=args.name,
                                     io_type=args.io_type,
                                     error_type=args.error_type,
                                     num=args.num)

p = subparsers.add_parser('bdev_error_inject_error', aliases=['bdev_inject_error'],
                          help='bdev inject error')
p.add_argument('name', help="""the name of the error injection bdev""")
p.add_argument('io_type', help="""io_type: 'clear' 'read' 'write' 'unmap' 'flush' 'all'""")
p.add_argument('error_type', help="""error_type: 'failure' 'pending'""")
p.add_argument('-n', '--num', type=int, default=1,
               help='the number of commands you want to fail')
p.set_defaults(func=bdev_error_inject_error)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-27 20:43:52 +00:00
|
|
|
def bdev_nvme_apply_firmware(args):
    """Download a firmware image to an NVMe device and print the RPC response."""
    response = rpc.bdev.bdev_nvme_apply_firmware(args.client,
                                                 bdev_name=args.bdev_name,
                                                 filename=args.filename)
    print_dict(response)

p = subparsers.add_parser('bdev_nvme_apply_firmware', aliases=['apply_firmware'],
                          help='Download and commit firmware to NVMe device')
p.add_argument('filename', help='filename of the firmware to download')
p.add_argument('bdev_name', help='name of the NVMe device')
p.set_defaults(func=bdev_nvme_apply_firmware)
|
2017-06-06 21:22:03 +00:00
|
|
|
|
|
|
|
# iSCSI
|
2019-09-11 11:15:34 +00:00
|
|
|
def iscsi_set_options(args):
    """Forward all iSCSI subsystem options from the CLI to the RPC server."""
    rpc.iscsi.iscsi_set_options(
        args.client,
        auth_file=args.auth_file,
        node_base=args.node_base,
        nop_timeout=args.nop_timeout,
        nop_in_interval=args.nop_in_interval,
        disable_chap=args.disable_chap,
        require_chap=args.require_chap,
        mutual_chap=args.mutual_chap,
        chap_group=args.chap_group,
        max_sessions=args.max_sessions,
        max_queue_depth=args.max_queue_depth,
        max_connections_per_session=args.max_connections_per_session,
        default_time2wait=args.default_time2wait,
        default_time2retain=args.default_time2retain,
        first_burst_length=args.first_burst_length,
        immediate_data=args.immediate_data,
        error_recovery_level=args.error_recovery_level,
        allow_duplicated_isid=args.allow_duplicated_isid,
        max_large_datain_per_connection=args.max_large_datain_per_connection,
        max_r2t_per_connection=args.max_r2t_per_connection)

p = subparsers.add_parser('iscsi_set_options', aliases=['set_iscsi_options'],
                          help="""Set options of iSCSI subsystem""")
p.add_argument('-f', '--auth-file', help='Path to CHAP shared secret file')
p.add_argument('-b', '--node-base', help='Prefix of the name of iSCSI target node')
p.add_argument('-o', '--nop-timeout', type=int,
               help='Timeout in seconds to nop-in request to the initiator')
p.add_argument('-n', '--nop-in-interval', type=int,
               help='Time interval in secs between nop-in requests by the target')
p.add_argument('-d', '--disable-chap', action='store_true',
               help="""CHAP for discovery session should be disabled.
    *** Mutually exclusive with --require-chap""")
p.add_argument('-r', '--require-chap', action='store_true',
               help="""CHAP for discovery session should be required.
    *** Mutually exclusive with --disable-chap""")
p.add_argument('-m', '--mutual-chap', action='store_true',
               help='CHAP for discovery session should be mutual')
p.add_argument('-g', '--chap-group', type=int,
               help="""Authentication group ID for discovery session.
    *** Authentication group must be precreated ***""")
p.add_argument('-a', '--max-sessions', type=int,
               help='Maximum number of sessions in the host.')
p.add_argument('-q', '--max-queue-depth', type=int,
               help='Max number of outstanding I/Os per queue.')
p.add_argument('-c', '--max-connections-per-session', type=int,
               help='Negotiated parameter, MaxConnections.')
p.add_argument('-w', '--default-time2wait', type=int,
               help='Negotiated parameter, DefaultTime2Wait.')
p.add_argument('-v', '--default-time2retain', type=int,
               help='Negotiated parameter, DefaultTime2Retain.')
p.add_argument('-s', '--first-burst-length', type=int,
               help='Negotiated parameter, FirstBurstLength.')
p.add_argument('-i', '--immediate-data', action='store_true',
               help='Negotiated parameter, ImmediateData.')
p.add_argument('-l', '--error-recovery-level', type=int,
               help='Negotiated parameter, ErrorRecoveryLevel')
p.add_argument('-p', '--allow-duplicated-isid', action='store_true',
               help='Allow duplicated initiator session ID.')
p.add_argument('-x', '--max-large-datain-per-connection', type=int,
               help='Max number of outstanding split read I/Os per connection')
p.add_argument('-k', '--max-r2t-per-connection', type=int,
               help='Max number of outstanding R2Ts per connection')
p.set_defaults(func=iscsi_set_options)
|
2018-05-14 23:24:19 +00:00
|
|
|
|
2019-09-13 13:10:13 +00:00
|
|
|
def iscsi_set_discovery_auth(args):
    """Configure CHAP authentication for the iSCSI discovery session."""
    rpc.iscsi.iscsi_set_discovery_auth(
        args.client,
        disable_chap=args.disable_chap,
        require_chap=args.require_chap,
        mutual_chap=args.mutual_chap,
        chap_group=args.chap_group)

p = subparsers.add_parser('iscsi_set_discovery_auth', aliases=['set_iscsi_discovery_auth'],
                          help="""Set CHAP authentication for discovery session.""")
p.add_argument('-d', '--disable-chap', action='store_true',
               help="""CHAP for discovery session should be disabled.
    *** Mutually exclusive with --require-chap""")
p.add_argument('-r', '--require-chap', action='store_true',
               help="""CHAP for discovery session should be required.
    *** Mutually exclusive with --disable-chap""")
p.add_argument('-m', '--mutual-chap', action='store_true',
               help='CHAP for discovery session should be mutual')
p.add_argument('-g', '--chap-group', type=int,
               help="""Authentication group ID for discovery session.
    *** Authentication group must be precreated ***""")
p.set_defaults(func=iscsi_set_discovery_auth)
|
2018-08-22 04:43:18 +00:00
|
|
|
|
2019-09-13 13:26:19 +00:00
|
|
|
def iscsi_create_auth_group(args):
    """Create a CHAP authentication group, parsing --secrets into a list of dicts."""
    secrets = None
    if args.secrets:
        # "user:u1 secret:s1 ...,user:u2 ..." -> one dict per comma-separated group.
        secrets = []
        for group in args.secrets.split(","):
            secrets.append(dict(field.split(":") for field in group.split(" ")))
    rpc.iscsi.iscsi_create_auth_group(args.client, tag=args.tag, secrets=secrets)

p = subparsers.add_parser('iscsi_create_auth_group', aliases=['add_iscsi_auth_group'],
                          help='Create authentication group for CHAP authentication.')
p.add_argument('tag', type=int,
               help='Authentication group tag (unique, integer > 0).')
p.add_argument('-c', '--secrets', required=False,
               help="""Comma-separated list of CHAP secrets
<user:user_name secret:chap_secret muser:mutual_user_name msecret:mutual_chap_secret> enclosed in quotes.
Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 msecret:ms2'""")
p.set_defaults(func=iscsi_create_auth_group)
|
2018-08-09 03:16:11 +00:00
|
|
|
|
2019-09-13 13:39:01 +00:00
|
|
|
def iscsi_delete_auth_group(args):
    """Delete the authentication group identified by its tag."""
    rpc.iscsi.iscsi_delete_auth_group(args.client, tag=args.tag)

p = subparsers.add_parser('iscsi_delete_auth_group', aliases=['delete_iscsi_auth_group'],
                          help='Delete an authentication group.')
p.add_argument('tag', help='Authentication group tag', type=int)
p.set_defaults(func=iscsi_delete_auth_group)
|
2018-08-09 03:16:11 +00:00
|
|
|
|
2019-09-16 13:15:11 +00:00
|
|
|
def iscsi_auth_group_add_secret(args):
    """Add a CHAP secret (one-way plus optional mutual) to an authentication group."""
    rpc.iscsi.iscsi_auth_group_add_secret(
        args.client,
        tag=args.tag,
        user=args.user,
        secret=args.secret,
        muser=args.muser,
        msecret=args.msecret)

p = subparsers.add_parser('iscsi_auth_group_add_secret', aliases=['add_secret_to_iscsi_auth_group'],
                          help='Add a secret to an authentication group.')
p.add_argument('tag', help='Authentication group tag', type=int)
p.add_argument('-u', '--user', required=True,
               help='User name for one-way CHAP authentication')
p.add_argument('-s', '--secret', required=True,
               help='Secret for one-way CHAP authentication')
p.add_argument('-m', '--muser', help='User name for mutual CHAP authentication')
p.add_argument('-r', '--msecret', help='Secret for mutual CHAP authentication')
p.set_defaults(func=iscsi_auth_group_add_secret)
|
2018-08-09 04:05:34 +00:00
|
|
|
|
2019-09-16 13:38:55 +00:00
|
|
|
def iscsi_auth_group_remove_secret(args):
    """Remove the secret belonging to the given user from an authentication group."""
    rpc.iscsi.iscsi_auth_group_remove_secret(args.client, tag=args.tag, user=args.user)

p = subparsers.add_parser('iscsi_auth_group_remove_secret', aliases=['delete_secret_from_iscsi_auth_group'],
                          help='Remove a secret from an authentication group.')
p.add_argument('tag', help='Authentication group tag', type=int)
p.add_argument('-u', '--user', required=True,
               help='User name for one-way CHAP authentication')
p.set_defaults(func=iscsi_auth_group_remove_secret)
|
2018-08-09 04:05:34 +00:00
|
|
|
|
2019-09-13 13:55:55 +00:00
|
|
|
def iscsi_get_auth_groups(args):
    """Fetch and pretty-print the current authentication group configuration."""
    groups = rpc.iscsi.iscsi_get_auth_groups(args.client)
    print_dict(groups)


p = subparsers.add_parser('iscsi_get_auth_groups', aliases=['get_iscsi_auth_groups'],
                          help='Display current authentication group configuration')
p.set_defaults(func=iscsi_get_auth_groups)
|
2018-08-09 05:07:15 +00:00
|
|
|
|
2019-09-09 10:12:04 +00:00
|
|
|
def iscsi_get_portal_groups(args):
    """Fetch and pretty-print the current portal group configuration."""
    groups = rpc.iscsi.iscsi_get_portal_groups(args.client)
    print_dict(groups)


p = subparsers.add_parser(
    'iscsi_get_portal_groups', aliases=['get_portal_groups'],
    help='Display current portal group configuration')
p.set_defaults(func=iscsi_get_portal_groups)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-02 22:10:36 +00:00
|
|
|
def iscsi_get_initiator_groups(args):
    """Fetch and pretty-print the current initiator group configuration."""
    groups = rpc.iscsi.iscsi_get_initiator_groups(args.client)
    print_dict(groups)


p = subparsers.add_parser('iscsi_get_initiator_groups',
                          aliases=['get_initiator_groups'],
                          help='Display current initiator group configuration')
p.set_defaults(func=iscsi_get_initiator_groups)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-04 10:20:43 +00:00
|
|
|
def iscsi_get_target_nodes(args):
    """Fetch and pretty-print the configured iSCSI target nodes."""
    nodes = rpc.iscsi.iscsi_get_target_nodes(args.client)
    print_dict(nodes)


p = subparsers.add_parser('iscsi_get_target_nodes', aliases=['get_target_nodes'],
                          help='Display target nodes')
p.set_defaults(func=iscsi_get_target_nodes)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-05 10:35:18 +00:00
|
|
|
def iscsi_create_target_node(args):
    """Create a target node from 'bdev:lun_id' pairs and 'pg_tag:ig_tag' mappings.

    Both list arguments are whitespace-separated strings of colon-joined pairs;
    they are parsed here into the list-of-dict shape the RPC expects.
    """
    luns = [{"bdev_name": bdev_name, "lun_id": int(lun_id)}
            for bdev_name, lun_id in
            (pair.split(":") for pair in args.bdev_name_id_pairs.strip().split(" "))]

    pg_ig_maps = [{"pg_tag": int(pg), "ig_tag": int(ig)}
                  for pg, ig in
                  (mapping.split(":") for mapping in args.pg_ig_mappings.strip().split(" "))]

    rpc.iscsi.iscsi_create_target_node(
        args.client,
        luns=luns,
        pg_ig_maps=pg_ig_maps,
        name=args.name,
        alias_name=args.alias_name,
        queue_depth=args.queue_depth,
        chap_group=args.chap_group,
        disable_chap=args.disable_chap,
        require_chap=args.require_chap,
        mutual_chap=args.mutual_chap,
        header_digest=args.header_digest,
        data_digest=args.data_digest)


p = subparsers.add_parser('iscsi_create_target_node', aliases=['construct_target_node'],
                          help='Add a target node')
p.add_argument('name', help='Target node name (ASCII)')
p.add_argument('alias_name', help='Target node alias name (ASCII)')
p.add_argument('bdev_name_id_pairs', help="""Whitespace-separated list of <bdev name:LUN ID> pairs enclosed
in quotes. Format: 'bdev_name0:id0 bdev_name1:id1' etc
Example: 'Malloc0:0 Malloc1:1 Malloc5:2'
*** The bdevs must pre-exist ***
*** LUN0 (id = 0) is required ***
*** bdevs names cannot contain space or colon characters ***""")
p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
Whitespace separated, quoted, mapping defined with colon
separated list of "tags" (int > 0)
Example: '1:1 2:2 2:1'
*** The Portal/Initiator Groups must be precreated ***""")
p.add_argument('queue_depth', help='Desired target queue depth', type=int)
p.add_argument('-g', '--chap-group', help="""Authentication group ID for this target node.
*** Authentication group must be precreated ***""", type=int, default=0)
p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this target node.
*** Mutually exclusive with --require-chap ***""", action='store_true')
p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this target node.
*** Mutually exclusive with --disable-chap ***""", action='store_true')
p.add_argument(
    '-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.', action='store_true')
p.add_argument('-H', '--header-digest',
               help='Header Digest should be required for this target node.', action='store_true')
p.add_argument('-D', '--data-digest',
               help='Data Digest should be required for this target node.', action='store_true')
p.set_defaults(func=iscsi_create_target_node)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-10 11:06:57 +00:00
|
|
|
def iscsi_target_node_add_lun(args):
    """Attach a bdev as a LUN on an existing target node."""
    rpc.iscsi.iscsi_target_node_add_lun(args.client,
                                        name=args.name,
                                        bdev_name=args.bdev_name,
                                        lun_id=args.lun_id)


p = subparsers.add_parser('iscsi_target_node_add_lun', aliases=['target_node_add_lun'],
                          help='Add LUN to the target node')
p.add_argument('name', help='Target node name (ASCII)')
p.add_argument('bdev_name', help="""bdev name enclosed in quotes.
*** bdev name cannot contain space or colon characters ***""")
p.add_argument('-i', dest='lun_id', help="""LUN ID (integer >= 0)
*** If LUN ID is omitted or -1, the lowest free one is assigned ***""", type=int, required=False)
p.set_defaults(func=iscsi_target_node_add_lun)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-10 11:13:46 +00:00
|
|
|
def iscsi_target_node_set_auth(args):
    """Configure CHAP authentication settings on an existing target node."""
    rpc.iscsi.iscsi_target_node_set_auth(args.client,
                                         name=args.name,
                                         chap_group=args.chap_group,
                                         disable_chap=args.disable_chap,
                                         require_chap=args.require_chap,
                                         mutual_chap=args.mutual_chap)


p = subparsers.add_parser('iscsi_target_node_set_auth', aliases=['set_iscsi_target_node_auth'],
                          help='Set CHAP authentication for the target node')
p.add_argument('name', help='Target node name (ASCII)')
p.add_argument('-g', '--chap-group', help="""Authentication group ID for this target node.
*** Authentication group must be precreated ***""", type=int, default=0)
p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this target node.
*** Mutually exclusive with --require-chap ***""", action='store_true')
p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this target node.
*** Mutually exclusive with --disable-chap ***""", action='store_true')
p.add_argument('-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.',
               action='store_true')
p.set_defaults(func=iscsi_target_node_set_auth)
|
2018-08-20 00:12:59 +00:00
|
|
|
|
2019-09-06 08:42:19 +00:00
|
|
|
def iscsi_target_node_add_pg_ig_maps(args):
    """Parse 'pg_tag:ig_tag' pairs and add them as mappings on a target node."""
    pg_ig_maps = [{"pg_tag": int(pg), "ig_tag": int(ig)}
                  for pg, ig in
                  (mapping.split(":") for mapping in args.pg_ig_mappings.strip().split(" "))]
    rpc.iscsi.iscsi_target_node_add_pg_ig_maps(args.client,
                                               pg_ig_maps=pg_ig_maps,
                                               name=args.name)


p = subparsers.add_parser('iscsi_target_node_add_pg_ig_maps',
                          aliases=['add_pg_ig_maps'],
                          help='Add PG-IG maps to the target node')
p.add_argument('name', help='Target node name (ASCII)')
p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
Whitespace separated, quoted, mapping defined with colon
separated list of "tags" (int > 0)
Example: '1:1 2:2 2:1'
*** The Portal/Initiator Groups must be precreated ***""")
p.set_defaults(func=iscsi_target_node_add_pg_ig_maps)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-06 08:59:10 +00:00
|
|
|
def iscsi_target_node_remove_pg_ig_maps(args):
    """Parse 'pg_tag:ig_tag' pairs and remove those mappings from a target node."""
    pg_ig_maps = [{"pg_tag": int(pg), "ig_tag": int(ig)}
                  for pg, ig in
                  (mapping.split(":") for mapping in args.pg_ig_mappings.strip().split(" "))]
    rpc.iscsi.iscsi_target_node_remove_pg_ig_maps(
        args.client, pg_ig_maps=pg_ig_maps, name=args.name)


p = subparsers.add_parser('iscsi_target_node_remove_pg_ig_maps',
                          aliases=['delete_pg_ig_maps'],
                          help='Delete PG-IG maps from the target node')
p.add_argument('name', help='Target node name (ASCII)')
p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
Whitespace separated, quoted, mapping defined with colon
separated list of "tags" (int > 0)
Example: '1:1 2:2 2:1'
*** The Portal/Initiator Groups must be precreated ***""")
p.set_defaults(func=iscsi_target_node_remove_pg_ig_maps)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2020-07-17 09:33:36 +00:00
|
|
|
def iscsi_target_node_set_redirect(args):
    """Set (or clear, when host/port are omitted) the redirect portal for a PG on a target node."""
    rpc.iscsi.iscsi_target_node_set_redirect(args.client,
                                             name=args.name,
                                             pg_tag=args.pg_tag,
                                             redirect_host=args.redirect_host,
                                             redirect_port=args.redirect_port)


p = subparsers.add_parser('iscsi_target_node_set_redirect',
                          help="""Update redirect portal of the public portal group for the target node.
Omit redirect host and port to clear previously set redirect settings.""")
p.add_argument('name', help='Target node name (ASCII)')
p.add_argument('pg_tag', help='Portal group tag (unique, integer > 0)', type=int)
p.add_argument('-a', '--redirect_host', help='Numeric IP address for redirect portal', required=False)
p.add_argument('-p', '--redirect_port', help='Numeric TCP port for redirect portal', required=False)
p.set_defaults(func=iscsi_target_node_set_redirect)
|
|
|
|
|
2020-08-14 03:14:27 +00:00
|
|
|
def iscsi_target_node_request_logout(args):
    """Ask connections on a target node (optionally filtered by PG tag) to log out."""
    rpc.iscsi.iscsi_target_node_request_logout(args.client,
                                               name=args.name,
                                               pg_tag=args.pg_tag)


p = subparsers.add_parser('iscsi_target_node_request_logout',
                          help="""For the target node, request connections whose portal group tag
match to logout, or request all connections if portal group tag is omitted.""")
p.add_argument('name', help='Target node name (ASCII)')
p.add_argument('-t', '--pg-tag', help='Portal group tag (unique, integer > 0)', type=int, required=False)
p.set_defaults(func=iscsi_target_node_request_logout)
|
|
|
|
|
2019-09-09 10:35:30 +00:00
|
|
|
def iscsi_create_portal_group(args):
    """Create a portal group from a whitespace-separated 'host:port' list.

    Each entry may carry an obsolete '@cpumask' suffix after the port; the
    suffix is ignored with a warning.  rpartition(':') is used so IPv6-style
    hosts containing ':' keep everything before the last colon as the host.
    """
    portals = []
    for entry in args.portal_list.strip().split(' '):
        host, _, port_cpumask = entry.rpartition(':')
        # Strip an optional (no longer supported) '@cpumask' suffix.
        port, at_sign, _cpumask = port_cpumask.partition('@')
        if at_sign:
            print("WARNING: Specifying a portal group with a CPU mask is no longer supported. Ignoring it.")
        portals.append({'host': host, 'port': port})
    rpc.iscsi.iscsi_create_portal_group(
        args.client,
        portals=portals,
        tag=args.tag,
        private=args.private)


p = subparsers.add_parser('iscsi_create_portal_group', aliases=['add_portal_group'],
                          help='Add a portal group')
p.add_argument(
    'tag', help='Portal group tag (unique, integer > 0)', type=int)
p.add_argument('portal_list', help="""List of portals in host:port format, separated by whitespace
Example: '192.168.100.100:3260 192.168.100.100:3261 192.168.100.100:3262""")
p.add_argument('-p', '--private', help="""Public (false) or private (true) portal group.
Private portal groups do not have their portals returned by a discovery session. A public
portal group may optionally specify a redirect portal for non-discovery logins. This redirect
portal must be from a private portal group.""", action='store_true')
p.set_defaults(func=iscsi_create_portal_group)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-03 08:53:37 +00:00
|
|
|
def iscsi_create_initiator_group(args):
    """Create an initiator group from whitespace-separated initiator and netmask lists."""
    initiators = args.initiator_list.strip().split(' ')
    netmasks = args.netmask_list.strip().split(' ')
    rpc.iscsi.iscsi_create_initiator_group(args.client,
                                           tag=args.tag,
                                           initiators=initiators,
                                           netmasks=netmasks)


p = subparsers.add_parser('iscsi_create_initiator_group', aliases=['add_initiator_group'],
                          help='Add an initiator group')
p.add_argument(
    'tag', help='Initiator group tag (unique, integer > 0)', type=int)
p.add_argument('initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
enclosed in quotes. Example: 'ANY' or '127.0.0.1 192.168.200.100'""")
p.add_argument('netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
Example: '255.255.0.0 255.248.0.0' etc""")
p.set_defaults(func=iscsi_create_initiator_group)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-03 09:59:23 +00:00
|
|
|
def iscsi_initiator_group_add_initiators(args):
    """Add initiators and/or netmasks to an existing initiator group.

    Either list may be omitted on the command line; an omitted list is passed
    through as None so the RPC leaves that side untouched.
    """
    initiators = args.initiator_list.strip().split(' ') if args.initiator_list else None
    netmasks = args.netmask_list.strip().split(' ') if args.netmask_list else None
    rpc.iscsi.iscsi_initiator_group_add_initiators(args.client,
                                                   tag=args.tag,
                                                   initiators=initiators,
                                                   netmasks=netmasks)


p = subparsers.add_parser('iscsi_initiator_group_add_initiators',
                          aliases=['add_initiators_to_initiator_group'],
                          help='Add initiators to an existing initiator group')
p.add_argument(
    'tag', help='Initiator group tag (unique, integer > 0)', type=int)
p.add_argument('-n', dest='initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
enclosed in quotes. This parameter can be omitted. Example: 'ANY' or '127.0.0.1 192.168.200.100'""", required=False)
p.add_argument('-m', dest='netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
This parameter can be omitted. Example: '255.255.0.0 255.248.0.0' etc""", required=False)
p.set_defaults(func=iscsi_initiator_group_add_initiators)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-04 09:52:01 +00:00
|
|
|
def iscsi_initiator_group_remove_initiators(args):
    """Remove initiators and/or netmasks from an existing initiator group.

    Either list may be omitted on the command line; an omitted list is passed
    through as None so the RPC leaves that side untouched.
    """
    initiators = args.initiator_list.strip().split(' ') if args.initiator_list else None
    netmasks = args.netmask_list.strip().split(' ') if args.netmask_list else None
    rpc.iscsi.iscsi_initiator_group_remove_initiators(args.client,
                                                      tag=args.tag,
                                                      initiators=initiators,
                                                      netmasks=netmasks)


p = subparsers.add_parser('iscsi_initiator_group_remove_initiators',
                          aliases=['delete_initiators_from_initiator_group'],
                          help='Delete initiators from an existing initiator group')
p.add_argument(
    'tag', help='Initiator group tag (unique, integer > 0)', type=int)
p.add_argument('-n', dest='initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
enclosed in quotes. This parameter can be omitted. Example: 'ANY' or '127.0.0.1 192.168.200.100'""", required=False)
p.add_argument('-m', dest='netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
This parameter can be omitted. Example: '255.255.0.0 255.248.0.0' etc""", required=False)
p.set_defaults(func=iscsi_initiator_group_remove_initiators)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-05 10:52:14 +00:00
|
|
|
def iscsi_delete_target_node(args):
    """Delete the named iSCSI target node."""
    rpc.iscsi.iscsi_delete_target_node(args.client,
                                       target_node_name=args.target_node_name)


p = subparsers.add_parser('iscsi_delete_target_node', aliases=['delete_target_node'],
                          help='Delete a target node')
p.add_argument('target_node_name',
               help='Target node name to be deleted. Example: iqn.2016-06.io.spdk:disk1.')
p.set_defaults(func=iscsi_delete_target_node)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-09 10:51:22 +00:00
|
|
|
def iscsi_delete_portal_group(args):
    """Delete the portal group identified by its tag."""
    rpc.iscsi.iscsi_delete_portal_group(args.client, tag=args.tag)


p = subparsers.add_parser('iscsi_delete_portal_group',
                          aliases=['delete_portal_group'],
                          help='Delete a portal group')
p.add_argument(
    'tag', help='Portal group tag (unique, integer > 0)', type=int)
p.set_defaults(func=iscsi_delete_portal_group)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-03 09:30:46 +00:00
|
|
|
def iscsi_delete_initiator_group(args):
    """Delete the initiator group identified by its tag."""
    rpc.iscsi.iscsi_delete_initiator_group(args.client, tag=args.tag)


p = subparsers.add_parser('iscsi_delete_initiator_group',
                          aliases=['delete_initiator_group'],
                          help='Delete an initiator group')
p.add_argument(
    'tag', help='Initiator group tag (unique, integer > 0)', type=int)
p.set_defaults(func=iscsi_delete_initiator_group)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-25 06:42:29 +00:00
|
|
|
def iscsi_portal_group_set_auth(args):
    """Configure discovery-session CHAP authentication for a portal group."""
    rpc.iscsi.iscsi_portal_group_set_auth(args.client,
                                          tag=args.tag,
                                          chap_group=args.chap_group,
                                          disable_chap=args.disable_chap,
                                          require_chap=args.require_chap,
                                          mutual_chap=args.mutual_chap)


p = subparsers.add_parser('iscsi_portal_group_set_auth',
                          help='Set CHAP authentication for discovery sessions specific for the portal group')
p.add_argument('tag', help='Portal group tag (unique, integer > 0)', type=int)
p.add_argument('-g', '--chap-group', help="""Authentication group ID for this portal group.
*** Authentication group must be precreated ***""", type=int, default=0)
p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this portal group.
*** Mutually exclusive with --require-chap ***""", action='store_true')
p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this portal group.
*** Mutually exclusive with --disable-chap ***""", action='store_true')
p.add_argument('-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.',
               action='store_true')
p.set_defaults(func=iscsi_portal_group_set_auth)
|
|
|
|
|
2019-09-10 09:32:18 +00:00
|
|
|
def iscsi_get_connections(args):
    """Fetch and pretty-print active iSCSI connections."""
    connections = rpc.iscsi.iscsi_get_connections(args.client)
    print_dict(connections)


p = subparsers.add_parser('iscsi_get_connections', aliases=['get_iscsi_connections'],
                          help='Display iSCSI connections')
p.set_defaults(func=iscsi_get_connections)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-11 11:39:42 +00:00
|
|
|
def iscsi_get_options(args):
    """Fetch and pretty-print the global iSCSI parameters."""
    options = rpc.iscsi.iscsi_get_options(args.client)
    print_dict(options)


p = subparsers.add_parser('iscsi_get_options', aliases=['get_iscsi_global_params'],
                          help='Display iSCSI global parameters')
p.set_defaults(func=iscsi_get_options)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-19 22:16:27 +00:00
|
|
|
def scsi_get_devices(args):
    """Fetch and pretty-print the SCSI device list."""
    devices = rpc.iscsi.scsi_get_devices(args.client)
    print_dict(devices)


p = subparsers.add_parser('scsi_get_devices', aliases=['get_scsi_devices'],
                          help='Display SCSI devices')
p.set_defaults(func=scsi_get_devices)
|
2017-06-06 21:22:03 +00:00
|
|
|
|
2018-11-29 11:12:26 +00:00
|
|
|
# trace
|
2019-09-23 13:11:10 +00:00
|
|
|
def trace_enable_tpoint_group(args):
    """Enable tracing for the named tracepoint group."""
    rpc.trace.trace_enable_tpoint_group(args.client, name=args.name)


p = subparsers.add_parser('trace_enable_tpoint_group', aliases=['enable_tpoint_group'],
                          help='enable trace on a specific tpoint group')
p.add_argument(
    'name', help="""trace group name we want to enable in tpoint_group_mask.
(for example "bdev" for bdev trace group, "all" for all trace groups).""")
p.set_defaults(func=trace_enable_tpoint_group)
|
2018-11-29 11:12:26 +00:00
|
|
|
|
2019-09-23 13:21:57 +00:00
|
|
|
def trace_disable_tpoint_group(args):
    """Disable tracing for the named tracepoint group."""
    rpc.trace.trace_disable_tpoint_group(args.client, name=args.name)


p = subparsers.add_parser('trace_disable_tpoint_group', aliases=['disable_tpoint_group'],
                          help='disable trace on a specific tpoint group')
p.add_argument(
    'name', help="""trace group name we want to disable in tpoint_group_mask.
(for example "bdev" for bdev trace group, "all" for all trace groups).""")
p.set_defaults(func=trace_disable_tpoint_group)
|
2018-11-29 11:12:26 +00:00
|
|
|
|
2019-09-23 13:33:00 +00:00
|
|
|
def trace_get_tpoint_group_mask(args):
    """Fetch and pretty-print the current tracepoint group mask."""
    mask = rpc.trace.trace_get_tpoint_group_mask(args.client)
    print_dict(mask)


p = subparsers.add_parser('trace_get_tpoint_group_mask', aliases=['get_tpoint_group_mask'],
                          help='get trace point group mask')
p.set_defaults(func=trace_get_tpoint_group_mask)
|
2018-11-29 11:12:26 +00:00
|
|
|
|
2017-06-06 21:22:03 +00:00
|
|
|
# log
|
2019-09-18 12:21:59 +00:00
|
|
|
def log_set_flag(args):
    """Enable the named log flag on the target."""
    rpc.log.log_set_flag(args.client, flag=args.flag)


p = subparsers.add_parser('log_set_flag', help='set log flag', aliases=['set_log_flag'])
p.add_argument(
    'flag', help='log flag we want to set. (for example "nvme").')
p.set_defaults(func=log_set_flag)
|
2018-11-27 07:26:59 +00:00
|
|
|
|
2019-09-18 12:31:30 +00:00
|
|
|
def log_clear_flag(args):
    """Clear the named log flag on the target."""
    rpc.log.log_clear_flag(args.client, flag=args.flag)


p = subparsers.add_parser('log_clear_flag', help='clear log flag', aliases=['clear_log_flag'])
p.add_argument(
    'flag', help='log flag we want to clear. (for example "nvme").')
p.set_defaults(func=log_clear_flag)
|
2018-11-27 07:26:59 +00:00
|
|
|
|
2019-09-18 12:39:32 +00:00
|
|
|
def log_get_flags(args):
    """Fetch and pretty-print the current log flags."""
    flags = rpc.log.log_get_flags(args.client)
    print_dict(flags)


p = subparsers.add_parser('log_get_flags', help='get log flags', aliases=['get_log_flags'])
p.set_defaults(func=log_get_flags)
|
2018-11-27 07:26:59 +00:00
|
|
|
|
2019-09-18 11:10:43 +00:00
|
|
|
def log_set_level(args):
    """Set the target's log level."""
    rpc.log.log_set_level(args.client, level=args.level)


p = subparsers.add_parser('log_set_level', aliases=['set_log_level'],
                          help='set log level')
p.add_argument('level', help='log level we want to set. (for example "DEBUG").')
p.set_defaults(func=log_set_level)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-18 11:21:23 +00:00
|
|
|
def log_get_level(args):
    """Fetch and pretty-print the current log level."""
    level = rpc.log.log_get_level(args.client)
    print_dict(level)


p = subparsers.add_parser('log_get_level', aliases=['get_log_level'],
                          help='get log level')
p.set_defaults(func=log_get_level)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-18 10:36:40 +00:00
|
|
|
def log_set_print_level(args):
    """Set the target's log print level."""
    rpc.log.log_set_print_level(args.client, level=args.level)


p = subparsers.add_parser('log_set_print_level', aliases=['set_log_print_level'],
                          help='set log print level')
p.add_argument('level', help='log print level we want to set. (for example "DEBUG").')
p.set_defaults(func=log_set_print_level)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-18 10:54:27 +00:00
|
|
|
def log_get_print_level(args):
    """Fetch and pretty-print the current log print level."""
    level = rpc.log.log_get_print_level(args.client)
    print_dict(level)


p = subparsers.add_parser('log_get_print_level', aliases=['get_log_print_level'],
                          help='get log print level')
p.set_defaults(func=log_get_print_level)
|
2017-06-06 21:22:03 +00:00
|
|
|
|
|
|
|
# lvol
|
2019-08-29 13:31:19 +00:00
|
|
|
def bdev_lvol_create_lvstore(args):
    """Create a logical volume store on a base bdev and print its UUID as JSON."""
    result = rpc.lvol.bdev_lvol_create_lvstore(args.client,
                                               bdev_name=args.bdev_name,
                                               lvs_name=args.lvs_name,
                                               cluster_sz=args.cluster_sz,
                                               clear_method=args.clear_method)
    print_json(result)


p = subparsers.add_parser('bdev_lvol_create_lvstore', aliases=['construct_lvol_store'],
                          help='Add logical volume store on base bdev')
p.add_argument('bdev_name', help='base bdev name')
p.add_argument('lvs_name', help='name for lvol store')
p.add_argument('-c', '--cluster-sz', help='size of cluster (in bytes)', type=int, required=False)
p.add_argument('--clear-method', help="""Change clear method for data region.
Available: none, unmap, write_zeroes""", required=False)
p.set_defaults(func=bdev_lvol_create_lvstore)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-29 13:11:18 +00:00
|
|
|
def bdev_lvol_rename_lvstore(args):
    """Rename an existing logical volume store."""
    rpc.lvol.bdev_lvol_rename_lvstore(args.client,
                                      old_name=args.old_name,
                                      new_name=args.new_name)


p = subparsers.add_parser('bdev_lvol_rename_lvstore', aliases=['rename_lvol_store'],
                          help='Change logical volume store name')
p.add_argument('old_name', help='old name')
p.add_argument('new_name', help='new name')
p.set_defaults(func=bdev_lvol_rename_lvstore)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-29 11:01:30 +00:00
|
|
|
def bdev_lvol_create(args):
    """Create an lvol bdev and print its name as JSON.

    The CLI takes the size in MiB; the RPC wants bytes, hence the conversion.
    """
    size_bytes = args.size * 1024 * 1024
    result = rpc.lvol.bdev_lvol_create(args.client,
                                       lvol_name=args.lvol_name,
                                       size=size_bytes,
                                       thin_provision=args.thin_provision,
                                       clear_method=args.clear_method,
                                       uuid=args.uuid,
                                       lvs_name=args.lvs_name)
    print_json(result)


p = subparsers.add_parser('bdev_lvol_create', aliases=['construct_lvol_bdev'],
                          help='Add a bdev with an logical volume backend')
p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
p.add_argument('-t', '--thin-provision', action='store_true', help='create lvol bdev as thin provisioned')
p.add_argument('-c', '--clear-method', help="""Change default data clusters clear method.
Available: none, unmap, write_zeroes""", required=False)
p.add_argument('lvol_name', help='name for this lvol')
p.add_argument('size', help='size in MiB for this bdev', type=int)
p.set_defaults(func=bdev_lvol_create)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-29 09:40:39 +00:00
|
|
|
def bdev_lvol_snapshot(args):
    """Create a snapshot of an lvol bdev and print the result as JSON."""
    result = rpc.lvol.bdev_lvol_snapshot(args.client,
                                         lvol_name=args.lvol_name,
                                         snapshot_name=args.snapshot_name)
    print_json(result)


p = subparsers.add_parser('bdev_lvol_snapshot', aliases=['snapshot_lvol_bdev'],
                          help='Create a snapshot of an lvol bdev')
p.add_argument('lvol_name', help='lvol bdev name')
p.add_argument('snapshot_name', help='lvol snapshot name')
p.set_defaults(func=bdev_lvol_snapshot)
|
2018-02-22 12:29:49 +00:00
|
|
|
|
2019-08-23 09:35:34 +00:00
|
|
|
def bdev_lvol_clone(args):
    """Clone an lvol snapshot and print the result as JSON."""
    result = rpc.lvol.bdev_lvol_clone(args.client,
                                      snapshot_name=args.snapshot_name,
                                      clone_name=args.clone_name)
    print_json(result)


p = subparsers.add_parser('bdev_lvol_clone', aliases=['clone_lvol_bdev'],
                          help='Create a clone of an lvol snapshot')
p.add_argument('snapshot_name', help='lvol snapshot name')
p.add_argument('clone_name', help='lvol clone name')
p.set_defaults(func=bdev_lvol_clone)
|
2018-02-22 12:29:49 +00:00
|
|
|
|
2019-08-23 09:57:07 +00:00
|
|
|
def bdev_lvol_rename(args):
    """Rename an existing lvol bdev."""
    rpc.lvol.bdev_lvol_rename(
        args.client, old_name=args.old_name, new_name=args.new_name)

p = subparsers.add_parser('bdev_lvol_rename', aliases=['rename_lvol_bdev'],
                          help='Change lvol bdev name')
p.add_argument('old_name', help='lvol bdev name')
p.add_argument('new_name', help='new lvol name')
p.set_defaults(func=bdev_lvol_rename)
|
2018-01-10 10:02:27 +00:00
|
|
|
|
2019-08-29 09:25:48 +00:00
|
|
|
def bdev_lvol_inflate(args):
    """Convert a thin provisioned lvol into a thick provisioned one."""
    rpc.lvol.bdev_lvol_inflate(args.client, name=args.name)

p = subparsers.add_parser('bdev_lvol_inflate', aliases=['inflate_lvol_bdev'],
                          help='Make thin provisioned lvol a thick provisioned lvol')
p.add_argument('name', help='lvol bdev name')
p.set_defaults(func=bdev_lvol_inflate)
|
2018-04-23 13:53:21 +00:00
|
|
|
|
2019-08-26 13:35:53 +00:00
|
|
|
def bdev_lvol_decouple_parent(args):
    """Detach an lvol from its parent snapshot."""
    rpc.lvol.bdev_lvol_decouple_parent(args.client, name=args.name)

p = subparsers.add_parser('bdev_lvol_decouple_parent', aliases=['decouple_parent_lvol_bdev'],
                          help='Decouple parent of lvol')
p.add_argument('name', help='lvol bdev name')
p.set_defaults(func=bdev_lvol_decouple_parent)
|
2018-05-10 10:01:29 +00:00
|
|
|
|
2019-08-26 13:06:42 +00:00
|
|
|
def bdev_lvol_resize(args):
    """Resize an lvol bdev; the CLI takes MiB, the RPC expects bytes."""
    size_bytes = args.size * 1024 * 1024
    rpc.lvol.bdev_lvol_resize(args.client, name=args.name, size=size_bytes)

p = subparsers.add_parser('bdev_lvol_resize', aliases=['resize_lvol_bdev'],
                          help='Resize existing lvol bdev')
p.add_argument('name', help='lvol bdev name')
p.add_argument('size', help='new size in MiB for this bdev', type=int)
p.set_defaults(func=bdev_lvol_resize)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-26 12:49:31 +00:00
|
|
|
def bdev_lvol_set_read_only(args):
    """Mark an lvol bdev as read only."""
    rpc.lvol.bdev_lvol_set_read_only(args.client, name=args.name)

p = subparsers.add_parser('bdev_lvol_set_read_only', aliases=['set_read_only_lvol_bdev'],
                          help='Mark lvol bdev as read only')
p.add_argument('name', help='lvol bdev name')
p.set_defaults(func=bdev_lvol_set_read_only)
|
2019-01-15 15:31:05 +00:00
|
|
|
|
2019-08-29 10:33:16 +00:00
|
|
|
def bdev_lvol_delete(args):
    """Destroy a logical volume bdev."""
    rpc.lvol.bdev_lvol_delete(args.client, name=args.name)

p = subparsers.add_parser('bdev_lvol_delete', aliases=['destroy_lvol_bdev'],
                          help='Destroy a logical volume')
p.add_argument('name', help='lvol bdev name')
p.set_defaults(func=bdev_lvol_delete)
|
2018-04-11 22:12:13 +00:00
|
|
|
|
2019-08-29 12:07:56 +00:00
|
|
|
def bdev_lvol_delete_lvstore(args):
    """Destroy a logical volume store, selected by UUID or by lvs name."""
    rpc.lvol.bdev_lvol_delete_lvstore(args.client,
                                      uuid=args.uuid,
                                      lvs_name=args.lvs_name)

# Fix user-visible grammar in the help text: "an logical" -> "a logical".
p = subparsers.add_parser('bdev_lvol_delete_lvstore', aliases=['destroy_lvol_store'],
                          help='Destroy a logical volume store')
p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
p.set_defaults(func=bdev_lvol_delete_lvstore)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-26 11:54:05 +00:00
|
|
|
def bdev_lvol_get_lvstores(args):
    """Print the current logical volume store list, optionally filtered."""
    stores = rpc.lvol.bdev_lvol_get_lvstores(
        args.client, uuid=args.uuid, lvs_name=args.lvs_name)
    print_dict(stores)

p = subparsers.add_parser('bdev_lvol_get_lvstores', aliases=['get_lvol_stores'],
                          help='Display current logical volume store list')
p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
p.set_defaults(func=bdev_lvol_get_lvstores)
|
2017-06-06 21:22:03 +00:00
|
|
|
|
2019-09-10 08:27:50 +00:00
|
|
|
def bdev_raid_get_bdevs(args):
    """List raid bdev names belonging to the requested category."""
    names = rpc.bdev.bdev_raid_get_bdevs(args.client, category=args.category)
    print_array(names)

p = subparsers.add_parser('bdev_raid_get_bdevs', aliases=['get_raid_bdevs'],
                          help="""This is used to list all the raid bdev names based on the input category
requested. Category should be one of 'all', 'online', 'configuring' or 'offline'. 'all' means all the raid bdevs whether
they are online or configuring or offline. 'online' is the raid bdev which is registered with bdev layer. 'configuring'
is the raid bdev which does not have full configuration discovered yet. 'offline' is the raid bdev which is not registered
with bdev as of now and it has encountered any error or user has requested to offline the raid bdev""")
p.add_argument('category', help='all or online or configuring or offline')
p.set_defaults(func=bdev_raid_get_bdevs)
|
2018-05-08 11:30:29 +00:00
|
|
|
|
2019-09-10 08:41:05 +00:00
|
|
|
def bdev_raid_create(args):
    """Construct a new raid bdev from a whitespace-separated list of base bdevs."""
    # split() with no argument collapses runs of whitespace, so extra spaces in
    # the quoted list no longer yield empty-string bdev names (the old
    # strip().split(" ") loop appended '' for every double space).
    base_bdevs = args.base_bdevs.split()

    rpc.bdev.bdev_raid_create(args.client,
                              name=args.name,
                              strip_size=args.strip_size,
                              strip_size_kb=args.strip_size_kb,
                              raid_level=args.raid_level,
                              base_bdevs=base_bdevs)
p = subparsers.add_parser('bdev_raid_create', aliases=['construct_raid_bdev'],
                          help='Create new raid bdev')
p.add_argument('-n', '--name', help='raid bdev name', required=True)
p.add_argument('-s', '--strip-size', help='strip size in KB (deprecated)', type=int)
p.add_argument('-z', '--strip-size_kb', help='strip size in KB', type=int)
p.add_argument('-r', '--raid-level', help='raid level, only raid level 0 is supported', required=True)
p.add_argument('-b', '--base-bdevs', help='base bdevs name, whitespace separated list in quotes', required=True)
p.set_defaults(func=bdev_raid_create)
|
2018-05-08 11:30:29 +00:00
|
|
|
|
2019-09-10 09:08:35 +00:00
|
|
|
def bdev_raid_delete(args):
    """Delete an existing raid bdev."""
    rpc.bdev.bdev_raid_delete(args.client, name=args.name)
p = subparsers.add_parser('bdev_raid_delete', aliases=['destroy_raid_bdev'],
                          help='Delete existing raid bdev')
p.add_argument('name', help='raid bdev name')
p.set_defaults(func=bdev_raid_delete)
|
2018-05-08 11:30:29 +00:00
|
|
|
|
2018-03-16 18:27:57 +00:00
|
|
|
# split
|
2019-09-11 11:10:43 +00:00
|
|
|
def bdev_split_create(args):
    """Register a split config on a base bdev and print the resulting split bdev names."""
    splits = rpc.bdev.bdev_split_create(args.client,
                                        base_bdev=args.base_bdev,
                                        split_count=args.split_count,
                                        split_size_mb=args.split_size_mb)
    print_array(splits)

p = subparsers.add_parser('bdev_split_create', aliases=['construct_split_vbdev'],
                          help="""Add given disk name to split config. If bdev with base_name
name exist the split bdevs will be created right away, if not split bdevs will be created when base bdev became
available (during examination process).""")
p.add_argument('base_bdev', help='base bdev name')
p.add_argument('-s', '--split-size-mb', help='size in MiB for each bdev', type=int, default=0)
p.add_argument('split_count', help="""Optional - number of split bdevs to create. Total size * split_count must not
exceed the base bdev size.""", type=int)
p.set_defaults(func=bdev_split_create)
|
2018-03-16 18:27:57 +00:00
|
|
|
|
2019-09-11 11:44:17 +00:00
|
|
|
def bdev_split_delete(args):
    """Remove a split config together with all splits created from it."""
    rpc.bdev.bdev_split_delete(args.client, base_bdev=args.base_bdev)

p = subparsers.add_parser('bdev_split_delete', aliases=['destruct_split_vbdev'],
                          help="""Delete split config with all created splits.""")
p.add_argument('base_bdev', help='base bdev name')
p.set_defaults(func=bdev_split_delete)
|
2018-03-16 18:27:57 +00:00
|
|
|
|
2018-10-29 14:35:27 +00:00
|
|
|
# ftl
|
2019-07-04 11:37:23 +00:00
|
|
|
# Defrag limit levels accepted by bdev_ftl_create's --limit/--limit-threshold options.
ftl_valid_limits = ('crit', 'high', 'low', 'start')
|
|
|
|
|
2019-08-21 09:03:04 +00:00
|
|
|
def bdev_ftl_create(args):
    """Create an FTL bdev on a zoned base bdev and print the result."""
    def parse_limits(limits, arg_dict, key_suffix=''):
        # Each comma-separated entry is "<level>:<value>", e.g. "crit:0,high:20".
        for entry in limits.split(','):
            level, value = entry.split(':', 1)
            if level not in ftl_valid_limits:
                raise ValueError('Limit {} is not supported'.format(level))
            arg_dict['limit_' + level + key_suffix] = int(value)

    arg_limits = {}
    if args.limit_threshold:
        parse_limits(args.limit_threshold, arg_limits, '_threshold')
    if args.limit:
        parse_limits(args.limit, arg_limits)

    print_dict(rpc.bdev.bdev_ftl_create(args.client,
                                        name=args.name,
                                        base_bdev=args.base_bdev,
                                        uuid=args.uuid,
                                        cache=args.cache,
                                        allow_open_bands=args.allow_open_bands,
                                        overprovisioning=args.overprovisioning,
                                        l2p_path=args.l2p_path,
                                        use_append=args.use_append,
                                        **arg_limits))

p = subparsers.add_parser('bdev_ftl_create', aliases=['construct_ftl_bdev'], help='Add FTL bdev')
p.add_argument('-b', '--name', help="Name of the bdev", required=True)
p.add_argument('-d', '--base_bdev', help='Name of zoned bdev used as underlying device',
               required=True)
p.add_argument('-u', '--uuid', help='UUID of restored bdev (not applicable when creating new '
               'instance): e.g. b286d19a-0059-4709-abcd-9f7732b1567d (optional)')
p.add_argument('-c', '--cache', help='Name of the bdev to be used as a write buffer cache (optional)')
p.add_argument('-o', '--allow_open_bands', help='Restoring after dirty shutdown without cache will'
               ' result in partial data recovery, instead of error', action='store_true')
p.add_argument('--overprovisioning', help='Percentage of device used for relocation, not exposed'
               ' to user (optional)', type=int)
p.add_argument('--l2p_path', help='Path to persistent memory file or device to store l2p onto, '
               'by default l2p is kept in DRAM and is volatile (optional)')
p.add_argument('--use_append', help='Use appends instead of writes', action='store_true')

limits = p.add_argument_group('Defrag limits', 'Configures defrag limits and thresholds for'
                              ' levels ' + str(ftl_valid_limits)[1:-1])
limits.add_argument('--limit', help='Percentage of allowed user versus internal writes at given'
                    ' levels, e.g. crit:0,high:20,low:80')
limits.add_argument('--limit-threshold', help='Number of free bands triggering a given level of'
                    ' write limiting e.g. crit:1,high:2,low:3,start:4')
p.set_defaults(func=bdev_ftl_create)
|
2018-10-29 14:35:27 +00:00
|
|
|
|
2019-08-21 09:03:04 +00:00
|
|
|
def bdev_ftl_delete(args):
    """Delete an FTL bdev and print the RPC result."""
    result = rpc.bdev.bdev_ftl_delete(args.client, name=args.name)
    print_dict(result)

p = subparsers.add_parser('bdev_ftl_delete', aliases=['delete_ftl_bdev'],
                          help='Delete FTL bdev')
p.add_argument('-b', '--name', help="Name of the bdev", required=True)
p.set_defaults(func=bdev_ftl_delete)
|
2018-10-29 14:35:27 +00:00
|
|
|
|
2019-07-01 11:54:57 +00:00
|
|
|
# vmd
|
|
|
|
def enable_vmd(args):
    """Enable VMD enumeration and print the RPC result."""
    print_dict(rpc.vmd.enable_vmd(args.client))

p = subparsers.add_parser('enable_vmd', help='Enable VMD enumeration')
p.set_defaults(func=enable_vmd)
|
|
|
|
|
2017-06-06 21:22:03 +00:00
|
|
|
# nbd
|
2019-09-17 10:36:40 +00:00
|
|
|
def nbd_start_disk(args):
    """Export a bdev as an nbd disk and print the assigned nbd device path."""
    device = rpc.nbd.nbd_start_disk(
        args.client, bdev_name=args.bdev_name, nbd_device=args.nbd_device)
    print(device)

p = subparsers.add_parser('nbd_start_disk', aliases=['start_nbd_disk'],
                          help='Export a bdev as an nbd disk')
p.add_argument('bdev_name', help='Blockdev name to be exported. Example: Malloc0.')
p.add_argument('nbd_device', help='Nbd device name to be assigned. Example: /dev/nbd0.', nargs='?')
p.set_defaults(func=nbd_start_disk)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-17 10:47:41 +00:00
|
|
|
def nbd_stop_disk(args):
    """Stop an exported nbd disk."""
    rpc.nbd.nbd_stop_disk(args.client, nbd_device=args.nbd_device)

p = subparsers.add_parser('nbd_stop_disk', aliases=['stop_nbd_disk'],
                          help='Stop an nbd disk')
p.add_argument('nbd_device', help='Nbd device name to be stopped. Example: /dev/nbd0.')
p.set_defaults(func=nbd_stop_disk)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-17 10:59:02 +00:00
|
|
|
def nbd_get_disks(args):
    """Print the full (or device-filtered) nbd disk list."""
    disks = rpc.nbd.nbd_get_disks(args.client, nbd_device=args.nbd_device)
    print_dict(disks)

p = subparsers.add_parser('nbd_get_disks', aliases=['get_nbd_disks'],
                          help='Display full or specified nbd device list')
p.add_argument('-n', '--nbd-device', help="Path of the nbd device. Example: /dev/nbd0", required=False)
p.set_defaults(func=nbd_get_disks)
|
2017-06-06 21:22:03 +00:00
|
|
|
|
|
|
|
# net
|
2019-09-18 08:53:32 +00:00
|
|
|
def net_interface_add_ip_address(args):
    """Add an IP address to the NIC identified by ifc_index."""
    rpc.net.net_interface_add_ip_address(
        args.client, ifc_index=args.ifc_index, ip_addr=args.ip_addr)

p = subparsers.add_parser('net_interface_add_ip_address', aliases=['add_ip_address'],
                          help='Add IP address')
p.add_argument('ifc_index', help='ifc index of the nic device.', type=int)
p.add_argument('ip_addr', help='ip address will be added.')
p.set_defaults(func=net_interface_add_ip_address)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-18 09:00:43 +00:00
|
|
|
def net_interface_delete_ip_address(args):
    """Remove an IP address from the NIC identified by ifc_index."""
    rpc.net.net_interface_delete_ip_address(
        args.client, ifc_index=args.ifc_index, ip_addr=args.ip_addr)

p = subparsers.add_parser('net_interface_delete_ip_address', aliases=['delete_ip_address'],
                          help='Delete IP address')
p.add_argument('ifc_index', help='ifc index of the nic device.', type=int)
p.add_argument('ip_addr', help='ip address will be deleted.')
p.set_defaults(func=net_interface_delete_ip_address)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-18 09:07:01 +00:00
|
|
|
def net_get_interfaces(args):
    """Print the current network interface list."""
    print_dict(rpc.net.net_get_interfaces(args.client))

p = subparsers.add_parser('net_get_interfaces', aliases=['get_interfaces'],
                          help='Display current interface list')
p.set_defaults(func=net_get_interfaces)
|
2017-06-06 21:22:03 +00:00
|
|
|
|
|
|
|
# NVMe-oF
|
2019-09-23 10:29:33 +00:00
|
|
|
def nvmf_set_max_subsystems(args):
    """Set the maximum number of NVMf target subsystems."""
    rpc.nvmf.nvmf_set_max_subsystems(args.client,
                                     max_subsystems=args.max_subsystems)

p = subparsers.add_parser('nvmf_set_max_subsystems', aliases=['set_nvmf_target_max_subsystems'],
                          help='Set the maximum number of NVMf target subsystems')
p.add_argument('-x', '--max-subsystems', help='Max number of NVMf subsystems', type=int, required=True)
p.set_defaults(func=nvmf_set_max_subsystems)
|
2018-10-19 20:19:09 +00:00
|
|
|
|
2019-09-23 10:35:43 +00:00
|
|
|
def nvmf_set_config(args):
    """Apply NVMf target configuration options."""
    rpc.nvmf.nvmf_set_config(
        args.client,
        acceptor_poll_rate=args.acceptor_poll_rate,
        conn_sched=args.conn_sched,
        passthru_identify_ctrlr=args.passthru_identify_ctrlr)

p = subparsers.add_parser('nvmf_set_config', aliases=['set_nvmf_target_config'],
                          help='Set NVMf target config')
p.add_argument('-r', '--acceptor-poll-rate', help='Polling interval of the acceptor for incoming connections (usec)', type=int)
p.add_argument('-s', '--conn-sched', help='(Deprecated). Ignored.')
p.add_argument('-i', '--passthru-identify-ctrlr', help="""Passthrough fields like serial number and model number
               when the controller has a single namespace that is an NVMe bdev""", action='store_true')
p.set_defaults(func=nvmf_set_config)
|
2018-06-07 23:00:26 +00:00
|
|
|
|
2018-08-27 22:27:47 +00:00
|
|
|
def nvmf_create_transport(args):
    """Create an NVMf transport with the tuning parameters given on the command line."""
    rpc.nvmf.nvmf_create_transport(
        args.client,
        trtype=args.trtype,
        tgt_name=args.tgt_name,
        max_queue_depth=args.max_queue_depth,
        max_qpairs_per_ctrlr=args.max_qpairs_per_ctrlr,
        max_io_qpairs_per_ctrlr=args.max_io_qpairs_per_ctrlr,
        in_capsule_data_size=args.in_capsule_data_size,
        max_io_size=args.max_io_size,
        io_unit_size=args.io_unit_size,
        max_aq_depth=args.max_aq_depth,
        num_shared_buffers=args.num_shared_buffers,
        buf_cache_size=args.buf_cache_size,
        max_srq_depth=args.max_srq_depth,
        no_srq=args.no_srq,
        c2h_success=args.c2h_success,
        dif_insert_or_strip=args.dif_insert_or_strip,
        sock_priority=args.sock_priority,
        acceptor_backlog=args.acceptor_backlog,
        abort_timeout_sec=args.abort_timeout_sec)

p = subparsers.add_parser('nvmf_create_transport', help='Create NVMf transport')
p.add_argument('-t', '--trtype', help='Transport type (ex. RDMA)', type=str, required=True)
p.add_argument('-g', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument('-q', '--max-queue-depth', help='Max number of outstanding I/O per queue', type=int)
p.add_argument('-p', '--max-qpairs-per-ctrlr', help="""Max number of SQ and CQ per controller.
               Deprecated, use max-io-qpairs-per-ctrlr""", type=int)
p.add_argument('-m', '--max-io-qpairs-per-ctrlr', help='Max number of IO qpairs per controller', type=int)
p.add_argument('-c', '--in-capsule-data-size', help='Max number of in-capsule data size', type=int)
p.add_argument('-i', '--max-io-size', help='Max I/O size (bytes)', type=int)
p.add_argument('-u', '--io-unit-size', help='I/O unit size (bytes)', type=int)
p.add_argument('-a', '--max-aq-depth', help='Max number of admin cmds per AQ', type=int)
p.add_argument('-n', '--num-shared-buffers', help='The number of pooled data buffers available to the transport', type=int)
p.add_argument('-b', '--buf-cache-size', help='The number of shared buffers to reserve for each poll group', type=int)
p.add_argument('-s', '--max-srq-depth', help='Max number of outstanding I/O per SRQ. Relevant only for RDMA transport', type=int)
p.add_argument('-r', '--no-srq', action='store_true', help='Disable per-thread shared receive queue. Relevant only for RDMA transport')
p.add_argument('-o', '--c2h-success', action='store_false', help='Disable C2H success optimization. Relevant only for TCP transport')
p.add_argument('-f', '--dif-insert-or-strip', action='store_true', help='Enable DIF insert/strip. Relevant only for TCP transport')
p.add_argument('-y', '--sock-priority', help='The sock priority of the tcp connection. Relevant only for TCP transport', type=int)
p.add_argument('-l', '--acceptor_backlog', help='Pending connections allowed at one time. Relevant only for RDMA transport', type=int)
p.add_argument('-x', '--abort-timeout-sec', help='Abort execution timeout value, in seconds', type=int)
p.set_defaults(func=nvmf_create_transport)
|
|
|
|
|
2019-09-23 10:42:27 +00:00
|
|
|
def nvmf_get_transports(args):
    """Print the NVMf transports of the (optionally named) target."""
    print_dict(rpc.nvmf.nvmf_get_transports(args.client, tgt_name=args.tgt_name))

p = subparsers.add_parser('nvmf_get_transports', aliases=['get_nvmf_transports'],
                          help='Display nvmf transports')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_get_transports)
|
2018-10-23 22:07:42 +00:00
|
|
|
|
2019-09-20 09:35:36 +00:00
|
|
|
def nvmf_get_subsystems(args):
    """Print the NVMf subsystems of the (optionally named) target."""
    print_dict(rpc.nvmf.nvmf_get_subsystems(args.client, tgt_name=args.tgt_name))

p = subparsers.add_parser('nvmf_get_subsystems', aliases=['get_nvmf_subsystems'],
                          help='Display nvmf subsystems')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_get_subsystems)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-20 10:22:44 +00:00
|
|
|
# Create an NVMe-oF subsystem; forwards the parsed CLI options verbatim to the RPC.
def nvmf_create_subsystem(args):
    rpc.nvmf.nvmf_create_subsystem(args.client,
                                   nqn=args.nqn,
                                   tgt_name=args.tgt_name,
                                   serial_number=args.serial_number,
                                   model_number=args.model_number,
                                   allow_any_host=args.allow_any_host,
                                   max_namespaces=args.max_namespaces,
                                   ana_reporting=args.ana_reporting)

p = subparsers.add_parser('nvmf_create_subsystem', aliases=['nvmf_subsystem_create'],
                          help='Create an NVMe-oF subsystem')
p.add_argument('nqn', help='Subsystem NQN (ASCII)')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument("-s", "--serial-number", help="""
Format: 'sn' etc
Example: 'SPDK00000000000001'""", default='00000000000000000000')
p.add_argument("-d", "--model-number", help="""
Format: 'mn' etc
Example: 'SPDK Controller'""", default='SPDK bdev Controller')
p.add_argument("-a", "--allow-any-host", action='store_true', help="Allow any host to connect (don't enforce host NQN whitelist)")
p.add_argument("-m", "--max-namespaces", help="Maximum number of namespaces allowed",
               type=int, default=0)
p.add_argument("-r", "--ana-reporting", action='store_true', help="Enable ANA reporting feature")
p.set_defaults(func=nvmf_create_subsystem)
|
2018-09-10 17:40:19 +00:00
|
|
|
|
2019-09-23 10:17:30 +00:00
|
|
|
# Delete the subsystem identified by its NQN.
def nvmf_delete_subsystem(args):
    rpc.nvmf.nvmf_delete_subsystem(args.client,
                                   nqn=args.subsystem_nqn,
                                   tgt_name=args.tgt_name)

p = subparsers.add_parser('nvmf_delete_subsystem', aliases=['delete_nvmf_subsystem'],
                          help='Delete a nvmf subsystem')
p.add_argument('subsystem_nqn',
               help='subsystem nqn to be deleted. Example: nqn.2016-06.io.spdk:cnode1.')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_delete_subsystem)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
|
|
|
# Add a transport listener (trtype/traddr/trsvcid) to a subsystem.
# NOTE: '-p' is used for --tgt_name here because '-t' is taken by --trtype.
def nvmf_subsystem_add_listener(args):
    rpc.nvmf.nvmf_subsystem_add_listener(args.client,
                                         nqn=args.nqn,
                                         trtype=args.trtype,
                                         traddr=args.traddr,
                                         tgt_name=args.tgt_name,
                                         adrfam=args.adrfam,
                                         trsvcid=args.trsvcid)

p = subparsers.add_parser('nvmf_subsystem_add_listener', help='Add a listener to an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('-t', '--trtype', help='NVMe-oF transport type: e.g., rdma', required=True)
p.add_argument('-a', '--traddr', help='NVMe-oF transport address: e.g., an ip address', required=True)
p.add_argument('-p', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument('-f', '--adrfam', help='NVMe-oF transport adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
p.add_argument('-s', '--trsvcid', help='NVMe-oF transport service id: e.g., a port number')
p.set_defaults(func=nvmf_subsystem_add_listener)


# Remove a transport listener from a subsystem (mirror of add_listener above).
def nvmf_subsystem_remove_listener(args):
    rpc.nvmf.nvmf_subsystem_remove_listener(args.client,
                                            nqn=args.nqn,
                                            trtype=args.trtype,
                                            traddr=args.traddr,
                                            tgt_name=args.tgt_name,
                                            adrfam=args.adrfam,
                                            trsvcid=args.trsvcid)

p = subparsers.add_parser('nvmf_subsystem_remove_listener', help='Remove a listener from an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('-t', '--trtype', help='NVMe-oF transport type: e.g., rdma', required=True)
p.add_argument('-a', '--traddr', help='NVMe-oF transport address: e.g., an ip address', required=True)
p.add_argument('-p', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument('-f', '--adrfam', help='NVMe-oF transport adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
p.add_argument('-s', '--trsvcid', help='NVMe-oF transport service id: e.g., a port number')
p.set_defaults(func=nvmf_subsystem_remove_listener)
|
|
|
|
|
|
|
|
# Attach a bdev as a namespace of a subsystem; optional identifiers pass through as None.
def nvmf_subsystem_add_ns(args):
    rpc.nvmf.nvmf_subsystem_add_ns(args.client,
                                   nqn=args.nqn,
                                   bdev_name=args.bdev_name,
                                   tgt_name=args.tgt_name,
                                   ptpl_file=args.ptpl_file,
                                   nsid=args.nsid,
                                   nguid=args.nguid,
                                   eui64=args.eui64,
                                   uuid=args.uuid)

p = subparsers.add_parser('nvmf_subsystem_add_ns', help='Add a namespace to an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('bdev_name', help='The name of the bdev that will back this namespace')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument('-p', '--ptpl-file', help='The persistent reservation storage location (optional)', type=str)
p.add_argument('-n', '--nsid', help='The requested NSID (optional)', type=int)
p.add_argument('-g', '--nguid', help='Namespace globally unique identifier (optional)')
p.add_argument('-e', '--eui64', help='Namespace EUI-64 identifier (optional)')
p.add_argument('-u', '--uuid', help='Namespace UUID (optional)')
p.set_defaults(func=nvmf_subsystem_add_ns)
|
|
|
|
|
|
|
|
def nvmf_subsystem_remove_ns(args):
    """Detach namespace ``nsid`` from the subsystem identified by ``nqn``."""
    rpc.nvmf.nvmf_subsystem_remove_ns(args.client,
                                      nqn=args.nqn,
                                      nsid=args.nsid,
                                      tgt_name=args.tgt_name)

# Help-text fix: a namespace is removed *from* a subsystem, not "to" one.
p = subparsers.add_parser('nvmf_subsystem_remove_ns', help='Remove a namespace from an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('nsid', help='The requested NSID', type=int)
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_remove_ns)
|
|
|
|
|
|
|
|
# Whitelist a host NQN on a subsystem.
def nvmf_subsystem_add_host(args):
    rpc.nvmf.nvmf_subsystem_add_host(args.client,
                                     nqn=args.nqn,
                                     host=args.host,
                                     tgt_name=args.tgt_name)

p = subparsers.add_parser('nvmf_subsystem_add_host', help='Add a host to an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('host', help='Host NQN to allow')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_add_host)


# Remove a host NQN from a subsystem's whitelist (mirror of add_host above).
def nvmf_subsystem_remove_host(args):
    rpc.nvmf.nvmf_subsystem_remove_host(args.client,
                                        nqn=args.nqn,
                                        host=args.host,
                                        tgt_name=args.tgt_name)

p = subparsers.add_parser('nvmf_subsystem_remove_host', help='Remove a host from an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('host', help='Host NQN to remove')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_remove_host)
|
|
|
|
|
|
|
|
# Toggle the "allow any host" policy of a subsystem.
# NOTE(review): only args.disable is forwarded to the RPC; '-e/--enable' is
# parsed but never read here — presumably "enabled" is the RPC-side behavior
# whenever disable is False. Confirm against rpc.nvmf before relying on '-e'.
def nvmf_subsystem_allow_any_host(args):
    rpc.nvmf.nvmf_subsystem_allow_any_host(args.client,
                                           nqn=args.nqn,
                                           disable=args.disable,
                                           tgt_name=args.tgt_name)

p = subparsers.add_parser('nvmf_subsystem_allow_any_host', help='Allow any host to connect to the subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('-e', '--enable', action='store_true', help='Enable allowing any host')
p.add_argument('-d', '--disable', action='store_true', help='Disable allowing any host')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_allow_any_host)
|
2018-01-23 22:03:38 +00:00
|
|
|
|
2020-08-19 03:44:22 +00:00
|
|
|
# Print the controllers connected to a subsystem.
def nvmf_subsystem_get_controllers(args):
    print_dict(rpc.nvmf.nvmf_subsystem_get_controllers(args.client,
                                                       nqn=args.nqn,
                                                       tgt_name=args.tgt_name))

p = subparsers.add_parser('nvmf_subsystem_get_controllers',
                          help='Display controllers of an NVMe-oF subsystem.')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
# NOTE(review): spelled '--tgt-name' here while sibling commands use
# '--tgt_name'; argparse maps both to args.tgt_name, but the CLI is inconsistent.
p.add_argument('-t', '--tgt-name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_get_controllers)


# Print current NVMe-oF target statistics.
def nvmf_get_stats(args):
    print_dict(rpc.nvmf.nvmf_get_stats(args.client, tgt_name=args.tgt_name))

p = subparsers.add_parser(
    'nvmf_get_stats', help='Display current statistics for NVMf subsystem')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_get_stats)
|
|
|
|
|
2017-06-06 21:22:03 +00:00
|
|
|
# pmem
|
2019-08-29 10:36:38 +00:00
|
|
|
def bdev_pmem_create_pool(args):
    """Create a pmemblk pool file large enough for total_size MiB of block_size blocks."""
    # Floor division keeps the computation in exact integer arithmetic;
    # int((a * b) / c) routed huge sizes through a float and could lose precision.
    num_blocks = (args.total_size * 1024 * 1024) // args.block_size
    rpc.pmem.bdev_pmem_create_pool(args.client,
                                   pmem_file=args.pmem_file,
                                   num_blocks=num_blocks,
                                   block_size=args.block_size)

p = subparsers.add_parser('bdev_pmem_create_pool', aliases=['create_pmem_pool'],
                          help='Create pmem pool')
p.add_argument('pmem_file', help='Path to pmemblk pool file')
# Help-text fix: this sizes the pmem pool, not a "malloc bdev" (copy/paste error).
p.add_argument('total_size', help='Size of pmem pool in MB (int > 0)', type=int)
p.add_argument('block_size', help='Block size for this pmem pool', type=int)
p.set_defaults(func=bdev_pmem_create_pool)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-08-30 08:38:54 +00:00
|
|
|
# Print info/consistency check for a pmemblk pool file.
def bdev_pmem_get_pool_info(args):
    print_dict(rpc.pmem.bdev_pmem_get_pool_info(args.client,
                                                pmem_file=args.pmem_file))

p = subparsers.add_parser('bdev_pmem_get_pool_info', aliases=['pmem_pool_info'],
                          help='Display pmem pool info and check consistency')
p.add_argument('pmem_file', help='Path to pmemblk pool file')
p.set_defaults(func=bdev_pmem_get_pool_info)


# Delete a pmemblk pool file.
def bdev_pmem_delete_pool(args):
    rpc.pmem.bdev_pmem_delete_pool(args.client,
                                   pmem_file=args.pmem_file)

p = subparsers.add_parser('bdev_pmem_delete_pool', aliases=['delete_pmem_pool'],
                          help='Delete pmem pool')
p.add_argument('pmem_file', help='Path to pmemblk pool file')
p.set_defaults(func=bdev_pmem_delete_pool)
|
2017-06-06 21:22:03 +00:00
|
|
|
|
2018-02-20 16:03:57 +00:00
|
|
|
# subsystem
|
2019-09-18 08:24:58 +00:00
|
|
|
# Print the framework subsystem dependency graph.
def framework_get_subsystems(args):
    print_dict(rpc.subsystem.framework_get_subsystems(args.client))

p = subparsers.add_parser('framework_get_subsystems', aliases=['get_subsystems'],
                          help="""Print subsystems array in initialization order. Each subsystem
entry contain (unsorted) array of subsystems it depends on.""")
p.set_defaults(func=framework_get_subsystems)


# Print the runtime configuration of a single named subsystem.
def framework_get_config(args):
    print_dict(rpc.subsystem.framework_get_config(args.client, args.name))

p = subparsers.add_parser('framework_get_config', aliases=['get_subsystem_config'],
                          help="""Print subsystem configuration""")
p.add_argument('name', help='Name of subsystem to query')
p.set_defaults(func=framework_get_config)
|
2018-02-26 19:16:59 +00:00
|
|
|
|
2017-06-06 21:22:03 +00:00
|
|
|
# vhost
|
2019-09-30 12:16:22 +00:00
|
|
|
# Configure interrupt coalescing parameters on a vhost controller.
def vhost_controller_set_coalescing(args):
    rpc.vhost.vhost_controller_set_coalescing(args.client,
                                              ctrlr=args.ctrlr,
                                              delay_base_us=args.delay_base_us,
                                              iops_threshold=args.iops_threshold)

p = subparsers.add_parser('vhost_controller_set_coalescing', aliases=['set_vhost_controller_coalescing'],
                          help='Set vhost controller coalescing')
p.add_argument('ctrlr', help='controller name')
p.add_argument('delay_base_us', help='Base delay time', type=int)
p.add_argument('iops_threshold', help='IOPS threshold when coalescing is enabled', type=int)
p.set_defaults(func=vhost_controller_set_coalescing)


# Create a new vhost-scsi controller, optionally pinned to a CPU mask.
def vhost_create_scsi_controller(args):
    rpc.vhost.vhost_create_scsi_controller(args.client,
                                           ctrlr=args.ctrlr,
                                           cpumask=args.cpumask)

p = subparsers.add_parser(
    'vhost_create_scsi_controller', aliases=['construct_vhost_scsi_controller'],
    help='Add new vhost controller')
p.add_argument('ctrlr', help='controller name')
p.add_argument('--cpumask', help='cpu mask for this controller')
p.set_defaults(func=vhost_create_scsi_controller)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-27 20:53:41 +00:00
|
|
|
def vhost_scsi_controller_add_target(args):
    """Attach a bdev as a LUN at the given SCSI target slot of a vhost controller."""
    print_json(rpc.vhost.vhost_scsi_controller_add_target(args.client,
                                                          ctrlr=args.ctrlr,
                                                          scsi_target_num=args.scsi_target_num,
                                                          bdev_name=args.bdev_name))

p = subparsers.add_parser('vhost_scsi_controller_add_target',
                          aliases=['add_vhost_scsi_lun'],
                          help='Add lun to vhost controller')
# Help-text typo fix: 'conntroller' -> 'controller'.
p.add_argument('ctrlr', help='controller name where add lun')
p.add_argument('scsi_target_num', help='scsi_target_num', type=int)
p.add_argument('bdev_name', help='bdev name')
p.set_defaults(func=vhost_scsi_controller_add_target)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-27 21:06:04 +00:00
|
|
|
# Detach the LUN at the given SCSI target slot from a vhost controller.
def vhost_scsi_controller_remove_target(args):
    rpc.vhost.vhost_scsi_controller_remove_target(args.client,
                                                  ctrlr=args.ctrlr,
                                                  scsi_target_num=args.scsi_target_num)

p = subparsers.add_parser('vhost_scsi_controller_remove_target',
                          aliases=['remove_vhost_scsi_target'],
                          help='Remove target from vhost controller')
p.add_argument('ctrlr', help='controller name to remove target from')
p.add_argument('scsi_target_num', help='scsi_target_num', type=int)
p.set_defaults(func=vhost_scsi_controller_remove_target)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-30 10:51:55 +00:00
|
|
|
# Create a vhost-blk controller backed by the named bdev.
def vhost_create_blk_controller(args):
    rpc.vhost.vhost_create_blk_controller(args.client,
                                          ctrlr=args.ctrlr,
                                          dev_name=args.dev_name,
                                          cpumask=args.cpumask,
                                          readonly=args.readonly,
                                          packed_ring=args.packed_ring)

p = subparsers.add_parser('vhost_create_blk_controller',
                          aliases=['construct_vhost_blk_controller'],
                          help='Add a new vhost block controller')
p.add_argument('ctrlr', help='controller name')
p.add_argument('dev_name', help='device name')
p.add_argument('--cpumask', help='cpu mask for this controller')
p.add_argument("-r", "--readonly", action='store_true', help='Set controller as read-only')
p.add_argument("-p", "--packed_ring", action='store_true', help='Set controller as packed ring supported')
p.set_defaults(func=vhost_create_blk_controller)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-10-03 20:58:51 +00:00
|
|
|
def vhost_create_nvme_controller(args):
    """Create a vhost NVMe controller with the requested number of IO queues."""
    rpc.vhost.vhost_create_nvme_controller(args.client,
                                           ctrlr=args.ctrlr,
                                           io_queues=args.io_queues,
                                           cpumask=args.cpumask)

# Bug fix: the alias previously repeated the primary command name
# ('vhost_create_nvme_controller'), which is useless and can conflict in the
# subparser map; use the legacy 'construct_*' spelling like the sibling
# vhost_create_scsi/blk commands do.
p = subparsers.add_parser('vhost_create_nvme_controller', aliases=['construct_vhost_nvme_controller'],
                          help='Add new vhost controller')
p.add_argument('ctrlr', help='controller name')
p.add_argument('io_queues', help='number of IO queues for the controller', type=int)
p.add_argument('--cpumask', help='cpu mask for this controller')
p.set_defaults(func=vhost_create_nvme_controller)
|
2018-01-12 04:06:49 +00:00
|
|
|
|
2019-10-03 21:56:32 +00:00
|
|
|
def vhost_nvme_controller_add_ns(args):
    """Attach a bdev as a new namespace on a vhost NVMe controller."""
    rpc.vhost.vhost_nvme_controller_add_ns(args.client,
                                           ctrlr=args.ctrlr,
                                           bdev_name=args.bdev_name)

p = subparsers.add_parser('vhost_nvme_controller_add_ns', aliases=['add_vhost_nvme_ns'],
                          help='Add a Namespace to vhost controller')
# Help-text typo fix: 'conntroller' -> 'controller'.
p.add_argument('ctrlr', help='controller name where add a Namespace')
p.add_argument('bdev_name', help='block device name for a new Namespace')
p.set_defaults(func=vhost_nvme_controller_add_ns)
|
2018-01-12 04:06:49 +00:00
|
|
|
|
2019-09-30 12:00:12 +00:00
|
|
|
# Print one vhost controller (by name) or all of them.
def vhost_get_controllers(args):
    print_dict(rpc.vhost.vhost_get_controllers(args.client, args.name))

p = subparsers.add_parser('vhost_get_controllers', aliases=['get_vhost_controllers'],
                          help='List all or specific vhost controller(s)')
p.add_argument('-n', '--name', help="Name of vhost controller", required=False)
p.set_defaults(func=vhost_get_controllers)


# Delete a vhost controller by name.
def vhost_delete_controller(args):
    rpc.vhost.vhost_delete_controller(args.client,
                                      ctrlr=args.ctrlr)

p = subparsers.add_parser('vhost_delete_controller', aliases=['remove_vhost_controller'],
                          help='Delete a vhost controller')
p.add_argument('ctrlr', help='controller name')
p.set_defaults(func=vhost_delete_controller)
|
2018-03-19 22:24:32 +00:00
|
|
|
|
2019-09-11 12:50:38 +00:00
|
|
|
# Attach a virtio controller and print the names of the bdevs it exposes.
def bdev_virtio_attach_controller(args):
    print_array(rpc.vhost.bdev_virtio_attach_controller(args.client,
                                                        name=args.name,
                                                        trtype=args.trtype,
                                                        traddr=args.traddr,
                                                        dev_type=args.dev_type,
                                                        vq_count=args.vq_count,
                                                        vq_size=args.vq_size))

p = subparsers.add_parser('bdev_virtio_attach_controller', aliases=['construct_virtio_dev'],
                          help="""Attach virtio controller using provided
transport type and device type. This will also create bdevs for any block devices connected to the
controller (for example, SCSI devices for a virtio-scsi controller).
Result is array of added bdevs.""")
p.add_argument('name', help="Use this name as base for new created bdevs")
p.add_argument('-t', '--trtype',
               help='Virtio target transport type: pci or user', required=True)
p.add_argument('-a', '--traddr',
               help='Transport type specific target address: e.g. UNIX domain socket path or BDF', required=True)
p.add_argument('-d', '--dev-type',
               help='Device type: blk or scsi', required=True)
p.add_argument('--vq-count', help='Number of virtual queues to be used.', type=int)
p.add_argument('--vq-size', help='Size of each queue', type=int)
p.set_defaults(func=bdev_virtio_attach_controller)
|
2018-03-27 13:45:10 +00:00
|
|
|
|
2019-09-11 12:03:46 +00:00
|
|
|
# Print all attached Virtio-SCSI devices.
def bdev_virtio_scsi_get_devices(args):
    print_dict(rpc.vhost.bdev_virtio_scsi_get_devices(args.client))

p = subparsers.add_parser('bdev_virtio_scsi_get_devices', aliases=['get_virtio_scsi_devs'],
                          help='List all Virtio-SCSI devices.')
p.set_defaults(func=bdev_virtio_scsi_get_devices)


# Detach a virtio device (removes all bdevs it exposed).
def bdev_virtio_detach_controller(args):
    rpc.vhost.bdev_virtio_detach_controller(args.client,
                                            name=args.name)

p = subparsers.add_parser('bdev_virtio_detach_controller', aliases=['remove_virtio_bdev'],
                          help="""Remove a Virtio device
This will delete all bdevs exposed by this device""")
p.add_argument('name', help='Virtio device name. E.g. VirtioUser0')
p.set_defaults(func=bdev_virtio_detach_controller)
|
2018-07-26 14:08:16 +00:00
|
|
|
|
2019-09-20 12:26:24 +00:00
|
|
|
# OCSSD
|
|
|
|
# Create a zoned bdev on an Open Channel controller; prints the new bdev name.
def bdev_ocssd_create(args):
    # --nsid has no type=int on the parser, so convert here; None means "default ns".
    nsid = int(args.nsid) if args.nsid is not None else None
    print_json(rpc.bdev.bdev_ocssd_create(args.client,
                                          ctrlr_name=args.ctrlr_name,
                                          bdev_name=args.name,
                                          nsid=nsid,
                                          range=args.range))

p = subparsers.add_parser('bdev_ocssd_create',
                          help='Creates zoned bdev on specified Open Channel controller')
p.add_argument('-c', '--ctrlr_name', help='Name of the OC NVMe controller', required=True)
p.add_argument('-b', '--name', help='Name of the bdev to create', required=True)
p.add_argument('-n', '--nsid', help='Namespace ID', required=False)
p.add_argument('-r', '--range', help='Parallel unit range (in the form of BEGIN-END (inclusive))',
               required=False)
p.set_defaults(func=bdev_ocssd_create)


# Delete an Open Channel bdev by name.
def bdev_ocssd_delete(args):
    print_json(rpc.bdev.bdev_ocssd_delete(args.client,
                                          name=args.name))

p = subparsers.add_parser('bdev_ocssd_delete',
                          help='Deletes Open Channel bdev')
p.add_argument('name', help='Name of the Open Channel bdev')
p.set_defaults(func=bdev_ocssd_delete)
|
|
|
|
|
2018-06-12 23:31:20 +00:00
|
|
|
# ioat
|
2020-02-05 17:10:05 +00:00
|
|
|
def ioat_scan_accel_engine(args):
    """Enable and scan the IOAT accel engine, optionally restricted to a PCI whitelist."""
    # str.split() with no separator collapses runs of whitespace, so inputs like
    # "addr1  addr2" no longer produce empty-string entries the way the old
    # .strip().split(" ") append-loop did.
    pci_whitelist = args.pci_whitelist.split() if args.pci_whitelist else []
    rpc.ioat.ioat_scan_accel_engine(args.client, pci_whitelist)

p = subparsers.add_parser('ioat_scan_accel_engine',
                          aliases=['ioat_scan_copy_engine', 'scan_ioat_copy_engine'],
                          help='Set scan and enable IOAT accel engine offload.')
p.add_argument('-w', '--pci-whitelist', help="""Whitespace-separated list of PCI addresses in
domain:bus:device.function format or domain.bus.device.function format""")
p.set_defaults(func=ioat_scan_accel_engine)
|
2018-06-12 23:31:20 +00:00
|
|
|
|
2020-04-07 16:42:02 +00:00
|
|
|
# idxd
|
|
|
|
# Enable the IDXD accel engine with a pre-defined device configuration.
def idxd_scan_accel_engine(args):
    rpc.idxd.idxd_scan_accel_engine(args.client, config_number=args.config_number)

p = subparsers.add_parser('idxd_scan_accel_engine',
                          help='Set config and enable idxd accel engine offload.')
p.add_argument('-c', '--config-number', help="""Pre-defined configuration number to use. See docs.""", type=int)
p.set_defaults(func=idxd_scan_accel_engine)
|
|
|
|
|
2019-09-12 09:40:38 +00:00
|
|
|
# opal
|
2019-10-23 13:29:17 +00:00
|
|
|
# Take Opal ownership of an NVMe controller and activate the locking SP.
def bdev_nvme_opal_init(args):
    rpc.nvme.bdev_nvme_opal_init(args.client,
                                 nvme_ctrlr_name=args.nvme_ctrlr_name,
                                 password=args.password)

p = subparsers.add_parser('bdev_nvme_opal_init', help='take ownership and activate')
p.add_argument('-b', '--nvme-ctrlr-name', help='nvme ctrlr name')
p.add_argument('-p', '--password', help='password for admin')
p.set_defaults(func=bdev_nvme_opal_init)


# Revert the controller's Opal state to factory defaults.
def bdev_nvme_opal_revert(args):
    rpc.nvme.bdev_nvme_opal_revert(args.client,
                                   nvme_ctrlr_name=args.nvme_ctrlr_name,
                                   password=args.password)

p = subparsers.add_parser('bdev_nvme_opal_revert', help='Revert to default factory settings')
p.add_argument('-b', '--nvme-ctrlr-name', help='nvme ctrlr name')
p.add_argument('-p', '--password', help='password')
p.set_defaults(func=bdev_nvme_opal_revert)
|
|
|
|
|
2019-09-12 09:40:38 +00:00
|
|
|
# Create a virtual opal bdev covering one locking range; prints the new bdev name.
def bdev_opal_create(args):
    print_json(rpc.bdev.bdev_opal_create(args.client,
                                         nvme_ctrlr_name=args.nvme_ctrlr_name,
                                         nsid=args.nsid,
                                         locking_range_id=args.locking_range_id,
                                         range_start=args.range_start,
                                         range_length=args.range_length,
                                         password=args.password))

p = subparsers.add_parser('bdev_opal_create', help="""Create opal bdev on specified NVMe controller""")
p.add_argument('-b', '--nvme-ctrlr-name', help='nvme ctrlr name', required=True)
p.add_argument('-n', '--nsid', help='namespace ID (only support nsid=1 for now)', type=int, required=True)
p.add_argument('-i', '--locking-range-id', help='locking range id', type=int, required=True)
p.add_argument('-s', '--range-start', help='locking range start LBA', type=int, required=True)
p.add_argument('-l', '--range-length', help='locking range length (in blocks)', type=int, required=True)
p.add_argument('-p', '--password', help='admin password', required=True)
p.set_defaults(func=bdev_opal_create)
|
|
|
|
|
2019-10-23 13:29:17 +00:00
|
|
|
# Print locking-range info for an opal bdev.
def bdev_opal_get_info(args):
    print_dict(rpc.bdev.bdev_opal_get_info(args.client,
                                           bdev_name=args.bdev_name,
                                           password=args.password))

p = subparsers.add_parser('bdev_opal_get_info', help='get opal locking range info for this bdev')
p.add_argument('-b', '--bdev-name', help='opal bdev')
p.add_argument('-p', '--password', help='password')
p.set_defaults(func=bdev_opal_get_info)


# Delete a virtual opal bdev (requires the admin password).
def bdev_opal_delete(args):
    rpc.bdev.bdev_opal_delete(args.client,
                              bdev_name=args.bdev_name,
                              password=args.password)

p = subparsers.add_parser('bdev_opal_delete', help="""delete a virtual opal bdev""")
p.add_argument('-b', '--bdev-name', help='opal virtual bdev', required=True)
p.add_argument('-p', '--password', help='admin password', required=True)
p.set_defaults(func=bdev_opal_delete)
|
|
|
|
|
2019-10-09 11:55:46 +00:00
|
|
|
# Grant a new user (by numeric ID) the ability to lock/unlock an opal bdev.
def bdev_opal_new_user(args):
    rpc.bdev.bdev_opal_new_user(args.client,
                                bdev_name=args.bdev_name,
                                admin_password=args.admin_password,
                                user_id=args.user_id,
                                user_password=args.user_password)

p = subparsers.add_parser('bdev_opal_new_user', help="""Add a user to opal bdev who can set lock state for this bdev""")
p.add_argument('-b', '--bdev-name', help='opal bdev', required=True)
p.add_argument('-p', '--admin-password', help='admin password', required=True)
p.add_argument('-i', '--user-id', help='ID for new user', type=int, required=True)
p.add_argument('-u', '--user-password', help='password set for this user', required=True)
p.set_defaults(func=bdev_opal_new_user)


# Set the lock state of an opal bdev on behalf of a user or admin.
def bdev_opal_set_lock_state(args):
    rpc.bdev.bdev_opal_set_lock_state(args.client,
                                      bdev_name=args.bdev_name,
                                      user_id=args.user_id,
                                      password=args.password,
                                      lock_state=args.lock_state)

p = subparsers.add_parser('bdev_opal_set_lock_state', help="""set lock state for an opal bdev""")
p.add_argument('-b', '--bdev-name', help='opal bdev', required=True)
p.add_argument('-i', '--user-id', help='ID of the user who want to set lock state, either admin or a user assigned to this bdev',
               type=int, required=True)
p.add_argument('-p', '--password', help='password of this user', required=True)
p.add_argument('-l', '--lock-state', help='lock state to set, choose from {readwrite, readonly, rwlock}', required=True)
p.set_defaults(func=bdev_opal_set_lock_state)
|
|
|
|
|
2019-08-28 08:09:45 +00:00
|
|
|
# bdev_nvme_send_cmd
|
|
|
|
# Send a raw (passthrough) NVMe command to a controller and print the response.
def bdev_nvme_send_cmd(args):
    print_dict(rpc.nvme.bdev_nvme_send_cmd(args.client,
                                           name=args.nvme_name,
                                           cmd_type=args.cmd_type,
                                           data_direction=args.data_direction,
                                           cmdbuf=args.cmdbuf,
                                           data=args.data,
                                           metadata=args.metadata,
                                           data_len=args.data_length,
                                           metadata_len=args.metadata_length,
                                           timeout_ms=args.timeout_ms))

p = subparsers.add_parser('bdev_nvme_send_cmd', aliases=['send_nvme_cmd'],
                          help='NVMe passthrough cmd.')
p.add_argument('-n', '--nvme-name', help="""Name of the operating NVMe controller""")
p.add_argument('-t', '--cmd-type', help="""Type of nvme cmd. Valid values are: admin, io""")
p.add_argument('-r', '--data-direction', help="""Direction of data transfer. Valid values are: c2h, h2c""")
p.add_argument('-c', '--cmdbuf', help="""NVMe command encoded by base64 urlsafe""")
p.add_argument('-d', '--data', help="""Data transferring to controller from host, encoded by base64 urlsafe""")
p.add_argument('-m', '--metadata', help="""Metadata transferring to controller from host, encoded by base64 urlsafe""")
p.add_argument('-D', '--data-length', help="""Data length required to transfer from controller to host""", type=int)
p.add_argument('-M', '--metadata-length', help="""Metadata length required to transfer from controller to host""", type=int)
p.add_argument('-T', '--timeout-ms',
               help="""Command execution timeout value, in milliseconds, if 0, don't track timeout""", type=int, default=0)
p.set_defaults(func=bdev_nvme_send_cmd)
|
2018-07-05 07:46:48 +00:00
|
|
|
|
2018-12-06 15:56:06 +00:00
|
|
|
# Notifications
def notify_get_types(args):
    """Print the notification types a user can subscribe to."""
    types = rpc.notify.notify_get_types(args.client)
    print_dict(types)


p = subparsers.add_parser('notify_get_types', aliases=['get_notification_types'],
                          help='List available notifications that user can subscribe to.')
p.set_defaults(func=notify_get_types)
|
2018-12-06 15:56:06 +00:00
|
|
|
|
2019-09-20 08:35:27 +00:00
|
|
|
def notify_get_notifications(args):
    """Fetch and print queued notifications, starting at ``--id``."""
    print_dict(rpc.notify.notify_get_notifications(args.client,
                                                   id=args.id,
                                                   max=args.max))


p = subparsers.add_parser('notify_get_notifications', aliases=['get_notifications'],
                          help='Get notifications')
p.add_argument('-i', '--id', help='First ID to start fetching from', type=int)
p.add_argument('-n', '--max', help='Maximum number of notifications to return in response', type=int)
p.set_defaults(func=notify_get_notifications)
|
2018-12-06 15:56:06 +00:00
|
|
|
|
2019-03-02 08:32:19 +00:00
|
|
|
def thread_get_stats(args):
    """Dump per-thread statistics of the target application."""
    stats = rpc.app.thread_get_stats(args.client)
    print_dict(stats)


p = subparsers.add_parser('thread_get_stats', help='Display current statistics of all the threads')
p.set_defaults(func=thread_get_stats)
|
|
|
|
|
2020-02-20 22:50:39 +00:00
|
|
|
def thread_set_cpumask(args):
    """Pin the thread whose ID is ``--id`` to the CPUs given in ``--cpumask``.

    The RPC carries no payload worth printing, so (matching the other
    "set" commands, e.g. sock_impl_set_options) the result is discarded
    rather than bound to an unused local.
    """
    rpc.app.thread_set_cpumask(args.client,
                               id=args.id,
                               cpumask=args.cpumask)


p = subparsers.add_parser('thread_set_cpumask',
                          help="""set the cpumask of the thread whose ID matches to the
                          specified value. The thread may be migrated to one of the specified CPUs.""")
p.add_argument('-i', '--id', type=int, help='thread ID')
p.add_argument('-m', '--cpumask', help='cpumask for this thread')
p.set_defaults(func=thread_set_cpumask)
|
|
|
|
|
2020-02-06 07:26:33 +00:00
|
|
|
def thread_get_pollers(args):
    """Dump the registered pollers of every thread."""
    pollers = rpc.app.thread_get_pollers(args.client)
    print_dict(pollers)


p = subparsers.add_parser('thread_get_pollers', help='Display current pollers of all the threads')
p.set_defaults(func=thread_get_pollers)
|
|
|
|
|
2020-02-13 12:19:11 +00:00
|
|
|
def thread_get_io_channels(args):
    """Dump the IO channels owned by every thread."""
    channels = rpc.app.thread_get_io_channels(args.client)
    print_dict(channels)


p = subparsers.add_parser('thread_get_io_channels', help='Display current IO channels of all the threads')
p.set_defaults(func=thread_get_io_channels)
|
|
|
|
|
2019-12-10 23:29:37 +00:00
|
|
|
def env_dpdk_get_mem_stats(args):
    """Ask the target to dump DPDK memory stats and print the reply."""
    result = rpc.env_dpdk.env_dpdk_get_mem_stats(args.client)
    print_dict(result)


p = subparsers.add_parser('env_dpdk_get_mem_stats', help='write the dpdk memory stats to a file.')
p.set_defaults(func=env_dpdk_get_mem_stats)
|
|
|
|
|
2019-08-28 08:06:50 +00:00
|
|
|
# blobfs
def blobfs_detect(args):
    """Print whether the given bdev already carries a blobfs."""
    detected = rpc.blobfs.blobfs_detect(args.client, bdev_name=args.bdev_name)
    print(detected)


p = subparsers.add_parser('blobfs_detect', help='Detect whether a blobfs exists on bdev')
p.add_argument('bdev_name', help='Blockdev name to detect blobfs. Example: Malloc0.')
p.set_defaults(func=blobfs_detect)
|
|
|
|
|
2019-08-28 08:06:50 +00:00
|
|
|
def blobfs_create(args):
    """Format a blobfs on the given bdev and print the result."""
    created = rpc.blobfs.blobfs_create(args.client,
                                       bdev_name=args.bdev_name,
                                       cluster_sz=args.cluster_sz)
    print(created)


p = subparsers.add_parser('blobfs_create', help='Build a blobfs on bdev')
p.add_argument('bdev_name', help='Blockdev name to build blobfs. Example: Malloc0.')
p.add_argument('-c', '--cluster_sz',
               help='Size of cluster in bytes (Optional). Must be multiple of 4KB page size. Default and minimal value is 1M.')
p.set_defaults(func=blobfs_create)
|
|
|
|
|
2019-09-17 07:31:59 +00:00
|
|
|
def blobfs_mount(args):
    """Mount the blobfs on a bdev at a host path via FUSE; print the result."""
    mounted = rpc.blobfs.blobfs_mount(args.client,
                                      bdev_name=args.bdev_name,
                                      mountpoint=args.mountpoint)
    print(mounted)


p = subparsers.add_parser('blobfs_mount', help='Mount a blobfs on bdev to host path by FUSE')
p.add_argument('bdev_name', help='Blockdev name where the blobfs is. Example: Malloc0.')
p.add_argument('mountpoint', help='Mountpoint path in host to mount blobfs. Example: /mnt/.')
p.set_defaults(func=blobfs_mount)
|
|
|
|
|
2019-10-21 15:32:12 +00:00
|
|
|
def blobfs_set_cache_size(args):
    """Set the blobfs cache size (in MiB) and print the result."""
    result = rpc.blobfs.blobfs_set_cache_size(args.client,
                                              size_in_mb=args.size_in_mb)
    print(result)


p = subparsers.add_parser('blobfs_set_cache_size', help='Set cache size for blobfs')
p.add_argument('size_in_mb', help='Cache size for blobfs in megabytes.', type=int)
p.set_defaults(func=blobfs_set_cache_size)
|
|
|
|
|
2020-01-28 19:24:46 +00:00
|
|
|
# sock
def sock_impl_get_options(args):
    """Print the current options of a socket-layer implementation as JSON."""
    options = rpc.sock.sock_impl_get_options(args.client, impl_name=args.impl)
    print_json(options)


p = subparsers.add_parser('sock_impl_get_options', help='Get options of socket layer implementation')
p.add_argument('-i', '--impl', help='Socket implementation name, e.g. posix', required=True)
p.set_defaults(func=sock_impl_get_options)
|
|
|
|
|
|
|
|
def sock_impl_set_options(args):
    """Update options of a socket-layer implementation (None leaves a setting untouched)."""
    rpc.sock.sock_impl_set_options(args.client,
                                   impl_name=args.impl,
                                   recv_buf_size=args.recv_buf_size,
                                   send_buf_size=args.send_buf_size,
                                   enable_recv_pipe=args.enable_recv_pipe,
                                   enable_zerocopy_send=args.enable_zerocopy_send,
                                   enable_quickack=args.enable_quickack)


p = subparsers.add_parser('sock_impl_set_options', help='Set options of socket layer implementation')
p.add_argument('-i', '--impl', help='Socket implementation name, e.g. posix', required=True)
p.add_argument('-r', '--recv-buf-size', help='Size of receive buffer on socket in bytes', type=int)
p.add_argument('-s', '--send-buf-size', help='Size of send buffer on socket in bytes', type=int)
# Each boolean knob is an --enable-X/--disable-X pair sharing one dest; the
# tri-state default None (set below) means "do not change this option".
for flag, words in (('recv-pipe', 'receive pipe'),
                    ('zerocopy-send', 'zerocopy on send'),
                    ('quickack', 'quick ACK')):
    dest = 'enable_' + flag.replace('-', '_')
    p.add_argument('--enable-' + flag, help='Enable ' + words,
                   action='store_true', dest=dest)
    p.add_argument('--disable-' + flag, help='Disable ' + words,
                   action='store_false', dest=dest)
p.set_defaults(func=sock_impl_set_options, enable_recv_pipe=None, enable_zerocopy_send=None,
               enable_quickack=None)
|
2020-01-28 19:24:46 +00:00
|
|
|
|
2019-05-03 21:27:09 +00:00
|
|
|
def check_called_name(name):
    """Emit a stderr warning when a deprecated RPC alias was used."""
    replacement = deprecated_aliases.get(name)
    if replacement is not None:
        print("{} is deprecated, use {} instead.".format(name, replacement), file=sys.stderr)
|
|
|
|
|
2019-06-26 04:32:48 +00:00
|
|
|
class dry_run_client:
    """Stand-in JSON-RPC client: prints each request instead of sending it."""

    def call(self, method, params=None):
        """Pretty-print the would-be JSON-RPC request for *method*."""
        request = {"method": method, "params": params}
        print("Request:\n" + json.dumps(request, indent=2))
|
|
|
|
|
2019-06-26 04:32:48 +00:00
|
|
|
def null_print(arg):
    """Discard *arg*; substituted for the printers in --dry-run mode."""
    return None
|
|
|
|
|
2019-05-01 15:22:59 +00:00
|
|
|
def call_rpc_func(args):
    # Invoke the handler that argparse attached via set_defaults(func=...),
    # then warn (on stderr) if the command name used was a deprecated alias.
    args.func(args)
    check_called_name(args.called_rpc_name)
|
2019-05-01 15:22:59 +00:00
|
|
|
|
2019-05-01 16:01:35 +00:00
|
|
|
def execute_script(parser, client, fd):
    """Run newline-separated rpc.py command lines read from *fd*.

    On the first JSON-RPC failure, print everything executed so far (the
    failing line marked with "<<<") plus the error, and exit non-zero.
    """
    history = ""
    for raw in fd:
        line = raw.rstrip()
        if not line.strip():
            # Skip blank lines.
            continue
        history = "\n".join([history, line])
        cmd_args = parser.parse_args(shlex.split(line))
        cmd_args.client = client
        try:
            call_rpc_func(cmd_args)
        except JSONRPCException as ex:
            print("Exception:")
            print(history.strip() + " <<<")
            print(ex.message)
            exit(1)
|
2019-05-01 16:01:35 +00:00
|
|
|
|
2020-05-08 08:05:17 +00:00
|
|
|
# Create temporary parser, pull out the plugin parameter, load the module, and then run the real argument parser
# add_help=False keeps -h/--help free for the real parser below.
plugin_parser = argparse.ArgumentParser(add_help=False)
plugin_parser.add_argument('--plugin', dest='rpc_plugin', help='Module name of plugin with additional RPC commands')

# parse_known_args() extracts only --plugin and leaves the rest of the
# command line untouched for the main parser.
rpc_module = plugin_parser.parse_known_args()[0].rpc_plugin
if rpc_module is not None:
    try:
        rpc_plugin = importlib.import_module(rpc_module)
        try:
            # The plugin registers its extra RPC commands on our subparsers.
            rpc_plugin.spdk_rpc_plugin_initialize(subparsers)
        except AttributeError:
            print("Module %s does not contain 'spdk_rpc_plugin_initialize' function" % rpc_module)
    except ModuleNotFoundError:
        # A missing plugin is reported but not fatal.
        print("Module %s not found" % rpc_module)
|
|
|
|
|
2019-05-01 15:22:59 +00:00
|
|
|
args = parser.parse_args()

if sys.stdin.isatty() and not hasattr(args, 'func'):
    # No arguments and no data piped through stdin
    # Print help without attempting to connect to the RPC socket.
    parser.print_help()
    exit(1)
|
scripts/rpc.py: add daemon mode
Add rpc_cmd() bash command that sends rpc command to an
rpc.py instance permanently running in background.
This makes sending RPC commands even 17 times faster.
We make use of bash coprocesses - a builtin bash feature
that allow starting background processes with stdin and
stdout connected to pipes. rpc.py will block trying to
read stdin, effectively being always "ready" to read
an RPC command.
The background rpc.py is started with a new --server flag
that's described as:
> Start listening on stdin, parse each line as a regular
> rpc.py execution and create a separate connection for each command.
> Each command's output ends with either **STATUS=0 if the
> command succeeded or **STATUS=1 if it failed.
> --server is meant to be used in conjunction with bash
> coproc, where stdin and stdout are named pipes and can be
> used as a faster way to send RPC commands.
As a part of this patch I'm attaching a sample test
that runs the following rpc commands first with the regular
rpc.py, then the new rpc_cmd() function.
```
time {
bdevs=$($rpc bdev_get_bdevs)
[ "$(jq length <<< "$bdevs")" == "0" ]
malloc=$($rpc bdev_malloc_create 8 512)
bdevs=$($rpc bdev_get_bdevs)
[ "$(jq length <<< "$bdevs")" == "1" ]
$rpc bdev_passthru_create -b "$malloc" -p Passthru0
bdevs=$($rpc bdev_get_bdevs)
[ "$(jq length <<< "$bdevs")" == "2" ]
$rpc bdev_passthru_delete Passthru0
$rpc bdev_malloc_delete $malloc
bdevs=$($rpc bdev_get_bdevs)
[ "$(jq length <<< "$bdevs")" == "0" ]
}
```
Regular rpc.py:
```
real 0m1.477s
user 0m1.289s
sys 0m0.139s
```
rpc_cmd():
```
real 0m0.085s
user 0m0.025s
sys 0m0.006s
```
autotest_common.sh will now spawn an rpc.py daemon if
it's not running yet, and it will offer rpc_cmd() function
to quickly send RPC commands. If the command is invalid or
SPDK returns with error, the bash function will return
a non-zero code and may trigger ERR trap just like a regular
rpc.py instance.
Pipes have major advantage over e.g. unix domain sockets - the pipes
will be automatically closed once the owner process exits.
This means we can create a named pipe in autotest_common.sh,
open it, then start rpc.py in background and never worry
about it again - it will be closed automatically once the
test exits. It doesn't even matter if the test is executed
manually in isolation, or as a part of the entire autotest.
(check_so_deps.sh needs to be modified not to wait for *all*
background processes to finish, but just the ones it started)
Change-Id: If0ded961b7fef3af3837b44532300dee8b5b4663
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Signed-off-by: Pawel Kaminski <pawelx.kaminski@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/621
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2019-06-21 07:38:47 +00:00
|
|
|
if args.is_server:
    # --server (daemon) mode: each stdin line is parsed as a complete rpc.py
    # command line; a **STATUS=0/1 marker is printed after every command so a
    # bash coproc can tell success from failure.
    # NOTE: renamed the loop variable from "input" (shadowed the builtin).
    for line in sys.stdin:
        cmd = shlex.split(line)
        try:
            tmp_args = parser.parse_args(cmd)
        except SystemExit:
            # argparse exits on a malformed command; swallow it and keep serving.
            print("**STATUS=1", flush=True)
            continue

        try:
            # A fresh connection per command keeps failures isolated.
            tmp_args.client = rpc.client.JSONRPCClient(
                tmp_args.server_addr, tmp_args.port, tmp_args.timeout,
                log_level=getattr(logging, tmp_args.verbose.upper()), conn_retries=tmp_args.conn_retries)
            call_rpc_func(tmp_args)
            print("**STATUS=0", flush=True)
        except JSONRPCException as ex:
            print(ex.message)
            print("**STATUS=1", flush=True)
    exit(0)
elif args.dry_run:
    # Dry run: print requests instead of sending them, and silence the
    # response printers.
    args.client = dry_run_client()
    print_dict = null_print
    print_json = null_print
    print_array = null_print
else:
    args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout,
                                           log_level=getattr(logging, args.verbose.upper()),
                                           conn_retries=args.conn_retries)
|
2019-05-01 16:01:35 +00:00
|
|
|
if hasattr(args, 'func'):
    # A subcommand was given on the command line: run it once.
    try:
        call_rpc_func(args)
    except JSONRPCException as ex:
        print(ex.message)
        exit(1)
else:
    # No subcommand: read RPC command lines from stdin (scripted mode).
    execute_script(parser, args.client, sys.stdin)
|