rpc: Rename construct_nvme_bdev to bdev_nvme_attach_controller

Change-Id: I1831eb39d3d357594968271b2ee69bb48502a59c
Signed-off-by: Pawel Kaminski <pawelx.kaminski@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/466138
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
This commit is contained in:
Pawel Kaminski 2019-08-23 09:50:51 -04:00 committed by Jim Harris
parent 714a56466a
commit f54df84058
25 changed files with 84 additions and 81 deletions

View File

@@ -393,15 +393,15 @@ To delete a null bdev use the bdev_null_delete command.
There are two ways to create block device based on NVMe device in SPDK. First
way is to connect local PCIe drive and second one is to connect NVMe-oF device.
In both cases user should use `construct_nvme_bdev` RPC command to achieve that.
In both cases user should use `bdev_nvme_attach_controller` RPC command to achieve that.
Example commands
`rpc.py construct_nvme_bdev -b NVMe1 -t PCIe -a 0000:01:00.0`
`rpc.py bdev_nvme_attach_controller -b NVMe1 -t PCIe -a 0000:01:00.0`
This command will create NVMe bdev of physical device in the system.
`rpc.py construct_nvme_bdev -b Nvme0 -t RDMA -a 192.168.100.1 -f IPv4 -s 4420 -n nqn.2016-06.io.spdk:cnode1`
`rpc.py bdev_nvme_attach_controller -b Nvme0 -t RDMA -a 192.168.100.1 -f IPv4 -s 4420 -n nqn.2016-06.io.spdk:cnode1`
This command will create NVMe bdev of NVMe-oF resource.

View File

@@ -293,7 +293,7 @@ Example response:
"construct_passthru_bdev",
"bdev_nvme_apply_firmware",
"delete_nvme_controller",
"construct_nvme_bdev",
"bdev_nvme_attach_controller",
"bdev_null_create",
"bdev_malloc_delete",
"bdev_malloc_create",
@@ -452,7 +452,7 @@ Example response:
"name": "Nvme1",
"traddr": "0000:01:00.0"
},
"method": "construct_nvme_bdev"
"method": "bdev_nvme_attach_controller"
},
{
"params": {
@@ -460,7 +460,7 @@ Example response:
"name": "Nvme2",
"traddr": "0000:03:00.0"
},
"method": "construct_nvme_bdev"
"method": "bdev_nvme_attach_controller"
},
{
"params": {
@@ -1479,7 +1479,7 @@ Example response:
}
~~~
## construct_nvme_bdev {#rpc_construct_nvme_bdev}
## bdev_nvme_attach_controller {#rpc_bdev_nvme_attach_controller}
Construct @ref bdev_config_nvme
@@ -1515,7 +1515,7 @@ Example request:
"traddr": "0000:0a:00.0"
},
"jsonrpc": "2.0",
"method": "construct_nvme_bdev",
"method": "bdev_nvme_attach_controller",
"id": 1
}
~~~

View File

@@ -260,7 +260,7 @@ reactor.c: 424:_spdk_reactor_run: *NOTICE*: Reactor started on core 0 on socket
~~~
~~~{.sh}
host:~# ./scripts/rpc.py construct_nvme_bdev -b Nvme0 -t pcie -a 0000:01:00.0
host:~# ./scripts/rpc.py bdev_nvme_attach_controller -b Nvme0 -t pcie -a 0000:01:00.0
EAL: PCI device 0000:01:00.0 on NUMA socket 0
EAL: probe driver: 8086:953 spdk_nvme
EAL: using IOMMU type 1 (Type 1)

View File

@@ -2121,7 +2121,7 @@ bdev_nvme_config_json(struct spdk_json_write_ctx *w)
spdk_json_write_object_begin(w);
spdk_json_write_named_string(w, "method", "construct_nvme_bdev");
spdk_json_write_named_string(w, "method", "bdev_nvme_attach_controller");
spdk_json_write_named_object_begin(w, "params");
spdk_json_write_named_string(w, "name", nvme_bdev_ctrlr->name);

View File

@@ -158,7 +158,7 @@ invalid:
SPDK_RPC_REGISTER("bdev_nvme_set_hotplug", spdk_rpc_bdev_nvme_set_hotplug, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_set_hotplug, set_bdev_nvme_hotplug)
struct rpc_construct_nvme {
struct rpc_bdev_nvme_attach_controller {
char *name;
char *trtype;
char *adrfam;
@@ -173,7 +173,7 @@ struct rpc_construct_nvme {
};
static void
free_rpc_construct_nvme(struct rpc_construct_nvme *req)
free_rpc_bdev_nvme_attach_controller(struct rpc_bdev_nvme_attach_controller *req)
{
free(req->name);
free(req->trtype);
@@ -186,35 +186,35 @@ free_rpc_construct_nvme(struct rpc_construct_nvme *req)
free(req->hostsvcid);
}
static const struct spdk_json_object_decoder rpc_construct_nvme_decoders[] = {
{"name", offsetof(struct rpc_construct_nvme, name), spdk_json_decode_string},
{"trtype", offsetof(struct rpc_construct_nvme, trtype), spdk_json_decode_string},
{"traddr", offsetof(struct rpc_construct_nvme, traddr), spdk_json_decode_string},
static const struct spdk_json_object_decoder rpc_bdev_nvme_attach_controller_decoders[] = {
{"name", offsetof(struct rpc_bdev_nvme_attach_controller, name), spdk_json_decode_string},
{"trtype", offsetof(struct rpc_bdev_nvme_attach_controller, trtype), spdk_json_decode_string},
{"traddr", offsetof(struct rpc_bdev_nvme_attach_controller, traddr), spdk_json_decode_string},
{"adrfam", offsetof(struct rpc_construct_nvme, adrfam), spdk_json_decode_string, true},
{"trsvcid", offsetof(struct rpc_construct_nvme, trsvcid), spdk_json_decode_string, true},
{"subnqn", offsetof(struct rpc_construct_nvme, subnqn), spdk_json_decode_string, true},
{"hostnqn", offsetof(struct rpc_construct_nvme, hostnqn), spdk_json_decode_string, true},
{"hostaddr", offsetof(struct rpc_construct_nvme, hostaddr), spdk_json_decode_string, true},
{"hostsvcid", offsetof(struct rpc_construct_nvme, hostsvcid), spdk_json_decode_string, true},
{"adrfam", offsetof(struct rpc_bdev_nvme_attach_controller, adrfam), spdk_json_decode_string, true},
{"trsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, trsvcid), spdk_json_decode_string, true},
{"subnqn", offsetof(struct rpc_bdev_nvme_attach_controller, subnqn), spdk_json_decode_string, true},
{"hostnqn", offsetof(struct rpc_bdev_nvme_attach_controller, hostnqn), spdk_json_decode_string, true},
{"hostaddr", offsetof(struct rpc_bdev_nvme_attach_controller, hostaddr), spdk_json_decode_string, true},
{"hostsvcid", offsetof(struct rpc_bdev_nvme_attach_controller, hostsvcid), spdk_json_decode_string, true},
{"prchk_reftag", offsetof(struct rpc_construct_nvme, prchk_reftag), spdk_json_decode_bool, true},
{"prchk_guard", offsetof(struct rpc_construct_nvme, prchk_guard), spdk_json_decode_bool, true}
{"prchk_reftag", offsetof(struct rpc_bdev_nvme_attach_controller, prchk_reftag), spdk_json_decode_bool, true},
{"prchk_guard", offsetof(struct rpc_bdev_nvme_attach_controller, prchk_guard), spdk_json_decode_bool, true}
};
#define NVME_MAX_BDEVS_PER_RPC 128
struct rpc_create_nvme_bdev_ctx {
struct rpc_construct_nvme req;
struct rpc_bdev_nvme_attach_controller_ctx {
struct rpc_bdev_nvme_attach_controller req;
size_t count;
const char *names[NVME_MAX_BDEVS_PER_RPC];
struct spdk_jsonrpc_request *request;
};
static void
spdk_rpc_construct_nvme_bdev_done(void *cb_ctx, int rc)
spdk_rpc_bdev_nvme_attach_controller_done(void *cb_ctx, int rc)
{
struct rpc_create_nvme_bdev_ctx *ctx = cb_ctx;
struct rpc_bdev_nvme_attach_controller_ctx *ctx = cb_ctx;
struct spdk_jsonrpc_request *request = ctx->request;
struct spdk_json_write_ctx *w;
size_t i;
@@ -233,15 +233,15 @@ spdk_rpc_construct_nvme_bdev_done(void *cb_ctx, int rc)
spdk_jsonrpc_end_result(request, w);
exit:
free_rpc_construct_nvme(&ctx->req);
free_rpc_bdev_nvme_attach_controller(&ctx->req);
free(ctx);
}
static void
spdk_rpc_construct_nvme_bdev(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
spdk_rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_create_nvme_bdev_ctx *ctx;
struct rpc_bdev_nvme_attach_controller_ctx *ctx;
struct spdk_nvme_transport_id trid = {};
struct spdk_nvme_host_id hostid = {};
uint32_t prchk_flags = 0;
@@ -253,8 +253,8 @@ spdk_rpc_construct_nvme_bdev(struct spdk_jsonrpc_request *request,
return;
}
if (spdk_json_decode_object(params, rpc_construct_nvme_decoders,
SPDK_COUNTOF(rpc_construct_nvme_decoders),
if (spdk_json_decode_object(params, rpc_bdev_nvme_attach_controller_decoders,
SPDK_COUNTOF(rpc_bdev_nvme_attach_controller_decoders),
&ctx->req)) {
SPDK_ERRLOG("spdk_json_decode_object failed\n");
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
@@ -314,7 +314,7 @@ spdk_rpc_construct_nvme_bdev(struct spdk_jsonrpc_request *request,
ctx->request = request;
ctx->count = NVME_MAX_BDEVS_PER_RPC;
rc = spdk_bdev_nvme_create(&trid, &hostid, ctx->req.name, ctx->names, &ctx->count, ctx->req.hostnqn,
prchk_flags, spdk_rpc_construct_nvme_bdev_done, ctx);
prchk_flags, spdk_rpc_bdev_nvme_attach_controller_done, ctx);
if (rc) {
spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
goto cleanup;
@@ -323,10 +323,12 @@ spdk_rpc_construct_nvme_bdev(struct spdk_jsonrpc_request *request,
return;
cleanup:
free_rpc_construct_nvme(&ctx->req);
free_rpc_bdev_nvme_attach_controller(&ctx->req);
free(ctx);
}
SPDK_RPC_REGISTER("construct_nvme_bdev", spdk_rpc_construct_nvme_bdev, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER("bdev_nvme_attach_controller", spdk_rpc_bdev_nvme_attach_controller,
SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_attach_controller, construct_nvme_bdev)
static void
spdk_rpc_dump_nvme_controller_info(struct spdk_json_write_ctx *w,

View File

@@ -56,7 +56,7 @@ struct nvme_bdev_ctrlr {
bool destruct;
/**
* PI check flags. This flags is set to NVMe controllers created only
* through construct_nvme_bdev RPC or .INI config file. Hot added
* through bdev_nvme_attach_controller RPC or .INI config file. Hot added
* NVMe controllers are not included.
*/
uint32_t prchk_flags;

View File

@@ -10,7 +10,7 @@ bdev_dict = OrderedDict()
bdev_dict["set_bdev_options"] = []
bdev_dict["construct_split_vbdev"] = []
bdev_dict["bdev_nvme_set_options"] = []
bdev_dict["construct_nvme_bdev"] = []
bdev_dict["bdev_nvme_attach_controller"] = []
bdev_dict["bdev_nvme_set_hotplug"] = []
bdev_dict["bdev_malloc_create"] = []
bdev_dict["bdev_aio_create"] = []
@@ -230,7 +230,7 @@ def get_nvme_bdev_json(config, section):
"name": nvme_name,
"traddr": traddr
},
"method": "construct_nvme_bdev"
"method": "bdev_nvme_attach_controller"
})
else:
set_param(params, option, value)

View File

@@ -27,7 +27,7 @@ function create_json_config()
echo "\"name\": \"Nvme$i\","
echo "\"traddr\": \"${bdfs[i]}\""
echo '},'
echo '"method": "construct_nvme_bdev"'
echo '"method": "bdev_nvme_attach_controller"'
if [ -z ${bdfs[i+1]} ]; then
echo '}'
else

View File

@@ -496,7 +496,7 @@ class SPDKTarget(Target):
bdfs = bdfs[0:req_num_disks]
for i, bdf in enumerate(bdfs):
rpc.bdev.construct_nvme_bdev(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)
rpc.bdev.bdev_nvme_attach_controller(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)
self.log_print("SPDK Bdevs configuration:")
rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))

View File

@@ -355,21 +355,21 @@ if __name__ == "__main__":
help='How often the hotplug is processed for insert and remove events', type=int)
p.set_defaults(func=bdev_nvme_set_hotplug)
def construct_nvme_bdev(args):
print_array(rpc.bdev.construct_nvme_bdev(args.client,
name=args.name,
trtype=args.trtype,
traddr=args.traddr,
adrfam=args.adrfam,
trsvcid=args.trsvcid,
subnqn=args.subnqn,
hostnqn=args.hostnqn,
hostaddr=args.hostaddr,
hostsvcid=args.hostsvcid,
prchk_reftag=args.prchk_reftag,
prchk_guard=args.prchk_guard))
def bdev_nvme_attach_controller(args):
print_array(rpc.bdev.bdev_nvme_attach_controller(args.client,
name=args.name,
trtype=args.trtype,
traddr=args.traddr,
adrfam=args.adrfam,
trsvcid=args.trsvcid,
subnqn=args.subnqn,
hostnqn=args.hostnqn,
hostaddr=args.hostaddr,
hostsvcid=args.hostsvcid,
prchk_reftag=args.prchk_reftag,
prchk_guard=args.prchk_guard))
p = subparsers.add_parser('construct_nvme_bdev',
p = subparsers.add_parser('bdev_nvme_attach_controller', aliases=['construct_nvme_bdev'],
help='Add bdevs with nvme backend')
p.add_argument('-b', '--name', help="Name of the NVMe controller, prefix for each bdev name", required=True)
p.add_argument('-t', '--trtype',
@@ -390,7 +390,7 @@ if __name__ == "__main__":
help='Enable checking of PI reference tag for I/O processing.', action='store_true')
p.add_argument('-g', '--prchk-guard',
help='Enable checking of PI guard for I/O processing.', action='store_true')
p.set_defaults(func=construct_nvme_bdev)
p.set_defaults(func=bdev_nvme_attach_controller)
def bdev_nvme_get_controllers(args):
print_dict(rpc.nvme.bdev_nvme_get_controllers(args.client,

View File

@@ -369,10 +369,11 @@ def bdev_nvme_set_hotplug(client, enable, period_us=None):
return client.call('bdev_nvme_set_hotplug', params)
def construct_nvme_bdev(client, name, trtype, traddr, adrfam=None, trsvcid=None,
subnqn=None, hostnqn=None, hostaddr=None, hostsvcid=None,
prchk_reftag=None, prchk_guard=None):
"""Construct NVMe namespace block devices.
@deprecated_alias('construct_nvme_bdev')
def bdev_nvme_attach_controller(client, name, trtype, traddr, adrfam=None, trsvcid=None,
subnqn=None, hostnqn=None, hostaddr=None, hostsvcid=None,
prchk_reftag=None, prchk_guard=None):
"""Construct block device for each NVMe namespace in the attached controller.
Args:
name: bdev name prefix; "n" + namespace ID will be appended to create unique names
@@ -418,7 +419,7 @@ def construct_nvme_bdev(client, name, trtype, traddr, adrfam=None, trsvcid=None,
if prchk_guard:
params['prchk_guard'] = prchk_guard
return client.call('construct_nvme_bdev', params)
return client.call('bdev_nvme_attach_controller', params)
def delete_nvme_controller(client, name):

View File

@@ -164,7 +164,7 @@ class UIRoot(UINode):
@verbose
def create_nvme_bdev(self, **kwargs):
response = rpc.bdev.construct_nvme_bdev(self.client, **kwargs)
response = rpc.bdev.bdev_nvme_attach_controller(self.client, **kwargs)
return response
@verbose

View File

@@ -23,7 +23,7 @@ bdev_svc_pid=$!
trap 'killprocess $bdev_svc_pid; compress_err_cleanup; exit 1' SIGINT SIGTERM EXIT
waitforlisten $bdev_svc_pid
bdf=$(iter_pci_class_code 01 08 02 | head -1)
$rpc_py construct_nvme_bdev -b "Nvme0" -t "pcie" -a $bdf
$rpc_py bdev_nvme_attach_controller -b "Nvme0" -t "pcie" -a $bdf
lvs_u=$($rpc_py bdev_lvol_create_lvstore Nvme0n1 lvs0)
$rpc_py bdev_lvol_create -t -u $lvs_u lv0 100
# this will force isal_pmd as some of the CI systems need a qat driver update
@@ -39,7 +39,7 @@ bdevio_pid=$!
trap 'killprocess $bdevio_pid; compress_err_cleanup; exit 1' SIGINT SIGTERM EXIT
waitforlisten $bdevio_pid
$rpc_py set_compress_pmd -p 2
$rpc_py construct_nvme_bdev -b "Nvme0" -t "pcie" -a $bdf
$rpc_py bdev_nvme_attach_controller -b "Nvme0" -t "pcie" -a $bdf
waitforbdev $compress_bdev
$rootdir/test/bdev/bdevio/tests.py perform_tests
trap - SIGINT SIGTERM EXIT
@@ -59,7 +59,7 @@ bdevperf_pid=$!
trap 'killprocess $bdevperf_pid; compress_err_cleanup; exit 1' SIGINT SIGTERM EXIT
waitforlisten $bdevperf_pid
$rpc_py set_compress_pmd -p 2
$rpc_py construct_nvme_bdev -b "Nvme0" -t "pcie" -a $bdf
$rpc_py bdev_nvme_attach_controller -b "Nvme0" -t "pcie" -a $bdf
waitforbdev $compress_bdev
$rootdir/test/bdev/bdevperf/bdevperf.py perform_tests

View File

@@ -46,7 +46,7 @@
"name": "Nvme0",
"traddr": "0000:00:04.0"
},
"method": "construct_nvme_bdev"
"method": "bdev_nvme_attach_controller"
},
{
"params": {

View File

@@ -30,6 +30,6 @@ function create_nv_cache_bdev() {
local size=$((($size + $bytes_to_mb) / $bytes_to_mb))
# Create NVMe bdev on specified device and split it so that it has the desired size
local nvc_bdev=$($rootdir/scripts/rpc.py construct_nvme_bdev -b $name -t PCIe -a $cache_bdf)
local nvc_bdev=$($rootdir/scripts/rpc.py bdev_nvme_attach_controller -b $name -t PCIe -a $cache_bdf)
$rootdir/scripts/rpc.py construct_split_vbdev $nvc_bdev -s $size 1
}

View File

@@ -45,7 +45,7 @@ timing_exit start_iscsi_tgt
bdf=$(iter_pci_class_code 01 08 02 | head -1)
$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
$rpc_py construct_nvme_bdev -b "Nvme0" -t "pcie" -a $bdf
$rpc_py bdev_nvme_attach_controller -b "Nvme0" -t "pcie" -a $bdf
ls_guid=$($rpc_py bdev_lvol_create_lvstore Nvme0n1 lvs_0)
free_mb=$(get_lvs_free_mb "$ls_guid")

View File

@@ -32,7 +32,7 @@ function run_nvme_remote() {
$rpc_py -s "$iscsi_rpc_addr" set_iscsi_options -o 30 -a 16
$rpc_py -s "$iscsi_rpc_addr" start_subsystem_init
if [ "$1" = "remote" ]; then
$rpc_py -s $iscsi_rpc_addr construct_nvme_bdev -b "Nvme0" -t "rdma" -f "ipv4" -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1
$rpc_py -s $iscsi_rpc_addr bdev_nvme_attach_controller -b "Nvme0" -t "rdma" -f "ipv4" -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1
fi
echo "iSCSI target has started."
@@ -43,7 +43,7 @@ function run_nvme_remote() {
$rpc_py -s "$iscsi_rpc_addr" iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
$rpc_py -s "$iscsi_rpc_addr" add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
if [ "$1" = "local" ]; then
$rpc_py -s "$iscsi_rpc_addr" construct_nvme_bdev -b "Nvme0" -t "rdma" -f "ipv4" -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1
$rpc_py -s "$iscsi_rpc_addr" bdev_nvme_attach_controller -b "Nvme0" -t "rdma" -f "ipv4" -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1
fi
$rpc_py -s "$iscsi_rpc_addr" iscsi_create_target_node Target1 Target1_alias 'Nvme0n1:0' $PORTAL_TAG:$INITIATOR_TAG 64 -d
sleep 1

View File

@@ -464,11 +464,11 @@ def verify_add_nvme_bdev_rpc_methods(rpc_py):
addrs = re.findall(r'^([0-9]{2}:[0-9]{2}.[0-9]) "Non-Volatile memory controller \[0108\]".*-p02', output.decode(), re.MULTILINE)
for addr in addrs:
ctrlr_address = "-b Nvme{} -t pcie -a 0000:{}".format(addrs.index(addr), addr)
rpc.construct_nvme_bdev(ctrlr_address)
rpc.bdev_nvme_attach_controller(ctrlr_address)
print("add nvme device passed first time")
test_pass = 0
try:
rpc.construct_nvme_bdev(ctrlr_address)
rpc.bdev_nvme_attach_controller(ctrlr_address)
except Exception as e:
print("add nvme device passed second time")
test_pass = 1

View File

@@ -46,7 +46,7 @@ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
if [ $RUN_NIGHTLY -eq 1 ]; then
# Test fio_plugin as host with nvme lvol backend
bdfs=$(iter_pci_class_code 01 08 02)
$rpc_py construct_nvme_bdev -b Nvme0 -t PCIe -a $(echo $bdfs | awk '{ print $1 }') -i $NVMF_FIRST_TARGET_IP
$rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a $(echo $bdfs | awk '{ print $1 }') -i $NVMF_FIRST_TARGET_IP
ls_guid=$($rpc_py bdev_lvol_create_lvstore -c 1073741824 Nvme0n1 lvs_0)
get_lvs_free_mb $ls_guid
$rpc_py bdev_lvol_create -l lvs_0 lbd_0 $free_mb

View File

@@ -48,7 +48,7 @@ $NVMF_RPC nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev"
$NVMF_RPC nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
# Configure VHost on host machine
$VHOST_RPC construct_nvme_bdev -b Nvme0 -t $TEST_TRANSPORT -f ipv4 -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1
$VHOST_RPC bdev_nvme_attach_controller -b Nvme0 -t $TEST_TRANSPORT -f ipv4 -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -n nqn.2016-06.io.spdk:cnode1
$VHOST_RPC construct_vhost_scsi_controller naa.VhostScsi0.3
$VHOST_RPC add_vhost_scsi_lun naa.VhostScsi0.3 0 "Nvme0n1"

View File

@@ -219,5 +219,5 @@ function delete_nvme() {
}
function add_nvme() {
$rpc_py construct_nvme_bdev -b $1 -t PCIe -a $2
$rpc_py bdev_nvme_attach_controller -b $1 -t PCIe -a $2
}

View File

@@ -119,11 +119,11 @@ function migration_tc2_configure_vhost()
$rpc_nvmf nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Nvme0n1
$rpc_nvmf nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $nvmf_target_ip -s 4420
$rpc_0 construct_nvme_bdev -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
$rpc_0 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
$rpc_0 construct_vhost_scsi_controller $incoming_vm_ctrlr
$rpc_0 add_vhost_scsi_lun $incoming_vm_ctrlr 0 Nvme0n1
$rpc_1 construct_nvme_bdev -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
$rpc_1 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
$rpc_1 construct_vhost_scsi_controller $target_vm_ctrlr
$rpc_1 add_vhost_scsi_lun $target_vm_ctrlr 0 Nvme0n1

View File

@@ -123,7 +123,7 @@ function host1_start_vhost()
notice "Starting vhost0 instance on local server"
trap 'host1_cleanup_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
vhost_run 0 "-u"
$rpc_0 construct_nvme_bdev -b Nvme0 -t rdma -f ipv4 -a $RDMA_TARGET_IP -s 4420 -n "nqn.2018-02.io.spdk:cnode1"
$rpc_0 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $RDMA_TARGET_IP -s 4420 -n "nqn.2018-02.io.spdk:cnode1"
$rpc_0 construct_vhost_scsi_controller $incoming_vm_ctrlr
$rpc_0 add_vhost_scsi_lun $incoming_vm_ctrlr 0 Nvme0n1

View File

@@ -35,7 +35,7 @@ function host_2_start_vhost()
trap 'host_2_cleanup_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
vhost_run 1 "-u"
$rpc construct_nvme_bdev -b Nvme0 -t rdma -f ipv4 -a $RDMA_TARGET_IP -s 4420 -n "nqn.2018-02.io.spdk:cnode1"
$rpc bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a $RDMA_TARGET_IP -s 4420 -n "nqn.2018-02.io.spdk:cnode1"
$rpc construct_vhost_scsi_controller $target_vm_ctrl
$rpc add_vhost_scsi_lun $target_vm_ctrl 0 Nvme0n1

View File

@@ -51,7 +51,7 @@ $rpc_py enable_vmd
$rpc_py start_subsystem_init
for bdf in $pci_devs; do
$rpc_py construct_nvme_bdev -b NVMe_$bdf -t PCIe -a $bdf
$rpc_py bdev_nvme_attach_controller -b NVMe_$bdf -t PCIe -a $bdf
done
timing_exit vmd