bdev: add delete_nvme_bdev call

Since delete_bdev should be used only for debug purposes,
this patch adds a delete call specific to NVMe bdevs.

Signed-off-by: Maciej Szwed <maciej.szwed@intel.com>
Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: Ib9a0475d735af2616a3005d04530ca825ece8a52
Reviewed-on: https://review.gerrithub.io/416546
Reviewed-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Author:    Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Date:      2018-07-26 08:54:20 -04:00
Committer: Ben Walker <benjamin.walker@intel.com>
parent b36face57d
commit 3a56fabce3
24 changed files with 169 additions and 20 deletions
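
Before the per-file diffs, a quick orientation: the patch plumbs the new call through every layer (spdk_bdev_nvme_delete() in the bdev module, a delete_nvme_controller JSON-RPC handler, the scripts/rpc.py command line, the Python client library and the spdkcli UI) and then switches the test scripts over to it. A minimal sketch of driving the new RPC from Python, assuming SPDK's scripts/ directory is on PYTHONPATH and a target is listening on the default /var/tmp/spdk.sock socket (the socket path and client setup are assumptions, not part of this patch):

# Sketch: tear down the controller named "Nvme0" through the new RPC.
# The socket path below is the usual local default and is an assumption,
# not something this patch defines.
import rpc.bdev
import rpc.client

client = rpc.client.JSONRPCClient('/var/tmp/spdk.sock')
# Helper added to scripts/rpc/bdev.py by this patch; the target replies
# with a bare boolean true on success.
rpc.bdev.delete_nvme_controller(client, name='Nvme0')

The same operation is exposed on the command line as "./scripts/rpc.py delete_nvme_controller Nvme0" through the subparser added below.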


@@ -771,6 +771,24 @@ nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid)
 	return NULL;
 }
 
+static struct nvme_ctrlr *
+nvme_ctrlr_get_by_name(const char *name)
+{
+	struct nvme_ctrlr *nvme_ctrlr;
+
+	if (name == NULL) {
+		return NULL;
+	}
+
+	TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
+		if (strcmp(name, nvme_ctrlr->name) == 0) {
+			return nvme_ctrlr;
+		}
+	}
+
+	return NULL;
+}
+
 static bool
 probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
 	 struct spdk_nvme_ctrlr_opts *opts)
@@ -1090,6 +1108,25 @@ spdk_bdev_nvme_create(struct spdk_nvme_transport_id *trid,
 	return 0;
 }
 
+int
+spdk_bdev_nvme_delete(const char *name)
+{
+	struct nvme_ctrlr *nvme_ctrlr = NULL;
+
+	if (name == NULL) {
+		return -EINVAL;
+	}
+
+	nvme_ctrlr = nvme_ctrlr_get_by_name(name);
+	if (nvme_ctrlr == NULL) {
+		SPDK_ERRLOG("Failed to find NVMe controller\n");
+		return -ENODEV;
+	}
+
+	remove_cb(NULL, nvme_ctrlr->ctrlr);
+	return 0;
+}
+
 static int
 bdev_nvme_library_init(void)
 {


@@ -76,4 +76,13 @@ int spdk_bdev_nvme_create(struct spdk_nvme_transport_id *trid,
 			  const char *hostnqn);
 struct spdk_nvme_ctrlr *spdk_bdev_nvme_get_ctrlr(struct spdk_bdev *bdev);
+
+/**
+ * Delete NVMe controller with all bdevs on top of it.
+ * Requires passing the name of the NVMe controller.
+ *
+ * \param name NVMe controller name
+ * \return zero on success, -EINVAL on wrong parameters, or -ENODEV if the controller is not found
+ */
+int spdk_bdev_nvme_delete(const char *name);
 
 #endif // SPDK_BDEV_NVME_H


@@ -160,6 +160,58 @@ invalid:
 }
 SPDK_RPC_REGISTER("construct_nvme_bdev", spdk_rpc_construct_nvme_bdev, SPDK_RPC_RUNTIME)
 
+struct rpc_delete_nvme {
+	char *name;
+};
+
+static void
+free_rpc_delete_nvme(struct rpc_delete_nvme *req)
+{
+	free(req->name);
+}
+
+static const struct spdk_json_object_decoder rpc_delete_nvme_decoders[] = {
+	{"name", offsetof(struct rpc_delete_nvme, name), spdk_json_decode_string},
+};
+
+static void
+spdk_rpc_delete_nvme_ctrlr(struct spdk_jsonrpc_request *request,
+			   const struct spdk_json_val *params)
+{
+	struct rpc_delete_nvme req = {NULL};
+	struct spdk_json_write_ctx *w;
+	int rc = 0;
+
+	if (spdk_json_decode_object(params, rpc_delete_nvme_decoders,
+				    SPDK_COUNTOF(rpc_delete_nvme_decoders),
+				    &req)) {
+		rc = -EINVAL;
+		goto invalid;
+	}
+
+	rc = spdk_bdev_nvme_delete(req.name);
+	if (rc != 0) {
+		goto invalid;
+	}
+
+	free_rpc_delete_nvme(&req);
+
+	w = spdk_jsonrpc_begin_result(request);
+	if (w == NULL) {
+		return;
+	}
+
+	spdk_json_write_bool(w, true);
+	spdk_jsonrpc_end_result(request, w);
+	return;
+
+invalid:
+	spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+					 spdk_strerror(-rc));
+	free_rpc_delete_nvme(&req);
+}
+SPDK_RPC_REGISTER("delete_nvme_controller", spdk_rpc_delete_nvme_ctrlr, SPDK_RPC_RUNTIME)
+
 struct rpc_apply_firmware {
 	char *filename;
 	char *bdev_name;
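
The handler above decodes a single "name" parameter, forwards it to spdk_bdev_nvme_delete() and writes a bare boolean result on success; any failure is reported as an invalid-params error carrying the spdk_strerror() text for the negative return code. A sketch of the resulting wire exchange, assuming standard JSON-RPC 2.0 framing (the id value is arbitrary):

# Request a client would send to delete controller "Nvme0".
request = {
    "jsonrpc": "2.0",
    "method": "delete_nvme_controller",
    "params": {"name": "Nvme0"},
    "id": 1,
}

# Success path: spdk_json_write_bool(w, true) yields a bare boolean result.
success_reply = {"jsonrpc": "2.0", "id": 1, "result": True}

# Failure path: for an unknown name spdk_bdev_nvme_delete() returns -ENODEV
# and the handler answers with SPDK_JSONRPC_ERROR_INVALID_PARAMS (-32602)
# plus the matching strerror text ("No such device" on Linux).
error_reply = {
    "jsonrpc": "2.0",
    "id": 1,
    "error": {"code": -32602, "message": "No such device"},
}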


@@ -242,6 +242,16 @@ if __name__ == "__main__":
     p.add_argument('-n', '--subnqn', help='NVMe-oF target subnqn')
     p.set_defaults(func=construct_nvme_bdev)
 
+    @call_cmd
+    def delete_nvme_controller(args):
+        rpc.bdev.delete_nvme_controller(args.client,
+                                        name=args.name)
+
+    p = subparsers.add_parser('delete_nvme_controller',
+                              help='Delete a NVMe controller using controller name')
+    p.add_argument('name', help="Name of the controller")
+    p.set_defaults(func=delete_nvme_controller)
+
     @call_cmd
     def construct_rbd_bdev(args):
         print(rpc.bdev.construct_rbd_bdev(args.client,


@@ -177,6 +177,17 @@ def construct_nvme_bdev(client, name, trtype, traddr, adrfam=None, trsvcid=None,
     return client.call('construct_nvme_bdev', params)
 
 
+def delete_nvme_controller(client, name):
+    """Remove NVMe controller from the system.
+
+    Args:
+        name: controller name
+    """
+    params = {'name': name}
+    return client.call('delete_nvme_controller', params)
+
+
 def construct_rbd_bdev(client, pool_name, rbd_name, block_size, name=None):
     """Construct a Ceph RBD block device.


@@ -105,6 +105,10 @@ class UIRoot(UINode):
         response = rpc.bdev.construct_nvme_bdev(self.client, **kwargs)
         return response
 
+    @verbose
+    def delete_nvme_controller(self, **kwargs):
+        rpc.bdev.delete_nvme_controller(self.client, **kwargs)
+
     @verbose
     def create_null_bdev(self, **kwargs):
         response = rpc.bdev.construct_null_bdev(self.client, **kwargs)


@@ -120,6 +120,11 @@ rm -f $testdir/iscsi.conf
 iscsicleanup
 $rpc_py destruct_split_vbdev Nvme0n1
 $rpc_py delete_error_bdev EE_Malloc0
+
+if [ -z "$NO_NVME" ]; then
+	$rpc_py delete_nvme_controller Nvme0
+fi
+
 killprocess $pid
 report_test_completion "nightly_iscsi_ext4test"
 timing_exit ext4test


@@ -17,6 +17,10 @@ function remove_backends()
 	echo "INFO: Removing lvol stores"
 	$rpc_py destroy_lvol_store -l lvs_0
 
+	echo "INFO: Removing NVMe"
+	$rpc_py delete_nvme_controller Nvme0
+
 	return 0
 }
 


@@ -25,6 +25,9 @@ function remove_backends()
 	$rpc_py destroy_lvol_store -l lvs0
 	echo "INFO: lvol store lvs0 removed"
 
+	echo "INFO: Removing NVMe"
+	$rpc_py delete_nvme_controller Nvme0
+
 	return 0
 }


@@ -39,14 +39,14 @@ def delete_subbdevs(args, bdev, rpc_bdevs):
     if construct_method == 'construct_nvme_bdev':
         for rpc_bdev in rpc_bdevs:
             if bdev_name in rpc_bdev['name'] and rpc_bdev['product_name'] == "NVMe disk":
-                args.client.call('delete_bdev', {'name': "%s" % rpc_bdev['name']})
+                args.client.call('delete_nvme_controller', {'name': "%s" % rpc_bdev['name'].split('n')[0]})
                 ret_value = True
 
     return ret_value
 
 
 def get_bdev_destroy_method(bdev):
-    destroy_method_map = {'construct_nvme_bdev': "delete_bdev",
+    destroy_method_map = {'construct_nvme_bdev': "delete_nvme_controller",
                           'construct_malloc_bdev': "delete_malloc_bdev",
                           'construct_null_bdev': "delete_null_bdev",
                           'construct_rbd_bdev': "delete_rbd_bdev",
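
One detail worth noting in the converter change above: the new RPC takes a controller name while the configuration only records bdev names, so the controller name is derived by splitting the bdev name on 'n'. This works because default NVMe bdev names follow the <controller>n<namespace-id> pattern and the generated controller names ("Nvme0", "Nvme1", ...) contain no other lowercase 'n'; a user-chosen controller name containing one would break the derivation. A tiny illustration:

# "Nvme0n1" is namespace 1 of controller "Nvme0"; splitting at the first
# lowercase 'n' recovers the controller name.
bdev_name = "Nvme0n1"
controller = bdev_name.split('n')[0]
assert controller == "Nvme0"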


@@ -152,6 +152,11 @@ class Commands_Rpc(object):
         output, rc = self.rpc.delete_malloc_bdev(base_name)
         return rc
 
+    def delete_nvme_controller(self, controller_name):
+        print("INFO: RPC COMMAND delete_nvme_controller")
+        output, rc = self.rpc.delete_nvme_controller(controller_name)
+        return rc
+
     def destroy_lvol_bdev(self, bdev_name):
         print("INFO: RPC COMMAND destroy_lvol_bdev")
         output, rc = self.rpc.destroy_lvol_bdev(bdev_name)


@@ -638,7 +638,7 @@ class TestCases(object):
             fail_count += 1
         traddr = self._find_traddress_for_nvme("Nvme0")
         if traddr != -1:
-            self.c.delete_bdev("Nvme0n1")
+            self.c.delete_nvme_controller("Nvme0")
             self.c.construct_nvme_bdev("Nvme0", "PCIe", traddr)
             # wait 1 second to allow time for lvolstore tasting
             sleep(1)
@@ -1507,7 +1507,7 @@ class TestCases(object):
         traddr = self._find_traddress_for_nvme("Nvme0")
         if traddr != -1:
             # delete NVMe bdev
-            self.c.delete_bdev("Nvme0n1")
+            self.c.delete_nvme_controller("Nvme0")
             # add NVMe bdev
             self.c.construct_nvme_bdev("Nvme0", "PCIe", traddr)
             # wait 1 second to allow time for lvolstore tasting


@@ -72,7 +72,7 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
 	$rpc_py destroy_lvol_store -l lvs_n_0
 	$rpc_py destroy_lvol_bdev "$lb_guid"
 	$rpc_py destroy_lvol_store -l lvs_0
-	$rpc_py delete_bdev "Nvme0n1"
+	$rpc_py delete_nvme_controller Nvme0
 fi
 
 trap - SIGINT SIGTERM EXIT


@@ -80,7 +80,7 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
 		$rpc_py destroy_lvol_store -l lvs_n_0
 		$rpc_py destroy_lvol_bdev "$lb_guid"
 		$rpc_py destroy_lvol_store -l lvs_0
-		$rpc_py delete_bdev "Nvme0n1"
+		$rpc_py delete_nvme_controller Nvme0
 	fi
 fi
 


@@ -46,7 +46,7 @@ function blk_hotremove_tc1() {
 	traddr=""
 	# 1. Run the command to hot remove NVMe disk.
 	get_traddr "Nvme0"
-	delete_nvme "Nvme0n1"
+	delete_nvme "Nvme0"
 	# 2. If vhost had crashed then tests would stop running
 	sleep 1
 	add_nvme "HotInNvme0" "$traddr"
@@ -73,7 +73,7 @@ function blk_hotremove_tc2() {
 	local last_pid=$!
 	sleep 3
 	# 4. Run the command to hot remove NVMe disk.
-	delete_nvme "HotInNvme0n1"
+	delete_nvme "HotInNvme0"
 	local retcode=0
 	wait_for_finish $last_pid || retcode=$?
 	# 5. Check that fio job run on hot-removed device stopped.
@@ -115,7 +115,7 @@ function blk_hotremove_tc3() {
 	local last_pid=$!
 	sleep 3
 	# 4. Run the command to hot remove of first NVMe disk.
-	delete_nvme "HotInNvme1n1"
+	delete_nvme "HotInNvme1"
 	local retcode=0
 	wait_for_finish $last_pid || retcode=$?
 	# 6. Check that fio job run on hot-removed device stopped.
@@ -162,7 +162,7 @@ function blk_hotremove_tc4() {
 	sleep 3
 	prepare_fio_cmd_tc1 "0 1"
 	# 5. Run the command to hot remove of first NVMe disk.
-	delete_nvme "HotInNvme2n1"
+	delete_nvme "HotInNvme2"
 	local retcode_vm0=0
 	local retcode_vm1=0
 	wait_for_finish $last_pid_vm0 || retcode_vm0=$?
@@ -206,7 +206,7 @@ function blk_hotremove_tc5() {
 	local last_pid=$!
 	sleep 3
 	# 4. Run the command to hot remove of first NVMe disk.
-	delete_nvme "HotInNvme3n1"
+	delete_nvme "HotInNvme3"
 	local retcode=0
 	wait_for_finish $last_pid || retcode=$?
 	# 5. Check that fio job run on hot-removed device stopped.


@@ -222,7 +222,7 @@ function get_traddr() {
 }
 
 function delete_nvme() {
-	$rpc_py delete_bdev $1
+	$rpc_py delete_nvme_controller $1
 }
 
 function add_nvme() {
function add_nvme() {


@@ -94,6 +94,7 @@ function cleanup_after_tests() {
 	$rpc_py remove_vhost_scsi_target naa.Nvme0n1p0.0 1
 	$rpc_py remove_vhost_scsi_target naa.Nvme0n1p1.0 0
 	$rpc_py remove_vhost_scsi_target naa.Nvme0n1p2.1 0
+	$rpc_py delete_nvme_controller Nvme0
 }
 
 hotattach_tc1


@@ -42,7 +42,7 @@ function scsi_hotremove_tc1() {
 	traddr=""
 	get_traddr "Nvme0"
 	# 1. Run the command to hot remove NVMe disk.
-	delete_nvme "Nvme0n1"
+	delete_nvme "Nvme0"
 	# 2. If vhost had crashed then tests would stop running
 	sleep 1
 	add_nvme "HotInNvme0" "$traddr"
@@ -73,7 +73,7 @@ function scsi_hotremove_tc2() {
 	local last_pid=$!
 	sleep 3
 	# 4. Run the command to hot remove NVMe disk.
-	delete_nvme "HotInNvme0n1"
+	delete_nvme "HotInNvme0"
 
 	# 5. Check that fio job run on hot-remove device stopped on VM.
 	#    Expected: Fio should return error message and return code != 0.
@@ -116,7 +116,7 @@ function scsi_hotremove_tc3() {
 	local last_pid=$!
 	sleep 3
 	# 4. Run the command to hot remove NVMe disk.
-	delete_nvme "HotInNvme1n1"
+	delete_nvme "HotInNvme1"
 	# 5. Check that fio job run on hot-remove device stopped on first VM.
 	#    Expected: Fio should return error message and return code != 0.
 	wait_for_finish $last_pid || retcode=$?
@@ -167,7 +167,7 @@ function scsi_hotremove_tc4() {
 	# 5. Run the command to hot remove NVMe disk.
 	traddr=""
 	get_traddr "Nvme0"
-	delete_nvme "HotInNvme2n1"
+	delete_nvme "HotInNvme2"
 	# 6. Check that fio job run on hot-removed devices stopped.
 	#    Expected: Fio should return error message and return code != 0.
 	local retcode_vm0=0


@@ -193,6 +193,8 @@ timing_enter remove_kernel_vhost
 remove_kernel_vhost
 timing_exit remove_kernel_vhost
 
+$RPC_PY delete_nvme_controller Nvme0
+
 timing_enter spdk_vhost_kill
 spdk_vhost_kill
 timing_exit spdk_vhost_kill


@@ -90,5 +90,8 @@ notice "Shutting down virtual machine..."
 vm_shutdown_all
 
 clean_lvol_cfg
+
+$rpc_py delete_nvme_controller Nvme0
+
 notice "Shutting down SPDK vhost app..."
 spdk_vhost_kill


@@ -45,10 +45,10 @@ function migration_tc2_cleanup_vhost_config()
 	notice "Removing vhost devices & controllers via RPC ..."
 	# Delete bdev first to remove all LUNs and SCSI targets
-	$rpc_0 delete_bdev Nvme0n1
+	$rpc_0 delete_nvme_controller Nvme0
 	$rpc_0 remove_vhost_controller $incoming_vm_ctrlr
 
-	$rpc_1 delete_bdev Nvme0n1
+	$rpc_1 delete_nvme_controller Nvme0
 	$rpc_1 remove_vhost_controller $target_vm_ctrlr
 
 	notice "killing vhost app"


@@ -82,7 +82,7 @@ function host1_cleanup_vhost()
 	vm_kill $incoming_vm
 
 	notice "Removing bdev & controller from vhost on local server"
-	$rpc_0 delete_bdev Nvme0n1
+	$rpc_0 delete_nvme_controller Nvme0
 	$rpc_0 remove_vhost_controller $incoming_vm_ctrlr
 
 	notice "Shutting down vhost app"


@@ -16,7 +16,7 @@ function host_2_cleanup_vhost()
 	vm_kill $target_vm
 
 	notice "Removing bdev & controller from vhost 1 on remote server"
-	$rpc delete_bdev Nvme0n1
+	$rpc delete_nvme_controller Nvme0
 	$rpc remove_vhost_controller $target_vm_ctrl
 
 	notice "Shutting down vhost app"


@@ -126,4 +126,7 @@ if [[ -z $x ]]; then
 fi
 
 blk_ro_tc1
+
+$rpc_py delete_nvme_controller Nvme0
+
 spdk_vhost_kill