test/vhost: Blk hot remove tests

Change-Id: Ibae9dfcb29fc94c38f48c788ae83477b2719b9fa
Signed-off-by: Pawel Kaminski <pawelx.kaminski@intel.com>
Reviewed-on: https://review.gerrithub.io/392454
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Pawel Kaminski 2017-12-20 14:26:59 +01:00 committed by Jim Harris
parent 77b40b64db
commit 2114019360
5 changed files with 223 additions and 13 deletions

View File

@ -278,7 +278,7 @@ function vm_create_ssh_config()
(
echo "Host *"
echo " ControlPersist=10m"
echo " ConnectTimeout=2"
echo " ConnectTimeout=1"
echo " Compression=no"
echo " ControlMaster=auto"
echo " UserKnownHostsFile=/dev/null"
@ -362,6 +362,9 @@ function vm_os_booted()
return 1
fi
# Shutdown existing master. Ignore errors as it might not exist.
ssh -O exit -F $VM_BASE_DIR/ssh_config -p $(vm_ssh_socket $1) 127.0.0.1 2> /dev/null || true
if ! vm_ssh $1 "true" 2>/dev/null; then
return 1
fi

View File

@ -0,0 +1,171 @@
# Build the fio client command line for the given VMs and push a per-VM
# job file listing each VM's vhost-blk disks.
# Arguments:
#   $1 - space-separated list of VM numbers (intentionally unquoted below)
# Globals read:  fio_bin, fio_job, tmp_detach_job, VM_BASE_DIR
# Globals written: run_fio (the assembled fio command), vm_dir, SCSI_DISK
function prepare_fio_cmd_tc1() {
	print_test_fio_header

	run_fio="$fio_bin --eta=never "
	for vm_num in $1; do
		cp "$fio_job" "$tmp_detach_job"
		vm_dir=$VM_BASE_DIR/$vm_num
		# vm_check_blk_location populates SCSI_DISK with the VM's block devices.
		vm_check_blk_location "$vm_num"
		for disk in $SCSI_DISK; do
			echo "[nvme-host$disk]" >> "$tmp_detach_job"
			echo "filename=/dev/$disk" >> "$tmp_detach_job"
		done
		vm_scp "$vm_num" "$tmp_detach_job" 127.0.0.1:/root/default_integrity_2discs.job
		run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_2discs.job "
		rm "$tmp_detach_job"
	done
}
# Tear down the four vhost-blk controllers created by the test cases.
function remove_vhost_controllers() {
	local ctrlr
	for ctrlr in naa.Nvme0n1p0.0 naa.Nvme0n1p1.0 naa.Nvme0n1p2.1 naa.Nvme0n1p3.1; do
		$rpc_py remove_vhost_controller $ctrlr
	done
}
# Test case 1: hot remove an NVMe device while no vhost controller
# uses it, then re-attach it under a new name for later test cases.
# Globals written: traddr (set by get_traddr, reused by add_nvme)
function blk_hotremove_tc1() {
	echo "Blk hotremove test case 1"
	traddr=""
	# Look up the PCI transport address of Nvme0 so the device can be
	# re-attached at the same address after removal.
	get_traddr "Nvme0"
	delete_nvme "Nvme0n1"
	sleep 1
	# Re-attach the same physical device as "HotInNvme0".
	add_nvme "HotInNvme0" "$traddr"
	sleep 1
}
# Test case 2: hot remove a device backing one vhost-blk controller
# while fio runs against it; fio must fail (retcode 1) and the guest
# must survive a reboot with fio failing again on the missing disk.
function blk_hotremove_tc2() {
	echo "Blk hotremove test case 2"
	# One controller on the hot-removable split, three on stable Nvme1 splits.
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p0.0 HotInNvme0n1p0
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p1.0 Nvme1n1p0
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p2.1 Nvme1n1p1
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p3.1 Nvme1n1p2
	vm_run_with_arg "0 1"
	vms_prepare "0"

	traddr=""
	get_traddr "Nvme0"
	prepare_fio_cmd_tc1 "0"
	$run_fio &
	local fio_pid=$!
	sleep 3
	# Pull the disk out from under the running fio job.
	delete_nvme "HotInNvme0n1"

	local rc=0
	wait_for_finish $fio_pid || rc=$?
	check_fio_retcode "Blk hotremove test case 2: Iteration 1." 1 $rc

	reboot_all_and_prepare "0"
	$run_fio &
	rc=0
	wait_for_finish $! || rc=$?
	check_fio_retcode "Blk hotremove test case 2: Iteration 2." 1 $rc

	vm_shutdown_all
	remove_vhost_controllers
	# Re-attach the device for the next test case.
	add_nvme "HotInNvme1" "$traddr"
	sleep 1
}
# Test case 3: the hot-removable device backs controllers on both VMs,
# but fio only runs on VM 0; its job must fail (retcode 1) after the
# removal, and again after a reboot.
function blk_hotremove_tc3() {
	echo "Blk hotremove test case 3"
	# HotInNvme1 splits are exposed to VM 0 (p0.0) and VM 1 (p2.1).
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p0.0 HotInNvme1n1p0
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p1.0 Nvme1n1p0
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p2.1 HotInNvme1n1p1
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p3.1 Nvme1n1p1
	vm_run_with_arg "0 1"
	vms_prepare "0 1"

	traddr=""
	get_traddr "Nvme0"
	prepare_fio_cmd_tc1 "0"
	$run_fio &
	local fio_pid=$!
	sleep 3
	# Remove the device shared by both VMs while VM 0's fio is running.
	delete_nvme "HotInNvme1n1"

	local rc=0
	wait_for_finish $fio_pid || rc=$?
	check_fio_retcode "Blk hotremove test case 3: Iteration 1." 1 $rc

	reboot_all_and_prepare "0"
	$run_fio &
	rc=0
	wait_for_finish $! || rc=$?
	check_fio_retcode "Blk hotremove test case 3: Iteration 2." 1 $rc

	vm_shutdown_all
	remove_vhost_controllers
	# Re-attach the device for the next test case.
	add_nvme "HotInNvme2" "$traddr"
	sleep 1
}
# Test case 4: hot remove a device while BOTH VMs run fio concurrently
# against controllers backed by it; each job must fail (retcode 1), and
# a combined fio run after reboot must fail as well.
function blk_hotremove_tc4() {
	echo "Blk hotremove test case 4"
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p0.0 HotInNvme2n1p0
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p1.0 Nvme1n1p0
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p2.1 HotInNvme2n1p1
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p3.1 Nvme1n1p1
	vm_run_with_arg "0 1"
	vms_prepare "0 1"

	# Start an independent fio job on each VM.
	prepare_fio_cmd_tc1 "0"
	$run_fio &
	local fio_pid_vm0=$!
	prepare_fio_cmd_tc1 "1"
	$run_fio &
	local fio_pid_vm1=$!
	sleep 3
	# Rebuild run_fio for both VMs now; it is reused after the reboot below.
	prepare_fio_cmd_tc1 "0 1"
	delete_nvme "HotInNvme2n1"

	local rc_vm0=0
	local rc_vm1=0
	wait_for_finish $fio_pid_vm0 || rc_vm0=$?
	wait_for_finish $fio_pid_vm1 || rc_vm1=$?
	check_fio_retcode "Blk hotremove test case 4: Iteration 1." 1 $rc_vm0
	check_fio_retcode "Blk hotremove test case 4: Iteration 2." 1 $rc_vm1

	reboot_all_and_prepare "0 1"
	$run_fio &
	local rc=0
	wait_for_finish $! || rc=$?
	check_fio_retcode "Blk hotremove test case 4: Iteration 3." 1 $rc

	vm_shutdown_all
	remove_vhost_controllers
	# NOTE(review): traddr is inherited from the previous test case's
	# get_traddr call — confirm tc4 is never run standalone.
	add_nvme "HotInNvme3" "$traddr"
	sleep 1
}
# Test case 5: hot remove a device backing only VM 0's controller while
# fio runs on VM 0; the job must fail (retcode 1) before and after a
# guest reboot.
function blk_hotremove_tc5() {
	echo "Blk hotremove test case 5"
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p0.0 HotInNvme3n1p0
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p1.0 Nvme1n1p0
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p2.1 Nvme1n1p1
	$rpc_py construct_vhost_blk_controller naa.Nvme0n1p3.1 Nvme1n1p2
	vm_run_with_arg "0 1"
	vms_prepare "0 1"

	prepare_fio_cmd_tc1 "0"
	$run_fio &
	local fio_pid=$!
	sleep 3
	# Remove VM 0's disk while its fio job is running.
	delete_nvme "HotInNvme3n1"

	local rc=0
	wait_for_finish $fio_pid || rc=$?
	check_fio_retcode "Blk hotremove test case 5: Iteration 1." 1 $rc

	reboot_all_and_prepare "0"
	$run_fio &
	rc=0
	wait_for_finish $! || rc=$?
	check_fio_retcode "Blk hotremove test case 5: Iteration 2." 1 $rc

	vm_shutdown_all
	remove_vhost_controllers
	# NOTE(review): traddr is set by earlier test cases — confirm tc5 is
	# never run standalone.
	add_nvme "HotInNvme4" "$traddr"
	sleep 1
}
# Driver: set up the VMs once, then run all blk hot remove test cases in
# order. Ordering matters — each test case re-attaches the NVMe device
# under the name (HotInNvmeN) that the next case expects.
vms_setup
blk_hotremove_tc1
blk_hotremove_tc2
blk_hotremove_tc3
blk_hotremove_tc4
blk_hotremove_tc5

View File

@ -9,6 +9,7 @@ used_vms=""
disk_split=""
x=""
scsi_hot_remove_test=0
blk_hot_remove_test=0
function usage() {
@ -45,6 +46,7 @@ while getopts 'xh-:' optchar; do
test-type=*) test_type="${OPTARG#*=}" ;;
vm=*) vms+=("${OPTARG#*=}") ;;
scsi-hotremove-test) scsi_hot_remove_test=1 ;;
blk-hotremove-test) blk_hot_remove_test=1 ;;
*) usage $0 "Invalid argument '$OPTARG'" ;;
esac
;;
@ -133,21 +135,21 @@ function vms_reboot_all() {
notice "Rebooting all vms "
for vm_num in $1; do
vm_ssh $vm_num "reboot" || true
while vm_os_booted $vm_num; do
sleep 0.5
done
done
vm_wait_for_boot 600 $1
vm_wait_for_boot 300 $1
}
function check_fio_retcode() {
fio_retcode=$3
local fio_retcode=$3
echo $1
retcode_expected=$2
local retcode_expected=$2
if [ $retcode_expected == 0 ]; then
if [ $fio_retcode != 0 ]; then
warning " Fio test ended with error."
vm_shutdown_all
spdk_vhost_kill
exit 1
error " Fio test ended with error."
else
notice " Fio test ended with success."
fi
@ -155,14 +157,30 @@ function check_fio_retcode() {
if [ $fio_retcode != 0 ]; then
notice " Fio test ended with expected error."
else
warning " Fio test ended with unexpected success."
vm_shutdown_all
spdk_vhost_kill
exit 1
error " Fio test ended with unexpected success."
fi
fi
}
# Wait for a background process to finish, polling with a timeout.
# Arguments:
#   $1 - PID to wait for
#   $2 - max number of 0.5s poll intervals before timing out (default 30)
# Calls error() (project helper) on timeout.
# Returns: the exit status of the waited process.
function wait_for_finish() {
	local wait_for_pid=$1
	local sequence=${2:-30}
	local i
	for ((i = 0; i < sequence; i++)); do
		# kill -0 only probes for existence; silence "no such process"
		# noise once the process has exited.
		if kill -0 "$wait_for_pid" 2> /dev/null; then
			sleep 0.5
		else
			break
		fi
	done
	if kill -0 "$wait_for_pid" 2> /dev/null; then
		error "Timeout for fio command"
	fi
	wait "$wait_for_pid"
}
function reboot_all_and_prepare() {
vms_reboot_all "$1"
vms_prepare "$1"

View File

@ -3,6 +3,10 @@ set -e
BASE_DIR=$(readlink -f $(dirname $0))
. $BASE_DIR/common.sh
if [[ $scsi_hot_remove_test == 1 ]] && [[ $blk_hot_remove_test == 1 ]]; then
notice "Vhost-scsi and vhost-blk hotremove tests cannot be run together"
fi
# Add split section into vhost config
function gen_config() {
cp $BASE_DIR/vhost.conf.base $BASE_DIR/vhost.conf.in
@ -13,6 +17,7 @@ function gen_config() {
Split HotInNvme0n1 2
Split HotInNvme1n1 2
Split HotInNvme2n1 2
Split HotInNvme3n1 2
END_OF_CONFIG
}
@ -63,7 +68,7 @@ trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
gen_config
run_vhost
rm $BASE_DIR/vhost.conf.in
if [[ $scsi_hot_remove_test == 0 ]]; then
if [[ $scsi_hot_remove_test == 0 ]] && [[ $blk_hot_remove_test == 0 ]]; then
pre_hot_attach_detach_test_case
$BASE_DIR/scsi_hotattach.sh --fio-bin=$fio_bin &
first_script=$!
@ -77,4 +82,7 @@ fi
if [[ $scsi_hot_remove_test == 1 ]]; then
source $BASE_DIR/scsi_hotremove.sh
fi
if [[ $blk_hot_remove_test == 1 ]]; then
source $BASE_DIR/blk_hotremove.sh
fi
post_test_case

View File

@ -25,6 +25,7 @@ case $1 in
echo " -ilbn|--integrity-lvol-blk-nightly for running an nightly integrity test with vhost blk and lvol backends"
echo " -hp|--hotplug for running hotplug tests"
echo " -shr|--scsi-hot-remove for running scsi hot remove tests"
echo " -bhr|--blk-hot-remove for running blk hot remove tests"
echo " -ro|--readonly for running readonly test for vhost blk"
echo " -h |--help prints this message"
echo ""
@ -182,6 +183,15 @@ case $1 in
--scsi-hotremove-test \
--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
;;
-bhr|--blk-hot-remove)
echo 'Running blk hotremove tests suite...'
$WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
--test-type=spdk_vhost_blk \
--blk-hotremove-test \
--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
;;
-ro|--readonly)
echo 'Running readonly tests suite...'
$WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1