test/vhost: add vhost block option to fiotest

Allow running VMs in fio tests with vhost-block instead of
vhost-scsi.

Change-Id: I89ee437264ddcda0aaefdb71744cee6ce7d451d8
Signed-off-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-on: https://review.gerrithub.io/365808
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
This commit is contained in:
Karol Latecki 2017-06-16 12:03:46 +02:00 committed by Daniel Verkamp
parent 908cab3f1c
commit 0abf67ce31
5 changed files with 112 additions and 49 deletions

View File

@ -122,6 +122,7 @@ fi
if [ $SPDK_TEST_VHOST -eq 1 ]; then
timing_enter vhost
run_test ./test/vhost/spdk_vhost.sh --integrity-blk
run_test ./test/vhost/spdk_vhost.sh --integrity
timing_exit vhost
fi

View File

@ -7,7 +7,7 @@ dry_run=false
no_shutdown=false
fio_bin="fio"
fio_jobs="$BASE_DIR/fio_jobs/"
test_type=spdk_vhost
test_type=spdk_vhost_scsi
reuse_vms=false
force_build=false
vms=()
@ -25,7 +25,8 @@ function usage()
echo " --test-type=TYPE Perform specified test:"
echo " virtio - test host virtio-scsi-pci using file as disk image"
echo " kernel_vhost - use kernel driver vhost-scsi"
echo " spdk_vhost - use spdk vhost"
echo " spdk_vhost_scsi - use spdk vhost scsi"
echo " spdk_vhost_blk - use spdk vhost block"
echo "-x set -x for script debug"
echo " --fio-bin=FIO Use specific fio binary (will be uploaded to VM)"
echo " --qemu-src=QEMU_DIR Location of the QEMU sources"
@ -42,6 +43,8 @@ function usage()
echo " NUM - VM number (mandatory)"
echo " OS - VM os disk path (optional)"
echo " DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
echo " If test-type=spdk_vhost_blk then each disk can have additional size parameter, e.g."
echo " --vm=X,os.qcow,DISK_size_35G; unit can be M or G; default - 20G"
echo " --disk-split By default all test types execute fio jobs on all disks which are available on guest"
echo " system. Use this option if only some of the disks should be used for testing."
echo " Example: --disk-split=4,1-3 will result in VM 1 using its first disk (ex. /dev/sda)"
@ -113,7 +116,7 @@ fi
vm_kill_all
if [[ $test_type == "spdk_vhost" ]]; then
if [[ $test_type =~ "spdk_vhost" ]]; then
echo "==============="
echo ""
echo "INFO: running SPDK"
@ -151,45 +154,52 @@ for vm_conf in ${vms[@]}; do
[[ x"${conf[1]}" != x"" ]] && setup_cmd+=" --os=${conf[1]}"
[[ x"${conf[2]}" != x"" ]] && setup_cmd+=" --disk=${conf[2]}"
if [[ $test_type == "spdk_vhost" ]]; then
echo "INFO: Trying to remove inexistent controller"
if $rpc_py remove_vhost_scsi_controller unk0 > /dev/null; then
echo "ERROR: Removing inexistent controller succeeded, but it shouldn't"
false
fi
if [[ $test_type =~ "spdk_vhost" ]]; then
echo "INFO: Adding device via RPC ..."
echo ""
while IFS=':' read -ra disks; do
for disk in "${disks[@]}"; do
echo "INFO: Creating controller naa.$disk.${conf[0]}"
$rpc_py construct_vhost_scsi_controller naa.$disk.${conf[0]}
if [[ "$test_type" == "spdk_vhost_blk" ]]; then
disk=${disk%%_*}
echo "INFO: Creating vhost block controller naa.$disk.${conf[0]} with device $disk"
$rpc_py construct_vhost_blk_controller naa.$disk.${conf[0]} $disk
else
echo "INFO: Trying to remove inexistent controller"
if $rpc_py remove_vhost_scsi_controller unk0 > /dev/null; then
echo "ERROR: Removing inexistent controller succeeded, but it shouldn't"
false
fi
echo "INFO: Creating controller naa.$disk.${conf[0]}"
$rpc_py construct_vhost_scsi_controller naa.$disk.${conf[0]}
echo "INFO: Adding initial device (0) to naa.$disk.${conf[0]}"
$rpc_py add_vhost_scsi_lun naa.$disk.${conf[0]} 0 $disk
echo "INFO: Adding initial device (0) to naa.$disk.${conf[0]}"
$rpc_py add_vhost_scsi_lun naa.$disk.${conf[0]} 0 $disk
echo "INFO: Trying to remove inexistent device on existing controller"
if $rpc_py remove_vhost_scsi_dev naa.$disk.${conf[0]} 1 > /dev/null; then
echo "ERROR: Removing inexistent device (1) from controller naa.$disk.${conf[0]} succeeded, but it shouldn't"
false
echo "INFO: Trying to remove inexistent device on existing controller"
if $rpc_py remove_vhost_scsi_dev naa.$disk.${conf[0]} 1 > /dev/null; then
echo "ERROR: Removing inexistent device (1) from controller naa.$disk.${conf[0]} succeeded, but it shouldn't"
false
fi
echo "INFO: Trying to remove existing device from a controller"
$rpc_py remove_vhost_scsi_dev naa.$disk.${conf[0]} 0
echo "INFO: Trying to remove a just-deleted device from a controller again"
if $rpc_py remove_vhost_scsi_dev naa.$disk.${conf[0]} 0 > /dev/null; then
echo "ERROR: Removing device 0 from controller naa.$disk.${conf[0]} succeeded, but it shouldn't"
false
fi
echo "INFO: Re-adding device 0 to naa.$disk.${conf[0]}"
$rpc_py add_vhost_scsi_lun naa.$disk.${conf[0]} 0 $disk
fi
echo "INFO: Trying to remove existing device from a controller"
$rpc_py remove_vhost_scsi_dev naa.$disk.${conf[0]} 0
echo "INFO: Trying to remove a just-deleted device from a controller again"
if $rpc_py remove_vhost_scsi_dev naa.$disk.${conf[0]} 0 > /dev/null; then
echo "ERROR: Removing device 0 from controller naa.$disk.${conf[0]} succeeded, but it shouldn't"
false
fi
echo "INFO: Re-adding device 0 to naa.$disk.${conf[0]}"
$rpc_py add_vhost_scsi_lun naa.$disk.${conf[0]} 0 $disk
done
done <<< "${conf[2]}"
unset IFS;
$rpc_py get_vhost_scsi_controllers
$rpc_py get_vhost_blk_controllers
fi
$setup_cmd
done
@ -229,10 +239,13 @@ for vm_num in $used_vms; do
echo "INFO: Setting up hostname: $host_name"
vm_ssh $vm_num "hostname $host_name"
vm_start_fio_server $fio_bin $readonly $vm_num
vm_check_scsi_location $vm_num
SCSI_DISK="${SCSI_DISK::-1}"
#vm_reset_scsi_devices $vm_num $SCSI_DISK
if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
vm_check_scsi_location $vm_num
# vm_reset_scsi_devices $vm_num $SCSI_DISK
elif [[ "$test_type" == "spdk_vhost_blk" ]]; then
vm_check_blk_location $vm_num
fi
run_fio+="127.0.0.1:$(cat $vm_dir/fio_socket):"
for disk in $SCSI_DISK; do
@ -257,9 +270,11 @@ fi
$run_fio
#for vm_num in $used_vms; do
#vm_reset_scsi_devices $vm_num $SCSI_DISK
#done
#if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
# for vm_num in $used_vms; do
# vm_reset_scsi_devices $vm_num $SCSI_DISK
# done
#fi
if ! $no_shutdown; then
echo "==============="
@ -268,16 +283,21 @@ if ! $no_shutdown; then
vm_shutdown_all
echo "INFO: waiting 2 seconds to let all VMs die"
sleep 2
if [[ $test_type == "spdk_vhost" ]]; then
if [[ $test_type =~ "spdk_vhost" ]]; then
echo "INFO: Removing vhost devices & controllers via RPC ..."
for vm_conf in ${vms[@]}; do
IFS=',' read -ra conf <<< "$vm_conf"
while IFS=':' read -ra disks; do
for disk in "${disks[@]}"; do
disk=${disk%%_*}
echo "INFO: Removing all vhost devices from controller naa.$disk.${conf[0]}"
$rpc_py remove_vhost_scsi_dev naa.$disk.${conf[0]} 0
$rpc_py remove_vhost_scsi_controller naa.$disk.${conf[0]}
if [[ "$test_type" == "spdk_vhost_blk" ]]; then
$rpc_py remove_vhost_blk_controller naa.$disk.${conf[0]}
else
$rpc_py remove_vhost_scsi_dev naa.$disk.${conf[0]} 0
$rpc_py remove_vhost_scsi_controller naa.$disk.${conf[0]}
fi
done
done <<< "${conf[2]}"
done

View File

@ -486,7 +486,6 @@ function vm_setup()
echo "INFO: TASK MASK: $task_mask"
local cmd="taskset -a $task_mask $INSTALL_DIR/bin/qemu-system-x86_64 ${eol}"
local vm_socket_offset=$(( 10000 + 100 * vm_num ))
local ssh_socket=$(( vm_socket_offset + 0 ))
@ -555,12 +554,23 @@ function vm_setup()
cmd+="-device scsi-hd,drive=hd$i,vendor=$raw_name ${eol}"
cmd+="-drive if=none,id=hd$i,file=$raw_disk,format=raw$raw_cache ${eol}"
;;
spdk_vhost)
spdk_vhost_scsi)
echo "INFO: using socket $SPDK_VHOST_SCSI_TEST_DIR/naa.$disk.$vm_num"
cmd+="-chardev socket,id=char_$disk,path=$SPDK_VHOST_SCSI_TEST_DIR/naa.$disk.$vm_num ${eol}"
cmd+="-device vhost-user-scsi-pci,id=scsi_$disk,num_queues=$cpu_num,chardev=char_$disk ${eol}"
;;
spdk_vhost_blk)
[[ $disk =~ _size_([0-9]+[MG]?) ]] || true
size=${BASH_REMATCH[1]}
if [ -z "$size" ]; then
size="20G"
fi
disk=${disk%%_*}
echo "INFO: using socket $SPDK_VHOST_SCSI_TEST_DIR/naa.$disk.$vm_num"
cmd+="-chardev socket,id=char_$disk,path=$SPDK_VHOST_SCSI_TEST_DIR/naa.$disk.$vm_num ${eol}"
cmd+="-device vhost-user-blk-pci,chardev=char_$disk,"
cmd+="logical_block_size=4096,size=$size ${eol}"
;;
kernel_vhost)
if [[ -z $disk ]]; then
error "need WWN for $disk_type"
@ -573,7 +583,7 @@ function vm_setup()
cmd+=" -device vhost-scsi-pci,wwpn=$disk ${eol}"
;;
*)
error "unknown mode '$disk_type', use: virtio, spdk_vhost or kernel_vhost"
error "unknown mode '$disk_type', use: virtio, spdk_vhost_scsi, spdk_vhost_blk or kernel_vhost"
return 1
esac
done
@ -596,6 +606,7 @@ function vm_setup()
echo "rm -f $qemu_pid_file"
echo '$qemu_cmd'
echo "echo 'Waiting for QEMU pid file'"
echo "sleep 1"
echo "[[ ! -f $qemu_pid_file ]] && sleep 1"
echo "[[ ! -f $qemu_pid_file ]] && echo 'ERROR: no qemu pid file found' && exit 1"
echo
@ -766,7 +777,7 @@ function vm_check_scsi_location()
disk_type="$(cat $entry/device/vendor)"; \
if [[ $disk_type == INTEL* ]] || [[ $disk_type == RAWSCSI* ]] || [[ $disk_type == LIO-ORG* ]]; then \
fname=$(basename $entry); \
echo -n "$fname "; \
echo -n " $fname"; \
fi; \
done'
@ -789,6 +800,17 @@ function vm_reset_scsi_devices()
done
}
# Detect vhost-blk test disks (vd*) inside a VM.
#
# Lists /sys/block entries matching vd* on the guest and stores the result in
# the global SCSI_DISK variable (name re-used from the scsi path so the
# downstream fio setup works unchanged for both test types).
#
# Arguments: $1 - VM number (forwarded to vm_ssh)
# Returns:   0 on success, 1 (via error) when no vd* disk is found
function vm_check_blk_location()
{
	# nullglob so an unmatched vd* expands to nothing instead of the literal pattern
	local script='shopt -s nullglob; cd /sys/block; echo vd*'
	# quote "$1" to avoid word-splitting of the VM number argument
	SCSI_DISK="$(echo "$script" | vm_ssh "$1" bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no blk test disk found!"
		return 1
	fi
}
# Shutdown or kill any running VM and SPDK APP.
#
function at_app_exit()

View File

@ -15,7 +15,8 @@ function usage()
echo " --test-type=TYPE Perform specified test:"
echo " virtio - test host virtio-scsi-pci using file as disk image"
echo " kernel_vhost - use kernel driver vhost-scsi"
echo " spdk_vhost - use spdk vhost"
echo " spdk_vhost_scsi - use spdk vhost scsi"
echo " spdk_vhost_blk - use spdk vhost block"
echo " ---cache=CACHE Use CACHE for virtio test: "
echo " writethrough, writeback, none, unsafe or directsync"
echo " Default is writethrough"
@ -43,6 +44,7 @@ while getopts 'xf:h-:' optchar; do
work-dir=*) TEST_DIR="${OPTARG#*=}" ;;
raw-cache=*) raw_cache="--raw-cache=${OPTARG#*=}" ;;
test-type=*) test_type="${OPTARG#*=}" ;;
spdk-vhost-mode=*) spdk_vhost_mode="${OPTARG#*=}" ;;
disk=*) disk="${OPTARG#*=}" ;;
os=*) os="${OPTARG#*=}"
if [[ ! -r "$os" ]]; then
@ -65,7 +67,7 @@ done
. $BASE_DIR/common.sh
[[ -z "$os" ]] && os="$TEST_DIR/debian.qcow2"
[[ $test_type == "spdk_vhost" ]] && [[ -z "$disk" ]] && disk="$SPDK_VHOST_SCSI_TEST_DIR/usvhost"
[[ $test_type =~ "spdk_vhost" ]] && [[ -z "$disk" ]] && disk="$SPDK_VHOST_SCSI_TEST_DIR/usvhost"
if [[ $test_type == "kernel_vhost" ]] && [[ -z "$disk" ]]; then
echo "ERROR: for $test_type '--disk=WWN' is mandatory"
exit 1

View File

@ -21,7 +21,15 @@ case $param in
echo Running performance suite...
./fiotest/autotest.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
--vm=0,/home/sys_sgsw/vhost_vm_image.qcow2,Nvme0n1p0 \
--test-type=spdk_vhost \
--test-type=spdk_vhost_scsi \
--fio-jobs=$WORKDIR/fiotest/fio_jobs/default_performance.job \
--qemu-src=/home/sys_sgsw/vhost/qemu
;;
-pb|--performance-blk)
echo Running blk performance suite...
./fiotest/autotest.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
--vm=0,/home/sys_sgsw/vhost_vm_image.qcow2,Nvme0n1p0 \
--test-type=spdk_vhost_blk \
--fio-jobs=$WORKDIR/fiotest/fio_jobs/default_performance.job \
--qemu-src=/home/sys_sgsw/vhost/qemu
;;
@ -29,7 +37,15 @@ case $param in
echo Running integrity suite...
./fiotest/autotest.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
--vm=0,/home/sys_sgsw/vhost_vm_image.qcow2,Nvme0n1p0:Nvme0n1p1:Nvme0n1p2:Nvme0n1p3 \
--test-type=spdk_vhost \
--test-type=spdk_vhost_scsi \
--fio-jobs=$WORKDIR/fiotest/fio_jobs/default_integrity.job \
--qemu-src=/home/sys_sgsw/vhost/qemu
;;
-ib|--integrity-blk)
echo Running blk integrity suite...
./fiotest/autotest.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
--vm=0,/home/sys_sgsw/vhost_vm_image.qcow2,Nvme0n1p0:Nvme0n1p1:Nvme0n1p2:Nvme0n1p3 \
--test-type=spdk_vhost_blk \
--fio-jobs=$WORKDIR/fiotest/fio_jobs/default_integrity.job \
--qemu-src=/home/sys_sgsw/vhost/qemu
;;
@ -38,9 +54,11 @@ case $param in
VM_IMG=/home/sys_sgsw/vhost_scsi_vm_image.qcow2 ./integrity/integrity_start.sh
;;
-h|--help)
echo "-i|--integrity for running an integrity test"
echo "-i|--integrity for running an integrity test with vhost scsi"
echo "-f|--fs-integrity for running an integrity test with filesystem"
echo "-p|--performance for running a performance test"
echo "-p|--performance for running a performance test with vhost scsi"
echo "-ib|--integrity-blk for running an integrity test with vhost blk"
echo "-pb|--performance-blk for running a performance test with vhost blk"
echo "-h|--help prints this message"
;;
*)