test/vhost: Group all vhost generated files into a single directory

Change-Id: I12b23363a6bc954eabd7cbde510a8a44b176fbd2
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/461385
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Author: Ben Walker <benjamin.walker@intel.com>
Date: 2019-06-04 13:29:27 -07:00
Committed-by: Darek Stojaczyk
parent 0d33baa14c
commit 8f44d126b4

14 changed files with 68 additions and 63 deletions
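For reference, the new on-disk layout with the default VHOST_DIR=$HOME/vhost_test
(a sketch assembled from the hunks below):

    $HOME/vhost_test/
        vms/           per-VM state, one numbered subdirectory each (was $TEST_DIR/vms)
        vhost/         per-target-instance directories (was $TEST_DIR/vhost<N>)
        fio_results/   fio output (was $TEST_DIR/fio_results)
        nvmf_tgt/      scratch directory for the migration tc2 test
        share/         shared directory for the migration tc3 test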


@@ -1,16 +1,21 @@
 : ${SPDK_VHOST_VERBOSE=false}
+: ${VM_IMAGE="$HOME/vhost_vm_image.qcow2"}
+: ${VHOST_DIR="$HOME/vhost_test"}
 TEST_DIR=$(readlink -f $rootdir/..)
+VM_DIR=$VHOST_DIR/vms
+TARGET_DIR=$VHOST_DIR/vhost
 #TODO: Move vhost_vm_image.qcow2 into VHOST_DIR on test systems.
-VM_IMAGE=$HOME/vhost_vm_image.qcow2
 if ! hash qemu-img qemu-system-x86_64; then
     error 'QEMU is not installed on this system. Unable to run vhost tests.'
     exit 1
 fi
-VM_BASE_DIR="$TEST_DIR/vms"
-mkdir -p $TEST_DIR
+mkdir -p $VHOST_DIR
+mkdir -p $VM_DIR
+mkdir -p $TARGET_DIR
 #
 # Source config describing QEMU and VHOST cores and NUMA
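Because the new defaults use the ": ${VAR=default}" idiom, they take effect only
when the variable is not already set, so a whole run can be relocated from the
environment. A minimal sketch (the test script name here is hypothetical):

    VHOST_DIR=/mnt/scratch/vhost_test VM_IMAGE=/mnt/images/test_vm.qcow2 ./run_vhost_test.sh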
@@ -98,13 +103,13 @@ function get_vhost_dir()
         local vhost_num=0
     fi
-    echo "$TEST_DIR/vhost${vhost_num}"
+    echo "$TARGET_DIR/${vhost_num}"
 }
 function vhost_list_all()
 {
     shopt -s nullglob
-    local vhost_list="$(echo $TEST_DIR/vhost[0-9]*)"
+    local vhost_list="$(echo $TARGET_DIR/[0-9]*)"
     shopt -u nullglob
     if [[ ! -z "$vhost_list" ]]; then
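Per-instance vhost directories thus move from $TEST_DIR/vhost<N> to
$TARGET_DIR/<N>, while get_vhost_dir keeps its interface, so callers such as
this RPC socket lookup later in the change work unmodified:

    rpc="$rootdir/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"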
@@ -304,7 +309,7 @@ function vm_num_is_valid()
 function vm_ssh_socket()
 {
     vm_num_is_valid $1 || return 1
-    local vm_dir="$VM_BASE_DIR/$1"
+    local vm_dir="$VM_DIR/$1"
     cat $vm_dir/ssh_socket
 }
@@ -312,7 +317,7 @@ function vm_ssh_socket()
 function vm_fio_socket()
 {
     vm_num_is_valid $1 || return 1
-    local vm_dir="$VM_BASE_DIR/$1"
+    local vm_dir="$VM_DIR/$1"
     cat $vm_dir/fio_socket
 }
@@ -359,7 +364,7 @@ function vm_scp()
 function vm_is_running()
 {
     vm_num_is_valid $1 || return 1
-    local vm_dir="$VM_BASE_DIR/$1"
+    local vm_dir="$VM_DIR/$1"
     if [[ ! -r $vm_dir/qemu.pid ]]; then
         return 1
@@ -386,7 +391,7 @@ function vm_is_running()
 function vm_os_booted()
 {
     vm_num_is_valid $1 || return 1
-    local vm_dir="$VM_BASE_DIR/$1"
+    local vm_dir="$VM_DIR/$1"
     if [[ ! -r $vm_dir/qemu.pid ]]; then
         error "VM $1 is not running"
@@ -409,7 +414,7 @@ function vm_os_booted()
 function vm_shutdown()
 {
     vm_num_is_valid $1 || return 1
-    local vm_dir="$VM_BASE_DIR/$1"
+    local vm_dir="$VM_DIR/$1"
     if [[ ! -d "$vm_dir" ]]; then
         error "VM$1 ($vm_dir) not exist - setup it first"
         return 1
@@ -435,7 +440,7 @@ function vm_shutdown()
 function vm_kill()
 {
     vm_num_is_valid $1 || return 1
-    local vm_dir="$VM_BASE_DIR/$1"
+    local vm_dir="$VM_DIR/$1"
     if [[ ! -r $vm_dir/qemu.pid ]]; then
         return 0
@@ -455,17 +460,17 @@ function vm_kill()
     fi
 }
-# List all VM numbers in VM_BASE_DIR
+# List all VM numbers in VM_DIR
 #
 function vm_list_all()
 {
-    local vms="$(shopt -s nullglob; echo $VM_BASE_DIR/[0-9]*)"
+    local vms="$(shopt -s nullglob; echo $VM_DIR/[0-9]*)"
     if [[ ! -z "$vms" ]]; then
         basename --multiple $vms
     fi
 }
-# Kills all VM in $VM_BASE_DIR
+# Kills all VM in $VM_DIR
 #
 function vm_kill_all()
 {
@@ -474,10 +479,10 @@ function vm_kill_all()
         vm_kill $vm
     done
-    rm -rf $VM_BASE_DIR
+    rm -rf $VM_DIR
 }
-# Shutdown all VM in $VM_BASE_DIR
+# Shutdown all VM in $VM_DIR
 #
 function vm_shutdown_all()
 {
@@ -513,7 +518,7 @@ function vm_shutdown_all()
         sleep 1
     done
-    rm -rf $VM_BASE_DIR
+    rm -rf $VM_DIR
     $shell_restore_x
     error "Timeout waiting for some VMs to shutdown"
@@ -573,14 +578,14 @@ function vm_setup()
         vm_num=$force_vm
         vm_num_is_valid $vm_num || return 1
-        local vm_dir="$VM_BASE_DIR/$vm_num"
+        local vm_dir="$VM_DIR/$vm_num"
         [[ -d $vm_dir ]] && warning "removing existing VM in '$vm_dir'"
     else
         local vm_dir=""
         set +x
         for (( i=0; i<=256; i++)); do
-            local vm_dir="$VM_BASE_DIR/$i"
+            local vm_dir="$VM_DIR/$i"
             [[ ! -d $vm_dir ]] && break
         done
         $shell_restore_x
@@ -603,7 +608,7 @@ function vm_setup()
         fi
         os_mode="original"
-        os="$VM_BASE_DIR/$vm_incoming/os.qcow2"
+        os="$VM_DIR/$vm_incoming/os.qcow2"
     elif [[ ! -z "$vm_migrate_to" ]]; then
         [[ "$os_mode" != "backing" ]] && warning "Using 'backing' mode for OS since '--migrate-to' is used"
         os_mode=backing
@@ -828,8 +833,8 @@ function vm_setup()
     echo $gdbserver_socket > $vm_dir/gdbserver_socket
     echo $vnc_socket >> $vm_dir/vnc_socket
-    [[ -z $vm_incoming ]] || ln -fs $VM_BASE_DIR/$vm_incoming $vm_dir/vm_incoming
-    [[ -z $vm_migrate_to ]] || ln -fs $VM_BASE_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
+    [[ -z $vm_incoming ]] || ln -fs $VM_DIR/$vm_incoming $vm_dir/vm_incoming
+    [[ -z $vm_migrate_to ]] || ln -fs $VM_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
 }
 function vm_run()
@@ -854,7 +859,7 @@ function vm_run()
     shift $((OPTIND-1))
     for vm in $@; do
         vm_num_is_valid $1 || return 1
-        if [[ ! -x $VM_BASE_DIR/$vm/run.sh ]]; then
+        if [[ ! -x $VM_DIR/$vm/run.sh ]]; then
             error "VM$vm not defined - setup it first"
             return 1
         fi
@@ -864,12 +869,12 @@ function vm_run()
     for vm in $vms_to_run; do
         if vm_is_running $vm; then
-            warning "VM$vm ($VM_BASE_DIR/$vm) already running"
+            warning "VM$vm ($VM_DIR/$vm) already running"
             continue
         fi
-        notice "running $VM_BASE_DIR/$vm/run.sh"
-        if ! $VM_BASE_DIR/$vm/run.sh; then
+        notice "running $VM_DIR/$vm/run.sh"
+        if ! $VM_DIR/$vm/run.sh; then
             error "FAILED to run vm $vm"
             return 1
         fi
@@ -881,22 +886,22 @@ function vm_print_logs()
     vm_num=$1
     warning "================"
     warning "QEMU LOG:"
-    if [[ -r $VM_BASE_DIR/$vm_num/qemu.log ]]; then
-        cat $VM_BASE_DIR/$vm_num/qemu.log
+    if [[ -r $VM_DIR/$vm_num/qemu.log ]]; then
+        cat $VM_DIR/$vm_num/qemu.log
     else
         warning "LOG qemu.log not found"
     fi
     warning "VM LOG:"
-    if [[ -r $VM_BASE_DIR/$vm_num/serial.log ]]; then
-        cat $VM_BASE_DIR/$vm_num/serial.log
+    if [[ -r $VM_DIR/$vm_num/serial.log ]]; then
+        cat $VM_DIR/$vm_num/serial.log
     else
         warning "LOG serial.log not found"
     fi
     warning "SEABIOS LOG:"
-    if [[ -r $VM_BASE_DIR/$vm_num/seabios.log ]]; then
-        cat $VM_BASE_DIR/$vm_num/seabios.log
+    if [[ -r $VM_DIR/$vm_num/seabios.log ]]; then
+        cat $VM_DIR/$vm_num/seabios.log
     else
         warning "LOG seabios.log not found"
     fi
@@ -920,11 +925,11 @@ function vm_wait_for_boot()
     notice "Waiting for VMs to boot"
     shift
     if [[ "$@" == "" ]]; then
-        local vms_to_check="$VM_BASE_DIR/[0-9]*"
+        local vms_to_check="$VM_DIR/[0-9]*"
     else
         local vms_to_check=""
         for vm in $@; do
-            vms_to_check+=" $VM_BASE_DIR/$vm"
+            vms_to_check+=" $VM_DIR/$vm"
         done
     fi


@@ -187,7 +187,7 @@ DISK=""
 fio_disks=""
 for vm_num in $used_vms; do
-    vm_dir=$VM_BASE_DIR/$vm_num
+    vm_dir=$VM_DIR/$vm_num
     qemu_mask_param="VM_${vm_num}_qemu_mask"
@@ -213,7 +213,7 @@ if $dry_run; then
     exit 0
 fi
-run_fio $fio_bin --job-file="$fio_job" --out="$TEST_DIR/fio_results" $fio_disks
+run_fio $fio_bin --job-file="$fio_job" --out="$VHOST_DIR/fio_results" $fio_disks
 if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
     for vm_num in $used_vms; do
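With the default VHOST_DIR this writes fio output to $HOME/vhost_test/fio_results
instead of a fio_results directory next to the source tree (TEST_DIR resolved to
$rootdir/..). For example, after a run:

    ls $HOME/vhost_test/fio_results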


@@ -19,7 +19,7 @@ function prepare_fio_cmd_tc1() {
     run_fio="$fio_bin --eta=never "
     for vm_num in $1; do
         cp $fio_job $tmp_detach_job
-        vm_dir=$VM_BASE_DIR/$vm_num
+        vm_dir=$VM_DIR/$vm_num
         vm_check_blk_location $vm_num
         for disk in $SCSI_DISK; do
             echo "[nvme-host$disk]" >> $tmp_detach_job


@@ -113,7 +113,7 @@ function vms_setup_and_run() {
 function vms_prepare() {
     for vm_num in $1; do
-        vm_dir=$VM_BASE_DIR/$vm_num
+        vm_dir=$VM_DIR/$vm_num
         qemu_mask_param="VM_${vm_num}_qemu_mask"


@@ -12,7 +12,7 @@ function prepare_fio_cmd_tc1() {
     run_fio="$fio_bin --eta=never "
     for vm_num in $1; do
         cp $fio_job $tmp_attach_job
-        vm_dir=$VM_BASE_DIR/$vm_num
+        vm_dir=$VM_DIR/$vm_num
         vm_check_scsi_location $vm_num
         for disk in $SCSI_DISK; do
             echo "[nvme-host$disk]" >> $tmp_attach_job


@@ -24,7 +24,7 @@ function prepare_fio_cmd_tc1_iter1() {
     run_fio="$fio_bin --eta=never "
     for vm_num in $1; do
         cp $fio_job $tmp_detach_job
-        vm_dir=$VM_BASE_DIR/$vm_num
+        vm_dir=$VM_DIR/$vm_num
         vm_check_scsi_location $vm_num
         for disk in $SCSI_DISK; do
             echo "[nvme-host$disk]" >> $tmp_detach_job
@@ -41,7 +41,7 @@ function prepare_fio_cmd_tc1_iter2() {
     for vm_num in 2; do
         cp $fio_job $tmp_detach_job
-        vm_dir=$VM_BASE_DIR/$vm_num
+        vm_dir=$VM_DIR/$vm_num
         vm_check_scsi_location $vm_num
         for disk in $SCSI_DISK; do
             echo "[nvme-host$disk]" >> $tmp_detach_job
@@ -66,7 +66,7 @@ function prepare_fio_cmd_tc2_iter1() {
     run_fio="$fio_bin --eta=never "
     for vm_num in $1; do
         cp $fio_job $tmp_detach_job
-        vm_dir=$VM_BASE_DIR/$vm_num
+        vm_dir=$VM_DIR/$vm_num
         vm_check_scsi_location $vm_num
         disk_array=($SCSI_DISK)
         disk=${disk_array[0]}
@@ -89,7 +89,7 @@ function prepare_fio_cmd_tc2_iter2() {
         else
             vm_job_name=default_integrity_4discs.job
         fi
-        vm_dir=$VM_BASE_DIR/$vm_num
+        vm_dir=$VM_DIR/$vm_num
         vm_check_scsi_location $vm_num
         for disk in $SCSI_DISK; do
             echo "[nvme-host$disk]" >> $tmp_detach_job
@@ -113,7 +113,7 @@ function prepare_fio_cmd_tc3_iter1() {
         else
             vm_job_name=default_integrity_4discs.job
         fi
-        vm_dir=$VM_BASE_DIR/$vm_num
+        vm_dir=$VM_DIR/$vm_num
         vm_check_scsi_location $vm_num
         j=1
         for disk in $SCSI_DISK; do


@@ -21,7 +21,7 @@ function prepare_fio_cmd_tc1() {
     run_fio="$fio_bin --eta=never "
     for vm_num in $1; do
         cp $fio_job $tmp_detach_job
-        vm_dir=$VM_BASE_DIR/$vm_num
+        vm_dir=$VM_DIR/$vm_num
         vm_check_scsi_location $vm_num
         for disk in $SCSI_DISK; do
             echo "[nvme-host$disk]" >> $tmp_detach_job


@@ -226,7 +226,7 @@ vm_wait_for_boot 300 $used_vms
 fio_disks=""
 for vm_num in $used_vms; do
-    vm_dir=$VM_BASE_DIR/$vm_num
+    vm_dir=$VM_DIR/$vm_num
     qemu_mask_param="VM_${vm_num}_qemu_mask"
     host_name="VM-$vm_num-${!qemu_mask_param}"
@@ -248,7 +248,7 @@ else
     job_file="default_integrity.job"
 fi
 # Run FIO traffic
-run_fio $fio_bin --job-file=$rootdir/test/vhost/common/fio_jobs/$job_file --out="$TEST_DIR/fio_results" $fio_disks
+run_fio $fio_bin --job-file=$rootdir/test/vhost/common/fio_jobs/$job_file --out="$VHOST_DIR/fio_results" $fio_disks
 notice "Shutting down virtual machines..."
 vm_shutdown_all


@@ -69,7 +69,7 @@ function migration_tc2_configure_vhost()
     timing_enter migration_tc2_configure_vhost
     # Those are global intentionally - they will be unset in cleanup handler
-    nvmf_dir="$TEST_DIR/nvmf_tgt"
+    nvmf_dir="$VHOST_DIR/nvmf_tgt"
     incoming_vm=1
     target_vm=2


@@ -8,7 +8,7 @@ incoming_vm=1
 target_vm=2
 target_vm_ctrl=naa.VhostScsi0.$target_vm
 rpc="$rootdir/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"
-share_dir=$TEST_DIR/share
+share_dir=$VHOST_DIR/share
 function host_2_cleanup_vhost()
 {
@@ -26,8 +26,8 @@ function host_2_cleanup_vhost()
 function host_2_start_vhost()
 {
-    echo "BASE DIR $TEST_DIR"
-    vhost_work_dir=$TEST_DIR/vhost1
+    echo "BASE DIR $VHOST_DIR"
+    vhost_work_dir=$VHOST_DIR/vhost1
     mkdir -p $vhost_work_dir
     rm -f $vhost_work_dir/*
@@ -49,7 +49,7 @@ function host_2_start_vhost()
     echo "DONE" > $share_dir/DONE
 }
-echo $$ > $TEST_DIR/tc3b.pid
+echo $$ > $VHOST_DIR/tc3b.pid
 host_2_start_vhost
 suspend -f
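The pid file gives whatever drives this second host a handle for resuming the
shell stopped by "suspend -f"; a hedged sketch of that counterpart step (not
shown in this diff) would be:

    kill -CONT $(cat $VHOST_DIR/tc3b.pid)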


@@ -66,7 +66,7 @@ function vm_monitor_send()
 {
     local vm_num=$1
     local cmd_result_file="$2"
-    local vm_dir="$VM_BASE_DIR/$1"
+    local vm_dir="$VM_DIR/$1"
     local vm_monitor_port=$(cat $vm_dir/monitor_port)
     [[ ! -z "$vm_monitor_port" ]] || fail "No monitor port!"
@@ -78,7 +78,7 @@ function vm_monitor_send()
 # Migrate VM $1
 function vm_migrate()
 {
-    local from_vm_dir="$VM_BASE_DIR/$1"
+    local from_vm_dir="$VM_DIR/$1"
     local target_vm_dir="$(readlink -e $from_vm_dir/vm_migrate_to)"
     local target_vm="$(basename $target_vm_dir)"
     local target_vm_migration_port="$(cat $target_vm_dir/migration_port)"


@@ -43,10 +43,10 @@ notice "Testing vhost command line arguments"
 $VHOST_APP -c /path/to/non_existing_file/conf -S $testdir -e 0x0 -s 1024 -d -h --silence-noticelog
 # Testing vhost create pid file option. Vhost will exit with error as invalid config path is given
-if $VHOST_APP -c /path/to/non_existing_file/conf -f $TEST_DIR/vhost/vhost.pid; then
+if $VHOST_APP -c /path/to/non_existing_file/conf -f $VHOST_DIR/vhost/vhost.pid; then
     fail "vhost started when specifying invalid config file"
 fi
-rm -f $TEST_DIR/vhost/vhost.pid
+rm -f $VHOST_DIR/vhost/vhost.pid
 # Testing vhost start with invalid config. Vhost will exit with error as bdev module init failed
 if $VHOST_APP -c $testdir/invalid.config; then


@@ -322,7 +322,7 @@ fi
 # Run FIO
 fio_disks=""
 for vm_num in $used_vms; do
-    vm_dir=$VM_BASE_DIR/$vm_num
+    vm_dir=$VM_DIR/$vm_num
     host_name="VM-$vm_num"
     vm_exec $vm_num "hostname $host_name"
     vm_start_fio_server $fio_bin $vm_num
@@ -350,12 +350,12 @@ fio_job_fname=$(basename $fio_job)
 fio_log_fname="${fio_job_fname%%.*}.log"
 for i in $(seq 1 $fio_iterations); do
     echo "Running FIO iteration $i"
-    run_fio $fio_bin --job-file="$fio_job" --out="$TEST_DIR/fio_results" --json $fio_disks &
+    run_fio $fio_bin --job-file="$fio_job" --out="$VHOST_DIR/fio_results" --json $fio_disks &
     fio_pid=$!
     if $vm_sar_enable; then
         sleep $vm_sar_delay
-        mkdir -p $TEST_DIR/fio_results/sar_stats
+        mkdir -p $VHOST_DIR/fio_results/sar_stats
         pids=""
         for vm_num in $used_vms; do
             vm_exec "$vm_num" "mkdir -p /root/sar; sar -P ALL $vm_sar_interval $vm_sar_count >> /root/sar/sar_stats_VM${vm_num}_run${i}.txt" &
@@ -365,12 +365,12 @@ for i in $(seq 1 $fio_iterations); do
             wait $j
         done
         for vm_num in $used_vms; do
-            vm_scp "$vm_num" "root@127.0.0.1:/root/sar/sar_stats_VM${vm_num}_run${i}.txt" "$TEST_DIR/fio_results/sar_stats"
+            vm_scp "$vm_num" "root@127.0.0.1:/root/sar/sar_stats_VM${vm_num}_run${i}.txt" "$VHOST_DIR/fio_results/sar_stats"
         done
     fi
     wait $fio_pid
-    mv $TEST_DIR/fio_results/$fio_log_fname $TEST_DIR/fio_results/$fio_log_fname.$i
+    mv $VHOST_DIR/fio_results/$fio_log_fname $VHOST_DIR/fio_results/$fio_log_fname.$i
     sleep 1
 done
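Each iteration's log is renamed with its iteration number, so after the loop
$VHOST_DIR/fio_results holds one log per pass ($fio_log_fname.1 through
$fio_log_fname.$fio_iterations); for example:

    ls $VHOST_DIR/fio_results/*.log.*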


@@ -66,7 +66,7 @@ function blk_ro_tc1()
     local vm_no="0"
     local disk_name=$disk
     local vhost_blk_name=""
-    local vm_dir="$TEST_DIR/vms/$vm_no"
+    local vm_dir="$VHOST_DIR/vms/$vm_no"
     if [[ $disk =~ .*malloc.* ]]; then
         disk_name=$($rpc_py construct_malloc_bdev 512 4096)