test/vfio_user: Build vfio-user test directory

Add a test script for launching VMs with the vfio-user transport.
The script stays inactive until a vfio-user-capable QEMU is available;
the new SPDK_TEST_VFIOUSER_QEMU flag decides whether the test case runs.
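
For example (illustrative invocation, not part of this change):

    SPDK_TEST_VFIOUSER_QEMU=1 ./autotest.sh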

Change-Id: I9b885cec797fe3bb28860f1ec436c31a6fa2f131
Signed-off-by: Mao Jiang <maox.jiang@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10096
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Michal Berger <michalx.berger@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Mao Jiang 2021-10-29 07:24:39 +00:00 committed by Tomasz Zawadzki
parent 5d5b293387
commit a0d255ac02
5 changed files with 148 additions and 1 deletion

autotest.sh

@@ -275,6 +275,10 @@ if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
run_test "vhost" ./test/vhost/vhost.sh
fi
+if [ $SPDK_TEST_VFIOUSER_QEMU -eq 1 ]; then
+run_test "vfio_user_qemu" ./test/vfio_user/vfio_user.sh
+fi
if [ $SPDK_TEST_LVOL -eq 1 ]; then
run_test "lvol" ./test/lvol/lvol.sh
run_test "blob_io_wait" ./test/blobstore/blob_io_wait/blob_io_wait.sh

test/common/autotest_common.sh

@@ -101,6 +101,8 @@ export SPDK_TEST_NVME_CUSE
export SPDK_TEST_NVMF
: ${SPDK_TEST_VFIOUSER=0}
export SPDK_TEST_VFIOUSER
+: ${SPDK_TEST_VFIOUSER_QEMU=0}
+export SPDK_TEST_VFIOUSER_QEMU
: ${SPDK_TEST_NVMF_TRANSPORT="rdma"}
export SPDK_TEST_NVMF_TRANSPORT
: ${SPDK_TEST_RBD=0}
@@ -450,7 +452,7 @@ function get_config_params() {
config_params+=' --with-raid5'
fi
-if [ $SPDK_TEST_VFIOUSER -eq 1 ]; then
+if [ $SPDK_TEST_VFIOUSER -eq 1 ] || [ $SPDK_TEST_VFIOUSER_QEMU -eq 1 ]; then
config_params+=' --with-vfio-user'
fi
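
With either flag set, vfio-user support is compiled into the build. A minimal
sketch of the effect (assuming the variable is exported before sourcing the file):

    $ export SPDK_TEST_VFIOUSER_QEMU=1
    $ source test/common/autotest_common.sh
    $ get_config_params
    ... --with-vfio-user ...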

test/vfio_user/common.sh Normal file

@@ -0,0 +1,38 @@
: ${MALLOC_BDEV_SIZE=256}
: ${MALLOC_BLOCK_SIZE=512}
# Path to a QEMU build with vfio-user support.
QEMU_BIN="/usr/local/qemu/vfio-user-v0.93/bin/qemu-system-x86_64"
function clean_vfio_user() {
trap - ERR
print_backtrace
set +e
error "Error on $1 $2"
vm_kill_all
vhost_kill 0
exit 1
}
function vfio_user_run() {
local vhost_name=$1
local vfio_user_dir nvmf_pid_file rpc_py
vfio_user_dir=$(get_vhost_dir $vhost_name)
nvmf_pid_file="$vfio_user_dir/vhost.pid"
rpc_py="$rootdir/scripts/rpc.py -s $vfio_user_dir/rpc.sock"
mkdir -p $vfio_user_dir
timing_enter vfio_user_start
$rootdir/build/bin/nvmf_tgt -r $vfio_user_dir/rpc.sock -m 0x1 &
nvmfpid=$!
echo $nvmfpid > $nvmf_pid_file
echo "Process pid: $nvmfpid"
echo "waiting for app to run..."
waitforlisten $nvmfpid $vfio_user_dir/rpc.sock
$rpc_py nvmf_create_transport -t VFIOUSER
timing_exit vfio_user_start
}
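
A minimal usage sketch of the helpers above (assumes test/vhost/common.sh has
been sourced for get_vhost_dir, vm_kill_all and vhost_kill):

    source $rootdir/test/vfio_user/common.sh
    trap 'clean_vfio_user "${FUNCNAME}" "${LINENO}"' ERR
    vfio_user_run 0                # start nvmf_tgt, create the VFIOUSER transport
    # ... add subsystems and listeners via $rootdir/scripts/rpc.py ...
    vhost_kill 0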

test/vfio_user/vfio_user.sh Executable file

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/vhost/common.sh
source $rootdir/test/vfio_user/common.sh
echo "Running SPDK vfio-user fio autotest..."
vhosttestinit
run_test "vfio_user_fio" $WORKDIR/vfio_user_fio/vfio_user_fio.sh
vhosttestfini

test/vfio_user/vfio_user_fio/vfio_user_fio.sh Executable file

@@ -0,0 +1,89 @@
#!/usr/bin/env bash
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/vhost/common.sh
source $rootdir/test/vfio_user/common.sh
rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
fio_bin="--fio-bin=$FIO_BIN"
vm_no="2"
trap 'clean_vfio_user "${FUNCNAME}" "${LINENO}"' ERR
vhosttestinit
timing_enter start_vfio_user
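# vfio_user_run (from common.sh above) starts nvmf_tgt and creates the VFIOUSER transport.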
vfio_user_run 0
#
# Create a malloc bdev for each VM; the last VM gets an NVMe bdev instead.
#
for i in $(seq 0 $vm_no); do
vm_muser_dir="$VM_DIR/$i/muser"
rm -rf $vm_muser_dir
mkdir -p $vm_muser_dir/domain/muser${i}/$i
$rpc_py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode${i} -s SPDK00${i} -a
if ((i == vm_no)); then
$rootdir/scripts/gen_nvme.sh | $rpc_py load_subsystem_config
$rpc_py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode${i} Nvme0n1
else
$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc${i}
$rpc_py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode${i} Malloc${i}
fi
$rpc_py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode${i} -t VFIOUSER -a $vm_muser_dir/domain/muser${i}/$i -s 0
done
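# For VM 0, for example, the loop above boils down to (illustrative expansion):
#   rpc.py nvmf_create_subsystem nqn.2019-07.io.spdk:cnode0 -s SPDK000 -a
#   rpc.py bdev_malloc_create 256 512 -b Malloc0
#   rpc.py nvmf_subsystem_add_ns nqn.2019-07.io.spdk:cnode0 Malloc0
#   rpc.py nvmf_subsystem_add_listener nqn.2019-07.io.spdk:cnode0 -t VFIOUSER -a $VM_DIR/0/muser/domain/muser0/0 -s 0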
timing_exit start_vfio_user
used_vms=""
timing_enter launch_vms
for i in $(seq 0 $vm_no); do
vm_setup --disk-type=vfio_user --force=$i --os=$VM_IMAGE --disks="$i"
used_vms+=" $i"
done
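# Boot all VMs at once; vm_wait_for_boot blocks until they are reachable.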
vm_run $used_vms
vm_wait_for_boot 60 $used_vms
timing_exit launch_vms
timing_enter run_vm_cmd
fio_disks=""
for vm_num in $used_vms; do
qemu_mask_param="VM_${vm_num}_qemu_mask"
host_name="VM-$vm_num-${!qemu_mask_param}"
vm_exec $vm_num "hostname $host_name"
vm_start_fio_server $fio_bin $vm_num
vm_check_nvme_location $vm_num
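# vm_check_nvme_location (vhost test helper) sets SCSI_DISK to the nvme device name(s) seen inside the VM.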
fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
done
job_file="default_integrity.job"
run_fio $fio_bin --job-file=$rootdir/test/vhost/common/fio_jobs/$job_file --out="$VHOST_DIR/fio_results" $fio_disks
timing_exit run_vm_cmd
vm_shutdown_all
timing_enter clean_vfio_user
for i in $(seq 0 $vm_no); do
vm_muser_dir="$VM_DIR/$i/muser"
$rpc_py nvmf_subsystem_remove_listener nqn.2019-07.io.spdk:cnode${i} -t VFIOUSER -a $vm_muser_dir/domain/muser${i}/$i -s 0
$rpc_py nvmf_delete_subsystem nqn.2019-07.io.spdk:cnode${i}
if ((i == vm_no)); then
$rpc_py bdev_nvme_detach_controller Nvme0
else
$rpc_py bdev_malloc_delete Malloc${i}
fi
done
vhost_kill 0
timing_exit clean_vfio_user
vhosttestfini
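
A minimal sketch for exercising the new test directly (assumes a vfio-user-capable
QEMU at the QEMU_BIN path above and a guest image pointed to by $VM_IMAGE):

    export SPDK_TEST_VFIOUSER_QEMU=1
    ./configure --with-vfio-user && make -j$(nproc)
    sudo -E ./test/vfio_user/vfio_user.sh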