test: eliminate case and suite from run_test

We will be able to distinguish cases from suites in the post-process
scripts based on whether a given test contains any nested tests.
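For illustration, a post-process pass could classify tests by the nesting
of the START TEST / END TEST banners that run_test prints: a test with
another START TEST before its own END TEST is a suite, otherwise a case.
A minimal sketch, not part of this change (the log file name is hypothetical):

awk '
	/^START TEST /{
		name = substr($0, 12)
		depth++
		if (depth > 1) nested[stack[depth - 1]] = 1
		stack[depth] = name
	}
	/^END TEST /{
		name = substr($0, 10)
		print (nested[name] ? "suite:" : "case:"), name
		depth--
	}
' autotest_output.log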

Change-Id: Iff2aa5caa251924d8a842085d87eb2a17aca45d4
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/478522
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Author: Seth Howell, 2019-12-19 16:03:30 -07:00; committed by Tomasz Zawadzki
parent 77d591e172
commit cb90136c33
21 changed files with 187 additions and 190 deletions

@@ -144,7 +144,7 @@ fi
 #####################
 if [ $SPDK_TEST_UNITTEST -eq 1 ]; then
-	run_test suite "unittest" ./test/unit/unittest.sh
+	run_test "unittest" ./test/unit/unittest.sh
 	report_test_completion "unittest"
 fi
@@ -152,119 +152,119 @@ fi
 if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
 	timing_enter lib
-	run_test suite "env" test/env/env.sh
-	run_test suite "rpc_client" test/rpc_client/rpc_client.sh
-	run_test suite "json_config" ./test/json_config/json_config.sh
-	run_test suite "alias_rpc" test/json_config/alias_rpc/alias_rpc.sh
-	run_test suite "spdkcli_tcp" test/spdkcli/tcp.sh
+	run_test "env" test/env/env.sh
+	run_test "rpc_client" test/rpc_client/rpc_client.sh
+	run_test "json_config" ./test/json_config/json_config.sh
+	run_test "alias_rpc" test/json_config/alias_rpc/alias_rpc.sh
+	run_test "spdkcli_tcp" test/spdkcli/tcp.sh
 	if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then
-		run_test suite "blockdev" test/bdev/blockdev.sh
-		run_test suite "bdev_raid" test/bdev/bdev_raid.sh
+		run_test "blockdev" test/bdev/blockdev.sh
+		run_test "bdev_raid" test/bdev/bdev_raid.sh
 	fi
 	if [ $SPDK_TEST_JSON -eq 1 ]; then
-		run_test suite "test_converter" test/config_converter/test_converter.sh
+		run_test "test_converter" test/config_converter/test_converter.sh
 	fi
 	if [ $SPDK_TEST_EVENT -eq 1 ]; then
-		run_test suite "event" test/event/event.sh
+		run_test "event" test/event/event.sh
 	fi
 	if [ $SPDK_TEST_NVME -eq 1 ]; then
-		run_test suite "nvme" test/nvme/nvme.sh
+		run_test "nvme" test/nvme/nvme.sh
 		if [[ $SPDK_TEST_NVME_CLI -eq 1 ]]; then
-			run_test suite "nvme_cli" test/nvme/spdk_nvme_cli.sh
+			run_test "nvme_cli" test/nvme/spdk_nvme_cli.sh
 		fi
 		if [[ $SPDK_TEST_NVME_CUSE -eq 1 ]]; then
-			run_test suite "nvme_cli_cuse" test/nvme/spdk_nvme_cli_cuse.sh
-			run_test suite "nvme_smartctl_cuse" test/nvme/spdk_smartctl_cuse.sh
+			run_test "nvme_cli_cuse" test/nvme/spdk_nvme_cli_cuse.sh
+			run_test "nvme_smartctl_cuse" test/nvme/spdk_smartctl_cuse.sh
 		fi
 		# Only test hotplug without ASAN enabled. Since if it is
 		# enabled, it catches SEGV earlier than our handler which
 		# breaks the hotplug logic.
 		if [ $SPDK_RUN_ASAN -eq 0 ]; then
-			run_test suite "nvme_hotplug" test/nvme/hotplug.sh intel
+			run_test "nvme_hotplug" test/nvme/hotplug.sh intel
 		fi
 	fi
 	if [ $SPDK_TEST_IOAT -eq 1 ]; then
-		run_test suite "ioat" test/ioat/ioat.sh
+		run_test "ioat" test/ioat/ioat.sh
 	fi
 	timing_exit lib
 	if [ $SPDK_TEST_ISCSI -eq 1 ]; then
-		run_test suite "iscsi_tgt_posix" ./test/iscsi_tgt/iscsi_tgt.sh posix
-		run_test suite "spdkcli_iscsi" ./test/spdkcli/iscsi.sh
+		run_test "iscsi_tgt_posix" ./test/iscsi_tgt/iscsi_tgt.sh posix
+		run_test "spdkcli_iscsi" ./test/spdkcli/iscsi.sh
 		# Run raid spdkcli test under iSCSI since blockdev tests run on systems that can't run spdkcli yet
-		run_test suite "spdkcli_raid" test/spdkcli/raid.sh
+		run_test "spdkcli_raid" test/spdkcli/raid.sh
 	fi
 	if [ $SPDK_TEST_VPP -eq 1 ]; then
-		run_test suite "iscsi_tgt_vpp" ./test/iscsi_tgt/iscsi_tgt.sh vpp
+		run_test "iscsi_tgt_vpp" ./test/iscsi_tgt/iscsi_tgt.sh vpp
 	fi
 	if [ $SPDK_TEST_BLOBFS -eq 1 ]; then
-		run_test suite "rocksdb" ./test/blobfs/rocksdb/rocksdb.sh
-		run_test suite "blobstore" ./test/blobstore/blobstore.sh
-		run_test suite "blobfs" ./test/blobfs/blobfs.sh
+		run_test "rocksdb" ./test/blobfs/rocksdb/rocksdb.sh
+		run_test "blobstore" ./test/blobstore/blobstore.sh
+		run_test "blobfs" ./test/blobfs/blobfs.sh
 	fi
 	if [ $SPDK_TEST_NVMF -eq 1 ]; then
-		run_test suite "nvmf" ./test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
-		run_test suite "spdkcli_nvmf" ./test/spdkcli/nvmf.sh
+		run_test "nvmf" ./test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
+		run_test "spdkcli_nvmf" ./test/spdkcli/nvmf.sh
 	fi
 	if [ $SPDK_TEST_VHOST -eq 1 ]; then
-		run_test suite "vhost" ./test/vhost/vhost.sh
+		run_test "vhost" ./test/vhost/vhost.sh
 	fi
 	if [ $SPDK_TEST_LVOL -eq 1 ]; then
-		run_test suite "lvol" ./test/lvol/lvol.sh --test-cases=all
-		run_test suite "lvol2" ./test/lvol/lvol2.sh
-		run_test suite "blob_io_wait" ./test/blobstore/blob_io_wait/blob_io_wait.sh
+		run_test "lvol" ./test/lvol/lvol.sh --test-cases=all
+		run_test "lvol2" ./test/lvol/lvol2.sh
+		run_test "blob_io_wait" ./test/blobstore/blob_io_wait/blob_io_wait.sh
 		report_test_completion "lvol"
 	fi
 	if [ $SPDK_TEST_VHOST_INIT -eq 1 ]; then
 		timing_enter vhost_initiator
-		run_test suite "vhost_blockdev" ./test/vhost/initiator/blockdev.sh
-		run_test suite "spdkcli_virtio" ./test/spdkcli/virtio.sh
-		run_test suite "vhost_shared" ./test/vhost/shared/shared.sh
-		run_test suite "vhost_fuzz" ./test/vhost/fuzz/fuzz.sh
+		run_test "vhost_blockdev" ./test/vhost/initiator/blockdev.sh
+		run_test "spdkcli_virtio" ./test/spdkcli/virtio.sh
+		run_test "vhost_shared" ./test/vhost/shared/shared.sh
+		run_test "vhost_fuzz" ./test/vhost/fuzz/fuzz.sh
 		report_test_completion "vhost initiator"
 		timing_exit vhost_initiator
 	fi
 	if [ $SPDK_TEST_PMDK -eq 1 ]; then
-		run_test suite "pmem" ./test/pmem/pmem.sh -x
-		run_test suite "spdkcli_pmem" ./test/spdkcli/pmem.sh
+		run_test "pmem" ./test/pmem/pmem.sh -x
+		run_test "spdkcli_pmem" ./test/spdkcli/pmem.sh
 	fi
 	if [ $SPDK_TEST_RBD -eq 1 ]; then
-		run_test suite "spdkcli_rbd" ./test/spdkcli/rbd.sh
+		run_test "spdkcli_rbd" ./test/spdkcli/rbd.sh
 	fi
 	if [ $SPDK_TEST_OCF -eq 1 ]; then
-		run_test suite "ocf" ./test/ocf/ocf.sh
+		run_test "ocf" ./test/ocf/ocf.sh
 	fi
 	if [ $SPDK_TEST_FTL -eq 1 ]; then
-		run_test suite "ftl" ./test/ftl/ftl.sh
+		run_test "ftl" ./test/ftl/ftl.sh
 	fi
 	if [ $SPDK_TEST_VMD -eq 1 ]; then
-		run_test suite "vmd" ./test/vmd/vmd.sh
+		run_test "vmd" ./test/vmd/vmd.sh
 	fi
 	if [ $SPDK_TEST_REDUCE -eq 1 ]; then
-		run_test suite "compress" ./test/compress/compress.sh
+		run_test "compress" ./test/compress/compress.sh
 	fi
 	if [ $SPDK_TEST_OPAL -eq 1 ]; then
-		run_test suite "nvme_opal" ./test/nvme/nvme_opal.sh
+		run_test "nvme_opal" ./test/nvme/nvme_opal.sh
 	fi
 fi

@@ -62,12 +62,12 @@ function fio_test_suite() {
 	local fio_ext_params="--ioengine=spdk_bdev --iodepth=128 --bs=192k --runtime=100 $testdir/bdev.fio --spdk_conf=./test/bdev/bdev.conf"
 	if [ $RUN_NIGHTLY -eq 0 ]; then
-		run_test "case" "bdev_fio_rw_verify" fio_bdev $fio_params --spdk_mem=$PRE_RESERVED_MEM \
+		run_test "bdev_fio_rw_verify" fio_bdev $fio_params --spdk_mem=$PRE_RESERVED_MEM \
 			--output=$output_dir/blockdev_fio_verify.txt
 	elif [ $RUN_NIGHTLY_FAILING -eq 1 ]; then
 		# Use size 192KB which both exceeds typical 128KB max NVMe I/O
 		# size and will cross 128KB Intel DC P3700 stripe boundaries.
-		run_test "case" "bdev_fio_rw_verify_ext" fio_bdev $fio_ext_params --spdk_mem=$PRE_RESERVED_MEM \
+		run_test "bdev_fio_rw_verify_ext" fio_bdev $fio_ext_params --spdk_mem=$PRE_RESERVED_MEM \
 			--output=$output_dir/blockdev_fio_verify.txt
 	fi
 	rm -f ./*.state
@@ -80,9 +80,9 @@ function fio_test_suite() {
 	done
 	if [ $RUN_NIGHTLY -eq 0 ]; then
-		run_test "case" "bdev_fio_trim" fio_bdev $fio_params --output=$output_dir/blockdev_trim.txt
+		run_test "bdev_fio_trim" fio_bdev $fio_params --output=$output_dir/blockdev_trim.txt
 	elif [ $RUN_NIGHTLY_FAILING -eq 1 ]; then
-		run_test "case" "bdev_fio_trim_ext" fio_bdev $fio_ext_params --output=$output_dir/blockdev_trim.txt
+		run_test "bdev_fio_trim_ext" fio_bdev $fio_ext_params --output=$output_dir/blockdev_trim.txt
 	fi
 	rm -f ./*.state
@@ -158,7 +158,7 @@ function qos_function_test() {
 		# Run bdevperf with IOPS rate limit on bdev 1
 		$rpc_py bdev_set_qos_limit --rw_ios_per_sec $iops_limit $QOS_DEV_1
-		run_test "case" "bdev_qos_iops" run_qos_test $iops_limit IOPS $QOS_DEV_1
+		run_test "bdev_qos_iops" run_qos_test $iops_limit IOPS $QOS_DEV_1
 		# Run bdevperf with bandwidth rate limit on bdev 2
 		# Set the bandwidth limit as 1/10 of the measure performance without QoS
@@ -168,11 +168,11 @@ function qos_function_test() {
 			bw_limit=$qos_lower_bw_limit
 		fi
 		$rpc_py bdev_set_qos_limit --rw_mbytes_per_sec $bw_limit $QOS_DEV_2
-		run_test "case" "bdev_qos_bw" run_qos_test $bw_limit BANDWIDTH $QOS_DEV_2
+		run_test "bdev_qos_bw" run_qos_test $bw_limit BANDWIDTH $QOS_DEV_2
 		# Run bdevperf with additional read only bandwidth rate limit on bdev 1
 		$rpc_py bdev_set_qos_limit --r_mbytes_per_sec $qos_lower_bw_limit $QOS_DEV_1
-		run_test "case" "bdev_qos_ro_bw" run_qos_test $qos_lower_bw_limit BANDWIDTH $QOS_DEV_1
+		run_test "bdev_qos_ro_bw" run_qos_test $qos_lower_bw_limit BANDWIDTH $QOS_DEV_1
 	else
 		echo "Actual IOPS without limiting is too low - exit testing"
 	fi
@@ -262,24 +262,24 @@ $rootdir/scripts/gen_nvme.sh >> $testdir/bdev_gpt.conf
 # End bdev configuration
 #-----------------------------------------------------
-run_test "case" "bdev_hello_world" $rootdir/examples/bdev/hello_world/hello_bdev -c $testdir/bdev.conf -b Malloc0
-run_test "case" "bdev_bounds" bdev_bounds
-run_test "case" "bdev_nbd" nbd_function_test $testdir/bdev.conf "$bdevs_name"
+run_test "bdev_hello_world" $rootdir/examples/bdev/hello_world/hello_bdev -c $testdir/bdev.conf -b Malloc0
+run_test "bdev_bounds" bdev_bounds
+run_test "bdev_nbd" nbd_function_test $testdir/bdev.conf "$bdevs_name"
 if [ -d /usr/src/fio ]; then
-	run_test "suite" "bdev_fio" fio_test_suite
+	run_test "bdev_fio" fio_test_suite
 else
 	echo "FIO not available"
 	exit 1
 fi
 # Run bdevperf with gpt
-run_test "case" "bdev_gpt_verify" $testdir/bdevperf/bdevperf -c $testdir/bdev_gpt.conf -q 128 -o 4096 -w verify -t 5
-run_test "case" "bdev_gpt_write_zeroes" $testdir/bdevperf/bdevperf -c $testdir/bdev_gpt.conf -q 128 -o 4096 -w write_zeroes -t 1
-run_test "suite" "bdev_qos" qos_test_suite
+run_test "bdev_gpt_verify" $testdir/bdevperf/bdevperf -c $testdir/bdev_gpt.conf -q 128 -o 4096 -w verify -t 5
+run_test "bdev_gpt_write_zeroes" $testdir/bdevperf/bdevperf -c $testdir/bdev_gpt.conf -q 128 -o 4096 -w write_zeroes -t 1
+run_test "bdev_qos" qos_test_suite
 # Temporarily disabled - infinite loop
 # if [ $RUN_NIGHTLY -eq 1 ]; then
-# 	run_test "case" "bdev_gpt_reset" $testdir/bdevperf/bdevperf -c $testdir/bdev.conf -q 16 -w reset -o 4096 -t 60
+# 	run_test "bdev_gpt_reset" $testdir/bdevperf/bdevperf -c $testdir/bdev.conf -q 16 -w reset -o 4096 -t 60
 # 	report_test_completion "nightly_bdev_reset"
 # fi

@@ -58,7 +58,7 @@ echo "TpointGroupMask 0x80" >> $ROCKSDB_CONF
 trap 'run_bsdump; rm -f $ROCKSDB_CONF; exit 1' SIGINT SIGTERM EXIT
 if [ -z "$SKIP_MKFS" ]; then
-	run_test "case" "blobfs_mkfs" $rootdir/test/blobfs/mkfs/mkfs $ROCKSDB_CONF Nvme0n1
+	run_test "blobfs_mkfs" $rootdir/test/blobfs/mkfs/mkfs $ROCKSDB_CONF Nvme0n1
 fi
 mkdir -p $output_dir/rocksdb
@@ -124,11 +124,11 @@ cat << EOL >> writesync_flags.txt
 --num=$NUM_KEYS
 EOL
-run_test "case" "rocksdb_insert" run_step insert
-run_test "case" "rocksdb_overwrite" run_step overwrite
-run_test "case" "rocksdb_readwrite" run_step readwrite
-run_test "case" "rocksdb_writesync" run_step writesync
-run_test "case" "rocksdb_randread" run_step randread
+run_test "rocksdb_insert" run_step insert
+run_test "rocksdb_overwrite" run_step overwrite
+run_test "rocksdb_readwrite" run_step readwrite
+run_test "rocksdb_writesync" run_step writesync
+run_test "rocksdb_randread" run_step randread
 trap - SIGINT SIGTERM EXIT

@@ -572,28 +572,25 @@ function kill_stub() {
 }
 function run_test() {
-	if [ $# -le 2 ]; then
+	if [ $# -le 1 ]; then
 		echo "Not enough parameters"
-		echo "usage: run_test test_type test_name test_script [script_params]"
+		echo "usage: run_test test_name test_script [script_params]"
 		exit 1
 	fi
 	xtrace_disable
-	local test_type
-	test_type="$(echo $1 | tr '[:lower:]' '[:upper:]')"
-	shift
 	local test_name="$1"
 	shift
 	timing_enter $test_name
 	echo "************************************"
-	echo "START TEST $test_type $test_name"
+	echo "START TEST $test_name"
 	echo "************************************"
 	xtrace_restore
 	time "$@"
 	xtrace_disable
 	echo "************************************"
-	echo "END TEST $test_type $test_name"
+	echo "END TEST $test_name"
 	echo "************************************"
 	timing_exit $test_name
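
With the type argument gone, call sites pass the test name first and the
command to time after it. For example (the invocation is taken from the
converted nvme.sh below; the banners are what the updated function prints
around the timed command):

run_test "nvme_sgl" $testdir/sgl/sgl

************************************
START TEST nvme_sgl
************************************
(output of $testdir/sgl/sgl, run under time)
************************************
END TEST nvme_sgl
************************************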

test/env/env.sh

@@ -4,9 +4,9 @@ testdir=$(readlink -f $(dirname $0))
 rootdir=$(readlink -f $testdir/../..)
 source $rootdir/test/common/autotest_common.sh
-run_test "case" "env_memory" $testdir/memory/memory_ut
-run_test "case" "env_vtophys" $testdir/vtophys/vtophys
-run_test "case" "env_pci" $testdir/pci/pci_ut
+run_test "env_memory" $testdir/memory/memory_ut
+run_test "env_vtophys" $testdir/vtophys/vtophys
+run_test "env_pci" $testdir/pci/pci_ut
 argv="-c 0x1 "
 if [ $(uname) = Linux ]; then
@@ -18,12 +18,12 @@ if [ $(uname) = Linux ]; then
 	# this implicitly.
 	argv+="--base-virtaddr=0x200000000000"
 fi
-run_test "case" "env_dpdk_post_init" $testdir/env_dpdk_post_init/env_dpdk_post_init $argv
+run_test "env_dpdk_post_init" $testdir/env_dpdk_post_init/env_dpdk_post_init $argv
 if [ $(uname) = Linux ]; then
 	# This tests the --match-allocations DPDK parameter which is only
 	# supported on Linux
-	run_test "case" "env_mem_callbacks" $testdir/mem_callbacks/mem_callbacks
+	run_test "env_mem_callbacks" $testdir/mem_callbacks/mem_callbacks
 fi
 report_test_completion "env"

@@ -45,21 +45,21 @@ if [ -z "$nv_cache" ]; then
 	echo "Couldn't find NVMe device to be used as non-volatile cache"
 fi
-run_test suite "ftl_bdevperf" $testdir/bdevperf.sh $device
+run_test "ftl_bdevperf" $testdir/bdevperf.sh $device
-run_test suite "ftl_restore" $testdir/restore.sh $device
+run_test "ftl_restore" $testdir/restore.sh $device
 if [ -n "$nv_cache" ]; then
-	run_test suite "ftl_restore_nv_cache" $testdir/restore.sh -c $nv_cache $device
+	run_test "ftl_restore_nv_cache" $testdir/restore.sh -c $nv_cache $device
 fi
 if [ -n "$nv_cache" ]; then
-	run_test suite "ftl_dirty_shutdown" $testdir/dirty_shutdown.sh -c $nv_cache $device
+	run_test "ftl_dirty_shutdown" $testdir/dirty_shutdown.sh -c $nv_cache $device
 fi
-run_test suite "ftl_json" $testdir/json.sh $device
+run_test "ftl_json" $testdir/json.sh $device
 if [ $SPDK_TEST_FTL_EXTENDED -eq 1 ]; then
-	run_test suite "ftl_fio_basic" $testdir/fio.sh $device basic
+	run_test "ftl_fio_basic" $testdir/fio.sh $device basic
 	$rootdir/app/spdk_tgt/spdk_tgt &
 	svc_pid=$!
@@ -72,5 +72,5 @@ if [ $SPDK_TEST_FTL_EXTENDED -eq 1 ]; then
 	trap - SIGINT SIGTERM EXIT
-	run_test suite "ftl_fio_extended" $testdir/fio.sh $device extended $uuid
+	run_test "ftl_fio_extended" $testdir/fio.sh $device extended $uuid
 fi

@@ -4,8 +4,8 @@ testdir=$(readlink -f $(dirname $0))
 rootdir=$(readlink -f $testdir/../..)
 source $rootdir/test/common/autotest_common.sh
-run_test "case" "ioat_perf" $rootdir/examples/ioat/perf/ioat_perf -t 1
+run_test "ioat_perf" $rootdir/examples/ioat/perf/ioat_perf -t 1
-run_test "case" "ioat_verify" $rootdir/examples/ioat/verify/verify -t 1
+run_test "ioat_verify" $rootdir/examples/ioat/verify/verify -t 1
 report_test_completion "ioat"

@@ -82,9 +82,9 @@ iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
 # Check and avoid setting DataDigest.
 DataDigestAbility=$(iscsiadm -m node -p $TARGET_IP:$ISCSI_PORT -o update -n node.conn[0].iscsi.DataDigest -v None 2>&1 || true)
 if [ "$DataDigestAbility"x != x ]; then
-	run_test "case" "iscsi_tgt_digest" iscsi_header_digest_test
+	run_test "iscsi_tgt_digest" iscsi_header_digest_test
 else
-	run_test "case" "iscsi_tgt_data_digest" iscsi_header_data_digest_test
+	run_test "iscsi_tgt_data_digest" iscsi_header_data_digest_test
 fi
 trap - SIGINT SIGTERM EXIT

@@ -22,39 +22,39 @@ create_veth_interfaces $TEST_TYPE
 trap 'cleanup_veth_interfaces $TEST_TYPE; exit 1' SIGINT SIGTERM EXIT
-run_test suite "iscsi_tgt_sock" ./test/iscsi_tgt/sock/sock.sh $TEST_TYPE
+run_test "iscsi_tgt_sock" ./test/iscsi_tgt/sock/sock.sh $TEST_TYPE
 if [ "$TEST_TYPE" == "posix" ]; then
 	# calsoft doesn't handle TCP stream properly and fails decoding iSCSI
 	# requests when are divided by TCP segmentation. This is very common
 	# situation for VPP and causes that calsoft.sh never PASS.
-	run_test suite "iscsi_tgt_calsoft" ./test/iscsi_tgt/calsoft/calsoft.sh
+	run_test "iscsi_tgt_calsoft" ./test/iscsi_tgt/calsoft/calsoft.sh
 fi
-run_test suite "iscsi_tgt_filesystem" ./test/iscsi_tgt/filesystem/filesystem.sh
-run_test suite "iscsi_tgt_reset" ./test/iscsi_tgt/reset/reset.sh
-run_test suite "iscsi_tgt_rpc_config" ./test/iscsi_tgt/rpc_config/rpc_config.sh $TEST_TYPE
-run_test suite "iscsi_tgt_iscsi_lvol" ./test/iscsi_tgt/lvol/iscsi_lvol.sh
-run_test suite "iscsi_tgt_fio" ./test/iscsi_tgt/fio/fio.sh
-run_test suite "iscsi_tgt_qos" ./test/iscsi_tgt/qos/qos.sh
+run_test "iscsi_tgt_filesystem" ./test/iscsi_tgt/filesystem/filesystem.sh
+run_test "iscsi_tgt_reset" ./test/iscsi_tgt/reset/reset.sh
+run_test "iscsi_tgt_rpc_config" ./test/iscsi_tgt/rpc_config/rpc_config.sh $TEST_TYPE
+run_test "iscsi_tgt_iscsi_lvol" ./test/iscsi_tgt/lvol/iscsi_lvol.sh
+run_test "iscsi_tgt_fio" ./test/iscsi_tgt/fio/fio.sh
+run_test "iscsi_tgt_qos" ./test/iscsi_tgt/qos/qos.sh
 # IP Migration tests do not support network namespaces,
 # they can only be run on posix sockets.
 if [ "$TEST_TYPE" == "posix" ]; then
-	run_test suite "iscsi_tgt_ip_migration" ./test/iscsi_tgt/ip_migration/ip_migration.sh
+	run_test "iscsi_tgt_ip_migration" ./test/iscsi_tgt/ip_migration/ip_migration.sh
 fi
-run_test suite "iscsi_tgt_trace_record" ./test/iscsi_tgt/trace_record/trace_record.sh
+run_test "iscsi_tgt_trace_record" ./test/iscsi_tgt/trace_record/trace_record.sh
 if [ $RUN_NIGHTLY -eq 1 ]; then
 	if [ $SPDK_TEST_PMDK -eq 1 ]; then
-		run_test suite "iscsi_tgt_pmem" ./test/iscsi_tgt/pmem/iscsi_pmem.sh 4096 10
+		run_test "iscsi_tgt_pmem" ./test/iscsi_tgt/pmem/iscsi_pmem.sh 4096 10
 	fi
-	run_test suite "iscsi_tgt_ext4test" ./test/iscsi_tgt/ext4test/ext4test.sh
-	run_test suite "iscsi_tgt_digests" ./test/iscsi_tgt/digests/digests.sh
+	run_test "iscsi_tgt_ext4test" ./test/iscsi_tgt/ext4test/ext4test.sh
+	run_test "iscsi_tgt_digests" ./test/iscsi_tgt/digests/digests.sh
 fi
 if [ $SPDK_TEST_RBD -eq 1 ]; then
 	# RBD tests do not support network namespaces,
 	# they can only be run on posix sockets.
 	if [ "$TEST_TYPE" == "posix" ]; then
-		run_test suite "iscsi_tgt_rbd" ./test/iscsi_tgt/rbd/rbd.sh
+		run_test "iscsi_tgt_rbd" ./test/iscsi_tgt/rbd/rbd.sh
 	fi
 fi
@@ -65,17 +65,17 @@ if [ $SPDK_TEST_NVMF -eq 1 ]; then
 	# they can only be run on posix sockets.
 	if [ "$TEST_TYPE" == "posix" ]; then
 		# Test configure remote NVMe device from rpc and conf file
-		run_test suite "iscsi_tgt_fio_remote_nvme" ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
+		run_test "iscsi_tgt_fio_remote_nvme" ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
 	fi
 fi
 if [ $RUN_NIGHTLY -eq 1 ]; then
-	run_test suite "iscsi_tgt_multiconnection" ./test/iscsi_tgt/multiconnection/multiconnection.sh
+	run_test "iscsi_tgt_multiconnection" ./test/iscsi_tgt/multiconnection/multiconnection.sh
 fi
 if [ $SPDK_TEST_ISCSI_INITIATOR -eq 1 ]; then
-	run_test suite "iscsi_tgt_initiator" ./test/iscsi_tgt/initiator/initiator.sh
-	run_test suite "iscsi_tgt_bdev_io_wait" ./test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh
+	run_test "iscsi_tgt_initiator" ./test/iscsi_tgt/initiator/initiator.sh
+	run_test "iscsi_tgt_bdev_io_wait" ./test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh
 fi
 cleanup_veth_interfaces $TEST_TYPE

@@ -137,9 +137,9 @@ spdk_pid=$!
 trap 'killprocess "$spdk_pid"; exit 1' SIGINT SIGTERM EXIT
 waitforlisten $spdk_pid
-run_test "case" "test_construct_lvs" test_construct_lvs
-run_test "case" "test_construct_lvol" test_construct_lvol
-run_test "case" "test_construct_multi_lvols" test_construct_multi_lvols
+run_test "test_construct_lvs" test_construct_lvs
+run_test "test_construct_lvol" test_construct_lvol
+run_test "test_construct_multi_lvols" test_construct_multi_lvols
 trap - SIGINT SIGTERM EXIT
 killprocess $spdk_pid

@@ -7,7 +7,7 @@ source $rootdir/test/common/autotest_common.sh
 timing_enter lvol
 timing_enter basic
-run_test suite "lvol_basic" test/lvol/basic.sh
+run_test "lvol_basic" test/lvol/basic.sh
 timing_exit basic
 timing_exit lvol

@@ -109,25 +109,25 @@ if [ $(uname) = Linux ]; then
 	trap "kill_stub -9; exit 1" SIGINT SIGTERM EXIT
 fi
-run_test "case" "nvme_reset" $testdir/reset/reset -q 64 -w write -s 4096 -t 5
-run_test "case" "nvme_identify" nvme_identify
-run_test "case" "nvme_perf" nvme_perf
-run_test "case" "nvme_reserve" $rootdir/examples/nvme/reserve/reserve
-run_test "case" "nvme_hello_world" $rootdir/examples/nvme/hello_world/hello_world
-run_test "case" "nvme_deallocated_value" $testdir/deallocated_value/deallocated_value
-run_test "case" "nvme_sgl" $testdir/sgl/sgl
-run_test "case" "nvme_e2edp" $testdir/e2edp/nvme_dp
-run_test "case" "nvme_err_injection" $testdir/err_injection/err_injection
-run_test "case" "nvme_overhead" $testdir/overhead/overhead -s 4096 -t 1 -H
-run_test "case" "nvme_arbitration" $rootdir/examples/nvme/arbitration/arbitration -t 3 -i 0
+run_test "nvme_reset" $testdir/reset/reset -q 64 -w write -s 4096 -t 5
+run_test "nvme_identify" nvme_identify
+run_test "nvme_perf" nvme_perf
+run_test "nvme_reserve" $rootdir/examples/nvme/reserve/reserve
+run_test "nvme_hello_world" $rootdir/examples/nvme/hello_world/hello_world
+run_test "nvme_deallocated_value" $testdir/deallocated_value/deallocated_value
+run_test "nvme_sgl" $testdir/sgl/sgl
+run_test "nvme_e2edp" $testdir/e2edp/nvme_dp
+run_test "nvme_err_injection" $testdir/err_injection/err_injection
+run_test "nvme_overhead" $testdir/overhead/overhead -s 4096 -t 1 -H
+run_test "nvme_arbitration" $rootdir/examples/nvme/arbitration/arbitration -t 3 -i 0
 if [ $(uname) != "FreeBSD" ]; then
-	run_test "case" "nvme_startup" $testdir/startup/startup -t 1000000
-	run_test "case" "nvme_multi_secondary" nvme_multi_secondary
+	run_test "nvme_startup" $testdir/startup/startup -t 1000000
+	run_test "nvme_multi_secondary" nvme_multi_secondary
 	trap - SIGINT SIGTERM EXIT
 	kill_stub
 fi
 if [ -d /usr/src/fio ]; then
-	run_test "case" "nvme_fio" nvme_fio_test
+	run_test "nvme_fio" nvme_fio_test
 fi

@@ -81,10 +81,10 @@ if [ $TEST_TRANSPORT == "rdma" ] && check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP;
 	exit 0
 fi
-run_test "case" "nvmf_target_disconnect_tc1" nvmf_target_disconnect_tc1
-run_test "case" "nvmf_target_disconnect_tc2" nvmf_target_disconnect_tc2
+run_test "nvmf_target_disconnect_tc1" nvmf_target_disconnect_tc1
+run_test "nvmf_target_disconnect_tc2" nvmf_target_disconnect_tc2
 if [ -n "$NVMF_SECOND_TARGET_IP" ]; then
-	run_test "case" "nvmf_target_disconnect_tc3" nvmf_target_disconnect_tc3
+	run_test "nvmf_target_disconnect_tc3" nvmf_target_disconnect_tc3
 fi
 trap - SIGINT SIGTERM EXIT

@@ -13,44 +13,44 @@ trap "exit 1" SIGINT SIGTERM EXIT
 TEST_ARGS=( "$@" )
-run_test suite "nvmf_example" test/nvmf/target/nvmf_example.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_filesystem" test/nvmf/target/filesystem.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_discovery" test/nvmf/target/discovery.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_connect_disconnect" test/nvmf/target/connect_disconnect.sh "${TEST_ARGS[@]}"
+run_test "nvmf_example" test/nvmf/target/nvmf_example.sh "${TEST_ARGS[@]}"
+run_test "nvmf_filesystem" test/nvmf/target/filesystem.sh "${TEST_ARGS[@]}"
+run_test "nvmf_discovery" test/nvmf/target/discovery.sh "${TEST_ARGS[@]}"
+run_test "nvmf_connect_disconnect" test/nvmf/target/connect_disconnect.sh "${TEST_ARGS[@]}"
 if [ $SPDK_TEST_NVME_CLI -eq 1 ]; then
-	run_test suite "nvmf_nvme_cli" test/nvmf/target/nvme_cli.sh "${TEST_ARGS[@]}"
+	run_test "nvmf_nvme_cli" test/nvmf/target/nvme_cli.sh "${TEST_ARGS[@]}"
 fi
-run_test suite "nvmf_lvol" test/nvmf/target/nvmf_lvol.sh "${TEST_ARGS[@]}"
+run_test "nvmf_lvol" test/nvmf/target/nvmf_lvol.sh "${TEST_ARGS[@]}"
 #TODO: disabled due to intermittent failures. Need to triage.
-# run_test suite "nvmf_srq_overwhelm" test/nvmf/target/srq_overwhelm.sh $TEST_ARGS
-run_test suite "nvmf_vhost" test/nvmf/target/nvmf_vhost.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_bdev_io_wait" test/nvmf/target/bdev_io_wait.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_create_transport." test/nvmf/target/create_transport.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_multitarget" test/nvmf/target/multitarget.sh "${TEST_ARGS[@]}"
+# run_test "nvmf_srq_overwhelm" test/nvmf/target/srq_overwhelm.sh $TEST_ARGS
+run_test "nvmf_vhost" test/nvmf/target/nvmf_vhost.sh "${TEST_ARGS[@]}"
+run_test "nvmf_bdev_io_wait" test/nvmf/target/bdev_io_wait.sh "${TEST_ARGS[@]}"
+run_test "nvmf_create_transport." test/nvmf/target/create_transport.sh "${TEST_ARGS[@]}"
+run_test "nvmf_multitarget" test/nvmf/target/multitarget.sh "${TEST_ARGS[@]}"
 if [ $RUN_NIGHTLY -eq 1 ]; then
-	run_test suite "nvmf_fuzz" test/nvmf/target/fuzz.sh "${TEST_ARGS[@]}"
-	run_test suite "nvmf_multiconnection" test/nvmf/target/multiconnection.sh "${TEST_ARGS[@]}"
-	run_test suite "nvmf_initiator_timeout" test/nvmf/target/initiator_timeout.sh "${TEST_ARGS[@]}"
+	run_test "nvmf_fuzz" test/nvmf/target/fuzz.sh "${TEST_ARGS[@]}"
+	run_test "nvmf_multiconnection" test/nvmf/target/multiconnection.sh "${TEST_ARGS[@]}"
+	run_test "nvmf_initiator_timeout" test/nvmf/target/initiator_timeout.sh "${TEST_ARGS[@]}"
 fi
-run_test suite "nvmf_nmic" test/nvmf/target/nmic.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_rpc" test/nvmf/target/rpc.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_fio" test/nvmf/target/fio.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_shutdown" test/nvmf/target/shutdown.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_bdevio" test/nvmf/target/bdevio.sh "${TEST_ARGS[@]}"
+run_test "nvmf_nmic" test/nvmf/target/nmic.sh "${TEST_ARGS[@]}"
+run_test "nvmf_rpc" test/nvmf/target/rpc.sh "${TEST_ARGS[@]}"
+run_test "nvmf_fio" test/nvmf/target/fio.sh "${TEST_ARGS[@]}"
+run_test "nvmf_shutdown" test/nvmf/target/shutdown.sh "${TEST_ARGS[@]}"
+run_test "nvmf_bdevio" test/nvmf/target/bdevio.sh "${TEST_ARGS[@]}"
 timing_enter host
-run_test suite "nvmf_bdevperf" test/nvmf/host/bdevperf.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_identify" test/nvmf/host/identify.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_perf" test/nvmf/host/perf.sh "${TEST_ARGS[@]}"
+run_test "nvmf_bdevperf" test/nvmf/host/bdevperf.sh "${TEST_ARGS[@]}"
+run_test "nvmf_identify" test/nvmf/host/identify.sh "${TEST_ARGS[@]}"
+run_test "nvmf_perf" test/nvmf/host/perf.sh "${TEST_ARGS[@]}"
 # TODO: disabled due to intermittent failures (RDMA_CM_EVENT_UNREACHABLE/ETIMEDOUT)
 #run_test test/nvmf/host/identify_kernel_nvmf.sh $TEST_ARGS
-run_test suite "nvmf_aer" test/nvmf/host/aer.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_fio" test/nvmf/host/fio.sh "${TEST_ARGS[@]}"
-run_test suite "nvmf_target_disconnect" test/nvmf/host/target_disconnect.sh "${TEST_ARGS[@]}"
+run_test "nvmf_aer" test/nvmf/host/aer.sh "${TEST_ARGS[@]}"
+run_test "nvmf_fio" test/nvmf/host/fio.sh "${TEST_ARGS[@]}"
+run_test "nvmf_target_disconnect" test/nvmf/host/target_disconnect.sh "${TEST_ARGS[@]}"
 timing_exit host

@@ -56,13 +56,13 @@ function nvmf_filesystem_part {
 	sleep 1
 	if [ $incapsule -eq 0 ]; then
-		run_test "case" "filesystem_ext4" nvmf_filesystem_create "ext4"
-		run_test "case" "filesystem_btrfs" nvmf_filesystem_create "btrfs"
-		run_test "case" "filesystem_xfs" nvmf_filesystem_create "xfs"
+		run_test "filesystem_ext4" nvmf_filesystem_create "ext4"
+		run_test "filesystem_btrfs" nvmf_filesystem_create "btrfs"
+		run_test "filesystem_xfs" nvmf_filesystem_create "xfs"
 	else
-		run_test "case" "filesystem_incapsule_ext4" nvmf_filesystem_create "ext4"
-		run_test "case" "filesystem_incapsule_btrfs" nvmf_filesystem_create "btrfs"
-		run_test "case" "filesystem_incapsule_xfs" nvmf_filesystem_create "xfs"
+		run_test "filesystem_incapsule_ext4" nvmf_filesystem_create "ext4"
+		run_test "filesystem_incapsule_btrfs" nvmf_filesystem_create "btrfs"
+		run_test "filesystem_incapsule_xfs" nvmf_filesystem_create "xfs"
 	fi
 	parted -s /dev/nvme0n1 rm 1
@@ -78,7 +78,7 @@ function nvmf_filesystem_part {
 	nvmfpid=
 }
-run_test "suite" "nvmf_filesystem_no_incapsule" nvmf_filesystem_part 0
-run_test "suite" "nvmf_filesystem_incapsule" nvmf_filesystem_part 4096
+run_test "nvmf_filesystem_no_incapsule" nvmf_filesystem_part 0
+run_test "nvmf_filesystem_incapsule" nvmf_filesystem_part 4096
 nvmftestfini

@@ -129,9 +129,9 @@ done
 $rpc_py < $testdir/rpcs.txt
 timing_exit create_subsystems
-run_test "case" "nvmf_shutdown_tc1" nvmf_shutdown_tc1
-run_test "case" "nvmf_shutdown_tc2" nvmf_shutdown_tc2
-run_test "case" "nvmf_shutdown_tc3" nvmf_shutdown_tc3
+run_test "nvmf_shutdown_tc1" nvmf_shutdown_tc1
+run_test "nvmf_shutdown_tc2" nvmf_shutdown_tc2
+run_test "nvmf_shutdown_tc3" nvmf_shutdown_tc3
 rm -f ./local-job0-0-verify.state
 rm -rf $testdir/bdevperf.conf

@@ -7,7 +7,7 @@ source $rootdir/test/common/autotest_common.sh
 function suite()
 {
-	run_test suite "ocf_$(basename "$@")" "$@"
+	run_test "ocf_$(basename "$@")" "$@"
 }
 suite "$testdir/integrity/fio-modes.sh"

@@ -53,7 +53,7 @@ WORKDIR=$(readlink -f $(dirname $0))
 case $1 in
 	-hp|--hotplug)
 		echo 'Running hotplug tests suite...'
-		run_test case "vhost_hotplug" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+		run_test "vhost_hotplug" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
 			--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
 			--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
 			--vm=2,$VM_IMAGE,Nvme0n1p4:Nvme0n1p5 \
@@ -64,7 +64,7 @@ case $1 in
 		;;
 	-shr|--scsi-hot-remove)
 		echo 'Running scsi hotremove tests suite...'
-		run_test case "vhost_scsi_hot_remove" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+		run_test "vhost_scsi_hot_remove" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
 			--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
 			--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
 			--test-type=spdk_vhost_scsi \
@@ -73,7 +73,7 @@ case $1 in
 		;;
 	-bhr|--blk-hot-remove)
 		echo 'Running blk hotremove tests suite...'
-		run_test case "vhost_blk_hot_remove" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+		run_test "vhost_blk_hot_remove" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
 			--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
 			--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
 			--test-type=spdk_vhost_blk \

@@ -136,8 +136,8 @@ function is_fio_running()
 	return $ret
 }
-run_test "case" "vhost_migration_tc1" migration_tc1
-run_test "case" "vhost_migration_tc2" migration_tc2
+run_test "vhost_migration_tc1" migration_tc1
+run_test "vhost_migration_tc2" migration_tc2
 trap - SIGINT ERR EXIT

@@ -29,88 +29,88 @@ DISKS_NUMBER=$(lspci -mm -n | grep 0108 | tr -d '"' | awk -F " " '{print "0000:"
 WORKDIR=$(readlink -f $(dirname $0))
-run_test case "vhost_negative" $WORKDIR/other/negative.sh
+run_test "vhost_negative" $WORKDIR/other/negative.sh
 report_test_completion "vhost_negative"
-run_test suite "vhost_boot" $WORKDIR/vhost_boot/vhost_boot.sh --vm_image=$VM_IMAGE
+run_test "vhost_boot" $WORKDIR/vhost_boot/vhost_boot.sh --vm_image=$VM_IMAGE
 report_test_completion "vhost_boot"
 if [ $RUN_NIGHTLY -eq 1 ]; then
 	echo 'Running blk integrity suite...'
-	run_test case "vhost_blk_integrity" $WORKDIR/fiotest/fio.sh -x --fio-bin=$FIO_BIN \
+	run_test "vhost_blk_integrity" $WORKDIR/fiotest/fio.sh -x --fio-bin=$FIO_BIN \
 		--vm=0,$VM_IMAGE,Nvme0n1p0:RaidBdev0:RaidBdev1:RaidBdev2 \
 		--test-type=spdk_vhost_blk \
 		--fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
 	report_test_completion "nightly_vhost_integrity_blk"
 	echo 'Running SCSI integrity suite...'
-	run_test case "vhost_scsi_integrity" $WORKDIR/fiotest/fio.sh -x --fio-bin=$FIO_BIN \
+	run_test "vhost_scsi_integrity" $WORKDIR/fiotest/fio.sh -x --fio-bin=$FIO_BIN \
 		--vm=0,$VM_IMAGE,Nvme0n1p0:RaidBdev0:RaidBdev1:RaidBdev2 \
 		--test-type=spdk_vhost_scsi \
 		--fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
 	report_test_completion "nightly_vhost_integrity"
 	echo 'Running filesystem integrity suite with SCSI...'
-	run_test case "vhost_scsi_fs_integrity" $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_scsi --fs="xfs ntfs btrfs ext4"
+	run_test "vhost_scsi_fs_integrity" $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_scsi --fs="xfs ntfs btrfs ext4"
 	report_test_completion "vhost_fs_integrity_scsi"
 	echo 'Running filesystem integrity suite with BLK...'
-	run_test case "vhost_blk_fs_integrity" $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_blk --fs="xfs ntfs btrfs ext4"
+	run_test "vhost_blk_fs_integrity" $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_blk --fs="xfs ntfs btrfs ext4"
 	report_test_completion "vhost_fs_integrity_blk"
 	if [[ $DISKS_NUMBER -ge 2 ]]; then
 		echo 'Running lvol integrity nightly suite with two cores and two controllers'
-		run_test case "vhost_scsi_2core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+		run_test "vhost_scsi_2core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
 			--ctrl-type=spdk_vhost_scsi --max-disks=2 --distribute-cores --vm-count=2
 		echo 'Running lvol integrity nightly suite with one core and two controllers'
-		run_test case "vhost_scsi_1core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+		run_test "vhost_scsi_1core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
 			--ctrl-type=spdk_vhost_scsi --max-disks=2 --vm-count=2
 	fi
 	if [[ -e $CENTOS_VM_IMAGE ]]; then
 		echo 'Running lvol integrity nightly suite with different os types'
-		run_test case "vhost_scsi_nightly" $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
+		run_test "vhost_scsi_nightly" $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
 			--ctrl-type=spdk_vhost_scsi --vm-count=2 --multi-os
 	fi
 	echo 'Running lvol integrity nightly suite with one core and one controller'
-	run_test case "vhost_scsi_1core_1ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+	run_test "vhost_scsi_1core_1ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
 		--ctrl-type=spdk_vhost_scsi --max-disks=1
 	if [[ $DISKS_NUMBER -ge 2 ]]; then
 		echo 'Running lvol integrity nightly suite with two cores and two controllers'
-		run_test case "vhost_blk_2core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+		run_test "vhost_blk_2core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
 			--ctrl-type=spdk_vhost_blk --max-disks=2 --distribute-cores --vm-count=2
 		echo 'Running lvol integrity nightly suite with one core and two controllers'
-		run_test case "vhost_blk_1core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+		run_test "vhost_blk_1core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
 			--ctrl-type=spdk_vhost_blk --max-disks=2 --vm-count=2
 	fi
 	if [[ -e $CENTOS_VM_IMAGE ]]; then
 		echo 'Running lvol integrity nightly suite with different os types'
-		run_test case "vhost_blk_nightly" $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
+		run_test "vhost_blk_nightly" $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
 			--ctrl-type=spdk_vhost_blk --vm-count=2 --multi-os
 	fi
 	echo 'Running lvol integrity nightly suite with one core and one controller'
-	run_test case "vhost_lvol_integrity_1core_1ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+	run_test "vhost_lvol_integrity_1core_1ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
 		--ctrl-type=spdk_vhost_blk --max-disks=1
 	echo 'Running readonly tests suite...'
-	run_test case "vhost_readonly" $WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1 -x
+	run_test "vhost_readonly" $WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1 -x
 	report_test_completion "vhost_readonly"
 	echo 'Running migration suite...'
-	run_test case "vhost_migration" $WORKDIR/migration/migration.sh -x \
+	run_test "vhost_migration" $WORKDIR/migration/migration.sh -x \
 		--fio-bin=$FIO_BIN --os=$VM_IMAGE
 fi
 echo 'Running lvol integrity suite...'
-run_test case "vhost_scsi_lvol_integrity" $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
+run_test "vhost_scsi_lvol_integrity" $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
 	--ctrl-type=spdk_vhost_scsi --thin-provisioning
 report_test_completion "vhost_integrity_lvol_scsi"
 echo 'Running lvol integrity suite...'
-run_test case "vhost_blk_lvol_integrity" $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
+run_test "vhost_blk_lvol_integrity" $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
 	--ctrl-type=spdk_vhost_blk
 report_test_completion "vhost_integrity_lvol_blk"
-run_test suite "spdkcli_vhost" ./test/spdkcli/vhost.sh
+run_test "spdkcli_vhost" ./test/spdkcli/vhost.sh

@@ -52,11 +52,11 @@ function vmd_bdev_svc {
 	killprocess $svcpid
 }
-run_test "case" "vmd_identify" vmd_identify
-run_test "case" "vmd_hello_world" $rootdir/examples/nvme/hello_world/hello_world -V
-run_test "case" "vmd_perf" vmd_perf
+run_test "vmd_identify" vmd_identify
+run_test "vmd_hello_world" $rootdir/examples/nvme/hello_world/hello_world -V
+run_test "vmd_perf" vmd_perf
 if [ -d /usr/src/fio ]; then
-	run_test "case" "vmd_fio" vmd_fio
+	run_test "vmd_fio" vmd_fio
 fi
-run_test "case" "vmd_bdev_svc" vmd_bdev_svc
+run_test "vmd_bdev_svc" vmd_bdev_svc