diff --git a/autotest.sh b/autotest.sh
index d0b04700d6..d9acb897da 100755
--- a/autotest.sh
+++ b/autotest.sh
@@ -85,7 +85,7 @@ timing_exit nvmf_setup
 
 if [ $SPDK_TEST_UNITTEST -eq 1 ]; then
 	timing_enter unittest
-	run_test ./test/unit/unittest.sh
+	run_test suite ./test/unit/unittest.sh
 	report_test_completion "unittest"
 	timing_exit unittest
 fi
@@ -93,109 +93,109 @@ fi
 timing_enter lib
 
 if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then
-	run_test test/bdev/blockdev.sh
+	run_test suite test/bdev/blockdev.sh
 	if [ $(uname -s) = Linux ]; then
-		run_test test/bdev/bdevjson/json_config.sh
+		run_test suite test/bdev/bdevjson/json_config.sh
 		if modprobe -n nbd; then
-			run_test test/bdev/nbdjson/json_config.sh
+			run_test suite test/bdev/nbdjson/json_config.sh
 		fi
 	fi
 fi
 
 if [ $SPDK_TEST_EVENT -eq 1 ]; then
-	run_test test/event/event.sh
+	run_test suite test/event/event.sh
 fi
 
 if [ $SPDK_TEST_NVME -eq 1 ]; then
-	run_test test/nvme/nvme.sh
+	run_test suite test/nvme/nvme.sh
 	if [ $SPDK_TEST_NVME_CLI -eq 1 ]; then
-		run_test test/nvme/spdk_nvme_cli.sh
+		run_test suite test/nvme/spdk_nvme_cli.sh
 	fi
 	# Only test hotplug when ASAN is disabled: with ASAN enabled, ASAN
 	# catches the SEGV earlier than our handler does, which breaks the
 	# hotplug logic.
 	if [ $SPDK_RUN_ASAN -eq 0 ]; then
-		run_test test/nvme/hotplug.sh intel
+		run_test suite test/nvme/hotplug.sh intel
 	fi
 fi
 
-run_test test/env/env.sh
+run_test suite test/env/env.sh
 
 if [ $SPDK_TEST_IOAT -eq 1 ]; then
-	run_test test/ioat/ioat.sh
+	run_test suite test/ioat/ioat.sh
 fi
 
 timing_exit lib
 
 if [ $SPDK_TEST_ISCSI -eq 1 ]; then
-	run_test ./test/iscsi_tgt/iscsi_tgt.sh posix
-	run_test ./test/iscsi_tgt/iscsijson/json_config.sh
+	run_test suite ./test/iscsi_tgt/iscsi_tgt.sh posix
+	run_test suite ./test/iscsi_tgt/iscsijson/json_config.sh
 fi
 
 if [ $SPDK_TEST_BLOBFS -eq 1 ]; then
-	run_test ./test/blobfs/rocksdb/rocksdb.sh
-	run_test ./test/blobstore/blobstore.sh
+	run_test suite ./test/blobfs/rocksdb/rocksdb.sh
+	run_test suite ./test/blobstore/blobstore.sh
 fi
 
 if [ $SPDK_TEST_NVMF -eq 1 ]; then
-	run_test ./test/nvmf/nvmf.sh
-	run_test ./test/nvmf/nvmfjson/json_config.sh
+	run_test suite ./test/nvmf/nvmf.sh
+	run_test suite ./test/nvmf/nvmfjson/json_config.sh
 fi
 
 if [ $SPDK_TEST_VHOST -eq 1 ]; then
 	timing_enter vhost
 	timing_enter negative
-	run_test ./test/vhost/spdk_vhost.sh --negative
+	run_test suite ./test/vhost/spdk_vhost.sh --negative
 	timing_exit negative
 
 	timing_enter vhost_json_config
-	run_test ./test/vhost/json_config/json_config.sh
+	run_test suite ./test/vhost/json_config/json_config.sh
 	timing_exit vhost_json_config
 
 	if [ $RUN_NIGHTLY -eq 1 ]; then
 		timing_enter integrity_blk
-		run_test ./test/vhost/spdk_vhost.sh --integrity-blk
+		run_test suite ./test/vhost/spdk_vhost.sh --integrity-blk
 		timing_exit integrity_blk
 
 		timing_enter integrity
-		run_test ./test/vhost/spdk_vhost.sh --integrity
+		run_test suite ./test/vhost/spdk_vhost.sh --integrity
 		timing_exit integrity
 
 		timing_enter fs_integrity_scsi
-		run_test ./test/vhost/spdk_vhost.sh --fs-integrity-scsi
+		run_test suite ./test/vhost/spdk_vhost.sh --fs-integrity-scsi
 		timing_exit fs_integrity_scsi
 
 		timing_enter fs_integrity_blk
-		run_test ./test/vhost/spdk_vhost.sh --fs-integrity-blk
+		run_test suite ./test/vhost/spdk_vhost.sh --fs-integrity-blk
 		timing_exit fs_integrity_blk
 
 		timing_enter integrity_lvol_scsi_nightly
-		run_test ./test/vhost/spdk_vhost.sh --integrity-lvol-scsi-nightly
+		run_test suite ./test/vhost/spdk_vhost.sh --integrity-lvol-scsi-nightly
 		timing_exit integrity_lvol_scsi_nightly
 
 		timing_enter integrity_lvol_blk_nightly
-		run_test ./test/vhost/spdk_vhost.sh --integrity-lvol-blk-nightly
+		run_test suite ./test/vhost/spdk_vhost.sh --integrity-lvol-blk-nightly
 		timing_exit integrity_lvol_blk_nightly
 
 		timing_enter vhost_migration
-		run_test ./test/vhost/spdk_vhost.sh --migration
+		run_test suite ./test/vhost/spdk_vhost.sh --migration
 		timing_exit vhost_migration
 
 		# timing_enter readonly
-		# run_test ./test/vhost/spdk_vhost.sh --readonly
+		# run_test suite ./test/vhost/spdk_vhost.sh --readonly
 		# timing_exit readonly
 	fi
 
 	timing_enter integrity_lvol_scsi
-	run_test ./test/vhost/spdk_vhost.sh --integrity-lvol-scsi
+	run_test suite ./test/vhost/spdk_vhost.sh --integrity-lvol-scsi
 	timing_exit integrity_lvol_scsi
 
 	timing_enter integrity_lvol_blk
-	run_test ./test/vhost/spdk_vhost.sh --integrity-lvol-blk
+	run_test suite ./test/vhost/spdk_vhost.sh --integrity-lvol-blk
 	timing_exit integrity_lvol_blk
 
 	timing_enter spdk_cli
-	run_test ./test/spdkcli/vhost.sh
+	run_test suite ./test/spdkcli/vhost.sh
 	timing_exit spdk_cli
 
 	timing_exit vhost
@@ -208,27 +208,27 @@ if [ $SPDK_TEST_LVOL -eq 1 ]; then
 	test_cases+="600,601,650,651,652,654,655,"
 	test_cases+="700,701,702,750,751,752,753,754,755,756,757,758,759,"
 	test_cases+="800,801,802,803,804,10000"
-	run_test ./test/lvol/lvol.sh --test-cases=$test_cases
+	run_test suite ./test/lvol/lvol.sh --test-cases=$test_cases
 	report_test_completion "lvol"
 	timing_exit lvol
 fi
 
 if [ $SPDK_TEST_VHOST_INIT -eq 1 ]; then
-	run_test ./test/vhost/initiator/blockdev.sh
-	run_test ./test/vhost/initiator/json_config.sh
-	run_test ./test/spdkcli/virtio.sh
+	run_test suite ./test/vhost/initiator/blockdev.sh
+	run_test suite ./test/vhost/initiator/json_config.sh
+	run_test suite ./test/spdkcli/virtio.sh
 	report_test_completion "vhost_initiator"
 fi
 
 if [ $SPDK_TEST_PMDK -eq 1 ]; then
-	run_test ./test/pmem/pmem.sh -x
-	run_test ./test/pmem/json_config/json_config.sh
-	run_test ./test/spdkcli/pmem.sh
+	run_test suite ./test/pmem/pmem.sh -x
+	run_test suite ./test/pmem/json_config/json_config.sh
+	run_test suite ./test/spdkcli/pmem.sh
 fi
 
 if [ $SPDK_TEST_RBD -eq 1 ]; then
-	run_test ./test/bdev/bdevjson/rbd_json_config.sh
-	run_test ./test/spdkcli/rbd.sh
+	run_test suite ./test/bdev/bdevjson/rbd_json_config.sh
+	run_test suite ./test/spdkcli/rbd.sh
 fi
 
 timing_enter cleanup
diff --git a/test/common/autotest_common.sh b/test/common/autotest_common.sh
index 89cc6a2fe0..a4c82b7420 100755
--- a/test/common/autotest_common.sh
+++ b/test/common/autotest_common.sh
@@ -424,14 +424,18 @@ function kill_stub() {
 
 function run_test() {
 	set +x
+	local test_type="$(echo "$1" | tr 'a-z' 'A-Z')"
+	shift
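+	# $1 is the banner label ("suite" or "case"); it was upper-cased and
+	# shifted off above, so "$@" below holds only the command to execute.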
 	echo "************************************"
-	echo "START TEST $1"
+	echo "START TEST $test_type $@"
 	echo "************************************"
 	set -x
 	time "$@"
 	set +x
 	echo "************************************"
-	echo "END TEST $1"
+	echo "END TEST $test_type $@"
 	echo "************************************"
 	set -x
 }
diff --git a/test/iscsi_tgt/iscsi_tgt.sh b/test/iscsi_tgt/iscsi_tgt.sh
index a8c9ce49df..b02c17f6d9 100755
--- a/test/iscsi_tgt/iscsi_tgt.sh
+++ b/test/iscsi_tgt/iscsi_tgt.sh
@@ -32,24 +32,24 @@ create_veth_interfaces $TEST_TYPE
 start_stub "-s 2048 -i 0 -m $ISCSI_TEST_CORE_MASK"
 trap "kill_stub; cleanup_veth_interfaces $TEST_TYPE; exit 1" SIGINT SIGTERM EXIT
 
-run_test ./test/iscsi_tgt/calsoft/calsoft.sh
-run_test ./test/iscsi_tgt/filesystem/filesystem.sh
-run_test ./test/iscsi_tgt/reset/reset.sh
-run_test ./test/iscsi_tgt/rpc_config/rpc_config.sh $TEST_TYPE
-run_test ./test/iscsi_tgt/lvol/iscsi_lvol.sh
-run_test ./test/iscsi_tgt/fio/fio.sh
-run_test ./test/iscsi_tgt/qos/qos.sh
+run_test suite ./test/iscsi_tgt/calsoft/calsoft.sh
+run_test suite ./test/iscsi_tgt/filesystem/filesystem.sh
+run_test suite ./test/iscsi_tgt/reset/reset.sh
+run_test suite ./test/iscsi_tgt/rpc_config/rpc_config.sh $TEST_TYPE
+run_test suite ./test/iscsi_tgt/lvol/iscsi_lvol.sh
+run_test suite ./test/iscsi_tgt/fio/fio.sh
+run_test suite ./test/iscsi_tgt/qos/qos.sh
 
 if [ $RUN_NIGHTLY -eq 1 ]; then
 	if [ $SPDK_TEST_PMDK -eq 1 ]; then
-		run_test ./test/iscsi_tgt/pmem/iscsi_pmem.sh 4096 10
+		run_test suite ./test/iscsi_tgt/pmem/iscsi_pmem.sh 4096 10
 	fi
-	run_test ./test/iscsi_tgt/ip_migration/ip_migration.sh
-	run_test ./test/iscsi_tgt/ext4test/ext4test.sh
-	run_test ./test/iscsi_tgt/digests/digests.sh
+	run_test suite ./test/iscsi_tgt/ip_migration/ip_migration.sh
+	run_test suite ./test/iscsi_tgt/ext4test/ext4test.sh
+	run_test suite ./test/iscsi_tgt/digests/digests.sh
 fi
 if [ $SPDK_TEST_RBD -eq 1 ]; then
-	run_test ./test/iscsi_tgt/rbd/rbd.sh
+	run_test suite ./test/iscsi_tgt/rbd/rbd.sh
 fi
 
 trap "cleanup_veth_interfaces $TEST_TYPE; exit 1" SIGINT SIGTERM EXIT
@@ -59,15 +59,15 @@ if [ $SPDK_TEST_NVMF -eq 1 ]; then
 	# TODO: enable remote NVMe controllers with multi-process so that
 	#  we can use the stub for this test
 	# Test configuring a remote NVMe device from rpc and the conf file
-	run_test ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
+	run_test suite ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
 fi
 
 if [ $RUN_NIGHTLY -eq 1 ]; then
-	run_test ./test/iscsi_tgt/multiconnection/multiconnection.sh
+	run_test suite ./test/iscsi_tgt/multiconnection/multiconnection.sh
 fi
 
 if [ $SPDK_TEST_ISCSI_INITIATOR -eq 1 ]; then
-	run_test ./test/iscsi_tgt/initiator/initiator.sh
+	run_test suite ./test/iscsi_tgt/initiator/initiator.sh
 fi
 
 cleanup_veth_interfaces $TEST_TYPE
diff --git a/test/lvol/lvol_test.py b/test/lvol/lvol_test.py
index 54394b73be..ba613812cb 100755
--- a/test/lvol/lvol_test.py
+++ b/test/lvol/lvol_test.py
@@ -3,13 +3,6 @@ import sys
 from test_cases import *
 
 
-def check_fail_count(fail_count, num_test):
-    if not fail_count:
-        print("Test: {num_test} - PASS".format(num_test=num_test))
-    else:
-        print("Test: {num_test} - FAIL".format(num_test=num_test))
-
-
 if __name__ == "__main__":
     rpc_py = None
     total_size = None
@@ -37,7 +30,6 @@ if __name__ == "__main__":
             fail_count = 0
             exec("fail_count += tc.test_case{num_test}"
                  "()".format(num_test=num_test))
-            check_fail_count(fail_count, num_test)
             if fail_count:
                 tc_failed.append(num_test)
 
diff --git a/test/lvol/test_cases.py b/test/lvol/test_cases.py
index d4ab30120a..794ff1c292 100644
--- a/test/lvol/test_cases.py
+++ b/test/lvol/test_cases.py
@@ -149,13 +149,16 @@ def case_message(func):
             10000: 'SIGTERM',
         }
         num = int(func.__name__.strip('test_case')[:])
-        print("========================================================")
-        print("Test Case {num}: Start".format(num=num))
-        print("Test Name: {name}".format(name=test_name[num]))
-        print("========================================================")
+        print("************************************")
+        print("START TEST CASE {name}".format(name=test_name[num]))
+        print("************************************")
         fail_count = func(*args, **kwargs)
-        print("Test Case {num}: END\n".format(num=num))
-        print("========================================================")
+        print("************************************")
+        if not fail_count:
+            print("END TEST CASE {name} PASS".format(name=test_name[num]))
+        else:
+            print("END TEST CASE {name} FAIL".format(name=test_name[num]))
+        print("************************************")
         return fail_count
     return inner
 
diff --git a/test/nvmf/nvmf.sh b/test/nvmf/nvmf.sh
index 14720cfbde..89aa63b7da 100755
--- a/test/nvmf/nvmf.sh
+++ b/test/nvmf/nvmf.sh
@@ -23,27 +23,27 @@ trap "kill_stub; exit 1" SIGINT SIGTERM EXIT
 
 export NVMF_APP="./app/nvmf_tgt/nvmf_tgt -i 0"
 
-run_test test/nvmf/filesystem/filesystem.sh
-run_test test/nvmf/discovery/discovery.sh
+run_test suite test/nvmf/filesystem/filesystem.sh
+run_test suite test/nvmf/discovery/discovery.sh
 if [ $SPDK_TEST_NVME_CLI -eq 1 ]; then
-	run_test test/nvmf/nvme_cli/nvme_cli.sh
+	run_test suite test/nvmf/nvme_cli/nvme_cli.sh
 fi
-run_test test/nvmf/lvol/nvmf_lvol.sh
-run_test test/nvmf/shutdown/shutdown.sh
+run_test suite test/nvmf/lvol/nvmf_lvol.sh
+run_test suite test/nvmf/shutdown/shutdown.sh
 
 if [ $RUN_NIGHTLY -eq 1 ]; then
-	run_test test/nvmf/multiconnection/multiconnection.sh
+	run_test suite test/nvmf/multiconnection/multiconnection.sh
 fi
 
 timing_enter host
 
-run_test test/nvmf/host/bdevperf.sh
-run_test test/nvmf/host/identify.sh
-run_test test/nvmf/host/perf.sh
+run_test suite test/nvmf/host/bdevperf.sh
+run_test suite test/nvmf/host/identify.sh
+run_test suite test/nvmf/host/perf.sh
 # TODO: disabled due to intermittent failures (RDMA_CM_EVENT_UNREACHABLE/ETIMEDOUT)
 #run_test test/nvmf/host/identify_kernel_nvmf.sh
-run_test test/nvmf/host/aer.sh
-run_test test/nvmf/host/fio.sh
+run_test suite test/nvmf/host/aer.sh
+run_test suite test/nvmf/host/fio.sh
 
 timing_exit host
 trap - SIGINT SIGTERM EXIT
@@ -51,8 +51,8 @@ kill_stub
 
 # TODO: enable nvme device detachment for multi-process so that
 #  we can use the stub for this test
-run_test test/nvmf/rpc/rpc.sh
-run_test test/nvmf/fio/fio.sh
+run_test suite test/nvmf/rpc/rpc.sh
+run_test suite test/nvmf/fio/fio.sh
 revert_soft_roce
 
 report_test_completion "nvmf"
diff --git a/test/vhost/spdk_vhost.sh b/test/vhost/spdk_vhost.sh
index 6c14b57c89..4bd020f6d9 100755
--- a/test/vhost/spdk_vhost.sh
+++ b/test/vhost/spdk_vhost.sh
@@ -63,12 +63,12 @@ WORKDIR=$(readlink -f $(dirname $0))
 case $1 in
 	-n|--negative)
 		echo 'Negative tests suite...'
-		$WORKDIR/other/negative.sh
+		run_test case $WORKDIR/other/negative.sh
 		report_test_completion "vhost_negative"
 		;;
 	-p|--performance)
 		echo 'Running performance suite...'
-		$WORKDIR/fiotest/autotest.sh --fio-bin=$FIO_BIN \
+		run_test case $WORKDIR/fiotest/autotest.sh --fio-bin=$FIO_BIN \
 		--vm=0,$VM_IMAGE,Nvme0n1p0 \
 		--test-type=spdk_vhost_scsi \
 		--fio-job=$WORKDIR/common/fio_jobs/default_performance.job
@@ -76,7 +76,7 @@ case $1 in
 		;;
 	-pb|--performance-blk)
 		echo 'Running blk performance suite...'
-		$WORKDIR/fiotest/autotest.sh --fio-bin=$FIO_BIN \
+		run_test case $WORKDIR/fiotest/autotest.sh --fio-bin=$FIO_BIN \
 		--vm=0,$VM_IMAGE,Nvme0n1p0 \
 		--test-type=spdk_vhost_blk \
 		--fio-job=$WORKDIR/common/fio_jobs/default_performance.job
@@ -84,12 +84,12 @@ case $1 in
 		;;
 	-m|--migration)
 		echo 'Running migration suite...'
-		$WORKDIR/migration/migration.sh -x \
+		run_test case $WORKDIR/migration/migration.sh -x \
 		--fio-bin=$FIO_BIN --os=$VM_IMAGE --test-cases=1,2
 		;;
 	-i|--integrity)
 		echo 'Running SCSI integrity suite...'
-		$WORKDIR/fiotest/autotest.sh -x --fio-bin=$FIO_BIN \
+		run_test case $WORKDIR/fiotest/autotest.sh -x --fio-bin=$FIO_BIN \
 		--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1:Nvme0n1p2:Nvme0n1p3 \
 		--test-type=spdk_vhost_scsi \
 		--fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
@@ -97,7 +97,7 @@ case $1 in
 		;;
 	-ib|--integrity-blk)
 		echo 'Running blk integrity suite...'
-		$WORKDIR/fiotest/autotest.sh -x --fio-bin=$FIO_BIN \
+		run_test case $WORKDIR/fiotest/autotest.sh -x --fio-bin=$FIO_BIN \
 		--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1:Nvme0n1p2:Nvme0n1p3 \
 		--test-type=spdk_vhost_blk \
 		--fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
@@ -105,67 +105,67 @@ case $1 in
 		;;
 	-fs|--fs-integrity-scsi)
 		echo 'Running filesystem integrity suite with SCSI...'
-		$WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_scsi --fs="xfs ntfs btrfs ext4"
+		run_test case $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_scsi --fs="xfs ntfs btrfs ext4"
 		report_test_completion "vhost_fs_integrity_scsi"
 		;;
 	-fb|--fs-integrity-blk)
 		echo 'Running filesystem integrity suite with BLK...'
-		$WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_blk --fs="xfs ntfs btrfs ext4"
+		run_test case $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_blk --fs="xfs ntfs btrfs ext4"
 		report_test_completion "vhost_fs_integrity_blk"
 		;;
 	-ils|--integrity-lvol-scsi)
 		echo 'Running lvol integrity suite...'
-		$WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
+		run_test case $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
 		--ctrl-type=spdk_vhost_scsi --thin-provisioning
 		report_test_completion "vhost_integrity_lvol_scsi"
 		;;
 	-ilb|--integrity-lvol-blk)
 		echo 'Running lvol integrity suite...'
-		$WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
+		run_test case $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
 		--ctrl-type=spdk_vhost_blk
 		report_test_completion "vhost_integrity_lvol_blk"
 		;;
 	-ilsn|--integrity-lvol-scsi-nightly)
 		if [[ $DISKS_NUMBER -ge 2 ]]; then
 			echo 'Running lvol integrity nightly suite with two cores and two controllers'
-			$WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+			run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
 			--ctrl-type=spdk_vhost_scsi --max-disks=2 --distribute-cores --vm-count=2
 
 			echo 'Running lvol integrity nightly suite with one core and two controllers'
-			$WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+			run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
 			--ctrl-type=spdk_vhost_scsi --max-disks=2 --vm-count=2
 		fi
 		if [[ -e $CENTOS_VM_IMAGE ]]; then
 			echo 'Running lvol integrity nightly suite with different os types'
-			$WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
+			run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
 			--ctrl-type=spdk_vhost_scsi --vm-count=2 --multi-os
 		fi
 		echo 'Running lvol integrity nightly suite with one core and one controller'
-		$WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+		run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
 		--ctrl-type=spdk_vhost_scsi --max-disks=1
 		;;
 	-ilbn|--integrity-lvol-blk-nightly)
 		if [[ $DISKS_NUMBER -ge 2 ]]; then
 			echo 'Running lvol integrity nightly suite with two cores and two controllers'
-			$WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+			run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
 			--ctrl-type=spdk_vhost_blk --max-disks=2 --distribute-cores --vm-count=2
 
 			echo 'Running lvol integrity nightly suite with one core and two controllers'
-			$WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+			run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
 			--ctrl-type=spdk_vhost_blk --max-disks=2 --vm-count=2
 		fi
 		if [[ -e $CENTOS_VM_IMAGE ]]; then
 			echo 'Running lvol integrity nightly suite with different os types'
-			$WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
+			run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
 			--ctrl-type=spdk_vhost_blk --vm-count=2 --multi-os
 		fi
 		echo 'Running lvol integrity nightly suite with one core and one controller'
-		$WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+		run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
 		--ctrl-type=spdk_vhost_blk --max-disks=1
 		;;
 	-hp|--hotplug)
 		echo 'Running hotplug tests suite...'
-		$WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+		run_test case $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
 			--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
 			--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
 			--vm=2,$VM_IMAGE,Nvme0n1p4:Nvme0n1p5 \
@@ -176,7 +176,7 @@ case $1 in
 		;;
 	-shr|--scsi-hot-remove)
 		echo 'Running scsi hotremove tests suite...'
-		$WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+		run_test case $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
 			--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
 			--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
 			--test-type=spdk_vhost_scsi \
@@ -185,7 +185,7 @@ case $1 in
 		;;
 	-bhr|--blk-hot-remove)
 		echo 'Running blk hotremove tests suite...'
-		$WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+		run_test case $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
 			--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
 			--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
 			--test-type=spdk_vhost_blk \
@@ -194,7 +194,7 @@ case $1 in
 	;;
 	-ro|--readonly)
 		echo 'Running readonly tests suite...'
-		$WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1 -x
+		run_test case $WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1 -x
 		report_test_completion "vhost_readonly"
 		;;
 	*)