nvme: Simplify the multiprocess tests

Nearly all tests now make extensive use of the stub application,
so they're effectively all testing multiprocess all the time.
Further, we believe the best policy is to not attempt
to support scenarios where the primary process crashes unexpectedly.
We consider this equivalent to a kernel panic: all of the
processes will need to be halted and restarted.

Given the two things above, we can make some fairly dramatic
simplifications to the NVMe multiprocess testing. Only
one piece of functionality - multiple simultaneous secondary
processes - was not already covered by the regular
tests. This patch removes all other multiprocess tests
and adds a simple test of multiple secondaries.

Change-Id: If99f85913b99862f02c3815ea7c10cd80ea3ce02
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/368208
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Ben Walker <benjamin.walker@intel.com>
Date: 2017-07-05 10:49:50 -07:00
Committed by: Jim Harris
parent 782b0f1a75
commit 58b9ea3ac0
4 changed files with 12 additions and 128 deletions
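
The tests touched below all rely on one pattern: a long-lived stub
application acts as the DPDK primary process, and every test binary
started with the same shared memory group id (-i 0) attaches as a
secondary. A minimal sketch of that pattern, assuming the stub binary
lives at test/app/stub/stub as start_stub in the scripts below suggests
(paths and flags here are illustrative, not verbatim commands):

    # Primary: reserves hugepages and owns shared memory group 0.
    ./test/app/stub/stub -s 2048 -i 0 -m 0x1 &
    stub_pid=$!
    sleep 1    # give the primary time to initialize

    # Any process started with the same shm group id (-i 0) attaches
    # as a secondary and can drive the NVMe devices concurrently.
    ./examples/nvme/identify/identify -i 0
    ./examples/nvme/perf/perf -i 0 -q 1 -w read -s 4096 -t 1 -c 0x2

    kill $stub_pid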

autotest.sh

@@ -98,9 +98,6 @@ if [ $SPDK_TEST_NVME -eq 1 ]; then
 	if [ $SPDK_RUN_ASAN -eq 0 ]; then
 		run_test test/lib/nvme/hotplug.sh intel
 	fi
-	if [ $RUN_NIGHTLY -eq 1 ]; then
-		run_test test/lib/nvme/nvmemp.sh
-	fi
 fi
 
 run_test test/lib/env/env.sh

scripts/autotest_common.sh

@@ -18,7 +18,6 @@ fi
 : ${SPDK_TEST_UNITTEST=1}; export SPDK_TEST_UNITTEST
 : ${SPDK_TEST_ISCSI=1}; export SPDK_TEST_ISCSI
 : ${SPDK_TEST_NVME=1}; export SPDK_TEST_NVME
-: ${SPDK_TEST_NVME_MULTIPROCESS=1}; export SPDK_TEST_NVME_MULTIPROCESS
 : ${SPDK_TEST_NVMF=1}; export SPDK_TEST_NVMF
 : ${SPDK_TEST_VHOST=1}; export SPDK_TEST_VHOST
 : ${SPDK_TEST_BLOCKDEV=1}; export SPDK_TEST_BLOCKDEV

test/lib/nvme/nvme.sh

@@ -6,9 +6,6 @@ testdir=$(readlink -f $(dirname $0))
 rootdir=$(readlink -f $testdir/../../..)
 source $rootdir/scripts/autotest_common.sh
 
-# delay time for apps to start up as primary
-app_start=5
-
 function linux_iter_pci {
 	lspci -mm -n -D | grep $1 | tr -d '"' | awk -F " " '{print $1}'
 }
@@ -17,7 +14,6 @@ timing_enter nvme
 
 if [ `uname` = Linux ]; then
 	start_stub "-s 2048 -i 0 -m 0xF"
-	sleep $app_start
 	trap "kill_stub; exit 1" SIGINT SIGTERM EXIT
 fi
 
@@ -66,6 +62,18 @@ timing_enter arbitration
 $rootdir/examples/nvme/arbitration/arbitration -t 3 -i 0
 timing_exit arbitration
 
+if [ `uname` = Linux ]; then
+	timing_enter multi_secondary
+	$rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -s 4096 -t 3 -c 0x1 &
+	pid0=$!
+	$rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -s 4096 -t 3 -c 0x2 &
+	pid1=$!
+	$rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -s 4096 -t 3 -c 0x4
+	wait $pid0
+	wait $pid1
+	timing_exit multi_secondary
+fi
+
 if [ `uname` = Linux ]; then
 	trap - SIGINT SIGTERM EXIT
 	kill_stub
@@ -85,31 +93,4 @@ if [ -d /usr/src/fio ]; then
 	timing_exit fio_plugin
 fi
 
-if [ $(uname -s) = Linux ] && [ $SPDK_TEST_NVME_MULTIPROCESS -eq 1 ]; then
-	timing_enter multi_process
-	$rootdir/examples/nvme/arbitration/arbitration -i 0 -s 4096 -t 10 -c 0xf &
-	pid=$!
-	sleep $app_start
-	$rootdir/examples/nvme/perf/perf -i 0 -q 1 -w randread -s 4096 -t 10 -c 0x10 &
-	sleep 1
-	kill -9 $!
-	count=0
-	while [ $count -le 2 ]; do
-		$rootdir/examples/nvme/perf/perf -i 0 -q 1 -w read -s 4096 -t 1 -c 0xf
-		count=$(($count + 1))
-	done
-	count=0
-	while [ $count -le 1 ]; do
-		core=$((1 << (($count + 4))))
-		printf -v hexcore "0x%x" "$core"
-		$rootdir/examples/nvme/perf/perf -i 0 -q 128 -w read -s 4096 -t 1 -c $hexcore &
-		sleep $app_start
-		$rootdir/examples/nvme/identify/identify -i 0 &
-		sleep $app_start
-		count=$(($count + 1))
-	done
-	wait $pid
-	timing_exit multi_process
-fi
-
 timing_exit nvme
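
The new multi_secondary block scales naturally past three secondaries.
A hedged sketch of running N concurrent secondaries, reusing the
computed core-mask idiom from the removed multi_process test (N and the
mask math are illustrative, not part of this patch):

    # Launch N perf secondaries, each pinned to its own core.
    N=3
    pids=()
    for ((i = 0; i < N; i++)); do
    	printf -v mask "0x%x" $((1 << i))
    	$rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -s 4096 -t 3 -c $mask &
    	pids+=($!)
    done
    # Wait for every secondary so a failure in any of them is caught.
    for pid in "${pids[@]}"; do
    	wait $pid
    done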

test/lib/nvme/nvmemp.sh (deleted)

@@ -1,93 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-testdir=$(readlink -f $(dirname $0))
-rootdir=$(readlink -f $testdir/../../..)
-source $rootdir/scripts/autotest_common.sh
-
-if [ $(uname -s) = Linux ]; then
-	timing_enter nvme_mp
-
-	timing_enter mp_func_test
-	$rootdir/examples/nvme/arbitration/arbitration -i 0 -s 4096 -t 5 -c 0xf &
-	sleep 3
-	$rootdir/examples/nvme/perf/perf -i 0 -q 128 -w read -s 4096 -t 1 -c 0x10
-	wait $!
-	timing_exit mp_func_test
-
-	timing_enter mp_fault_test
-	timing_enter mp_fault_test_1
-	$rootdir/examples/nvme/arbitration/arbitration -i 0 -s 4096 -t 5 -c 0xf &
-	pid=$!
-	sleep 3
-	$rootdir/examples/nvme/perf/perf -i 0 -q 128 -w read -s 4096 -t 5 -c 0x10 &
-	sleep 1
-	kill -9 $pid
-	wait $!
-	timing_exit mp_fault_test_1
-
-	timing_enter mp_fault_test_2
-	$rootdir/examples/nvme/arbitration/arbitration -i 0 -s 4096 -t 7 -c 0xf &
-	pid=$!
-	sleep 3
-	$rootdir/examples/nvme/perf/perf -i 0 -q 128 -w read -s 4096 -t 3 -c 0x10 &
-	sleep 2
-	kill -9 $!
-	wait $pid
-	timing_exit mp_fault_test_2
-	timing_exit mp_fault_test
-
-	timing_enter mp_stress_test
-	timing_enter mp_stress_test_1
-	$rootdir/examples/nvme/arbitration/arbitration -i 0 -s 4096 -t 10 -c 0xf &
-	sleep 3
-	count=0
-	while [ $count -le 4 ]; do
-		$rootdir/examples/nvme/perf/perf -i 0 -q 128 -w read -s 4096 -t 1 -c 0x10
-		count=$(( $count + 1 ))
-	done
-	wait $!
-	timing_exit mp_stress_test_1
-
-	timing_enter mp_stress_test_2
-	$rootdir/examples/nvme/arbitration/arbitration -i 0 -s 4096 -t 15 -c 0xf &
-	pid=$!
-	sleep 3
-	count=0
-	while [ $count -le 4 ]; do
-		$rootdir/examples/nvme/perf/perf -i 0 -q 128 -w read -s 4096 -t 3 -c 0x10 &
-		sleep 2
-		kill -9 $!
-		count=$(( $count + 1 ))
-	done
-	wait $pid
-	timing_exit mp_stress_test_2
-
-	timing_enter mp_stress_test_3
-	$rootdir/examples/nvme/arbitration/arbitration -i 0 -s 4096 -t 10 &
-	pid=$!
-	sleep 3
-	count=0
-	while [ $count -le 4 ]; do
-		core=$((1 << (($count + 4))))
-		printf -v hexcore "0x%x" "$core"
-		$rootdir/examples/nvme/perf/perf -i 0 -q 128 -w read -s 4096 -t 1 -c $hexcore &
-		count=$(( $count + 1 ))
-	done
-	wait $pid
-	timing_exit mp_stress_test_3
-	timing_exit mp_stress_test
-
-	timing_enter mp_perf_test
-	$rootdir/examples/nvme/perf/perf -i 0 -q 1 -w randread -s 4096 -t 5 -c 0x3
-	sleep 3
-	$rootdir/examples/nvme/perf/perf -i 0 -q 1 -w randread -s 4096 -t 8 -c 0x1 &
-	sleep 3
-	$rootdir/examples/nvme/perf/perf -i 0 -q 1 -w randread -s 4096 -t 3 -c 0x2
-	wait $!
-	timing_exit mp_perf_test
-
-	timing_exit nvme_mp
-fi
-