test/nvmf: use stub application

The stub application will ensure that each
nvmf test does not need to reinitialize DPDK
memory and NVMe devices.  This drastically
cuts down on the amount of time needed to run
all of the nvmf tests.

Change-Id: I6abad4e1298111884f18026e72e36f5d8b73c4b9
Signed-off-by: cunyinch <cunyin.chang@intel.com>
Reviewed-on: https://review.gerrithub.io/362810
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
cunyinch 2017-05-27 08:13:55 +08:00 committed by Ben Walker
parent ad84526d1c
commit 3d38fcc312
13 changed files with 57 additions and 22 deletions
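
In outline: nvmf.sh now starts one long-lived stub process that performs the DPDK and NVMe initialization a single time, and each individual test launches the target through a new NVMF_APP variable instead of a hard-coded binary path. A condensed view of the pattern introduced by the diffs below (paths and options are taken from this change; the note on "-i 0" describes SPDK's shared memory instance id in general terms):

# nvmf.sh: one stub owns the hugepage memory and NVMe devices for the run
start_stub "-s 2048 -i 0 -m $NVMF_TEST_CORE_MASK"
trap "kill_stub; exit 1" SIGINT SIGTERM EXIT
# "-i 0" makes each target instance attach to the stub's shared memory group
export NVMF_APP="./app/nvmf_tgt/nvmf_tgt -i 0"
run_test test/nvmf/discovery/discovery.sh
# ... remaining tests ...
trap - SIGINT SIGTERM EXIT
kill_stub

# test/nvmf/common.sh: fall back to the plain binary when no stub is set up
if [ -z "$NVMF_APP" ]; then
	NVMF_APP=./app/nvmf_tgt/nvmf_tgt
fi

# each test script: start the target through the variable
$NVMF_APP -c $testdir/../nvmf.conf &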

View File

@@ -6,6 +6,14 @@ NVMF_IP_LEAST_ADDR=8
NVMF_FIRST_TARGET_IP=$NVMF_IP_PREFIX.$NVMF_IP_LEAST_ADDR
RPC_PORT=5260
if [ -z "$NVMF_APP" ]; then
NVMF_APP=./app/nvmf_tgt/nvmf_tgt
fi
if [ -z "$NVMF_TEST_CORE_MASK" ]; then
NVMF_TEST_CORE_MASK=0xFFFF
fi
function load_ib_rdma_modules()
{
if [ `uname` != Linux ]; then

View File

@@ -23,14 +23,15 @@ if ! rdma_nic_available; then
fi
timing_enter discovery
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf &
$NVMF_APP -c $testdir/../nvmf.conf &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $nvmfpid ${RPC_PORT}
timing_exit start_nvmf_tgt
bdevs="$bdevs $($rpc_py construct_null_bdev Null0 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE)"
bdevs="$bdevs $($rpc_py construct_null_bdev Null1 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE)"

View File

@@ -18,14 +18,15 @@ if ! rdma_nic_available; then
fi
timing_enter fs_test
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf &
$NVMF_APP -c $testdir/../nvmf.conf &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $nvmfpid ${RPC_PORT}
timing_exit start_nvmf_tgt
bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"

View File

@@ -18,14 +18,15 @@ if ! rdma_nic_available; then
fi
timing_enter fio
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf &
$NVMF_APP -c $testdir/../nvmf.conf &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $nvmfpid ${RPC_PORT}
timing_exit start_nvmf_tgt
bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"

View File

@@ -15,14 +15,15 @@ if ! rdma_nic_available; then
fi
timing_enter aer
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf -m 0x2 -p 1 -s 512 &
$NVMF_APP -c $testdir/../nvmf.conf &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $nvmfpid ${RPC_PORT}
timing_exit start_nvmf_tgt
$rpc_py construct_nvmf_subsystem Direct nqn.2016-06.io.spdk:cnode1 'transport:RDMA traddr:192.168.100.8 trsvcid:4420' '' -p "*"

View File

@@ -20,14 +20,15 @@ if [ ! -d /usr/src/fio ]; then
fi
timing_enter fio
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf -m 0x2 -p 1 -s 512 &
$NVMF_APP -c $testdir/../nvmf.conf &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $nvmfpid ${RPC_PORT}
timing_exit start_nvmf_tgt
$rpc_py construct_nvmf_subsystem Direct nqn.2016-06.io.spdk:cnode1 'transport:RDMA traddr:192.168.100.8 trsvcid:4420' '' -p "*"

View File

@@ -18,14 +18,15 @@ if ! rdma_nic_available; then
fi
timing_enter identify
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf -m 0x2 -p 1 -s 512 -t nvmf &
$NVMF_APP -c $testdir/../nvmf.conf &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $nvmfpid ${RPC_PORT}
timing_exit start_nvmf_tgt
bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"

View File

@@ -18,14 +18,15 @@ if ! rdma_nic_available; then
fi
timing_enter perf
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf -m 0x2 -p 1 -s 512 &
$NVMF_APP -c $testdir/../nvmf.conf &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $nvmfpid ${RPC_PORT}
timing_exit start_nvmf_tgt
bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"

View File

@@ -18,14 +18,15 @@ if ! rdma_nic_available; then
fi
timing_enter multiconnection
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf &
$NVMF_APP -c $testdir/../nvmf.conf &
pid=$!
trap "killprocess $pid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $pid ${RPC_PORT}
timing_exit start_nvmf_tgt
modprobe -v nvme-rdma

View File

@@ -18,13 +18,14 @@ if ! rdma_nic_available; then
fi
timing_enter nvme_cli
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf &
timing_enter start_nvmf_tgt
$NVMF_APP -c $testdir/../nvmf.conf &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $nvmfpid ${RPC_PORT}
timing_exit start_nvmf_tgt
bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"

View File

@@ -11,12 +11,23 @@ source $rootdir/test/nvmf/common.sh
timing_enter nvmf_tgt
# NVMF_TEST_CORE_MASK is the biggest core mask specified by
# any of the nvmf_tgt tests. Using this mask for the stub
# ensures that if this mask spans CPU sockets, we will
# allocate memory from both sockets. The stub will *not*
# run anything on the extra cores (and will sleep on master
# core 0) so there is no impact to the nvmf_tgt tests by
# specifying the bigger core mask.
start_stub "-s 2048 -i 0 -m $NVMF_TEST_CORE_MASK"
trap "kill_stub; exit 1" SIGINT SIGTERM EXIT
export NVMF_APP="./app/nvmf_tgt/nvmf_tgt -i 0"
run_test test/nvmf/fio/fio.sh
run_test test/nvmf/filesystem/filesystem.sh
run_test test/nvmf/discovery/discovery.sh
run_test test/nvmf/nvme_cli/nvme_cli.sh
run_test test/nvmf/shutdown/shutdown.sh
run_test test/nvmf/rpc/rpc.sh
if [ $RUN_NIGHTLY -eq 1 ]; then
run_test test/nvmf/multiconnection/multiconnection.sh
@@ -33,5 +44,10 @@ run_test test/nvmf/host/identify_kernel_nvmf.sh
run_test test/nvmf/host/fio.sh
timing_exit host
trap - SIGINT SIGTERM EXIT
kill_stub
# TODO: enable nvme device detachment for multi-process so that
# we can use the stub for this test
run_test test/nvmf/rpc/rpc.sh
timing_exit nvmf_tgt
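
The start_stub and kill_stub helpers called in the hunk above come from the shared autotest scripts and are not part of this change. A rough sketch of what such a pair could look like, assuming a stub binary at test/app/stub/stub, a stubpid variable, and a fixed start-up delay as the readiness check (all of these are illustrative assumptions, not the project's actual helper code):

function start_stub()
{
	# $1 carries the stub's whole command line, e.g. "-s 2048 -i 0 -m 0xFFFF";
	# leaving it unquoted lets the shell split it back into separate arguments.
	./test/app/stub/stub $1 &
	stubpid=$!
	# Crude readiness wait: give the stub time to claim hugepages and attach
	# the NVMe devices before any test starts a secondary process against it.
	sleep 5
}

function kill_stub()
{
	kill $stubpid
	wait $stubpid
}

With the stub acting as the primary process, every nvmf_tgt launched with "-i 0" joins the same shared memory group as a secondary process, which is what lets the tests skip the per-test DPDK and NVMe initialization.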

View File

@@ -15,7 +15,7 @@ if ! rdma_nic_available; then
fi
timing_enter rpc
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf &
pid=$!
@@ -23,6 +23,7 @@ pid=$!
trap "killprocess $pid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $pid ${RPC_PORT}
timing_exit start_nvmf_tgt
# set times for subsystem construct/delete
if [ $RUN_NIGHTLY -eq 1 ]; then

View File

@@ -18,14 +18,15 @@ if ! rdma_nic_available; then
fi
timing_enter shutdown
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf &
$NVMF_APP -c $testdir/../nvmf.conf &
pid=$!
trap "killprocess $pid; exit 1" SIGINT SIGTERM EXIT
waitforlisten $pid ${RPC_PORT}
timing_exit start_nvmf_tgt
# Create 10 subsystems
for i in `seq 1 10`