autotest: Replace fio.py with a bash wrapper in tests

fio.py is just a thin wrapper around fio and doesn't do anything
that actually requires Python. Rewriting it as a plain Bash script
makes it easier to integrate with autotest's common sh tooling and
to debug any potential issues with the underlying tests.

This also fixes #1919 by making sure that only NVMe devices backed
by the SPDK nvmf target (transport tcp or rdma, model "SPDK bdev
Controller") are selected for the tests.
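
For reference, the wrapper keeps fio.py's short options, so existing
call sites work unchanged, e.g. (invocation taken from the nvmf tests
below):

    scripts/fio-wrapper -p nvmf -i 4096 -d 1 -t write -r 1 -v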

Fixes: #1919

Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: I111d00df3c7b2517f431cae865e258a665c2ecb3
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7684
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Author:    Michal Berger <michalx.berger@intel.com>
Date:      2021-04-29 15:29:46 +02:00
Committer: Tomasz Zawadzki
Parent:    55189bee6b
Commit:    19f0c9a0d0

19 changed files with 163 additions and 24 deletions

scripts/fio-wrapper Executable file

@@ -0,0 +1,139 @@
#!/usr/bin/env bash
rootdir=$(readlink -f "$(dirname "$0")/../")
shopt -s nullglob extglob
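# Print a fio job file for the given block devices on stdout.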
fio_config() {
local devs=("$@") dev
cat <<- FIO
[global]
thread=1
invalidate=1
rw=$testtype
time_based=1
runtime=$runtime
ioengine=libaio
direct=1
bs=$blocksize
iodepth=$iodepth
norandommap=$((verify == 1 ? 0 : 1))
numjobs=$numjobs
verify_dump=1
FIO
if ((verify == 1)); then
cat <<- FIO
do_verify=$verify
verify=crc32c-intel
FIO
fi
for dev in "${!devs[@]}"; do
cat <<- FIO
[job$dev]
filename=/dev/${devs[dev]}
FIO
done
}
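# Feed the generated job file straight into fio via stdin.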
run_fio() {
fio_config "$@" | fio -
}
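# List sd* disks attached through active iSCSI sessions.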
get_iscsi() {
while read -r; do
[[ $REPLY =~ "Attached scsi disk "(sd[a-z]+) ]] && echo "${BASH_REMATCH[1]}"
done < <(iscsiadm -m session -P 3)
}
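# List nvmeXnY block devices that belong to SPDK nvmf subsystems (tcp/rdma only).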
get_nvme() {
local blocks nvme nvme_sub
for nvme in /sys/class/nvme/nvme+([0-9]); do
# Make sure we touch only the block devices which belong to bdev subsystem and
# use supported protocols.
[[ $(< "$nvme/transport") == tcp || $(< "$nvme/transport") == rdma ]] || continue
for nvme_sub in /sys/class/nvme-subsystem/nvme-subsys+([0-9]); do
[[ -e $nvme_sub/${nvme##*/} ]] || continue
[[ $(< "$nvme_sub/model") == "SPDK bdev Controller"* ]] || continue
blocks+=("$nvme_sub/${nvme##*/}"n*)
done
done
printf '%s\n' "${blocks[@]##*/}"
}
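# Use devices passed as arguments, otherwise autodetect them based on $protocol.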
get_devices() {
local devs=("$@")
if ((${#devs[@]} == 0)); then
case "$protocol" in
iscsi) devs=($(get_iscsi)) ;;
nvmf) devs=($(get_nvme)) ;;
*) ;;
esac
fi
printf '%s\n' "${devs[@]}"
}
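# Tune block-layer settings (nomerges, queue_depth, scheduler) for each device.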
configure_devices() {
local devs=("$@") dev qd
for dev in "${devs[@]}"; do
qd=128
# Disable all merge tries"
echo 2 > "/sys/block/$dev/queue/nomerges"
# FIXME: nr_requests already has its default value at 128. Also, when no
# scheduler is associated with the device this value cannot be changed
# and is automatically adjusted as well.
# echo 128 > "/sys/block/$dev/queue/nr_requests"
if [[ -e /sys/block/$dev/device/queue_depth ]]; then
# FIXME: Is this really needed though? Can't we use the default? This is not
# very deterministic as depending on the device we may end up with different
# qd in the range of 1-128.
while ((qd > 0)) && ! echo "$qd" > "/sys/block/$dev/device/queue_depth"; do
((--qd))
done 2> /dev/null
if ((qd == 0)); then
printf 'Failed to set queue_depth (%s)\n' "$dev"
return 1
fi
printf 'queue_depth set to %u (%s)\n' "$qd" "$dev"
else
printf 'Could not set queue depth (%s)\n' "$dev" >&2
fi
echo none > "/sys/block/$dev/queue/scheduler"
done
}
# Defaults
blocksize=4096
iodepth=1
numjobs=1
protocol="nvmf"
runtime=1
testtype="read"
verify=0
# Keep short args compatible with fio.py
while getopts :i:d:n:p:r:t:v arg; do
case "$arg" in
i) blocksize=$OPTARG ;;
d) iodepth=$OPTARG ;;
n) numjobs=$OPTARG ;;
p) protocol=$OPTARG ;;
r) runtime=$OPTARG ;;
t) testtype=$OPTARG ;;
v) verify=1 ;;
*) ;;
esac
done
shift $((OPTIND - 1))
devices=($(get_devices "$@"))
if ((${#devices[@]} == 0)); then
printf '* No devices were found for the test, aborting\n' >&2
exit 1
fi
fio_config "${devices[@]}"
configure_devices "${devices[@]}" && run_fio "${devices[@]}"
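
For reference, with the defaults above and a single device, fio_config
emits a job file along these lines (sketch; "sda" is just an example
device name):

    [global]
    thread=1
    invalidate=1
    rw=read
    time_based=1
    runtime=1
    ioengine=libaio
    direct=1
    bs=4096
    iodepth=1
    norandommap=1
    numjobs=1
    verify_dump=1

    [job0]
    filename=/dev/sda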


@@ -47,7 +47,7 @@ MALLOC_BDEV_SIZE=64
 MALLOC_BLOCK_SIZE=512
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 timing_enter start_iscsi_tgt


@@ -56,7 +56,7 @@ MALLOC_BDEV_SIZE=64
 MALLOC_BLOCK_SIZE=4096
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 timing_enter start_iscsi_tgt


@@ -8,7 +8,7 @@ source $rootdir/test/iscsi_tgt/common.sh
 iscsitestinit
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 source "$rootdir/test/common/applications.sh"
 NETMASK=127.0.0.0/24


@@ -11,7 +11,7 @@ NULL_BDEV_SIZE=64
 NULL_BLOCK_SIZE=512
 rpc_py=$rootdir/scripts/rpc.py
-fio_py=$rootdir/scripts/fio.py
+fio_py=$rootdir/scripts/fio-wrapper
 rpc_addr1="/var/tmp/spdk0.sock"
 rpc_addr2="/var/tmp/spdk1.sock"


@@ -18,7 +18,7 @@ else
 fi
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 timing_enter start_iscsi_tgt


@@ -8,7 +8,7 @@ source $rootdir/test/iscsi_tgt/common.sh
 iscsitestinit
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 CONNECTION_NUMBER=30


@@ -10,7 +10,7 @@ nvmftestinit
 iscsitestinit
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 # Namespaces are NOT used here on purpose. The rxe_cfg utility used for NVMf tests does not support namespaces.
 TARGET_IP=127.0.0.1


@@ -13,7 +13,7 @@ PMEM_BLOCK_SIZE=512
 TGT_NR=10
 PMEM_PER_TGT=1
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 timing_enter start_iscsi_target
 "${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &


@@ -53,7 +53,7 @@ MALLOC_BLOCK_SIZE=512
 IOPS_RESULT=
 BANDWIDTH_RESULT=
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 timing_enter start_iscsi_tgt


@@ -13,7 +13,7 @@ trap 'rbd_cleanup; exit 1' SIGINT SIGTERM EXIT
 timing_exit rbd_setup
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 timing_enter start_iscsi_tgt


@@ -11,7 +11,7 @@ MALLOC_BDEV_SIZE=64
 MALLOC_BLOCK_SIZE=512
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 if ! hash sg_reset; then
 exit 1


@@ -31,7 +31,7 @@ MALLOC_BDEV_SIZE=64
 MALLOC_BLOCK_SIZE=4096
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 timing_enter start_iscsi_tgt


@@ -35,15 +35,15 @@ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_
 waitforserial $NVMF_SERIAL 3
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 1 -v
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t randwrite -r 1 -v
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t write -r 1 -v
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t randwrite -r 1 -v
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 1 -t write -r 1 -v
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 1 -t randwrite -r 1 -v
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 128 -t write -r 1 -v
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 128 -t randwrite -r 1 -v
 sync
 #start hotplug test case
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t read -r 10 &
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 1 -t read -r 10 &
 fio_pid=$!
 sleep 3


@@ -30,7 +30,7 @@ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_
 waitforserial "$NVMF_SERIAL"
 # Once our timed out I/O complete, we will still have 10 sec of I/O.
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 60 -v &
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 1 -t write -r 60 -v &
 fio_pid=$!
 sleep 3


@@ -37,8 +37,8 @@ for i in $(seq 1 $NVMF_SUBSYS); do
 waitforserial SPDK$i
 done
-$rootdir/scripts/fio.py -p nvmf -i 262144 -d 64 -t read -r 10
-$rootdir/scripts/fio.py -p nvmf -i 262144 -d 64 -t randwrite -r 10
+$rootdir/scripts/fio-wrapper -p nvmf -i 262144 -d 64 -t read -r 10
+$rootdir/scripts/fio-wrapper -p nvmf -i 262144 -d 64 -t randwrite -r 10
 sync
 for i in $(seq 1 $NVMF_SUBSYS); do


@@ -50,7 +50,7 @@ ctrl2_id=$(nvme list-subsys | sed -n "s/traddr=$NVMF_SECOND_TARGET_IP trsvcid=$N
 # Set IO policy to numa
 echo numa > /sys/class/nvme-subsystem/nvme-subsys$subsys_id/iopolicy
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t randrw -r 6 -v &
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 128 -t randrw -r 6 -v &
 fio_pid=$!
 sleep 1
@@ -84,7 +84,7 @@ sleep 1
 # Set IO policy to round-robin
 echo round-robin > /sys/class/nvme-subsystem/nvme-subsys$subsys_id/iopolicy
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t randrw -r 6 -v &
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 128 -t randrw -r 6 -v &
 fio_pid=$!
 sleep 1


@@ -42,7 +42,7 @@ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_
 waitforserial "$NVMF_SERIAL"
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 1 -v
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 1 -t write -r 1 -v
 nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"


@@ -31,7 +31,7 @@ done
 # working even at very high queue depths because the rdma qpair doesn't fail.
 # It is normal to see the initiator time out and reconnect waiting for completions from an overwhelmed target,
 # but the connection should come up and FIO should complete without errors.
-$rootdir/scripts/fio.py -p nvmf -i 1048576 -d 128 -t read -r 10 -n 13
+$rootdir/scripts/fio-wrapper -p nvmf -i 1048576 -d 128 -t read -r 10 -n 13
 sync