test/ftl: Use emulated zoned devices instead of ocssd

Support for Open-Channel SSDs is dying out (it is already marked as
deprecated in the Linux kernel and scheduled for complete removal in
5.15), hence we should gradually adjust our test suites and move
towards more standardized zoned NVMe devices.

Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: I038b6361a78b27c2b350ccf594d201ffe92794e3
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8295
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@gmail.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Author: Michal Berger, 2021-06-10 16:26:26 +02:00
Committed by: Tomasz Zawadzki
Parent: 13905aa3e7
Commit: a90e749f44
7 changed files with 59 additions and 46 deletions
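At a high level, every converted test swaps the OCSSD bdev underneath the FTL device for an emulated zoned bdev. A minimal before/after sketch of the RPC flow, assembled only from calls that appear in the hunks below ($device stands for a PCIe BDF):

# Old stack (removed by this commit):
$rpc_py bdev_nvme_attach_controller -b nvme0 -a "$device" -t pcie
$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1
$rpc_py bdev_ftl_create -b ftl0 -d nvme0n1

# New stack (added by this commit):
$rpc_py bdev_nvme_attach_controller -b nvme0 -a "$device" -t pcie
bdev_create_zone nvme0n1                       # helper added to common.sh, wraps bdev_zone_block_create
$rpc_py bdev_ftl_create -b ftl0 -d "$ZONE_DEV" # FTL now sits on the emulated zoned bdev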


@@ -8,7 +8,6 @@ source $testdir/common.sh
tests=('-q 1 -w randwrite -t 4 -o 69632' '-q 128 -w randwrite -t 4 -o 4096' '-q 128 -w verify -t 4 -o 4096')
device=$1
use_append=$2
-rpc_py=$rootdir/scripts/rpc.py
for ((i = 0; i < ${#tests[@]}; i++)); do
timing_enter "${tests[$i]}"
@@ -18,12 +17,12 @@ for ((i = 0; i < ${#tests[@]}; i++)); do
trap 'killprocess $bdevperf_pid; exit 1' SIGINT SIGTERM EXIT
waitforlisten $bdevperf_pid
$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
-$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1
-$rpc_py bdev_ftl_create -b ftl0 -d nvme0n1 $use_append
+bdev_create_zone nvme0n1
+$rpc_py bdev_ftl_create -b ftl0 -d "$ZONE_DEV" $use_append
$rootdir/test/bdev/bdevperf/bdevperf.py perform_tests
-$rpc_py delete_ftl_bdev -b ftl0
-$rpc_py bdev_ocssd_delete nvme0n1
+$rpc_py bdev_ftl_delete -b ftl0
+bdev_delete_zone "$ZONE_DEV"
$rpc_py bdev_nvme_detach_controller nvme0
killprocess $bdevperf_pid
trap - SIGINT SIGTERM EXIT
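For reference, this script takes the controller's BDF as $1 and an optional append flag as $2; a hypothetical direct invocation (the BDF is a made-up example; --use_append mirrors the run_test call near the end of this commit):

./bdevperf.sh 0000:5e:00.0              # regular zoned write path
./bdevperf.sh 0000:5e:00.0 --use_append # passes --use_append through to bdev_ftl_create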


@@ -34,3 +34,46 @@ function gen_ftl_nvme_conf() {
}
JSON
}
+get_ftl_nvme_dev() {
+	# Find device with LBA matching the FTL_BLOCK_SIZE
+	local nvmes nvme identify lba
+
+	for nvme in $(nvme_in_userspace); do
+		identify=$("$SPDK_EXAMPLE_DIR/identify" -r trtype:pcie -r "traddr:$nvme")
+		[[ $identify =~ "Current LBA Format:"\ +"LBA Format #"([0-9]+) ]]
+		[[ $identify =~ "LBA Format #${BASH_REMATCH[1]}: Data Size:"\ +([0-9]+) ]]
+		lba=${BASH_REMATCH[1]}
+		((lba && lba % FTL_BLOCK_SIZE == 0)) && nvmes+=("$nvme")
+	done
+
+	((${#nvmes[@]} > 0)) || return 1
+	printf '%s\n' "${nvmes[@]}"
+}
+
+bdev_create_zone() {
+	local base_bdev=$1
+
+	# TODO: Consider use of ZNSed nvme controllers
+	"$rpc_py" bdev_zone_block_create \
+		-b "$ZONE_DEV" \
+		-o "$OPTIMAL_OPEN_ZONES" \
+		-z "$ZONE_CAPACITY" \
+		-n "$base_bdev"
+}
+
+bdev_delete_zone() {
+	local zone_dev=$1
+
+	# TODO: Consider use of ZNSed nvme controllers
+	"$rpc_py" bdev_zone_block_delete "$zone_dev"
+}
+
+# Optimal number of zones refers to the number of zones that need to be written at the same
+# time in order to maximize the drive's write bandwidth.
+# ZONE_CAPACITY * FTL_BLOCK_SIZE * OPTIMAL_OPEN_ZONES should be <= size of the drive.
+FTL_BLOCK_SIZE=4096
+ZONE_CAPACITY=4096
+OPTIMAL_OPEN_ZONES=32
+ZONE_DEV=zone0
+
+rpc_py=$rootdir/scripts/rpc.py
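Taken together, the helpers above let any test in this directory stand up and tear down the zoned FTL stack symmetrically. A usage sketch built only from calls shown in this commit, plus the sizing arithmetic the comment alludes to:

devices=($(get_ftl_nvme_dev)) device=${devices[0]} # first NVMe with a matching LBA format
[[ -n $device ]] || exit 1                         # bail out if none qualified
bdev_create_zone nvme0n1                           # emulated zoned vbdev appears as $ZONE_DEV
$rpc_py bdev_ftl_create -b ftl0 -d "$ZONE_DEV"
# ... run the actual test ...
$rpc_py bdev_ftl_delete -b ftl0
bdev_delete_zone "$ZONE_DEV"

# Sizing check with the defaults above:
# 4096 blocks/zone * 4096 B/block * 32 open zones = 512 MiB,
# which must not exceed the size of the backing drive.
echo $((ZONE_CAPACITY * FTL_BLOCK_SIZE * OPTIMAL_OPEN_ZONES)) # 536870912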


@@ -5,8 +5,6 @@ rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $testdir/common.sh
-rpc_py=$rootdir/scripts/rpc.py
device=$1
restore_kill() {
@@ -34,8 +32,8 @@ svcpid=$!
waitforlisten $svcpid
$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
-$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
-ftl_construct_args="bdev_ftl_create -b ftl0 -d nvme0n1 -o"
+bdev_create_zone nvme0n1
+ftl_construct_args="bdev_ftl_create -b ftl0 -d $ZONE_DEV -o"
$rpc_py $ftl_construct_args
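A side note on this pattern: ftl_construct_args is deliberately expanded unquoted so the shell word-splits it back into individual RPC arguments. A minimal illustration:

ftl_construct_args="bdev_ftl_create -b ftl0 -d $ZONE_DEV -o"
$rpc_py $ftl_construct_args # unquoted on purpose: splits into bdev_ftl_create, -b, ftl0, ...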


@@ -31,8 +31,8 @@ svcpid=$!
waitforlisten $svcpid
$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
-$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1
-$rpc_py bdev_ftl_create -b ftl0 -d nvme0n1
+bdev_create_zone nvme0n1
+$rpc_py bdev_ftl_create -b ftl0 -d "$ZONE_DEV"
waitforbdev ftl0


@@ -5,30 +5,7 @@ rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $testdir/common.sh
-# The FTL tests are currently disabled, pending conversion to ZNS from OCSSD.
-exit 0
-
-function at_ftl_exit() {
-	# restore original driver
-	PCI_ALLOWED="$device" PCI_BLOCKED="" DRIVER_OVERRIDE="$ocssd_original_dirver" $rootdir/scripts/setup.sh
-}
-
-read -r device _ <<< "$OCSSD_PCI_DEVICES"
-
-if [[ -z "$device" ]]; then
-	echo "OCSSD device list is empty."
-	echo "This test require that OCSSD_PCI_DEVICES environment variable to be set"
-	echo "and point to OCSSD devices PCI BDF. You can specify multiple space"
-	echo "separated BDFs in this case first one will be used."
-	exit 1
-fi
-
-ocssd_original_dirver="$(basename $(readlink /sys/bus/pci/devices/$device/driver))"
-
-trap 'at_ftl_exit' SIGINT SIGTERM EXIT
-
-# OCSSD is blocked so bind it to vfio/uio driver before testing
-PCI_ALLOWED="$device" PCI_BLOCKED="" DRIVER_OVERRIDE="" $rootdir/scripts/setup.sh
+devices=($(get_ftl_nvme_dev)) device=${devices[0]}
run_test "ftl_bdevperf" $testdir/bdevperf.sh $device
run_test "ftl_bdevperf_append" $testdir/bdevperf.sh $device --use_append


@@ -5,8 +5,6 @@ rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $testdir/common.sh
-rpc_py=$rootdir/scripts/rpc.py
device=$1
json_kill() {
@@ -20,9 +18,9 @@ svcpid=$!
waitforlisten $svcpid
# Create new bdev from json configuration
-$rootdir/scripts/gen_ftl.sh -n ftl0 -d nvme0n1 | $rpc_py load_subsystem_config
$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
-$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
+bdev_create_zone nvme0n1
+$rootdir/scripts/gen_ftl.sh -n ftl0 -d "$ZONE_DEV" | $rpc_py load_subsystem_config
waitforbdev ftl0
uuid=$($rpc_py bdev_get_bdevs | jq -r ".[] | select(.name==\"ftl0\").uuid")
@@ -30,7 +28,7 @@ uuid=$($rpc_py bdev_get_bdevs | jq -r ".[] | select(.name==\"ftl0\").uuid")
$rpc_py bdev_ftl_delete -b ftl0
# Restore bdev from json configuration
-$rootdir/scripts/gen_ftl.sh -n ftl0 -d nvme0n1 -u $uuid | $rpc_py load_subsystem_config
+$rootdir/scripts/gen_ftl.sh -n ftl0 -d "$ZONE_DEV" -u $uuid | $rpc_py load_subsystem_config
$rpc_py bdev_ftl_delete -b ftl0
$rpc_py bdev_nvme_detach_controller nvme0
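The jq query above captures ftl0's uuid so the bdev can be recreated with the same identity. A hypothetical post-restore assertion (not part of this diff) could reuse the same query:

restored_uuid=$($rpc_py bdev_get_bdevs | jq -r '.[] | select(.name=="ftl0").uuid')
[[ $restored_uuid == "$uuid" ]] || echo "uuid mismatch after restore" >&2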


@@ -5,8 +5,6 @@ rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $testdir/common.sh
-rpc_py=$rootdir/scripts/rpc.py
mount_dir=$(mktemp -d)
device=$1
@@ -33,8 +31,8 @@ svcpid=$!
waitforlisten $svcpid
$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
-$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
-ftl_construct_args="bdev_ftl_create -b ftl0 -d nvme0n1"
+bdev_create_zone nvme0n1
+ftl_construct_args="bdev_ftl_create -b ftl0 -d $ZONE_DEV"
$rpc_py $ftl_construct_args
@@ -48,7 +46,7 @@ $rpc_py save_config > "$config"
# Prepare the disk by creating ext4 fs and putting a file on it
make_filesystem ext4 /dev/nbd0
mount /dev/nbd0 $mount_dir
-dd if=/dev/urandom of=$mount_dir/testfile bs=4K count=256K
+dd if=/dev/urandom of=$mount_dir/testfile bs=4K count=4k
sync
mount -o remount /dev/nbd0 $mount_dir
md5sum $mount_dir/testfile > "$SPDK_TEST_STORAGE/testfile.md5"
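The count change is a sizeable reduction in test data; dd treats the k/K suffix as 1024, so:

# old: bs=4K count=256K -> 4096 B * 262144 blocks = 1 GiB written
# new: bs=4K count=4k   -> 4096 B * 4096 blocks   = 16 MiB written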
@@ -68,7 +66,7 @@ waitfornbd nbd0
mount /dev/nbd0 $mount_dir
# Write second file, to make sure writer thread has restored properly
-dd if=/dev/urandom of=$mount_dir/testfile2 bs=4K count=256K
+dd if=/dev/urandom of=$mount_dir/testfile2 bs=4K count=4k
md5sum $mount_dir/testfile2 > "$SPDK_TEST_STORAGE/testfile2.md5"
# Make sure second file will be read from disk
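The .md5 files stored above drive the final comparison; a compact way to express that check while the filesystem is still mounted (a sketch, not the script's literal code):

md5sum -c "$SPDK_TEST_STORAGE/testfile.md5"  # re-reads $mount_dir/testfile
md5sum -c "$SPDK_TEST_STORAGE/testfile2.md5" # re-reads $mount_dir/testfile2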