test/ftl: Remove dead code
In particular, remove the non-volatile cache setup and the EXTENDED fio tests: these are neither supported nor tested by the CI, so there is no value in keeping the routines around. The fio tests are now limited to what was previously called the "basic" suite and are hooked into the standard SPDK_TEST_FTL flow. Code no longer used by the remaining tests is removed as well.

Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: I865da1ea4d8743322d4c303908c598efe6ecd40b
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8294
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
parent 1d4a88104d
commit 093d01421d
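With the extended suite gone, FTL fio coverage is driven entirely by the standard autotest flag. A minimal sketch of the resulting flow; the flag and the run_test line are taken from the diff below, while the direct invocation is an assumption based on ftl.sh resolving its own device via setup.sh:

    # Enable the FTL tests in the regular autotest run
    export SPDK_TEST_FTL=1
    # ftl.sh now ends with a single unconditional fio pass:
    #   run_test "ftl_fio" "$testdir/fio.sh" "$device"
    # and fio.sh always runs the fixed list:
    #   randw randw-verify randw-verify-j2 randw-verify-depth128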
@@ -135,8 +135,6 @@ export SPDK_TEST_CRYPTO
 export SPDK_TEST_FTL
 : ${SPDK_TEST_OCF=0}
 export SPDK_TEST_OCF
-: ${SPDK_TEST_FTL_EXTENDED=0}
-export SPDK_TEST_FTL_EXTENDED
 : ${SPDK_TEST_VMD=0}
 export SPDK_TEST_VMD
 : ${SPDK_TEST_OPAL=0}
@@ -15,38 +15,6 @@ function get_num_pu() {
 		| grep 'PUs' | sed 's/[^0-9]//g'
 }
 
-function has_separate_md() {
-	local md_type
-	md_type=$($SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:$1" \
-		| grep 'Metadata Transferred' | cut -d: -f2)
-	if [[ "$md_type" =~ Separate ]]; then
-		return 0
-	else
-		return 1
-	fi
-}
-
-function create_nv_cache_bdev() {
-	local name=$1
-	local ocssd_bdf=$2
-	local cache_bdf=$3
-	local num_punits=$4
-
-	local bytes_to_mb=$((1024 * 1024))
-	local chunk_size
-	chunk_size=$(get_chunk_size $ocssd_bdf)
-
-	# We need at least 2 bands worth of data + 1 block
-	local size=$((2 * 4096 * chunk_size * num_punits + 1))
-	# Round the size up to the nearest megabyte
-	local size=$(((size + bytes_to_mb) / bytes_to_mb))
-
-	# Create NVMe bdev on specified device and split it so that it has the desired size
-	local nvc_bdev
-	nvc_bdev=$($rootdir/scripts/rpc.py bdev_nvme_attach_controller -b $name -t PCIe -a $cache_bdf)
-	$rootdir/scripts/rpc.py bdev_split_create $nvc_bdev -s $size 1
-}
-
 function gen_ftl_nvme_conf() {
 	jq . <<- JSON
 		{
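For reference, the removed create_nv_cache_bdev() (apparently in test/ftl/common.sh, judging by the surrounding get_num_pu()/gen_ftl_nvme_conf() context) sized the cache split as two bands plus one block, rounded up to whole MiB. A worked sketch of that arithmetic, assuming hypothetical geometry of 4096-block chunks and 128 parallel units:

    chunk_size=4096 num_punits=128      # hypothetical device geometry
    bytes_to_mb=$((1024 * 1024))
    # two bands of 4096-byte blocks across all parallel units, plus one block
    size=$((2 * 4096 * chunk_size * num_punits + 1))
    # round up to MiB: prints 4097, the value passed to bdev_split_create -s
    echo $(((size + bytes_to_mb) / bytes_to_mb))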
@@ -1,15 +0,0 @@
-[drive_prep]
-ioengine=spdk_bdev
-spdk_json_conf=${FTL_JSON_CONF}
-filename=${FTL_BDEV_NAME}
-thread=1
-
-direct=1
-buffered=0
-size=100%
-randrepeat=0
-norandommap
-bs=4k
-iodepth=128
-numjobs=1
-rw=write
@@ -1,19 +0,0 @@
-[global]
-ioengine=spdk_bdev
-spdk_json_conf=${FTL_JSON_CONF}
-filename=${FTL_BDEV_NAME}
-direct=1
-thread=1
-buffered=0
-size=100%
-randrepeat=0
-time_based
-norandommap
-
-[test]
-stonewall
-bs=4k
-numjobs=4
-rw=randread
-iodepth=128
-runtime=1200
@@ -1,20 +0,0 @@
-[global]
-ioengine=spdk_bdev
-spdk_json_conf=${FTL_JSON_CONF}
-filename=${FTL_BDEV_NAME}
-direct=1
-thread=1
-buffered=0
-size=100%
-randrepeat=0
-time_based
-norandommap
-
-[test]
-stonewall
-bs=4k
-numjobs=4
-rw=randrw
-rwmixread=70
-iodepth=32
-runtime=1200
@@ -1,20 +0,0 @@
-[global]
-ioengine=spdk_bdev
-spdk_json_conf=${FTL_JSON_CONF}
-filename=${FTL_BDEV_NAME}
-thread=1
-direct=1
-iodepth=128
-rw=randwrite
-verify=crc32c
-do_verify=1
-verify_dump=0
-verify_state_save=0
-verify_fatal=1
-bs=4k
-random_distribution=normal
-serialize_overlap=1
-
-[test]
-io_size=64G
-numjobs=1
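All of the deleted job files (and the surviving ones) are templates: fio expands ${FTL_JSON_CONF} and ${FTL_BDEV_NAME} from the environment. A minimal sketch of how fio.sh feeds them, using the exports visible further down in this diff:

    export FTL_BDEV_NAME=ftl0                      # substituted into filename=
    export FTL_JSON_CONF=$testdir/config/ftl.json  # substituted into spdk_json_conf=
    fio_bdev $testdir/config/fio/randw.fio         # SPDK helper wrapping fio with the spdk_bdev engine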
@@ -5,7 +5,7 @@ filename=${FTL_BDEV_NAME}
 direct=1
 thread=1
 buffered=0
-size=100%
+size=256M
 randrepeat=0
 time_based
 norandommap
@@ -15,4 +15,4 @@ bs=4k
 numjobs=1
 rw=randwrite
 iodepth=64
-runtime=1200
+runtime=10
@@ -7,21 +7,12 @@ source $testdir/common.sh
 
 rpc_py=$rootdir/scripts/rpc.py
 
-while getopts ':u:c:' opt; do
-	case $opt in
-		u) uuid=$OPTARG ;;
-		c) nv_cache=$OPTARG ;;
-		?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;;
-	esac
-done
-shift $((OPTIND - 1))
-
 device=$1
 
 restore_kill() {
-	rm -f $testdir/config/ftl.json
-	rm -f $testdir/testfile.md5
-	rm -f $testdir/testfile2.md5
+	rm -f "$config"
+	rm -f "$SPDK_TEST_STORAGE/testfile.md5"
+	rm -f "$SPDK_TEST_STORAGE/testfile2.md5"
 
 	killprocess $svcpid || true
 	rmmod nbd || true
@@ -33,6 +24,7 @@ chunk_size=$(get_chunk_size $device)
 num_group=$(get_num_group $device)
 num_pu=$(get_num_pu $device)
 pu_count=$((num_group * num_pu))
+config=$SPDK_TEST_STORAGE/ftl.json
 
 # Write one band worth of data + one extra chunk
 data_size=$((chunk_size * (pu_count + 1)))
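A worked sketch of the sizing above, with hypothetical geometry (chunk_size of 4096 blocks, 2 groups of 64 parallel units):

    chunk_size=4096 num_group=2 num_pu=64       # hypothetical device geometry
    pu_count=$((num_group * num_pu))            # 128
    data_size=$((chunk_size * (pu_count + 1)))  # 528384 blocks: one band + one chunk
    # dd below then writes exactly that many 4K blocks (~2 GiB):
    #   dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$data_size oflag=dsync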
@@ -41,17 +33,10 @@ data_size=$((chunk_size * (pu_count + 1)))
 svcpid=$!
 waitforlisten $svcpid
 
-if [ -n "$nv_cache" ]; then
-	nvc_bdev=$(create_nv_cache_bdev nvc0 $device $nv_cache $pu_count)
-fi
-
 $rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
 $rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
 ftl_construct_args="bdev_ftl_create -b ftl0 -d nvme0n1 -o"
 
-[ -n "$nvc_bdev" ] && ftl_construct_args+=" -c $nvc_bdev"
-[ -n "$uuid" ] && ftl_construct_args+=" -u $uuid"
-
 $rpc_py $ftl_construct_args
 
 # Load the nbd driver
@@ -59,11 +44,11 @@ modprobe nbd
 $rpc_py nbd_start_disk ftl0 /dev/nbd0
 waitfornbd nbd0
 
-$rpc_py save_config > $testdir/config/ftl.json
+$rpc_py save_config > "$config"
 
 dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$data_size oflag=dsync
 # Calculate checksum of the data written
-dd if=/dev/nbd0 bs=4K count=$data_size | md5sum > $testdir/testfile.md5
+dd if=/dev/nbd0 bs=4K count=$data_size | md5sum > "$SPDK_TEST_STORAGE/testfile.md5"
 $rpc_py nbd_stop_disk /dev/nbd0
 
 # Force kill bdev service (dirty shutdown) and start it again
@@ -74,20 +59,20 @@ rm -f /dev/shm/spdk_tgt_trace.pid$svcpid
 svcpid=$!
 waitforlisten $svcpid
 
-$rpc_py load_config < $testdir/config/ftl.json
+$rpc_py load_config < "$config"
 waitfornbd nbd0
 
 # Write extra data after restore
 dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$chunk_size seek=$data_size oflag=dsync
 # Save md5 data
-dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$data_size | md5sum > $testdir/testfile2.md5
+dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$data_size | md5sum > "$SPDK_TEST_STORAGE/testfile2.md5"
 
 # Make sure all data will be read from disk
 echo 3 > /proc/sys/vm/drop_caches
 
 # Verify that the checksum matches and the data is consistent
-dd if=/dev/nbd0 bs=4K count=$data_size | md5sum -c $testdir/testfile.md5
-dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$data_size | md5sum -c $testdir/testfile2.md5
+dd if=/dev/nbd0 bs=4K count=$data_size | md5sum -c "$SPDK_TEST_STORAGE/testfile.md5"
+dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$data_size | md5sum -c "$SPDK_TEST_STORAGE/testfile2.md5"
 
 trap - SIGINT SIGTERM EXIT
 restore_kill
@@ -5,9 +5,7 @@ rootdir=$(readlink -f $testdir/../..)
 source $rootdir/test/common/autotest_common.sh
 source $testdir/common.sh
 
-declare -A suite
-suite['basic']='randw-verify randw-verify-j2 randw-verify-depth128'
-suite['extended']='drive-prep randw-verify-qd128-ext randw randr randrw'
+tests=(randw randw-verify randw-verify-j2 randw-verify-depth128)
 
 rpc_py=$rootdir/scripts/rpc.py
 
@@ -17,19 +15,12 @@ fio_kill() {
 }
 
 device=$1
-tests=${suite[$2]}
-uuid=$3
 
 if [[ $CONFIG_FIO_PLUGIN != y ]]; then
 	echo "FIO not available"
 	exit 1
 fi
 
-if [ -z "$tests" ]; then
-	echo "Invalid test suite '$2'"
-	exit 1
-fi
-
 export FTL_BDEV_NAME=ftl0
 export FTL_JSON_CONF=$testdir/config/ftl.json
 
@@ -41,12 +32,7 @@ waitforlisten $svcpid
 
 $rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
 $rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1
 
-if [ -z "$uuid" ]; then
-	$rpc_py bdev_ftl_create -b ftl0 -d nvme0n1
-else
-	$rpc_py bdev_ftl_create -b ftl0 -d nvme0n1 -u $uuid
-fi
+$rpc_py bdev_ftl_create -b ftl0 -d nvme0n1
 
 waitforbdev ftl0
 
@@ -59,7 +45,7 @@ waitforbdev ftl0
 killprocess $svcpid
 trap - SIGINT SIGTERM EXIT
 
-for test in ${tests}; do
+for test in "${tests[@]}"; do
 	timing_enter $test
 	fio_bdev $testdir/config/fio/$test.fio
 	timing_exit $test
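The loop change above is the standard bash array idiom: quoted ${tests[@]} expands to exactly one word per element, while the old unquoted $tests relied on word-splitting a flat string pulled from the suite map. A self-contained sketch:

    tests=(randw randw-verify randw-verify-j2 randw-verify-depth128)
    for test in "${tests[@]}"; do  # one word per element, whitespace-safe
        echo "would run: config/fio/$test.fio"
    done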
@@ -5,8 +5,6 @@ rootdir=$(readlink -f $testdir/../..)
 source $rootdir/test/common/autotest_common.sh
 source $testdir/common.sh
 
-rpc_py=$rootdir/scripts/rpc.py
-
 function at_ftl_exit() {
 	# restore original driver
 	PCI_ALLOWED="$device" PCI_BLOCKED="" DRIVER_OVERRIDE="$ocssd_original_dirver" $rootdir/scripts/setup.sh
@@ -29,52 +27,8 @@ trap 'at_ftl_exit' SIGINT SIGTERM EXIT
 # OCSSD is blocked so bind it to vfio/uio driver before testing
 PCI_ALLOWED="$device" PCI_BLOCKED="" DRIVER_OVERRIDE="" $rootdir/scripts/setup.sh
 
-# Use first regular NVMe disk (non-OC) as non-volatile cache
-nvme_disks=$($rootdir/scripts/gen_nvme.sh | jq -r \
-	".config[] | select(.params.traddr != \"$device\").params.traddr")
-
-for disk in $nvme_disks; do
-	if has_separate_md $disk; then
-		nv_cache=$disk
-		break
-	fi
-done
-
-if [ -z "$nv_cache" ]; then
-	# TODO: once CI has devices with separate metadata support fail the test here
-	echo "Couldn't find NVMe device to be used as non-volatile cache"
-fi
-
 run_test "ftl_bdevperf" $testdir/bdevperf.sh $device
 run_test "ftl_bdevperf_append" $testdir/bdevperf.sh $device --use_append
 
 run_test "ftl_restore" $testdir/restore.sh $device
-if [ -n "$nv_cache" ]; then
-	run_test "ftl_restore_nv_cache" $testdir/restore.sh -c $nv_cache $device
-fi
-
-if [ -n "$nv_cache" ]; then
-	run_test "ftl_dirty_shutdown" $testdir/dirty_shutdown.sh -c $nv_cache $device
-fi
 
 run_test "ftl_json" $testdir/json.sh $device
 
-if [ $SPDK_TEST_FTL_EXTENDED -eq 1 ]; then
-	run_test "ftl_fio_basic" $testdir/fio.sh $device basic
-
-	"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
-	svcpid=$!
-
-	trap 'killprocess $svcpid; exit 1' SIGINT SIGTERM EXIT
-
-	waitforlisten $svcpid
-
-	$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
-	$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
-	uuid=$($rpc_py bdev_ftl_create -b ftl0 -d nvme0n1 | jq -r '.uuid')
-	killprocess $svcpid
-
-	trap - SIGINT SIGTERM EXIT
-
-	run_test "ftl_fio_extended" $testdir/fio.sh $device extended $uuid
-fi
+run_test "ftl_fio" "$testdir/fio.sh" "$device"
@@ -9,27 +9,17 @@ rpc_py=$rootdir/scripts/rpc.py
 
 mount_dir=$(mktemp -d)
 
-while getopts ':u:c:' opt; do
-	case $opt in
-		u) uuid=$OPTARG ;;
-		c) nv_cache=$OPTARG ;;
-		?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;;
-	esac
-done
-shift $((OPTIND - 1))
 device=$1
-num_group=$(get_num_group $device)
-num_pu=$(get_num_pu $device)
-pu_count=$((num_group * num_pu))
+config=$SPDK_TEST_STORAGE/ftl.json
 
 restore_kill() {
 	if mount | grep $mount_dir; then
 		umount $mount_dir
 	fi
 	rm -rf $mount_dir
-	rm -f $testdir/testfile.md5
-	rm -f $testdir/testfile2.md5
-	rm -f $testdir/config/ftl.json
+	rm -f "$SPDK_TEST_STORAGE/testfile.md5"
+	rm -f "$SPDK_TEST_STORAGE/testfile2.md5"
+	rm -f "$config"
 
 	killprocess $svcpid
 	rmmod nbd || true
@@ -42,17 +32,10 @@ svcpid=$!
 # Wait until spdk_tgt starts
 waitforlisten $svcpid
 
-if [ -n "$nv_cache" ]; then
-	nvc_bdev=$(create_nv_cache_bdev nvc0 $device $nv_cache $pu_count)
-fi
-
 $rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
 $rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
 ftl_construct_args="bdev_ftl_create -b ftl0 -d nvme0n1"
 
-[ -n "$uuid" ] && ftl_construct_args+=" -u $uuid"
-[ -n "$nv_cache" ] && ftl_construct_args+=" -c $nvc_bdev"
-
 $rpc_py $ftl_construct_args
 
 # Load the nbd driver
@@ -60,7 +43,7 @@ modprobe nbd
 $rpc_py nbd_start_disk ftl0 /dev/nbd0
 waitfornbd nbd0
 
-$rpc_py save_config > $testdir/config/ftl.json
+$rpc_py save_config > "$config"
 
 # Prepare the disk by creating ext4 fs and putting a file on it
 make_filesystem ext4 /dev/nbd0
@@ -68,7 +51,7 @@ mount /dev/nbd0 $mount_dir
 dd if=/dev/urandom of=$mount_dir/testfile bs=4K count=256K
 sync
 mount -o remount /dev/nbd0 $mount_dir
-md5sum $mount_dir/testfile > $testdir/testfile.md5
+md5sum $mount_dir/testfile > "$SPDK_TEST_STORAGE/testfile.md5"
 
 # Kill bdev service and start it again
 umount $mount_dir
@@ -79,21 +62,21 @@ svcpid=$!
 # Wait until spdk_tgt starts
 waitforlisten $svcpid
 
-$rpc_py load_config < $testdir/config/ftl.json
+$rpc_py load_config < "$config"
 waitfornbd nbd0
 
 mount /dev/nbd0 $mount_dir
 
 # Write second file, to make sure writer thread has restored properly
 dd if=/dev/urandom of=$mount_dir/testfile2 bs=4K count=256K
-md5sum $mount_dir/testfile2 > $testdir/testfile2.md5
+md5sum $mount_dir/testfile2 > "$SPDK_TEST_STORAGE/testfile2.md5"
 
 # Make sure second file will be read from disk
 echo 3 > /proc/sys/vm/drop_caches
 
 # Check both files have proper data
-md5sum -c $testdir/testfile.md5
-md5sum -c $testdir/testfile2.md5
+md5sum -c "$SPDK_TEST_STORAGE/testfile.md5"
+md5sum -c "$SPDK_TEST_STORAGE/testfile2.md5"
 
 trap - SIGINT SIGTERM EXIT
 restore_kill
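The verification pattern shared by the restore and dirty-shutdown scripts above, condensed into a sketch (paths are placeholders): checksum the data, restart the target, drop the page cache so reads hit the device, then re-verify:

    md5sum "$mount_dir/testfile" > "$SPDK_TEST_STORAGE/testfile.md5"
    echo 3 > /proc/sys/vm/drop_caches            # force reads from the device, not cache
    md5sum -c "$SPDK_TEST_STORAGE/testfile.md5"  # non-zero exit on any mismatch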