test/ftl: use non-volatile cache in functional tests
This patch adds a non-volatile cache to some of the test configurations,
provided that the required device exists in the system (a regular NVMe
disk with separate metadata support).

Change-Id: I0ea43990b360712361f34aeeb1982755f48b4dc5
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/459624
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
parent 28439890e4
commit 5c9a503990
test/ftl/common.sh | 35 (new file)
test/ftl/common.sh (new file)
@@ -0,0 +1,35 @@
+# Common utility functions to be sourced by the libftl test scripts
+
+function get_chunk_size() {
+    echo $($rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" | \
+        grep 'Logical blks per chunk' | sed 's/[^0-9]//g')
+}
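
Aside: get_chunk_size scrapes the identify tool's text output. Assuming the tool prints a geometry line along these lines (the label matches the grep pattern; the address and value here are made up), the pipeline reduces it to a bare number:

    $ ./examples/nvme/identify/identify -r "trtype:PCIe traddr:0000:01:00.0" | grep 'Logical blks per chunk'
    Logical blks per chunk:    4096
    # sed 's/[^0-9]//g' then deletes every non-digit character, leaving: 4096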
+
+function has_separate_md() {
+    local md_type=$($rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" | \
+        grep 'Metadata Transferred' | cut -d: -f2)
+    if [[ "$md_type" =~ Separate ]]; then
+        return 0
+    else
+        return 1
+    fi
+}
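
Because has_separate_md reports through its exit status, it composes directly with if, which is how ftl.sh uses it later in this patch. A minimal sketch, with a hypothetical BDF and a hypothetical identify output line:

    $ ./examples/nvme/identify/identify -r "trtype:PCIe traddr:0000:02:00.0" | grep 'Metadata Transferred'
    Metadata Transferred:    Separate
    $ has_separate_md 0000:02:00.0 && echo "separate metadata buffer supported"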
+
+function create_nv_cache_bdev() {
+    local name=$1
+    local ocssd_bdf=$2
+    local cache_bdf=$3
+    local num_punits=$4
+
+    local bytes_to_mb=$[1024 * 1024]
+    local chunk_size=$(get_chunk_size $ocssd_bdf)
+
+    # We need at least 2 bands worth of data + 1 block
+    local size=$[2 * 4096 * $chunk_size * $num_punits + 1]
+    # Round the size up to the nearest megabyte
+    local size=$[($size + $bytes_to_mb) / $bytes_to_mb]
+
+    # Create NVMe bdev on specified device and split it so that it has the desired size
+    local nvc_bdev=$($rootdir/scripts/rpc.py construct_nvme_bdev -b $name -t PCIe -a $cache_bdf)
+    $rootdir/scripts/rpc.py construct_split_vbdev $nvc_bdev -s $size 1
+}
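
To make the sizing concrete, a worked example with made-up geometry (chunk_size=4096 logical blocks of 4 KiB each, num_punits=4):

    size = 2 * 4096 * 4096 * 4 + 1            # = 134217729 bytes (two bands + 1)
    size = (134217729 + 1048576) / 1048576    # = 129 MB after integer division

Note that the rounding expression always adds a full megabyte before dividing, so the result carries up to 1 MB of slack; for a cache that only has to be at least this large, the padding is harmless.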

test/ftl/dirty_shutdown.sh
@@ -3,19 +3,25 @@
 testdir=$(readlink -f $(dirname $0))
 rootdir=$(readlink -f $testdir/../..)
 source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh

 rpc_py=$rootdir/scripts/rpc.py
 pu_start=0
 pu_end=3
 additional_blocks=16

+while getopts ':u:c:' opt; do
+    case $opt in
+        u) uuid=$OPTARG ;;
+        c) nv_cache=$OPTARG ;;
+        ?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;;
+    esac
+done
+shift $((OPTIND -1))

 device=$1
-uuid=$2
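
Illustrative invocations of the reworked scripts (all addresses and the UUID below are placeholders): the OCSSD address stays positional, while the UUID and the cache device become options:

    # fresh instance, with a non-volatile cache carved out of a second NVMe disk
    ./test/ftl/dirty_shutdown.sh -c 0000:02:00.0 0000:01:00.0

    # restore a previously created instance by UUID, no cache
    ./test/ftl/restore.sh -u 12345678-1234-1234-1234-123456789abc 0000:01:00.0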

 restore_kill() {
     rm -f $testdir/config/ftl.json
-    rm -f $testdir/empty
-    rm -f $testdir/testblock
     rm -f $testdir/testfile.md5
     rm -f $testdir/testfile2.md5
@@ -26,22 +32,26 @@ restore_kill() {

 trap "restore_kill; exit 1" SIGINT SIGTERM EXIT

-# Extract chunk size
-chunk_size=$($rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$device" |
-    grep 'Logical blks per chunk' | sed 's/[^0-9]//g')
+chunk_size=$(get_chunk_size $device)
+pu_count=$(($pu_end - $pu_start + 1))

-band_size=$(($chunk_size*($pu_end-$pu_start+1)))
+# Write one band worth of data + one extra chunk
+data_size=$(($chunk_size * ($pu_count + 1)))

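With the same made-up geometry as before (chunk_size=4096, pu_start=0, pu_end=3), pu_count = 4 and data_size = 4096 * (4 + 1) = 20480 blocks; at bs=4K that is one full band (four chunks, one per parallel unit) plus one extra chunk, so the writes below are guaranteed to spill into a second band.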
 $rootdir/app/spdk_tgt/spdk_tgt & svcpid=$!
 # Wait until spdk_tgt starts
 waitforlisten $svcpid

-if [ -n "$uuid" ]; then
-    $rpc_py construct_ftl_bdev -b nvme0 -a $device -l $pu_start-$pu_end -u $uuid -o
-else
-    $rpc_py construct_ftl_bdev -b nvme0 -a $device -l $pu_start-$pu_end -o
+if [ -n "$nv_cache" ]; then
+    nvc_bdev=$(create_nv_cache_bdev nvc0 $device $nv_cache $pu_count)
 fi
+
+ftl_construct_args="construct_ftl_bdev -b nvme0 -a $device -l $pu_start-$pu_end -o"
+
+[ -n "$nvc_bdev" ] && ftl_construct_args+=" -c $nvc_bdev"
+[ -n "$uuid" ] && ftl_construct_args+=" -u $uuid"
+
+$rpc_py $ftl_construct_args

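For a sense of what actually gets sent, if a cache bdev was created and no UUID was given, the composed RPC might expand to something like the following (the address and the bdev name are illustrative; the split bdev name depends on what create_nv_cache_bdev printed):

    ./scripts/rpc.py construct_ftl_bdev -b nvme0 -a 0000:01:00.0 -l 0-3 -o -c nvc0n1p0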
 # Load the nbd driver
 modprobe nbd
 $rpc_py start_nbd_disk nvme0 /dev/nbd0
@@ -49,53 +59,32 @@ waitfornbd nbd0

 $rpc_py save_config > $testdir/config/ftl.json

-# Send band worth of data in 2 steps (some data should be written to 2nd band due to metadata overhead)
-dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$(($band_size - $chunk_size)) oflag=dsync
-offset=$(($band_size - $chunk_size))
-
-dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$chunk_size seek=$offset oflag=dsync
-offset=$(($offset + $chunk_size))
-
-# Save md5 data of first batch (which should be fully on a closed band and recoverable)
-dd if=/dev/nbd0 bs=4K count=$(($band_size - $chunk_size)) | md5sum > $testdir/testfile.md5
-
-# Make sure the third batch of written data is fully on the second band
-dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$additional_blocks seek=$offset oflag=dsync
-offset=$(($offset + $additional_blocks))
-
+dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$data_size oflag=dsync
+# Calculate checksum of the data written
+dd if=/dev/nbd0 bs=4K count=$data_size | md5sum > $testdir/testfile.md5
 $rpc_py stop_nbd_disk /dev/nbd0

 # Force kill bdev service (dirty shutdown) and start it again
 kill -9 $svcpid
 rm -f /dev/shm/spdk_tgt_trace.pid$svcpid
 # TODO Adapt this after waitforlisten is expanded
 sleep 5

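The point of kill -9 rather than a normal shutdown: SIGKILL gives spdk_tgt no chance to run its exit path, so the FTL device cannot persist its runtime state, which is exactly the dirty shutdown under test. The file removed from /dev/shm appears to be the tracepoint shared-memory region left behind by the killed process; removing it lets the next spdk_tgt instance start clean. With the non-volatile cache attached, the checks further down expect the entire data_size region written before the kill to verify, not just the data that reached a closed band.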
-$rootdir/app/spdk_tgt/spdk_tgt & svcpid=$!
 # Wait until spdk_tgt starts
+$rootdir/app/spdk_tgt/spdk_tgt -L ftl_init & svcpid=$!
 waitforlisten $svcpid

 # Ftl should recover, though with a loss of data (-o config option)
 $rpc_py load_config < $testdir/config/ftl.json
 waitfornbd nbd0

 # Write extra data after restore
-dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$chunk_size seek=$offset oflag=dsync
+dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$chunk_size seek=$data_size oflag=dsync
 # Save md5 data
-dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$offset | md5sum > $testdir/testfile2.md5
+dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$data_size | md5sum > $testdir/testfile2.md5

 # Make sure all data will be read from disk
 echo 3 > /proc/sys/vm/drop_caches

-# Without persistent cache, first batch of data should be recoverable
-dd if=/dev/nbd0 bs=4K count=$(($band_size - $chunk_size)) | md5sum -c $testdir/testfile.md5
-
-dd if=/dev/nbd0 of=$testdir/testblock bs=4k count=$additional_blocks skip=$band_size
-# Last 4k blocks from before restore should be on second band, and return as 0s
-dd if=/dev/zero of=$testdir/empty bs=4k count=$additional_blocks
-cmp $testdir/empty $testdir/testblock
-# Verify data written after restore is still there
-dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$(($band_size + $additional_blocks)) | md5sum -c $testdir/testfile2.md5
+# Verify that the checksum matches and the data is consistent
+dd if=/dev/nbd0 bs=4K count=$data_size | md5sum -c $testdir/testfile.md5
+dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$data_size | md5sum -c $testdir/testfile2.md5

 report_test_completion ftl_dirty_shutdown

test/ftl/ftl.sh
@@ -3,6 +3,7 @@
 testdir=$(readlink -f $(dirname $0))
 rootdir=$(readlink -f $testdir/../..)
 source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh

 rpc_py=$rootdir/scripts/rpc.py
@@ -28,6 +29,22 @@ trap "at_ftl_exit" SIGINT SIGTERM EXIT

 # OCSSD is blacklisted so bind it to vfio/uio driver before testing
 PCI_WHITELIST="$device" PCI_BLACKLIST="" DRIVER_OVERRIDE="" ./scripts/setup.sh

+# Use first regular NVMe disk (non-OC) as non-volatile cache
+nvme_disks=$($rootdir/scripts/gen_nvme.sh --json | jq -r \
+    ".config[] | select(.params.traddr != \"$device\").params.traddr")
+
+for disk in $nvme_disks; do
+    if has_separate_md $disk; then
+        nv_cache=$disk
+        break
+    fi
+done
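
The jq filter works against gen_nvme.sh --json output shaped roughly like the following (structure assumed from the filter itself, with other fields trimmed and addresses made up), selecting every controller whose traddr differs from the OCSSD under test:

    {
      "config": [
        {"params": {"traddr": "0000:01:00.0"}},
        {"params": {"traddr": "0000:02:00.0"}}
      ]
    }
    # with $device=0000:01:00.0 the filter prints: 0000:02:00.0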
+
+if [ -z "$nv_cache" ]; then
+    # TODO: once CI has devices with separate metadata support fail the test here
+    echo "Couldn't find NVMe device to be used as non-volatile cache"
+fi
+
 timing_enter ftl
 timing_enter bdevperf
@@ -37,11 +54,16 @@ timing_exit bdevperf

 timing_enter restore
 run_test suite $testdir/restore.sh $device
+if [ -n "$nv_cache" ]; then
+    run_test suite $testdir/restore.sh -c $nv_cache $device
+fi
 timing_exit restore

-timing_enter dirty_shutdown
-run_test suite $testdir/dirty_shutdown.sh $device
-timing_exit dirty_shutdown
+if [ -n "$nv_cache" ]; then
+    timing_enter dirty_shutdown
+    run_test suite $testdir/dirty_shutdown.sh -c $nv_cache $device
+    timing_exit dirty_shutdown
+fi

 timing_enter json
 run_test suite $testdir/json.sh $device

test/ftl/restore.sh
@@ -3,12 +3,23 @@
 testdir=$(readlink -f $(dirname $0))
 rootdir=$(readlink -f $testdir/../..)
 source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh

 rpc_py=$rootdir/scripts/rpc.py

 mount_dir=$(mktemp -d)
+pu_start=0
+pu_end=3
+
+while getopts ':u:c:' opt; do
+    case $opt in
+        u) uuid=$OPTARG ;;
+        c) nv_cache=$OPTARG ;;
+        ?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;;
+    esac
+done
+shift $((OPTIND -1))
 device=$1
-uuid=$2

 restore_kill() {
     if mount | grep $mount_dir; then
@@ -30,12 +41,17 @@ $rootdir/app/spdk_tgt/spdk_tgt & svcpid=$!
 # Wait until spdk_tgt starts
 waitforlisten $svcpid

-if [ -n "$uuid" ]; then
-    $rpc_py construct_ftl_bdev -b nvme0 -a $device -l 0-3 -u $uuid
-else
-    $rpc_py construct_ftl_bdev -b nvme0 -a $device -l 0-3
+if [ -n "$nv_cache" ]; then
+    nvc_bdev=$(create_nv_cache_bdev nvc0 $device $nv_cache $(($pu_end - $pu_start + 1)))
 fi
+
+ftl_construct_args="construct_ftl_bdev -b nvme0 -a $device -l ${pu_start}-${pu_end}"
+
+[ -n "$uuid" ] && ftl_construct_args+=" -u $uuid"
+[ -n "$nv_cache" ] && ftl_construct_args+=" -c $nvc_bdev"
+
+$rpc_py $ftl_construct_args

 # Load the nbd driver
 modprobe nbd
 $rpc_py start_nbd_disk nvme0 /dev/nbd0
@@ -55,7 +71,7 @@ md5sum $mount_dir/testfile > $testdir/testfile.md5
 umount $mount_dir
 killprocess $svcpid

-$rootdir/app/spdk_tgt/spdk_tgt & svcpid=$!
+$rootdir/app/spdk_tgt/spdk_tgt -L ftl_init & svcpid=$!
 # Wait until spdk_tgt starts
 waitforlisten $svcpid
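
(A side note on the restarted target: -L enables an SPDK debug log flag, here ftl_init, which makes the FTL initialization and restore path more verbose in the test logs; as far as I know the flag only takes effect in debug builds.)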