test/setup: Add tests for different block devices' setups

nvme_mount - basic test verifying that setup.sh sees mounted nvme
             devices (either partitions or the entire drive).

dm_mount - test verifying that setup.sh sees nvme devices when
           they are part of a dm setup.

Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Id250b4081b3e04a7f5b26401d2ae1b226fdb2d8f
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/5311
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Michal Berger 2020-11-27 11:14:05 +01:00, committed by Tomasz Zawadzki
parent d69b091262
commit 18020ec73b
3 changed files with 232 additions and 0 deletions

test/setup/common.sh

@@ -31,3 +31,40 @@ get_meminfo() {
xtrace_restore
}
partition_drive() {
local disk=$1
local part_no=${2:-2}
local size=${3:-1073741824} # default 1G
local part part_start=0 part_end=0
local parts=()
for ((part = 1; part <= part_no; part++)); do
parts+=("${disk}p$part")
done
# Convert size to sectors for more precise partitioning
((size /= $(< "/sys/class/block/$disk/queue/physical_block_size")))
"$rootdir/scripts/sync_dev_uevents.sh" block/partition "${parts[@]}" &
# Avoid parted since it generates too much noise over netlink
sgdisk "/dev/$disk" --zap-all || :
for ((part = 1; part <= part_no; part++)); do
((part_start = part_start == 0 ? 2048 : part_end + 1))
((part_end = part_start + size - 1))
sgdisk "/dev/$disk" --new="$part:$part_start:$part_end"
done
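# Wait for sync_dev_uevents.sh to see uevents for all the requested
# partitions before returning.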
wait "$!"
}
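# For reference, the sector arithmetic above with the default 1G size
# and a 512-byte physical block size (illustrative values only):
# size becomes 1073741824 / 512 == 2097152 sectors, so partition 1
# spans sectors 2048-2099199 and partition 2 spans 2099200-4196351.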
mkfs() {
local dev=$1 mount=$2 size=$3
mkdir -p "$mount"
[[ -e $dev ]]
mkfs.ext4 -qF "$dev" $size
mount "$dev" "$mount"
}
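Taken together, the two helpers can be combined as in this minimal
sketch (the device name and mountpoint are hypothetical):

partition_drive nvme0n1 2           # creates nvme0n1p1 and nvme0n1p2, 1G each
mkfs /dev/nvme0n1p1 /mnt/nvme_test  # ext4 + mount in one step

Note that mkfs() shadows the system mkfs(8) within these scripts; the
actual filesystem is still created via mkfs.ext4.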

test/setup/devices.sh (new executable file, 194 lines)

@@ -0,0 +1,194 @@
#!/usr/bin/env bash
testdir=$(readlink -f "$(dirname "$0")")
rootdir=$(readlink -f "$testdir/../../")
source "$testdir/common.sh"
shopt -s nullglob
cleanup() {
cleanup_nvme
cleanup_dm
if [[ -b /dev/$test_disk ]]; then
wipefs --all "/dev/$test_disk"
fi
}
cleanup_nvme() {
if mountpoint -q "$nvme_mount"; then
umount "$nvme_mount"
fi
if [[ -b /dev/$nvme_disk_p ]]; then
wipefs --all "/dev/$nvme_disk_p"
fi
if [[ -b /dev/$nvme_disk ]]; then
wipefs --all "/dev/$nvme_disk"
fi
}
cleanup_dm() {
if mountpoint -q "$dm_mount"; then
umount "$dm_mount"
fi
if [[ -L /dev/mapper/$dm_name ]]; then
dmsetup remove --force "$dm_name"
fi
if [[ -b /dev/$pv0 ]]; then
wipefs --all "/dev/$pv0"
fi
if [[ -b /dev/$pv1 ]]; then
wipefs --all "/dev/$pv1"
fi
}
verify() {
local dev=$1
local mounts=$2
local mount_point=$3
local test_file=$4
local found=0
: > "$test_file"
local pci status
while read -r pci _ _ status; do
if [[ $pci == "$dev" && \
$status == *"Active mountpoints on $mounts"* ]]; then
found=1
fi
done < <(PCI_ALLOWED="$dev" setup output config)
((found == 1))
# Does the mount still exist?
mountpoint -q "$mount_point"
# Does the test file still exist?
[[ -e $test_file ]]
rm "$test_file"
}
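# For illustration, given a fabricated status line such as
#   0000:5e:00.0 (8086 0953): Active mountpoints on nvme0n1:nvme0n1p1
# the read in verify() yields pci=0000:5e:00.0 (two fields skipped) and
# status="Active mountpoints on nvme0n1:nvme0n1p1".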
nvme_mount() {
# Agenda 1:
# - Create a single partition on the nvme drive
# - Install an ext4 fs on that partition
# - Mount the partition
# - Run tests and check if setup.sh skipped
# the nvme controller the given block device
# is bound to.
# Agenda 2:
# - Install ext4 on the entire nvme drive
# - Mount the drive
# - Run tests and check if setup.sh skipped
# the nvme controller the given block device
# is bound to.
# Keep scope of all the variables global to make the cleanup process easier.
nvme_disk=$test_disk
nvme_disk_p=${nvme_disk}p1
nvme_mount=$SPDK_TEST_STORAGE/nvme_mount
nvme_dummy_test_file=$nvme_mount/test_nvme
# Agenda 1
partition_drive "$nvme_disk" 1
mkfs "/dev/$nvme_disk_p" "$nvme_mount"
verify \
"${blocks_to_pci["$nvme_disk"]}" \
"$nvme_disk:$nvme_disk_p" \
"$nvme_mount" \
"$nvme_dummy_test_file"
cleanup_nvme
# Agenda 2
mkfs "/dev/$nvme_disk" "$nvme_mount" 1024M
verify \
"${blocks_to_pci["$nvme_disk"]}" \
"$nvme_disk:$nvme_disk" \
"$nvme_mount" \
"$nvme_dummy_test_file"
# All done, final cleanup
cleanup_nvme
}
dm_mount() {
# Agenda:
# - Create two partitions on the nvme drive
# - Create a dm device consisting of half of
# the size of each partition
# - Install an ext4 fs on the dm device
# - Mount the dm device
# - Run tests and check if setup.sh skipped
# the nvme controller the given block devices
# are bound to.
# Keep scope of all the variables global to make the cleanup process easier.
pv=$test_disk
pv0=${pv}p1
pv1=${pv}p2
partition_drive "$pv"
dm_name=nvme_dm_test
dm_mount=$SPDK_TEST_STORAGE/dm_mount
dm_dummy_test_file=$dm_mount/test_dm
# Each partition is 1G in size; join their halves
dmsetup create "$dm_name" <<- DM_TABLE
0 1048576 linear /dev/$pv0 0
1048576 1048576 linear /dev/$pv1 0
DM_TABLE
[[ -e /dev/mapper/$dm_name ]]
dm=$(readlink -f "/dev/mapper/$dm_name")
dm=${dm##*/}
[[ -e /sys/class/block/$pv0/holders/$dm ]]
[[ -e /sys/class/block/$pv1/holders/$dm ]]
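# (A block device's holders/ directory in sysfs lists the devices
# stacked on top of it, so both partitions should now point at the
# dm node.)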
mkfs "/dev/mapper/$dm_name" "$dm_mount"
verify \
"${blocks_to_pci["$pv"]}" \
"$pv:$dm_name" \
"$dm_mount" \
"$dm_dummy_test_file"
# All done, start tidying up
cleanup_dm
}
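# Note on the dm table above: each row is
#   <logical_start_sector> <num_sectors> linear <device> <start_sector>
# in 512-byte sectors, so 1048576 sectors == 512M, i.e. the first half
# of each 1G partition, giving a 1G dm device overall.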
trap "cleanup" EXIT
setup reset
declare -a blocks=()
declare -A blocks_to_pci=()
for block in "/sys/block/nvme"*; do
pci=$(readlink -f "$block/device/device")
pci=${pci##*/}
# Skip OCSSD drives if somehow some are still bound to the
# nvme driver.
for ocssd in $OCSSD_PCI_DEVICES; do
[[ $pci == "$ocssd" ]] && continue 2
done
# Skip devices that are in use - simply blkid each one to see if
# there's any metadata (partition table, fs, etc.) present on the drive.
if ! blkid "/dev/${block##*/}"; then
blocks+=("${block##*/}")
blocks_to_pci["${block##*/}"]=$pci
fi
done
((${#blocks[@]} > 0))
declare -r test_disk=${blocks[0]}
run_test "nvme_mount" nvme_mount
run_test "dm_mount" dm_mount

test/setup/test-setup.sh

@@ -8,3 +8,4 @@ source "$testdir/common.sh"
run_test "acl" "$testdir/acl.sh"
run_test "hugepages" "$testdir/hugepages.sh"
run_test "driver" "$testdir/driver.sh"
run_test "devices" "$testdir/devices.sh"