zfs tests: stop writing to arbitrary devices

TL;DR:  Three ZFS tests created ZFS pools on all unmounted devices listed
in /etc/fstab, corrupting their contents.  Stop that.

Imagine my surprise when the ESP on my main dev/test VM would "randomly"
become corrupted, making it unbootable.  Three tests collect various devices
from the system and try to add them to a test pool.  The test expects this
to fail because it _assumes_ these devices are in use and ZFS will correctly
reject the request.
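
The hazardous idiom, pieced together from the helpers removed in the
diff below, was roughly this (a sketch, not the verbatim test code):

    # find_vfstab_dev returns every /dev path named in /etc/fstab,
    # e.g. "/dev/gpt/esp0 /dev/gpt/swap0" on my machine.
    vfstab_dev=$(find_vfstab_dev)
    # All of those devices go into one command, which is expected to
    # fail because they are assumed to be in use.
    log_mustnot $ZPOOL add -f "$TESTPOOL" $vfstab_dev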

My /etc/fstab has two entries for devices in /dev:

    /dev/gpt/swap0  none        swap    sw,trimonce,late
    /dev/gpt/esp0   /boot/efi   msdosfs rw,noauto

Note the `noauto` on the ESP.  In a remarkable example of irony, I chose
this because it should keep the ESP more protected from corruption;
in fact, mounting it would have protected it from this case.

The tests added all of these devices to a test pool in a _single command_,
expecting the command to fail.  The swap device was in use, so the command
correctly failed, but the ESP was added and therefore corrupted.  However,
since the command correctly failed, the test didn't notice the ESP problem.
If each device had been added with its own command, the test _might_ have
noticed that one of them incorrectly succeeded (see the sketch below).
However, two of these tests would not have noticed:

hotspare_create_001_neg was incorrectly specified as needing the Solaris
dumpadm command, so it was skipped.  _Some_ of the test needs that command,
but it checks for its presence and runs fine without it.

Due to bug 241070, zpool_add_005_pos was marked as an expected failure.
Due to the coarse level of integration with ATF, this test would still
"pass" even if it failed for the wrong reason.  I wrote bug 267554 to
reconsider the use of atf_expect_fail in these tests.
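
For illustration, the difference between the two forms looks roughly
like this (the device names stand in for the ones on my machine):

    # As written: swap0 is busy, so the single command fails and the
    # test passes, even though zpool had already written to esp0.
    log_mustnot $ZPOOL add -f "$TESTPOOL" /dev/gpt/swap0 /dev/gpt/esp0

    # Per-device: the esp0 command would wrongly succeed, and
    # log_mustnot would fail the test.
    for dev in /dev/gpt/swap0 /dev/gpt/esp0; do
        log_mustnot $ZPOOL add -f "$TESTPOOL" $dev
    done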

Let's further consider the use of various devices found around the system.
In addition to devices in /etc/fstab, the tests also used mounted devices
listed by the `mount` command.  If ZFS behaves correctly, it will refuse
to add mounted devices and swap devices to a pool.  However, these are
unit tests used by developers to ensure that ZFS still works after they
modify it, so it's reasonable to expect ZFS to do the _wrong_ thing
sometimes.  Using random host devices is unsafe.

Fix the root problem by using only the disks provided via the "disks"
variable in kyua.conf.  Use one to create a UFS file system and mount it.
Use another as a swap device.  Use a third as a dump device, but expect
it to fail due to bug 241070.
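
For reference, the disks are supplied through kyua.conf, roughly like
this (the device paths are only an example; zpool_add_005_pos now
requires five disks):

    test_suites.FreeBSD.disks = '/dev/vtbd1 /dev/vtbd2 /dev/vtbd3 /dev/vtbd4 /dev/vtbd5'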

While I'm here:

Due to commit 6b6e2954dd, we can simply add a second dump device and
remove it in cleanup.  We no longer need to save, replace, and restore the
pre-existing dump device.
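
In other words, the setup and cleanup reduce to something like this
(the device name is an example):

    # Setup: add a second dump device; any pre-existing one is untouched.
    dumpon /dev/vtbd3
    # Cleanup: remove only the device the test added.
    dumpon -r /dev/vtbd3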

The cleanup_devices function used `camcontrol inquiry` to distinguish disks
from other devices, such as partitions.  That works fine for SCSI, but not
for ATA or VirtIO block.  Use `geom disk list` instead.
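
The difference is easy to see on a VirtIO disk (vtbd0 is an example):

    # camcontrol inquiry sends a SCSI INQUIRY, which fails for ATA
    # (ada) and VirtIO block (vtbd) devices even though they are disks:
    camcontrol inquiry vtbd0

    # geom disk list reports every disk regardless of transport:
    geom disk list | grep -qx "Geom name: vtbd0"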

PR:		241070
PR:		267554
Reviewed by:	asomers
Sponsored by:	Dell Inc.
Differential Revision:	https://reviews.freebsd.org/D37257
commit 11ed0a95bf (parent 208d02fd5d)
Author:	Eric van Gyzen
Date:	2022-11-02 21:42:54 -05:00

7 changed files with 83 additions and 143 deletions

@@ -1856,7 +1856,7 @@ function cleanup_devices #vdevs
$ZPOOL labelclear -f $device
# Only wipe partition tables for arguments that are disks,
# as opposed to slices (which are valid arguments here).
if camcontrol inquiry $device >/dev/null 2>&1; then
if geom disk list | grep -qx "Geom name: ${device#/dev/}"; then
wipe_partition_table $device
fi
done


@@ -67,74 +67,6 @@ function iscontained
}
#
# Find the storage device in /etc/fstab
#
function find_vfstab_dev
{
typeset vfstab="/etc/fstab"
typeset tmpfile="$TMPDIR/fstab.tmp"
typeset vfstabdev
typeset vfstabdevs=""
typeset line
$CAT $vfstab | $GREP "^/dev/" >$tmpfile
while read -r line
do
vfstabdev=`$ECHO "$line" | $AWK '{print $1}'`
vfstabdev=${vfstabdev%%:}
vfstabdevs="$vfstabdev $vfstabdevs"
done <$tmpfile
$RM -f $tmpfile
$ECHO $vfstabdevs
}
#
# Find the storage device in /etc/mnttab
#
function find_mnttab_dev
{
typeset mnttab="/etc/mnttab"
typeset tmpfile="$TMPDIR/mnttab.tmp"
typeset mnttabdev
typeset mnttabdevs=""
typeset line
$MOUNT | $GREP "^/dev/" >$tmpfile
while read -r line
do
mnttabdev=`$ECHO "$line" | $AWK '{print $1}'`
mnttabdev=${mnttabdev%%:}
mnttabdevs="$mnttabdev $mnttabdevs"
done <$tmpfile
$RM -f $tmpfile
$ECHO $mnttabdevs
}
#
# Save the systme current dump device configuration
#
function save_dump_dev
{
typeset dumpdev
typeset swapdev
typeset swapdevs=""
typeset tmpfile="$TMPDIR/swapinfo.tmp"
dumpdev=`readlink /dev/dumpdev`
swapinfo | $GREP "^/dev/" >$tmpfile
while read -r line
do
swapdev=`$ECHO "$line" | $AWK '{print $1}'`
swapdev=${swapdev%%:}
swapdevs="$swapdev $swapdevs"
done <$tmpfile
$ECHO "$dumpdev $swapdevs"
}
#
# Common cleanup routine for partitions used in testing
#


@@ -70,29 +70,40 @@ function cleanup
poolexists "$TESTPOOL1" && \
destroy_pool "$TESTPOOL1"
$DUMPON -r $dump_dev
log_onfail $UMOUNT $TMPDIR/mounted_dir
log_onfail $SWAPOFF $swap_dev
log_onfail $DUMPON -r $dump_dev
}
log_assert "'zpool add' should fail with inapplicable scenarios."
log_onexit cleanup
mnttab_dev=$(find_mnttab_dev)
vfstab_dev=$(find_vfstab_dev)
dump_dev=${DISK2}
create_pool "$TESTPOOL" "${DISK0}"
log_must poolexists "$TESTPOOL"
create_pool "$TESTPOOL1" "${DISK1}"
log_must poolexists "$TESTPOOL1"
mounted_dev=${DISK2}
log_must $MKDIR $TMPDIR/mounted_dir
log_must $NEWFS $mounted_dev
log_must $MOUNT $mounted_dev $TMPDIR/mounted_dir
swap_dev=${DISK3}
log_must $SWAPON $swap_dev
dump_dev=${DISK4}
log_must $DUMPON $dump_dev
log_mustnot $ZPOOL add -f "$TESTPOOL" ${DISK1}
log_mustnot $ZPOOL add -f "$TESTPOOL" $mnttab_dev
log_mustnot $ZPOOL add -f "$TESTPOOL" $mounted_dev
log_mustnot $ZPOOL add -f "$TESTPOOL" $vfstab_dev
log_mustnot $ZPOOL add -f "$TESTPOOL" $swap_dev
log_must $DUMPON $dump_dev
log_mustnot $ZPOOL add -f "$TESTPOOL" $dump_dev
# https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=241070
# When that bug is fixed, change this to log_mustnot.
log_must $ZPOOL add -f "$TESTPOOL" $dump_dev
log_pass "'zpool add' should fail with inapplicable scenarios."


@@ -148,8 +148,7 @@ zpool_add_005_pos_body()
. $(atf_get_srcdir)/zpool_add.kshlib
. $(atf_get_srcdir)/zpool_add.cfg
verify_disk_count "$DISKS" 3
atf_expect_fail "PR 241070 dumpon opens geom devices non-exclusively"
verify_disk_count "$DISKS" 5
ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
ksh93 $(atf_get_srcdir)/zpool_add_005_pos.ksh || atf_fail "Testcase failed"
}


@@ -39,11 +39,11 @@
# DESCRIPTION:
# 'zpool add' with hot spares will fail
# while the hot spares belong to the following cases:
# - nonexist device,
# - nonexistent device,
# - part of an active pool,
# - currently mounted,
# - devices in /etc/vfstab,
# - specified as the dedicated dump device,
# - a swap device,
# - a dump device,
# - identical with the basic or spares vdev within the pool,
# - belong to a exported or potentially active ZFS pool,
# - a volume device that belong to the given pool,
@@ -72,15 +72,9 @@ function cleanup
poolexists "$TESTPOOL1" && \
destroy_pool "$TESTPOOL1"
if [[ -n $saved_dump_dev ]]; then
if [[ -n $DUMPADM ]]; then
log_must $DUMPADM -u -d $saved_dump_dev
fi
fi
if [[ -n $DUMPADM ]]; then
cleanup_devices $dump_dev
fi
log_onfail $UMOUNT $TMPDIR/mounted_dir
log_onfail $SWAPOFF $swap_dev
log_onfail $DUMPON -r $dump_dev
partition_cleanup
}
@@ -91,11 +85,10 @@ log_onexit cleanup
set_devs
mnttab_dev=$(find_mnttab_dev)
vfstab_dev=$(find_vfstab_dev)
saved_dump_dev=$(save_dump_dev)
dump_dev=${disk}s0
nonexist_dev=${disk}sbad_slice_num
mounted_dev=${DISK0}
swap_dev=${DISK1}
dump_dev=${DISK2}
nonexist_dev=${DISK2}bad_slice_num
create_pool "$TESTPOOL" "${pooldevs[0]}"
log_must poolexists "$TESTPOOL"
@@ -103,19 +96,25 @@ log_must poolexists "$TESTPOOL"
create_pool "$TESTPOOL1" "${pooldevs[1]}"
log_must poolexists "$TESTPOOL1"
[[ -n $mnttab_dev ]] || log_note "No mnttab devices found"
[[ -n $vfstab_dev ]] || log_note "No vfstab devices found"
# - nonexist device,
log_must $MKDIR $TMPDIR/mounted_dir
log_must $NEWFS $mounted_dev
log_must $MOUNT $mounted_dev $TMPDIR/mounted_dir
log_must $SWAPON $swap_dev
log_must $DUMPON $dump_dev
# - nonexistent device,
# - part of an active pool,
# - currently mounted,
# - devices in /etc/vfstab,
# - a swap device,
# - identical with the basic or spares vdev within the pool,
set -A arg "$nonexist_dev" \
"${pooldevs[0]}" \
"${pooldevs[1]}" \
"$mnttab_dev" \
"$vfstab_dev"
"$mounted_dev" \
"$swap_dev"
typeset -i i=0
while (( i < ${#arg[*]} )); do
@@ -126,14 +125,13 @@ while (( i < ${#arg[*]} )); do
(( i = i + 1 ))
done
# - specified as the dedicated dump device,
# This part of the test can only be run on platforms for which DUMPADM is
# defined; ie Solaris
if [[ -n $DUMPADM ]]; then
log_must $DUMPADM -u -d /dev/$dump_dev
log_mustnot $ZPOOL add "$TESTPOOL" spare $dump_dev
log_mustnot $ZPOOL add -f "$TESTPOOL" spare $dump_dev
fi
# - a dump device,
# https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=241070
# When that bug is fixed, add $dump_dev to $arg and remove this block.
log_must $ZPOOL add $TESTPOOL spare ${dump_dev}
log_must $ZPOOL remove $TESTPOOL ${dump_dev}
log_must $ZPOOL add -f $TESTPOOL spare ${dump_dev}
log_must $ZPOOL remove $TESTPOOL ${dump_dev}
# - belong to a exported or potentially active ZFS pool,


@@ -40,11 +40,11 @@
# 'zpool create [-f]' with hot spares will fail
# while the hot spares belong to the following cases:
# - existing pool
# - nonexist device,
# - nonexistent device,
# - part of an active pool,
# - currently mounted,
# - devices in /etc/vfstab,
# - specified as the dedicated dump device,
# - a swap device,
# - a dump device,
# - identical with the basic vdev within the pool,
#
# STRATEGY:
@@ -72,11 +72,9 @@ function cleanup
destroy_pool $pool
done
if [[ -n $saved_dump_dev ]]; then
if [[ -n $DUMPADM ]]; then
log_must $DUMPADM -u -d $saved_dump_dev
fi
fi
log_onfail $UMOUNT $TMPDIR/mounted_dir
log_onfail $SWAPOFF $swap_dev
log_onfail $DUMPON -r $dump_dev
partition_cleanup
}
@@ -87,28 +85,35 @@ log_onexit cleanup
set_devs
mnttab_dev=$(find_mnttab_dev)
vfstab_dev=$(find_vfstab_dev)
saved_dump_dev=$(save_dump_dev)
dump_dev=${disk}s0
mounted_dev=${DISK0}
swap_dev=${DISK1}
dump_dev=${DISK2}
nonexist_dev=${disk}sbad_slice_num
create_pool "$TESTPOOL" ${pooldevs[0]}
log_must $MKDIR $TMPDIR/mounted_dir
log_must $NEWFS $mounted_dev
log_must $MOUNT $mounted_dev $TMPDIR/mounted_dir
log_must $SWAPON $swap_dev
log_must $DUMPON $dump_dev
#
# Set up the testing scenarios parameters
# - existing pool
# - nonexist device,
# - nonexistent device,
# - part of an active pool,
# - currently mounted,
# - devices in /etc/vfstab,
# - a swap device,
# - identical with the basic vdev within the pool,
set -A arg "$TESTPOOL ${pooldevs[1]} spare ${pooldevs[2]}" \
"$TESTPOOL1 ${pooldevs[1]} spare $nonexist_dev" \
"$TESTPOOL1 ${pooldevs[1]} spare ${pooldevs[0]}" \
"$TESTPOOL1 ${pooldevs[1]} spare $mnttab_dev" \
"$TESTPOOL1 ${pooldevs[1]} spare $vfstab_dev" \
"$TESTPOOL1 ${pooldevs[1]} spare $mounted_dev" \
"$TESTPOOL1 ${pooldevs[1]} spare $swap_dev" \
"$TESTPOOL1 ${pooldevs[1]} spare ${pooldevs[1]}"
typeset -i i=0
@@ -118,22 +123,15 @@ while (( i < ${#arg[*]} )); do
(( i = i + 1 ))
done
# - a dump device,
# https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=241070
# When that bug is fixed, add $dump_dev to $arg and remove this block.
log_must $ZPOOL create $TESTPOOL1 ${pooldevs[1]} spare $dump_dev
log_must $ZPOOL destroy -f $TESTPOOL1
log_must $ZPOOL create -f $TESTPOOL1 ${pooldevs[1]} spare $dump_dev
log_must $ZPOOL destroy -f $TESTPOOL1
# now destroy the pool to be polite
log_must $ZPOOL destroy -f $TESTPOOL
#
# - specified as the dedicated dump device,
# This part of the test can only be run on platforms for which DUMPADM is
# defined; ie Solaris
#
if [[ -n $DUMPADM ]]; then
# create/destroy a pool as a simple way to set the partitioning
# back to something normal so we can use this $disk as a dump device
cleanup_devices $dump_dev
log_must $DUMPADM -u -d /dev/$dump_dev
log_mustnot $ZPOOL create $TESTPOOL1 ${pooldevs[1]} spare "$dump_dev"
log_mustnot $ZPOOL create -f $TESTPOOL1 ${pooldevs[1]} spare "$dump_dev"
fi
log_pass "'zpool create [-f]' with hot spare is failed as expected with inapplicable scenarios."


@@ -90,6 +90,7 @@ hotspare_add_003_neg_body()
. $(atf_get_srcdir)/hotspare.kshlib
. $(atf_get_srcdir)/hotspare.cfg
verify_disk_count "$DISKS" 3
ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
ksh93 $(atf_get_srcdir)/hotspare_add_003_neg.ksh || atf_fail "Testcase failed"
}
@@ -185,7 +186,7 @@ atf_test_case hotspare_create_001_neg cleanup
hotspare_create_001_neg_head()
{
atf_set "descr" "'zpool create [-f]' with hot spares should be failedwith inapplicable scenarios."
atf_set "require.progs" "ksh93 dumpadm zpool"
atf_set "require.progs" "ksh93 zpool"
atf_set "timeout" 3600
}
hotspare_create_001_neg_body()
@@ -194,6 +195,7 @@ hotspare_create_001_neg_body()
. $(atf_get_srcdir)/hotspare.kshlib
. $(atf_get_srcdir)/hotspare.cfg
verify_disk_count "$DISKS" 3
ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
ksh93 $(atf_get_srcdir)/hotspare_create_001_neg.ksh || atf_fail "Testcase failed"
}