tests: don't >-redirect without eval

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Signed-off-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Closes #13259
commit 62c5ccdf92 (parent 053dac9e7d)
Authored by наб on 2022-03-09 13:39:34 +01:00; committed by Brian Behlendorf.
51 changed files with 190 additions and 265 deletions
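The rationale behind every change below (a summary of the title, not a quote of a longer commit message): `log_must` runs its arguments as a single command, so a redirection written after it is performed by the calling shell on `log_must` itself, not inside the assertion. The wrapper's own log messages then land in the redirect target, and a failed redirect never fails the test. Wrapping the command in `eval` moves the redirection into the asserted command. A minimal sketch of the pattern (dataset and file names are illustrative, not taken from the diff):

    # Broken: the shell redirects log_must's stdout, so the wrapper's
    # own log messages are written into the stream file too.
    log_must zfs send pool/fs@snap > /tmp/stream

    # Fixed: log_must asserts one eval command; the redirection happens
    # inside it and contributes to the checked exit status.
    log_must eval "zfs send pool/fs@snap > /tmp/stream"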


@@ -657,7 +657,7 @@ function default_container_cleanup
 	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
 	[[ -e $TESTDIR1 ]] && \
-	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1
+	    log_must rm -rf $TESTDIR1
 	default_cleanup
 }
@@ -3680,15 +3680,17 @@ function is_swap_inuse
 		return 1
 	fi
-	if is_linux; then
-		swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
-	elif is_freebsd; then
-		swapctl -l | grep -w $device
-	else
-		swap -l | grep -w $device > /dev/null 2>&1
-	fi
-	return $?
+	case "$(uname)" in
+	Linux)
+		swapon -s | grep -wq $(readlink -f $device)
+		;;
+	FreeBSD)
+		swapctl -l | grep -wq $device
+		;;
+	*)
+		swap -l | grep -wq $device
+		;;
+	esac
 }
 #
@@ -3698,14 +3700,18 @@ function swap_setup
 {
 	typeset swapdev=$1
 
-	if is_linux; then
+	case "$(uname)" in
+	Linux)
 		log_must eval "mkswap $swapdev > /dev/null 2>&1"
 		log_must swapon $swapdev
-	elif is_freebsd; then
+		;;
+	FreeBSD)
 		log_must swapctl -a $swapdev
-	else
+		;;
+	*)
 		log_must swap -a $swapdev
-	fi
+		;;
+	esac
 
 	return 0
 }
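A note on the `case "$(uname)"` conversions above (plain shell semantics, not spelled out in the diff): a function returns the exit status of the last command it executes, and a `case` statement's status is that of the branch taken, so the old trailing `return $?` becomes redundant once `grep -wq` is the last command in each branch. A sketch with a hypothetical helper name and device:

    # The matching branch's status becomes the function's status.
    is_active() {
        case "$(uname)" in
        Linux) swapon -s | grep -wq "$1" ;;
        *)     swap -l | grep -wq "$1" ;;
        esac
    }
    is_active /dev/zd0 && echo "device is swap"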


@@ -48,7 +48,7 @@ verify_disk_count "$DISKS" 2
 default_mirror_setup_noexit $DISKS
 file_write -o create -w -f $init_data -b $blksize -c $write_count
-log_must echo "zfs" >> $init_data
+echo "zfs" >> $init_data
 sync_pool $TESTPOOL
 output=$(zdb -r $TESTPOOL/$TESTFS file1 $tmpfile)


@@ -56,7 +56,7 @@ log_must zfs create $TESTDS
 MNTPFS="$(get_prop mountpoint $TESTDS)"
 FILENAME="$MNTPFS/file"
 log_must mkfile 128k $FILENAME
-log_must exec 9<> $FILENAME # open file
+log_must eval "exec 9<> $FILENAME" # open file
 # 3. Lazy umount
 if is_freebsd; then
@@ -74,7 +74,7 @@ log_must zfs mount $TESTDS
 if [ ! -f $FILENAME ]; then
 	log_fail "Lazy remount failed"
 fi
-log_must exec 9>&- # close fd
+log_must eval "exec 9>&-" # close fd
 # 5. Verify multiple mounts of the same dataset are possible
 MNTPFS2="$MNTPFS-second"


@@ -134,8 +134,7 @@ dd if=/dev/urandom of=$mntpnt/f18 bs=128k count=64
 touch $mntpnt2/f18
 # Remove objects that are intended to be missing.
-rm $mntpnt/h17
-rm $mntpnt2/h*
+rm $mntpnt/h17 $mntpnt2/h*
 # Add empty objects to $fs to exercise dmu_traverse code
 for i in {1..100}; do
@@ -145,15 +144,15 @@ done
 log_must zfs snapshot $fs@s1
 log_must zfs snapshot $fs2@s1
-log_must zfs send $fs@s1 > $TESTDIR/zr010p
-log_must zfs send $fs2@s1 > $TESTDIR/zr010p2
+log_must eval "zfs send $fs@s1 > $TESTDIR/zr010p"
+log_must eval "zfs send $fs2@s1 > $TESTDIR/zr010p2"
 
 #
 # Test that, when we receive a full send as a clone of itself,
 # nop-write saves us all the space used by data blocks.
 #
-cat $TESTDIR/zr010p | log_must zfs receive -o origin=$fs@s1 $rfs
+log_must eval "zfs receive -o origin=$fs@s1 $rfs < $TESTDIR/zr010p"
 size=$(get_prop used $rfs)
 size2=$(get_prop used $fs)
 if [[ $size -ge $(($size2 / 10)) ]]; then
@@ -163,13 +162,13 @@ fi
 log_must zfs destroy -fr $rfs
 # Correctness testing: receive each full send as a clone of the other fiesystem.
-cat $TESTDIR/zr010p | log_must zfs receive -o origin=$fs2@s1 $rfs
+log_must eval "zfs receive -o origin=$fs2@s1 $rfs < $TESTDIR/zr010p"
 mntpnt_old=$(get_prop mountpoint $fs)
 mntpnt_new=$(get_prop mountpoint $rfs)
 log_must directory_diff $mntpnt_old $mntpnt_new
 log_must zfs destroy -r $rfs
-cat $TESTDIR/zr010p2 | log_must zfs receive -o origin=$fs@s1 $rfs
+log_must eval "zfs receive -o origin=$fs@s1 $rfs < $TESTDIR/zr010p2"
 mntpnt_old=$(get_prop mountpoint $fs2)
 mntpnt_new=$(get_prop mountpoint $rfs)
 log_must directory_diff $mntpnt_old $mntpnt_new
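The pipeline rewrites above fix a second weakness (my gloss, consistent with the commit title): in `cat stream | log_must zfs receive ...`, only the `zfs receive` stage is asserted, so a failing `cat`, for example from a missing stream file, goes unnoticed. The `eval` form is a single asserted command whose `<` redirection fails loudly if the file cannot be opened. An illustrative comparison (pool and path names hypothetical):

    cat /tmp/stream | log_must zfs receive tank/recv      # cat is unchecked
    log_must eval "zfs receive tank/recv < /tmp/stream"   # open failure is caught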


@@ -70,8 +70,8 @@ log_must zpool set feature@filesystem_limits=enabled "$rpoolname"
 log_must zfs create -o filesystem_limit=100 "$sendfs"
 log_must zfs snapshot "$sendfs@a"
-log_must zfs send -R "$sendfs@a" >"$streamfile"
-log_must eval "zfs recv -svuF $recvfs <$streamfile"
+log_must eval "zfs send -R \"$sendfs@a\" >\"$streamfile\""
+log_must eval "zfs recv -svuF \"$recvfs\" <\"$streamfile\""
 log_pass "ZFS can handle receiving streams with filesystem limits on \
     pools where the feature was recently enabled"


@@ -57,7 +57,7 @@ test_pool ()
 	POOL=$1
 	log_must zfs create -o recordsize=512 $POOL/fs
 	mntpnt=$(get_prop mountpoint "$POOL/fs")
-	log_must dd if=/dev/urandom of=${mntpnt}/file bs=512 count=1 2>/dev/null
+	log_must eval "dd if=/dev/urandom of=${mntpnt}/file bs=512 count=1 2>/dev/null"
 	object=$(ls -i $mntpnt | awk '{print $1}')
 	log_must zfs snapshot $POOL/fs@a
 	while true; do


@@ -46,35 +46,35 @@ cleanup
 log_must zfs set sharenfs="rw=[::1]" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "::1(" <<< "$output" > /dev/null
+log_must grep -q "::1(" <<< "$output"
 
 log_must zfs set sharenfs="rw=[2::3]" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "2::3(" <<< "$output" > /dev/null
+log_must grep -q "2::3(" <<< "$output"
 
 log_must zfs set sharenfs="rw=[::1]:[2::3]" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "::1(" <<< "$output" > /dev/null
-log_must grep "2::3(" <<< "$output" > /dev/null
+log_must grep -q "::1(" <<< "$output"
+log_must grep -q "2::3(" <<< "$output"
 
 log_must zfs set sharenfs="rw=[::1]/64" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "::1/64(" <<< "$output" > /dev/null
+log_must grep -q "::1/64(" <<< "$output"
 
 log_must zfs set sharenfs="rw=[2::3]/128" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "2::3/128(" <<< "$output" > /dev/null
+log_must grep -q "2::3/128(" <<< "$output"
 
 log_must zfs set sharenfs="rw=[::1]/32:[2::3]/128" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "::1/32(" <<< "$output" > /dev/null
-log_must grep "2::3/128(" <<< "$output" > /dev/null
+log_must grep -q "::1/32(" <<< "$output"
+log_must grep -q "2::3/128(" <<< "$output"
 
 log_must zfs set sharenfs="rw=[::1]:[2::3]/64:[2a01:1234:1234:1234:aa34:234:1234:1234]:1.2.3.4/24" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "::1(" <<< "$output" > /dev/null
-log_must grep "2::3/64(" <<< "$output" > /dev/null
-log_must grep "2a01:1234:1234:1234:aa34:234:1234:1234(" <<< "$output" > /dev/null
-log_must grep "1\\.2\\.3\\.4/24(" <<< "$output" > /dev/null
+log_must grep -q "::1(" <<< "$output"
+log_must grep -q "2::3/64(" <<< "$output"
+log_must grep -q "2a01:1234:1234:1234:aa34:234:1234:1234(" <<< "$output"
+log_must grep -q "1\\.2\\.3\\.4/24(" <<< "$output"
 
 log_pass "NFS share ip address propagated correctly."
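The `grep -q` substitution is behavior-preserving (standard grep semantics): `-q` writes nothing and exits 0 as soon as a match is found, so no redirect is needed. In the old form the `> /dev/null` was attached to `log_must`, silencing the wrapper's logging along with grep's output. Equivalent checks:

    grep "::1(" <<< "$output" > /dev/null   # old intent
    grep -q "::1(" <<< "$output"            # same result, no redirect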


@@ -39,7 +39,6 @@
 # Once set zpool autoexpand=off, zpool can *NOT* autoexpand by
 # Dynamic VDEV Expansion
 #
-#
 # STRATEGY:
 # 1) Create three vdevs (loopback, scsi_debug, and file)
 # 2) Create pool by using the different devices and set autoexpand=off
@@ -89,11 +88,7 @@ for type in " " mirror raidz draid; do
 	# The -f is required since we're mixing disk and file vdevs.
 	log_must zpool create -f $TESTPOOL1 $type $DEV1 $DEV2 $DEV3
-	typeset autoexp=$(get_pool_prop autoexpand $TESTPOOL1)
-	if [[ $autoexp != "off" ]]; then
-		log_fail "zpool $TESTPOOL1 autoexpand should be off but is " \
-		    "$autoexp"
-	fi
+	log_must [ "$(get_pool_prop autoexpand $TESTPOOL1)" = "off" ]
 
 	typeset prev_size=$(get_pool_prop size $TESTPOOL1)
@@ -107,8 +102,8 @@ for type in " " mirror raidz draid; do
 	log_must losetup -c $DEV1
 	sleep 3
-	echo "2" > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
-	echo "1" > /sys/class/block/$DEV2/device/rescan
+	log_must eval "echo 2 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb"
+	log_must eval "echo 1 > /sys/class/block/$DEV2/device/rescan"
 	block_device_wait
 	sleep 3
@@ -119,18 +114,10 @@ for type in " " mirror raidz draid; do
 	# check for zpool history for the pool size expansion
 	zpool history -il $TESTPOOL1 | grep "pool '$TESTPOOL1' size:" | \
-	    grep "vdev online" >/dev/null 2>&1
-	if [[ $? -eq 0 ]]; then
-		log_fail "pool $TESTPOOL1 is not autoexpand after vdev " \
-		    "expansion"
-	fi
+	    grep "vdev online" &&
+		log_fail "pool $TESTPOOL1 is not autoexpand after vdev expansion"
 
-	typeset expand_size=$(get_pool_prop size $TESTPOOL1)
-	if [[ "$prev_size" != "$expand_size" ]]; then
-		log_fail "pool $TESTPOOL1 size changed after vdev expansion"
-	fi
+	log_must [ "$(get_pool_prop size $TESTPOOL1)" = "$prev_size" ]
 
 	cleanup
 done


@@ -22,7 +22,7 @@
 function cleanup
 {
 	# clear any remaining zinjections
-	log_must zinject -c all > /dev/null
+	log_must eval "zinject -c all > /dev/null"
 	destroy_pool $TESTPOOL1


@@ -44,17 +44,15 @@ POOL_FILE=cryptv0.dat
 function uncompress_pool
 {
 	log_note "Creating pool from $POOL_FILE"
-	log_must bzcat \
+	log_must eval bzcat \
 	    $STF_SUITE/tests/functional/cli_root/zpool_import/blockfiles/$POOL_FILE.bz2 \
-	    > /$TESTPOOL/$POOL_FILE
-	return 0
+	    "> /$TESTPOOL/$POOL_FILE"
 }
 
 function cleanup
 {
 	poolexists $POOL_NAME && log_must zpool destroy $POOL_NAME
-	[[ -e /$TESTPOOL/$POOL_FILE ]] && rm /$TESTPOOL/$POOL_FILE
-	return 0
+	log_must rm -f /$TESTPOOL/$POOL_FILE
 }
 
 log_onexit cleanup


@@ -71,7 +71,7 @@ for ver_old in $VERSIONS; do
 	typeset -i ver_new=$(random_int_between $ver_old $MAX_VER)
 
 	create_old_pool $ver_old
-	log_must zpool upgrade -V $ver_new $pool_name > /dev/null
+	log_must eval 'zpool upgrade -V $ver_new $pool_name > /dev/null'
 	check_poolversion $pool_name $ver_new
 	destroy_upgraded_pool $ver_old
 done


@@ -54,11 +54,11 @@ log_assert "zfs send stream with large dnodes accepted by new pool"
 log_must zfs create -o dnodesize=1k $TEST_SEND_FS
 log_must touch /$TEST_SEND_FS/$TEST_FILE
 log_must zfs snap $TEST_SNAP
-log_must zfs send $TEST_SNAP > $TEST_STREAM
+log_must eval "zfs send $TEST_SNAP > $TEST_STREAM"
 log_must rm -f /$TEST_SEND_FS/$TEST_FILE
 log_must touch /$TEST_SEND_FS/$TEST_FILEINCR
 log_must zfs snap $TEST_SNAPINCR
-log_must zfs send -i $TEST_SNAP $TEST_SNAPINCR > $TEST_STREAMINCR
+log_must eval "zfs send -i $TEST_SNAP $TEST_SNAPINCR > $TEST_STREAMINCR"
 
 log_must eval "zfs recv $TEST_RECV_FS < $TEST_STREAM"
 inode=$(ls -li /$TEST_RECV_FS/$TEST_FILE | awk '{print $1}')


@@ -66,6 +66,6 @@ log_must wait
 log_must_busy zpool export $TESTPOOL
 log_must zpool import $TESTPOOL
-log_must ls -lR "/$TEST_FS/" >/dev/null 2>&1
+log_must eval "ls -lR /$TEST_FS/ >/dev/null 2>&1"
 log_must zdb -d $TESTPOOL
 log_pass


@@ -54,10 +54,10 @@ log_assert "Verify 'zpool get|history|list|status|iostat' will not be logged."
 # Save initial TESTPOOL history
 log_must eval "zpool history $TESTPOOL >$OLD_HISTORY"
 
-log_must zpool get all $TESTPOOL >/dev/null
-log_must zpool list $TESTPOOL >/dev/null
-log_must zpool status $TESTPOOL >/dev/null
-log_must zpool iostat $TESTPOOL >/dev/null
+log_must eval "zpool get all $TESTPOOL >/dev/null"
+log_must eval "zpool list $TESTPOOL >/dev/null"
+log_must eval "zpool status $TESTPOOL >/dev/null"
+log_must eval "zpool iostat $TESTPOOL >/dev/null"
 log_must eval "zpool history $TESTPOOL >$NEW_HISTORY"
 log_must diff $OLD_HISTORY $NEW_HISTORY


@@ -67,15 +67,15 @@ log_must zfs snapshot $snap2
 # Save initial TESTPOOL history
 log_must eval "zpool history $TESTPOOL > $OLD_HISTORY"
 
-log_must zfs list $fs > /dev/null
-log_must zfs get mountpoint $fs > /dev/null
+log_must eval "zfs list $fs > /dev/null"
+log_must eval "zfs get mountpoint $fs > /dev/null"
 log_must zfs unmount $fs
 log_must zfs mount $fs
 if ! is_linux; then
 	log_must zfs share $fs
 	log_must zfs unshare $fs
 fi
-log_must zfs send -i $snap1 $snap2 > /dev/null
+log_must eval "zfs send -i $snap1 $snap2 > /dev/null"
 log_must zfs holds $snap1
 
 log_must eval "zpool history $TESTPOOL > $NEW_HISTORY"


@@ -49,7 +49,7 @@ function cleanup
 	# Remove dump device.
 	#
 	if [[ -n $PREVDUMPDEV ]]; then
-		log_must dumpadm -u -d $PREVDUMPDEV > /dev/null
+		log_must eval "dumpadm -u -d $PREVDUMPDEV > /dev/null"
 	fi
 
 	destroy_pool $TESTPOOL
@@ -61,16 +61,16 @@ log_onexit cleanup
 typeset dumpdev=""
 
-PREVDUMPDEV=`dumpadm | grep "Dump device" | awk '{print $3}'`
+PREVDUMPDEV=`dumpadm | awk '/Dump device/ {print $3}'`
 
 log_note "Zero $FS_DISK0"
 log_must cleanup_devices $FS_DISK0
 
 log_note "Configuring $rawdisk0 as dump device"
-log_must dumpadm -d $rawdisk0 > /dev/null
+log_must eval "dumpadm -d $rawdisk0 > /dev/null"
 
 log_note "Confirm that dump device has been setup"
-dumpdev=`dumpadm | grep "Dump device" | awk '{print $3}'`
+dumpdev=`dumpadm | awk '/Dump device/ {print $3}'`
 [[ -z "$dumpdev" ]] && log_untested "No dump device has been configured"
 
 [[ "$dumpdev" != "$rawdisk0" ]] && \


@@ -58,25 +58,21 @@ function cleanup
 	log_note "Kill off ufsdump process if still running"
 	kill -0 $PIDUFSDUMP > /dev/null 2>&1 && \
-	    log_must kill -9 $PIDUFSDUMP > /dev/null 2>&1
+	    log_must eval "kill -9 $PIDUFSDUMP"
 
 	#
 	# Note: It would appear that ufsdump spawns a number of processes
 	# which are not killed when the $PIDUFSDUMP is whacked. So best bet
 	# is to find the rest of the them and deal with them individually.
 	#
-	for all in `pgrep ufsdump`
-	do
-		kill -9 $all > /dev/null 2>&1
-	done
+	kill -9 `pgrep ufsdump` > /dev/null 2>&1
 
 	log_note "Kill off ufsrestore process if still running"
 	kill -0 $PIDUFSRESTORE > /dev/null 2>&1 && \
-	    log_must kill -9 $PIDUFSRESTORE > /dev/null 2>&1
+	    log_must eval "kill -9 $PIDUFSRESTORE"
 
 	ismounted $UFSMP ufs && log_must umount $UFSMP
 
-	rm -rf $UFSMP
-	rm -rf $TESTDIR
+	rm -rf $UFSMP $TESTDIR
 
 	#
 	# Tidy up the disks we used.


@@ -180,13 +180,11 @@ function seconds_mmp_waits_for_activity
 	typeset mmp_write
 	typeset mmp_delay
 
-	log_must zdb -e -p $devpath $pool >$tmpfile 2>/dev/null
+	log_must eval "zdb -e -p $devpath $pool >$tmpfile 2>/dev/null"
 	mmp_fail=$(awk '/mmp_fail/ {print $NF}' $tmpfile)
 	mmp_write=$(awk '/mmp_write/ {print $NF}' $tmpfile)
 	mmp_delay=$(awk '/mmp_delay/ {print $NF}' $tmpfile)
-	if [ -f $tmpfile ]; then
-		rm $tmpfile
-	fi
+	rm $tmpfile
 
 	# In order of preference:
 	if [ -n $mmp_fail -a -n $mmp_write ]; then


@@ -60,9 +60,9 @@ log_must mmp_set_hostid $HOSTID1
 default_setup_noexit $DISK
 log_must zpool set multihost=off $TESTPOOL
 
-log_must zdb -u $TESTPOOL > $PREV_UBER
+log_must eval "zdb -u $TESTPOOL > $PREV_UBER"
 log_must sleep 5
-log_must zdb -u $TESTPOOL > $CURR_UBER
+log_must eval "zdb -u $TESTPOOL > $CURR_UBER"
 
 if ! diff "$CURR_UBER" "$PREV_UBER"; then
 	log_fail "mmp thread has updated an uberblock"
@@ -70,7 +70,7 @@ fi
 log_must zpool set multihost=on $TESTPOOL
 log_must sleep 5
-log_must zdb -u $TESTPOOL > $CURR_UBER
+log_must eval "zdb -u $TESTPOOL > $CURR_UBER"
 
 if diff "$CURR_UBER" "$PREV_UBER"; then
 	log_fail "mmp failed to update uberblocks"


@@ -53,9 +53,9 @@ log_must mmp_set_hostid $HOSTID1
 default_setup_noexit $DISK
 log_must zpool set multihost=on $TESTPOOL
 
-log_must zdb -u $TESTPOOL > $PREV_UBER
+log_must eval "zdb -u $TESTPOOL > $PREV_UBER"
 log_must sleep 5
-log_must zdb -u $TESTPOOL > $CURR_UBER
+log_must eval "zdb -u $TESTPOOL > $CURR_UBER"
 
 if diff -u "$CURR_UBER" "$PREV_UBER"; then
 	log_fail "mmp failed to update uberblocks"


@@ -62,8 +62,8 @@ done
 log_mustnot_expect space zfs create $TESTPOOL/$TESTFS/subfs
 log_mustnot_expect space zfs clone $TESTPOOL/$TESTFS@snap $TESTPOOL/clone
 
-log_must zfs send $TESTPOOL/$TESTFS@snap > $TEST_BASE_DIR/stream.$$
-log_mustnot_expect space zfs receive $TESTPOOL/$TESTFS/recvd < $TEST_BASE_DIR/stream.$$
+log_must eval "zfs send $TESTPOOL/$TESTFS@snap > $TEST_BASE_DIR/stream.$$"
+log_mustnot_expect space eval "zfs receive $TESTPOOL/$TESTFS/recvd < $TEST_BASE_DIR/stream.$$"
 log_must rm $TEST_BASE_DIR/stream.$$
 
 log_must zfs rename $TESTPOOL/$TESTFS@snap $TESTPOOL/$TESTFS@snap_newname


@@ -74,9 +74,9 @@ function do_test
 	# finish reading.
 	#
 	{
-		log_must dd bs=512 count=4 >/dev/null
+		log_must eval "dd bs=512 count=4 >/dev/null"
 		log_must eval "$cmd"
-		cat 2>&1 >/dev/null | log_must grep "Input/output error"
+		log_must eval 'cat 2>&1 >/dev/null | grep "Input/output error"'
 	} <$TXG_HIST
 }
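The redirect order in the retained `cat 2>&1 >/dev/null` is deliberate (standard left-to-right redirection): `2>&1` first duplicates stderr onto the current stdout, which is the pipe, and `>/dev/null` then re-points stdout alone, so the pipe carries only the error text for `grep` to match. A generic demonstration:

    # Prints "err": stderr rides the pipe while stdout is discarded.
    { echo out; echo err >&2; } 2>&1 >/dev/null | grep err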


@@ -77,7 +77,7 @@ function test_selfheal # <pool> <parity> <dir>
 	log_must zpool import -o cachefile=none -d $dir $pool
 
 	typeset mntpnt=$(get_prop mountpoint $pool/fs)
-	log_must find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1
+	log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
 	log_must check_pool_status $pool "errors" "No known data errors"
 
 	#
@@ -100,7 +100,7 @@ function test_selfheal # <pool> <parity> <dir>
 	log_must zpool import -o cachefile=none -d $dir $pool
 
 	typeset mntpnt=$(get_prop mountpoint $pool/fs)
-	log_must find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1
+	log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
 	log_must check_pool_status $pool "errors" "No known data errors"
 
 	log_must zpool scrub -w $pool


@@ -77,7 +77,7 @@ function test_selfheal # <pool> <parity> <dir>
 	log_must zpool import -o cachefile=none -d $dir $pool
 
 	typeset mntpnt=$(get_prop mountpoint $pool/fs)
-	log_must find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1
+	log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
 	log_must check_pool_status $pool "errors" "No known data errors"
 
 	#
@@ -100,7 +100,7 @@ function test_selfheal # <pool> <parity> <dir>
 	log_must zpool import -o cachefile=none -d $dir $pool
 
 	typeset mntpnt=$(get_prop mountpoint $pool/fs)
-	log_must find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1
+	log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
 	log_must check_pool_status $pool "errors" "No known data errors"
 
 	log_must zpool scrub -w $pool


@@ -162,7 +162,7 @@ do
 	# inject read io errors on vdev and verify resilver does not restart
 	log_must zinject -a -d ${VDEV_FILES[2]} -e io -T read -f 0.25 $TESTPOOL1
-	log_must cat ${DATAPATHS[1]} > /dev/null
+	log_must eval "cat ${DATAPATHS[1]} > /dev/null"
 	log_must zinject -c all
 
 	# there should still be 2 resilver starts w/o defer, 1 with defer


@@ -57,7 +57,7 @@ log_must set_tunable32 SCAN_LEGACY 1
 # create the pool and a 32M file (32k blocks)
 log_must truncate -s $VDEV_FILE_SIZE ${VDEV_FILES[0]} $SPARE_VDEV_FILE
 log_must zpool create -f -O recordsize=1k $TESTPOOL1 ${VDEV_FILES[0]}
-log_must dd if=/dev/urandom of=/$TESTPOOL1/file bs=1M count=32 > /dev/null 2>&1
+log_must eval "dd if=/dev/urandom of=/$TESTPOOL1/file bs=1M count=32 2>/dev/null"
 
 # determine objset/object
 objset=$(zdb -d $TESTPOOL1/ | sed -ne 's/.*ID \([0-9]*\).*/\1/p')


@@ -128,7 +128,7 @@ function cleanup_pool
 		#
 		# https://github.com/openzfs/zfs/issues/6143
 		#
-		log_must df >/dev/null
+		log_must eval "df >/dev/null"
 		log_must_busy zfs destroy -Rf $pool
 	else
 		typeset list=$(zfs list -H -r -t all -o name $pool)
@@ -153,8 +153,6 @@ function cleanup_pool
 	if [[ -d $mntpnt ]]; then
 		rm -rf $mntpnt/*
 	fi
-
-	return 0
 }
 
 function cleanup_pools
@@ -655,7 +653,7 @@ function resume_test
 	for ((i=0; i<2; i=i+1)); do
 		mess_send_file /$streamfs/$stream_num
-		log_mustnot zfs recv -suv $recvfs </$streamfs/$stream_num
+		log_mustnot eval "zfs recv -suv $recvfs </$streamfs/$stream_num"
 		stream_num=$((stream_num+1))
 
 		token=$(zfs get -Hp -o value receive_resume_token $recvfs)
@@ -665,10 +663,8 @@ function resume_test
 		log_must eval "zfs send -nvt $token > /dev/null"
 		log_must eval "zfs send -t $token >/$streamfs/$stream_num"
-		[[ -f /$streamfs/$stream_num ]] || \
-		    log_fail "NO FILE /$streamfs/$stream_num"
 	done
-	log_must zfs recv -suv $recvfs </$streamfs/$stream_num
+	log_must eval "zfs recv -suv $recvfs </$streamfs/$stream_num"
 }
 
 function get_resume_token
@@ -679,11 +675,8 @@ function get_resume_token
 	log_must eval "$sendcmd > /$streamfs/1"
 	mess_send_file /$streamfs/1
-	log_mustnot zfs recv -suv $recvfs < /$streamfs/1 2>&1
-	token=$(zfs get -Hp -o value receive_resume_token $recvfs)
-	echo "$token" > /$streamfs/resume_token
-	return 0
+	log_mustnot eval "zfs recv -suv $recvfs < /$streamfs/1 2>&1"
+	get_prop receive_resume_token $recvfs > /$streamfs/resume_token
 }
 
 #


@@ -113,8 +113,7 @@ log_onexit cleanup_all
 setup_all
 
-[[ -n $TESTDIR ]] && \
-    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 
 typeset -i COUNT=10
 typeset -i i=0


@@ -55,16 +55,14 @@ function cleanup
 	[[ $? -eq 0 ]] && \
 	    log_must zfs destroy $SNAPFS
 
-	[[ -e $TESTDIR ]] && \
-	    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
 }
 
 log_assert "Verify that a rollback to a previous snapshot succeeds."
 
 log_onexit cleanup
 
-[[ -n $TESTDIR ]] && \
-    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 
 typeset -i COUNT=10


@@ -59,16 +59,14 @@ function cleanup
 	[[ $? -eq 0 ]] && \
 	    log_must zfs destroy $SNAPFS
 
-	[[ -e $TESTDIR ]] && \
-	    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
 }
 
 log_assert "Verify rollback is with respect to latest snapshot."
 
 log_onexit cleanup
 
-[[ -n $TESTDIR ]] && \
-    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 
 typeset -i COUNT=10
@@ -109,8 +107,7 @@ while [[ $i -le $COUNT ]]; do
 	(( i = i + 1 ))
 done
 
-[[ -n $TESTDIR ]] && \
-    log_must rm -rf $TESTDIR/original_file* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -f $TESTDIR/original_file*
 
 #
 # Now rollback to latest snapshot


@@ -100,7 +100,7 @@ log_must zfs snapshot $SNAPPOOL.1
 #
 # https://github.com/openzfs/zfs/issues/6143
 #
-log_must df >/dev/null
+log_must eval "df >/dev/null"
 
 export __ZFS_POOL_RESTRICT="$TESTPOOL"
 log_must zfs unmount -a
@@ -110,6 +110,6 @@ unset __ZFS_POOL_RESTRICT
 log_must touch /$TESTPOOL/$TESTFILE/$TESTFILE.1
 log_must zfs rollback $SNAPPOOL.1
-log_must df >/dev/null
+log_must eval "df >/dev/null"
 
 log_pass "Rollbacks succeed when nested file systems are present."


@@ -54,7 +54,7 @@ function cleanup
 		log_must zfs destroy $SNAPFS
 	fi
 
-	log_must rm -rf $SNAPDIR $TESTDIR/* > /dev/null 2>&1
+	log_must rm -rf $SNAPDIR $TESTDIR/*
 }
 
 log_assert "Verify a file system snapshot is identical to original."


@@ -51,26 +51,13 @@ verify_runnable "both"
 function cleanup
 {
-	if [[ -d $CWD ]]; then
-		cd $CWD || log_fail "Could not cd $CWD"
-	fi
+	[ -d $CWD ] && log_must cd $CWD
 
-	snapexists $SNAPFS
-	if [[ $? -eq 0 ]]; then
-		log_must zfs destroy $SNAPFS
-	fi
+	snapexists $SNAPFS && log_must zfs destroy $SNAPFS
 
-	if [[ -e $SNAPDIR ]]; then
-		log_must rm -rf $SNAPDIR > /dev/null 2>&1
-	fi
-	if [[ -e $TESTDIR ]]; then
-		log_must rm -rf $TESTDIR/* > /dev/null 2>&1
-	fi
-	if [[ -d "$SNAPSHOT_TARDIR" ]]; then
-		log_must rm -rf $SNAPSHOT_TARDIR > /dev/null 2>&1
-	fi
+	[ -e $SNAPDIR ] && log_must rm -rf $SNAPDIR
+	[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
+	[ -d "$SNAPSHOT_TARDIR" ] && log_must rm -rf $SNAPSHOT_TARDIR
 }
 
 log_assert "Verify an archive of a file system is identical to " \
@@ -82,8 +69,7 @@ log_onexit cleanup
 typeset -i COUNT=21
 typeset OP=create
 
-[[ -n $TESTDIR ]] && \
-    rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && rm -rf $TESTDIR/*
 
 log_note "Create files in the zfs filesystem..."
@@ -96,33 +82,32 @@ done
 log_note "Create a tarball from $TESTDIR contents..."
 CWD=$PWD
-cd $TESTDIR || log_fail "Could not cd $TESTDIR"
+log_must cd $TESTDIR
 log_must tar cf $SNAPSHOT_TARDIR/original.tar .
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
 
 log_note "Create a snapshot and mount it..."
 log_must zfs snapshot $SNAPFS
 
 log_note "Remove all of the original files..."
-log_must rm -f $TESTDIR/file* > /dev/null 2>&1
+log_must rm -f $TESTDIR/file*
 
 log_note "Create tarball of snapshot..."
 CWD=$PWD
-cd $SNAPDIR || log_fail "Could not cd $SNAPDIR"
+log_must cd $SNAPDIR
 log_must tar cf $SNAPSHOT_TARDIR/snapshot.tar .
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
 
-log_must mkdir $TESTDIR/original
-log_must mkdir $TESTDIR/snapshot
+log_must mkdir $TESTDIR/original $TESTDIR/snapshot
 
 CWD=$PWD
-cd $TESTDIR/original || log_fail "Could not cd $TESTDIR/original"
+log_must cd $TESTDIR/original
 log_must tar xf $SNAPSHOT_TARDIR/original.tar
 
-cd $TESTDIR/snapshot || log_fail "Could not cd $TESTDIR/snapshot"
+log_must cd $TESTDIR/snapshot
 log_must tar xf $SNAPSHOT_TARDIR/snapshot.tar
 
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
 
 log_must directory_diff $TESTDIR/original $TESTDIR/snapshot
 log_pass "Directory structures match."


@@ -49,20 +49,17 @@ function cleanup
 {
 	typeset -i i=1
 	while [ $i -lt $COUNT ]; do
-		snapexists $SNAPFS.$i
-		if [[ $? -eq 0 ]]; then
-			log_must zfs destroy $SNAPFS.$i
-		fi
+		snapexists $SNAPFS.$i && log_must zfs destroy $SNAPFS.$i
 
-		if [[ -e $SNAPDIR.$i ]]; then
-			log_must rm -rf $SNAPDIR.$i > /dev/null 2>&1
+		if [ -e $SNAPDIR.$i ]; then
+			log_must rm -rf $SNAPDIR.$i
 		fi
 
 		(( i = i + 1 ))
 	done
 
-	if [[ -e $TESTDIR ]]; then
-		log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	if [ -e $TESTDIR ]; then
+		log_must rm -rf $TESTDIR/*
 	fi
 }
@@ -70,8 +67,7 @@ log_assert "Verify many snapshots of a file system can be taken."
 log_onexit cleanup
 
-[[ -n $TESTDIR ]] && \
-    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 
 typeset -i COUNT=10
@@ -86,8 +82,7 @@ while [[ $i -lt $COUNT ]]; do
 done
 
 log_note "Remove all of the original files"
-[[ -n $TESTDIR ]] && \
-    log_must rm -rf $TESTDIR/file* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/file*
 
 i=1
 while [[ $i -lt $COUNT ]]; do


@@ -48,20 +48,16 @@ verify_runnable "both"
 function cleanup
 {
-	snapexists $SNAPFS
-	[[ $? -eq 0 ]] && \
-	    log_must zfs destroy $SNAPFS
+	snapexists $SNAPFS && log_must zfs destroy $SNAPFS
 
-	[[ -e $TESTDIR ]] && \
-	    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
 }
 
 log_assert "Verify that a snapshot of an empty file system remains empty."
 
 log_onexit cleanup
 
-[[ -n $TESTDIR ]] && \
-    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 
 log_must zfs snapshot $SNAPFS
 FILE_COUNT=`ls -Al $SNAPDIR | grep -v "total 0" | wc -l`


@@ -49,17 +49,14 @@ verify_runnable "both"
 function cleanup
 {
-	snapexists $SNAPCTR
-	if [[ $? -eq 0 ]]; then
-		log_must zfs destroy $SNAPCTR
-	fi
+	snapexists $SNAPCTR && log_must zfs destroy $SNAPCTR
 
-	if [[ -e $SNAPDIR1 ]]; then
-		log_must rm -rf $SNAPDIR1 > /dev/null 2>&1
+	if [ -e $SNAPDIR1 ]; then
+		log_must rm -rf $SNAPDIR1
 	fi
-	if [[ -e $TESTDIR ]]; then
-		log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	if [ -e $TESTDIR ]; then
+		log_must rm -rf $TESTDIR/*
 	fi
 }


@@ -51,24 +51,21 @@ verify_runnable "both"
 function cleanup
 {
 	if [[ -d $CWD ]]; then
-		cd $CWD || log_fail "Could not cd $CWD"
+		log_must cd $CWD
 	fi
 
-	snapexists $SNAPCTR
-	if [[ $? -eq 0 ]]; then
-		log_must zfs destroy $SNAPCTR
-	fi
+	snapexists $SNAPCTR && log_must zfs destroy $SNAPCTR
 
-	if [[ -e $SNAPDIR1 ]]; then
-		log_must rm -rf $SNAPDIR1 > /dev/null 2>&1
+	if [ -e $SNAPDIR1 ]; then
+		log_must rm -rf $SNAPDIR1
 	fi
-	if [[ -e $TESTDIR1 ]]; then
-		log_must rm -rf $TESTDIR1/* > /dev/null 2>&1
+	if [ -e $TESTDIR1 ]; then
+		log_must rm -rf $TESTDIR1/*
 	fi
-	if [[ -d "$SNAPSHOT_TARDIR" ]]; then
-		log_must rm -rf $SNAPSHOT_TARDIR > /dev/null 2>&1
+	if [ -d "$SNAPSHOT_TARDIR" ]; then
+		log_must rm -rf $SNAPSHOT_TARDIR
 	fi
 }
@@ -81,7 +78,7 @@ log_onexit cleanup
 typeset -i COUNT=21
 typeset OP=create
 
-[[ -n $TESTDIR1 ]] && rm -rf $TESTDIR1/* > /dev/null 2>&1
+[ -n $TESTDIR1 ] && rm -rf $TESTDIR1/*
 
 log_note "Create files in the zfs dataset ..."
@@ -94,33 +91,32 @@ done
 log_note "Create a tarball from $TESTDIR1 contents..."
 CWD=$PWD
-cd $TESTDIR1 || log_fail "Could not cd $TESTDIR1"
+log_must cd $TESTDIR1
 log_must tar cf $SNAPSHOT_TARDIR/original.tar .
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
 
 log_note "Create a snapshot and mount it..."
 log_must zfs snapshot $SNAPCTR
 
 log_note "Remove all of the original files..."
-log_must rm -f $TESTDIR1/file* > /dev/null 2>&1
+log_must rm -f $TESTDIR1/file*
 
 log_note "Create tarball of snapshot..."
 CWD=$PWD
-cd $SNAPDIR1 || log_fail "Could not cd $SNAPDIR1"
+log_must cd $SNAPDIR1
 log_must tar cf $SNAPSHOT_TARDIR/snapshot.tar .
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
 
-log_must mkdir $TESTDIR1/original
-log_must mkdir $TESTDIR1/snapshot
+log_must mkdir $TESTDIR1/original $TESTDIR1/snapshot
 
 CWD=$PWD
-cd $TESTDIR1/original || log_fail "Could not cd $TESTDIR1/original"
+log_must cd $TESTDIR1/original
 log_must tar xf $SNAPSHOT_TARDIR/original.tar
 
-cd $TESTDIR1/snapshot || log_fail "Could not cd $TESTDIR1/snapshot"
+log_must cd $TESTDIR1/snapshot
 log_must tar xf $SNAPSHOT_TARDIR/snapshot.tar
 
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
 
 log_must directory_diff $TESTDIR1/original $TESTDIR1/snapshot
 log_pass "Directory structures match."


@@ -49,24 +49,21 @@ function cleanup
 {
 	typeset -i i=1
 	while [ $i -lt $COUNT ]; do
-		snapexists $SNAPCTR.$i
-		if [[ $? -eq 0 ]]; then
-			log_must zfs destroy $SNAPCTR.$i
-		fi
+		snapexists $SNAPCTR.$i && log_must zfs destroy $SNAPCTR.$i
 
-		if [[ -e $SNAPDIR.$i ]]; then
-			log_must rm -rf $SNAPDIR1.$i > /dev/null 2>&1
+		if [ -e $SNAPDIR.$i ]; then
+			log_must rm -rf $SNAPDIR1.$i
 		fi
 
 		(( i = i + 1 ))
 	done
 
-	if [[ -e $SNAPDIR1 ]]; then
-		log_must rm -rf $SNAPDIR1 > /dev/null 2>&1
+	if [ -e $SNAPDIR1 ]; then
+		log_must rm -rf $SNAPDIR1
 	fi
-	if [[ -e $TESTDIR ]]; then
-		log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	if [ -e $TESTDIR ]; then
+		log_must rm -rf $TESTDIR/*
 	fi
 }
@@ -74,8 +71,7 @@ log_assert "Verify that many snapshots can be made on a zfs dataset."
 log_onexit cleanup
 
-[[ -n $TESTDIR ]] && \
-    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 
 typeset -i COUNT=10
@@ -90,12 +86,11 @@ while [[ $i -lt $COUNT ]]; do
 done
 
 log_note "Remove all of the original files"
-[[ -n $TESTDIR ]] && \
-    log_must rm -rf $TESTDIR1/file* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -f $TESTDIR1/file*
 
 i=1
 while [[ $i -lt $COUNT ]]; do
-	FILECOUNT=`ls $SNAPDIR1.$i/file* | wc -l`
+	FILECOUNT=`echo $SNAPDIR1.$i/file* | wc -w`
 	typeset j=1
 	while [ $j -lt $FILECOUNT ]; do
 		log_must file_check $SNAPDIR1.$i/file$j $j


@@ -55,16 +55,14 @@ function cleanup
 		(( i = i + 1 ))
 	done
 
-	[[ -e $TESTDIR ]] && \
-	    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
 }
 
 log_assert "Verify that destroying snapshots returns space to the pool."
 
 log_onexit cleanup
 
-[[ -n $TESTDIR ]] && \
-    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 
 typeset -i COUNT=10
@@ -82,7 +80,7 @@ done
 typeset -i i=1
 while [[ $i -lt $COUNT ]]; do
-	log_must rm -rf $TESTDIR/file$i > /dev/null 2>&1
+	log_must rm -f $TESTDIR/file$i
 	log_must zfs destroy $SNAPFS.$i
 
 	(( i = i + 1 ))


@@ -53,15 +53,13 @@ function cleanup
 {
 	snapexists $SNAPPOOL && destroy_dataset $SNAPPOOL -r
 
-	[[ -e $TESTDIR ]] && \
-	    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
 }
 
 log_assert "Verify that rollback to a snapshot created by snapshot -r succeeds."
 
 log_onexit cleanup
 
-[[ -n $TESTDIR ]] && \
-    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 
 typeset -i COUNT=10


@@ -51,8 +51,7 @@ function cleanup
 	datasetexists $ctrfs && destroy_dataset $ctrfs -r
 	snapexists $snappool && destroy_dataset $snappool -r
 
-	[[ -e $TESTDIR ]] && \
-	    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
 }
 
 log_assert "Verify snapshots from 'snapshot -r' can be used for zfs send/recv"
@@ -67,8 +66,7 @@ snapctrfs=$ctrfs@$TESTSNAP
 fsdir=/$ctrfs
 snapdir=$fsdir/.zfs/snapshot/$TESTSNAP
 
-[[ -n $TESTDIR ]] && \
-    log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 
 typeset -i COUNT=10


@@ -48,8 +48,7 @@ verify_runnable "both"
 function cleanup
 {
-	[[ -e $TESTDIR1 ]] && \
-	    log_must rm -rf $TESTDIR1/* > /dev/null 2>&1
+	[ -e $TESTDIR1 ] && log_must rm -rf $TESTDIR1/*
 
 	snapexists $SNAPCTR && destroy_dataset $SNAPCTR


@@ -33,7 +33,7 @@ DISK=${DISKS%% *}
 log_must zpool create -f $TESTPOOL $DISK
 
 conf="$TESTDIR/vz001"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 
 assert_top_zap $TESTPOOL $DISK "$conf"
 assert_leaf_zap $TESTPOOL $DISK "$conf"


@@ -33,7 +33,7 @@ log_assert "Per-vdev ZAPs are created on pool creation with many disks."
 log_must zpool create -f $TESTPOOL $DISKS
 
 conf="$TESTDIR/vz002"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 
 assert_has_sentinel "$conf"
 for DISK in $DISKS; do


@@ -34,7 +34,7 @@ log_assert "Per-vdev ZAPs are created on pool creation with multi-level vdev "\
 log_must zpool create -f $TESTPOOL mirror $DISKS
 
 conf="$TESTDIR/vz003"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 
 assert_has_sentinel "$conf"
 assert_top_zap $TESTPOOL "type: 'mirror'" "$conf"


@@ -38,7 +38,7 @@ log_must zpool create -f $TESTPOOL $DISK
 
 # Make the pool.
 conf="$TESTDIR/vz004"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 assert_has_sentinel "$conf"
 orig_top=$(get_top_vd_zap $DISK $conf)
 orig_leaf=$(get_leaf_vd_zap $DISK $conf)
@@ -51,7 +51,7 @@ assert_zap_common $TESTPOOL $DISK "top" $orig_top
 disk2=$(echo $DISKS | awk '{print $2}')
 log_must zpool attach $TESTPOOL $DISK $disk2
 log_must zpool wait -t resilver $TESTPOOL
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 
 # Ensure top-level ZAP was transferred successfully.
 new_top=$(get_top_vd_zap "type: 'mirror'" $conf)
@@ -80,7 +80,7 @@ dsk2_leaf=$(get_leaf_vd_zap $disk2 $conf)
 #
 log_must zpool detach $TESTPOOL $DISK
 
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 final_top=$(get_top_vd_zap $disk2 $conf)
 final_leaf=$(get_leaf_vd_zap $disk2 $conf)


@@ -35,7 +35,7 @@ log_must zpool create -f $TESTPOOL $DISK
 
 # Make the pool.
 conf="$TESTDIR/vz005"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 assert_has_sentinel "$conf"
 orig_top=$(get_top_vd_zap $DISK $conf)
 orig_leaf=$(get_leaf_vd_zap $DISK $conf)
@@ -50,7 +50,7 @@ log_must zpool export $TESTPOOL
 log_must zpool import $TESTPOOL
 
 # Verify that ZAPs persisted.
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 
 new_top=$(get_top_vd_zap $DISK $conf)
 new_leaf=$(get_leaf_vd_zap $DISK $conf)


@@ -36,7 +36,7 @@ log_assert "Per-vdev ZAPs are created for added vdevs."
 log_must zpool add -f $TESTPOOL ${DISK_ARR[1]}
 
 conf="$TESTDIR/vz006"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 
 assert_has_sentinel "$conf"
 orig_top=$(get_top_vd_zap ${DISK_ARR[1]} $conf)


@@ -36,7 +36,7 @@ log_must zpool create -f $TESTPOOL mirror ${DISK_ARR[0]} ${DISK_ARR[1]}
 log_assert "Per-vdev ZAPs persist correctly on the original pool after split."
 
 conf="$TESTDIR/vz007"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 
 assert_has_sentinel "$conf"
 orig_top=$(get_top_vd_zap "type: 'mirror'" $conf)


@@ -43,14 +43,11 @@ fi
 function check_for
 {
-	grep "^${1}," $tmpfile >/dev/null 2>/dev/null
-	if [ $? -ne 0 ]; then
-		log_fail "cannot find stats for $1"
-	fi
+	log_must grep -q "^${1}," $tmpfile
 }
 
 # by default, all stats and histograms for all pools
-log_must zpool_influxdb > $tmpfile
+log_must eval "zpool_influxdb > $tmpfile"
 
 STATS="
 zpool_io_size
@@ -64,8 +61,8 @@ for stat in $STATS; do
 done
 
 # scan stats aren't expected to be there until after a scan has started
-zpool scrub $TESTPOOL
-zpool_influxdb > $tmpfile
+log_must zpool scrub $TESTPOOL
+log_must eval "zpool_influxdb > $tmpfile"
 check_for zpool_scan_stats
 
 log_pass "zpool_influxdb gathers statistics"


@@ -46,13 +46,13 @@ fi
 for swapdev in $SAVESWAPDEVS
 do
 	if ! is_swap_inuse $swapdev ; then
-		log_must swap_setup $swapdev >/dev/null 2>&1
+		swap_setup $swapdev
 	fi
 done
 
 voldev=${ZVOL_DEVDIR}/$TESTPOOL/$TESTVOL
 if is_swap_inuse $voldev ; then
-	log_must swap_cleanup $voldev
+	swap_cleanup $voldev
 fi
 
 default_zvol_cleanup