tests: don't >-redirect without eval

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Signed-off-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Closes #13259
Authored by наб on 2022-03-09 13:39:34 +01:00; committed by Brian Behlendorf
parent 053dac9e7d
commit 62c5ccdf92
51 changed files with 190 additions and 265 deletions
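
Background on the pattern being fixed (a summary for context, not part of the original commit message): log_must and its relatives execute the command given in their arguments, so a redirection written after those arguments is applied by the calling shell to the log_must invocation itself, not to the command under test. The redirected file then captures whatever the harness prints rather than the command's own output, and later steps that read that file see the wrong contents. Wrapping the whole command line in eval makes the redirection part of the command that log_must actually runs and checks. Where a redirect only silenced output, the change instead drops it or uses a quieter form (for example grep -q) rather than adding eval. An illustrative sketch built from lines touched by this change:

    # Before: the > applies to log_must itself, so $TEST_STREAM does not
    # receive the raw send stream
    log_must zfs send $TEST_SNAP > $TEST_STREAM

    # After: the redirection is evaluated as part of the checked command
    log_must eval "zfs send $TEST_SNAP > $TEST_STREAM"

    # Pipelines get the same treatment: instead of piping into log_must,
    # feed the stream from a file inside the eval'd command
    log_must eval "zfs receive -o origin=$fs@s1 $rfs < $TESTDIR/zr010p"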

View File

@@ -657,7 +657,7 @@ function default_container_cleanup
 destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
 [[ -e $TESTDIR1 ]] && \
-log_must rm -rf $TESTDIR1 > /dev/null 2>&1
+log_must rm -rf $TESTDIR1
 default_cleanup
 }
@@ -3680,15 +3680,17 @@ function is_swap_inuse
 return 1
 fi
-if is_linux; then
-swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
-elif is_freebsd; then
-swapctl -l | grep -w $device
-else
-swap -l | grep -w $device > /dev/null 2>&1
-fi
-return $?
+case "$(uname)" in
+Linux)
+swapon -s | grep -wq $(readlink -f $device)
+;;
+FreeBSD)
+swapctl -l | grep -wq $device
+;;
+*)
+swap -l | grep -wq $device
+;;
+esac
 }
 #
@@ -3698,14 +3700,18 @@ function swap_setup
 {
 typeset swapdev=$1
-if is_linux; then
+case "$(uname)" in
+Linux)
 log_must eval "mkswap $swapdev > /dev/null 2>&1"
 log_must swapon $swapdev
-elif is_freebsd; then
+;;
+FreeBSD)
 log_must swapctl -a $swapdev
-else
+;;
+*)
 log_must swap -a $swapdev
-fi
+;;
+esac
 return 0
 }

View File

@@ -48,7 +48,7 @@ verify_disk_count "$DISKS" 2
 default_mirror_setup_noexit $DISKS
 file_write -o create -w -f $init_data -b $blksize -c $write_count
-log_must echo "zfs" >> $init_data
+echo "zfs" >> $init_data
 sync_pool $TESTPOOL
 output=$(zdb -r $TESTPOOL/$TESTFS file1 $tmpfile)

View File

@@ -56,7 +56,7 @@ log_must zfs create $TESTDS
 MNTPFS="$(get_prop mountpoint $TESTDS)"
 FILENAME="$MNTPFS/file"
 log_must mkfile 128k $FILENAME
-log_must exec 9<> $FILENAME # open file
+log_must eval "exec 9<> $FILENAME" # open file
 # 3. Lazy umount
 if is_freebsd; then
@@ -74,7 +74,7 @@ log_must zfs mount $TESTDS
 if [ ! -f $FILENAME ]; then
 log_fail "Lazy remount failed"
 fi
-log_must exec 9>&- # close fd
+log_must eval "exec 9>&-" # close fd
 # 5. Verify multiple mounts of the same dataset are possible
 MNTPFS2="$MNTPFS-second"

View File

@@ -134,8 +134,7 @@ dd if=/dev/urandom of=$mntpnt/f18 bs=128k count=64
 touch $mntpnt2/f18
 # Remove objects that are intended to be missing.
-rm $mntpnt/h17
-rm $mntpnt2/h*
+rm $mntpnt/h17 $mntpnt2/h*
 # Add empty objects to $fs to exercise dmu_traverse code
 for i in {1..100}; do
@@ -145,15 +144,15 @@ done
 log_must zfs snapshot $fs@s1
 log_must zfs snapshot $fs2@s1
-log_must zfs send $fs@s1 > $TESTDIR/zr010p
-log_must zfs send $fs2@s1 > $TESTDIR/zr010p2
+log_must eval "zfs send $fs@s1 > $TESTDIR/zr010p"
+log_must eval "zfs send $fs2@s1 > $TESTDIR/zr010p2"
 #
 # Test that, when we receive a full send as a clone of itself,
 # nop-write saves us all the space used by data blocks.
 #
-cat $TESTDIR/zr010p | log_must zfs receive -o origin=$fs@s1 $rfs
+log_must eval "zfs receive -o origin=$fs@s1 $rfs < $TESTDIR/zr010p"
 size=$(get_prop used $rfs)
 size2=$(get_prop used $fs)
 if [[ $size -ge $(($size2 / 10)) ]] then
@@ -163,13 +162,13 @@ fi
 log_must zfs destroy -fr $rfs
 # Correctness testing: receive each full send as a clone of the other fiesystem.
-cat $TESTDIR/zr010p | log_must zfs receive -o origin=$fs2@s1 $rfs
+log_must eval "zfs receive -o origin=$fs2@s1 $rfs < $TESTDIR/zr010p"
 mntpnt_old=$(get_prop mountpoint $fs)
 mntpnt_new=$(get_prop mountpoint $rfs)
 log_must directory_diff $mntpnt_old $mntpnt_new
 log_must zfs destroy -r $rfs
-cat $TESTDIR/zr010p2 | log_must zfs receive -o origin=$fs@s1 $rfs
+log_must eval "zfs receive -o origin=$fs@s1 $rfs < $TESTDIR/zr010p2"
 mntpnt_old=$(get_prop mountpoint $fs2)
 mntpnt_new=$(get_prop mountpoint $rfs)
 log_must directory_diff $mntpnt_old $mntpnt_new

View File

@@ -70,8 +70,8 @@ log_must zpool set feature@filesystem_limits=enabled "$rpoolname"
 log_must zfs create -o filesystem_limit=100 "$sendfs"
 log_must zfs snapshot "$sendfs@a"
-log_must zfs send -R "$sendfs@a" >"$streamfile"
-log_must eval "zfs recv -svuF $recvfs <$streamfile"
+log_must eval "zfs send -R \"$sendfs@a\" >\"$streamfile\""
+log_must eval "zfs recv -svuF \"$recvfs\" <\"$streamfile\""
 log_pass "ZFS can handle receiving streams with filesystem limits on \
 pools where the feature was recently enabled"

View File

@@ -57,7 +57,7 @@ test_pool ()
 POOL=$1
 log_must zfs create -o recordsize=512 $POOL/fs
 mntpnt=$(get_prop mountpoint "$POOL/fs")
-log_must dd if=/dev/urandom of=${mntpnt}/file bs=512 count=1 2>/dev/null
+log_must eval "dd if=/dev/urandom of=${mntpnt}/file bs=512 count=1 2>/dev/null"
 object=$(ls -i $mntpnt | awk '{print $1}')
 log_must zfs snapshot $POOL/fs@a
 while true; do

View File

@@ -46,35 +46,35 @@ cleanup
 log_must zfs set sharenfs="rw=[::1]" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "::1(" <<< "$output" > /dev/null
+log_must grep -q "::1(" <<< "$output"
 log_must zfs set sharenfs="rw=[2::3]" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "2::3(" <<< "$output" > /dev/null
+log_must grep -q "2::3(" <<< "$output"
 log_must zfs set sharenfs="rw=[::1]:[2::3]" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "::1(" <<< "$output" > /dev/null
-log_must grep "2::3(" <<< "$output" > /dev/null
+log_must grep -q "::1(" <<< "$output"
+log_must grep -q "2::3(" <<< "$output"
 log_must zfs set sharenfs="rw=[::1]/64" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "::1/64(" <<< "$output" > /dev/null
+log_must grep -q "::1/64(" <<< "$output"
 log_must zfs set sharenfs="rw=[2::3]/128" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "2::3/128(" <<< "$output" > /dev/null
+log_must grep -q "2::3/128(" <<< "$output"
 log_must zfs set sharenfs="rw=[::1]/32:[2::3]/128" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "::1/32(" <<< "$output" > /dev/null
-log_must grep "2::3/128(" <<< "$output" > /dev/null
+log_must grep -q "::1/32(" <<< "$output"
+log_must grep -q "2::3/128(" <<< "$output"
 log_must zfs set sharenfs="rw=[::1]:[2::3]/64:[2a01:1234:1234:1234:aa34:234:1234:1234]:1.2.3.4/24" $TESTPOOL/$TESTFS
 output=$(showshares_nfs 2>&1)
-log_must grep "::1(" <<< "$output" > /dev/null
-log_must grep "2::3/64(" <<< "$output" > /dev/null
-log_must grep "2a01:1234:1234:1234:aa34:234:1234:1234(" <<< "$output" > /dev/null
-log_must grep "1\\.2\\.3\\.4/24(" <<< "$output" > /dev/null
+log_must grep -q "::1(" <<< "$output"
+log_must grep -q "2::3/64(" <<< "$output"
+log_must grep -q "2a01:1234:1234:1234:aa34:234:1234:1234(" <<< "$output"
+log_must grep -q "1\\.2\\.3\\.4/24(" <<< "$output"
 log_pass "NFS share ip address propagated correctly."

View File

@@ -39,7 +39,6 @@
 # Once set zpool autoexpand=off, zpool can *NOT* autoexpand by
 # Dynamic VDEV Expansion
 #
-#
 # STRATEGY:
 # 1) Create three vdevs (loopback, scsi_debug, and file)
 # 2) Create pool by using the different devices and set autoexpand=off
@@ -73,7 +72,7 @@ log_onexit cleanup
 log_assert "zpool can not expand if set autoexpand=off after vdev expansion"
-for type in " " mirror raidz draid; do
+for type in "" mirror raidz draid; do
 log_note "Setting up loopback, scsi_debug, and file vdevs"
 log_must truncate -s $org_size $FILE_LO
 DEV1=$(losetup -f)
@@ -89,11 +88,7 @@ for type in " " mirror raidz draid; do
 # The -f is required since we're mixing disk and file vdevs.
 log_must zpool create -f $TESTPOOL1 $type $DEV1 $DEV2 $DEV3
-typeset autoexp=$(get_pool_prop autoexpand $TESTPOOL1)
-if [[ $autoexp != "off" ]]; then
-log_fail "zpool $TESTPOOL1 autoexpand should be off but is " \
-"$autoexp"
-fi
+log_must [ "$(get_pool_prop autoexpand $TESTPOOL1)" = "off" ]
 typeset prev_size=$(get_pool_prop size $TESTPOOL1)
@@ -107,8 +102,8 @@ for type in " " mirror raidz draid; do
 log_must losetup -c $DEV1
 sleep 3
-echo "2" > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
-echo "1" > /sys/class/block/$DEV2/device/rescan
+log_must eval "echo 2 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb"
+log_must eval "echo 1 > /sys/class/block/$DEV2/device/rescan"
 block_device_wait
 sleep 3
@@ -119,18 +114,10 @@ for type in " " mirror raidz draid; do
 # check for zpool history for the pool size expansion
 zpool history -il $TESTPOOL1 | grep "pool '$TESTPOOL1' size:" | \
-grep "vdev online" >/dev/null 2>&1
-if [[ $? -eq 0 ]]; then
-log_fail "pool $TESTPOOL1 is not autoexpand after vdev " \
-"expansion"
-fi
-typeset expand_size=$(get_pool_prop size $TESTPOOL1)
-if [[ "$prev_size" != "$expand_size" ]]; then
-log_fail "pool $TESTPOOL1 size changed after vdev expansion"
-fi
+grep "vdev online" &&
+log_fail "pool $TESTPOOL1 is not autoexpand after vdev expansion"
+log_must [ "$(get_pool_prop size $TESTPOOL1)" = "$prev_size" ]
 cleanup
 done

View File

@@ -22,7 +22,7 @@
 function cleanup
 {
 # clear any remaining zinjections
-log_must zinject -c all > /dev/null
+log_must eval "zinject -c all > /dev/null"
 destroy_pool $TESTPOOL1

View File

@@ -44,17 +44,15 @@ POOL_FILE=cryptv0.dat
 function uncompress_pool
 {
 log_note "Creating pool from $POOL_FILE"
-log_must bzcat \
+log_must eval bzcat \
 $STF_SUITE/tests/functional/cli_root/zpool_import/blockfiles/$POOL_FILE.bz2 \
-> /$TESTPOOL/$POOL_FILE
-return 0
+"> /$TESTPOOL/$POOL_FILE"
 }
 function cleanup
 {
 poolexists $POOL_NAME && log_must zpool destroy $POOL_NAME
-[[ -e /$TESTPOOL/$POOL_FILE ]] && rm /$TESTPOOL/$POOL_FILE
-return 0
+log_must rm -f /$TESTPOOL/$POOL_FILE
 }
 log_onexit cleanup

View File

@@ -71,7 +71,7 @@ for ver_old in $VERSIONS; do
 typeset -i ver_new=$(random_int_between $ver_old $MAX_VER)
 create_old_pool $ver_old
-log_must zpool upgrade -V $ver_new $pool_name > /dev/null
+log_must eval 'zpool upgrade -V $ver_new $pool_name > /dev/null'
 check_poolversion $pool_name $ver_new
 destroy_upgraded_pool $ver_old
 done

View File

@@ -54,11 +54,11 @@ log_assert "zfs send stream with large dnodes accepted by new pool"
 log_must zfs create -o dnodesize=1k $TEST_SEND_FS
 log_must touch /$TEST_SEND_FS/$TEST_FILE
 log_must zfs snap $TEST_SNAP
-log_must zfs send $TEST_SNAP > $TEST_STREAM
+log_must eval "zfs send $TEST_SNAP > $TEST_STREAM"
 log_must rm -f /$TEST_SEND_FS/$TEST_FILE
 log_must touch /$TEST_SEND_FS/$TEST_FILEINCR
 log_must zfs snap $TEST_SNAPINCR
-log_must zfs send -i $TEST_SNAP $TEST_SNAPINCR > $TEST_STREAMINCR
+log_must eval "zfs send -i $TEST_SNAP $TEST_SNAPINCR > $TEST_STREAMINCR"
 log_must eval "zfs recv $TEST_RECV_FS < $TEST_STREAM"
 inode=$(ls -li /$TEST_RECV_FS/$TEST_FILE | awk '{print $1}')

View File

@@ -66,6 +66,6 @@ log_must wait
 log_must_busy zpool export $TESTPOOL
 log_must zpool import $TESTPOOL
-log_must ls -lR "/$TEST_FS/" >/dev/null 2>&1
+log_must eval "ls -lR /$TEST_FS/ >/dev/null 2>&1"
 log_must zdb -d $TESTPOOL
 log_pass

View File

@@ -54,10 +54,10 @@ log_assert "Verify 'zpool get|history|list|status|iostat' will not be logged."
 # Save initial TESTPOOL history
 log_must eval "zpool history $TESTPOOL >$OLD_HISTORY"
-log_must zpool get all $TESTPOOL >/dev/null
-log_must zpool list $TESTPOOL >/dev/null
-log_must zpool status $TESTPOOL >/dev/null
-log_must zpool iostat $TESTPOOL >/dev/null
+log_must eval "zpool get all $TESTPOOL >/dev/null"
+log_must eval "zpool list $TESTPOOL >/dev/null"
+log_must eval "zpool status $TESTPOOL >/dev/null"
+log_must eval "zpool iostat $TESTPOOL >/dev/null"
 log_must eval "zpool history $TESTPOOL >$NEW_HISTORY"
 log_must diff $OLD_HISTORY $NEW_HISTORY

View File

@@ -67,15 +67,15 @@ log_must zfs snapshot $snap2
 # Save initial TESTPOOL history
 log_must eval "zpool history $TESTPOOL > $OLD_HISTORY"
-log_must zfs list $fs > /dev/null
-log_must zfs get mountpoint $fs > /dev/null
+log_must eval "zfs list $fs > /dev/null"
+log_must eval "zfs get mountpoint $fs > /dev/null"
 log_must zfs unmount $fs
 log_must zfs mount $fs
 if ! is_linux; then
 log_must zfs share $fs
 log_must zfs unshare $fs
 fi
-log_must zfs send -i $snap1 $snap2 > /dev/null
+log_must eval "zfs send -i $snap1 $snap2 > /dev/null"
 log_must zfs holds $snap1
 log_must eval "zpool history $TESTPOOL > $NEW_HISTORY"

View File

@@ -49,7 +49,7 @@ function cleanup
 # Remove dump device.
 #
 if [[ -n $PREVDUMPDEV ]]; then
-log_must dumpadm -u -d $PREVDUMPDEV > /dev/null
+log_must eval "dumpadm -u -d $PREVDUMPDEV > /dev/null"
 fi
 destroy_pool $TESTPOOL
@@ -61,16 +61,16 @@ log_onexit cleanup
 typeset dumpdev=""
-PREVDUMPDEV=`dumpadm | grep "Dump device" | awk '{print $3}'`
+PREVDUMPDEV=`dumpadm | awk '/Dump device/ {print $3}'`
 log_note "Zero $FS_DISK0"
 log_must cleanup_devices $FS_DISK0
 log_note "Configuring $rawdisk0 as dump device"
-log_must dumpadm -d $rawdisk0 > /dev/null
+log_must eval "dumpadm -d $rawdisk0 > /dev/null"
 log_note "Confirm that dump device has been setup"
-dumpdev=`dumpadm | grep "Dump device" | awk '{print $3}'`
+dumpdev=`dumpadm | awk '/Dump device/ {print $3}'`
 [[ -z "$dumpdev" ]] && log_untested "No dump device has been configured"
 [[ "$dumpdev" != "$rawdisk0" ]] && \
View File

@@ -58,25 +58,21 @@ function cleanup
 log_note "Kill off ufsdump process if still running"
 kill -0 $PIDUFSDUMP > /dev/null 2>&1 && \
-log_must kill -9 $PIDUFSDUMP > /dev/null 2>&1
+log_must eval "kill -9 $PIDUFSDUMP"
 #
 # Note: It would appear that ufsdump spawns a number of processes
 # which are not killed when the $PIDUFSDUMP is whacked. So best bet
 # is to find the rest of the them and deal with them individually.
 #
-for all in `pgrep ufsdump`
-do
-kill -9 $all > /dev/null 2>&1
-done
+kill -9 `pgrep ufsdump` > /dev/null 2>&1
 log_note "Kill off ufsrestore process if still running"
 kill -0 $PIDUFSRESTORE > /dev/null 2>&1 && \
-log_must kill -9 $PIDUFSRESTORE > /dev/null 2>&1
+log_must eval "kill -9 $PIDUFSRESTORE"
 ismounted $UFSMP ufs && log_must umount $UFSMP
-rm -rf $UFSMP
-rm -rf $TESTDIR
+rm -rf $UFSMP $TESTDIR
 #
 # Tidy up the disks we used.

View File

@@ -180,13 +180,11 @@ function seconds_mmp_waits_for_activity
 typeset mmp_write
 typeset mmp_delay
-log_must zdb -e -p $devpath $pool >$tmpfile 2>/dev/null
+log_must eval "zdb -e -p $devpath $pool >$tmpfile 2>/dev/null"
 mmp_fail=$(awk '/mmp_fail/ {print $NF}' $tmpfile)
 mmp_write=$(awk '/mmp_write/ {print $NF}' $tmpfile)
 mmp_delay=$(awk '/mmp_delay/ {print $NF}' $tmpfile)
-if [ -f $tmpfile ]; then
-rm $tmpfile
-fi
+rm $tmpfile
 # In order of preference:
 if [ -n $mmp_fail -a -n $mmp_write ]; then

View File

@@ -60,9 +60,9 @@ log_must mmp_set_hostid $HOSTID1
 default_setup_noexit $DISK
 log_must zpool set multihost=off $TESTPOOL
-log_must zdb -u $TESTPOOL > $PREV_UBER
+log_must eval "zdb -u $TESTPOOL > $PREV_UBER"
 log_must sleep 5
-log_must zdb -u $TESTPOOL > $CURR_UBER
+log_must eval "zdb -u $TESTPOOL > $CURR_UBER"
 if ! diff "$CURR_UBER" "$PREV_UBER"; then
 log_fail "mmp thread has updated an uberblock"
@@ -70,7 +70,7 @@ fi
 log_must zpool set multihost=on $TESTPOOL
 log_must sleep 5
-log_must zdb -u $TESTPOOL > $CURR_UBER
+log_must eval "zdb -u $TESTPOOL > $CURR_UBER"
 if diff "$CURR_UBER" "$PREV_UBER"; then
 log_fail "mmp failed to update uberblocks"

View File

@@ -53,9 +53,9 @@ log_must mmp_set_hostid $HOSTID1
 default_setup_noexit $DISK
 log_must zpool set multihost=on $TESTPOOL
-log_must zdb -u $TESTPOOL > $PREV_UBER
+log_must eval "zdb -u $TESTPOOL > $PREV_UBER"
 log_must sleep 5
-log_must zdb -u $TESTPOOL > $CURR_UBER
+log_must eval "zdb -u $TESTPOOL > $CURR_UBER"
 if diff -u "$CURR_UBER" "$PREV_UBER"; then
 log_fail "mmp failed to update uberblocks"

View File

@@ -62,8 +62,8 @@ done
 log_mustnot_expect space zfs create $TESTPOOL/$TESTFS/subfs
 log_mustnot_expect space zfs clone $TESTPOOL/$TESTFS@snap $TESTPOOL/clone
-log_must zfs send $TESTPOOL/$TESTFS@snap > $TEST_BASE_DIR/stream.$$
-log_mustnot_expect space zfs receive $TESTPOOL/$TESTFS/recvd < $TEST_BASE_DIR/stream.$$
+log_must eval "zfs send $TESTPOOL/$TESTFS@snap > $TEST_BASE_DIR/stream.$$"
+log_mustnot_expect space eval "zfs receive $TESTPOOL/$TESTFS/recvd < $TEST_BASE_DIR/stream.$$"
 log_must rm $TEST_BASE_DIR/stream.$$
 log_must zfs rename $TESTPOOL/$TESTFS@snap $TESTPOOL/$TESTFS@snap_newname

View File

@@ -74,9 +74,9 @@ function do_test
 # finish reading.
 #
 {
-log_must dd bs=512 count=4 >/dev/null
+log_must eval "dd bs=512 count=4 >/dev/null"
 log_must eval "$cmd"
-cat 2>&1 >/dev/null | log_must grep "Input/output error"
+log_must eval 'cat 2>&1 >/dev/null | grep "Input/output error"'
 } <$TXG_HIST
 }

View File

@@ -77,7 +77,7 @@ function test_selfheal # <pool> <parity> <dir>
 log_must zpool import -o cachefile=none -d $dir $pool
 typeset mntpnt=$(get_prop mountpoint $pool/fs)
-log_must find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1
+log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
 log_must check_pool_status $pool "errors" "No known data errors"
 #
@@ -100,7 +100,7 @@ function test_selfheal # <pool> <parity> <dir>
 log_must zpool import -o cachefile=none -d $dir $pool
 typeset mntpnt=$(get_prop mountpoint $pool/fs)
-log_must find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1
+log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
 log_must check_pool_status $pool "errors" "No known data errors"
 log_must zpool scrub -w $pool

View File

@@ -77,7 +77,7 @@ function test_selfheal # <pool> <parity> <dir>
 log_must zpool import -o cachefile=none -d $dir $pool
 typeset mntpnt=$(get_prop mountpoint $pool/fs)
-log_must find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1
+log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
 log_must check_pool_status $pool "errors" "No known data errors"
 #
@@ -100,7 +100,7 @@ function test_selfheal # <pool> <parity> <dir>
 log_must zpool import -o cachefile=none -d $dir $pool
 typeset mntpnt=$(get_prop mountpoint $pool/fs)
-log_must find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1
+log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
 log_must check_pool_status $pool "errors" "No known data errors"
 log_must zpool scrub -w $pool

View File

@@ -162,7 +162,7 @@ do
 # inject read io errors on vdev and verify resilver does not restart
 log_must zinject -a -d ${VDEV_FILES[2]} -e io -T read -f 0.25 $TESTPOOL1
-log_must cat ${DATAPATHS[1]} > /dev/null
+log_must eval "cat ${DATAPATHS[1]} > /dev/null"
 log_must zinject -c all
 # there should still be 2 resilver starts w/o defer, 1 with defer

View File

@@ -57,7 +57,7 @@ log_must set_tunable32 SCAN_LEGACY 1
 # create the pool and a 32M file (32k blocks)
 log_must truncate -s $VDEV_FILE_SIZE ${VDEV_FILES[0]} $SPARE_VDEV_FILE
 log_must zpool create -f -O recordsize=1k $TESTPOOL1 ${VDEV_FILES[0]}
-log_must dd if=/dev/urandom of=/$TESTPOOL1/file bs=1M count=32 > /dev/null 2>&1
+log_must eval "dd if=/dev/urandom of=/$TESTPOOL1/file bs=1M count=32 2>/dev/null"
 # determine objset/object
 objset=$(zdb -d $TESTPOOL1/ | sed -ne 's/.*ID \([0-9]*\).*/\1/p')

View File

@@ -128,7 +128,7 @@ function cleanup_pool
 #
 # https://github.com/openzfs/zfs/issues/6143
 #
-log_must df >/dev/null
+log_must eval "df >/dev/null"
 log_must_busy zfs destroy -Rf $pool
 else
 typeset list=$(zfs list -H -r -t all -o name $pool)
@@ -153,8 +153,6 @@ function cleanup_pool
 if [[ -d $mntpnt ]]; then
 rm -rf $mntpnt/*
 fi
-return 0
 }
 function cleanup_pools
@@ -655,7 +653,7 @@ function resume_test
 for ((i=0; i<2; i=i+1)); do
 mess_send_file /$streamfs/$stream_num
-log_mustnot zfs recv -suv $recvfs </$streamfs/$stream_num
+log_mustnot eval "zfs recv -suv $recvfs </$streamfs/$stream_num"
 stream_num=$((stream_num+1))
 token=$(zfs get -Hp -o value receive_resume_token $recvfs)
@@ -665,10 +663,8 @@ function resume_test
 log_must eval "zfs send -nvt $token > /dev/null"
 log_must eval "zfs send -t $token >/$streamfs/$stream_num"
-[[ -f /$streamfs/$stream_num ]] || \
-log_fail "NO FILE /$streamfs/$stream_num"
 done
-log_must zfs recv -suv $recvfs </$streamfs/$stream_num
+log_must eval "zfs recv -suv $recvfs </$streamfs/$stream_num"
 }
 function get_resume_token
@@ -679,11 +675,8 @@ function get_resume_token
 log_must eval "$sendcmd > /$streamfs/1"
 mess_send_file /$streamfs/1
-log_mustnot zfs recv -suv $recvfs < /$streamfs/1 2>&1
-token=$(zfs get -Hp -o value receive_resume_token $recvfs)
-echo "$token" > /$streamfs/resume_token
-return 0
+log_mustnot eval "zfs recv -suv $recvfs < /$streamfs/1 2>&1"
+get_prop receive_resume_token $recvfs > /$streamfs/resume_token
 }
 #

View File

@@ -113,8 +113,7 @@ log_onexit cleanup_all
 setup_all
-[[ -n $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 typeset -i COUNT=10
 typeset -i i=0

View File

@@ -55,16 +55,14 @@ function cleanup
 [[ $? -eq 0 ]] && \
 log_must zfs destroy $SNAPFS
-[[ -e $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
 }
 log_assert "Verify that a rollback to a previous snapshot succeeds."
 log_onexit cleanup
-[[ -n $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 typeset -i COUNT=10

View File

@@ -59,16 +59,14 @@ function cleanup
 [[ $? -eq 0 ]] && \
 log_must zfs destroy $SNAPFS
-[[ -e $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
 }
 log_assert "Verify rollback is with respect to latest snapshot."
 log_onexit cleanup
-[[ -n $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 typeset -i COUNT=10
@@ -109,8 +107,7 @@ while [[ $i -le $COUNT ]]; do
 (( i = i + 1 ))
 done
-[[ -n $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/original_file* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -f $TESTDIR/original_file*
 #
 # Now rollback to latest snapshot

View File

@@ -100,7 +100,7 @@ log_must zfs snapshot $SNAPPOOL.1
 #
 # https://github.com/openzfs/zfs/issues/6143
 #
-log_must df >/dev/null
+log_must eval "df >/dev/null"
 export __ZFS_POOL_RESTRICT="$TESTPOOL"
 log_must zfs unmount -a
@@ -110,6 +110,6 @@ unset __ZFS_POOL_RESTRICT
 log_must touch /$TESTPOOL/$TESTFILE/$TESTFILE.1
 log_must zfs rollback $SNAPPOOL.1
-log_must df >/dev/null
+log_must eval "df >/dev/null"
 log_pass "Rollbacks succeed when nested file systems are present."

View File

@@ -54,7 +54,7 @@ function cleanup
 log_must zfs destroy $SNAPFS
 fi
-log_must rm -rf $SNAPDIR $TESTDIR/* > /dev/null 2>&1
+log_must rm -rf $SNAPDIR $TESTDIR/*
 }
 log_assert "Verify a file system snapshot is identical to original."

View File

@@ -51,26 +51,13 @@ verify_runnable "both"
 function cleanup
 {
-if [[ -d $CWD ]]; then
-cd $CWD || log_fail "Could not cd $CWD"
-fi
-snapexists $SNAPFS
-if [[ $? -eq 0 ]]; then
-log_must zfs destroy $SNAPFS
-fi
-if [[ -e $SNAPDIR ]]; then
-log_must rm -rf $SNAPDIR > /dev/null 2>&1
-fi
-if [[ -e $TESTDIR ]]; then
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
-fi
-if [[ -d "$SNAPSHOT_TARDIR" ]]; then
-log_must rm -rf $SNAPSHOT_TARDIR > /dev/null 2>&1
-fi
+[ -d $CWD ] && log_must cd $CWD
+snapexists $SNAPFS && log_must zfs destroy $SNAPFS
+[ -e $SNAPDIR ] && log_must rm -rf $SNAPDIR
+[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
+[ -d "$SNAPSHOT_TARDIR" ] && log_must rm -rf $SNAPSHOT_TARDIR
 }
 log_assert "Verify an archive of a file system is identical to " \
@@ -82,8 +69,7 @@ log_onexit cleanup
 typeset -i COUNT=21
 typeset OP=create
-[[ -n $TESTDIR ]] && \
-rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && rm -rf $TESTDIR/*
 log_note "Create files in the zfs filesystem..."
@@ -96,33 +82,32 @@ done
 log_note "Create a tarball from $TESTDIR contents..."
 CWD=$PWD
-cd $TESTDIR || log_fail "Could not cd $TESTDIR"
+log_must cd $TESTDIR
 log_must tar cf $SNAPSHOT_TARDIR/original.tar .
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
 log_note "Create a snapshot and mount it..."
 log_must zfs snapshot $SNAPFS
 log_note "Remove all of the original files..."
-log_must rm -f $TESTDIR/file* > /dev/null 2>&1
+log_must rm -f $TESTDIR/file*
 log_note "Create tarball of snapshot..."
 CWD=$PWD
-cd $SNAPDIR || log_fail "Could not cd $SNAPDIR"
+log_must cd $SNAPDIR
 log_must tar cf $SNAPSHOT_TARDIR/snapshot.tar .
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
-log_must mkdir $TESTDIR/original
-log_must mkdir $TESTDIR/snapshot
+log_must mkdir $TESTDIR/original $TESTDIR/snapshot
 CWD=$PWD
-cd $TESTDIR/original || log_fail "Could not cd $TESTDIR/original"
+log_must cd $TESTDIR/original
 log_must tar xf $SNAPSHOT_TARDIR/original.tar
-cd $TESTDIR/snapshot || log_fail "Could not cd $TESTDIR/snapshot"
+log_must cd $TESTDIR/snapshot
 log_must tar xf $SNAPSHOT_TARDIR/snapshot.tar
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
 log_must directory_diff $TESTDIR/original $TESTDIR/snapshot
 log_pass "Directory structures match."

View File

@@ -49,20 +49,17 @@ function cleanup
 {
 typeset -i i=1
 while [ $i -lt $COUNT ]; do
-snapexists $SNAPFS.$i
-if [[ $? -eq 0 ]]; then
-log_must zfs destroy $SNAPFS.$i
-fi
+snapexists $SNAPFS.$i && log_must zfs destroy $SNAPFS.$i
-if [[ -e $SNAPDIR.$i ]]; then
-log_must rm -rf $SNAPDIR.$i > /dev/null 2>&1
+if [ -e $SNAPDIR.$i ]; then
+log_must rm -rf $SNAPDIR.$i
 fi
 (( i = i + 1 ))
 done
-if [[ -e $TESTDIR ]]; then
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+if [ -e $TESTDIR ]; then
+log_must rm -rf $TESTDIR/*
 fi
 }
@@ -70,8 +67,7 @@ log_assert "Verify many snapshots of a file system can be taken."
 log_onexit cleanup
-[[ -n $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 typeset -i COUNT=10
@@ -86,8 +82,7 @@ while [[ $i -lt $COUNT ]]; do
 done
 log_note "Remove all of the original files"
-[[ -n $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/file* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/file*
 i=1
 while [[ $i -lt $COUNT ]]; do

View File

@@ -48,20 +48,16 @@ verify_runnable "both"
 function cleanup
 {
-snapexists $SNAPFS
-[[ $? -eq 0 ]] && \
-log_must zfs destroy $SNAPFS
+snapexists $SNAPFS && log_must zfs destroy $SNAPFS
-[[ -e $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
 }
 log_assert "Verify that a snapshot of an empty file system remains empty."
 log_onexit cleanup
-[[ -n $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 log_must zfs snapshot $SNAPFS
 FILE_COUNT=`ls -Al $SNAPDIR | grep -v "total 0" | wc -l`

View File

@@ -49,17 +49,14 @@ verify_runnable "both"
 function cleanup
 {
-snapexists $SNAPCTR
-if [[ $? -eq 0 ]]; then
-log_must zfs destroy $SNAPCTR
-fi
+snapexists $SNAPCTR && log_must zfs destroy $SNAPCTR
-if [[ -e $SNAPDIR1 ]]; then
-log_must rm -rf $SNAPDIR1 > /dev/null 2>&1
+if [ -e $SNAPDIR1 ]; then
+log_must rm -rf $SNAPDIR1
 fi
-if [[ -e $TESTDIR ]]; then
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+if [ -e $TESTDIR ]; then
+log_must rm -rf $TESTDIR/*
 fi
 }

View File

@@ -51,24 +51,21 @@ verify_runnable "both"
 function cleanup
 {
 if [[ -d $CWD ]]; then
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
 fi
-snapexists $SNAPCTR
-if [[ $? -eq 0 ]]; then
-log_must zfs destroy $SNAPCTR
-fi
+snapexists $SNAPCTR && log_must zfs destroy $SNAPCTR
-if [[ -e $SNAPDIR1 ]]; then
-log_must rm -rf $SNAPDIR1 > /dev/null 2>&1
+if [ -e $SNAPDIR1 ]; then
+log_must rm -rf $SNAPDIR1
 fi
-if [[ -e $TESTDIR1 ]]; then
-log_must rm -rf $TESTDIR1/* > /dev/null 2>&1
+if [ -e $TESTDIR1 ]; then
+log_must rm -rf $TESTDIR1/*
 fi
-if [[ -d "$SNAPSHOT_TARDIR" ]]; then
-log_must rm -rf $SNAPSHOT_TARDIR > /dev/null 2>&1
+if [ -d "$SNAPSHOT_TARDIR" ]; then
+log_must rm -rf $SNAPSHOT_TARDIR
 fi
 }
@@ -81,7 +78,7 @@ log_onexit cleanup
 typeset -i COUNT=21
 typeset OP=create
-[[ -n $TESTDIR1 ]] && rm -rf $TESTDIR1/* > /dev/null 2>&1
+[ -n $TESTDIR1 ] && rm -rf $TESTDIR1/*
 log_note "Create files in the zfs dataset ..."
@@ -94,33 +91,32 @@ done
 log_note "Create a tarball from $TESTDIR1 contents..."
 CWD=$PWD
-cd $TESTDIR1 || log_fail "Could not cd $TESTDIR1"
+log_must cd $TESTDIR1
 log_must tar cf $SNAPSHOT_TARDIR/original.tar .
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
 log_note "Create a snapshot and mount it..."
 log_must zfs snapshot $SNAPCTR
 log_note "Remove all of the original files..."
-log_must rm -f $TESTDIR1/file* > /dev/null 2>&1
+log_must rm -f $TESTDIR1/file*
 log_note "Create tarball of snapshot..."
 CWD=$PWD
-cd $SNAPDIR1 || log_fail "Could not cd $SNAPDIR1"
+log_must cd $SNAPDIR1
 log_must tar cf $SNAPSHOT_TARDIR/snapshot.tar .
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
-log_must mkdir $TESTDIR1/original
-log_must mkdir $TESTDIR1/snapshot
+log_must mkdir $TESTDIR1/original mkdir $TESTDIR1/snapshot
 CWD=$PWD
-cd $TESTDIR1/original || log_fail "Could not cd $TESTDIR1/original"
+log_must cd $TESTDIR1/original
 log_must tar xf $SNAPSHOT_TARDIR/original.tar
-cd $TESTDIR1/snapshot || log_fail "Could not cd $TESTDIR1/snapshot"
+log_must cd $TESTDIR1/snapshot
 log_must tar xf $SNAPSHOT_TARDIR/snapshot.tar
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
 log_must directory_diff $TESTDIR1/original $TESTDIR1/snapshot
 log_pass "Directory structures match."

View File

@@ -49,24 +49,21 @@ function cleanup
 {
 typeset -i i=1
 while [ $i -lt $COUNT ]; do
-snapexists $SNAPCTR.$i
-if [[ $? -eq 0 ]]; then
-log_must zfs destroy $SNAPCTR.$i
-fi
+snapexists $SNAPCTR.$i && log_must zfs destroy $SNAPCTR.$i
-if [[ -e $SNAPDIR.$i ]]; then
-log_must rm -rf $SNAPDIR1.$i > /dev/null 2>&1
+if [ -e $SNAPDIR.$i ]; then
+log_must rm -rf $SNAPDIR1.$i
 fi
 (( i = i + 1 ))
 done
-if [[ -e $SNAPDIR1 ]]; then
-log_must rm -rf $SNAPDIR1 > /dev/null 2>&1
+if [ -e $SNAPDIR1 ]; then
+log_must rm -rf $SNAPDIR1
 fi
-if [[ -e $TESTDIR ]]; then
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+if [ -e $TESTDIR ]; then
+log_must rm -rf $TESTDIR/*
 fi
 }
@@ -74,8 +71,7 @@ log_assert "Verify that many snapshots can be made on a zfs dataset."
 log_onexit cleanup
-[[ -n $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 typeset -i COUNT=10
@@ -90,12 +86,11 @@ while [[ $i -lt $COUNT ]]; do
 done
 log_note "Remove all of the original files"
-[[ -n $TESTDIR ]] && \
-log_must rm -rf $TESTDIR1/file* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -f $TESTDIR1/file*
 i=1
 while [[ $i -lt $COUNT ]]; do
-FILECOUNT=`ls $SNAPDIR1.$i/file* | wc -l`
+FILECOUNT=`echo $SNAPDIR1.$i/file* | wc -w`
 typeset j=1
 while [ $j -lt $FILECOUNT ]; do
 log_must file_check $SNAPDIR1.$i/file$j $j

View File

@@ -55,16 +55,14 @@ function cleanup
 (( i = i + 1 ))
 done
-[[ -e $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
 }
 log_assert "Verify that destroying snapshots returns space to the pool."
 log_onexit cleanup
-[[ -n $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 typeset -i COUNT=10
@@ -82,7 +80,7 @@ done
 typeset -i i=1
 while [[ $i -lt $COUNT ]]; do
-log_must rm -rf $TESTDIR/file$i > /dev/null 2>&1
+log_must rm -f $TESTDIR/file$i
 log_must zfs destroy $SNAPFS.$i
 (( i = i + 1 ))

View File

@@ -53,15 +53,13 @@ function cleanup
 {
 snapexists $SNAPPOOL && destroy_dataset $SNAPPOOL -r
-[[ -e $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
 }
 log_assert "Verify that rollback to a snapshot created by snapshot -r succeeds."
 log_onexit cleanup
-[[ -n $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 typeset -i COUNT=10

View File

@@ -51,8 +51,7 @@ function cleanup
 datasetexists $ctrfs && destroy_dataset $ctrfs -r
 snapexists $snappool && destroy_dataset $snappool -r
-[[ -e $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -e $TESTDIR ] && log_must rm -rf $TESTDIR/*
 }
 log_assert "Verify snapshots from 'snapshot -r' can be used for zfs send/recv"
@@ -67,8 +66,7 @@ snapctrfs=$ctrfs@$TESTSNAP
 fsdir=/$ctrfs
 snapdir=$fsdir/.zfs/snapshot/$TESTSNAP
-[[ -n $TESTDIR ]] && \
-log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n $TESTDIR ] && log_must rm -rf $TESTDIR/*
 typeset -i COUNT=10

View File

@@ -48,8 +48,7 @@ verify_runnable "both"
 function cleanup
 {
-[[ -e $TESTDIR1 ]] && \
-log_must rm -rf $TESTDIR1/* > /dev/null 2>&1
+[ -e $TESTDIR1 ] && log_must rm -rf $TESTDIR1/*
 snapexists $SNAPCTR && destroy_dataset $SNAPCTR

View File

@@ -33,7 +33,7 @@ DISK=${DISKS%% *}
 log_must zpool create -f $TESTPOOL $DISK
 conf="$TESTDIR/vz001"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 assert_top_zap $TESTPOOL $DISK "$conf"
 assert_leaf_zap $TESTPOOL $DISK "$conf"

View File

@@ -33,7 +33,7 @@ log_assert "Per-vdev ZAPs are created on pool creation with many disks."
 log_must zpool create -f $TESTPOOL $DISKS
 conf="$TESTDIR/vz002"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 assert_has_sentinel "$conf"
 for DISK in $DISKS; do

View File

@@ -34,7 +34,7 @@ log_assert "Per-vdev ZAPs are created on pool creation with multi-level vdev "\
 log_must zpool create -f $TESTPOOL mirror $DISKS
 conf="$TESTDIR/vz003"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 assert_has_sentinel "$conf"
 assert_top_zap $TESTPOOL "type: 'mirror'" "$conf"

View File

@@ -38,7 +38,7 @@ log_must zpool create -f $TESTPOOL $DISK
 # Make the pool.
 conf="$TESTDIR/vz004"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 assert_has_sentinel "$conf"
 orig_top=$(get_top_vd_zap $DISK $conf)
 orig_leaf=$(get_leaf_vd_zap $DISK $conf)
@@ -51,7 +51,7 @@ assert_zap_common $TESTPOOL $DISK "top" $orig_top
 disk2=$(echo $DISKS | awk '{print $2}')
 log_must zpool attach $TESTPOOL $DISK $disk2
 log_must zpool wait -t resilver $TESTPOOL
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 # Ensure top-level ZAP was transferred successfully.
 new_top=$(get_top_vd_zap "type: 'mirror'" $conf)
@@ -80,7 +80,7 @@ dsk2_leaf=$(get_leaf_vd_zap $disk2 $conf)
 #
 log_must zpool detach $TESTPOOL $DISK
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 final_top=$(get_top_vd_zap $disk2 $conf)
 final_leaf=$(get_leaf_vd_zap $disk2 $conf)

View File

@@ -35,7 +35,7 @@ log_must zpool create -f $TESTPOOL $DISK
 # Make the pool.
 conf="$TESTDIR/vz005"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 assert_has_sentinel "$conf"
 orig_top=$(get_top_vd_zap $DISK $conf)
 orig_leaf=$(get_leaf_vd_zap $DISK $conf)
@@ -50,7 +50,7 @@ log_must zpool export $TESTPOOL
 log_must zpool import $TESTPOOL
 # Verify that ZAPs persisted.
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 new_top=$(get_top_vd_zap $DISK $conf)
 new_leaf=$(get_leaf_vd_zap $DISK $conf)

View File

@@ -36,7 +36,7 @@ log_assert "Per-vdev ZAPs are created for added vdevs."
 log_must zpool add -f $TESTPOOL ${DISK_ARR[1]}
 conf="$TESTDIR/vz006"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 assert_has_sentinel "$conf"
 orig_top=$(get_top_vd_zap ${DISK_ARR[1]} $conf)

View File

@@ -36,7 +36,7 @@ log_must zpool create -f $TESTPOOL mirror ${DISK_ARR[0]} ${DISK_ARR[1]}
 log_assert "Per-vdev ZAPs persist correctly on the original pool after split."
 conf="$TESTDIR/vz007"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
 assert_has_sentinel "$conf"
 orig_top=$(get_top_vd_zap "type: 'mirror'" $conf)

View File

@@ -43,14 +43,11 @@ fi
 function check_for
 {
-grep "^${1}," $tmpfile >/dev/null 2>/dev/null
-if [ $? -ne 0 ]; then
-log_fail "cannot find stats for $1"
-fi
+log_must grep -q "^${1}," $tmpfile
 }
 # by default, all stats and histograms for all pools
-log_must zpool_influxdb > $tmpfile
+log_must eval "zpool_influxdb > $tmpfile"
 STATS="
 zpool_io_size
@@ -64,8 +61,8 @@ for stat in $STATS; do
 done
 # scan stats aren't expected to be there until after a scan has started
-zpool scrub $TESTPOOL
-zpool_influxdb > $tmpfile
+log_must zpool scrub $TESTPOOL
+log_must eval "zpool_influxdb > $tmpfile"
 check_for zpool_scan_stats
 log_pass "zpool_influxdb gathers statistics"

View File

@@ -46,13 +46,13 @@ fi
 for swapdev in $SAVESWAPDEVS
 do
 if ! is_swap_inuse $swapdev ; then
-log_must swap_setup $swapdev >/dev/null 2>&1
+swap_setup $swapdev
 fi
 done
 voldev=${ZVOL_DEVDIR}/$TESTPOOL/$TESTVOL
 if is_swap_inuse $voldev ; then
-log_must swap_cleanup $voldev
+swap_cleanup $voldev
 fi
 default_zvol_cleanup