ZTS: normalize on use of `sync_pool` and `sync_all_pools`

- Replace manual `zpool sync` invocations with the helpers
- Don't wrap `sync_pool` in `log_must`, since `sync_pool` already uses `log_must` internally
- Replace many (but not all) uses of `sync` with `sync_pool`

This makes the tests more consistent and makes searching easier.
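For illustration, a minimal sketch of the convention being normalized on (not part of the diff; $TESTPOOL stands in for any test pool):

    # Before: raw sync commands, inconsistently wrapped in log_must
    log_must zpool sync $TESTPOOL
    log_must sync
    log_must sync_pool $TESTPOOL    # redundant: sync_pool already calls log_must

    # After: call the libtest.shlib helpers directly; they log failures themselves
    sync_pool $TESTPOOL
    sync_all_pools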

Reviewed-by: George Melikov <mail@gmelikov.ru>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Allan Jude <allan@klarasystems.com>
Closes #12894
Allan Jude, 2022-01-06 13:57:09 -05:00, committed by GitHub
parent 6b2e32019e
commit 7454275a53
95 changed files with 148 additions and 118 deletions

@@ -595,7 +595,7 @@ function list_file_blocks # input_file
 	else
 		AWK='awk'
 	fi
-	log_must zpool sync -f
+	sync_all_pools true
 	zdb -dddddd $ds $objnum | $AWK -v pad=$((4<<20)) -v bs=512 '
 	    /^$/ { looking = 0 }
 	    looking {

@@ -3104,6 +3104,7 @@ function datasetcksum
 {
 	typeset cksum
 	sync
+	sync_all_pools
 	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
 	    | awk -F= '{print $7}')
 	echo $cksum
@@ -3519,6 +3520,24 @@ function sync_pool #pool <force>
 	return 0
 }
 
+#
+# Sync all pools
+#
+# $1 boolean to force uberblock (and config including zpool cache file) update
+#
+function sync_all_pools #<force>
+{
+	typeset force=${1:-false}
+
+	if [[ $force == true ]]; then
+		log_must zpool sync -f
+	else
+		log_must zpool sync
+	fi
+
+	return 0
+}
+
 #
 # Wait for zpool 'freeing' property drops to zero.
 #
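With the definition above, the optional force argument simply selects the forced form of `zpool sync`; for example (illustrative usage only):

    sync_all_pools        # runs: log_must zpool sync
    sync_all_pools true   # runs: log_must zpool sync -f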

@@ -85,7 +85,7 @@ function check_removal
 		    bs=1M count=$blocks
 		((blocks = blocks + 25))
 	done
-	log_must sync_pool $TESTPOOL
+	sync_pool $TESTPOOL
 	log_must zpool list -v $TESTPOOL
 
 	# Verify the files were written in the special class vdevs
@@ -98,7 +98,7 @@ function check_removal
 
 	log_must zpool remove $TESTPOOL $CLASS_DISK0
 	sleep 5
-	log_must sync_pool $TESTPOOL
+	sync_pool $TESTPOOL
 	sleep 1
 
 	log_must zdb -bbcc $TESTPOOL

@@ -53,7 +53,7 @@ log_must zpool list -v $TESTPOOL
 
 log_must zpool remove $TESTPOOL $CLASS_DISK0
 sleep 5
-log_must sync_pool $TESTPOOL
+sync_pool $TESTPOOL
 sleep 1
 
 log_must zdb -bbcc $TESTPOOL

@@ -75,7 +75,7 @@ log_assert "dbufstats produces correct statistics"
 log_onexit cleanup
 
 log_must file_write -o create -f "$TESTDIR/file" -b 1048576 -c 20 -d R
-log_must zpool sync
+sync_all_pools
 
 log_must eval "kstat dbufs > $DBUFS_FILE"
 log_must eval "kstat dbufstats '' > $DBUFSTATS_FILE"

@@ -56,7 +56,7 @@ log_assert "dbufs move from mru to mfu list"
 log_onexit cleanup
 
 log_must file_write -o create -f "$TESTDIR/file" -b 1048576 -c 1 -d R
-log_must zpool sync
+sync_all_pools
 
 objid=$(get_objnum "$TESTDIR/file")
 log_note "Object ID for $TESTDIR/file is $objid"

@@ -46,7 +46,7 @@ log_must_program $TESTPOOL - <<-EOF
 EOF
 
 log_must mkdir $dir
-sync
+sync_all_pools
 
 log_must_program $TESTPOOL - <<-EOF
 	ans, setpoint = zfs.get_prop("$fs", "written@$TESTSNAP")

@@ -128,7 +128,7 @@ function histo_populate_test_pool
 	# to the device. This 'sync' command prevents that from
 	# happening.
 	####################
-	log_must zpool sync ${pool}
+	sync_pool ${pool}
 }
 function histo_check_test_pool
 {

@@ -40,7 +40,7 @@ verify_runnable "both"
 verify_disk_count "$DISKS" 2
 default_mirror_setup_noexit $DISKS
 
-log_must zpool sync
+sync_all_pools
 
 set -A bad_flags a b c e g h i j k l n o p q r s t u v w x y \
     B C D E F G H I J K L M N O P Q R S T U V W X Y Z \

@@ -76,7 +76,7 @@ for x in $(seq 0 7); do
 	mkdir $TESTDIR/dir$x
 done
 
-log_must zpool sync
+sync_all_pools
 
 # Get list of all objects, but filter out user/group objects which don't
 # appear when using object or object range arguments

@@ -47,7 +47,7 @@ verify_disk_count "$DISKS" 2
 default_mirror_setup_noexit $DISKS
 
 file_write -o create -w -f $init_data -b $blksize -c $write_count
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 output=$(zdb -r $TESTPOOL/$TESTFS file1 $tmpfile)
 log_must cmp $init_data $tmpfile

@@ -49,7 +49,7 @@ verify_disk_count "$DISKS" 2
 default_mirror_setup_noexit $DISKS
 file_write -o create -w -f $init_data -b $blksize -c $write_count
 log_must echo "zfs" >> $init_data
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 output=$(zdb -r $TESTPOOL/$TESTFS file1 $tmpfile)
 log_must cmp $init_data $tmpfile

@@ -67,7 +67,7 @@ done
 #
 # Sync up the filesystem
 #
-sync
+sync_all_pools
 
 #
 # Verify 'zfs list' can correctly list the space charged

@@ -66,7 +66,7 @@ function test_condense
 	# sync between each write to make sure a new entry is created
 	for i in {0..4}; do
 		log_must mkfile 5m /$TESTPOOL/$TESTCLONE/testfile$i
-		log_must zpool sync $TESTPOOL
+		sync_pool $TESTPOOL
 	done
 
 	check_ll_len "5 entries" "Unexpected livelist size"
@@ -74,7 +74,7 @@ function test_condense
 	# sync between each write to allow for a condense of the previous entry
 	for i in {0..4}; do
 		log_must mkfile 5m /$TESTPOOL/$TESTCLONE/testfile$i
-		log_must zpool sync $TESTPOOL
+		sync_pool $TESTPOOL
 	done
 
 	check_ll_len "6 entries" "Condense did not occur"
@@ -91,7 +91,7 @@ function test_deactivated
 
 	log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE0
 	log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE1
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 	# snapshot and clone share 'atestfile', 33 percent
 	check_livelist_gone
 	log_must zfs destroy -R $TESTPOOL/$TESTCLONE
@@ -103,7 +103,7 @@ function test_deactivated
 	log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE0
 	log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE1
 	log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE2
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 	# snapshot and clone share 'atestfile', 25 percent
 	check_livelist_exists $TESTCLONE
 	log_must rm /$TESTPOOL/$TESTCLONE/atestfile

@@ -49,11 +49,11 @@ function delete_race
 	set_tunable32 "$1" 0
 	log_must zfs clone $TESTPOOL/$TESTFS1@snap $TESTPOOL/$TESTCLONE
 	for i in {1..5}; do
-		log_must zpool sync $TESTPOOL
+		sync_pool $TESTPOOL
 		log_must mkfile 5m /$TESTPOOL/$TESTCLONE/out
 	done
 	log_must zfs destroy $TESTPOOL/$TESTCLONE
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 	[[ "1" == "$(get_tunable "$1")" ]] || \
 	    log_fail "delete/condense race test failed"
 }
@@ -63,7 +63,7 @@ function export_race
 	set_tunable32 "$1" 0
 	log_must zfs clone $TESTPOOL/$TESTFS1@snap $TESTPOOL/$TESTCLONE
 	for i in {1..5}; do
-		log_must zpool sync $TESTPOOL
+		sync_pool $TESTPOOL
 		log_must mkfile 5m /$TESTPOOL/$TESTCLONE/out
 	done
 	log_must zpool export $TESTPOOL
@@ -78,12 +78,12 @@ function disable_race
 	set_tunable32 "$1" 0
 	log_must zfs clone $TESTPOOL/$TESTFS1@snap $TESTPOOL/$TESTCLONE
 	for i in {1..5}; do
-		log_must zpool sync $TESTPOOL
+		sync_pool $TESTPOOL
 		log_must mkfile 5m /$TESTPOOL/$TESTCLONE/out
 	done
 	# overwrite the file shared with the origin to trigger disable
 	log_must mkfile 100m /$TESTPOOL/$TESTCLONE/atestfile
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 	[[ "1" == "$(get_tunable "$1")" ]] || \
 	    log_fail "disable/condense race test failed"
 	log_must zfs destroy $TESTPOOL/$TESTCLONE
@@ -95,7 +95,7 @@ log_onexit cleanup
 
 log_must zfs create $TESTPOOL/$TESTFS1
 log_must mkfile 100m /$TESTPOOL/$TESTFS1/atestfile
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 log_must zfs snapshot $TESTPOOL/$TESTFS1@snap
 
 # Reduce livelist size to trigger condense more easily

@@ -54,12 +54,12 @@ function test_dedup
 	# Note: We sync before and after so all dedup blocks belong to the
 	# same TXG, otherwise they won't look identical to the livelist
 	# iterator due to their logical birth TXG being different.
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 	log_must cp /$TESTPOOL/$TESTCLONE/data /$TESTPOOL/$TESTCLONE/data-dup-0
 	log_must cp /$TESTPOOL/$TESTCLONE/data /$TESTPOOL/$TESTCLONE/data-dup-1
 	log_must cp /$TESTPOOL/$TESTCLONE/data /$TESTPOOL/$TESTCLONE/data-dup-2
 	log_must cp /$TESTPOOL/$TESTCLONE/data /$TESTPOOL/$TESTCLONE/data-dup-3
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 	check_livelist_exists $TESTCLONE
 
 	# Introduce "double frees"
@@ -67,10 +67,10 @@ function test_dedup
 	# was what triggered past panics.
 	# Note: Similarly to the previouys step we sync before and after our
 	# our deletions so all the entries end up in the same TXG.
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 	log_must rm /$TESTPOOL/$TESTCLONE/data-dup-2
 	log_must rm /$TESTPOOL/$TESTCLONE/data-dup-3
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 	check_livelist_exists $TESTCLONE
 
 	log_must zfs destroy $TESTPOOL/$TESTCLONE

@@ -47,7 +47,7 @@ function cleanup
 function clone_write_file
 {
 	log_must mkfile 1m /$TESTPOOL/$1/$2
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 }
 
 function test_one_empty

@@ -153,7 +153,7 @@ function check_livelist_exists
 function check_livelist_gone
 {
 	log_must zpool wait -t free $TESTPOOL
-	zpool sync
+	sync_all_pools
 	zdb -vvvvv $TESTPOOL | grep "Livelist" && \
 	    log_fail "zdb found Livelist after the clone is deleted."
 }

@@ -65,7 +65,7 @@ log_must zfs clone $TESTPOOL2/$TESTFS@snap $TESTPOOL2/$TESTCLONE
 # Create initial files and pause condense zthr on next execution
 log_must mkfile 10m /$TESTPOOL2/$TESTCLONE/A
 log_must mkfile 1m /$TESTPOOL2/$TESTCLONE/B
-log_must zpool sync $TESTPOOL2
+sync_pool $TESTPOOL2
 set_tunable32 LIVELIST_CONDENSE_SYNC_PAUSE 1
 
 # Add a new dev and remove the old one
@@ -76,15 +76,15 @@ wait_for_removal $TESTPOOL2
 set_tunable32 LIVELIST_CONDENSE_NEW_ALLOC 0
 # Trigger a condense
 log_must mkfile 10m /$TESTPOOL2/$TESTCLONE/A
-log_must zpool sync $TESTPOOL2
+sync_pool $TESTPOOL2
 log_must mkfile 10m /$TESTPOOL2/$TESTCLONE/A
-log_must zpool sync $TESTPOOL2
+sync_pool $TESTPOOL2
 # Write remapped blkptrs which will modify the livelist mid-condense
 log_must mkfile 1m /$TESTPOOL2/$TESTCLONE/B
 
 # Resume condense thr
 set_tunable32 LIVELIST_CONDENSE_SYNC_PAUSE 0
-log_must zpool sync $TESTPOOL2
+sync_pool $TESTPOOL2
 # Check that we've added new ALLOC blkptrs during the condense
 [[ "0" < "$(get_tunable LIVELIST_CONDENSE_NEW_ALLOC)" ]] || \
     log_fail "removal/condense test failed"

@@ -155,7 +155,7 @@ CRYPT_MNTPFS="$(get_prop mountpoint $TESTFS/crypt)"
 log_must touch $CRYPT_MNTPFS/file.dat
 log_must mount $RO $TESTFS/crypt $CRYPT_MNTPFS
 log_must umount -f $CRYPT_MNTPFS
-zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 # 6. Re-import the pool readonly
 log_must zpool export $TESTPOOL

@@ -87,7 +87,7 @@ log_must zfs snapshot $inc_snap
 log_must eval "zfs send -i $init_snap $inc_snap > $inc_bkup"
 
 log_must touch /$TESTDIR/bar
-sync
+sync_all_pools
 
 set -A badargs \
     "" "nonexistent-snap" "blah@blah" "-d" "-d nonexistent-dataset" \

@@ -65,14 +65,14 @@ origdir=$(get_prop mountpoint $orig)
 # 2. Create two equal-sized large files.
 log_must mkfile 5M $origdir/file1
 log_must mkfile 5M $origdir/file2
-log_must sync
+sync_all_pools
 
 # 3. Snapshot the filesystem.
 log_must zfs snapshot $orig@1
 
 # 4. Remove one of the two large files.
 log_must rm $origdir/file2
-log_must sync
+sync_all_pools
 
 # 5. Create a refquota larger than one file, but smaller than both.
 log_must zfs set refquota=8M $orig

@@ -116,6 +116,7 @@ function setup_snap_env
 		if datasetnonexists $snap; then
 			log_must cp /etc/passwd $fname
 			if is_linux || is_freebsd; then
+				sync_all_pools
 				log_must sync
 			else
 				#

@@ -62,9 +62,9 @@ test_pool ()
 	log_must zfs snapshot $POOL/fs@a
 	while true; do
 		log_must find $mntpnt/ -type f -delete
-		sync
+		sync_all_pools
 		log_must mkfiles "$mntpnt/" 4000
-		sync
+		sync_all_pools
 		# check if we started reusing objects
 		object=$(ls -i $mntpnt | sort -n | awk -v object=$object \
 		    '{if ($1 <= object) {exit 1}} END {print $1}')

@@ -44,7 +44,7 @@ DISK3="$(echo $DISKS | cut -d' ' -f3)"
 
 log_must dd if=/dev/urandom of=/$TESTDIR/testfile bs=10M count=1
-log_must zpool sync
+sync_all_pools
 
 log_must zpool offline -f $TESTPOOL $DISK3
 log_must wait_for_degraded $TESTPOOL

@@ -175,7 +175,8 @@ function do_testing #<clear type> <vdevs>
 	esac
 	dd if=/dev/zero of=$fbase.$i seek=512 bs=1024 count=$wcount conv=notrunc \
 	    > /dev/null 2>&1
-	log_must sync
+	sync_all_pools
+	log_must sync #ensure the vdev files are written out
 	log_must zpool scrub -w $TESTPOOL1
 	check_err $TESTPOOL1 && \

@@ -111,7 +111,7 @@ log_must zpool create -f -m $MOUNTDIR -o failmode=continue $POOL raidz $VDEV1 $V
 log_must zfs set compression=off recordsize=16k $POOL
 # create a file full of zeros
 log_must mkfile -v $FILESIZE $FILEPATH
-log_must zpool sync $POOL
+sync_pool $POOL
 
 # run once and observe the checksum errors
 damage_and_repair 1

@@ -112,7 +112,7 @@ function do_dup_test
 
 	if [ "$RW" == "write" ] ; then
 		log_must mkfile $FILESIZE $FILEPATH
-		log_must zpool sync $POOL
+		sync_pool $POOL
 	fi
 
 	log_must zinject -c all

@@ -97,7 +97,7 @@ function do_test
 
 	if [ "$RW" == "write" ] ; then
 		log_must mkfile $FILESIZE $MOUNTDIR/file
-		log_must zpool sync $POOL
+		sync_pool $POOL
 	else
 		log_must zpool scrub $POOL
 		wait_scrubbed $POOL

@@ -42,7 +42,7 @@ DISK1=${DISKS%% *}
 
 log_must zpool create -f $TESTPOOL $DISK1
 log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=1M count=30
-log_must sync
+sync_all_pools
 
 log_must zpool initialize $TESTPOOL
@@ -52,7 +52,7 @@ log_must zdb -cc $TESTPOOL
 	log_fail "Initializing did not start"
 
 log_must dd if=/dev/urandom of=/$TESTPOOL/file2 bs=1M count=30
-log_must sync
+sync_all_pools
 
 log_must zdb -cc $TESTPOOL

@@ -49,12 +49,12 @@ log_must truncate -s $SPA_MINDEVSIZE $DEVICE2 $DEVICE3 $DEVICE4 $DEVICE5
 
 log_must zpool create -f $TESTPOOL $DEVICE1 $DEVICE2 \
     log $DEVICE3 cache $DEVICE4 spare $DEVICE5
-log_must zpool sync
+sync_all_pools
 
 # Remove each type of vdev and verify the label can be cleared.
 for dev in $DEVICE5 $DEVICE4 $DEVICE3 $DEVICE2; do
 	log_must zpool remove $TESTPOOL $dev
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL true
 	log_must zpool labelclear $dev
 	log_mustnot zdb -lq $dev
 done

@@ -77,7 +77,7 @@ for disk in $DISKLIST; do
 
 	i=0
 	while [[ $i -lt ${#args[*]} ]]; do
-		log_must sync_pool $TESTPOOL
+		sync_pool $TESTPOOL
 		log_must zpool offline $TESTPOOL $disk
 		check_state $TESTPOOL $disk "offline"
 		if [[ $? != 0 ]]; then

@@ -55,6 +55,7 @@ log_must zpool reopen
 log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
 # Write some data to the pool
 log_must generate_random_file /$TESTPOOL/data $SMALL_FILE_SIZE
+sync_pool $TESTPOOL
 # 4. "Plug back" disk.
 insert_disk $REMOVED_DISK $scsi_host
 # 5. Reopen a pool and verify if removed disk is marked online again.

@@ -55,6 +55,7 @@ log_must zpool reopen $TESTPOOL
 log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
 # Write some data to the pool
 log_must generate_random_file /$TESTPOOL/data $SMALL_FILE_SIZE
+sync_pool $TESTPOOL
 # 4. "Plug back" disk.
 insert_disk $REMOVED_DISK $scsi_host
 # 5. Reopen a pool and verify if removed disk is marked online again.

@@ -64,6 +64,7 @@ log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
 # 3. Write a test file to the pool and calculate its checksum.
 TESTFILE=/$TESTPOOL/data
 log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
+sync_pool $TESTPOOL
 TESTFILE_MD5=$(md5digest $TESTFILE)
 
 # 4. Execute scrub.
View File

@ -62,6 +62,7 @@ log_must zpool reopen -n $TESTPOOL
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail" log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
# 3. Write test file to pool. # 3. Write test file to pool.
log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
sync_pool $TESTPOOL
# 4. Execute scrub. # 4. Execute scrub.
# add delay to I/O requests for remaining disk in pool # add delay to I/O requests for remaining disk in pool
log_must zinject -d $DISK2 -D125:1 $TESTPOOL log_must zinject -d $DISK2 -D125:1 $TESTPOOL

@@ -60,6 +60,7 @@ log_must zpool reopen $TESTPOOL
 log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
 # 3. Write test file to pool.
 log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
+sync_pool $TESTPOOL
 
 # 4. "Plug back" disk.
 insert_disk $REMOVED_DISK $scsi_host

@@ -58,12 +58,12 @@ mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
 
 # 1. Write some data and detach the first drive so it has resilver work to do
 log_must file_write -b 524288 -c 1024 -o create -d 0 -f $mntpnt/biggerfile1
-log_must sync
+sync_all_pools
 log_must zpool detach $TESTPOOL $DISK2
 
 # 2. Repeat the process with a second disk
 log_must file_write -b 524288 -c 1024 -o create -d 0 -f $mntpnt/biggerfile2
-log_must sync
+sync_all_pools
 log_must zpool detach $TESTPOOL $DISK3
 
 # 3. Reattach the drives, causing the second drive's resilver to be deferred

@@ -61,7 +61,7 @@ log_assert "Verify scrub, scrub -p, and scrub -s show the right status."
 # Create 1G of additional data
 mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
 log_must file_write -b 1048576 -c 1024 -o create -d 0 -f $mntpnt/biggerfile
-log_must sync
+sync_all_pools
 
 log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
 log_must zpool scrub $TESTPOOL

@@ -50,7 +50,7 @@ log_assert "Scrubs and self healing must work with additional copies"
 
 log_must zfs create -o copies=3 $TESTPOOL/$TESTFS2
 typeset mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS2)
 log_must mkfile 10m $mntpnt/file
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 log_must zinject -a -t data -C 0,1 -e io $mntpnt/file

@@ -66,7 +66,7 @@ function zpool_split #disk_to_be_offline/online
 	# Create 2G of additional data
 	mntpnt=$(get_prop mountpoint $TESTPOOL)
 	log_must file_write -b 2097152 -c 1024 -o create -d 0 -f $mntpnt/biggerfile
-	log_must sync
+	sync_all_pools
 
 	# temporarily prevent resilvering progress, so it will not finish too early
 	log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1

@@ -66,7 +66,7 @@ log_must mkdir "$TESTDIR"
 log_must truncate -s $LARGESIZE "$LARGEFILE"
 log_must zpool create $TESTPOOL "$LARGEFILE"
 log_must mkfile $(( floor(LARGESIZE * 0.80) )) /$TESTPOOL/file
-log_must zpool sync
+sync_all_pools
 
 new_size=$(du -B1 "$LARGEFILE" | cut -f1)
 log_must test $new_size -le $LARGESIZE
@@ -89,7 +89,7 @@ log_must set_tunable64 TRIM_METASLAB_SKIP 1
 log_must zpool trim $TESTPOOL
 log_must set_tunable64 TRIM_METASLAB_SKIP 0
-log_must zpool sync
+sync_all_pools
 
 while [[ "$(trim_progress $TESTPOOL $LARGEFILE)" -lt "100" ]]; do
 	sleep 0.5
 done
@@ -102,7 +102,7 @@ log_must test $new_size -gt $LARGESIZE
 # space usage of the new metaslabs.
 log_must zpool trim $TESTPOOL
-log_must zpool sync
+sync_all_pools
 
 while [[ "$(trim_progress $TESTPOOL $LARGEFILE)" -lt "100" ]]; do
 	sleep 0.5
 done

@@ -54,14 +54,14 @@ log_must truncate -s $LARGESIZE "$LARGEFILE"
 log_must zpool create $TESTPOOL "$LARGEFILE"
 
 log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=1048576 count=64
-log_must zpool sync
+sync_all_pools
 
 log_must zpool trim $TESTPOOL
 [[ -z "$(trim_progress $TESTPOOL $DISK1)" ]] && \
     log_fail "Trimming did not start"
 
 log_must dd if=/dev/urandom of=/$TESTPOOL/file2 bs=1048576 count=64
-log_must zpool sync
+sync_all_pools
 
 log_must zpool export $TESTPOOL
 log_must zdb -e -p "$TESTDIR" -cc $TESTPOOL

@@ -41,7 +41,7 @@ function cleanup
 	log_must zpool detach $TESTPOOL $DISK2
 	get_disklist $TESTPOOL | grep $DISK3 >/dev/null && \
 	    log_must zpool detach $TESTPOOL $DISK3
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 }
 
 typeset pid

@@ -104,7 +104,7 @@ log_must zfs clone "$SNAP" "$CLONE"
 for i in {1..50}; do
 	log_must dd if=/dev/urandom of="/$CLONE/testfile$i" bs=1k count=512
 	# Force each new file to be tracked by a new livelist
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 done
 log_must zfs destroy "$CLONE"
 test_wait

@@ -70,7 +70,7 @@ log_must file_write -b 1048576 -c 8 -o create -d 0 -f $mntpnt/file
 
 sleep 10
 log_must zinject -c all
-log_must zpool sync
+sync_all_pools
 
 # Log txg sync times for reference and the zpool event summary.
 if is_freebsd; then

@@ -83,7 +83,7 @@ log_must zinject -d $DISK1 -D10000:1 $TESTPOOL
 log_must eval "dd if=/$mntpnt/file1 of=/dev/null bs=1048576 &"
 sleep 10
 log_must zinject -c all
-log_must zpool sync
+sync_all_pools
 wait
 
 # 5. Verify a "deadman" event is posted. The first appears after 5

@@ -483,7 +483,7 @@ function verify_userprop
 	typeset stamp=${perm}.${user}.$RANDOM
 
 	user_run $user zfs set "$user:ts=$stamp" $dtst
-	zpool sync ${dtst%%/*}
+	sync_pool ${dtst%%/*}
 	if [[ $stamp != $(get_prop "$user:ts" $dtst) ]]; then
 		return 1
 	fi

@@ -110,7 +110,7 @@ function run_and_verify
 	log_must eval "$fullcmd"
 
 	# Collect the new events and verify there are some.
-	log_must zpool sync -f
+	sync_all_pools true
 	log_must eval "zpool events >$TMP_EVENTS 2>/dev/null"
 	log_must eval "zpool events -v > $TMP_EVENTS_FULL 2>/dev/null"

@@ -102,7 +102,7 @@ do
 	# 2. Simulate physical removal of one device
 	remove_disk $removedev
 	log_must mkfile 1m $mntpnt/file
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 
 	# 3. Verify the device is unavailable.
 	log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"
@@ -134,7 +134,7 @@ do
 	# 2. Simulate physical removal of one device
 	remove_disk $removedev
 	log_must mkfile 1m $mntpnt/file
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 
 	# 3. Verify the device is handled by the spare.
 	log_must wait_hotspare_state $TESTPOOL $sparedev "INUSE"
@@ -171,7 +171,7 @@ do
 	# 3. Simulate physical removal of one device
 	remove_disk $removedev
 	log_must mkfile 1m $mntpnt/file
-	log_must zpool sync $TESTPOOL
+	sync_pool $TESTPOOL
 
 	# 4. Verify the device is unavailable
 	log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"

@@ -45,7 +45,7 @@ log_must set_tunable64 COMPRESSED_ARC_ENABLED 0
 log_must zfs create -o compression=on $TESTPOOL/fs
 mntpt=$(get_prop mountpoint $TESTPOOL/fs)
 write_compressible $mntpt 32m 1 1024k "testfile"
-log_must sync
+sync_all_pools
 log_must zfs umount $TESTPOOL/fs
 log_must zfs mount $TESTPOOL/fs
 log_must zinject -a -t data -e decompress -f 20 $mntpt/testfile.0

@@ -64,7 +64,7 @@ log_must set_tunable64 SLOW_IO_EVENTS_PER_SECOND 1000
 # Create 20ms IOs
 log_must zinject -d $DISK -D20:100 $TESTPOOL
 log_must mkfile 1048576 /$TESTPOOL/testfile
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 log_must zinject -c all
 
 SLOW_IOS=$(zpool status -sp | grep "$DISK" | awk '{print $6}')

@@ -66,7 +66,7 @@ log_must dd bs=1024k count=128 if=/dev/zero of=/$TEST_FS/file
 #
 log_must set_tunable64 ASYNC_BLOCK_MAX_BLOCKS 100
 
-log_must sync
+sync_all_pools
 log_must zfs destroy $TEST_FS
 
 #

@@ -63,9 +63,9 @@ log_must zpool create -o cachefile=none -f $LOGSM_POOL $TESTDISK
 log_must zfs create $LOGSM_POOL/fs
 
 log_must dd if=/dev/urandom of=/$LOGSM_POOL/fs/00 bs=128k count=10
-log_must sync
+sync_all_pools
 log_must dd if=/dev/urandom of=/$LOGSM_POOL/fs/00 bs=128k count=10
-log_must sync
+sync_all_pools
 
 log_must set_tunable64 KEEP_LOG_SPACEMAPS_AT_EXPORT 1
 log_must zpool export $LOGSM_POOL

@@ -71,7 +71,7 @@ for fails in $(seq $MMP_FAIL_INTERVALS_MIN $((MMP_FAIL_INTERVALS_MIN*2))); do
 	for interval in $(seq $MMP_INTERVAL_MIN 200 $MMP_INTERVAL_DEFAULT); do
 		log_must set_tunable64 MULTIHOST_FAIL_INTERVALS $fails
 		log_must set_tunable64 MULTIHOST_INTERVAL $interval
-		log_must sync_pool $TESTPOOL
+		sync_pool $TESTPOOL
 		typeset mmp_fail=$(zdb $TESTPOOL 2>/dev/null |
 		    awk '/mmp_fail/ {print $NF}')
 		if [ $fails -ne $mmp_fail ]; then

@@ -56,7 +56,7 @@ for i in $(seq 100); do
 	(( $ret != $ENOSPC )) && \
 	    log_fail "file.$i returned: $ret rather than ENOSPC."
 
-	log_must zpool sync -f
+	sync_all_pools true
 done
 
 log_mustnot_expect space zfs create $TESTPOOL/$TESTFS/subfs

@@ -90,7 +90,8 @@ for disk in $DISKLIST; do
 	done
 
 	log_must kill $killpid
-	sync
+	sync_all_pools
+	log_must sync
 
 	typeset dir=$(get_device_dir $DISKS)
 	verify_filesys "$TESTPOOL" "$TESTPOOL/$TESTFS" "$dir"

@@ -129,7 +129,8 @@ while [[ $i -lt ${#disks[*]} ]]; do
 	done
 
 	log_must kill $killpid
-	sync
+	sync_all_pools
+	log_must sync
 
 	typeset dir=$(get_device_dir $DISKS)
 	verify_filesys "$TESTPOOL" "$TESTPOOL/$TESTFS" "$dir"

@@ -76,6 +76,6 @@ for i in 0 1 2; do
 done
 
 log_must kill $killpid
-sync
+sync_all_pools
 
 log_pass

@@ -65,7 +65,7 @@ log_must zfs create $FS
 for i in {1..20}; do
 	log_must zfs snapshot "$FS@testsnapshot$i"
 done
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 #
 # Read the debug message file in small chunks to make sure that the read is

@@ -60,7 +60,7 @@ log_must zfs create $FS
 for i in {1..20}; do
	log_must zfs snapshot "$FS@testsnapshot$i"
 done
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 msgs1=$(mktemp) || log_fail
 msgs2=$(mktemp) || log_fail

@@ -50,7 +50,7 @@ function cleanup
 function sync_n
 {
 	for i in {1..$1}; do
-		log_must zpool sync $TESTPOOL
+		sync_pool $TESTPOOL
 	done
 	return 0
 }

@@ -65,7 +65,7 @@ mkmount_writable $QFS
 log_must user_run $PUSER mkdir $PRJDIR
 log_must chattr +P -p $PRJID1 $PRJDIR
 log_must user_run $PUSER mkfile 100m $PRJDIR/qf
-sync
+sync_all_pools
 
 log_note "set projectquota at a smaller size than it current usage"
 log_must zfs set projectquota@$PRJID1=90m $QFS

@@ -51,7 +51,7 @@ function cleanup_projectquota
 	[[ -d $PRJDIR1 ]] && log_must rm -rf $PRJDIR1
 	[[ -d $PRJDIR2 ]] && log_must rm -rf $PRJDIR2
 	[[ -d $PRJDIR3 ]] && log_must rm -rf $PRJDIR3
-	sync
+	sync_all_pools
 
 	return 0
 }

@@ -70,7 +70,7 @@ mkmount_writable $QFS
 log_must user_run $PUSER mkdir $PRJDIR
 log_must chattr +P -p $PRJID1 $PRJDIR
 log_must user_run $PUSER mkfile 50m $PRJDIR/qf
-sync
+sync_all_pools
 
 log_must zfs snapshot $snap_fs

@@ -63,7 +63,7 @@ mkmount_writable $QFS
 log_must user_run $PUSER mkdir $PRJDIR
 log_must chattr +P -p $PRJID1 $PRJDIR
 log_must user_run $PUSER mkfile 50m $PRJDIR/qf
-sync
+sync_all_pools
 
 typeset snapfs=$QFS@snap

@@ -53,7 +53,7 @@ log_must mkfile 20M $mntpnt/$TESTFILE
 log_must zfs snapshot $FS@snap20M
 log_must rm $mntpnt/$TESTFILE
 
-log_must sync
+sync_all_pools
 
 log_must zfs set refquota=10M $FS
 log_mustnot zfs rollback $FS@snap20M

@@ -69,7 +69,7 @@ function attempt_during_removal # pool disk callback [args]
 	# We want to make sure that the removal started
 	# before issuing the callback.
 	#
-	sync
+	sync_pool $pool
 	log_must is_pool_removing $pool
 
 	log_must $callback "$@"

@@ -79,7 +79,7 @@ log_must wait_for_removal $TESTPOOL
 # Run sync once to ensure that the config actually changed.
 #
 log_must zpool add $TESTPOOL $DISK2
-log_must sync
+sync_all_pools
 
 #
 # Ensure that zdb does not find any problems with this.

@@ -73,7 +73,7 @@ log_must zfs create $TESTPOOL1/$TESTFS
 
 mntpnt=$(get_prop mountpoint $TESTPOOL1/$TESTFS)
 log_must dd if=/dev/urandom of=$mntpnt/file bs=1M count=32
-log_must zpool sync $TESTPOOL1
+sync_pool $TESTPOOL1
 
 log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
@@ -99,7 +99,7 @@ log_must zfs create $TESTPOOL1/$TESTFS
 
 mntpnt=$(get_prop mountpoint $TESTPOOL1/$TESTFS)
 log_must dd if=/dev/urandom of=$mntpnt/file bs=1M count=32
-log_must zpool sync $TESTPOOL1
+sync_pool $TESTPOOL1
 
 log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1

@@ -153,9 +153,9 @@ do
 	# offline then online a vdev to introduce a new DTL range after current
 	# scan, which should restart (or defer) the resilver
 	log_must zpool offline $TESTPOOL1 ${VDEV_FILES[2]}
-	log_must zpool sync $TESTPOOL1
+	sync_pool $TESTPOOL1
 	log_must zpool online $TESTPOOL1 ${VDEV_FILES[2]}
-	log_must zpool sync $TESTPOOL1
+	sync_pool $TESTPOOL1
 
 	# there should now be 2 resilver starts w/o defer, 1 with defer
 	verify_restarts ' after offline/online' "${RESTARTS[1]}" "${VDEVS[1]}"
@@ -177,8 +177,8 @@ do
 	log_must is_pool_resilvered $TESTPOOL1
 
 	# wait for a few txg's to see if a resilver happens
-	log_must zpool sync $TESTPOOL1
-	log_must zpool sync $TESTPOOL1
+	sync_pool $TESTPOOL1
+	sync_pool $TESTPOOL1
 
 	# there should now be 2 resilver starts
 	verify_restarts ' after resilver' "${RESTARTS[3]}" "${VDEVS[3]}"

@@ -73,7 +73,7 @@ log_must zpool attach $TESTPOOL1 ${VDEV_FILES[0]} $SPARE_VDEV_FILE
 log_note "waiting for read errors to start showing up"
 for iter in {0..59}
 do
-	zpool sync $TESTPOOL1
+	sync_pool $TESTPOOL1
 	err=$(zpool status $TESTPOOL1 | grep ${VDEV_FILES[0]} | awk '{print $3}')
 	(( $err > 0 )) && break
 	sleep 1
@@ -92,8 +92,8 @@ done
 (( $finish == 0 )) && log_fail "resilver took too long to finish"
 
 # wait a few syncs to ensure that zfs does not restart the resilver
-log_must zpool sync $TESTPOOL1
-log_must zpool sync $TESTPOOL1
+sync_pool $TESTPOOL1
+sync_pool $TESTPOOL1
 
 # check if resilver was restarted
 start=$(zpool events | grep "sysevent.fs.zfs.resilver_start" | wc -l)

@@ -60,7 +60,7 @@ log_must zfs create $TESTPOOL1/$TESTFS
 
 mntpnt=$(get_prop mountpoint $TESTPOOL1/$TESTFS)
 log_must dd if=/dev/urandom of=$mntpnt/file bs=1M count=64
-log_must zpool sync $TESTPOOL1
+sync_pool $TESTPOOL1
 
 # Request a healing or sequential resilver
 for replace_mode in "healing" "sequential"; do

@@ -88,7 +88,7 @@ for ((i = 1; i <= $snap_count; i++)); do
 		log_must cp $mntpnt/file $mntpnt/file$j
 	done
 
-	log_must sync
+	sync_all_pools
 	log_must mount $remount_ro $zdev $mntpnt
 	log_must zfs snap $TESTPOOL/$TESTVOL@snap$i
 	log_must mount $remount_rw $zdev $mntpnt

@@ -97,7 +97,7 @@ log_must zfs snapshot $TESTPOOL/$TESTFS2@snap1
 for i in {1..1000}; do
 	log_must rm /$TESTPOOL/$TESTFS2/dir/file-$i
 done
-sync
+sync_all_pools
 log_must zfs snapshot $TESTPOOL/$TESTFS2@snap2
 
 expected_cksum=$(recursive_cksum /$TESTPOOL/$TESTFS2)

@@ -100,7 +100,7 @@ log_must truncate -s 131072 /$TESTPOOL/$TESTFS2/truncated
 log_must truncate -s 393216 /$TESTPOOL/$TESTFS2/truncated2
 log_must rm -f /$TESTPOOL/$TESTFS2/truncated3
 log_must rm -f /$TESTPOOL/$TESTFS2/truncated4
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 log_must zfs umount $TESTPOOL/$TESTFS2
 log_must zfs mount $TESTPOOL/$TESTFS2
 log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS2/truncated3 \

@@ -58,7 +58,7 @@ for type in "mirror" "raidz" "raidz2"; do
 	# Ensure the file has been synced out before attempting to
 	# corrupt its contents.
 	#
-	sync
+	sync_all_pools
 
 	#
 	# Corrupt a pool device to make the pool DEGRADED

@@ -102,7 +102,7 @@ else
 	log_must mount $VOLUME $MNTPNT
 	FSTYPE=$NEWFS_DEFAULT_FS
 fi
-log_must zpool sync
+sync_all_pools
 
 #
 # 2. Freeze TESTVOL

@@ -93,6 +93,7 @@ while [[ $i -le $COUNT ]]; do
 	(( i = i + 1 ))
 done
 
+sync_pool $TESTPOOL
 #
 # Now rollback to latest snapshot
 #

@@ -82,6 +82,7 @@ log_must zfs rollback $SNAPPOOL
 log_mustnot zfs snapshot $SNAPPOOL
 
 log_must touch /$TESTPOOL/$TESTFILE
+sync_pool $TESTPOOL
 log_must zfs rollback $SNAPPOOL
 
 log_must zfs create $TESTPOOL/$TESTFILE

@@ -73,7 +73,7 @@ for type in "" "mirror" "raidz" "draid"; do
 		filesize=$((4096 + ((RANDOM * 691) % 131072) ))
 		log_must rm -rf $dir
 		log_must fill_fs $dir 10 10 $filesize 1 R
-		zpool sync
+		sync_all_pools
 	done
 
 	log_must du -hs /$TESTPOOL

@@ -74,7 +74,7 @@ for type in "" "mirror" "raidz" "raidz2" "draid" "draid2"; do
 		filesize=$((4096 + ((RANDOM * 691) % 131072) ))
 		log_must rm -rf $dir
 		log_must fill_fs $dir 10 10 $filesize 1 R
-		zpool sync
+		sync_all_pools
 
 		if [[ $((n % 4)) -eq 0 ]]; then
 			log_must timeout 120 zpool trim -w $TESTPOOL

@@ -91,7 +91,7 @@ function wait_trim_io # pool type txgs
 			return
 		fi
 
-		zpool sync -f
+		sync_all_pools true
 		((i = i + 1))
 	done

@@ -72,7 +72,7 @@ for type in "" "mirror" "raidz" "draid"; do
 		filesize=$((4096 + ((RANDOM * 691) % 131072) ))
 		log_must rm -rf $dir
 		log_must fill_fs $dir 10 10 $filesize 1 R
-		zpool sync
+		sync_all_pools
 	done
 
 	log_must du -hs /$TESTPOOL

@@ -58,7 +58,7 @@ log_must dd if=/dev/urandom of=$srcfile bs=1024k count=1
 log_onexit cleanup
 log_must cp $srcfile $TESTDIR/$TESTFILE
 log_must cp /dev/null $TESTDIR/$TESTFILE
-log_must sync
+sync_all_pools
 if [[ -s $TESTDIR/$TESTFILE ]]; then
 	log_note "$(ls -l $TESTDIR/$TESTFILE)"
 	log_fail "testfile not truncated"

@@ -65,7 +65,7 @@ log_must zfs set groupquota@$QGROUP=500m $QFS
 
 mkmount_writable $QFS
 log_must user_run $QUSER1 mkfile 50m $QFILE
-sync
+sync_all_pools
 
 log_must zfs snapshot $snap_fs

@@ -57,7 +57,7 @@ log_must zfs set groupquota@$QGROUP=500m $QFS
 
 mkmount_writable $QFS
 log_must user_run $QUSER1 mkfile 100m $QFILE
-sync
+sync_all_pools
 
 typeset snapfs=$QFS@snap

@@ -78,7 +78,7 @@ log_must zfs set xattr=sa $QFS
 log_must user_run $QUSER1 mkfiles ${QFILE}_1 $user1_cnt
 log_must user_run $QUSER2 mkfiles ${QFILE}_2 $user2_cnt
 ((grp_cnt = user1_cnt + user2_cnt))
-sync_pool
+sync_all_pools
 
 typeset snapfs=$QFS@snap

@@ -63,7 +63,7 @@ log_must zfs get groupquota@$QGROUP $QFS
 log_note "write some data to the $QFS"
 mkmount_writable $QFS
 log_must user_run $QUSER1 mkfile 100m $QFILE
-sync
+sync_all_pools
 
 log_note "set user|group quota at a smaller size than it current usage"
 log_must zfs set userquota@$QUSER1=90m $QFS

@@ -48,7 +48,7 @@ function cleanup_quota
 
 	[[ -f $QFILE ]] && log_must rm -f $QFILE
 	[[ -f $OFILE ]] && log_must rm -f $OFILE
-	sync
+	sync_all_pools
 
 	return 0
 }

@@ -64,7 +64,7 @@ typeset snap_fs=$QFS@snap
 log_must zfs set userquota@$QUSER1=100m $QFS
 mkmount_writable $QFS
 log_must user_run $QUSER1 mkfile 50m $QFILE
-sync
+sync_all_pools
 
 log_must zfs snapshot $snap_fs

@@ -59,7 +59,7 @@ log_must zfs set userquota@$QUSER1=100m $QFS
 
 mkmount_writable $QFS
 log_must user_run $QUSER1 mkfile 50m $QFILE
-sync
+sync_all_pools
 
 typeset snapfs=$QFS@snap

@@ -79,7 +79,7 @@ log_must zfs set xattr=sa $QFS
 
 log_must user_run $QUSER1 mkfiles ${QFILE}_1 $user1_cnt
 log_must user_run $QUSER2 mkfiles ${QFILE}_2 $user2_cnt
-sync_pool
+sync_all_pools
 
 typeset snapfs=$QFS@snap

@@ -41,7 +41,7 @@ orig_top=$(get_top_vd_zap $DISK $conf)
 orig_leaf=$(get_leaf_vd_zap $DISK $conf)
 assert_zap_common $TESTPOOL $DISK "top" $orig_top
 assert_zap_common $TESTPOOL $DISK "leaf" $orig_leaf
-log_must zpool sync
+sync_all_pools
 
 # Export the pool.
 log_must zpool export $TESTPOOL

@@ -84,7 +84,7 @@ while (( 1 )); do
 done
 
 if is_linux || is_freebsd ; then
-	log_must sync
+	sync_all_pools
 else
	log_must lockfs -f $TESTDIR
 fi