ZTS: Improve enospc tests

The enospc_002_pos test case would frequently fail because a command
succeeded when it was expected to fail for lack of space.  To make
this far less likely, files are now created across multiple
transaction groups so that as many unused blocks as possible are
consumed.
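
In outline, the fill loop now looks like the following (a condensed
sketch of the enospc_002_pos.ksh change shown in the diff below; the
pass count of 30 is simply the value used there):

    for i in $(seq 30); do
            # Write until the pool reports ENOSPC.
            file_write -o create -f $TESTDIR/file.$i -b $BLOCKSZ \
                -c $NUM_WRITES -d $DATA
            ret=$?
            (( $ret != $ENOSPC )) && \
                log_fail "file.$i returned: $ret rather than ENOSPC."
            # Sync the pool so the next pass lands in a new transaction
            # group and recently freed blocks can be reallocated.
            log_must zpool sync -f
    done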

The requirement that the tests run on a partitioned block device
has been removed; it is simpler to use sparse files.
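
For reference, the sparse-file setup amounts to the following (paths,
sizes, and variable names are those introduced by the new enospc.cfg
and setup.ksh in the diff below):

    # Sparse files allocate blocks only as data is written, so no
    # partitioning of a physical disk is required.
    log_must truncate -s $SIZE_SMALL $DISK_SMALL    # 100m
    log_must truncate -s $SIZE_LARGE $DISK_LARGE    # 512m

    # Each test then builds its pool directly on a file vdev, e.g.:
    default_setup_noexit $DISK_SMALL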

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #7663
Author: Brian Behlendorf
Date:   2018-06-29 09:40:32 -07:00 (committed via GitHub)
commit e03a41a604
parent da2feb42fb
7 changed files with 47 additions and 61 deletions

View File

@@ -105,6 +105,14 @@ user_ns_reason = 'Kernel user namespace support required'
#
rewind_reason = 'Arbitrary pool rewind is not guaranteed'
#
# Some tests may be structured in a way that relies on exact knowledge
# of how much free space is available in a pool. These tests cannot be
# made completely reliable because the internal details of how free space
# is managed are not exposed to user space.
#
enospc_reason = 'Exact free space reporting is not guaranteed'
#
# Some tests are not applicable to Linux or need to be updated to operate
# in the manner required by Linux. Any tests which are skipped for this
@@ -235,8 +243,7 @@ maybe = {
'inuse/inuse_009_pos': ['SKIP', disk_reason],
'largest_pool/largest_pool_001_pos': ['FAIL', known_reason],
'pyzfs/pyzfs_unittest': ['SKIP', python_deps_reason],
'no_space/setup': ['SKIP', disk_reason],
'no_space/enospc_002_pos': ['FAIL', known_reason],
'no_space/enospc_002_pos': ['FAIL', enospc_reason],
'projectquota/setup': ['SKIP', exec_reason],
'reservation/reservation_018_pos': ['FAIL', '5642'],
'rsend/rsend_019_pos': ['FAIL', '6086'],

View File

@@ -33,22 +33,7 @@
verify_runnable "global"
DISK=${DISKS%% *}
ismounted "$TESTPOOL/$TESTFS"
(( $? == 0 )) && \
    log_must zfs umount $TESTDIR
destroy_pool $TESTPOOL
if is_mpath_device $DISK; then
delete_partitions
fi
#
# Remove 100mb partition.
#
create_pool dummy$$ "$DISK"
destroy_pool dummy$$
default_cleanup_noexit
log_must rm -f $DISK_SMALL $DISK_LARGE
log_pass

View File

@@ -28,15 +28,12 @@
# Copyright (c) 2013 by Delphix. All rights reserved.
#
export TESTFILE0=testfile0.$$
export TESTFILE1=testfile1.$$
DISK_SMALL=${TEST_BASE_DIR%%/}/vdev_small
DISK_LARGE=${TEST_BASE_DIR%%/}/vdev_large
export SIZE=100mb
export SIZE_SMALL=100m
export SIZE_LARGE=512m
export ENOSPC=28
export BLOCKSZ=8192
export NUM_WRITES=65536
export DATA=0
export DISKSARRAY=$DISKS
export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
set_device_dir

View File

@@ -48,13 +48,14 @@ verify_runnable "both"
function cleanup
{
        rm -f $TESTDIR/$TESTFILE0
        rm -f $TESTDIR/$TESTFILE1
        default_cleanup_noexit
}
log_onexit cleanup
log_assert "ENOSPC is returned when file system is full."
default_setup_noexit $DISK_SMALL
log_must zfs set compression=off $TESTPOOL/$TESTFS
log_note "Writing file: $TESTFILE0 until ENOSPC."

View File

@@ -28,18 +28,36 @@
verify_runnable "both"
function cleanup
{
        log_must_busy zpool destroy -f $TESTPOOL
}
log_onexit cleanup
log_assert "ENOSPC is returned when file system is full."
sync
default_setup_noexit $DISK_SMALL
log_must zfs set compression=off $TESTPOOL/$TESTFS
log_must zfs snapshot $TESTPOOL/$TESTFS@snap
log_note "Writing file: $TESTFILE0 until ENOSPC."
file_write -o create -f $TESTDIR/$TESTFILE0 -b $BLOCKSZ \
-c $NUM_WRITES -d $DATA
ret=$?
#
# Completely fill the pool in order to ensure the commands below will more
# reliably succeed or fail as a result of lack of space. Care is taken to
# force multiple transaction groups to ensure as many recently freed blocks
# as possible are reallocated.
#
log_note "Writing files until ENOSPC."
(( $ret != $ENOSPC )) && \
log_fail "$TESTFILE0 returned: $ret rather than ENOSPC."
for i in $(seq 30); do
file_write -o create -f $TESTDIR/file.$i -b $BLOCKSZ \
-c $NUM_WRITES -d $DATA
ret=$?
(( $ret != $ENOSPC )) && \
log_fail "file.$i returned: $ret rather than ENOSPC."
log_must zpool sync -f
done
log_mustnot_expect space zfs create $TESTPOOL/$TESTFS/subfs
log_mustnot_expect space zfs clone $TESTPOOL/$TESTFS@snap $TESTPOOL/clone

View File

@@ -45,19 +45,13 @@ verify_runnable "both"
function cleanup
{
        log_must zpool destroy $TESTPOOL1
        log_must rm -f $disk
}
log_onexit cleanup
log_assert "ENOSPC is returned on pools with large physical block size"
disk=$TEST_BASE_DIR/$FILEDISK0
# we need a device big enough to test this or failure will not trigger
size="512m"
log_must mkfile $size $disk
log_must zpool create $TESTPOOL1 -o ashift=13 $disk
log_must zpool create $TESTPOOL1 -o ashift=13 $DISK_LARGE
log_must zfs set mountpoint=$TESTDIR $TESTPOOL1
log_must zfs set compression=off $TESTPOOL1
log_must zfs set recordsize=512 $TESTPOOL1

View File

@@ -34,23 +34,7 @@
verify_runnable "global"
if ! $(is_physical_device $DISKS) ; then
        log_unsupported "This directory cannot be run on raw files."
fi
DISK=${DISKS%% *}
log_must set_partition 0 "" $SIZE $DISK
if is_linux; then
        if ( is_mpath_device $DISK ) && [[ -z $(echo $DISK | awk 'substr($1,18,1)\
            ~ /^[[:digit:]]+$/') ]] || ( is_real_device $DISK ); then
                default_setup $DISK"1"
        elif ( is_mpath_device $DISK || is_loop_device $DISK ); then
                default_setup $DISK"p1"
        else
                log_fail "$DISK not supported for partitioning."
        fi
else
        default_setup $DISK"s0"
fi
log_must truncate -s $SIZE_SMALL $DISK_SMALL
log_must truncate -s $SIZE_LARGE $DISK_LARGE
log_pass