ZTS: Eliminate partitioning from zpool_create etc

These tests can be made to work without complex partitioning of
physical disks.

Use the three disks directly, creating a few file disks only where
there is a compelling reason to.

Reduce the use of shared variables that have no clear utility.

Catch the fallout in tests that include cfg/shlib from zpool_create.

Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ryan Moeller <ryan@iXsystems.com>
Closes #10002
Ryan Moeller 2020-02-20 11:10:13 -05:00 committed by GitHub
parent 873cd182de
commit 8136956716
35 changed files with 217 additions and 785 deletions
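
At a high level, the diffs below swap slice-based vdev setup for whole disks plus sparse file vdevs. A minimal sketch of the before/after pattern, using helper names from the suite (sizes and pool layout are illustrative, not taken from any one test):

# Before: partition a physical disk and build pools on its slices
partition_disk $SIZE $DISK 7
zpool create $TESTPOOL ${DISK}${SLICE_PREFIX}${SLICE0} ${DISK}${SLICE_PREFIX}${SLICE1}

# After: use the whole disks directly...
zpool create $TESTPOOL $DISK0 $DISK1

# ...and back extra vdevs with sparse files only when genuinely needed
typeset disk1=$(create_blockfile $FILESIZE)
zpool create $TESTPOOL1 $DISK2 $disk1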

View File

@ -48,8 +48,8 @@ function cleanup
log_assert "zpool attach -o ashift=<n>' works with different ashift values"
log_onexit cleanup
disk1=$TEST_BASE_DIR/$FILEDISK0
disk2=$TEST_BASE_DIR/$FILEDISK1
disk1=$TEST_BASE_DIR/disk1
disk2=$TEST_BASE_DIR/disk2
log_must truncate -s $SIZE $disk1
log_must truncate -s $SIZE $disk2

View File

@ -32,8 +32,6 @@
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib
clean_blockfile "$TESTDIR $TESTDIR0 $TESTDIR1"
cleanup_devices $DISKS
log_pass

View File

@ -96,8 +96,7 @@ function verify_device_uberblocks # <device> <count>
log_assert "zpool create -o ashift=<n>' works with different ashift values"
log_onexit cleanup
disk=$TEST_BASE_DIR/$FILEDISK0
log_must mkfile $SIZE $disk
disk=$(create_blockfile $SIZE)
typeset ashifts=("9" "10" "11" "12" "13" "14" "15" "16")
# since Illumos 4958 the largest uberblock is 8K, so we have at least 16 per label
@ -123,7 +122,7 @@ do
# clean things for the next run
log_must zpool destroy $TESTPOOL
log_must zpool labelclear $disk
log_must eval "verify_device_uberblocks $disk 0"
log_must verify_device_uberblocks $disk 0
((i = i + 1))
done
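
For reference, a sketch of the arithmetic behind the comment above: each label reserves a 128K uberblock ring, and since Illumos 4958 the uberblock size is 1 << ashift, clamped between 1K and 8K (the exact clamp is our reading of the on-disk format, not something this diff states):

for ashift in 9 10 11 12 13 14 15 16; do
	# clamp the uberblock shift to the [10, 13] range (1K..8K)
	ub_shift=$(( ashift < 10 ? 10 : (ashift > 13 ? 13 : ashift) ))
	echo "ashift=$ashift -> $(( 131072 >> ub_shift )) uberblocks per label"
done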

View File

@ -34,24 +34,4 @@
verify_runnable "global"
if ! is_physical_device $DISKS; then
log_unsupported "This directory cannot be run on raw files."
fi
if [[ -n $DISK ]]; then
#
# Use 'zpool create' to clean up the information
# in the given disk to avoid slice overlapping.
#
cleanup_devices $DISK
partition_disk $((($MINVDEVSIZE / (1024 * 1024)) * 2))m $DISK 7
else
for disk in `echo $DISKSARRAY`; do
cleanup_devices $disk
partition_disk $((($MINVDEVSIZE / (1024 * 1024)) * 2))m $disk 7
done
fi
log_pass

View File

@ -30,70 +30,23 @@
. $STF_SUITE/include/libtest.shlib
export DISK_ARRAY_NUM=0
export DISK_ARRAY_LIMIT=4
export DISKSARRAY=""
function set_disks
{
typeset -a disk_array=($(find_disks $DISKS))
if (( ${#disk_array[*]} <= 1 )); then
export DISK=${DISKS%% *}
export DISK_ARRAY_NUM=1
else
export DISK=""
typeset DISKSARRAY=""
typeset -i DISK_ARRAY_LIMIT=4
typeset -i i=0
while (( i < ${#disk_array[*]} )); do
while (( i < ${#disk_array[*]} && i <= $DISK_ARRAY_LIMIT )); do
export DISK${i}="${disk_array[$i]}"
DISKSARRAY="$DISKSARRAY ${disk_array[$i]}"
(( i = i + 1 ))
(( i>$DISK_ARRAY_LIMIT )) && break
done
export DISK_ARRAY_NUM=$i
export DISKSARRAY
fi
}
set_disks
export FILESIZE="$MINVDEVSIZE"
export FILESIZE1="$(($MINVDEVSIZE * 2))"
export SIZE="$((MINVDEVSIZE / (1024 * 1024)))"m
export SIZE1="$(($MINVDEVSIZE * 2 / (1024 * 1024)))m"
if is_linux; then
set_device_dir
set_slice_prefix
export SLICE0=1
export SLICE1=2
export SLICE2=3
export SLICE3=4
export SLICE4=5
export SLICE5=6
export SLICE6=7
export SLICE7=8
disk1=${DISKS%% *}
if is_mpath_device $disk1; then
delete_partitions
fi
else
export SLICE0=0
export SLICE1=1
export SLICE2=2
export SLICE3=3
export SLICE4=4
export SLICE5=5
export SLICE6=6
export SLICE7=7
fi
export FILEDISK=filedisk_create
export FILEDISK0=filedisk0_create
export FILEDISK1=filedisk1_create
export FILEDISK2=filedisk2_create
export FILEDISK3=filedisk3_create
export BYND_MAX_NAME="byondmaxnamelength\
012345678901234567890123456789\
012345678901234567890123456789\

View File

@ -53,65 +53,15 @@ function create_pool_test
}
#
# Create a ufs|ext file system and make a file within the file
# system for storage pool vdev
# Create a file for storage pool vdev
# $1, file size
# $2, file name
# $3, disk name to create ufs|ext file system
#
function create_blockfile
{
typeset size=$1
typeset file=$2
typeset disk=$3
typeset dir=`dirname $file`
if [[ -d $dir ]]; then
ismounted $dir $NEWFS_DEFAULT_FS
(( $? == 0 )) && \
log_must umount -f $dir
else
log_must mkdir -p $dir
fi
log_must eval "new_fs ${DEV_RDSKDIR}/$disk >/dev/null 2>&1"
log_must mount ${DEV_DSKDIR}/$disk $dir
log_must truncate -s $size $file
}
#
# Umount the ufs|ext filesystem and remove the mountpoint
# $1, the mount point
#
function clean_blockfile
{
typeset dirs=$1
for dir in $dirs; do
if [[ -d $dir ]]; then
if is_linux; then
if ismounted $dir ext2; then
typeset dev=$(df -lht ext2 | \
grep "$dir" | \
awk '{print $1}')
log_must umount -f $dir
create_pool ${TESTPOOL}.tmp $dev
destroy_pool ${TESTPOOL}.tmp
fi
else
if ismounted $dir ufs; then
typeset dev=$(df -lhF ufs | \
grep "$dir" | \
awk '{print $1}')
log_must umount -f $dir
create_pool ${TESTPOOL}.tmp $dev
destroy_pool ${TESTPOOL}.tmp
fi
fi
log_must rm -rf $dir
fi
done
typeset file=$(mktemp)
truncate -s $size $file
echo $file
}
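
A minimal usage sketch of the simplified helper, composed only of calls that appear elsewhere in this diff ($FILESIZE comes from the suite's cfg):

# create two sparse file vdevs, build a mirror on them, then clean up
typeset disk1=$(create_blockfile $FILESIZE)
typeset disk2=$(create_blockfile $FILESIZE)
create_pool $TESTPOOL mirror $disk1 $disk2
destroy_pool $TESTPOOL
rm -f $disk1 $disk2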
#

View File

@ -49,17 +49,7 @@ function cleanup
{
poolexists $TESTPOOL && destroy_pool $TESTPOOL
clean_blockfile "$TESTDIR0 $TESTDIR1"
if [[ -n $DISK ]]; then
partition_disk $((($MINVDEVSIZE / (1024 * 1024)) * 2))m $DISK 7
else
typeset disk=""
for disk in $DISK0 $DISK1; do
partition_disk \
$((($MINVDEVSIZE / (1024 * 1024)) * 2))m $disk 7
done
fi
rm -f $disk1 $disk2
}
log_assert "'zpool create <pool> <vspec> ...' can successfully create" \
@ -67,80 +57,21 @@ log_assert "'zpool create <pool> <vspec> ...' can successfully create" \
log_onexit cleanup
set -A keywords "" "mirror" "raidz" "raidz1"
typeset disk1=$(create_blockfile $FILESIZE)
typeset disk2=$(create_blockfile $FILESIZE)
case $DISK_ARRAY_NUM in
0|1)
typeset disk=""
if (( $DISK_ARRAY_NUM == 0 )); then
disk=$DISK
else
disk=$DISK0
fi
create_blockfile $FILESIZE $TESTDIR0/$FILEDISK0 \
${disk}${SLICE_PREFIX}${SLICE5}
create_blockfile $FILESIZE $TESTDIR1/$FILEDISK1 \
${disk}${SLICE_PREFIX}${SLICE6}
pooldevs="${DISK0} \
\"${DISK0} ${DISK1}\" \
\"${DISK0} ${DISK1} ${DISK2}\" \
\"$disk1 $disk2\""
raidzdevs="\"${DISK0} ${DISK1} ${DISK2}\""
mirrordevs="\"${DISK0} ${DISK1}\" \
$raidzdevs \
\"$disk1 $disk2\""
pooldevs="${disk}${SLICE_PREFIX}${SLICE0} \
${DEV_DSKDIR}/${disk}${SLICE_PREFIX}${SLICE0} \
\"${disk}${SLICE_PREFIX}${SLICE0} \
${disk}${SLICE_PREFIX}${SLICE1}\" \
$TESTDIR0/$FILEDISK0"
raidzdevs="\"${DEV_DSKDIR}/${disk}${SLICE_PREFIX}${SLICE0} \
${disk}${SLICE_PREFIX}${SLICE1}\" \
\"${disk}${SLICE_PREFIX}${SLICE0} \
${disk}${SLICE_PREFIX}${SLICE1} \
${disk}${SLICE_PREFIX}${SLICE3}\" \
\"${disk}${SLICE_PREFIX}${SLICE0} \
${disk}${SLICE_PREFIX}${SLICE1} \
${disk}${SLICE_PREFIX}${SLICE3} \
${disk}${SLICE_PREFIX}${SLICE4}\"\
\"$TESTDIR0/$FILEDISK0 $TESTDIR1/$FILEDISK1\""
mirrordevs=$raidzdevs
;;
2|*)
create_blockfile $FILESIZE $TESTDIR0/$FILEDISK0 \
${DISK0}${SLICE_PREFIX}${SLICE5}
create_blockfile $FILESIZE $TESTDIR1/$FILEDISK1 \
${DISK1}${SLICE_PREFIX}${SLICE5}
pooldevs="${DISK0}${SLICE_PREFIX}${SLICE0} \
\"${DEV_DSKDIR}/${DISK0}${SLICE_PREFIX}${SLICE0} \
${DISK1}${SLICE_PREFIX}${SLICE0}\" \
\"${DISK0}${SLICE_PREFIX}${SLICE0} \
${DISK0}${SLICE_PREFIX}${SLICE1} \
${DISK1}${SLICE_PREFIX}${SLICE1}\"\
\"${DISK0}${SLICE_PREFIX}${SLICE0} \
${DISK1}${SLICE_PREFIX}${SLICE0} \
${DISK0}${SLICE_PREFIX}${SLICE1}\
${DISK1}${SLICE_PREFIX}${SLICE1}\" \
\"$TESTDIR0/$FILEDISK0 $TESTDIR1/$FILEDISK1\""
raidzdevs="\"${DEV_DSKDIR}/${DISK0}${SLICE_PREFIX}${SLICE0} \
${DISK1}${SLICE_PREFIX}${SLICE0}\" \
\"${DISK0}${SLICE_PREFIX}${SLICE0} \
${DISK0}${SLICE_PREFIX}${SLICE1} \
${DISK1}${SLICE_PREFIX}${SLICE1}\" \
\"${DISK0}${SLICE_PREFIX}${SLICE0} \
${DISK1}${SLICE_PREFIX}${SLICE0} \
${DISK0}${SLICE_PREFIX}${SLICE1} \
${DISK1}${SLICE_PREFIX}${SLICE1}\" \
\"$TESTDIR0/$FILEDISK0 $TESTDIR1/$FILEDISK1\""
mirrordevs=$raidzdevs
;;
esac
typeset -i i=0
while (( $i < ${#keywords[*]} )); do
case ${keywords[i]} in
"")
create_pool_test "$TESTPOOL" "${keywords[i]}" "$pooldevs";;
mirror)
create_pool_test "$TESTPOOL" "${keywords[i]}" "$mirrordevs";;
raidz|raidz1)
create_pool_test "$TESTPOOL" "${keywords[i]}" "$raidzdevs" ;;
esac
(( i = i+1 ))
done
create_pool_test "$TESTPOOL" "" "$pooldevs"
create_pool_test "$TESTPOOL" "mirror" "$mirrordevs"
create_pool_test "$TESTPOOL" "raidz" "$raidzdevs"
create_pool_test "$TESTPOOL" "raidz1" "$raidzdevs"
log_pass "'zpool create <pool> <vspec> ...' success."

View File

@ -47,22 +47,15 @@ verify_runnable "global"
function cleanup
{
for pool in $TESTPOOL $TESTPOOL1 $TESTPOOL2 $TESTPOOL3 $TESTPOOL4 \
$TESTPOOL5 $TESTPOOL6
do
destroy_pool $pool
for pool in $TESTPOOL $TESTPOOL1; do
poolexists $pool && destroy_pool $pool
done
clean_blockfile "$TESTDIR0 $TESTDIR1"
for file in $FILEDISK0 $FILEDISK1 $FILEDISK2
do
if [[ -e $TEST_BASE_DIR/$file ]]; then
rm -f $TEST_BASE_DIR/$file
rm -f $disk1 $disk2
if is_freebsd; then
umount -f $TESTDIR
rm -rf $TESTDIR
fi
done
partition_disk $SIZE $disk 6
}
log_onexit cleanup
@ -70,57 +63,66 @@ log_onexit cleanup
log_assert "'zpool create -f <pool> <vspec> ...' can successfully create" \
"a new pool in some cases."
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
create_pool "$TESTPOOL" "${disk}${SLICE_PREFIX}${SLICE0}"
log_must eval "new_fs \
${DEV_RDSKDIR}/${disk}${SLICE_PREFIX}${SLICE1} >/dev/null 2>&1"
create_blockfile $FILESIZE $TESTDIR0/$FILEDISK0 ${disk}${SLICE_PREFIX}${SLICE4}
create_blockfile $FILESIZE1 $TESTDIR1/$FILEDISK1 ${disk}${SLICE_PREFIX}${SLICE5}
log_must truncate -s $SIZE $TEST_BASE_DIR/$FILEDISK0
log_must truncate -s $SIZE $TEST_BASE_DIR/$FILEDISK1
log_must truncate -s $SIZE $TEST_BASE_DIR/$FILEDISK2
create_pool $TESTPOOL $DISK0
log_must eval "new_fs ${DEV_RDSKDIR}/${DISK1} >/dev/null 2>&1"
typeset disk1=$(create_blockfile $FILESIZE)
typeset disk2=$(create_blockfile $FILESIZE1)
unset NOINUSE_CHECK
log_must zpool export $TESTPOOL
log_note "'zpool create' without '-f' will fail " \
"while device is belong to an exported pool."
log_mustnot zpool create "$TESTPOOL1" "${disk}${SLICE_PREFIX}${SLICE0}"
create_pool "$TESTPOOL1" "${disk}${SLICE_PREFIX}${SLICE0}"
"while device belongs to an exported pool."
log_mustnot zpool create $TESTPOOL1 $DISK0
create_pool $TESTPOOL1 $DISK0
log_must poolexists $TESTPOOL1
log_must destroy_pool $TESTPOOL1
log_note "'zpool create' without '-f' will fail " \
"while device is using by an ufs filesystem."
log_mustnot zpool create "$TESTPOOL2" "${disk}${SLICE_PREFIX}${SLICE1}"
create_pool "$TESTPOOL2" "${disk}${SLICE_PREFIX}${SLICE1}"
log_must poolexists $TESTPOOL2
"while device is in use by a ufs filesystem."
if is_freebsd; then
# fs must be mounted for create to fail on FreeBSD
log_must mkdir -p $TESTDIR
log_must mount ${DEV_DSKDIR}/${DISK1} $TESTDIR
fi
log_mustnot zpool create $TESTPOOL $DISK1
if is_freebsd; then
# fs must not be mounted to create pool even with -f
log_must umount -f $TESTDIR
log_must rm -rf $TESTDIR
fi
create_pool $TESTPOOL $DISK1
log_must poolexists $TESTPOOL
log_must destroy_pool $TESTPOOL
log_note "'zpool create' mirror without '-f' will fail " \
"while devices have different size."
log_mustnot zpool create "$TESTPOOL3" "mirror" $TESTDIR0/$FILEDISK0 \
$TESTDIR1/$FILEDISK1
create_pool "$TESTPOOL3" "mirror" $TESTDIR0/$FILEDISK0 $TESTDIR1/$FILEDISK1
log_must poolexists $TESTPOOL3
log_mustnot zpool create $TESTPOOL mirror $disk1 $disk2
create_pool $TESTPOOL mirror $disk1 $disk2
log_must poolexists $TESTPOOL
log_must destroy_pool $TESTPOOL
if ! is_freebsd; then
log_note "'zpool create' mirror without '-f' will fail " \
"while devices are of different types."
log_mustnot zpool create "$TESTPOOL4" "mirror" $TEST_BASE_DIR/$FILEDISK0 \
${disk}${SLICE_PREFIX}${SLICE3}
create_pool "$TESTPOOL4" "mirror" \
$TEST_BASE_DIR/$FILEDISK0 ${disk}${SLICE_PREFIX}${SLICE3}
log_must poolexists $TESTPOOL4
log_mustnot zpool create $TESTPOOL mirror $disk1 $DISK0
create_pool $TESTPOOL mirror $disk1 $DISK0
log_must poolexists $TESTPOOL
log_must destroy_pool $TESTPOOL
fi
log_note "'zpool create' without '-f' will fail " \
"while device is part of potentially active pool."
create_pool "$TESTPOOL5" "mirror" $TEST_BASE_DIR/$FILEDISK1 \
$TEST_BASE_DIR/$FILEDISK2
log_must zpool offline $TESTPOOL5 $TEST_BASE_DIR/$FILEDISK2
log_must zpool export $TESTPOOL5
log_mustnot zpool create "$TESTPOOL6" $TEST_BASE_DIR/$FILEDISK2
create_pool $TESTPOOL6 $TEST_BASE_DIR/$FILEDISK2
log_must poolexists $TESTPOOL6
"while a device is part of a potentially active pool."
create_pool $TESTPOOL mirror $DISK0 $DISK1
log_must zpool offline $TESTPOOL $DISK0
log_must zpool export $TESTPOOL
log_mustnot zpool create $TESTPOOL1 $DISK0
create_pool $TESTPOOL1 $DISK0
log_must poolexists $TESTPOOL1
log_must destroy_pool $TESTPOOL1
log_pass "'zpool create -f <pool> <vspec> ...' success."

View File

@ -58,18 +58,6 @@ log_assert "'zpool create -n <pool> <vspec> ...' can display the configuration"
log_onexit cleanup
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
DISK=${DISKS%% *}
if is_mpath_device $DISK; then
partition_disk $SIZE $disk 1
fi
typeset vspec="${disk}${SLICE_PREFIX}${SLICE0}"
typeset goodprops=('' '-o comment=text' '-O checksum=on' '-O ns:prop=value')
typeset badprops=('-o ashift=9999' '-O doesnotexist=on' '-O volsize=10M')
@ -79,10 +67,10 @@ do
#
# Make sure disk is clean before we use it
#
create_pool $TESTPOOL $vspec > $tmpfile
create_pool $TESTPOOL $DISK0 > $tmpfile
destroy_pool $TESTPOOL
log_must eval "zpool create -n $prop $TESTPOOL $vspec > $tmpfile"
log_must eval "zpool create -n $prop $TESTPOOL $DISK0 > $tmpfile"
poolexists $TESTPOOL && \
log_fail "'zpool create -n <pool> <vspec> ...' fail."
@ -98,10 +86,10 @@ do
#
# Make sure disk is clean before we use it
#
create_pool $TESTPOOL $vspec > $tmpfile
create_pool $TESTPOOL $DISK0 > $tmpfile
destroy_pool $TESTPOOL
log_mustnot zpool create -n $prop $TESTPOOL $vspec
log_mustnot zpool create -n $prop $TESTPOOL $DISK0
done
log_pass "'zpool create -n <pool> <vspec>...' success."

View File

@ -49,21 +49,19 @@ function cleanup
poolexists $TESTPOOL && destroy_pool $TESTPOOL
rm -rf $TESTDIR
partition_disk $SIZE $disk 6
}
log_assert "Storage pools with 16 file based vdevs can be created."
log_onexit cleanup
disk=${DISKS%% *}
create_pool $TESTPOOL $disk
create_pool $TESTPOOL $DISK0
log_must zfs create -o mountpoint=$TESTDIR $TESTPOOL/$TESTFS
vdevs_list=$(echo $TESTDIR/file.{01..16})
log_must truncate -s $MINVDEVSIZE $vdevs_list
create_pool "$TESTPOOL1" $vdevs_list
log_must vdevs_in_pool "$TESTPOOL1" "$vdevs_list"
create_pool $TESTPOOL1 $vdevs_list
log_must vdevs_in_pool $TESTPOOL1 "$vdevs_list"
if poolexists $TESTPOOL1; then
destroy_pool $TESTPOOL1

View File

@ -44,38 +44,26 @@
verify_runnable "global"
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
set -A args "" "-?" "-n" "-f" "-nf" "-fn" "-f -n" "--f" "-e" "-s" \
"-m" "-R" "-m -R" "-Rm" "-mR" "-m $TESTDIR $TESTPOOL" \
"-R $TESTDIR $TESTPOOL" "-m nodir $TESTPOOL $disk" \
"-R nodir $TESTPOOL $disk" "-m nodir -R nodir $TESTPOOL $disk" \
"-R nodir -m nodir $TESTPOOL $disk" "-R $TESTDIR -m nodir $TESTPOOL $disk" \
"-R nodir -m $TESTDIR $TESTPOOL $disk" \
"-R $TESTDIR $TESTPOOL" "-m nodir $TESTPOOL $DISK0" \
"-R nodir $TESTPOOL $DISK0" "-m nodir -R nodir $TESTPOOL $DISK0" \
"-R nodir -m nodir $TESTPOOL $DISK0" "-R $TESTDIR -m nodir $TESTPOOL $DISK0" \
"-R nodir -m $TESTDIR $TESTPOOL $DISK0" \
"-blah" "$TESTPOOL" "$TESTPOOL blah" "$TESTPOOL c?t0d0" \
"$TESTPOOL c0txd0" "$TESTPOOL c0t0dx" "$TESTPOOL cxtxdx" \
"$TESTPOOL mirror" "$TESTPOOL raidz" "$TESTPOOL mirror raidz" \
"$TESTPOOL raidz1" "$TESTPOOL mirror raidz1" \
"$TESTPOOL mirror c?t?d?" "$TESTPOOL mirror $disk c0t1d?" \
"$TESTPOOL RAIDZ ${disk}${SLICE_PREFIX}${SLICE0} \
${disk}${SLICE_PREFIX}${SLICE1}" \
"$TESTPOOL ${disk}${SLICE_PREFIX}${SLICE0} \
log ${disk}${SLICE_PREFIX}${SLICE1} \
log ${disk}${SLICE_PREFIX}${SLICE3}" \
"$TESTPOOL ${disk}${SLICE_PREFIX}${SLICE0} \
spare ${disk}${SLICE_PREFIX}${SLICE1} \
spare ${disk}${SLICE_PREFIX}${SLICE3}" \
"$TESTPOOL RAIDZ1 ${disk}${SLICE_PREFIX}${SLICE0} \
${disk}${SLICE_PREFIX}${SLICE1}" \
"$TESTPOOL MIRROR $disk" "$TESTPOOL raidz $disk" \
"$TESTPOOL raidz1 $disk" \
"1tank $disk" "1234 $disk" "?tank $disk" \
"tan%k $disk" "ta@# $disk" "tan+k $disk" \
"$BYND_MAX_NAME $disk"
"$TESTPOOL mirror c?t?d?" "$TESTPOOL mirror $DISK0 c0t1d?" \
"$TESTPOOL RAIDZ $DISK0 $DISK1" \
"$TESTPOOL $DISK0 log $DISK1 log $DISK2" \
"$TESTPOOL $DISK0 spare $DISK1 spare $DISK2" \
"$TESTPOOL RAIDZ1 $DISK0 $DISK1" \
"$TESTPOOL MIRROR $DISK0" "$TESTPOOL raidz $DISK0" \
"$TESTPOOL raidz1 $DISK0" \
"1tank $DISK0" "1234 $DISK0" "?tank $DISK0" \
"tan%k $DISK0" "ta@# $DISK0" "tan+k $DISK0" \
"$BYND_MAX_NAME $DISK0"
log_assert "'zpool create' should return an error with badly-formed parameters."
log_onexit default_cleanup_noexit

View File

@ -48,8 +48,7 @@ function cleanup
{
if [[ $exported_pool == true ]]; then
if [[ $force_pool == true ]]; then
log_must zpool create \
-f $TESTPOOL ${disk}${SLICE_PREFIX}${SLICE0}
log_must zpool create -f $TESTPOOL $DISK0
else
log_must zpool import $TESTPOOL
fi
@ -62,49 +61,6 @@ function cleanup
if poolexists $TESTPOOL1 ; then
destroy_pool $TESTPOOL1
fi
#
# recover it back to EFI label
#
create_pool $TESTPOOL $disk
destroy_pool $TESTPOOL
partition_disk $SIZE $disk 6
}
#
# create overlap slice 0 and 1 on $disk
#
function create_overlap_slice
{
typeset format_file=$TEST_BASE_DIR/format_overlap.$$
typeset disk=$1
echo "partition" >$format_file
echo "0" >> $format_file
echo "" >> $format_file
echo "" >> $format_file
echo "0" >> $format_file
echo "200m" >> $format_file
echo "1" >> $format_file
echo "" >> $format_file
echo "" >> $format_file
echo "0" >> $format_file
echo "400m" >> $format_file
echo "label" >> $format_file
echo "" >> $format_file
echo "q" >> $format_file
echo "q" >> $format_file
format -e -s -d $disk -f $format_file
typeset -i ret=$?
rm -fr $format_file
if (( ret != 0 )); then
log_fail "unable to create overlap slice."
fi
return 0
}
log_assert "'zpool create' have to use '-f' scenarios"
@ -113,44 +69,21 @@ log_onexit cleanup
typeset exported_pool=false
typeset force_pool=false
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
# overlapped slices as vdev need -f to create pool
# Make sure the disk is EFI labeled first via pool creation
create_pool $TESTPOOL $disk
create_pool $TESTPOOL $DISK0
destroy_pool $TESTPOOL
if ! is_linux; then
# Make sure the disk is VTOC labeled since only VTOC label supports overlap
log_must labelvtoc $disk
log_must create_overlap_slice $disk
unset NOINUSE_CHECK
log_mustnot zpool create $TESTPOOL ${disk}${SLICE_PREFIX}${SLICE0}
log_must zpool create -f $TESTPOOL ${disk}${SLICE_PREFIX}${SLICE0}
destroy_pool $TESTPOOL
fi
# exported device to be as spare vdev need -f to create pool
log_must zpool create -f $TESTPOOL $disk
log_must zpool create -f $TESTPOOL $DISK0
destroy_pool $TESTPOOL
log_must partition_disk $SIZE $disk 6
block_device_wait
create_pool $TESTPOOL ${disk}${SLICE_PREFIX}${SLICE0} \
${disk}${SLICE_PREFIX}${SLICE1}
create_pool $TESTPOOL $DISK0 $DISK1
log_must zpool export $TESTPOOL
exported_pool=true
log_mustnot zpool create $TESTPOOL1 ${disk}${SLICE_PREFIX}${SLICE3} \
spare ${disk}${SLICE_PREFIX}${SLICE1}
create_pool $TESTPOOL1 ${disk}${SLICE_PREFIX}${SLICE3} \
spare ${disk}${SLICE_PREFIX}${SLICE1}
log_mustnot zpool create $TESTPOOL1 $DISK1 spare $DISK2
create_pool $TESTPOOL1 $DISK1 spare $DISK2
force_pool=true
destroy_pool $TESTPOOL1

View File

@ -50,15 +50,10 @@ verify_runnable "global"
function cleanup
{
typeset dtst
typeset disk
typeset pool
for dtst in $TESTPOOL $TESTPOOL1; do
poolexists $dtst && destroy_pool $dtst
done
for disk in $DISKS; do
partition_disk $SIZE $disk 6
for pool in $TESTPOOL $TESTPOOL1; do
poolexists $pool && destroy_pool $pool
done
}
@ -69,26 +64,24 @@ log_onexit cleanup
unset NOINUSE_CHECK
typeset opt
for opt in "" "mirror" "raidz" "raidz1"; do
typeset disk="$DISKS"
(( ${#opt} == 0 )) && disk=${DISKS%% *}
typeset -i count=$(get_word_count $disk)
if (( count < 2 && ${#opt} != 0 )) ; then
continue
if [[ $opt == "" ]]; then
typeset disks=$DISK0
else
typeset disks=$DISKS
fi
# Create two pools but using the same disks.
create_pool $TESTPOOL $opt $disk
log_mustnot zpool create -f $TESTPOOL1 $opt $disk
create_pool $TESTPOOL $opt $disks
log_mustnot zpool create -f $TESTPOOL1 $opt $disks
destroy_pool $TESTPOOL
# Create two pools and part of the devices were overlapped
create_pool $TESTPOOL $opt $disk
log_mustnot zpool create -f $TESTPOOL1 $opt ${DISKS% *}
create_pool $TESTPOOL $opt $disks
log_mustnot zpool create -f $TESTPOOL1 $opt $DISK0
destroy_pool $TESTPOOL
# Create one pool but using the same disks twice.
log_mustnot zpool create -f $TESTPOOL $opt $disk $disk
log_mustnot zpool create -f $TESTPOOL $opt $disks $disks
done
log_pass "Using overlapping or in-use disks to create a new pool fails as expected."

View File

@ -55,18 +55,10 @@ function cleanup
done
rm -rf $TESTDIR
partition_disk $SIZE $disk 6
}
log_onexit cleanup
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
create_pool $TESTPOOL $disk
create_pool $TESTPOOL $DISK0
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

View File

@ -54,49 +54,40 @@ function cleanup
destroy_pool $pool
done
rm -rf $disk1 $disk2 $disk3
if [[ -n $saved_dump_dev ]]; then
log_must dumpadm -u -d $saved_dump_dev
fi
partition_disk $SIZE $disk 7
}
log_assert "'zpool create' should be failed with inapplicable scenarios."
log_onexit cleanup
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
pooldev1=${disk}${SLICE_PREFIX}${SLICE0}
pooldev2=${disk}${SLICE_PREFIX}${SLICE1}
mirror1="${disk}${SLICE_PREFIX}${SLICE1} ${disk}${SLICE_PREFIX}${SLICE3}"
mirror2="${disk}${SLICE_PREFIX}${SLICE4} ${disk}${SLICE_PREFIX}${SLICE5}"
disk1=$(create_blockfile $FILESIZE)
disk2=$(create_blockfile $FILESIZE)
disk3=$(create_blockfile $FILESIZE1)
mirror1="$DISK0 $DISK1"
mirror2="$disk1 $disk2"
raidz1=$mirror1
raidz2=$mirror2
diff_size_dev="${disk}${SLICE_PREFIX}${SLICE6} ${disk}${SLICE_PREFIX}${SLICE7}"
diff_size_dev="$disk2 $disk3"
vfstab_dev=$(find_vfstab_dev)
if is_illumos; then
specified_dump_dev=${disk}${SLICE_PREFIX}${SLICE0}
specified_dump_dev=${DISK0}s0
saved_dump_dev=$(save_dump_dev)
cyl=$(get_endslice $disk $SLICE6)
log_must set_partition $SLICE7 "$cyl" $SIZE1 $disk
else
partition_disk $SIZE $disk 7
cyl=$(get_endslice $disk $SLICE5)
log_must set_partition $SLICE6 "$cyl" $SIZE1 $disk
cyl=$(get_endslice $DISK0 6)
log_must set_partition 7 "$cyl" $SIZE1 $DISK0
fi
create_pool "$TESTPOOL" "$pooldev1"
create_pool $TESTPOOL $DISK0
#
# Set up the testing scenarios parameters
#
set -A arg "$TESTPOOL $pooldev2" \
"$TESTPOOL1 $pooldev1" \
"$TESTPOOL1 $TESTDIR0/$FILEDISK0" \
set -A arg \
"$TESTPOOL1 $DISK0" \
"$TESTPOOL1 mirror mirror $mirror1 mirror $mirror2" \
"$TESTPOOL1 raidz raidz $raidz1 raidz $raidz2" \
"$TESTPOOL1 raidz1 raidz1 $raidz1 raidz1 $raidz2" \
@ -109,7 +100,7 @@ set -A arg "$TESTPOOL $pooldev2" \
"$TESTPOOL1 raidz1 $diff_size_dev" \
"$TESTPOOL1 mirror $mirror1 spare $mirror2 spare $diff_size_dev" \
"$TESTPOOL1 $vfstab_dev" \
"$TESTPOOL1 ${disk}s10" \
"$TESTPOOL1 ${DISK0}s10" \
"$TESTPOOL1 spare $pooldev2"
unset NOINUSE_CHECK
@ -125,7 +116,7 @@ log_must zpool destroy -f $TESTPOOL
if is_illumos; then
# create/destroy a pool as a simple way to set the partitioning
# back to something normal so we can use this $disk as a dump device
log_must zpool create -f $TESTPOOL3 $disk
log_must zpool create -f $TESTPOOL3 $DISK1
log_must zpool destroy -f $TESTPOOL3
log_must dumpadm -d ${DEV_DSKDIR}/$specified_dump_dev
@ -134,7 +125,7 @@ if is_illumos; then
# Also check to see that in-use checking prevents us from creating
# a zpool from just the first slice on the disk.
log_mustnot zpool create \
-f $TESTPOOL1 ${specified_dump_dev}${SLICE_PREFIX}${SLICE0}
-f $TESTPOOL1 ${specified_dump_dev}s0
fi
log_pass "'zpool create' is failed as expected with inapplicable scenarios."

View File

@ -60,24 +60,17 @@ function cleanup
log_assert "'zpool create' should fail with regular file in swap."
log_onexit cleanup
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
if is_linux; then
set -A options "" "-f"
else
set -A options "-n" "" "-f"
fi
typeset pool_dev=${disk}${SLICE_PREFIX}${SLICE0}
typeset vol_name=$TESTPOOL/$TESTVOL
typeset mntp=/mnt
typeset TMP_FILE=$mntp/tmpfile.$$
create_pool $TESTPOOL $pool_dev
create_pool $TESTPOOL $DISK0
log_must zfs create -V 100m $vol_name
block_device_wait
log_must eval "new_fs ${ZVOL_DEVDIR}/$vol_name > /dev/null 2>&1"

View File

@ -61,13 +61,6 @@ function cleanup
}
unset NOINUSE_CHECK
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
typeset pool_dev=${disk}${SLICE_PREFIX}${SLICE0}
typeset vol_name=$TESTPOOL/$TESTVOL
log_assert "'zpool create' should fail with zfs vol device in swap."
@ -76,7 +69,7 @@ log_onexit cleanup
#
# use zfs vol device in swap to create pool which should fail.
#
create_pool $TESTPOOL $pool_dev
create_pool $TESTPOOL $DISK0
log_must zfs create -V 100m $vol_name
block_device_wait
swap_setup ${ZVOL_DEVDIR}/$vol_name

View File

@ -67,12 +67,6 @@ function cleanup
fi
}
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
typeset pool_dev=${disk}${SLICE_PREFIX}${SLICE0}
typeset swap_disks=$(swap -l | grep -v "swapfile" | awk '{print $1}')
typeset dump_device=$(dumpadm | grep "Dump device" | awk '{print $3}')
@ -88,7 +82,7 @@ for sdisk in $swap_disks; do
fi
done
log_must zpool create $TESTPOOL $pool_dev
log_must zpool create $TESTPOOL $DISK0
log_must zpool destroy $TESTPOOL
log_pass "'zpool create' passed as expected with applicable scenario."

View File

@ -51,14 +51,6 @@ function cleanup
rm -rf $TESTDIR
}
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
typeset pool_dev=${disk}${SLICE_PREFIX}${SLICE0}
log_assert "'zpool create' should fail with mountpoint exists and not empty."
log_onexit cleanup
@ -76,7 +68,7 @@ while (( i < 2 )); do
log_must touch $TESTDIR/testfile
fi
log_mustnot zpool create -m $TESTDIR -f $TESTPOOL $pool_dev
log_mustnot zpool create -m $TESTDIR -f $TESTPOOL $DISK0
log_mustnot poolexists $TESTPOOL
(( i = i + 1 ))

View File

@ -53,12 +53,6 @@ function cleanup
log_onexit cleanup
log_assert "zpool create can create pools with specified properties"
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
#
# we don't include "root" property in this list, as it requires both "cachefile"
# and "root" to be set at the same time. A test for this is included in
@ -70,7 +64,7 @@ typeset vals=("off" "off" "$CPATH" "3" "on")
typeset -i i=0;
while [ $i -lt "${#props[@]}" ]
do
log_must zpool create -o ${props[$i]}=${vals[$i]} $TESTPOOL $disk
log_must zpool create -o ${props[$i]}=${vals[$i]} $TESTPOOL $DISK0
RESULT=$(get_pool_prop ${props[$i]} $TESTPOOL)
if [[ $RESULT != ${vals[$i]} ]]
then
@ -86,7 +80,7 @@ done
poolexists $TESTPOOL && destroy_pool $TESTPOOL
# pick two properties, and verify we can create with those as well
log_must zpool create -o delegation=off -o cachefile=$CPATH $TESTPOOL $disk
log_must zpool create -o delegation=off -o cachefile=$CPATH $TESTPOOL $DISK0
RESULT=$(get_pool_prop delegation $TESTPOOL)
if [[ $RESULT != off ]]
then

View File

@ -50,12 +50,6 @@ log_onexit cleanup
log_assert "zpool create cannot create pools specifying readonly properties"
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
set -A props "available" "capacity" "guid" "health" "size" "used"
set -A vals "100" "10" "12345" "HEALTHY" "10" "10"
@ -63,7 +57,7 @@ typeset -i i=0;
while [ $i -lt "${#props[@]}" ]
do
# try to set each property in the prop list with it's corresponding val
log_mustnot zpool create -o ${props[$i]}=${vals[$i]} $TESTPOOL $disk
log_mustnot zpool create -o ${props[$i]}=${vals[$i]} $TESTPOOL $DISK0
if poolexists $TESTPOOL
then
log_fail "$TESTPOOL was created when setting ${props[$i]}!"

View File

@ -57,15 +57,9 @@ log_assert "zpool create -R works as expected"
typeset values=$TEST_BASE_DIR/values.$$
if [[ -n $DISK ]]; then
disk=$DISK
else
disk=$DISK0
fi
log_must rm -f /etc/zfs/zpool.cache
log_must rm -rf /${TESTPOOL}.root
log_must zpool create -R /${TESTPOOL}.root $TESTPOOL $disk
log_must zpool create -R /${TESTPOOL}.root $TESTPOOL $DISK0
if [ ! -d /${TESTPOOL}.root ]
then
log_fail "Mountpoint was not created when using zpool with -R flag!"

View File

@ -50,8 +50,8 @@ typeset fsprops=('canmount=off' 'mountpoint=none' 'utf8only=on'
for poolprop in "${poolprops[@]}"; do
for fsprop in "${fsprops[@]}"; do
# 1. Create a pool with '-t' option
log_must zpool create $TESTPOOL -t $TEMPPOOL \
-O $fsprop -o $poolprop $DISKS
log_must zpool create -t $TEMPPOOL -O $fsprop -o $poolprop \
$TESTPOOL $DISKS
# 2. Verify the pool is created with the specified temporary name
log_must poolexists $TEMPPOOL
log_mustnot poolexists $TESTPOOL

View File

@ -48,8 +48,8 @@ function cleanup
log_assert "zpool replace -o ashift=<n>' works with different ashift values"
log_onexit cleanup
disk1=$TEST_BASE_DIR/$FILEDISK0
disk2=$TEST_BASE_DIR/$FILEDISK1
disk1=$TEST_BASE_DIR/disk1
disk2=$TEST_BASE_DIR/disk2
log_must truncate -s $SIZE $disk1
log_must truncate -s $SIZE $disk2

View File

@ -50,8 +50,8 @@ function cleanup
log_assert "'zpool replace' uses the ashift pool property value as default."
log_onexit cleanup
disk1=$TEST_BASE_DIR/$FILEDISK0
disk2=$TEST_BASE_DIR/$FILEDISK1
disk1=$TEST_BASE_DIR/disk1
disk2=$TEST_BASE_DIR/disk2
log_must truncate -s $SIZE $disk1
log_must truncate -s $SIZE $disk2

View File

@ -52,7 +52,7 @@ log_onexit cleanup
log_assert "zpool set can modify 'ashift' property"
disk=$TEST_BASE_DIR/$FILEDISK0
disk=$TEST_BASE_DIR/disk
log_must mkfile $SIZE $disk
log_must zpool create $TESTPOOL1 $disk

View File

@ -30,100 +30,25 @@
. $STF_SUITE/include/libtest.shlib
if is_linux; then
export DISKSARRAY=$DISKS
export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
set_device_dir
set_slice_prefix
export SLICE0=1
export SLICE1=2
else
export SLICE0=0
export SLICE1=1
fi
verify_disk_count "$DISKS" 2
set -A disk_array $(find_disks $DISKS)
case "${#disk_array[@]}" in
2)
FS_DISK0=${disk_array[0]}
FS_DISK1=${disk_array[1]}
FS_DISK2=${disk_array[0]}
FS_DISK3=${disk_array[1]}
FS_SIDE0=${FS_DISK0}${SLICE_PREFIX}${SLICE0}
FS_SIDE1=${FS_DISK0}${SLICE_PREFIX}${SLICE1}
FS_SIDE2=${FS_DISK1}${SLICE_PREFIX}${SLICE0}
FS_SIDE3=${FS_DISK1}${SLICE_PREFIX}${SLICE1}
disk0="${DEV_DSKDIR}/$FS_SIDE0"
disk1="${DEV_DSKDIR}/$FS_SIDE1"
disk2="${DEV_DSKDIR}/$FS_SIDE2"
disk3="${DEV_DSKDIR}/$FS_SIDE3"
disktargets="$disk0 $disk2"
rawdisk0="${DEV_RDSKDIR}/$FS_SIDE0"
rawdisk1="${DEV_RDSKDIR}/$FS_SIDE1"
rawdisk2="${DEV_RDSKDIR}/$FS_SIDE2"
rawdisk3="${DEV_RDSKDIR}/$FS_SIDE3"
rawtargets="$rawdisk0 $rawdisk2"
vdisks="$FS_DISK0"
sdisks="$FS_DISK1"
vslices="$FS_SIDE0 $FS_SIDE1 $FS_SIDE2"
sslices="$FS_SIDE3"
;;
3)
FS_DISK0=${disk_array[0]}
FS_DISK1=${disk_array[1]}
FS_DISK2=${disk_array[2]}
FS_DISK3=${disk_array[0]}
FS_SIDE0=${FS_DISK0}${SLICE_PREFIX}${SLICE0}
FS_SIDE1=${FS_DISK0}${SLICE_PREFIX}${SLICE1}
FS_SIDE2=${FS_DISK1}${SLICE_PREFIX}${SLICE0}
FS_SIDE3=${FS_DISK2}${SLICE_PREFIX}${SLICE0}
disk0="${DEV_DSKDIR}/$FS_SIDE0"
disk1="${DEV_DSKDIR}/$FS_SIDE1"
disk2="${DEV_DSKDIR}/$FS_SIDE2"
disk3="${DEV_DSKDIR}/$FS_SIDE3"
disktargets="$disk0 $disk2 $disk3"
rawdisk0="${DEV_RDSKDIR}/$FS_SIDE0"
rawdisk1="${DEV_RDSKDIR}/$FS_SIDE1"
rawdisk2="${DEV_RDSKDIR}/$FS_SIDE2"
rawdisk3="${DEV_RDSKDIR}/$FS_SIDE3"
rawtargets="$rawdisk0 $rawdisk2 $rawdisk3"
disk0="${DEV_DSKDIR}/$FS_DISK0"
disk1="${DEV_DSKDIR}/$FS_DISK1"
disk2="${DEV_DSKDIR}/$FS_DISK2"
disktargets="$disk0 $disk1 $disk2"
rawdisk0="${DEV_RDSKDIR}/$FS_DISK0"
rawdisk1="${DEV_RDSKDIR}/$FS_DISK1"
rawdisk2="${DEV_RDSKDIR}/$FS_DISK2"
rawtargets="$rawdisk0 $rawdisk1 $rawdisk2"
vdisks="$FS_DISK0 $FS_DISK1"
sdisks="$FS_DISK2"
vslices="$FS_SIDE0 $FS_SIDE2 $FS_SIDE3"
sslices="$FS_SIDE1"
;;
*)
FS_DISK0=${disk_array[0]}
FS_DISK1=${disk_array[1]}
FS_DISK2=${disk_array[2]}
FS_DISK3=${disk_array[3]}
FS_SIDE0=${FS_DISK0}${SLICE_PREFIX}${SLICE0}
FS_SIDE1=${FS_DISK1}${SLICE_PREFIX}${SLICE0}
FS_SIDE2=${FS_DISK2}${SLICE_PREFIX}${SLICE0}
FS_SIDE3=${FS_DISK3}${SLICE_PREFIX}${SLICE0}
disk0="${DEV_DSKDIR}/$FS_SIDE0"
disk1="${DEV_DSKDIR}/$FS_SIDE1"
disk2="${DEV_DSKDIR}/$FS_SIDE2"
disk3="${DEV_DSKDIR}/$FS_SIDE3"
disktargets="$disk0 $disk1 $disk2 $disk3"
rawdisk0="${DEV_RDSKDIR}/$FS_SIDE0"
rawdisk1="${DEV_RDSKDIR}/$FS_SIDE1"
rawdisk2="${DEV_RDSKDIR}/$FS_SIDE2"
rawdisk3="${DEV_RDSKDIR}/$FS_SIDE3"
rawtargets="$rawdisk0 $rawdisk1 $rawdisk2 $rawdisk3"
vdisks="$FS_DISK0 $FS_DISK1 $FS_DISK2"
sdisks="$FS_DISK3"
vslices="$FS_SIDE0 $FS_SIDE1 $FS_SIDE2"
sslices="$FS_SIDE3"
;;
esac
export FS_DISK0 FS_DISK1 FS_DISK2 FS_DISK3 SINGLE_DISK
export FS_SIDE0 FS_SIDE1 FS_SIDE2 FS_SIDE3
export disk0 disk1 disk2 disk3 disktargets
export rawdisk0 rawdisk1 rawdisk2 rawdisk3 rawtargets
export vdisks sdisks vslices sslices
export FS_DISK0 FS_DISK1 FS_DISK2
export disk0 disk1 disk2 disktargets
export rawdisk0 rawdisk1 rawdisk2 rawtargets
export vdisks sdisks
export UFSMP=$TESTDIR/testinuseufsdump
export FS_SIZE=1g

View File

@ -60,27 +60,25 @@ log_assert "Ensure ZFS cannot use a device designated as a dump device"
log_onexit cleanup
typeset dumpdev=""
typeset diskslice=""
PREVDUMPDEV=`dumpadm | grep "Dump device" | awk '{print $3}'`
log_note "Zero $FS_DISK0 and place free space in to slice 0"
log_note "Zero $FS_DISK0"
log_must cleanup_devices $FS_DISK0
diskslice="${DEV_DSKDIR}/${FS_DISK0}${SLICE0}"
log_note "Configuring $diskslice as dump device"
log_must dumpadm -d $diskslice > /dev/null
log_note "Configuring $rawdisk0 as dump device"
log_must dumpadm -d $rawdisk0 > /dev/null
log_note "Confirm that dump device has been setup"
dumpdev=`dumpadm | grep "Dump device" | awk '{print $3}'`
[[ -z "$dumpdev" ]] && log_untested "No dump device has been configured"
[[ "$dumpdev" != "$diskslice" ]] && \
log_untested "Dump device has not been configured to $diskslice"
[[ "$dumpdev" != "$rawdisk0" ]] && \
log_untested "Dump device has not been configured to $rawdisk0"
log_note "Attempt to zpool the dump device"
unset NOINUSE_CHECK
log_mustnot zpool create $TESTPOOL "$diskslice"
log_mustnot zpool create $TESTPOOL "$rawdisk0"
log_mustnot poolexists $TESTPOOL
log_pass "Unable to zpool a device in use by dumpadm"

View File

@ -94,15 +94,6 @@ typeset restored_files="${UFSMP}/restored_files"
typeset -i dirnum=0
typeset -i filenum=0
typeset cwd=""
typeset cyl=""
for num in 0 1 2; do
eval typeset slice=\${FS_SIDE$num}
disk=${slice%s*}
slice=${slice##*${SLICE_PREFIX}}
log_must set_partition $slice "$cyl" $FS_SIZE $disk
cyl=$(get_endslice $disk $slice)
done
log_note "Make a ufs filesystem on source $rawdisk1"
new_fs $rawdisk1 > /dev/null 2>&1
@ -145,7 +136,7 @@ log_mustnot zpool create $TESTPOOL1 "$disk1"
log_mustnot poolexists $TESTPOOL1
log_note "Attempt to take the source device in use by ufsdump as spare device"
log_mustnot zpool create $TESTPOOL1 "$FS_SIDE2" spare "$disk1"
log_mustnot zpool create $TESTPOOL1 "$FS_DISK2" spare "$disk1"
log_mustnot poolexists $TESTPOOL1
wait $PIDUFSDUMP
@ -171,7 +162,7 @@ log_mustnot poolexists $TESTPOOL2
log_note "Attempt to take the restored device in use by ufsrestore as spare" \
"device"
log_mustnot zpool create -f $TESTPOOL2 "$FS_SIDE2" spare "$disk1"
log_mustnot zpool create -f $TESTPOOL2 "$FS_DISK2" spare "$disk1"
log_mustnot poolexists $TESTPOOL2
log_pass "Unable to zpool over a device in use by ufsdump or ufsrestore"

View File

@ -58,7 +58,7 @@ function cleanup
cleanup_devices $vdisks $sdisks
}
function verify_assertion #slices
function verify_assertion #disks
{
typeset targets=$1
@ -82,39 +82,11 @@ typeset -i i=0
unset NOINUSE_CHECK
while (( i < ${#vdevs[*]} )); do
for num in 0 1 2 3 ; do
eval typeset disk=\${FS_DISK$num}
zero_partitions $disk
done
typeset cyl=""
for num in 0 1 2 3 ; do
eval typeset slice=\${FS_SIDE$num}
disk=${slice%${SLICE_PREFIX}*}
[[ -z $SLICE_PREFIX ]] && eval typeset disk=\${FS_DISK$num}
slice=$(echo $slice | awk '{ print substr($1,length($1),1) }')
log_must set_partition $slice "$cyl" $FS_SIZE $disk
[[ $num < 3 ]] && cyl=$(get_endslice $disk $slice)
done
typeset spare="spare $sdisks"
if [[ -n $SINGLE_DISK && -n ${vdevs[i]} ]]; then
(( i = i + 1 ))
continue
fi
create_pool $TESTPOOL1 ${vdevs[i]} $vslices spare $sslices
verify_assertion "$rawtargets"
destroy_pool $TESTPOOL1
if [[ ( $FS_DISK0 == $FS_DISK2 ) && -n ${vdevs[i]} ]]; then
(( i = i + 1 ))
continue
fi
if [[ ( $FS_DISK0 == $FS_DISK3 ) && ( ${vdevs[i]} == "raidz2" ) ]]; then
(( i = i + 1 ))
continue
fi
create_pool $TESTPOOL1 ${vdevs[i]} $vdisks spare $sdisks
# If this is for raidz2, use 3 disks for the pool.
[[ ${vdevs[i]} = "raidz2" ]] && spare="$sdisks"
create_pool $TESTPOOL1 ${vdevs[i]} $vdisks $spare
verify_assertion "$rawtargets"
destroy_pool $TESTPOOL1

View File

@ -58,7 +58,7 @@ function cleanup
cleanup_devices $vdisks $sdisks
}
function verify_assertion #slices
function verify_assertion # disks
{
typeset targets=$1
@ -81,39 +81,11 @@ PREVDUMPDEV=`dumpadm | grep "Dump device" | awk '{print $3}'`
unset NOINUSE_CHECK
while (( i < ${#vdevs[*]} )); do
typeset spare="spare $sdisks"
for num in 0 1 2 3 ; do
eval typeset disk=\${FS_DISK$num}
zero_partitions $disk
done
for num in 0 1 2 3 ; do
eval typeset slice=\${FS_SIDE$num}
disk=${slice%${SLICE_PREFIX}*}
slice=${slice##*${SLICE_PREFIX}}
log_must set_partition $slice "" $FS_SIZE $disk
done
if [[ -n $SINGLE_DISK && -n ${vdevs[i]} ]]; then
(( i = i + 1 ))
continue
fi
create_pool $TESTPOOL1 ${vdevs[i]} $vslices spare $sslices
verify_assertion "$disktargets"
destroy_pool $TESTPOOL1
if [[ ( $FS_DISK0 == $FS_DISK2 ) && -n ${vdevs[i]} ]]; then
(( i = i + 1 ))
continue
fi
if [[ ( $FS_DISK0 == $FS_DISK3 ) && ( ${vdevs[i]} == "raidz2" ) ]]; then
(( i = i + 1 ))
continue
fi
create_pool $TESTPOOL1 ${vdevs[i]} $vdisks spare $sdisks
# If this is for raidz2, use 3 disks for the pool.
[[ ${vdevs[i]} = "raidz2" ]] && spare="$sdisks"
create_pool $TESTPOOL1 ${vdevs[i]} $vdisks $spare
verify_assertion "$disktargets"
destroy_pool $TESTPOOL1

View File

@ -61,7 +61,7 @@ function cleanup
cleanup_devices $vdisks $sdisks
}
function verify_assertion #slices
function verify_assertion # disks
{
typeset targets=$1
@ -85,41 +85,11 @@ typeset -i i=0
PREVDUMPDEV=`dumpadm | grep "Dump device" | awk '{print $3}'`
while (( i < ${#vdevs[*]} )); do
typeset spare="spare $sdisks"
for num in 0 1 2 3 ; do
eval typeset disk=\${FS_DISK$num}
zero_partitions $disk
done
for num in 0 1 2 3 ; do
eval typeset slice=\${FS_SIDE$num}
disk=${slice%${SLICE_PREFIX}*}
slice=${slice##*${SLICE_PREFIX}}
log_must set_partition $slice "" $FS_SIZE $disk
done
if [[ -n $SINGLE_DISK && -n ${vdevs[i]} ]]; then
(( i = i + 1 ))
continue
fi
create_pool $TESTPOOL1 ${vdevs[i]} $vslices spare $sslices
log_must zpool export $TESTPOOL1
verify_assertion "$disktargets"
log_must zpool import $TESTPOOL1
destroy_pool $TESTPOOL1
if [[ ( $FS_DISK0 == $FS_DISK2 ) && -n ${vdevs[i]} ]]; then
(( i = i + 1 ))
continue
fi
if [[ ( $FS_DISK0 == $FS_DISK3 ) && ( ${vdevs[i]} == "raidz2" ) ]]; then
(( i = i + 1 ))
continue
fi
create_pool $TESTPOOL1 ${vdevs[i]} $vdisks spare $sdisks
# If this is for raidz2, use 3 disks for the pool.
[[ ${vdevs[i]} = "raidz2" ]] && spare="$sdisks"
create_pool $TESTPOOL1 ${vdevs[i]} $vdisks $spare
log_must zpool export $TESTPOOL1
verify_assertion "$disktargets"
log_must zpool import $TESTPOOL1

View File

@ -61,7 +61,7 @@ function cleanup
cleanup_devices $vdisks $sdisks
}
function verify_assertion #slices
function verify_assertion # disks
{
typeset targets=$1
@ -82,29 +82,12 @@ log_onexit cleanup
set -A vdevs "" "mirror" "raidz" "raidz1" "raidz2"
typeset -i i=0
typeset cyl=""
for num in 0 1 2 3 ; do
eval typeset disk=\${FS_DISK$num}
zero_partitions $disk
done
for num in 0 1 2 3 ; do
eval typeset slice=\${FS_SIDE$num}
disk=${slice%${SLICE_PREFIX}*}
[[ -z $SLICE_PREFIX ]] && eval typeset disk=\${FS_DISK$num}
slice=$(echo $slice | awk '{ print substr($1,length($1),1) }')
log_must set_partition $slice "$cyl" $FS_SIZE $disk
[[ $num < 3 ]] && cyl=$(get_endslice $disk $slice)
done
while (( i < ${#vdevs[*]} )); do
if [[ -n $SINGLE_DISK && -n ${vdevs[i]} ]]; then
(( i = i + 1 ))
continue
fi
typeset spare="spare $sdisks"
create_pool $TESTPOOL1 ${vdevs[i]} $vslices spare $sslices
# If this is for raidz2, use 3 disks for the pool.
[[ ${vdevs[i]} = "raidz2" ]] && spare="$sdisks"
create_pool $TESTPOOL1 ${vdevs[i]} $vdisks $spare
log_must zpool export $TESTPOOL1
verify_assertion "$rawtargets"

View File

@ -79,44 +79,12 @@ log_onexit cleanup
set -A vdevs "" "mirror" "raidz" "raidz1" "raidz2"
typeset -i i=0
while (( i < ${#vdevs[*]} )); do
typeset spare="spare $sdisks"
for num in 0 1 2 3 ; do
eval typeset disk=\${FS_DISK$num}
zero_partitions $disk
done
typeset cyl=""
for num in 0 1 2 3 ; do
eval typeset slice=\${FS_SIDE$num}
disk=${slice%${SLICE_PREFIX}*}
[[ -z $SLICE_PREFIX ]] && eval typeset disk=\${FS_DISK$num}
slice=$(echo $slice | awk '{ print substr($1,length($1),1) }')
log_must set_partition $slice "$cyl" $FS_SIZE $disk
[[ $num < 3 ]] && cyl=$(get_endslice $disk $slice)
done
if [[ -n $SINGLE_DISK && -n ${vdevs[i]} ]]; then
(( i = i + 1 ))
continue
fi
create_pool $TESTPOOL1 ${vdevs[i]} $vslices spare $sslices
log_must zpool export $TESTPOOL1
verify_assertion "$vdisks $sdisks"
if [[ ( $FS_DISK0 == $FS_DISK2 ) && -n ${vdevs[i]} ]]; then
(( i = i + 1 ))
continue
fi
if [[ ( $FS_DISK0 == $FS_DISK3 ) && ( ${vdevs[i]} == "raidz2" ) ]]; then
(( i = i + 1 ))
continue
fi
create_pool $TESTPOOL1 ${vdevs[i]} $vdisks spare $sdisks
# If this is for raidz2, use 3 disks for the pool.
[[ ${vdevs[i]} = "raidz2" ]] && spare="$sdisks"
create_pool $TESTPOOL1 ${vdevs[i]} $vdisks $spare
log_must zpool export $TESTPOOL1
verify_assertion "$vdisks $sdisks"

View File

@ -45,24 +45,27 @@ verify_runnable "both"
function cleanup
{
poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1
rm -f $testfile0
}
log_onexit cleanup
log_assert "ENOSPC is returned on pools with large physical block size"
typeset testfile0=${TESTDIR}/testfile0
log_must zpool create -o ashift=13 $TESTPOOL1 $DISK_LARGE
log_must zfs set mountpoint=$TESTDIR $TESTPOOL1
log_must zfs set compression=off $TESTPOOL1
log_must zfs set recordsize=512 $TESTPOOL1
log_must zfs set copies=3 $TESTPOOL1
log_note "Writing file: $TESTFILE0 until ENOSPC."
file_write -o create -f $TESTDIR/$TESTFILE0 -b $BLOCKSZ \
log_note "Writing file: $testfile0 until ENOSPC."
file_write -o create -f $testfile0 -b $BLOCKSZ \
-c $NUM_WRITES -d $DATA
ret=$?
(( $ret != $ENOSPC )) && \
log_fail "$TESTFILE0 returned: $ret rather than ENOSPC."
log_fail "$testfile0 returned: $ret rather than ENOSPC."
log_pass "ENOSPC returned as expected."