Merge commit 'refs/top-bases/linux-zfs-branch' into linux-zfs-branch

Brian Behlendorf 2010-08-05 11:33:41 -07:00
commit 9e4b51ab27
3 changed files with 150 additions and 82 deletions


@@ -15,6 +15,7 @@ MODULES=(zlib_deflate spl splat zavl znvpair zunicode zcommon zfs)
 fi
 PROG="<define PROG>"
+CLEANUP=
 VERBOSE=
 VERBOSE_FLAG=
 FORCE=
@@ -212,6 +213,24 @@ unused_loop_device() {
	die "Error: Unable to find unused loopback device"
 }

+#
+# This can be slightly dangerous because the loop devices we are
+# cleaning up may not be ours.  However, if the devices are currently
+# in use we will not be able to remove them, and we only remove
+# devices which include 'zpool' in the name.  So any damage we might
+# do should be limited to other zfs related testing.
+#
+cleanup_loop_devices() {
+	local TMP_FILE=`mktemp`
+
+	${LOSETUP} -a | tr -d '()' >${TMP_FILE}
+	${AWK} -F":" -v losetup="$LOSETUP" \
+	    '/zpool/ { system("losetup -d "$1) }' ${TMP_FILE}
+	${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}
+
+	rm -f ${TMP_FILE}
+}
+
 #
 # The following udev helper functions assume that the provided
 # udev rules file will create a /dev/disk/zpool/<CHANNEL><RANK>
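For context on the parsing in cleanup_loop_devices above (illustrative only, not part of the commit): `losetup -a` typically prints lines such as `/dev/loop0: [0805]:123456 (/tmp/zpool-vdev0)`. Once `tr -d '()'` strips the parentheses, the first colon-delimited field is the loop device to detach and the third whitespace-delimited field is the backing file to delete, which is exactly what the two awk passes consume:

    # Same field extraction as above, printing instead of deleting:
    losetup -a | tr -d '()' | awk -F":" '/zpool/ { print "detach:", $1 }'
    losetup -a | tr -d '()' | awk -F" "  '/zpool/ { print "unlink:", $3 }'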


@@ -16,7 +16,7 @@ PROG=zconfig.sh
 usage() {
 cat << EOF
 USAGE:
-$0 [hv]
+$0 [hvc]

 DESCRIPTION:
	ZFS/ZPOOL configuration tests
@@ -24,11 +24,12 @@ DESCRIPTION:
 OPTIONS:
	-h      Show this message
	-v      Verbose
+	-c      Cleanup lo+file devices at start
 EOF
 }

-while getopts 'hv' OPTION; do
+while getopts 'hvc?' OPTION; do
	case $OPTION in
	h)
		usage
@@ -37,6 +38,9 @@ while getopts 'hv' OPTION; do
	v)
		VERBOSE=1
		;;
+	c)
+		CLEANUP=1
+		;;
	?)
		usage
		exit
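With the new flag wired into getopts, a typical run looks like the sketch below (the script path is an assumption; the flag meanings come from the usage text above):

    # Illustrative invocation; -c removes stale zpool loopback devices and
    # cached /tmp/zpool.cache.* files before the tests start.
    sudo ./zconfig.sh -c -v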
@@ -48,12 +52,34 @@ if [ $(id -u) != 0 ]; then
	die "Must run as root"
 fi

+# Perform pre-cleanup if requested
+if [ ${CLEANUP} ]; then
+	cleanup_loop_devices
+	rm -f /tmp/zpool.cache.*
+fi
+
+zconfig_partition() {
+	local DEVICE=$1
+	local START=$2
+	local END=$3
+	local TMP_FILE=`mktemp`
+
+	/sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
+${START},${END}
+;
+;
+;
+EOF
+
+	rm ${TMP_FILE}
+}
+
 # Validate persistent zpool.cache configuration.
 zconfig_test1() {
-	POOL_NAME=test1
-	TMP_FILE1=`mktemp`
-	TMP_FILE2=`mktemp`
-	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+	local POOL_NAME=test1
+	local TMP_FILE1=`mktemp`
+	local TMP_FILE2=`mktemp`
+	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	echo -n "test 1 - persistent zpool.cache: "
@@ -79,10 +105,10 @@ zconfig_test1
 # Validate ZFS disk scanning and import w/out zpool.cache configuration.
 zconfig_test2() {
-	POOL_NAME=test2
-	TMP_FILE1=`mktemp`
-	TMP_FILE2=`mktemp`
-	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+	local POOL_NAME=test2
+	local TMP_FILE1=`mktemp`
+	local TMP_FILE2=`mktemp`
+	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	echo -n "test 2 - scan disks for pools to import: "
@@ -113,12 +139,11 @@ zconfig_test2
 # ZVOL sanity check
 zconfig_test3() {
-	POOL_NAME=tank
-	ZVOL_NAME=fish
-	FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
-	SRC_DIR=/bin/
-	TMP_FILE1=`mktemp`
-	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+	local POOL_NAME=tank
+	local ZVOL_NAME=fish
+	local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
+	local SRC_DIR=/bin/
+	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	echo -n "test 3 - zvol+ext3 sanity: "
@@ -129,15 +154,10 @@ zconfig_test3() {
	# Partition the volume, for a 400M volume there will be
	# 812 cylinders, 16 heads, and 63 sectors per track.
-	/sbin/sfdisk -q /dev/zvol/${FULL_NAME} << EOF &>${TMP_FILE1} || fail 4
-,812
-;
-;
-;
-EOF
+	zconfig_partition /dev/zvol/${FULL_NAME} 0 812

	# Format the partition with ext3.
-	/sbin/mkfs.ext3 /dev/zvol/${FULL_NAME}1 &>${TMP_FILE1} || fail 5
+	/sbin/mkfs.ext3 -q /dev/zvol/${FULL_NAME}1 || fail 5

	# Mount the ext3 filesystem and copy some data to it.
	mkdir -p /tmp/${ZVOL_NAME} || fail 6
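The 812-cylinder figure in the comment above checks out: with 16 heads and 63 sectors per track at 512 bytes per sector, one cylinder is 516096 bytes, so a 400 MiB volume spans 812 whole cylinders.

    # 16 heads * 63 sectors/track * 512 bytes/sector = 516096 bytes/cylinder
    echo $(( (400 * 1024 * 1024) / (16 * 63 * 512) ))   # prints 812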
@@ -152,67 +172,93 @@ EOF
	umount /tmp/${ZVOL_NAME} || fail 11
	${ZFS} destroy ${FULL_NAME} || fail 12
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 13
-	rm -f ${TMP_FILE1} || fail 14
	${ZFS_SH} -u || fail 15

	pass
 }
 zconfig_test3

-# zpool import/export device check (1 volume, 1 snapshot, 1 clone)
+zconfig_zvol_device_stat() {
+	local EXPECT=$1
+	local POOL_NAME=/dev/zvol/$2
+	local ZVOL_NAME=/dev/zvol/$3
+	local SNAP_NAME=/dev/zvol/$4
+	local CLONE_NAME=/dev/zvol/$5
+	local COUNT=0
+
+	# Pool exists
+	stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1
+
+	# Volume and partitions
+	stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
+	stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+	stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+	# Snapshot with partitions
+	stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
+	stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+	stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+	# Clone with partitions
+	stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
+	stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+	stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+	if [ $EXPECT -ne $COUNT ]; then
+		return 1
+	fi
+
+	return 0
+}
+
+# zpool import/export device check
+# (1 volume, 2 partitions, 1 snapshot, 1 clone)
 zconfig_test4() {
-	POOL_NAME=tank
-	ZVOL_NAME=volume
-	SNAP_NAME=snapshot
-	CLONE_NAME=clone
-	FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
-	FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
-	FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
-	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+	local POOL_NAME=tank
+	local ZVOL_NAME=volume
+	local SNAP_NAME=snapshot
+	local CLONE_NAME=clone
+	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
+	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
+	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
+	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	echo -n "test 4 - zpool import/export device: "

-	# Create a pool, volume, snapshot, and clone
+	# Create a pool, volume, partition, snapshot, and clone.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
-	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 4
-	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 5
+	zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
+	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
+	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

	# Verify the devices were created
-	stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 6
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 7
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 8
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 9
+	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

	# Export the pool
-	${ZPOOL} export ${POOL_NAME} || fail 10
+	${ZPOOL} export ${POOL_NAME} || fail 8

-	# Verify the devices were removed
-	stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 11
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 12
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 13
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 14
+	# verify the devices were removed
+	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

	# Import the pool, wait 1 second for udev
-	${ZPOOL} import ${POOL_NAME} && sleep 1 || fail 15
+	${ZPOOL} import ${POOL_NAME} && sleep 1 || fail 10

	# Verify the devices were created
-	stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 16
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 17
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 18
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 19
+	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

	# Destroy the pool and consequently the devices
-	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
+	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

-	# Verify the devices were removed
-	stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 21
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 22
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 23
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 24
+	# verify the devices were removed
+	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

-	${ZFS_SH} -u || fail 25
+	${ZFS_SH} -u || fail 14

	pass
 }
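For reference, zconfig_zvol_device_stat probes ten /dev/zvol nodes: the pool directory, then the whole-device node and the first two partition nodes for each of the volume, the snapshot, and the clone. Passing EXPECT=10 asserts that every node exists, and EXPECT=0 asserts that none do. An expanded form of the test 4 call, with the variables substituted for illustration:

    # 1 (pool dir) + 3 (volume) + 3 (snapshot) + 3 (clone) = 10 probes
    zconfig_zvol_device_stat 10 tank tank/volume \
        tank/volume@snapshot tank/clone || fail 7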
@@ -235,43 +281,36 @@ zconfig_test5() {
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
-	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 4
-	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 5
+	zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
+	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
+	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

	# Verify the devices were created
-	stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 6
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 7
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 8
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 9
+	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

	# Unload the modules
-	${ZFS_SH} -u || fail 10
+	${ZFS_SH} -u || fail 8

	# Verify the devices were removed
-	stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 11
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 12
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 13
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 14
+	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

	# Load the modules, wait 1 second for udev
-	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" && sleep 1 || fail 15
+	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" && sleep 1 || fail 10

	# Verify the devices were created
-	stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 16
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 17
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 18
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 19
+	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

	# Destroy the pool and consequently the devices
-	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
+	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

	# Verify the devices were removed
-	stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 21
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 22
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 23
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 24
+	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

-	${ZFS_SH} -u || fail 25
+	${ZFS_SH} -u || fail 14

	pass
 }
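Both of the tests above rely on a fixed one-second sleep to give udev time to create the zvol nodes after an import or module load. As a hedged alternative, not part of this commit, the same synchronization can usually be expressed by blocking on the udev event queue instead:

    # Hypothetical alternative to "sleep 1" (not in this commit): wait until
    # udev has finished processing queued events, capped at 10 seconds.
    ${ZPOOL} import ${POOL_NAME} || fail 10
    udevadm settle --timeout=10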


@@ -17,7 +17,7 @@ HEADER=
 usage() {
 cat << EOF
 USAGE:
-$0 [hv]
+$0 [hvxfc]

 DESCRIPTION:
	ZPIOS sanity tests
@@ -27,11 +27,12 @@ OPTIONS:
	-v      Verbose
	-x      Destructive hd/sd/md/dm/ram tests
	-f      Don't prompt due to -x
+	-c      Cleanup lo+file devices at start
 EOF
 }

-while getopts 'hvxf' OPTION; do
+while getopts 'hvxfc?' OPTION; do
	case $OPTION in
	h)
		usage
@@ -46,6 +47,9 @@ while getopts 'hvxf' OPTION; do
	f)
		FORCE=1
		;;
+	c)
+		CLEANUP=1
+		;;
	?)
		usage
		exit
@@ -57,6 +61,12 @@ if [ $(id -u) != 0 ]; then
	die "Must run as root"
 fi

+# Perform pre-cleanup if requested
+if [ ${CLEANUP} ]; then
+	cleanup_loop_devices
+	rm -f /tmp/zpool.cache.*
+fi
+
 zpios_test() {
	CONFIG=$1
	TEST=$2