From 019953e0b7736ea8742e02186f3c43d0366b7ef9 Mon Sep 17 00:00:00 2001
From: Brian Behlendorf
Date: Thu, 5 Aug 2010 10:17:05 -0700
Subject: [PATCH 1/2] Update zconfig to check partitions

Update the zconfig.sh test script to verify not only that volumes,
snapshots, and clones are created and removed properly, but also that
the partition information for each of these device types is properly
enumerated by the kernel.

Tests 4 and 5 now also create two partitions on the original volume,
and these partitions are expected to also exist on the snapshot and
the clone.  Correctness is verified after import/export, module
load/unload, dataset creation, and pool destruction.

Additionally, the code to create a partition table was refactored into
a small helper function to simplify the test cases.  And finally, all
of the function variables were flagged 'local' to ensure their scope
is limited.  This should have been done a while ago.
---
 scripts/zconfig.sh | 185 ++++++++++++++++++++++++++-------------------
 1 file changed, 107 insertions(+), 78 deletions(-)

diff --git a/scripts/zconfig.sh b/scripts/zconfig.sh
index a34e656daa..0220d9f5fa 100755
--- a/scripts/zconfig.sh
+++ b/scripts/zconfig.sh
@@ -48,12 +48,28 @@ if [ $(id -u) != 0 ]; then
         die "Must run as root"
 fi
 
+zconfig_partition() {
+        local DEVICE=$1
+        local START=$2
+        local END=$3
+        local TMP_FILE=`mktemp`
+
+        /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
+${START},${END}
+;
+;
+;
+EOF
+
+        rm ${TMP_FILE}
+}
+
 # Validate persistent zpool.cache configuration.
 zconfig_test1() {
-        POOL_NAME=test1
-        TMP_FILE1=`mktemp`
-        TMP_FILE2=`mktemp`
-        TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+        local POOL_NAME=test1
+        local TMP_FILE1=`mktemp`
+        local TMP_FILE2=`mktemp`
+        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
 
         echo -n "test 1 - persistent zpool.cache: "
 
@@ -79,10 +95,10 @@ zconfig_test1
 
 # Validate ZFS disk scanning and import w/out zpool.cache configuration.
 zconfig_test2() {
-        POOL_NAME=test2
-        TMP_FILE1=`mktemp`
-        TMP_FILE2=`mktemp`
-        TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+        local POOL_NAME=test2
+        local TMP_FILE1=`mktemp`
+        local TMP_FILE2=`mktemp`
+        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
 
         echo -n "test 2 - scan disks for pools to import: "
 
@@ -113,12 +129,11 @@ zconfig_test2
 
 # ZVOL sanity check
 zconfig_test3() {
-        POOL_NAME=tank
-        ZVOL_NAME=fish
-        FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
-        SRC_DIR=/bin/
-        TMP_FILE1=`mktemp`
-        TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+        local POOL_NAME=tank
+        local ZVOL_NAME=fish
+        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
+        local SRC_DIR=/bin/
+        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
 
         echo -n "test 3 - zvol+ext3 sanity: "
 
@@ -129,15 +144,10 @@ zconfig_test3() {
 
         # Partition the volume, for a 400M volume there will be
         # 812 cylinders, 16 heads, and 63 sectors per track.
-        /sbin/sfdisk -q /dev/zvol/${FULL_NAME} << EOF &>${TMP_FILE1} || fail 4
-,812
-;
-;
-;
-EOF
+        zconfig_partition /dev/zvol/${FULL_NAME} 0 812
 
         # Format the partition with ext3.
-        /sbin/mkfs.ext3 /dev/zvol/${FULL_NAME}1 &>${TMP_FILE1} || fail 5
+        /sbin/mkfs.ext3 -q /dev/zvol/${FULL_NAME}1 || fail 5
 
         # Mount the ext3 filesystem and copy some data to it.
         mkdir -p /tmp/${ZVOL_NAME} || fail 6
@@ -152,67 +162,93 @@ EOF
         umount /tmp/${ZVOL_NAME} || fail 11
         ${ZFS} destroy ${FULL_NAME} || fail 12
         ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 13
-        rm -f ${TMP_FILE1} || fail 14
         ${ZFS_SH} -u || fail 15
 
         pass
 }
 zconfig_test3
 
-# zpool import/export device check (1 volume, 1 snapshot, 1 clone)
+zconfig_zvol_device_stat() {
+        local EXPECT=$1
+        local POOL_NAME=/dev/zvol/$2
+        local ZVOL_NAME=/dev/zvol/$3
+        local SNAP_NAME=/dev/zvol/$4
+        local CLONE_NAME=/dev/zvol/$5
+        local COUNT=0
+
+        # Pool exists
+        stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1
+
+        # Volume and partitions
+        stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
+        stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+        stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+        # Snapshot with partitions
+        stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
+        stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+        stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+        # Clone with partitions
+        stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
+        stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+        stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+        if [ $EXPECT -ne $COUNT ]; then
+                return 1
+        fi
+
+        return 0
+}
+
+# zpool import/export device check
+# (1 volume, 2 partitions, 1 snapshot, 1 clone)
 zconfig_test4() {
-        POOL_NAME=tank
-        ZVOL_NAME=volume
-        SNAP_NAME=snapshot
-        CLONE_NAME=clone
-        FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
-        FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
-        FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
-        TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+        local POOL_NAME=tank
+        local ZVOL_NAME=volume
+        local SNAP_NAME=snapshot
+        local CLONE_NAME=clone
+        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
+        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
+        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
+        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
 
         echo -n "test 4 - zpool import/export device: "
 
-        # Create a pool, volume, snapshot, and clone
+        # Create a pool, volume, partition, snapshot, and clone.
         ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
         ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
         ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
-        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 4
-        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 5
+        zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
+        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
+        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
 
         # Verify the devices were created
-        stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 6
-        stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 7
-        stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 8
-        stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 9
+        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
 
         # Export the pool
-        ${ZPOOL} export ${POOL_NAME} || fail 10
+        ${ZPOOL} export ${POOL_NAME} || fail 8
 
-        # Verify the devices were removed
-        stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 11
-        stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 12
-        stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 13
-        stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 14
+        # verify the devices were removed
+        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
 
         # Import the pool, wait 1 second for udev
-        ${ZPOOL} import ${POOL_NAME} && sleep 1 || fail 15
+        ${ZPOOL} import ${POOL_NAME} && sleep 1 || fail 10
 
         # Verify the devices were created
-        stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 16
-        stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 17
-        stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 18
-        stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 19
+        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
 
         # Destroy the pool and consequently the devices
-        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
+        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
 
-        # Verify the devices were removed
-        stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 21
-        stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 22
-        stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 23
-        stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 24
+        # verify the devices were removed
+        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
 
-        ${ZFS_SH} -u || fail 25
+        ${ZFS_SH} -u || fail 14
 
         pass
 }
@@ -235,43 +271,36 @@ zconfig_test5() {
         ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
         ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
         ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
-        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 4
-        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 5
+        zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
+        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
+        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
 
         # Verify the devices were created
-        stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 6
-        stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 7
-        stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 8
-        stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 9
+        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
 
         # Unload the modules
-        ${ZFS_SH} -u || fail 10
+        ${ZFS_SH} -u || fail 8
 
         # Verify the devices were removed
-        stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 11
-        stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 12
-        stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 13
-        stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 14
+        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
 
         # Load the modules, wait 1 second for udev
-        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" && sleep 1 || fail 15
+        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" && sleep 1 || fail 10
 
         # Verify the devices were created
-        stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 16
-        stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 17
-        stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 18
-        stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 19
+        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
 
         # Destroy the pool and consequently the devices
-        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
+        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
 
         # Verify the devices were removed
-        stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 21
-        stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 22
-        stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 23
-        stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 24
+        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
 
-        ${ZFS_SH} -u || fail 25
+        ${ZFS_SH} -u || fail 14
 
         pass
 }

From a6644f49a5af1dae5de7c87b704c8a7bee2ccf87 Mon Sep 17 00:00:00 2001
From: Brian Behlendorf
Date: Thu, 5 Aug 2010 11:15:09 -0700
Subject: [PATCH 2/2] Add cleanup option -c to zconfig.sh

Several folks have now remarked that when the regression tests fail
they leave a mess behind.  This was done intentionally at the time to
facilitate debugging the wreckage.  However, it also means that you
may need to do some manual cleanup, such as removing the loopback
devices, before re-running the tests.

To simplify this procedure I've added the '-c' option to zconfig.sh,
which will attempt to clean up the mess from a previous test before
starting.  This is somewhat dangerous because it must guess which
loopback devices you were using, but the risk is fairly minimal:
devices which are currently still in use cannot be cleaned up, and
only devices with 'zpool' in the name are considered for removal.
That said, if you're running parallel copies of, say, zconfig.sh,
this may cause you some trouble.
---
 scripts/common.sh.in | 19 +++++++++++++++++++
 scripts/zconfig.sh   | 14 ++++++++++++--
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/scripts/common.sh.in b/scripts/common.sh.in
index bb602474b0..00418696c8 100644
--- a/scripts/common.sh.in
+++ b/scripts/common.sh.in
@@ -15,6 +15,7 @@ MODULES=(zlib_deflate spl splat zavl znvpair zunicode zcommon zfs)
 fi
 
 PROG=""
+CLEANUP=
 VERBOSE=
 VERBOSE_FLAG=
 FORCE=
@@ -207,6 +208,24 @@ unused_loop_device() {
         die "Error: Unable to find unused loopback device"
 }
 
+#
+# This can be slightly dangerous because the loop devices we are
+# cleaning up may not be ours.  However, if the devices are currently
+# in use we will not be able to remove them, and we only remove
+# devices which include 'zpool' in the name.  So any damage we might
+# do should be limited to other zfs related testing.
+#
+cleanup_loop_devices() {
+        local TMP_FILE=`mktemp`
+
+        ${LOSETUP} -a | tr -d '()' >${TMP_FILE}
+        ${AWK} -F":" -v losetup="$LOSETUP" \
+            '/zpool/ { system("losetup -d "$1) }' ${TMP_FILE}
+        ${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}
+
+        rm -f ${TMP_FILE}
+}
+
 #
 # The following udev helper functions assume that the provided
 # udev rules file will create a /dev/disk/zpool/
diff --git a/scripts/zconfig.sh b/scripts/zconfig.sh
index 0220d9f5fa..c671206186 100755
--- a/scripts/zconfig.sh
+++ b/scripts/zconfig.sh
@@ -16,7 +16,7 @@ PROG=zconfig.sh
 usage() {
 cat << EOF
 USAGE:
-$0 [hv]
+$0 [hvc]
 
 DESCRIPTION:
         ZFS/ZPOOL configuration tests
@@ -24,11 +24,12 @@ DESCRIPTION:
 OPTIONS:
         -h      Show this message
         -v      Verbose
+        -c      Cleanup lo+file devices at start
 
 EOF
 }
 
-while getopts 'hv' OPTION; do
+while getopts 'hvc?' OPTION; do
         case $OPTION in
         h)
                 usage
@@ -37,6 +38,9 @@ while getopts 'hv' OPTION; do
         v)
                 VERBOSE=1
                 ;;
+        c)
+                CLEANUP=1
+                ;;
         ?)
                 usage
                 exit
@@ -48,6 +52,12 @@ if [ $(id -u) != 0 ]; then
         die "Must run as root"
 fi
 
+# Perform pre-cleanup if requested
+if [ ${CLEANUP} ]; then
+        cleanup_loop_devices
+        rm -f /tmp/zpool.cache.*
+fi
+
 zconfig_partition() {
         local DEVICE=$1
         local START=$2
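
For anyone picking these patches up, a quick way to exercise the new cleanup
option once both are applied might look like the sketch below.  This is only
an illustration; the working directory and the assumption that a previous
failed run left loopback devices and cache files behind are mine, not part
of the patches.

    # Clean up loopback devices and stale cache files left by an earlier
    # failed run, then re-run the configuration tests verbosely.
    cd scripts
    ./zconfig.sh -c -v

Since cleanup_loop_devices only detaches loop devices whose backing file
name contains 'zpool', and busy devices cannot be detached, the pre-cleanup
is reasonably safe; but as the commit message notes, it can interfere with
another copy of zconfig.sh running in parallel.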