Update zconfig to check partitions
Update the zconfig.sh test script to verify not only that volumes, snapshots, and clones are created and removed properly, but also that the partition information for each of these device types is correctly enumerated by the kernel. Tests 4 and 5 now also create two partitions on the original volume, and these partitions are expected to exist on the snapshot and the clone as well. Correctness is verified after import/export, module load/unload, dataset creation, and pool destruction. Additionally, the code to create a partition table was refactored into a small helper function to simplify the test cases. Finally, all of the function variables were flagged 'local' to ensure their scope is limited; this should have been done a while ago.
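As a rough sketch (not part of the commit) of what the new checks expect: after test 4 partitions the volume, takes a snapshot, and creates a clone, the following device nodes should all exist, 10 in total, which is the EXPECT value passed to zconfig_zvol_device_stat. The paths follow the /dev/zvol layout used by the script; the exact node names depend on udev.

# Hypothetical check of the nodes test 4 expects (names taken from the diff below).
stat /dev/zvol/tank                            # pool
stat /dev/zvol/tank/volume{,1,2}               # volume plus two partitions
stat /dev/zvol/tank/volume@snapshot{,1,2}      # snapshot plus two partitions
stat /dev/zvol/tank/clone{,1,2}                # clone plus two partitions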
parent 63a645c3c8
commit 019953e0b7
@@ -48,12 +48,28 @@ if [ $(id -u) != 0 ]; then
 	die "Must run as root"
 fi
 
+zconfig_partition() {
+	local DEVICE=$1
+	local START=$2
+	local END=$3
+	local TMP_FILE=`mktemp`
+
+	/sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
+${START},${END}
+;
+;
+;
+EOF
+
+	rm ${TMP_FILE}
+}
+
 # Validate persistent zpool.cache configuration.
 zconfig_test1() {
-	POOL_NAME=test1
-	TMP_FILE1=`mktemp`
-	TMP_FILE2=`mktemp`
-	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+	local POOL_NAME=test1
+	local TMP_FILE1=`mktemp`
+	local TMP_FILE2=`mktemp`
+	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
 
 	echo -n "test 1 - persistent zpool.cache: "
 
@@ -79,10 +95,10 @@ zconfig_test1
 
 # Validate ZFS disk scanning and import w/out zpool.cache configuration.
 zconfig_test2() {
-	POOL_NAME=test2
-	TMP_FILE1=`mktemp`
-	TMP_FILE2=`mktemp`
-	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+	local POOL_NAME=test2
+	local TMP_FILE1=`mktemp`
+	local TMP_FILE2=`mktemp`
+	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
 
 	echo -n "test 2 - scan disks for pools to import: "
 
@@ -113,12 +129,11 @@ zconfig_test2
 
 # ZVOL sanity check
 zconfig_test3() {
-	POOL_NAME=tank
-	ZVOL_NAME=fish
-	FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
-	SRC_DIR=/bin/
-	TMP_FILE1=`mktemp`
-	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+	local POOL_NAME=tank
+	local ZVOL_NAME=fish
+	local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
+	local SRC_DIR=/bin/
+	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
 
 	echo -n "test 3 - zvol+ext3 sanity: "
 
@@ -129,15 +144,10 @@ zconfig_test3() {
 
 	# Partition the volume, for a 400M volume there will be
 	# 812 cylinders, 16 heads, and 63 sectors per track.
-	/sbin/sfdisk -q /dev/zvol/${FULL_NAME} << EOF &>${TMP_FILE1} || fail 4
-,812
-;
-;
-;
-EOF
+	zconfig_partition /dev/zvol/${FULL_NAME} 0 812
 
 	# Format the partition with ext3.
-	/sbin/mkfs.ext3 /dev/zvol/${FULL_NAME}1 &>${TMP_FILE1} || fail 5
+	/sbin/mkfs.ext3 -q /dev/zvol/${FULL_NAME}1 || fail 5
 
 	# Mount the ext3 filesystem and copy some data to it.
 	mkdir -p /tmp/${ZVOL_NAME} || fail 6
@@ -152,67 +162,93 @@ EOF
 	umount /tmp/${ZVOL_NAME} || fail 11
 	${ZFS} destroy ${FULL_NAME} || fail 12
 	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 13
-	rm -f ${TMP_FILE1} || fail 14
 	${ZFS_SH} -u || fail 15
 
 	pass
 }
 zconfig_test3
 
-# zpool import/export device check (1 volume, 1 snapshot, 1 clone)
+zconfig_zvol_device_stat() {
+	local EXPECT=$1
+	local POOL_NAME=/dev/zvol/$2
+	local ZVOL_NAME=/dev/zvol/$3
+	local SNAP_NAME=/dev/zvol/$4
+	local CLONE_NAME=/dev/zvol/$5
+	local COUNT=0
+
+	# Pool exists
+	stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1
+
+	# Volume and partitions
+	stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
+	stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+	stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+	# Snapshot with partitions
+	stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
+	stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+	stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+	# Clone with partitions
+	stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
+	stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+	stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+	if [ $EXPECT -ne $COUNT ]; then
+		return 1
+	fi
+
+	return 0
+}
+
+# zpool import/export device check
+# (1 volume, 2 partitions, 1 snapshot, 1 clone)
 zconfig_test4() {
-	POOL_NAME=tank
-	ZVOL_NAME=volume
-	SNAP_NAME=snapshot
-	CLONE_NAME=clone
-	FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
-	FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
-	FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
-	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+	local POOL_NAME=tank
+	local ZVOL_NAME=volume
+	local SNAP_NAME=snapshot
+	local CLONE_NAME=clone
+	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
+	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
+	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
+	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
 
 	echo -n "test 4 - zpool import/export device: "
 
-	# Create a pool, volume, snapshot, and clone
+	# Create a pool, volume, partition, snapshot, and clone.
 	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
 	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
 	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
-	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 4
-	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 5
+	zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
+	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
+	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
 
 	# Verify the devices were created
-	stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 6
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 7
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 8
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 9
+	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
 
 	# Export the pool
-	${ZPOOL} export ${POOL_NAME} || fail 10
+	${ZPOOL} export ${POOL_NAME} || fail 8
 
-	# Verify the devices were removed
-	stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 11
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 12
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 13
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 14
+	# verify the devices were removed
+	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
 
 	# Import the pool, wait 1 second for udev
-	${ZPOOL} import ${POOL_NAME} && sleep 1 || fail 15
+	${ZPOOL} import ${POOL_NAME} && sleep 1 || fail 10
 
 	# Verify the devices were created
-	stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 16
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 17
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 18
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 19
+	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
 
 	# Destroy the pool and consequently the devices
-	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
+	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
 
-	# Verify the devices were removed
-	stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 21
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 22
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 23
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 24
+	# verify the devices were removed
+	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
 
-	${ZFS_SH} -u || fail 25
+	${ZFS_SH} -u || fail 14
 
 	pass
 }
@@ -235,43 +271,36 @@ zconfig_test5() {
 	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
 	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
 	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
-	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 4
-	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 5
+	zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
+	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
+	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
 
 	# Verify the devices were created
-	stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 6
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 7
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 8
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 9
+	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
 
 	# Unload the modules
-	${ZFS_SH} -u || fail 10
+	${ZFS_SH} -u || fail 8
 
 	# Verify the devices were removed
-	stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 11
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 12
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 13
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 14
+	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
 
 	# Load the modules, wait 1 second for udev
-	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" && sleep 1 || fail 15
+	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" && sleep 1 || fail 10
 
 	# Verify the devices were created
-	stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 16
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 17
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 18
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 19
+	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
 
 	# Destroy the pool and consequently the devices
-	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
+	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
 
 	# Verify the devices were removed
-	stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 21
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 22
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 23
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 24
+	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
 
-	${ZFS_SH} -u || fail 25
+	${ZFS_SH} -u || fail 14
 
 	pass
 }
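Usage note: a minimal, hypothetical invocation of the updated script; the scripts/ path is an assumption about the repository layout, while the root requirement comes from the check visible at the top of the diff.

# Run the zconfig tests; the script dies with "Must run as root" otherwise.
sudo ./scripts/zconfig.sh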