Merge commit 'refs/top-bases/linux-zfs-branch' into linux-zfs-branch
commit 9e4b51ab27

@@ -15,6 +15,7 @@ MODULES=(zlib_deflate spl splat zavl znvpair zunicode zcommon zfs)
 fi
 
 PROG="<define PROG>"
+CLEANUP=
 VERBOSE=
 VERBOSE_FLAG=
 FORCE=
@@ -212,6 +213,24 @@ unused_loop_device() {
 	die "Error: Unable to find unused loopback device"
 }
 
+#
+# This can be slightly dangerous because the loop devices we are
+# cleaning up may not be ours.  However, if the devices are currently
+# in use we will not be able to remove them, and we only remove
+# devices which include 'zpool' in the name.  So any damage we might
+# do should be limited to other zfs related testing.
+#
+cleanup_loop_devices() {
+	local TMP_FILE=`mktemp`
+
+	${LOSETUP} -a | tr -d '()' >${TMP_FILE}
+	${AWK} -F":" -v losetup="$LOSETUP" \
+	    '/zpool/ { system(losetup" -d "$1) }' ${TMP_FILE}
+	${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}
+
+	rm -f ${TMP_FILE}
+}
+
 #
 # The following udev helper functions assume that the provided
 # udev rules file will create a /dev/disk/zpool/<CHANNEL><RANK>
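
For context, a sketch of what cleanup_loop_devices parses, assuming the classic util-linux `losetup -a` listing format; the backing file name below is purely illustrative:

	# losetup -a | tr -d '()' yields lines such as:
	#   /dev/loop0: [0805]:123456 /tmp/zpool-vdev0
	# Split on ':',        field $1 -> /dev/loop0       (detached with losetup -d)
	# Split on whitespace, field $3 -> /tmp/zpool-vdev0 (removed with rm -f)

Only lines matching /zpool/ are touched, which is what bounds the blast radius described in the comment above.
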
@@ -16,7 +16,7 @@ PROG=zconfig.sh
 usage() {
 cat << EOF
 USAGE:
-$0 [hv]
+$0 [hvc]
 
 DESCRIPTION:
 	ZFS/ZPOOL configuration tests
@@ -24,11 +24,12 @@ DESCRIPTION:
 OPTIONS:
 	-h      Show this message
 	-v      Verbose
+	-c      Cleanup lo+file devices at start
 
 EOF
 }
 
-while getopts 'hv' OPTION; do
+while getopts 'hvc?' OPTION; do
 	case $OPTION in
 	h)
 		usage
@@ -37,6 +38,9 @@ while getopts 'hv' OPTION; do
 	v)
 		VERBOSE=1
 		;;
+	c)
+		CLEANUP=1
+		;;
 	?)
 		usage
 		exit
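
With the new -c option, loop devices and zpool.cache files leaked by an earlier aborted run are reclaimed before the tests begin. A typical invocation might look like this (hypothetical working directory; the script dies unless run as root):

	sudo ./zconfig.sh -c -v
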
@@ -48,12 +52,34 @@ if [ $(id -u) != 0 ]; then
 	die "Must run as root"
 fi
 
+# Perform pre-cleanup if requested
+if [ ${CLEANUP} ]; then
+	cleanup_loop_devices
+	rm -f /tmp/zpool.cache.*
+fi
+
+zconfig_partition() {
+	local DEVICE=$1
+	local START=$2
+	local END=$3
+	local TMP_FILE=`mktemp`
+
+	/sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
+${START},${END}
+;
+;
+;
+EOF
+
+	rm ${TMP_FILE}
+}
+
 # Validate persistent zpool.cache configuration.
 zconfig_test1() {
-	POOL_NAME=test1
-	TMP_FILE1=`mktemp`
-	TMP_FILE2=`mktemp`
-	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+	local POOL_NAME=test1
+	local TMP_FILE1=`mktemp`
+	local TMP_FILE2=`mktemp`
+	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
 
 	echo -n "test 1 - persistent zpool.cache: "
 
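
A note on the sfdisk input fed by the new zconfig_partition helper, assuming the old cylinder-based sfdisk dialect (pre util-linux 2.26): the first line `${START},${END}` defines primary partition 1 by start cylinder and size, and each bare `;` accepts the defaults for the next slot, which hands the remaining space to partition 2 and leaves partitions 3 and 4 empty. That is why the tests below expect two partition nodes per volume. Test 3, for example, carves up its 400M volume with:

	zconfig_partition /dev/zvol/tank/fish 0 812
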
@@ -79,10 +105,10 @@ zconfig_test1
 
 # Validate ZFS disk scanning and import w/out zpool.cache configuration.
 zconfig_test2() {
-	POOL_NAME=test2
-	TMP_FILE1=`mktemp`
-	TMP_FILE2=`mktemp`
-	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+	local POOL_NAME=test2
+	local TMP_FILE1=`mktemp`
+	local TMP_FILE2=`mktemp`
+	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
 
 	echo -n "test 2 - scan disks for pools to import: "
 
@@ -113,12 +139,11 @@ zconfig_test2
 
 # ZVOL sanity check
 zconfig_test3() {
-	POOL_NAME=tank
-	ZVOL_NAME=fish
-	FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
-	SRC_DIR=/bin/
-	TMP_FILE1=`mktemp`
-	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+	local POOL_NAME=tank
+	local ZVOL_NAME=fish
+	local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
+	local SRC_DIR=/bin/
+	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
 
 	echo -n "test 3 - zvol+ext3 sanity: "
 
@@ -129,15 +154,10 @@ zconfig_test3() {
 
 	# Partition the volume, for a 400M volume there will be
 	# 812 cylinders, 16 heads, and 63 sectors per track.
-	/sbin/sfdisk -q /dev/zvol/${FULL_NAME} << EOF &>${TMP_FILE1} || fail 4
-,812
-;
-;
-;
-EOF
+	zconfig_partition /dev/zvol/${FULL_NAME} 0 812
 
 	# Format the partition with ext3.
-	/sbin/mkfs.ext3 /dev/zvol/${FULL_NAME}1 &>${TMP_FILE1} || fail 5
+	/sbin/mkfs.ext3 -q /dev/zvol/${FULL_NAME}1 || fail 5
 
 	# Mount the ext3 filesystem and copy some data to it.
 	mkdir -p /tmp/${ZVOL_NAME} || fail 6
@@ -152,67 +172,93 @@ EOF
 	umount /tmp/${ZVOL_NAME} || fail 11
 	${ZFS} destroy ${FULL_NAME} || fail 12
 	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 13
-	rm -f ${TMP_FILE1} || fail 14
 	${ZFS_SH} -u || fail 15
 
 	pass
 }
 zconfig_test3
 
-# zpool import/export device check (1 volume, 1 snapshot, 1 clone)
+zconfig_zvol_device_stat() {
+	local EXPECT=$1
+	local POOL_NAME=/dev/zvol/$2
+	local ZVOL_NAME=/dev/zvol/$3
+	local SNAP_NAME=/dev/zvol/$4
+	local CLONE_NAME=/dev/zvol/$5
+	local COUNT=0
+
+	# Pool exists
+	stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1
+
+	# Volume and partitions
+	stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
+	stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+	stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+	# Snapshot with partitions
+	stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
+	stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+	stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+	# Clone with partitions
+	stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
+	stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
+	stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1
+
+	if [ $EXPECT -ne $COUNT ]; then
+		return 1
+	fi
+
+	return 0
+}
+
+# zpool import/export device check
+# (1 volume, 2 partitions, 1 snapshot, 1 clone)
 zconfig_test4() {
-	POOL_NAME=tank
-	ZVOL_NAME=volume
-	SNAP_NAME=snapshot
-	CLONE_NAME=clone
-	FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
-	FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
-	FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
-	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
+	local POOL_NAME=tank
+	local ZVOL_NAME=volume
+	local SNAP_NAME=snapshot
+	local CLONE_NAME=clone
+	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
+	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
+	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
+	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
 
 	echo -n "test 4 - zpool import/export device: "
 
-	# Create a pool, volume, snapshot, and clone
+	# Create a pool, volume, partition, snapshot, and clone.
 	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
 	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
 	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
-	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 4
-	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 5
+	zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
+	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
+	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
 
 	# Verify the devices were created
-	stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 6
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 7
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 8
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 9
+	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
 
 	# Export the pool
-	${ZPOOL} export ${POOL_NAME} || fail 10
+	${ZPOOL} export ${POOL_NAME} || fail 8
 
 	# Verify the devices were removed
-	stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 11
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 12
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 13
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 14
+	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
 
 	# Import the pool, wait 1 second for udev
-	${ZPOOL} import ${POOL_NAME} && sleep 1 || fail 15
+	${ZPOOL} import ${POOL_NAME} && sleep 1 || fail 10
 
 	# Verify the devices were created
-	stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 16
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 17
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 18
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 19
+	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
 
 	# Destroy the pool and consequently the devices
-	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
+	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
 
 	# Verify the devices were removed
-	stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 21
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 22
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 23
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 24
+	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
 
-	${ZFS_SH} -u || fail 25
+	${ZFS_SH} -u || fail 14
 
 	pass
 }
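
The expected count of 10 passed to zconfig_zvol_device_stat follows from the nodes udev should publish: one /dev/zvol entry for the pool, plus three nodes each (the whole device and the two partitions laid down by `zconfig_partition ... 0 64`) for the volume, the snapshot, and the clone, i.e. 1 + 3 + 3 + 3 = 10. A by-hand spot check might look like this (hypothetical paths, assuming test 4's tank/volume@snapshot layout and bash brace expansion):

	stat /dev/zvol/tank /dev/zvol/tank/volume{,1,2} \
	    /dev/zvol/tank/volume@snapshot{,1,2} /dev/zvol/tank/clone{,1,2}
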
@@ -235,43 +281,36 @@ zconfig_test5() {
 	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
 	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
 	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
-	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 4
-	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 5
+	zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
+	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
+	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
 
 	# Verify the devices were created
-	stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 6
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 7
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 8
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 9
+	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
 
 	# Unload the modules
-	${ZFS_SH} -u || fail 10
+	${ZFS_SH} -u || fail 8
 
 	# Verify the devices were removed
-	stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 11
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 12
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 13
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 14
+	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
 
 	# Load the modules, wait 1 second for udev
-	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" && sleep 1 || fail 15
+	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" && sleep 1 || fail 10
 
 	# Verify the devices were created
-	stat /dev/zvol/${POOL_NAME} &>/dev/null || fail 16
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null || fail 17
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null || fail 18
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null || fail 19
+	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
 
 	# Destroy the pool and consequently the devices
-	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
+	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
 
 	# Verify the devices were removed
-	stat /dev/zvol/${POOL_NAME} &>/dev/null && fail 21
-	stat /dev/zvol/${FULL_ZVOL_NAME} &>/dev/null && fail 22
-	stat /dev/zvol/${FULL_SNAP_NAME} &>/dev/null && fail 23
-	stat /dev/zvol/${FULL_CLONE_NAME} &>/dev/null && fail 24
+	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
+	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
 
-	${ZFS_SH} -u || fail 25
+	${ZFS_SH} -u || fail 14
 
 	pass
 }

@@ -17,7 +17,7 @@ HEADER=
 usage() {
 cat << EOF
 USAGE:
-$0 [hv]
+$0 [hvxfc]
 
 DESCRIPTION:
 	ZPIOS sanity tests
@@ -27,11 +27,12 @@ OPTIONS:
 	-v      Verbose
 	-x      Destructive hd/sd/md/dm/ram tests
 	-f      Don't prompt due to -x
+	-c      Cleanup lo+file devices at start
 
 EOF
 }
 
-while getopts 'hvxf' OPTION; do
+while getopts 'hvxfc?' OPTION; do
 	case $OPTION in
 	h)
 		usage
@@ -46,6 +47,9 @@ while getopts 'hvxf' OPTION; do
 	f)
 		FORCE=1
 		;;
+	c)
+		CLEANUP=1
+		;;
 	?)
 		usage
 		exit
@@ -57,6 +61,12 @@ if [ $(id -u) != 0 ]; then
 	die "Must run as root"
 fi
 
+# Perform pre-cleanup if requested
+if [ ${CLEANUP} ]; then
+	cleanup_loop_devices
+	rm -f /tmp/zpool.cache.*
+fi
+
 zpios_test() {
 	CONFIG=$1
 	TEST=$2
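
One subtlety in the pre-cleanup guard shared by both scripts: CLEANUP is expanded unquoted, so `[ ${CLEANUP} ]` is a string-emptiness test rather than a numeric comparison:

	CLEANUP=1; [ ${CLEANUP} ] && echo cleanup   # expands to [ 1 ] -> true
	CLEANUP=;  [ ${CLEANUP} ] && echo cleanup   # expands to [ ]   -> false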