2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
|
|
|
|
# CDDL HEADER START
|
|
|
|
|
#
|
|
|
|
|
# The contents of this file are subject to the terms of the
|
|
|
|
|
# Common Development and Distribution License (the "License").
|
|
|
|
|
# You may not use this file except in compliance with the License.
|
|
|
|
|
#
|
|
|
|
|
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
|
|
|
|
# or http://www.opensolaris.org/os/licensing.
|
|
|
|
|
# See the License for the specific language governing permissions
|
|
|
|
|
# and limitations under the License.
|
|
|
|
|
#
|
|
|
|
|
# When distributing Covered Code, include this CDDL HEADER in each
|
|
|
|
|
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
|
|
|
|
# If applicable, add the following below this CDDL HEADER, with the
|
|
|
|
|
# fields enclosed by brackets "[]" replaced with your own identifying
|
|
|
|
|
# information: Portions Copyright [yyyy] [name of copyright owner]
|
|
|
|
|
#
|
|
|
|
|
# CDDL HEADER END
|
|
|
|
|
#
|
|
|
|
|
|
|
|
|
|
#
|
2019-12-04 21:10:12 +00:00
|
|
|
|
# Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
|
2020-07-13 16:19:18 +00:00
|
|
|
|
# Copyright (c) 2012, 2020, Delphix. All rights reserved.
|
2019-12-04 21:10:12 +00:00
|
|
|
|
# Copyright (c) 2017, Tim Chase. All rights reserved.
|
|
|
|
|
# Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
|
|
|
|
|
# Copyright (c) 2017, Lawrence Livermore National Security LLC.
|
|
|
|
|
# Copyright (c) 2017, Datto Inc. All rights reserved.
|
|
|
|
|
# Copyright (c) 2017, Open-E Inc. All rights reserved.
|
2021-08-22 15:22:07 +00:00
|
|
|
|
# Copyright (c) 2021, The FreeBSD Foundation.
|
2019-03-29 16:13:20 +00:00
|
|
|
|
# Use is subject to license terms.
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
|
|
|
|
|
2022-03-22 21:18:48 +00:00
|
|
|
|
. ${STF_SUITE}/include/tunables.cfg
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
. ${STF_TOOLS}/include/logapi.shlib
|
2017-04-11 21:56:54 +00:00
|
|
|
|
. ${STF_SUITE}/include/math.shlib
|
2017-10-26 19:26:09 +00:00
|
|
|
|
. ${STF_SUITE}/include/blkdev.shlib
|
2015-07-01 22:23:09 +00:00
|
|
|
|
|
2017-04-06 00:18:22 +00:00
|
|
|
|
#
|
|
|
|
|
# Apply constrained path when available. This is required since the
|
|
|
|
|
# PATH may have been modified by sudo's secure_path behavior.
|
|
|
|
|
#
|
|
|
|
|
if [ -n "$STF_PATH" ]; then
|
2021-03-11 20:01:58 +00:00
|
|
|
|
export PATH="$STF_PATH"
|
2017-04-06 00:18:22 +00:00
|
|
|
|
fi
|
|
|
|
|
|
2018-08-27 17:04:21 +00:00
|
|
|
|
#
|
|
|
|
|
# Generic dot version comparison function
|
|
|
|
|
#
|
|
|
|
|
# Returns success when version $1 is greater than or equal to $2.
|
|
|
|
|
#
|
|
|
|
|
function compare_version_gte
{
	# Compare two dotted version strings; succeed (return 0) when
	# version $1 >= version $2.
	#
	# Use a literal '%s' format so the version strings are never
	# interpreted as printf format directives (the previous form
	# printf "$1\n$2" would misbehave on any '%' or '\' in the input).
	[ "$(printf '%s\n%s' "$1" "$2" | sort -V | tail -n1)" = "$1" ]
}
|
|
|
|
|
|
2017-07-24 18:03:50 +00:00
|
|
|
|
# Linux kernel version comparison function
|
|
|
|
|
#
|
|
|
|
|
# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
|
|
|
|
|
#
|
|
|
|
|
# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
|
|
|
|
|
#
|
|
|
|
|
function linux_version
{
	# Encode a Linux kernel version string as a single integer for
	# easy numeric comparison, e.g.
	#   if [ $(linux_version) -ge $(linux_version "2.6.32") ]
	#
	# $1  version string such as "4.10" or "2.6.32"; when empty the
	#     running kernel's x.y.z prefix (from uname -r) is used.
	typeset vstr="$1"

	[ -z "$vstr" ] && vstr=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")

	typeset maj min patch _rest
	IFS='.' read -r maj min patch _rest <<<"$vstr"

	# Missing components count as zero, so "4" encodes like "4.0.0".
	echo $((${maj:-0} * 100000 + ${min:-0} * 1000 + ${patch:-0}))
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
# Determine if this is a Linux test system
|
|
|
|
|
#
|
|
|
|
|
# Return 0 if platform Linux, 1 if otherwise
|
|
|
|
|
|
|
|
|
|
function is_linux
{
	# Succeed only when the cached platform name ($UNAME, set by the
	# suite's common configuration) identifies a Linux system.
	test "$UNAME" = "Linux"
}
|
|
|
|
|
|
2020-01-03 17:08:23 +00:00
|
|
|
|
# Determine if this is an illumos test system
|
|
|
|
|
#
|
|
|
|
|
# Return 0 if platform illumos, 1 if otherwise
|
|
|
|
|
function is_illumos
{
	# Succeed only when the cached platform name ($UNAME, set by the
	# suite's common configuration) identifies an illumos system.
	test "$UNAME" = "illumos"
}
|
|
|
|
|
|
2019-12-18 20:29:43 +00:00
|
|
|
|
# Determine if this is a FreeBSD test system
|
|
|
|
|
#
|
|
|
|
|
# Return 0 if platform FreeBSD, 1 if otherwise
|
|
|
|
|
|
|
|
|
|
function is_freebsd
{
	# Succeed only when the cached platform name ($UNAME, set by the
	# suite's common configuration) identifies a FreeBSD system.
	test "$UNAME" = "FreeBSD"
}
|
|
|
|
|
|
2016-10-31 21:16:37 +00:00
|
|
|
|
# Determine if this is a 32-bit system
|
|
|
|
|
#
|
|
|
|
|
# Return 0 if platform is 32-bit, 1 if otherwise
|
|
|
|
|
|
|
|
|
|
function is_32bit
{
	# getconf LONG_BIT reports the native word size ("32" or "64");
	# the output contains no whitespace, so quoting is purely defensive.
	typeset bits
	bits="$(getconf LONG_BIT)"
	[ "$bits" = "32" ]
}
|
|
|
|
|
|
2016-12-14 17:33:07 +00:00
|
|
|
|
# Determine if kmemleak is enabled
|
|
|
|
|
#
|
|
|
|
|
# Return 0 if kmemleak is enabled, 1 if otherwise
|
|
|
|
|
|
|
|
|
|
function is_kmemleak
{
	# kmemleak is a Linux-only kernel debug facility; the debugfs node
	# exists only when it is compiled in and enabled.
	if is_linux; then
		[ -e /sys/kernel/debug/kmemleak ]
	else
		false
	fi
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
# Determine whether a dataset is mounted
|
|
|
|
|
#
|
|
|
|
|
# $1 dataset name
|
|
|
|
|
# $2 filesystem type; optional - defaulted to zfs
|
|
|
|
|
#
|
|
|
|
|
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
|
|
|
|
|
|
|
|
|
|
# Determine whether a dataset (or mountpoint) is mounted.
#
# $1  dataset name, mountpoint path, or device (depending on fstype)
# $2  filesystem type; optional, defaults to zfs
#
# Returns 0 if mounted, non-zero otherwise.
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name

	case $fstype in
		zfs)
			# 'zfs mount' prints "<dataset> <mountpoint>" pairs.
			# awk exits 1 on a match; the leading '!' turns that
			# into a 0 (mounted) return for the function.
			if [[ "$1" == "/"* ]] ; then
				# Argument looks like a path: match mountpoints.
				! zfs mount | awk -v fs="$1" '$2 == fs {exit 1}'
			else
				# Otherwise match dataset names.
				! zfs mount | awk -v ds="$1" '$1 == ds {exit 1}'
			fi
			;;
		ufs|nfs)
			if is_freebsd; then
				# Match either the device or the mount dir.
				# NOTE(review): 'return 0' inside the pipeline
				# relies on the last pipeline stage running in
				# the current shell (ksh semantics) — confirm
				# if this library is ever run under bash.
				mount -pt $fstype | while read dev dir _t _flags; do
					[[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
				done
			else
				# df -F output: "<dir> (<name>) ..."; a failed
				# df (not mounted / bad arg) propagates its
				# non-zero status via the bare 'return'.
				out=$(df -F $fstype $1 2>/dev/null) || return

				# Extract the mount directory (before " (").
				dir=${out%%\(*}
				dir=${dir%% *}
				# Extract the device/source name inside "( )".
				name=${out##*\(}
				name=${name%%\)*}
				name=${name%% *}

				[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
			fi
			;;
		ext*)
			# For ext2/3/4, df -t succeeds only when $1 is mounted
			# with that filesystem type.
			df -t $fstype $1 > /dev/null 2>&1
			;;
		zvol)
			# A zvol is "mounted" when its device symlink resolves
			# to a device that appears at the start of a mount(8)
			# line.
			if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
				link=$(readlink -f $ZVOL_DEVDIR/$1)
				[[ -n "$link" ]] && \
					mount | grep -q "^$link" && \
						return 0
			fi
			;;
		*)
			# Unknown filesystem type: report "not mounted".
			false
			;;
	esac
}
|
|
|
|
|
|
|
|
|
|
# Return 0 if a dataset is mounted; 1 otherwise
|
|
|
|
|
#
|
|
|
|
|
# $1 dataset name
|
|
|
|
|
# $2 filesystem type; optional - defaulted to zfs
|
|
|
|
|
|
|
|
|
|
function mounted
{
	# Thin wrapper over ismounted ($1 = dataset, $2 = optional fstype);
	# its exit status is propagated unchanged.
	ismounted $1 $2
	return $?
}
|
|
|
|
|
|
|
|
|
|
# Return 0 if a dataset is unmounted; 1 otherwise
|
|
|
|
|
#
|
|
|
|
|
# $1 dataset name
|
|
|
|
|
# $2 filesystem type; optional - defaulted to zfs
|
|
|
|
|
|
|
|
|
|
function unmounted
{
	# Invert ismounted: succeed only when the dataset ($1, with
	# optional fstype $2) is NOT currently mounted.
	if ismounted $1 $2; then
		return 1
	fi
	return 0
}
|
|
|
|
|
|
|
|
|
|
# Create the standard test pool/dataset layout from a disk list and end
# the test step with a PASS.
#
# Arguments are forwarded verbatim to default_setup_noexit:
# $1 disk list, $2 container flag, $3 volume flag, $4 no-mountpoint flag.
function default_setup
{
	default_setup_noexit "$@"

	# log_pass comes from logapi.shlib; presumably it terminates the
	# caller with a PASS result — failures above are expected to have
	# aborted already via log_must/log_fail.
	log_pass
}
|
|
|
|
|
|
Fix `zfs set atime|relatime=off|on` behavior on inherited datasets
`zfs set atime|relatime=off|on` doesn't disable or enable the property
on read for datasets whose property was inherited from parent, until
a dataset is once unmounted and mounted again.
(The properties start to work properly if a dataset is once unmounted
and mounted again. The difference comes from regular mount process,
e.g. via zpool import, uses mount options based on properties read
from ondisk layout for each dataset, whereas
`zfs set atime|relatime=off|on` just remounts a specified dataset.)
--
# zpool create p1 <device>
# zfs create p1/f1
# zfs set atime=off p1
# echo test > /p1/f1/test
# sync
# zfs list
NAME USED AVAIL REFER MOUNTPOINT
p1 176K 18.9G 25.5K /p1
p1/f1 26K 18.9G 26K /p1/f1
# zfs get atime
NAME PROPERTY VALUE SOURCE
p1 atime off local
p1/f1 atime off inherited from p1
# stat /p1/f1/test | grep Access | tail -1
Access: 2019-04-26 23:32:33.741205192 +0900
# cat /p1/f1/test
test
# stat /p1/f1/test | grep Access | tail -1
Access: 2019-04-26 23:32:50.173231861 +0900
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ changed by read(2)
--
The problem is that zfsvfs::z_atime which was probably intended to keep
incore atime state just gets updated by a callback function of "atime"
property change, atime_changed_cb(), and never used for anything else.
Since now that all file read and atime update use a common function
zpl_iter_read_common() -> file_accessed(), and whether to update atime
via ->dirty_inode() is determined by atime_needs_update(),
atime_needs_update() needs to return false once atime is turned off.
It currently continues to return true on `zfs set atime=off`.
Fix atime_changed_cb() by setting or dropping SB_NOATIME in VFS super
block depending on a new atime value, so that atime_needs_update() works
as expected after property change.
The same problem applies to "relatime" except that a self contained
relatime test is needed. This is because relatime_need_update() is based
on a mount option flag MNT_RELATIME, which doesn't exist in datasets
with inherited "relatime" property via `zfs set relatime=...`, hence it
needs its own relatime test zfs_relatime_need_update().
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tomohiro Kusumi <kusumi.tomohiro@gmail.com>
Closes #8674
Closes #8675
2019-05-07 17:06:30 +00:00
|
|
|
|
# Like default_setup, but skip setting explicit mountpoints on the
# created datasets (fourth argument "yes" suppresses the
# 'zfs set mountpoint=' steps in default_setup_noexit).
#
# $1 disk list, $2 container flag, $3 volume flag.
# Note: only the first three arguments are forwarded.
function default_setup_no_mountpoint
{
	default_setup_noexit "$1" "$2" "$3" "yes"

	# log_pass comes from logapi.shlib; presumably terminates the
	# caller with a PASS result.
	log_pass
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
|
|
|
|
# Given a list of disks, setup storage pools and datasets.
|
|
|
|
|
#
|
|
|
|
|
# Given a list of disks, set up storage pools and datasets (no exit).
#
# $1 disklist        disks to build $TESTPOOL from (global zone only)
# $2 container       non-empty => also create $TESTPOOL/$TESTCTR with
#                    a child filesystem $TESTFS1
# $3 volume          non-empty => also create $TESTPOOL/$TESTVOL
# $4 no_mountpoint   non-empty => do NOT set explicit mountpoints
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3
	typeset no_mountpoint=$4
	log_note begin default_setup_noexit

	if is_global_zone; then
		# Start from a clean slate: tear down any leftover pool
		# and its stale mount directory before recreating it.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		# Inside a zone the pool already exists; just re-export it.
		reexport_pool
	fi

	# Recreate the primary test directory used as a mountpoint.
	rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	if [[ -z $no_mountpoint ]]; then
		log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
	fi

	if [[ -n $container ]]; then
		# Secondary test directory for the container's child fs.
		rm -rf $TESTDIR1 || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		# The container itself is never mounted (canmount=off);
		# only its child filesystem gets a mountpoint.
		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		if [[ -z $no_mountpoint ]]; then
			log_must zfs set mountpoint=$TESTDIR1 \
			    $TESTPOOL/$TESTCTR/$TESTFS1
		fi
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
			# Wait for the zvol device node to appear before
			# returning to the caller.
			block_device_wait
		else
			# Non-global zones cannot create real volumes;
			# create a plain dataset under the same name.
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Given a list of disks, setup a storage pool, file system and
|
|
|
|
|
# a container.
|
|
|
|
|
#
|
|
|
|
|
#
# Given a list of disks, setup a storage pool and dataset plus a
# container ($TESTPOOL/$TESTCTR with a child filesystem).
#
function default_container_setup
{
	# Delegate to default_setup with the container flag enabled.
	default_setup "$1" "true"
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Given a list of disks, setup a storage pool,file system
|
|
|
|
|
# and a volume.
|
|
|
|
|
#
|
|
|
|
|
#
# Given a list of disks, setup a storage pool, filesystem and a volume
# ($TESTPOOL/$TESTVOL), but no container.
#
function default_volume_setup
{
	# Delegate to default_setup: empty container flag, volume enabled.
	default_setup "$1" "" "true"
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Given a list of disks, setup a storage pool,file system,
|
|
|
|
|
# a container and a volume.
|
|
|
|
|
#
|
|
|
|
|
#
# Given a list of disks, setup a storage pool, filesystem, a container
# and a volume.
#
function default_container_volume_setup
{
	# Delegate to default_setup with both optional features enabled.
	default_setup "$1" "true" "true"
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
|
|
|
|
|
# filesystem
|
|
|
|
|
#
|
2018-02-08 16:16:23 +00:00
|
|
|
|
# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
|
2015-07-01 22:23:09 +00:00
|
|
|
|
# $2 snapshot name. Default, $TESTSNAP
|
|
|
|
|
#
|
|
|
|
|
# Create a snapshot on a filesystem or volume.
#
# $1  existing filesystem or volume name; default $TESTPOOL/$TESTFS
# $2  snapshot name; default $TESTSNAP
function create_snapshot
{
	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	# Guard clauses: both names must be non-empty (possible when the
	# defaults themselves are unset).
	if [[ -z $fs_vol ]]; then
		log_fail "Filesystem or volume's name is undefined."
	fi
	if [[ -z $snap ]]; then
		log_fail "Snapshot's name is undefined."
	fi

	# The target snapshot must not exist yet, and its parent dataset
	# must already exist.
	snapexists $fs_vol@$snap && \
		log_fail "$fs_vol@$snap already exists."
	if ! datasetexists $fs_vol; then
		log_fail "$fs_vol must exist."
	fi

	log_must zfs snapshot $fs_vol@$snap
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Create a clone from a snapshot, default clone name is $TESTCLONE.
|
|
|
|
|
#
|
|
|
|
|
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
|
|
|
|
|
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
|
|
|
|
|
#
|
|
|
|
|
# Create a clone from a snapshot.
#
# $1  existing snapshot; default $TESTPOOL/$TESTFS@$TESTSNAP
# $2  clone name; default $TESTPOOL/$TESTCLONE
function create_clone # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	# Both names must be non-empty (possible only when the defaults
	# themselves are unset).
	if [[ -z $snap ]]; then
		log_fail "Snapshot name is undefined."
	fi
	if [[ -z $clone ]]; then
		log_fail "Clone name is undefined."
	fi

	log_must zfs clone $snap $clone
}
|
|
|
|
|
|
2017-01-26 22:42:15 +00:00
|
|
|
|
#
|
|
|
|
|
# Create a bookmark of the given snapshot. Defaultly create a bookmark on
|
|
|
|
|
# filesystem.
|
|
|
|
|
#
|
|
|
|
|
# $1 Existing filesystem or volume name. Default, $TESTFS
|
|
|
|
|
# $2 Existing snapshot name. Default, $TESTSNAP
|
|
|
|
|
# $3 bookmark name. Default, $TESTBKMARK
|
|
|
|
|
#
|
|
|
|
|
# Create a bookmark of the given snapshot.
#
# $1  existing filesystem or volume name; default $TESTFS
# $2  existing snapshot name; default $TESTSNAP
# $3  bookmark name; default $TESTBKMARK
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	# All three names must be non-empty.
	if [[ -z $fs_vol ]]; then
		log_fail "Filesystem or volume's name is undefined."
	fi
	if [[ -z $snap ]]; then
		log_fail "Snapshot's name is undefined."
	fi
	if [[ -z $bkmark ]]; then
		log_fail "Bookmark's name is undefined."
	fi

	# The bookmark must not exist yet; the dataset and the source
	# snapshot must both already exist.
	bkmarkexists $fs_vol#$bkmark && \
		log_fail "$fs_vol#$bkmark already exists."
	if ! datasetexists $fs_vol; then
		log_fail "$fs_vol must exist."
	fi
	if ! snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap must exist."
	fi

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
|
|
|
|
|
|
2017-07-28 21:12:34 +00:00
|
|
|
|
#
|
|
|
|
|
# Create a temporary clone result of an interrupted resumable 'zfs receive'
|
|
|
|
|
# $1 Destination filesystem name. Must not exist, will be created as the result
|
|
|
|
|
# of this function along with its %recv temporary clone
|
|
|
|
|
# $2 Source filesystem name. Must not exist, will be created and destroyed
|
|
|
|
|
#
|
|
|
|
|
# Create a temporary clone result of an interrupted resumable
# 'zfs receive'.
#
# $1  destination filesystem name; must not exist, will be created as
#     the result of this function along with its %recv temporary clone
# $2  source filesystem name; must not exist, will be created and
#     destroyed; default $TESTPOOL/create_recv_clone
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	# Build a throwaway source fs with two snapshots: the first is
	# fully received into $recvfs, the second provides the
	# incremental stream to interrupt.
	log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	# Truncate the incremental stream to 10K via dd so the receive
	# below can never complete.
	log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
	    iflag=fullblock > $sendfile"
	# The truncated resumable receive (-s) is EXPECTED to fail,
	# leaving the partially-received %recv clone behind.
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	destroy_dataset "$sendfs" "-r"
	log_must rm -f "$sendfile"

	# Sanity check: the interrupted receive must have left the
	# destination marked inconsistent.
	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
# Set up a mirrored $TESTPOOL from the given disks and end the test
# step with a PASS.
#
# NOTE(review): only the first three arguments are forwarded here,
# while default_mirror_setup_noexit builds the mirror from its full
# "$@" — callers passing more than three disks get different behavior
# through the two entry points.
function default_mirror_setup
{
	default_mirror_setup_noexit $1 $2 $3

	# log_pass comes from logapi.shlib; presumably terminates the
	# caller with a PASS result.
	log_pass
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Given a pair of disks, set up a storage pool and dataset for the mirror
|
|
|
|
|
# @parameters: $1 the primary side of the mirror
|
|
|
|
|
# $2 the secondary side of the mirror
|
|
|
|
|
# @uses: ZPOOL ZFS TESTPOOL TESTFS
|
|
|
|
|
#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
#              $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
	# NOTE(review): 'readonly' (not typeset) — in a sourced library
	# this marks 'func' read-only for the whole shell session.
	readonly func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	[[ -z $primary ]] && \
		log_fail "$func: No parameters passed"
	[[ -z $secondary ]] && \
		log_fail "$func: No secondary partition passed"
	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	# NOTE(review): only $1/$2 are validated above, but the mirror is
	# created from all of $@ — any extra disks are silently included.
	log_must zpool create -f $TESTPOOL mirror $@
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Destroy the configured testpool mirrors.
|
|
|
|
|
# the mirrors are of the form ${TESTPOOL}{number}
|
|
|
|
|
# @uses: ZPOOL ZFS TESTPOOL
|
|
|
|
|
#
# Destroy the configured testpool mirrors.
# the mirrors are of the form ${TESTPOOL}{number}
# @uses: ZPOOL ZFS TESTPOOL
#
# Implemented as the generic pool cleanup followed by a PASS.
function destroy_mirrors
{
	default_cleanup_noexit

	# log_pass comes from logapi.shlib; presumably terminates the
	# caller with a PASS result.
	log_pass
}
|
|
|
|
|
|
2022-03-04 01:18:07 +00:00
|
|
|
|
# Set up a raidz $TESTPOOL from the given disks and end the test step
# with a PASS.
#
# All arguments are joined into one word ("$*") — the noexit helper
# re-splits them into individual disks itself.
function default_raidz_setup
{
	default_raidz_setup_noexit "$*"

	# log_pass comes from logapi.shlib; presumably terminates the
	# caller with a PASS result.
	log_pass
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
|
|
|
|
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
|
|
|
|
|
# $1 the list of disks
|
|
|
|
|
#
|
2022-03-04 01:18:07 +00:00
|
|
|
|
#
# Given a minimum of two disks, set up a storage pool and dataset for
# the raid-z.
# $1 the list of disks (whitespace separated; may arrive as one word)
#
function default_raidz_setup_noexit
{
	typeset disklist="$*"
	# Intentionally unquoted: split the disk list into an array so
	# the individual disks can be counted. (Sets the global 'disks'.)
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	# Remove any stale mount directory before recreating the pool.
	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Common function used to cleanup storage pools and datasets.
|
|
|
|
|
#
|
|
|
|
|
# Invoked at the start of the test suite to ensure the system
|
|
|
|
|
# is in a known state, and also at the end of each set of
|
|
|
|
|
# sub-tests to ensure errors from one set of tests doesn't
|
|
|
|
|
# impact the execution of the next set.
|
|
|
|
|
|
|
|
|
|
# Common function used to cleanup storage pools and datasets, then end
# the test step with a PASS.
#
# Invoked at the start of the test suite to ensure a known state, and
# at the end of each set of sub-tests so errors don't leak into the
# next set.
function default_cleanup
{
	default_cleanup_noexit

	# log_pass comes from logapi.shlib; presumably terminates the
	# caller with a PASS result.
	log_pass
}
|
|
|
|
|
|
2017-09-25 17:32:34 +00:00
|
|
|
|
#
|
|
|
|
|
# Utility function used to list all available pool names.
|
|
|
|
|
#
|
|
|
|
|
# NOTE: $KEEP is a variable containing pool names, separated by a newline
|
|
|
|
|
# character, that must be excluded from the returned list.
|
|
|
|
|
#
|
|
|
|
|
#
# Utility function used to list all available pool names.
#
# NOTE: $KEEP is a variable containing pool names, separated by a
# newline character, that must be excluded from the returned list;
# grep -Fvx drops only exact whole-line matches against it. $NO_POOLS
# is applied as a plain (unanchored) grep pattern exclusion.
#
function get_all_pools
{
	zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
# Destroy every pool/dataset the suite is allowed to remove and reset the
# test environment, without emitting log_pass (unlike default_cleanup).
#
# Globals read: ZONE_POOL, ZONE_CTR, TESTDIR, DISKS, TEST_BASE_DIR, and
# (indirectly via get_all_pools) KEEP / NO_POOLS.
#
function default_cleanup_noexit
{
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		ALL_POOLS=$(get_all_pools)
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
			done
			# Re-scan: destroying one pool may make another
			# (e.g. a pool layered on it) destroyable.
			ALL_POOLS=$(get_all_pools)
		done

		zfs mount -a
	else
		typeset fs=""
		# Inside a local zone: we cannot destroy pools, only the
		# datasets delegated into the zone containers.
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			destroy_dataset "$fs" "-Rf"
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				# checksum cannot be turned fully 'on' for
				# encrypted datasets, so skip those.
				typeset enc=$(get_prop encryption $fs)
				if [ -z "$enc" ] || [ "$enc" = "off" ]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR

	# Multipath devices may only be used whole; strip any partitions
	# the tests created on them.
	disk1=${DISKS%% *}
	if is_mpath_device $disk1; then
		delete_partitions
	fi

	rm -f $TEST_BASE_DIR/{err,out}
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#
# Common function used to cleanup storage pools, file systems
# and containers.
#
# Unmounts/destroys the container datasets, removes $TESTDIR1, then
# falls through to default_cleanup (which emits log_pass).
#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1 &&
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1

	default_cleanup
}
|
|
|
|
|
|
|
|
|
|
#
# Common function used to cleanup snapshot of file system or volume. Default to
# delete the file system's snapshot
#
# $1 snapshot name
#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# 'get_prop mountpoint' does not report the real mountpoint once the
	# snapshot is unmounted, so only record the mountpoint while the
	# snapshot is known to be mounted on the current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
	fi

	destroy_dataset "$snap"
	# Remove the leftover mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
|
|
|
|
|
|
|
|
|
|
#
# Common function used to cleanup clone.
#
# $1 clone name (defaults to $TESTPOOL/$TESTCLONE)
#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# FIX: was the ungrammatical "'$clone' does not existed."
		log_fail "'$clone' does not exist."
	fi

	# With the same reason in destroy_snapshot: the mountpoint property
	# is only meaningful while the clone is actually mounted.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
	fi

	destroy_dataset "$clone"
	# Remove the leftover mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
|
|
|
|
|
|
2017-01-26 22:42:15 +00:00
|
|
|
|
#
# Common function used to cleanup bookmark of file system or volume. Default
# to delete the file system's bookmark.
#
# $1 bookmark name (defaults to $TESTPOOL/$TESTFS#$TESTBKMARK)
#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# FIX: message previously interpolated the undefined
		# variable $bkmarkp (and read "does not existed.").
		log_fail "'$bkmark' does not exist."
	fi

	destroy_dataset "$bkmark"
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name
#
function snapexists
{
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
|
|
|
|
|
|
2017-01-26 22:42:15 +00:00
|
|
|
|
#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
|
|
|
|
|
|
2019-08-12 17:02:34 +00:00
|
|
|
|
#
# Return 0 if a hold exists; $? otherwise
#
# $1 - hold tag
# $2 - snapshot name
#
function holdexists
{
	# awk exits 1 on the first hold whose tag matches $1 (regex match),
	# and the leading '!' inverts that into a 0 "found" status.
	! zfs holds "$2" | awk -v t="$1" '$2 ~ t { exit 1 }'
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
#	$1 dataset who's property is being set
#	$2 property to set
#	$3 value to set property to
# @return:
#	0 if the property could be set.
#	non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	typeset output=
	# Capture stderr too so a failure can be reported via log_note.
	output=$(zfs set $2=$3 $1 2>&1)
	typeset rv=$?
	if ((rv != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}
|
|
|
|
|
|
|
|
|
|
#
# Check a numeric (arithmetic) assertion.
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
	if ! (($@)); then
		log_fail "$@"
	fi
}
|
|
|
|
|
|
|
|
|
|
#
# Function to format partition size of a disk
# Given a disk cxtxdx reduces all partitions
# to 0 size
#
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset i

	if is_freebsd; then
		gpart destroy -F $diskname
	elif is_linux; then
		DSK=$DEV_DSKDIR/$diskname
		# Collapse any accidental double slash in the device path.
		DSK=$(echo $DSK | sed -e "s|//|/|g")
		# Writing a fresh GPT label discards all partitions at once.
		log_must parted $DSK -s -- mklabel gpt
		blockdev --rereadpt $DSK 2>/dev/null
		block_device_wait
	else
		# illumos: shrink each slice to 0mb; slice 2 (whole disk)
		# is deliberately skipped.
		for i in 0 1 3 4 5 6 7
		do
			log_must set_partition $i "" 0mb $diskname
		done
	fi

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements eg. 100mb 3gb
#
# NOTE: This entire interface is problematic for the Linux parted utility
# which requires the end of the partition to be specified. It would be
# best to retire this interface and replace it with something more flexible.
# At the moment a best effort is made.
#
# arguments: <slice_num> <slice_start> <size_plus_units> <whole_disk_name>
function set_partition
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	# Accept either a bare name or a /dev path.
	typeset disk=${4#$DEV_DSKDIR/}
	disk=${disk#$DEV_RDSKDIR/}

	case "$UNAME" in
	Linux)
		if [[ -z $size || -z $disk ]]; then
			log_fail "The size or disk name is unspecified."
		fi
		disk=$DEV_DSKDIR/$disk
		# Convert "<n>mb"/"<n>gb" into a plain megabyte count.
		typeset size_mb=${size%%[mMgG]}

		size_mb=${size_mb%%[mMgG][bB]}
		if [[ ${size:1:1} == 'g' ]]; then
			((size_mb = size_mb * 1024))
		fi

		# Create GPT partition table when setting slice 0 or
		# when the device doesn't already contain a GPT label.
		parted $disk -s -- print 1 >/dev/null
		typeset ret_val=$?
		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
			if ! parted $disk -s -- mklabel gpt; then
				log_note "Failed to create GPT partition table on $disk"
				return 1
			fi
		fi

		# When no start is given align on the first cylinder.
		if [[ -z "$start" ]]; then
			start=1
		fi

		# Determine the cylinder size for the device and using
		# that calculate the end offset in cylinders.
		typeset -i cly_size_kb=0
		cly_size_kb=$(parted -m $disk -s -- unit cyl print |
			awk -F '[:k.]' 'NR == 3 {print $4}')
		((end = (size_mb * 1024 / cly_size_kb) + start))

		parted $disk -s -- \
		    mkpart part$slicenum ${start}cyl ${end}cyl
		typeset ret_val=$?
		if [[ $ret_val -ne 0 ]]; then
			log_note "Failed to create partition $slicenum on $disk"
			return 1
		fi

		blockdev --rereadpt $disk 2>/dev/null
		block_device_wait $disk
		;;
	FreeBSD)
		if [[ -z $size || -z $disk ]]; then
			log_fail "The size or disk name is unspecified."
		fi
		disk=$DEV_DSKDIR/$disk

		if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
			gpart destroy -F $disk >/dev/null 2>&1
			if ! gpart create -s GPT $disk; then
				log_note "Failed to create GPT partition table on $disk"
				return 1
			fi
		fi

		# gpart indices are 1-based, slice numbers are 0-based.
		typeset index=$((slicenum + 1))

		if [[ -n $start ]]; then
			start="-b $start"
		fi
		gpart add -t freebsd-zfs $start -s $size -i $index $disk
		# FIX: ret_val was never assigned here, so a failed
		# 'gpart add' previously went undetected (the stale/unset
		# value from an earlier command was tested instead).
		typeset ret_val=$?
		if [[ $ret_val -ne 0 ]]; then
			log_note "Failed to create partition $slicenum on $disk"
			return 1
		fi

		block_device_wait $disk
		;;
	*)
		# illumos: drive the interactive format(8) utility with a
		# scripted answer file.
		if [[ -z $slicenum || -z $size || -z $disk ]]; then
			log_fail "The slice, size or disk name is unspecified."
		fi

		typeset format_file=/var/tmp/format_in.$$

		echo "partition" >$format_file
		echo "$slicenum" >> $format_file
		echo "" >> $format_file
		echo "" >> $format_file
		echo "$start" >> $format_file
		echo "$size" >> $format_file
		echo "label" >> $format_file
		echo "" >> $format_file
		echo "q" >> $format_file
		echo "q" >> $format_file

		format -e -s -d $disk -f $format_file
		typeset ret_val=$?
		rm -f $format_file
		;;
	esac

	# Common failure report for the paths that fall through with a
	# non-zero ret_val (illumos format above).
	if [[ $ret_val -ne 0 ]]; then
		log_note "Unable to format $disk slice $slicenum to $size"
		return 1
	fi
	return 0
}
|
|
|
|
|
|
2016-07-22 15:07:04 +00:00
|
|
|
|
#
# Delete all partitions on all disks - this is specifically for the use of multipath
# devices which currently can only be used in the test suite as raw/un-partitioned
# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
#
function delete_partitions
{
	typeset disk

	if [[ -z $DISKSARRAY ]]; then
		DISKSARRAY=$DISKS
	fi

	if is_linux; then
		typeset -i part
		for disk in $DISKSARRAY; do
			for (( part = 1; part < MAX_PARTITIONS; part++ )); do
				typeset partition=${disk}${SLICE_PREFIX}${part}
				# Best-effort removal; verify via lsblk below.
				parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
				if lsblk | grep -qF ${partition}; then
					log_fail "Partition ${partition} not deleted"
				else
					log_note "Partition ${partition} deleted"
				fi
			done
		done
	elif is_freebsd; then
		for disk in $DISKSARRAY; do
			if gpart destroy -F $disk; then
				log_note "Partitions for ${disk} deleted"
			else
				log_fail "Partitions for ${disk} not deleted"
			fi
		done
	fi
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
# Get the end cyl of the given slice
# (echoed on stdout; empty on the illumos path if the geometry
# cannot be determined)
#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	case "$UNAME" in
	Linux)
		# Parse the "part<slice>" row of parted's cylinder-unit
		# listing; column 3 is the end, e.g. "123cyl".
		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
			awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
		((endcyl = (endcyl + 1)))
		;;
	FreeBSD)
		disk=${disk#/dev/zvol/}
		disk=${disk%p*}
		# gpart indices are 1-based.
		slice=$((slice + 1))
		# gpart show columns: start size index - end = start + size.
		endcyl=$(gpart show $disk | \
			awk -v slice=$slice '$3 == slice { print $1 + $2 }')
		;;
	*)
		disk=${disk#/dev/dsk/}
		disk=${disk#/dev/rdsk/}
		disk=${disk%s*}

		# sectors per cylinder, needed to convert prtvtoc's
		# sector offsets into cylinders.
		typeset -i ratio=0
		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
			awk '/sectors\/cylinder/ {print $2}')

		if ((ratio == 0)); then
			return
		fi

		# Column 6 of prtvtoc -h is the last sector of the slice.
		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
			awk -v token="$slice" '$1 == token {print $6}')

		((endcyl = (endcyl + 1) / ratio))
		;;
	esac

	echo $endcyl
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#
# Given a size,disk and total slice number,  this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	# Start cylinder of the next slice; empty on the first iteration,
	# which makes set_partition align on the first cylinder.
	typeset cyl

	zero_partitions $disk_name
	while ((i < $total_slices)); do
		if ! is_linux; then
			# Skip slice 2, the traditional whole-disk slice.
			if ((i == 2)); then
				((i = i + 1))
				continue
			fi
		fi
		log_must set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
		((i = i+1))
	done
}
|
|
|
|
|
|
|
|
|
|
#
# This function continues to write to a filenum number of files into dirnum
# number of directories until either file_write returns an error or the
# maximum number of files per directory have been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
#		non 0 on error
#
# Where :
#	destdir:    is the directory where everything is to be created under
#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
#	filenum:    the maximum number of files per subdirectory
#	bytes:	    number of bytes to write
#	num_writes: number of types to write out bytes
#	data:	    the data that will be written
#
#	E.g.
#	fill_fs /testdir 20 25 1024 256 0
#
# Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset data=${6:-0}

	# NOTE: {1..$dirnum} brace expansion with a variable is a ksh93
	# feature (bash would treat it literally); this library runs under ksh.
	mkdir -p $destdir/{1..$dirnum}
	for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
		file_write -o create -f $f -b $bytes -c $num_writes -d $data \
		|| return
	done
}
|
|
|
|
|
|
2022-03-11 22:54:08 +00:00
|
|
|
|
# Get the specified dataset property in parsable format or fail
#
# $1 - property name
# $2 - dataset name
#
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2

	zfs get -Hpo value "$prop" "$dataset" || log_fail "zfs get $prop $dataset"
}
|
|
|
|
|
|
2022-03-11 22:54:08 +00:00
|
|
|
|
# Get the specified pool property in parsable format or fail
#
# $1 - property name
# $2 - pool name
#
function get_pool_prop # property pool
{
	typeset prop=$1
	typeset pool=$2

	zpool get -Hpo value "$prop" "$pool" || log_fail "zpool get $prop $pool"
}
|
|
|
|
|
|
|
|
|
|
# Return 0 if the named pool exists; $? otherwise
#
# $1 - pool name
#
function poolexists
{
	typeset name=$1

	if [[ -n $name ]]; then
		zpool get name "$name" > /dev/null 2>&1
	else
		log_note "No pool name given."
		return 1
	fi
}
|
|
|
|
|
|
|
|
|
|
# Return 0 if every listed dataset exists; $? otherwise
#
# $1-n dataset name(s)
#
function datasetexists
{
	if (($# > 0)); then
		zfs get name "$@" > /dev/null 2>&1
	else
		log_note "No dataset name given."
		return 1
	fi
}
|
|
|
|
|
|
|
|
|
|
# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n dataset name(s)
#
function datasetnonexists
{
	typeset ds

	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# The moment any name resolves to a real dataset, fail.
	for ds in "$@"; do
		zfs list -H -t filesystem,snapshot,volume $ds > /dev/null 2>&1 \
		    && return 1
	done

	return 0
}
|
|
|
|
|
|
2020-02-21 23:59:20 +00:00
|
|
|
|
# Return 0 if the mountpoint is currently NFS-exported on FreeBSD.
#
# $1 - mountpoint
#
function is_shared_freebsd
{
	typeset fs=$1

	# Only meaningful while mountd is running; showmount -E lists the
	# exported paths, one per line.
	pgrep -q mountd && showmount -E | grep -qx "$fs"
}
|
|
|
|
|
|
|
|
|
|
# Return 0 if the mountpoint is currently NFS-shared on illumos.
#
# $1 - mountpoint
#
function is_shared_illumos
{
	typeset fs=$1
	typeset mtpt

	# Column 2 of share(1M) output is the shared path.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared - as a debugging aid, report if the NFS server
	# service itself is not online.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
|
|
|
|
|
|
2020-02-21 23:59:20 +00:00
|
|
|
|
# Return 0 if the mountpoint is currently NFS-exported on Linux.
#
# $1 - mountpoint
#
function is_shared_linux
{
	typeset fs=$1
	# exportfs -s lists one export path per line; backslashes in the
	# path are doubled before comparison so awk's string compare sees
	# the literal path. awk exits 1 on a match, '!' flips that to 0.
	! exportfs -s | awk -v fs="${fs//\\/\\\\}" '/^\// && $1 == fs {exit 1}'
}
|
|
|
|
|
|
2018-09-21 17:54:49 +00:00
|
|
|
|
#
# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A name not starting with '/' is a dataset: resolve it to its
	# mountpoint first.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case "$mtpt" in
			none|legacy|-) return 1
				;;
			*) fs=$mtpt
				;;
			esac
		fi
	fi

	case "$UNAME" in
	FreeBSD)	is_shared_freebsd "$fs"	;;
	Linux)		is_shared_linux "$fs"	;;
	*)		is_shared_illumos "$fs"	;;
	esac
}
|
|
|
|
|
|
2020-07-13 16:19:18 +00:00
|
|
|
|
# Return 0 if the mountpoint appears in the illumos NFS share table.
#
# $1 - mountpoint
#
function is_exported_illumos
{
	typeset fs=$1
	typeset mtpt _

	# First field of each sharetab line is the shared path.
	while read -r mtpt _; do
		[ "$mtpt" = "$fs" ] && return
	done < /etc/dfs/sharetab

	return 1
}
|
|
|
|
|
|
|
|
|
|
# Return 0 if the mountpoint appears in FreeBSD's ZFS exports file.
#
# $1 - mountpoint
#
function is_exported_freebsd
{
	typeset fs=$1
	typeset mtpt _

	# First field of each exports line is the exported path.
	while read -r mtpt _; do
		[ "$mtpt" = "$fs" ] && return
	done < /etc/zfs/exports

	return 1
}
|
|
|
|
|
|
|
|
|
|
# Return 0 if the mountpoint appears in Linux's ZFS exports file.
#
# $1 - mountpoint
#
function is_exported_linux
{
	typeset fs=$1
	typeset mtpt _

	while read -r mtpt _; do
		# The path field may contain escape sequences (presumably
		# written by the share code for special characters);
		# printf with the field as the format string decodes them
		# before comparing - TODO confirm the escaping scheme.
		[ "$(printf "$mtpt")" = "$fs" ] && return
	done < /etc/exports.d/zfs.exports

	return 1
}
|
|
|
|
|
|
|
|
|
|
#
# Given a mountpoint, or a dataset name, determine if it is exported via
# the os-specific NFS exports file.
#
# Returns 0 if exported, 1 otherwise.
#
function is_exported
{
	typeset fs=$1
	typeset mtpt

	# A name not starting with '/' is a dataset: resolve it to its
	# mountpoint first.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
			none|legacy|-) return 1
				;;
			*) fs=$mtpt
				;;
			esac
		fi
	fi

	case "$UNAME" in
	FreeBSD)	is_exported_freebsd "$fs"	;;
	Linux)		is_exported_linux "$fs"	;;
	*)		is_exported_illumos "$fs"	;;
	esac
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
# Given a dataset name determine if it is shared via SMB.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared_smb
{
	typeset fs=$1

	datasetexists "$fs" || return

	if is_linux; then
		# Samba usershare names are the dataset name with '/'
		# replaced by '_'.
		net usershare list | grep -xFq "${fs//\//_}"
	else
		log_note "SMB on $UNAME currently unsupported by the test framework"
		return 1
	fi
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Given a mountpoint, determine if it is not shared via NFS.
|
|
|
|
|
#
|
|
|
|
|
# Returns 0 if not shared, 1 otherwise.
|
|
|
|
|
#
|
|
|
|
|
# Given a mountpoint, determine if it is not shared via NFS.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
	if is_shared $1; then
		return 1
	fi
	return 0
}
|
|
|
|
|
|
|
|
|
|
#
|
2016-11-29 19:22:38 +00:00
|
|
|
|
# Given a dataset determine if it is not shared via SMB.
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
2016-11-29 19:22:38 +00:00
|
|
|
|
# Returns 0 if not shared, 1 otherwise.
|
|
|
|
|
#
|
|
|
|
|
# Given a dataset determine if it is not shared via SMB.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared_smb
{
	if is_shared_smb $1; then
		return 1
	fi
	return 0
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Helper function to unshare a mountpoint.
|
|
|
|
|
#
|
|
|
|
|
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
	typeset fs=$1

	# Only issue the unshare if the dataset is actually shared via
	# NFS or SMB; otherwise there is nothing to do.
	is_shared $fs || is_shared_smb $fs || return 0
	log_must zfs unshare $fs
}
|
|
|
|
|
|
2016-11-29 19:22:38 +00:00
|
|
|
|
#
|
|
|
|
|
# Helper function to share a NFS mountpoint.
|
|
|
|
|
#
|
|
|
|
|
# Helper function to share a NFS mountpoint.
#
# $1 - mountpoint to export
#
# Returns 0 on success (or if already shared); fails via log_must otherwise.
#
function share_nfs #fs
{
	typeset fs=$1

	# Nothing to do if the filesystem is already exported.
	is_shared "$fs" && return

	case "$UNAME" in
	Linux)
		# Export to all hosts.
		log_must exportfs "*:$fs"
		;;
	FreeBSD)
		typeset mountd
		read -r mountd < /var/run/mountd.pid
		# Append the path to the ZFS exports file (tab keeps the
		# line well-formed), then signal mountd(8) to reload.
		log_must eval "printf '%s\t\n' \"$fs\" >> /etc/zfs/exports"
		log_must kill -s HUP "$mountd"
		;;
	*)
		# illumos/Solaris.
		log_must share -F nfs "$fs"
		;;
	esac

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Helper function to unshare a NFS mountpoint.
|
|
|
|
|
#
|
|
|
|
|
# Helper function to unshare a NFS mountpoint.
#
# $1 - mountpoint to stop exporting
#
# Returns 0 on success (or if not currently shared).
#
function unshare_nfs #fs
{
	typeset fs=$1

	# Already unshared; nothing to do.
	! is_shared "$fs" && return

	case "$UNAME" in
	Linux)
		log_must exportfs -u "*:$fs"
		;;
	FreeBSD)
		typeset mountd
		read -r mountd < /var/run/mountd.pid
		# Filter the exact path out of the exports file; the
		# ${fs//\\/\\\\} doubling keeps any backslashes literal
		# when awk interprets the -v value.  Then reload mountd(8).
		awk -v fs="${fs//\\/\\\\}" '$1 != fs' /etc/zfs/exports > /etc/zfs/exports.$$
		log_must mv /etc/zfs/exports.$$ /etc/zfs/exports
		log_must kill -s HUP "$mountd"
		;;
	*)
		log_must unshare -F nfs $fs
		;;
	esac

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Helper function to show NFS shares.
|
|
|
|
|
#
|
|
|
|
|
# Helper function to show NFS shares.
#
function showshares_nfs
{
	# Print the active NFS exports with the platform's native tool.
	if is_linux; then
		exportfs -v
	elif is_freebsd; then
		showmount
	else
		share -F nfs
	fi
}
|
|
|
|
|
|
2020-07-13 16:19:18 +00:00
|
|
|
|
# Probe for working NFS utilities; log_unsupported (which exits the test)
# if the platform is unknown or the platform tool fails.
function check_nfs
{
	case "$UNAME" in
	Linux)
		exportfs -s
		;;
	FreeBSD)
		showmount -e
		;;
	*)
		log_unsupported "Unknown platform"
		;;
	esac || log_unsupported "The NFS utilities are not installed"
	# NOTE: the '|| log_unsupported' applies to the exit status of the
	# case statement as a whole, i.e. the branch that ran.
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
|
|
|
|
# Check NFS server status and trigger it online.
|
|
|
|
|
#
|
|
|
|
|
# Check NFS server status and trigger it online.
#
# On Linux/FreeBSD the server must already be running; this just refreshes
# the export list.  On illumos a dummy share is created to coax the SMF
# NFS service online permanently.
#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	if is_linux; then
		#
		# Re-synchronize /var/lib/nfs/etab with /etc/exports and
		# /etc/exports.d./* to provide a clean test environment.
		#
		log_must exportfs -r

		log_note "NFS server must be started prior to running ZTS."
		return
	elif is_freebsd; then
		# Ask mountd(8) to re-read its export list.
		log_must kill -s HUP $(</var/run/mountd.pid)

		log_note "NFS server must be started prior to running ZTS."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		typeset -i timeout=10
		# Fix: expand $timeout explicitly rather than relying on
		# implicit arithmetic resolution of the bare name.
		while [[ $timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1
			((timeout -= 1))
		done

		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# To verify whether calling process is in global zone
|
|
|
|
|
#
|
|
|
|
|
# Return 0 if in global zone, 1 in non-global zone
|
|
|
|
|
#
|
|
|
|
|
# To verify whether calling process is in global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
	if is_linux || is_freebsd; then
		# Zones only exist on illumos/Solaris.
		return 0
	else
		typeset cur_zone=$(zonename 2>/dev/null)
		# Quote the expansion: if zonename failed and produced no
		# output, an unquoted test would be a syntax error.
		[ "$cur_zone" = "global" ]
	fi
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Verify whether test is permitted to run from
|
|
|
|
|
# global zone, local zone, or both
|
|
|
|
|
#
|
|
|
|
|
# $1 zone limit, could be "global", "local", or "both"(no limit)
|
|
|
|
|
#
|
|
|
|
|
# Return 0 if permitted, otherwise exit with log_unsupported
|
|
|
|
|
#
|
|
|
|
|
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both"(no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
	typeset limit=$1

	# An empty limit means the test may run anywhere.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
		global|both)
			;;
		local)
			log_unsupported "Test is unable to run from "\
			    "global zone."
			;;
		*)
			log_note "Warning: unknown limit $limit - " \
			    "use both."
			;;
		esac
	else
		case $limit in
		local|both)
			;;
		global)
			log_unsupported "Test is unable to run from "\
			    "local zone."
			;;
		*)
			log_note "Warning: unknown limit $limit - " \
			    "use both."
			;;
		esac

		# Local zones need their delegated datasets remounted.
		reexport_pool
	fi

	return 0
}
|
|
|
|
|
|
|
|
|
|
# Return 0 if create successfully or the pool exists; $? otherwise
|
|
|
|
|
# Note: In local zones, this function should return 0 silently.
|
|
|
|
|
#
|
|
|
|
|
# $1 - pool name
|
|
|
|
|
# $2-n - [keyword] devs_list
|
|
|
|
|
|
|
|
|
|
# Return 0 if create successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list
#
function create_pool #pool devs_list
{
	# Strip any dataset component: only the pool name is wanted.
	typeset pool=${1%%/*}
	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Start from a clean slate if the pool already exists.
	poolexists $pool && destroy_pool $pool

	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
|
|
|
|
|
|
|
|
|
|
# Return 0 if destroy successfully or the pool exists; $? otherwise
|
|
|
|
|
# Note: In local zones, this function should return 0 silently.
|
|
|
|
|
#
|
|
|
|
|
# $1 - pool name
|
|
|
|
|
# Destroy pool with the given parameters.
|
|
|
|
|
|
|
|
|
|
# Return 0 if destroy successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.
#
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# Pools can only be destroyed from the global zone.
	is_global_zone || return 0

	if ! poolexists "$pool" ; then
		log_note "Pool does not exist. ($pool)"
		return 1
	fi

	mtpt=$(get_prop mountpoint "$pool")

	# At times, syseventd/udev activity can cause attempts
	# to destroy a pool to fail with EBUSY. We retry a few
	# times allowing failures before requiring the destroy
	# to succeed.
	log_must_busy zpool destroy -f $pool

	[[ -d $mtpt ]] && \
		log_must rm -rf $mtpt

	return 0
}
|
|
|
|
|
|
2018-04-10 20:29:55 +00:00
|
|
|
|
# Return 0 if created successfully; $? otherwise
|
|
|
|
|
#
|
|
|
|
|
# $1 - dataset name
|
|
|
|
|
# $2-n - dataset options
|
|
|
|
|
|
|
|
|
|
# Return 0 if created successfully; $? otherwise
#
# $1 - dataset name
# $2-n - dataset options
#
function create_dataset #dataset dataset_options
{
	typeset dataset=$1
	shift

	if [[ -z $dataset ]]; then
		log_note "Missing dataset name."
		return 1
	fi

	# Recreate from scratch if the dataset is already present.
	datasetexists $dataset && destroy_dataset $dataset

	log_must zfs create $@ $dataset

	return 0
}
|
|
|
|
|
|
2018-03-06 22:54:57 +00:00
|
|
|
|
# Return 0 if destroy successfully or the dataset exists; $? otherwise
|
|
|
|
|
# Note: In local zones, this function should return 0 silently.
|
|
|
|
|
#
|
|
|
|
|
# $1 - dataset name
|
|
|
|
|
# $2 - custom arguments for zfs destroy
|
|
|
|
|
# Destroy dataset with the given parameters.
|
|
|
|
|
|
2022-03-09 22:54:26 +00:00
|
|
|
|
# Return 0 if destroy successfully or the dataset exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - dataset name
# $2 - custom arguments for zfs destroy
# Destroy dataset with the given parameters.
#
function destroy_dataset # dataset [args]
{
	typeset dataset=$1
	typeset mtpt
	typeset args=${2:-""}

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	# Datasets can only be destroyed from the global zone.
	is_global_zone || return 0

	if ! datasetexists "$dataset" ; then
		log_note "Dataset does not exist. ($dataset)"
		return 1
	fi

	mtpt=$(get_prop mountpoint "$dataset")
	log_must_busy zfs destroy $args $dataset

	# Clean up the stale mountpoint directory, if any.
	[ -d $mtpt ] && log_must rm -rf $mtpt

	return 0
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
|
|
|
|
# Reexport TESTPOOL & TESTPOOL(1-4)
|
|
|
|
|
#
|
|
|
|
|
# Reexport TESTPOOL & TESTPOOL(1-4)
#
# Remounts the zone-delegated datasets and (re)sets the global TESTPOOL
# and TESTPOOL1..TESTPOOL4 variables to point at them.
#
function reexport_pool
{
	typeset -i cntctr=5	# number of zone containers handled
	typeset -i i=0

	while ((i < cntctr)); do
		if ((i == 0)); then
			# First container becomes the primary TESTPOOL.
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# Remaining containers become TESTPOOL1..TESTPOOL4;
			# eval is required for the dynamic variable names.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
|
|
|
|
|
|
|
|
|
|
#
|
2016-09-23 20:51:08 +00:00
|
|
|
|
# Verify a given disk or pool state
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
|
|
|
|
# Return 0 is pool/disk matches expected state, 1 otherwise
|
|
|
|
|
#
|
2016-09-23 20:51:08 +00:00
|
|
|
|
# Verify a given disk or pool state
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline,degraded}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	if [[ -z $pool || -z $state ]]; then
		log_fail "Arguments invalid or missing"
	fi

	if [[ -n $disk ]]; then
		# Check the state of an individual vdev.
		zpool status -v $pool | grep "$disk" | grep -qi "$state"
	else
		# No disk given: check overall pool health only.
		zpool get -H -o value health $pool | grep -qi "$state"
	fi
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Get the mountpoint of snapshot
|
|
|
|
|
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
|
|
|
|
|
# as its mountpoint
|
|
|
|
|
#
|
|
|
|
|
# Get the mountpoint of snapshot
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	[[ $dataset == *@* ]] ||
		log_fail "Error name of snapshot '$dataset'."

	# Split into filesystem and snapshot components at the '@'.
	typeset filesys=${dataset%@*}
	typeset snapname=${dataset#*@}

	if [[ -z $filesys || -z $snapname ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	echo $(get_prop mountpoint $filesys)/.zfs/snapshot/$snapname
}
|
|
|
|
|
|
2017-05-03 16:31:05 +00:00
|
|
|
|
#
|
|
|
|
|
# Given a device and 'ashift' value verify it's correctly set on every label
|
|
|
|
|
#
|
|
|
|
|
# Given a device and 'ashift' value verify it's correctly set on every label
#
# $1 - device path
# $2 - expected ashift value
#
# Returns 0 if all four vdev labels carry the expected ashift.
#
function verify_ashift # device ashift
{
	typeset device="$1"
	typeset ashift="$2"

	# Dump all labels (-lll) and require the expected ashift on each
	# one; a vdev has exactly 4 labels, so count must end at 4.
	zdb -e -lll $device | awk -v ashift=$ashift '
	    /ashift: / {
		if (ashift != $2)
			exit 1;
		else
			count++;
	    }
	    END {
		exit (count != 4);
	    }'
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
|
|
|
|
# Given a pool and file system, this function will verify the file system
|
|
|
|
|
# using the zdb internal tool. Note that the pool is exported and imported
|
|
|
|
|
# to ensure it has consistent state.
|
|
|
|
|
#
|
|
|
|
|
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
# $1 - pool name
# $2 - filesystem to verify
# $3-n - optional directories passed to 'zpool import -d'
#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Unmount everything first; failures are tolerated here.
	zfs unmount -a > /dev/null 2>&1
	# Export/import cycle gives zdb a consistent on-disk state.
	log_must zpool export $pool

	# Build the '-d dir' search path for the import, if any.
	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	if ! zdb -cudi $filesys > $zdbout 2>&1; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		rm -f $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
|
|
|
|
|
|
2018-12-04 17:37:37 +00:00
|
|
|
|
#
|
|
|
|
|
# Given a pool issue a scrub and verify that no checksum errors are reported.
|
|
|
|
|
#
|
|
|
|
|
# Given a pool issue a scrub and verify that no checksum errors are reported.
#
# $1 - pool name (defaults to $TESTPOOL)
#
function verify_pool
{
	typeset pool=${1:-$TESTPOOL}

	log_must zpool scrub $pool
	log_must wait_scrubbed $pool

	# Sum the CKSUM column over all vdev rows of 'zpool status':
	# rows start after the header line ending in "CKSUM" and stop
	# at the first blank line.
	typeset -i cksum=$(zpool status $pool | awk '
	    !NF { isvdev = 0 }
	    isvdev { errors += $NF }
	    /CKSUM$/ { isvdev = 1 }
	    END { print errors }
	')
	if [[ $cksum != 0 ]]; then
		log_must zpool status -v
		log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
	fi
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
|
|
|
|
# Given a pool, and this function list all disks in the pool
|
|
|
|
|
#
|
|
|
|
|
# Given a pool, and this function list all disks in the pool
#
# Emits the leaf vdev names on one line: the iostat header (first 4
# lines), separator rows, and grouping vdevs (mirror/raidz/draid/spare/
# log/cache/special/dedup and their numbered instances) are filtered out.
#
function get_disklist # pool
{
	echo $(zpool iostat -v $1 | awk '(NR > 4) {print $1}' | \
	    grep -vEe '^-----' -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")
}
|
|
|
|
|
|
2016-06-15 22:47:05 +00:00
|
|
|
|
#
|
|
|
|
|
# Given a pool, and this function list all disks in the pool with their full
|
|
|
|
|
# path (like "/dev/sda" instead of "sda").
|
|
|
|
|
#
|
|
|
|
|
# Given a pool, and this function list all disks in the pool with their full
# path (like "/dev/sda" instead of "sda").
#
function get_disklist_fullpath # pool
{
	# Intentionally passed as one word: get_disklist expands its
	# argument unquoted, so "-P $1" splits into the -P flag plus
	# the pool name inside its 'zpool iostat' invocation.
	get_disklist "-P $1"
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
# /**
|
|
|
|
|
# This function kills a given list of processes after a time period. We use
|
|
|
|
|
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
|
|
|
|
|
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
|
|
|
|
|
# would be listed as FAIL, which we don't want : we're happy with stress tests
|
|
|
|
|
# running for a certain amount of time, then finishing.
|
|
|
|
|
#
|
|
|
|
|
# @param $1 the time in seconds after which we should terminate these processes
|
|
|
|
|
# @param $2..$n the processes we wish to terminate.
|
|
|
|
|
# */
|
|
|
|
|
# /**
# This function kills a given list of processes after a time period. We use
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want : we're happy with stress tests
# running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		# Only signal processes that are still running.
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Verify a given hotspare disk is inuse or avail
|
|
|
|
|
#
|
|
|
|
|
# Return 0 is pool/disk matches expected state, 1 otherwise
|
|
|
|
|
#
|
|
|
|
|
# Verify a given hotspare disk is inuse or avail
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	# NOTE(review): cur_state is deliberately not typeset local here —
	# confirm no caller depends on reading it after the call.
	cur_state=$(get_device_state $pool $disk "spares")

	[ $state = $cur_state ]
}
|
|
|
|
|
|
2017-10-23 18:42:37 +00:00
|
|
|
|
#
|
|
|
|
|
# Wait until a hotspare transitions to a given state or times out.
|
|
|
|
|
#
|
|
|
|
|
# Return 0 when pool/disk matches expected state, 1 on timeout.
|
|
|
|
|
#
|
|
|
|
|
# Wait until a hotspare transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	typeset disk=${2#*$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i elapsed=0

	# Poll once per second until the spare reaches the desired state
	# or the timeout expires.
	while ((elapsed < timeout)); do
		check_hotspare_state $pool $disk $state && return 0
		((elapsed += 1))
		sleep 1
	done

	return 1
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
|
|
|
|
# Verify a given vdev disk is inuse or avail
|
|
|
|
|
#
|
|
|
|
|
# Return 0 is pool/disk matches expected state, 1 otherwise
|
|
|
|
|
#
|
|
|
|
|
# Verify a given vdev disk is inuse or avail
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#*$DEV_DSKDIR/}
	typeset state=$3

	# NOTE(review): cur_state is deliberately not typeset local here —
	# confirm no caller depends on reading it after the call.
	cur_state=$(get_device_state $pool $disk)

	[ $state = $cur_state ]
}
|
|
|
|
|
|
2017-10-23 18:42:37 +00:00
|
|
|
|
#
|
|
|
|
|
# Wait until a vdev transitions to a given state or times out.
|
|
|
|
|
#
|
|
|
|
|
# Return 0 when pool/disk matches expected state, 1 on timeout.
|
|
|
|
|
#
|
|
|
|
|
# Wait until a vdev transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	typeset disk=${2#*$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i elapsed=0

	# Poll once per second until the vdev reaches the desired state
	# or the timeout expires.
	while ((elapsed < timeout)); do
		check_vdev_state $pool $disk $state && return 0
		((elapsed += 1))
		sleep 1
	done

	return 1
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
|
|
|
|
# Check the output of 'zpool status -v <pool>',
|
|
|
|
|
# and to see if the content of <token> contain the <keyword> specified.
|
|
|
|
|
#
|
|
|
|
|
# Return 0 is contain, 1 otherwise
|
|
|
|
|
#
|
2017-07-07 05:16:13 +00:00
|
|
|
|
# Check the output of 'zpool status -v <pool>',
# and to see if the content of <token> contain the <keyword> specified.
#
# $1 - pool name
# $2 - status token (e.g. "scan"); matched as the line's first field
# $3 - keyword to look for (case-insensitive)
# $4 - optional "true" to log the matched line
#
# Return 0 is contain, 1 otherwise
#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Select the status line whose first field is "<token>:".
	scan=$(zpool status -v "$pool" 2>/dev/null | awk -v token="$token:" '$1==token')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	echo $scan | grep -qi "$keyword"
}
|
|
|
|
|
|
|
|
|
|
#
|
Add subcommand to wait for background zfs activity to complete
Currently the best way to wait for the completion of a long-running
operation in a pool, like a scrub or device removal, is to poll 'zpool
status' and parse its output, which is neither efficient nor convenient.
This change adds a 'wait' subcommand to the zpool command. When invoked,
'zpool wait' will block until a specified type of background activity
completes. Currently, this subcommand can wait for any of the following:
- Scrubs or resilvers to complete
- Devices to initialized
- Devices to be replaced
- Devices to be removed
- Checkpoints to be discarded
- Background freeing to complete
For example, a scrub that is in progress could be waited for by running
zpool wait -t scrub <pool>
This also adds a -w flag to the attach, checkpoint, initialize, replace,
remove, and scrub subcommands. When used, this flag makes the operations
kicked off by these subcommands synchronous instead of asynchronous.
This functionality is implemented using a new ioctl. The type of
activity to wait for is provided as input to the ioctl, and the ioctl
blocks until all activity of that type has completed. An ioctl was used
over other methods of kernel-userspace communiction primarily for the
sake of portability.
Porting Notes:
This is ported from Delphix OS change DLPX-44432. The following changes
were made while porting:
- Added ZoL-style ioctl input declaration.
- Reorganized error handling in zpool_initialize in libzfs to integrate
better with changes made for TRIM support.
- Fixed check for whether a checkpoint discard is in progress.
Previously it also waited if the pool had a checkpoint, instead of
just if a checkpoint was being discarded.
- Exposed zfs_initialize_chunk_size as a ZoL-style tunable.
- Updated more existing tests to make use of new 'zpool wait'
functionality, tests that don't exist in Delphix OS.
- Used existing ZoL tunable zfs_scan_suspend_progress, together with
zinject, in place of a new tunable zfs_scan_max_blks_per_txg.
- Added support for a non-integral interval argument to zpool wait.
Future work:
ZoL has support for trimming devices, which Delphix OS does not. In the
future, 'zpool wait' could be extended to add the ability to wait for
trim operations to complete.
Reviewed-by: Matt Ahrens <matt@delphix.com>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: John Gallagher <john.gallagher@delphix.com>
Closes #9162
2019-09-14 01:09:06 +00:00
|
|
|
|
# The following functions are instance of check_pool_status()
|
2020-07-03 18:05:50 +00:00
|
|
|
|
# is_pool_resilvering - to check if the pool resilver is in progress
|
|
|
|
|
# is_pool_resilvered - to check if the pool resilver is completed
|
|
|
|
|
# is_pool_scrubbing - to check if the pool scrub is in progress
|
|
|
|
|
# is_pool_scrubbed - to check if the pool scrub is completed
|
|
|
|
|
# is_pool_scrub_stopped - to check if the pool scrub is stopped
|
|
|
|
|
# is_pool_scrub_paused - to check if the pool scrub has paused
|
|
|
|
|
# is_pool_removing - to check if the pool removing is a vdev
|
|
|
|
|
# is_pool_removed - to check if the pool remove is completed
|
|
|
|
|
# is_pool_discarding - to check if the pool checkpoint is being discarded
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
2017-07-07 05:16:13 +00:00
|
|
|
|
function is_pool_resilvering #pool <verbose>
{
	# The bracketed pattern also matches the sequential/rebuild
	# resilver variants reported on the scan line.
	check_pool_status "$1" "scan" \
	    "resilver[ ()0-9A-Za-z:_-]* in progress since" $2
}
|
|
|
|
|
|
|
|
|
|
function is_pool_resilvered #pool <verbose>
{
	# True once the scan line reports "resilvered ...".
	check_pool_status "$1" "scan" "resilvered " $2
}
|
|
|
|
|
|
2017-07-07 05:16:13 +00:00
|
|
|
|
function is_pool_scrubbing #pool <verbose>
{
	# True while a scrub is actively running.
	check_pool_status "$1" "scan" "scrub in progress since " $2
}
|
|
|
|
|
|
2017-07-07 05:16:13 +00:00
|
|
|
|
function is_pool_scrubbed #pool <verbose>
{
	# True once the scan line reports a completed scrub.
	check_pool_status "$1" "scan" "scrub repaired" $2
}
|
|
|
|
|
|
2017-07-07 05:16:13 +00:00
|
|
|
|
function is_pool_scrub_stopped #pool <verbose>
{
	# True when the last scrub was canceled.
	check_pool_status "$1" "scan" "scrub canceled" $2
}
|
|
|
|
|
|
2017-07-07 05:16:13 +00:00
|
|
|
|
#
# Return 0 if a scrub on the given pool is currently paused.
#
# $1 pool name
# $2 optional: when set, also print the matching "zpool status" line
#
function is_pool_scrub_paused #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub paused since " $2
}
|
|
|
|
|
|
OpenZFS 7614, 9064 - zfs device evacuation/removal
OpenZFS 7614 - zfs device evacuation/removal
OpenZFS 9064 - remove_mirror should wait for device removal to complete
This project allows top-level vdevs to be removed from the storage pool
with "zpool remove", reducing the total amount of storage in the pool.
This operation copies all allocated regions of the device to be removed
onto other devices, recording the mapping from old to new location.
After the removal is complete, read and free operations to the removed
(now "indirect") vdev must be remapped and performed at the new location
on disk. The indirect mapping table is kept in memory whenever the pool
is loaded, so there is minimal performance overhead when doing operations
on the indirect vdev.
The size of the in-memory mapping table will be reduced when its entries
become "obsolete" because they are no longer used by any block pointers
in the pool. An entry becomes obsolete when all the blocks that use
it are freed. An entry can also become obsolete when all the snapshots
that reference it are deleted, and the block pointers that reference it
have been "remapped" in all filesystems/zvols (and clones). Whenever an
indirect block is written, all the block pointers in it will be "remapped"
to their new (concrete) locations if possible. This process can be
accelerated by using the "zfs remap" command to proactively rewrite all
indirect blocks that reference indirect (removed) vdevs.
Note that when a device is removed, we do not verify the checksum of
the data that is copied. This makes the process much faster, but if it
were used on redundant vdevs (i.e. mirror or raidz vdevs), it would be
possible to copy the wrong data, when we have the correct data on e.g.
the other side of the mirror.
At the moment, only mirrors and simple top-level vdevs can be removed
and no removal is allowed if any of the top-level vdevs are raidz.
Porting Notes:
* Avoid zero-sized kmem_alloc() in vdev_compact_children().
The device evacuation code adds a dependency that
vdev_compact_children() be able to properly empty the vdev_child
array by setting it to NULL and zeroing vdev_children. Under Linux,
kmem_alloc() and related functions return a sentinel pointer rather
than NULL for zero-sized allocations.
* Remove comment regarding "mpt" driver where zfs_remove_max_segment
is initialized to SPA_MAXBLOCKSIZE.
Change zfs_condense_indirect_commit_entry_delay_ticks to
zfs_condense_indirect_commit_entry_delay_ms for consistency with
most other tunables in which delays are specified in ms.
* ZTS changes:
Use set_tunable rather than mdb
Use zpool sync as appropriate
Use sync_pool instead of sync
Kill jobs during test_removal_with_operation to allow unmount/export
Don't add non-disk names such as "mirror" or "raidz" to $DISKS
Use $TEST_BASE_DIR instead of /tmp
Increase HZ from 100 to 1000 which is more common on Linux
removal_multiple_indirection.ksh
Reduce iterations in order to not time out on the code
coverage builders.
removal_resume_export:
Functionally, the test case is correct but there exists a race
where the kernel thread hasn't been fully started yet and is
not visible. Wait for up to 1 second for the removal thread
to be started before giving up on it. Also, increase the
amount of data copied in order that the removal not finish
before the export has a chance to fail.
* MMP compatibility, the concept of concrete versus non-concrete devices
has slightly changed the semantics of vdev_writeable(). Update
mmp_random_leaf_impl() accordingly.
* Updated dbuf_remap() to handle the org.zfsonlinux:large_dnode pool
feature which is not supported by OpenZFS.
* Added support for new vdev removal tracepoints.
* Test cases removal_with_zdb and removal_condense_export have been
intentionally disabled. When run manually they pass as intended,
but when running in the automated test environment they produce
unreliable results on the latest Fedora release.
They may work better once the upstream pool import refactoring is
merged into ZoL at which point they will be re-enabled.
Authored by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Alex Reece <alex@delphix.com>
Reviewed-by: George Wilson <george.wilson@delphix.com>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: Richard Laager <rlaager@wiktel.com>
Reviewed by: Tim Chase <tim@chase2k.com>
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Approved by: Garrett D'Amore <garrett@damore.org>
Ported-by: Tim Chase <tim@chase2k.com>
Signed-off-by: Tim Chase <tim@chase2k.com>
OpenZFS-issue: https://www.illumos.org/issues/7614
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/f539f1eb
Closes #6900
2016-09-22 16:30:13 +00:00
|
|
|
|
#
# Return 0 if a top-level vdev removal is in progress on the given pool.
#
# $1 pool name
#
function is_pool_removing #pool
{
	check_pool_status "$1" "remove" "in progress since "
}
|
|
|
|
|
|
|
|
|
|
#
# Return 0 if a top-level vdev removal has completed on the given pool.
#
# $1 pool name
#
function is_pool_removed #pool
{
	check_pool_status "$1" "remove" "completed on"
}
|
|
|
|
|
|
Add subcommand to wait for background zfs activity to complete
Currently the best way to wait for the completion of a long-running
operation in a pool, like a scrub or device removal, is to poll 'zpool
status' and parse its output, which is neither efficient nor convenient.
This change adds a 'wait' subcommand to the zpool command. When invoked,
'zpool wait' will block until a specified type of background activity
completes. Currently, this subcommand can wait for any of the following:
- Scrubs or resilvers to complete
- Devices to be initialized
- Devices to be replaced
- Devices to be removed
- Checkpoints to be discarded
- Background freeing to complete
For example, a scrub that is in progress could be waited for by running
zpool wait -t scrub <pool>
This also adds a -w flag to the attach, checkpoint, initialize, replace,
remove, and scrub subcommands. When used, this flag makes the operations
kicked off by these subcommands synchronous instead of asynchronous.
This functionality is implemented using a new ioctl. The type of
activity to wait for is provided as input to the ioctl, and the ioctl
blocks until all activity of that type has completed. An ioctl was used
over other methods of kernel-userspace communication primarily for the
sake of portability.
Porting Notes:
This is ported from Delphix OS change DLPX-44432. The following changes
were made while porting:
- Added ZoL-style ioctl input declaration.
- Reorganized error handling in zpool_initialize in libzfs to integrate
better with changes made for TRIM support.
- Fixed check for whether a checkpoint discard is in progress.
Previously it also waited if the pool had a checkpoint, instead of
just if a checkpoint was being discarded.
- Exposed zfs_initialize_chunk_size as a ZoL-style tunable.
- Updated more existing tests to make use of new 'zpool wait'
functionality, tests that don't exist in Delphix OS.
- Used existing ZoL tunable zfs_scan_suspend_progress, together with
zinject, in place of a new tunable zfs_scan_max_blks_per_txg.
- Added support for a non-integral interval argument to zpool wait.
Future work:
ZoL has support for trimming devices, which Delphix OS does not. In the
future, 'zpool wait' could be extended to add the ability to wait for
trim operations to complete.
Reviewed-by: Matt Ahrens <matt@delphix.com>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: John Gallagher <john.gallagher@delphix.com>
Closes #9162
2019-09-14 01:09:06 +00:00
|
|
|
|
#
# Return 0 if the given pool is currently discarding a checkpoint.
#
# $1 pool name
#
function is_pool_discarding #pool
{
	check_pool_status "$1" "checkpoint" "discarding"
}
|
|
|
|
|
|
2016-08-17 21:15:27 +00:00
|
|
|
|
#
# Block until the given pool reports a DEGRADED health state, polling
# once per second.
#
# $1 pool name
# $2 timeout in seconds (default 30)
#
# Returns 0 when the pool becomes DEGRADED, 1 on timeout.
#
function wait_for_degraded
{
	typeset pool=$1
	typeset timeout=${2:-30}
	typeset start=$SECONDS

	until [[ $(get_pool_prop health $pool) == "DEGRADED" ]]; do
		log_note "$pool is not yet degraded."
		sleep 1
		if ((SECONDS - start > timeout)); then
			log_note "$pool not degraded after $timeout seconds."
			return 1
		fi
	done

	return 0
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
2017-01-03 17:31:18 +00:00
|
|
|
|
# Use create_pool()/destroy_pool() to clean up the information in
|
2015-07-01 22:23:09 +00:00
|
|
|
|
# the given disk to avoid slice overlapping.
|
|
|
|
|
#
|
|
|
|
|
#
# Scrub ZFS state from the given vdevs: wipe partition tables, then
# create and destroy a throwaway pool on them to clear any old labels.
#
# $@ vdevs to clean
#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Remove stale partition tables from each device.
	for vdev in $@; do
		zero_partitions $vdev
	done

	# Cycling a scratch pool over the devices clears old ZFS labels.
	poolexists $pool && destroy_pool $pool
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
|
|
|
|
|
|
|
|
|
|
#/**
|
|
|
|
|
# A function to find and locate free disks on a system or from given
|
|
|
|
|
# disks as the parameter. It works by locating disks that are in use
|
|
|
|
|
# as swap devices and dump devices, and also disks listed in /etc/vfstab
|
|
|
|
|
#
|
|
|
|
|
# $@ given disks to find which are free, default is all disks in
|
|
|
|
|
# the test system
|
|
|
|
|
#
|
|
|
|
|
# @return a string containing the list of available disks
|
|
|
|
|
#*/
|
|
|
|
|
#
# Locate free disks, either from the arguments or (on illumos) by probing
# the system with format(8) and filtering out disks that are mounted,
# used for swap or dump, or excluded via ZFS_HOST_DEVICES_IGNORE.
#
# $@ candidate disks; default (illumos only) is all disks on the system
#
# Prints the list of available disks on stdout.
#
function find_disks
{
	# Trust provided list, no attempt is made to locate unused devices.
	if is_linux || is_freebsd; then
		echo "$@"
		return
	fi

	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	swap -l > $sfi
	dumpadm > $dmpi 2>/dev/null

	# Parse the "AVAILABLE DISK SELECTIONS" section of format(8) output
	# to enumerate every disk the system knows about.
	disks=${@:-$(echo "" | format -e 2>/dev/null | awk '
	BEGIN { FS="."; }
	/^Specify disk/{
		searchdisks=0;
	}
	{
		if (searchdisks && $2 !~ "^$"){
			split($2,arr," ");
			print arr[1];
		}
	}
	/^AVAILABLE DISK SELECTIONS:/{
		searchdisks=1;
	}
	')}

	unused=""
	for disk in $disks; do
		# Check for mounted
		grep -q "${disk}[sp]" /etc/mnttab && continue
		# Check for swap
		grep -q "${disk}[sp]" $sfi && continue
		# check for dump device
		grep -q "${disk}[sp]" $dmpi && continue
		# check to see if this disk hasn't been explicitly excluded
		# by a user-set environment variable
		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep -q "${disk}" && continue
		unused_candidates="$unused_candidates $disk"
	done
	rm $sfi $dmpi

	# now just check to see if those disks do actually exist
	# by looking for a device pointing to the first slice in
	# each case. limit the number to max_finddisksnum
	count=0
	for disk in $unused_candidates; do
		if is_disk_device $DEV_DSKDIR/${disk}s0 && \
		    [ $count -lt $max_finddisksnum ]; then
			unused="$unused $disk"
			# do not impose limit if $@ is provided
			[[ -z $@ ]] && ((count = count + 1))
		fi
	done

	# finally, return our disk list
	echo $unused
}
|
|
|
|
|
|
2019-12-18 20:29:43 +00:00
|
|
|
|
#
# Create a user on FreeBSD with pw(8), probing uids from 1000 upward
# until an unused one is found.
#
# $1 group name
# $2 user name
# $3 base of the home directory
#
function add_user_freebsd #<group_name> <user_name> <basedir>
{
	typeset group=$1
	typeset user=$2
	typeset basedir=$3

	# Check to see if the user exists.
	if id $user > /dev/null 2>&1; then
		return 0
	fi

	# Assign 1000 as the base uid
	typeset -i uid=1000
	while true; do
		pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
		case $? in
		0) break ;;
		# The uid is not unique
		65) ((uid += 1)) ;;
		*) return 1 ;;
		esac
		if [[ $uid == 65000 ]]; then
			log_fail "No user id available under 65000 for $user"
		fi
	done

	# Silence MOTD
	touch $basedir/$user/.hushlogin

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Delete the specified user.
|
|
|
|
|
#
|
|
|
|
|
# $1 login name
|
|
|
|
|
#
|
|
|
|
|
#
# Delete the specified user on FreeBSD, if it exists.
#
# $1 login name
#
function del_user_freebsd #<logname>
{
	typeset user=$1

	# Only attempt removal when the account actually exists.
	if id $user > /dev/null 2>&1; then
		log_must pw userdel $user
	fi

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Select valid gid and create specified group.
|
|
|
|
|
#
|
|
|
|
|
# $1 group name
|
|
|
|
|
#
|
|
|
|
|
#
# Select a valid gid and create the specified group on FreeBSD,
# probing gids from 1000 upward until pw(8) accepts one.
#
# $1 group name
#
function add_group_freebsd #<group_name>
{
	typeset group=$1

	# See if the group already exists.
	if pw groupshow $group >/dev/null 2>&1; then
		return 0
	fi

	# Assign 1000 as the base gid
	typeset -i gid=1000
	while true; do
		pw groupadd -g $gid -n $group > /dev/null 2>&1
		case $? in
		0) return 0 ;;
		# The gid is not unique
		65) ((gid += 1)) ;;
		*) return 1 ;;
		esac
		# Fix: the failure message previously said "user id" even
		# though this loop searches for a free group id.
		if [[ $gid == 65000 ]]; then
			log_fail "No group id available under 65000 for $group"
		fi
	done
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Delete the specified group.
|
|
|
|
|
#
|
|
|
|
|
# $1 group name
|
|
|
|
|
#
|
|
|
|
|
#
# Delete the specified group on FreeBSD.
#
# $1 group name
#
function del_group_freebsd #<group_name>
{
	typeset group=$1

	pw groupdel -n $group > /dev/null 2>&1
	case $? in
	# Group does not exist, or was deleted successfully.
	0|6|65) return 0 ;;
	# Name already exists as a group name
	9) log_must pw groupdel $group ;;
	*) return 1 ;;
	esac

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
# Create a user on illumos with useradd(8).
#
# $1 group name
# $2 user name
# $3 base of the home directory
#
function add_user_illumos #<group_name> <user_name> <basedir>
{
	typeset group=$1
	typeset user=$2
	typeset basedir=$3

	log_must useradd -g $group -d $basedir/$user -m $user

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
# Delete the specified user on illumos, retrying while the account is
# still reported as "currently used".
#
# $1 user name
#
function del_user_illumos #<user_name>
{
	typeset user=$1

	if id $user > /dev/null 2>&1; then
		log_must_retry "currently used" 6 userdel $user
	fi

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
# Select a valid gid and create the specified group on illumos,
# probing gids from 100 upward until groupadd(8) accepts one.
#
# $1 group name
#
function add_group_illumos #<group_name>
{
	typeset group=$1

	typeset -i gid=100
	while true; do
		groupadd -g $gid $group > /dev/null 2>&1
		case $? in
		0) return 0 ;;
		# The gid is not unique
		4) ((gid += 1)) ;;
		*) return 1 ;;
		esac
	done
}
|
|
|
|
|
|
|
|
|
|
#
# Delete the specified group on illumos.  A no-op rename probes whether
# the group exists before removal.
#
# $1 group name
#
function del_group_illumos #<group_name>
{
	typeset group=$1

	# Fix: the original referenced an undefined variable "$grp",
	# which made the groupmod probe fail and the function always
	# return 1; use the local "$group" set above.
	groupmod -n $group $group > /dev/null 2>&1
	case $? in
	# Group does not exist.
	6) return 0 ;;
	# Name already exists as a group name
	9) log_must groupdel $group ;;
	*) return 1 ;;
	esac
}
|
|
|
|
|
|
|
|
|
|
#
# Create a user on Linux with useradd(8) and add it to the group owning
# the zfs command line utilities.
#
# $1 group name
# $2 user name
# $3 base of the home directory
#
function add_user_linux #<group_name> <user_name> <basedir>
{
	typeset group=$1
	typeset user=$2
	typeset basedir=$3

	log_must useradd -g $group -d $basedir/$user -m $user

	# Add new users to the same group and the command line utils.
	# This allows them to be run out of the original users home
	# directory as long as it permissioned to be group readable.
	cmd_group=$(stat --format="%G" $(command -v zfs))
	log_must usermod -a -G $cmd_group $user

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
# Delete the specified user on Linux, retrying while the account is
# still reported as "currently used".
#
# $1 user name
#
function del_user_linux #<user_name>
{
	typeset user=$1

	if id $user > /dev/null 2>&1; then
		log_must_retry "currently used" 6 userdel $user
	fi
}
|
|
|
|
|
|
|
|
|
|
#
# Create the specified group on Linux.  Unlike the illumos/FreeBSD
# variants no gid is probed here: groupadd picks one itself, because on
# many distributions gids of 1000 and under are reserved.
#
# $1 group name
#
function add_group_linux #<group_name>
{
	typeset group=$1

	while true; do
		groupadd $group > /dev/null 2>&1
		case $? in
		0) return 0 ;;
		*) return 1 ;;
		esac
	done
}
|
|
|
|
|
|
|
|
|
|
#
# Delete the specified group on Linux, using getent(1) to test for its
# existence first.
#
# $1 group name
#
function del_group_linux #<group_name>
{
	typeset group=$1

	getent group $group > /dev/null 2>&1
	case $? in
	# Group does not exist.
	2) return 0 ;;
	# Name already exists as a group name
	0) log_must groupdel $group ;;
	*) return 1 ;;
	esac

	return 0
}
|
|
|
|
|
|
2015-07-01 22:23:09 +00:00
|
|
|
|
#
|
|
|
|
|
# Add specified user to specified group
|
|
|
|
|
#
|
|
|
|
|
# $1 group name
|
|
|
|
|
# $2 user name
|
|
|
|
|
# $3 base of the homedir (optional)
|
|
|
|
|
#
|
|
|
|
|
#
# Add specified user to specified group, dispatching to the
# platform-specific helper based on $UNAME.
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional, default /var/tmp)
#
function add_user #<group_name> <user_name> <basedir>
{
	typeset group=$1
	typeset user=$2
	typeset basedir=${3:-"/var/tmp"}

	if ((${#group} == 0 || ${#user} == 0)); then
		log_fail "group name or user name are not defined."
	fi

	case "$UNAME" in
	FreeBSD)
		add_user_freebsd "$group" "$user" "$basedir"
		;;
	Linux)
		add_user_linux "$group" "$user" "$basedir"
		;;
	*)
		add_user_illumos "$group" "$user" "$basedir"
		;;
	esac

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Delete the specified user.
|
|
|
|
|
#
|
|
|
|
|
# $1 login name
|
|
|
|
|
# $2 base of the homedir (optional)
|
|
|
|
|
#
|
|
|
|
|
#
# Delete the specified user and remove the home directory, dispatching
# to the platform-specific helper based on $UNAME.
#
# $1 login name
# $2 base of the homedir (optional, default /var/tmp)
#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	if ((${#user} == 0)); then
		log_fail "login name is necessary."
	fi

	case "$UNAME" in
	FreeBSD)
		del_user_freebsd "$user"
		;;
	Linux)
		del_user_linux "$user"
		;;
	*)
		del_user_illumos "$user"
		;;
	esac

	# Remove any leftover home directory as well.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Select valid gid and create specified group.
|
|
|
|
|
#
|
|
|
|
|
# $1 group name
|
|
|
|
|
#
|
|
|
|
|
#
# Select valid gid and create specified group, dispatching to the
# platform-specific helper based on $UNAME.
#
# $1 group name
#
function add_group #<group_name>
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	case "$UNAME" in
	FreeBSD)
		add_group_freebsd "$group"
		;;
	Linux)
		add_group_linux "$group"
		;;
	*)
		add_group_illumos "$group"
		;;
	esac

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Delete the specified group.
|
|
|
|
|
#
|
|
|
|
|
# $1 group name
|
|
|
|
|
#
|
|
|
|
|
#
# Delete the specified group, dispatching to the platform-specific
# helper based on $UNAME.
#
# $1 group name
#
function del_group #<group_name>
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	case "$UNAME" in
	FreeBSD)
		del_group_freebsd "$group"
		;;
	Linux)
		del_group_linux "$group"
		;;
	*)
		del_group_illumos "$group"
		;;
	esac

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# This function will return true if it's safe to destroy the pool passed
|
|
|
|
|
# as argument 1. It checks for pools based on zvols and files, and also
|
|
|
|
|
# files contained in a pool that may have a different mountpoint.
|
|
|
|
|
#
|
|
|
|
|
#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name
	typeset pool=""
	typeset DONT_DESTROY=""

	# We check that by deleting the $1 pool, we're not
	# going to pull the rug out from other pools. Do this
	# by looking at all other pools, ensuring that they
	# aren't built from files or zvols contained in this pool.

	for pool in $(zpool list -H -o name)
	do
		ALTMOUNTPOOL=""

		# vdevs of $pool that are files living under /$1/
		FILEPOOL=$(zpool status -v $pool | awk -v pool="/$1/" '$0 ~ pool {print $1}')

		# vdevs of $pool that are zvols provided by pool $1
		ZVOLPOOL=$(zpool status -v $pool | awk -v zvols="$ZVOL_DEVDIR/$1$" '$0 ~ zvols {print $1}')

		# also want to determine if it's a file-based pool using an
		# alternate mountpoint...
		POOL_FILE_DIRS=$(zpool status -v $pool | \
		    awk '/\// {print $1}' | \
		    awk -F/ '!/dev/ {print $2}')

		for pooldir in $POOL_FILE_DIRS
		do
			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
			    awk -v pd="${pooldir}$" '$0 ~ pd {print $1}')

			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
		done

		if [[ -n "$ZVOLPOOL" ]]; then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ZVOLPOOL on $1"
		fi

		if [[ -n "$FILEPOOL" ]]; then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $FILEPOOL on $1"
		fi

		if [[ -n "$ALTMOUNTPOOL" ]]; then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
		fi
	done

	if [[ -z "${DONT_DESTROY}" ]]; then
		return 0
	else
		log_note "Warning: it is not safe to destroy $1!"
		return 1
	fi
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Verify zfs operation with -p option work as expected
|
|
|
|
|
# $1 operation, value could be create, clone or rename
|
|
|
|
|
# $2 dataset type, value could be fs or vol
|
|
|
|
|
# $3 dataset name
|
|
|
|
|
# $4 new dataset name
|
|
|
|
|
#
|
|
|
|
|
#
# Verify zfs operation with -p option work as expected
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name
# $4 new dataset name
#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
	create)
		newdataset=$dataset
		dataset=""
		if [[ $datatype == "vol" ]]; then
			ops="create -V $VOLSIZE"
		fi
		;;
	clone)
		if [[ -z $newdataset ]]; then
			log_fail "newdataset should not be empty" \
			    "when ops is $ops."
		fi
		log_must datasetexists $dataset
		log_must snapexists $dataset
		;;
	rename)
		if [[ -z $newdataset ]]; then
			log_fail "newdataset should not be empty" \
			    "when ops is $ops."
		fi
		log_must datasetexists $dataset
		;;
	*)
		log_fail "$ops is not supported."
		;;
	esac

	# make sure the upper level filesystem does not exist
	destroy_dataset "${newdataset%/*}" "-rRf"

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	block_device_wait

	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Get configuration of pool
|
|
|
|
|
# $1 pool name
|
|
|
|
|
# $2 config name
|
|
|
|
|
#
|
|
|
|
|
#
# Get configuration of pool
# $1 pool name
# $2 config name
#
function get_config
{
	typeset pool=$1
	typeset config=$2

	poolexists "$pool" || return 1

	# Without a cachefile the labels must be read from disk (-e);
	# otherwise the cached config (-C) is authoritative.
	if [ "$(get_pool_prop cachefile "$pool")" = "none" ]; then
		zdb -e $pool
	else
		zdb -C $pool
	fi | awk -F: -v cfg="$config:" '$0 ~ cfg {sub(/^'\''/, $2); sub(/'\''$/, $2); print $2}'
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Privated function. Random select one of items from arguments.
|
|
|
|
|
#
|
|
|
|
|
# $1 count
|
|
|
|
|
# $2-n string
|
|
|
|
|
#
|
|
|
|
|
#
# Private function. Randomly select one item from the arguments.
#
# $1 count
# $2-n string
#
function _random_get
{
	typeset cnt=$1
	shift

	typeset str="$@"
	# Pick a 1-based field index at random.
	typeset -i ind=$((RANDOM % cnt + 1))

	echo "$str" | cut -f $ind -d ' '
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Random select one of item from arguments which include NONE string
|
|
|
|
|
#
|
|
|
|
|
#
# Randomly select one item from the arguments, where the extra slot
# beyond the last argument yields an empty (NONE) selection.
#
function random_get_with_non
{
	typeset -i cnt=$#
	# Fix: the original used "((cnt =+ 1))", which assigns +1 to cnt
	# instead of incrementing it, so the first argument was always
	# chosen and the NONE slot was unreachable.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Random select one of item from arguments which doesn't include NONE string
|
|
|
|
|
#
|
|
|
|
|
#
# Randomly select one item from the arguments (never the NONE slot).
#
function random_get
{
	_random_get "$#" "$@"
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# The function will generate a dataset name with specific length
|
|
|
|
|
# $1, the length of the name
|
|
|
|
|
# $2, the base string to construct the name
|
|
|
|
|
#
|
|
|
|
|
#
# Generate a dataset name of at least the requested length by repeating
# a base string.
#
# $1 the length of the name
# $2 the base string to construct the name
#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i reps
	typeset l_name=""

	# Ceiling division: repetitions needed to reach at least $len.
	((reps = (len + baselen - 1) / baselen))

	while ((reps > 0)); do
		l_name="${l_name}${basestr}"
		((reps -= 1))
	done

	echo $l_name
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Get cksum tuple of dataset
|
|
|
|
|
# $1 dataset name
|
|
|
|
|
#
|
|
|
|
|
# sample zdb output:
|
|
|
|
|
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
|
|
|
|
|
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
|
|
|
|
|
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
|
|
|
|
|
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
|
|
|
|
|
#
# Get cksum tuple of dataset
# $1 dataset name
#
# sample zdb output:
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
#
function datasetcksum
{
	typeset cksum
	# Flush dirty data first so zdb sees the final on-disk state.
	sync
	sync_all_pools
	zdb -vvv $1 | awk -F= -v ds="^Dataset $1 "'\\[' '$0 ~ ds && /cksum/ {print $7}'
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Get the given disk/slice state from the specific field of the pool
|
|
|
|
|
#
|
|
|
|
|
#
# Get the given disk/slice state from the specific field of the pool
#
function get_device_state #pool disk field("", "spares","logs")
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset field=${3:-$pool}

	# Scan the "config:" section of zpool status until the requested
	# field (pool name, "spares" or "logs") is reached, then report
	# the state column of the matching device.
	zpool status -v "$pool" 2>/dev/null | \
	    awk -v device=$disk -v pool=$pool -v field=$field \
	    'BEGIN {startconfig=0; startfield=0; }
	    /config:/ {startconfig=1}
	    (startconfig==1) && ($1==field) {startfield=1; next;}
	    (startfield==1) && ($1==device) {print $2; exit;}
	    (startfield==1) &&
	    ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}'
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# get the root filesystem name if it's zfsroot system.
|
|
|
|
|
#
|
|
|
|
|
# return: root filesystem name
|
|
|
|
|
#
# get the root filesystem name if it's zfsroot system.
#
# return: root filesystem name
#
function get_rootfs
{
	typeset rootfs=""

	if is_freebsd; then
		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
	elif ! is_linux; then
		rootfs=$(awk '$2 == "/" && $3 == "zfs" {print $1}' \
		    /etc/mnttab)
	fi

	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootfs"
	fi

	if datasetexists $rootfs; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# get the rootfs's pool name
|
|
|
|
|
# return:
|
|
|
|
|
# rootpool name
|
|
|
|
|
#
|
|
|
|
|
function get_rootpool
{
	# The pool name is the first component of the root dataset name.
	typeset ds
	ds=$(get_rootfs)
	echo ${ds%%/*}
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# To verify if the require numbers of disks is given
|
|
|
|
|
#
|
|
|
|
|
function verify_disk_count
{
	# $1 - whitespace-separated disk list, $2 - required minimum (def. 1)
	typeset -i min=${2:-1}
	typeset -i count
	count=$(echo "$1" | wc -w)

	(( count >= min )) ||
	    log_untested "A minimum of $min disks is required to run." \
		" You specified $count disk(s)"
}
|
|
|
|
|
|
|
|
|
|
function ds_is_volume
{
	# True when the dataset's 'type' property reports a volume.
	typeset dstype=$(get_prop type $1)
	[ "$dstype" = "volume" ]
}
|
|
|
|
|
|
|
|
|
|
function ds_is_filesystem
{
	# True when the dataset's 'type' property reports a filesystem.
	typeset dstype=$(get_prop type $1)
	[ "$dstype" = "filesystem" ]
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Check if Trusted Extensions are installed and enabled
|
|
|
|
|
#
|
|
|
|
|
function is_te_enabled
{
	# Trusted Extensions are active when the labeld service reports an
	# 'enabled' state; svcs is absent on non-illumos platforms, in which
	# case the state is empty and this returns false.
	typeset state=$(svcs -H -o state labeld 2>/dev/null)
	[[ "$state" == *enabled* ]]
}
|
|
|
|
|
|
|
|
|
|
# Utility function to determine if a system has multiple cpus.
|
|
|
|
|
function is_mp
{
	# Return success when the system has more than one CPU.
	case "$UNAME" in
	Linux)
		(($(grep -c '^processor' /proc/cpuinfo) > 1))
		;;
	FreeBSD)
		# Fix: compare the CPU count against 1 rather than just
		# printing it; the bare 'sysctl -n' always exited zero, so
		# is_mp was unconditionally true (and noisy) on FreeBSD.
		(($(sysctl -n kern.smp.cpus) > 1))
		;;
	*)
		(($(psrinfo | wc -l) > 1))
		;;
	esac
}
|
|
|
|
|
|
|
|
|
|
function get_cpu_freq
{
	# Print the CPU frequency in MHz using the platform's native tool.
	if is_freebsd; then
		sysctl -n hw.clockrate
	elif is_linux; then
		lscpu | awk '/CPU MHz/ { print $3 }'
	else
		psrinfo -v 0 | awk '/processor operates at/ {print $6}'
	fi
}
|
|
|
|
|
|
|
|
|
|
# Run the given command as the user provided.
|
|
|
|
|
function user_run
{
	typeset user=$1
	shift

	log_note "user: $user"
	log_note "cmd: $*"

	# Capture stdout/stderr so they can be echoed into the test log,
	# then propagate the command's exit status to the caller.
	typeset out=$TEST_BASE_DIR/out
	typeset err=$TEST_BASE_DIR/err

	sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err
	typeset rc=$?
	log_note "out: $(<$out)"
	log_note "err: $(<$err)"
	return $rc
}
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Check if the pool contains the specified vdevs
|
|
|
|
|
#
|
|
|
|
|
# $1 pool
|
|
|
|
|
# $2..n <vdev> ...
|
|
|
|
|
#
|
|
|
|
|
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
|
|
|
|
|
# vdevs is not in the pool, and 2 if pool name is missing.
|
|
|
|
|
#
|
|
|
|
|
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	# We could use 'zpool list' to only get the vdevs of the pool but we
	# can't reference a mirror/raidz vdev using its ID (i.e mirror-0),
	# therefore we use the 'zpool status' output.
	typeset tmpfile=$(mktemp)
	zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
	for vdev in "$@"; do
		# Fix: remove the temporary file on the failure path too;
		# it was previously leaked whenever a vdev was not found.
		if ! grep -wq ${vdev##*/} $tmpfile; then
			rm -f $tmpfile
			return 1
		fi
	done

	rm -f $tmpfile
	return 0
}
|
|
|
|
|
|
2016-08-03 21:26:15 +00:00
|
|
|
|
function get_max
{
	# Print the numerically largest of all arguments.
	typeset -l i max=$1
	shift

	for i in "$@"; do
		(( i > max )) && max=$i
	done

	echo $max
}
|
|
|
|
|
|
2017-04-11 21:56:54 +00:00
|
|
|
|
# Write data that can be compressed into a directory
|
|
|
|
|
function write_compressible
{
	# Write $2 (size) of ~66% compressible data into $3 (count, def. 1)
	# files named $5.N (def. "file") under directory $1, block size $4.
	typeset dir=$1
	typeset megs=$2
	typeset nfiles=${3:-1}
	typeset bs=${4:-1024k}
	typeset fname=${5:-file}

	[[ -d $dir ]] || log_fail "No directory: $dir"

	# Under Linux fio is not currently used since its behavior can
	# differ significantly across versions. This includes missing
	# command line options and cases where the --buffer_compress_*
	# options fail to behave as expected.
	if is_linux; then
		typeset file_bytes=$(to_bytes $megs)
		typeset bs_bytes=4096
		typeset blocks=$(($file_bytes / $bs_bytes))

		for (( i = 0; i < $nfiles; i++ )); do
			truncate -s $file_bytes $dir/$fname.$i

			# Write every third block to get 66% compression.
			for (( j = 0; j < $blocks; j += 3 )); do
				dd if=/dev/urandom of=$dir/$fname.$i \
				    seek=$j bs=$bs_bytes count=1 \
				    conv=notrunc >/dev/null 2>&1
			done
		done
	else
		command -v fio > /dev/null || log_unsupported "fio missing"
		log_must eval fio \
		    --name=job \
		    --fallocate=0 \
		    --minimal \
		    --randrepeat=0 \
		    --buffer_compress_percentage=66 \
		    --buffer_compress_chunk=4096 \
		    --directory="$dir" \
		    --numjobs="$nfiles" \
		    --nrfiles="$nfiles" \
		    --rw=write \
		    --bs="$bs" \
		    --filesize="$megs" \
		    "--filename_format='$fname.\$jobnum' >/dev/null"
	fi
}
|
|
|
|
|
|
|
|
|
|
function get_objnum
{
	# Print the inode/object number for the given path; fails the test
	# if the path does not exist.
	typeset pathname=$1

	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
	if is_freebsd; then
		stat -f "%i" $pathname
	else
		stat -c %i $pathname
	fi
}
|
|
|
|
|
|
2016-10-04 18:46:10 +00:00
|
|
|
|
#
|
2017-05-19 19:33:11 +00:00
|
|
|
|
# Sync data to the pool
|
2016-10-04 18:46:10 +00:00
|
|
|
|
#
|
|
|
|
|
# $1 pool name
|
2017-05-19 19:33:11 +00:00
|
|
|
|
# $2 boolean to force uberblock (and config including zpool cache file) update
|
2016-10-04 18:46:10 +00:00
|
|
|
|
#
|
2017-05-19 19:33:11 +00:00
|
|
|
|
function sync_pool #pool <force>
{
	typeset pool=${1:-$TESTPOOL}
	typeset force=${2:-false}
	typeset flags=""

	# 'zpool sync -f' additionally forces an uberblock (and config,
	# including zpool cache file) update.
	[[ $force == true ]] && flags="-f"
	log_must zpool sync $flags $pool

	return 0
}
|
2017-02-08 23:27:37 +00:00
|
|
|
|
|
2022-01-06 18:57:09 +00:00
|
|
|
|
#
|
|
|
|
|
# Sync all pools
|
|
|
|
|
#
|
|
|
|
|
# $1 boolean to force uberblock (and config including zpool cache file) update
|
|
|
|
|
#
|
|
|
|
|
function sync_all_pools #<force>
{
	typeset force=${1:-false}
	typeset flags=""

	# 'zpool sync -f' additionally forces an uberblock (and config,
	# including zpool cache file) update.
	[[ $force == true ]] && flags="-f"
	log_must zpool sync $flags

	return 0
}
|
|
|
|
|
|
2017-02-08 23:27:37 +00:00
|
|
|
|
#
|
|
|
|
|
# Wait for zpool 'freeing' property drops to zero.
|
|
|
|
|
#
|
|
|
|
|
# $1 pool name
|
|
|
|
|
#
|
|
|
|
|
function wait_freeing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll until the pool's 'freeing' property drops to zero.
	until [[ "$(zpool list -Ho freeing $pool)" == "0" ]]; do
		log_must sleep 1
	done
}
|
2017-03-02 16:47:26 +00:00
|
|
|
|
|
2017-05-03 16:31:05 +00:00
|
|
|
|
#
|
|
|
|
|
# Wait for every device replace operation to complete
|
|
|
|
|
#
|
|
|
|
|
# $1 pool name
|
|
|
|
|
#
|
|
|
|
|
function wait_replacing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll until 'zpool status' no longer reports an active
	# replacing-N vdev for the pool.
	until ! zpool status $pool | grep -qE 'replacing-[0-9]+'; do
		log_must sleep 1
	done
}
|
|
|
|
|
|
2018-02-23 19:38:05 +00:00
|
|
|
|
# Wait for a pool to be scrubbed
|
|
|
|
|
#
|
|
|
|
|
# $1 pool name
|
2022-03-03 18:43:38 +00:00
|
|
|
|
# $2 timeout
|
2018-02-23 19:38:05 +00:00
|
|
|
|
#
|
2022-03-03 18:43:38 +00:00
|
|
|
|
function wait_scrubbed #pool timeout
{
	typeset pool=${1:-$TESTPOOL}
	typeset -i timeout=${2:-300}
	typeset -i elapsed

	# Poll once per second until the scrub completes or the timeout
	# expires; callers are expected to verify the final pool state.
	for (( elapsed = 0; elapsed < timeout; elapsed++ )); do
		is_pool_scrubbed $pool && break
		sleep 1
	done
}
|
|
|
|
|
|
2018-03-06 23:41:52 +00:00
|
|
|
|
# Backup the zed.rc in our test directory so that we can edit it for our test.
|
|
|
|
|
#
|
|
|
|
|
# Returns: Backup file name. You will need to pass this to zed_rc_restore().
|
|
|
|
|
function zed_rc_backup
{
	# Copy the current zed.rc to a fresh temp file and print its path
	# (pass it to zed_rc_restore() later).
	# NOTE(review): zedrc_backup is global (no typeset) as in the
	# original; confirm no caller relies on it before localizing.
	zedrc_backup="$(mktemp)"
	cp $ZEDLET_DIR/zed.rc $zedrc_backup
	echo $zedrc_backup
}
|
|
|
|
|
|
|
|
|
|
function zed_rc_restore
{
	# Move the saved copy ($1) back into place, replacing the edited
	# zed.rc.
	mv $1 $ZEDLET_DIR/zed.rc
}
|
|
|
|
|
|
Enable remaining tests
Enable most of the remaining test cases which were previously
disabled. The required fixes are as follows:
* cache_001_pos - No changes required.
* cache_010_neg - Updated to use losetup under Linux. Loopback
cache devices are allowed, ZVOLs as cache devices are not.
Disabled until all the builders pass reliably.
* cachefile_001_pos, cachefile_002_pos, cachefile_003_pos,
cachefile_004_pos - Set set_device_dir path in cachefile.cfg,
updated CPATH1 and CPATH2 to reference unique files.
* zfs_clone_005_pos - Wait for udev to create volumes.
* zfs_mount_007_pos - Updated mount options to expected Linux names.
* zfs_mount_009_neg, zfs_mount_all_001_pos - No changes required.
* zfs_unmount_005_pos, zfs_unmount_009_pos, zfs_unmount_all_001_pos -
Updated to expect -f to not unmount busy mount points under Linux.
* rsend_019_pos - Observed to occasionally take a long time on both
32-bit systems and the kmemleak builder.
* zfs_written_property_001_pos - Switched sync(1) to sync_pool.
* devices_001_pos, devices_002_neg - Updated create_dev_file() helper
for Linux.
* exec_002_neg.ksh - Fixed mmap_exec.c to preserve errno. Updated
test case to expect EPERM from Linux as described by mmap(2).
* grow_pool_001_pos - Adding missing setup.ksh and cleanup.ksh
scripts from OpenZFS.
* grow_replicas_001_pos.ksh - Added missing $SLICE_* variables.
* history_004_pos, history_006_neg, history_008_pos - Fixed by
previous commits and were not enabled. No changes required.
* zfs_allow_010_pos - Added missing spaces after assorted zfs
commands in delegate_common.kshlib.
* inuse_* - Illumos dump device tests skipped. Remaining test
cases updated to correctly create required partitions.
* large_files_001_pos - Fixed largest_file.c to accept EINVAL
as well as EFBIG as described in write(2).
* link_count_001 - Added nproc to required commands.
* umountall_001 - Updated to use umount -a.
* online_offline_001_* - Pull in OpenZFS change to file_trunc.c
to make the '-c 0' option run the test in a loop. Included
online_offline.cfg file in all test cases.
* rename_dirs_001_pos - Updated to use the rename_dir test binary,
pkill restricted to exact matches and total runtime reduced.
* slog_013_neg, write_dirs_002_pos - No changes required.
* slog_013_pos.ksh - Updated to use losetup under Linux.
* slog_014_pos.ksh - ZED will not be running, manually degrade
the damaged vdev as expected.
* nopwrite_varying_compression, nopwrite_volume - Forced pool
sync with sync_pool to ensure up to date property values.
* Fixed typos in ZED log messages. Refactored zed_* helper
functions to resolve all-syslog exit=1 errors in zedlog.
* zfs_copies_005_neg, zfs_get_004_pos, zpool_add_004_pos,
zpool_destroy_001_pos, largest_pool_001_pos, clone_001_pos.ksh,
clone_001_pos, - Skip until layering pools on zvols is solid.
* largest_pool_001_pos - Limited to 7eb pool, maximum
supported size in 8eb-1 on Linux.
* zpool_expand_001_pos, zpool_expand_003_neg - Requires
additional support from the ZED, updated skip reason.
* zfs_rollback_001_pos, zfs_rollback_002_pos - Properly cleanup
busy mount points under Linux between test loops.
* privilege_001_pos, privilege_003_pos, rollback_003_pos,
threadsappend_001_pos - Skip with log_unsupported.
* snapshot_016_pos - No changes required.
* snapshot_008_pos - Increased LIMIT from 512K to 2M and added
sync_pool to avoid false positives.
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #6128
2017-05-19 00:21:15 +00:00
|
|
|
|
#
|
|
|
|
|
# Setup custom environment for the ZED.
|
|
|
|
|
#
|
2018-02-23 19:38:05 +00:00
|
|
|
|
# $@ Optional list of zedlets to run under zed.
|
Enable remaining tests
Enable most of the remaining test cases which were previously
disabled. The required fixes are as follows:
* cache_001_pos - No changes required.
* cache_010_neg - Updated to use losetup under Linux. Loopback
cache devices are allowed, ZVOLs as cache devices are not.
Disabled until all the builders pass reliably.
* cachefile_001_pos, cachefile_002_pos, cachefile_003_pos,
cachefile_004_pos - Set set_device_dir path in cachefile.cfg,
updated CPATH1 and CPATH2 to reference unique files.
* zfs_clone_005_pos - Wait for udev to create volumes.
* zfs_mount_007_pos - Updated mount options to expected Linux names.
* zfs_mount_009_neg, zfs_mount_all_001_pos - No changes required.
* zfs_unmount_005_pos, zfs_unmount_009_pos, zfs_unmount_all_001_pos -
Updated to expect -f to not unmount busy mount points under Linux.
* rsend_019_pos - Observed to occasionally take a long time on both
32-bit systems and the kmemleak builder.
* zfs_written_property_001_pos - Switched sync(1) to sync_pool.
* devices_001_pos, devices_002_neg - Updated create_dev_file() helper
for Linux.
* exec_002_neg.ksh - Fixed mmap_exec.c to preserve errno. Updated
test case to expect EPERM from Linux as described by mmap(2).
* grow_pool_001_pos - Adding missing setup.ksh and cleanup.ksh
scripts from OpenZFS.
* grow_replicas_001_pos.ksh - Added missing $SLICE_* variables.
* history_004_pos, history_006_neg, history_008_pos - Fixed by
previous commits and were not enabled. No changes required.
* zfs_allow_010_pos - Added missing spaces after assorted zfs
commands in delegate_common.kshlib.
* inuse_* - Illumos dump device tests skipped. Remaining test
cases updated to correctly create required partitions.
* large_files_001_pos - Fixed largest_file.c to accept EINVAL
as well as EFBIG as described in write(2).
* link_count_001 - Added nproc to required commands.
* umountall_001 - Updated to use umount -a.
* online_offline_001_* - Pull in OpenZFS change to file_trunc.c
to make the '-c 0' option run the test in a loop. Included
online_offline.cfg file in all test cases.
* rename_dirs_001_pos - Updated to use the rename_dir test binary,
pkill restricted to exact matches and total runtime reduced.
* slog_013_neg, write_dirs_002_pos - No changes required.
* slog_013_pos.ksh - Updated to use losetup under Linux.
* slog_014_pos.ksh - ZED will not be running, manually degrade
the damaged vdev as expected.
* nopwrite_varying_compression, nopwrite_volume - Forced pool
sync with sync_pool to ensure up to date property values.
* Fixed typos in ZED log messages. Refactored zed_* helper
functions to resolve all-syslog exit=1 errors in zedlog.
* zfs_copies_005_neg, zfs_get_004_pos, zpool_add_004_pos,
zpool_destroy_001_pos, largest_pool_001_pos, clone_001_pos.ksh,
clone_001_pos, - Skip until layering pools on zvols is solid.
* largest_pool_001_pos - Limited to 7eb pool, maximum
supported size in 8eb-1 on Linux.
* zpool_expand_001_pos, zpool_expand_003_neg - Requires
additional support from the ZED, updated skip reason.
* zfs_rollback_001_pos, zfs_rollback_002_pos - Properly cleanup
busy mount points under Linux between test loops.
* privilege_001_pos, privilege_003_pos, rollback_003_pos,
threadsappend_001_pos - Skip with log_unsupported.
* snapshot_016_pos - No changes required.
* snapshot_008_pos - Increased LIMIT from 512K to 2M and added
sync_pool to avoid false positives.
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #6128
2017-05-19 00:21:15 +00:00
|
|
|
|
function zed_setup
{
	if ! is_linux; then
		log_unsupported "No zed on $UNAME"
	fi

	# Create the private zedlet directory and an empty vdev_id.conf,
	# and make sure we are not about to clobber a real system config.
	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	if [[ ! -e $VDEVID_CONF ]]; then
		log_must touch $VDEVID_CONF
	fi

	if [[ -e $VDEVID_CONF_ETC ]]; then
		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
	fi
	EXTRA_ZEDLETS=$@

	# Create a symlink for /etc/zfs/vdev_id.conf file.
	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC

	# Setup minimal ZED configuration. Individual test cases should
	# add additional ZEDLETs as needed for their specific test.
	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR

	# Scripts must only be user writable.
	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
		saved_umask=$(umask)
		log_must umask 0022
		for zedlet in $EXTRA_ZEDLETS ; do
			log_must cp ${ZEDLET_LIBEXEC_DIR}/$zedlet $ZEDLET_DIR
		done
		log_must umask $saved_umask
	fi

	# Customize the zed.rc file to enable the full debug log.
	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
	echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
}
|
|
|
|
|
|
|
|
|
|
#
# Cleanup custom ZED environment.
#
# $@ Optional list of zedlets to remove from our test zed.d directory.
#
function zed_cleanup
{
	if ! is_linux; then
		return
	fi

	# Remove any caller-supplied zedlets first, then the standard
	# zedlets, state directory, and logs installed for the test run.
	for extra_zedlet; do
		log_must rm -f ${ZEDLET_DIR}/$extra_zedlet
	done

	# -d also removes the (now empty) ZEDLET_DIR itself; -f keeps this
	# idempotent when some of the files were never created.
	# NOTE(review): expansions are unquoted per suite convention —
	# assumes these paths never contain whitespace.
	log_must rm -fd ${ZEDLET_DIR}/zed.rc ${ZEDLET_DIR}/zed-functions.sh \
	    ${ZEDLET_DIR}/all-syslog.sh ${ZEDLET_DIR}/all-debug.sh \
	    ${ZEDLET_DIR}/state \
	    $ZED_LOG $ZED_DEBUG_LOG $VDEVID_CONF_ETC $VDEVID_CONF \
	    $ZEDLET_DIR
}
|
|
|
|
|
|
2022-03-03 18:43:38 +00:00
|
|
|
|
#
# Check if ZED is currently running; if so, returns PIDs
#
function zed_check
{
	if ! is_linux; then
		return
	fi

	# Both the installed daemon name and the in-tree libtool wrapper
	# name may be running; collect PIDs for either. typeset keeps
	# these local so we do not clobber the caller's $zedpids.
	typeset zedpids="$(pgrep -x zed)"
	typeset zedpids2="$(pgrep -x lt-zed)"

	# Unquoted on purpose: joins the newline-separated pgrep output
	# into a single space-separated list on stdout.
	echo ${zedpids} ${zedpids2}
}
|
|
|
|
|
|
2017-03-02 16:47:26 +00:00
|
|
|
|
#
# Check if ZED is currently running, if not start ZED.
#
function zed_start
{
	if ! is_linux; then
		return
	fi

	# ZEDLET_DIR=/var/tmp/zed
	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	# Verify the ZED is not already running.
	zedpids=$(zed_check)
	if [ -n "$zedpids" ]; then
		# We never, ever, really want it to just keep going if zed
		# is already running - usually this implies our test cases
		# will break very strangely because whatever we wanted to
		# configure zed for won't be listening to our changes in the
		# tmpdir
		log_fail "ZED already running - ${zedpids}"
	else
		log_note "Starting ZED"
		# run ZED in the background and redirect foreground logging
		# output to $ZED_LOG.
		log_must truncate -s 0 $ZED_DEBUG_LOG
		log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
		    "-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
	fi

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
# Kill ZED process
#
function zed_stop
{
	if ! is_linux; then
		# Bare return: the original 'return ""' is invalid (return
		# requires a numeric argument) and produced an error plus a
		# non-zero exit status on non-Linux platforms.
		return
	fi

	log_note "Stopping ZED"
	# Keep signalling until zed_check reports no remaining PIDs;
	# zed may take a moment to exit after SIGTERM, so poll with a
	# one second delay between attempts.
	while true; do
		zedpids=$(zed_check)
		[ -z "$zedpids" ] && break

		log_must kill $zedpids
		sleep 1
	done

	return 0
}
|
Enable additional test cases
Enable additional test cases, in most cases this required a few
minor modifications to the test scripts. In a few cases a real
bug was uncovered and fixed. And in a handful of cases where pools
are layered on pools the test case will be skipped until this is
supported. Details below for each test case.
* zpool_add_004_pos - Skip test on Linux until adding zvols to pools
is fully supported and deadlock free.
* zpool_add_005_pos.ksh - Skip dumpadm portion of the test which isn't
relevant for Linux. The find_vfstab_dev, find_mnttab_dev, and
save_dump_dev functions were updated accordingly for Linux. Add
O_EXCL to the in-use check to prevent the -f (force) option from
working for mounted filesystems and improve the resulting error.
* zpool_add_006_pos - Update test case such that it doesn't depend
on nested pools. Switch to truncate from mkfile to reduce space
requirements and speed up the test case.
* zpool_clear_001_pos - Speed up test case by filling filesystem to
25% capacity.
* zpool_create_002_pos, zpool_create_004_pos - Use sparse files for
file vdevs in order to avoid increasing the partition size.
* zpool_create_006_pos - 6ba1ce9 allows raidz+mirror configs with
similar redundancy. Updating the valid_args and forced_args cases.
* zpool_create_008_pos - Disable overlapping partition portion.
* zpool_create_011_neg - Fix to correctly create the extra partition.
Modified zpool_vdev.c to use fstat64_blk() wrapper which includes
the st_size even for block devices.
* zpool_create_012_neg - Updated to properly find swap devices.
* zpool_create_014_neg, zpool_create_015_neg - Updated to use
swap_setup() and swap_cleanup() wrappers which do the right thing
on Linux and Illumos. Removed '-n' option which succeeds under
Linux due to differences in the in-use checks.
* zpool_create_016_pos.ksh - Skipped test case isn't useful.
* zpool_create_020_pos - Added missing / to cleanup() function.
Remove cache file prior to test to ensure a clean environment
and avoid false positives.
* zpool_destroy_001_pos - Removed test case which creates a pool on
a zvol. This is more likely to deadlock under Linux and has never
been completely supported on any platform.
* zpool_destroy_002_pos - 'zpool destroy -f' is unsupported on Linux.
Mount point must not be busy in order to unmount them.
* zfs_destroy_001_pos - Handle EBUSY error which can occur with
volumes when racing with udev.
* zpool_expand_001_pos, zpool_expand_003_neg - Skip test on Linux
until adding zvols to pools is fully supported and deadlock free.
The test could be modified to use loop-back devices but it would
be preferable to use the test case as is for improved coverage.
* zpool_export_004_pos - Updated test case to such that it doesn't
depend on nested pools. Normal file vdev under /var/tmp are fine.
* zpool_import_all_001_pos - Updated to skip partition 1, which is
known as slice 2, on Illumos. This prevents overwriting the
default TESTPOOL which was causing the failure.
* zpool_import_002_pos, zpool_import_012_pos - No changes needed.
* zpool_remove_003_pos - No changes needed
* zpool_upgrade_002_pos, zpool_upgrade_004_pos - Root cause addressed
by upstream OpenZFS commit 3b7f360.
* zpool_upgrade_007_pos - Disabled in test case due to known failure.
Opened issue https://github.com/zfsonlinux/zfs/issues/6112
* zvol_misc_002_pos - Updated to use ext2.
* zvol_misc_001_neg, zvol_misc_003_neg, zvol_misc_004_pos,
zvol_misc_005_neg, zvol_misc_006_pos - Moved to skip list, these
test case could be updated to use Linux's crash dump facility.
* zvol_swap_* - Updated to use swap_setup/swap_cleanup helpers.
File creation switched from /tmp to /var/tmp. Enabled minimal
useful tests for Linux, skip test cases which aren't applicable.
Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Reviewed-by: loli10K <ezomori.nozomu@gmail.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #3484
Issue #5634
Issue #2437
Issue #5202
Issue #4034
Closes #6095
2017-05-11 21:27:57 +00:00
|
|
|
|
|
2017-12-09 00:58:41 +00:00
|
|
|
|
#
# Drain all zevents
#
# Blocks until the zevent queue is empty, clearing any events that
# arrive while we wait.  Polls once per second.
#
function zed_events_drain
{
	until [ $(zpool events -H | wc -l) -eq 0 ]; do
		sleep 1
		zpool events -c >/dev/null
	done
}
|
|
|
|
|
|
2018-03-06 23:41:52 +00:00
|
|
|
|
#
# Set a variable in zed.rc to something, un-commenting it in the process.
#
# $1 variable
# $2 value
#
function zed_rc_set
{
	typeset var="$1"
	typeset val="$2"

	# Remove any existing line mentioning the variable (commented or
	# not).  Quoting the pattern and path avoids the fragile
	# eval-based invocation and word-splitting on odd directory names.
	sed -i "/$var/d" "$ZEDLET_DIR/zed.rc"

	# Append the new, un-commented assignment at the end.
	echo "$var=$val" >> "$ZEDLET_DIR/zed.rc"
}
|
|
|
|
|
|
|
|
|
|
|
Enable additional test cases
Enable additional test cases, in most cases this required a few
minor modifications to the test scripts. In a few cases a real
bug was uncovered and fixed. And in a handful of cases where pools
are layered on pools the test case will be skipped until this is
supported. Details below for each test case.
* zpool_add_004_pos - Skip test on Linux until adding zvols to pools
is fully supported and deadlock free.
* zpool_add_005_pos.ksh - Skip dumpadm portion of the test which isn't
relevant for Linux. The find_vfstab_dev, find_mnttab_dev, and
save_dump_dev functions were updated accordingly for Linux. Add
O_EXCL to the in-use check to prevent the -f (force) option from
working for mounted filesystems and improve the resulting error.
* zpool_add_006_pos - Update test case such that it doesn't depend
on nested pools. Switch to truncate from mkfile to reduce space
requirements and speed up the test case.
* zpool_clear_001_pos - Speed up test case by filling filesystem to
25% capacity.
* zpool_create_002_pos, zpool_create_004_pos - Use sparse files for
file vdevs in order to avoid increasing the partition size.
* zpool_create_006_pos - 6ba1ce9 allows raidz+mirror configs with
similar redundancy. Updating the valid_args and forced_args cases.
* zpool_create_008_pos - Disable overlapping partition portion.
* zpool_create_011_neg - Fix to correctly create the extra partition.
Modified zpool_vdev.c to use fstat64_blk() wrapper which includes
the st_size even for block devices.
* zpool_create_012_neg - Updated to properly find swap devices.
* zpool_create_014_neg, zpool_create_015_neg - Updated to use
swap_setup() and swap_cleanup() wrappers which do the right thing
on Linux and Illumos. Removed '-n' option which succeeds under
Linux due to differences in the in-use checks.
* zpool_create_016_pos.ksh - Skipped test case isn't useful.
* zpool_create_020_pos - Added missing / to cleanup() function.
Remove cache file prior to test to ensure a clean environment
and avoid false positives.
* zpool_destroy_001_pos - Removed test case which creates a pool on
a zvol. This is more likely to deadlock under Linux and has never
been completely supported on any platform.
* zpool_destroy_002_pos - 'zpool destroy -f' is unsupported on Linux.
Mount point must not be busy in order to unmount them.
* zfs_destroy_001_pos - Handle EBUSY error which can occur with
volumes when racing with udev.
* zpool_expand_001_pos, zpool_expand_003_neg - Skip test on Linux
until adding zvols to pools is fully supported and deadlock free.
The test could be modified to use loop-back devices but it would
be preferable to use the test case as is for improved coverage.
* zpool_export_004_pos - Updated test case to such that it doesn't
depend on nested pools. Normal file vdev under /var/tmp are fine.
* zpool_import_all_001_pos - Updated to skip partition 1, which is
known as slice 2, on Illumos. This prevents overwriting the
default TESTPOOL which was causing the failure.
* zpool_import_002_pos, zpool_import_012_pos - No changes needed.
* zpool_remove_003_pos - No changes needed
* zpool_upgrade_002_pos, zpool_upgrade_004_pos - Root cause addressed
by upstream OpenZFS commit 3b7f360.
* zpool_upgrade_007_pos - Disabled in test case due to known failure.
Opened issue https://github.com/zfsonlinux/zfs/issues/6112
* zvol_misc_002_pos - Updated to use ext2.
* zvol_misc_001_neg, zvol_misc_003_neg, zvol_misc_004_pos,
zvol_misc_005_neg, zvol_misc_006_pos - Moved to skip list, these
test case could be updated to use Linux's crash dump facility.
* zvol_swap_* - Updated to use swap_setup/swap_cleanup helpers.
File creation switched from /tmp to /var/tmp. Enabled minimal
useful tests for Linux, skip test cases which aren't applicable.
Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Reviewed-by: loli10K <ezomori.nozomu@gmail.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #3484
Issue #5634
Issue #2437
Issue #5202
Issue #4034
Closes #6095
2017-05-11 21:27:57 +00:00
|
|
|
|
#
# Check whether the provided device is actively in use as a swap device.
#
# $1 device path
#
# Returns 0 if the device is in use as swap, non-zero otherwise.
#
function is_swap_inuse
{
	typeset device=$1

	if [[ -z $device ]] ; then
		log_note "No device specified."
		return 1
	fi

	case "$UNAME" in
	Linux)
		# Match against the resolved path since swapon reports the
		# real device even when configured through a symlink.
		# Quoting prevents breakage on paths with spaces or globs.
		swapon -s | grep -wq "$(readlink -f "$device")"
		;;
	FreeBSD)
		swapctl -l | grep -wq "$device"
		;;
	*)
		swap -l | grep -wq "$device"
		;;
	esac
}
|
|
|
|
|
|
|
|
|
|
#
# Setup a swap device using the provided device.
#
# $1 device to configure as swap
#
function swap_setup
{
	typeset swapdev=$1

	if [[ "$UNAME" == "Linux" ]]; then
		# Initialize the swap area first, then enable it.
		log_must eval "mkswap $swapdev > /dev/null 2>&1"
		log_must swapon $swapdev
	elif [[ "$UNAME" == "FreeBSD" ]]; then
		log_must swapctl -a $swapdev
	else
		log_must swap -a $swapdev
	fi

	return 0
}
|
|
|
|
|
|
|
|
|
|
#
# Cleanup a swap device on the provided device.
#
# $1 device to remove from swap (no-op if it is not in use as swap)
#
function swap_cleanup
{
	typeset swapdev=$1

	if is_swap_inuse $swapdev; then
		# Linux and FreeBSD both disable swap via swapoff; the
		# original code had two byte-identical branches for them.
		# Everything else (illumos) uses swap -d.
		if is_linux || is_freebsd; then
			log_must swapoff $swapdev
		else
			log_must swap -d $swapdev
		fi
	fi

	return 0
}
|
Multi-modifier protection (MMP)
Add multihost=on|off pool property to control MMP. When enabled
a new thread writes uberblocks to the last slot in each label, at a
set frequency, to indicate to other hosts the pool is actively imported.
These uberblocks are the last synced uberblock with an updated
timestamp. Property defaults to off.
During tryimport, find the "best" uberblock (newest txg and timestamp)
repeatedly, checking for change in the found uberblock. Include the
results of the activity test in the config returned by tryimport.
These results are reported to user in "zpool import".
Allow the user to control the period between MMP writes, and the
duration of the activity test on import, via a new module parameter
zfs_multihost_interval. The period is specified in milliseconds. The
activity test duration is calculated from this value, and from the
mmp_delay in the "best" uberblock found initially.
Add a kstat interface to export statistics about Multiple Modifier
Protection (MMP) updates. Include the last synced txg number, the
timestamp, the delay since the last MMP update, the VDEV GUID, the VDEV
label that received the last MMP update, and the VDEV path. Abbreviated
output below.
$ cat /proc/spl/kstat/zfs/mypool/multihost
31 0 0x01 10 880 105092382393521 105144180101111
txg timestamp mmp_delay vdev_guid vdev_label vdev_path
20468 261337 250274925 68396651780 3 /dev/sda
20468 261339 252023374 6267402363293 1 /dev/sdc
20468 261340 252000858 6698080955233 1 /dev/sdx
20468 261341 251980635 783892869810 2 /dev/sdy
20468 261342 253385953 8923255792467 3 /dev/sdd
20468 261344 253336622 042125143176 0 /dev/sdab
20468 261345 253310522 1200778101278 2 /dev/sde
20468 261346 253286429 0950576198362 2 /dev/sdt
20468 261347 253261545 96209817917 3 /dev/sds
20468 261349 253238188 8555725937673 3 /dev/sdb
Add a new tunable zfs_multihost_history to specify the number of MMP
updates to store history for. By default it is set to zero meaning that
no MMP statistics are stored.
When using ztest to generate activity, for automated tests of the MMP
function, some test functions interfere with the test. For example, the
pool is exported to run zdb and then imported again. Add a new ztest
function, "-M", to alter ztest behavior to prevent this.
Add new tests to verify the new functionality. Tests provided by
Giuseppe Di Natale.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Reviewed-by: Ned Bass <bass6@llnl.gov>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Olaf Faaland <faaland1@llnl.gov>
Closes #745
Closes #6279
2017-07-08 03:20:35 +00:00
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Set a global system tunable (64-bit value)
|
|
|
|
|
#
|
2020-01-14 22:57:28 +00:00
|
|
|
|
# $1 tunable name (use a NAME defined in tunables.cfg)
|
Multi-modifier protection (MMP)
Add multihost=on|off pool property to control MMP. When enabled
a new thread writes uberblocks to the last slot in each label, at a
set frequency, to indicate to other hosts the pool is actively imported.
These uberblocks are the last synced uberblock with an updated
timestamp. Property defaults to off.
During tryimport, find the "best" uberblock (newest txg and timestamp)
repeatedly, checking for change in the found uberblock. Include the
results of the activity test in the config returned by tryimport.
These results are reported to user in "zpool import".
Allow the user to control the period between MMP writes, and the
duration of the activity test on import, via a new module parameter
zfs_multihost_interval. The period is specified in milliseconds. The
activity test duration is calculated from this value, and from the
mmp_delay in the "best" uberblock found initially.
Add a kstat interface to export statistics about Multiple Modifier
Protection (MMP) updates. Include the last synced txg number, the
timestamp, the delay since the last MMP update, the VDEV GUID, the VDEV
label that received the last MMP update, and the VDEV path. Abbreviated
output below.
$ cat /proc/spl/kstat/zfs/mypool/multihost
31 0 0x01 10 880 105092382393521 105144180101111
txg timestamp mmp_delay vdev_guid vdev_label vdev_path
20468 261337 250274925 68396651780 3 /dev/sda
20468 261339 252023374 6267402363293 1 /dev/sdc
20468 261340 252000858 6698080955233 1 /dev/sdx
20468 261341 251980635 783892869810 2 /dev/sdy
20468 261342 253385953 8923255792467 3 /dev/sdd
20468 261344 253336622 042125143176 0 /dev/sdab
20468 261345 253310522 1200778101278 2 /dev/sde
20468 261346 253286429 0950576198362 2 /dev/sdt
20468 261347 253261545 96209817917 3 /dev/sds
20468 261349 253238188 8555725937673 3 /dev/sdb
Add a new tunable zfs_multihost_history to specify the number of MMP
updates to store history for. By default it is set to zero meaning that
no MMP statistics are stored.
When using ztest to generate activity, for automated tests of the MMP
function, some test functions interfere with the test. For example, the
pool is exported to run zdb and then imported again. Add a new ztest
function, "-M", to alter ztest behavior to prevent this.
Add new tests to verify the new functionality. Tests provided by
Giuseppe Di Natale.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Reviewed-by: Ned Bass <bass6@llnl.gov>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Olaf Faaland <faaland1@llnl.gov>
Closes #745
Closes #6279
2017-07-08 03:20:35 +00:00
|
|
|
|
# $2 tunable values
|
|
|
|
|
#
|
|
|
|
|
# Set a 64-bit global system tunable ("Z" is the mdb 64-bit write format
# character used on SunOS; other platforms ignore it).
function set_tunable64
{
	set_tunable_impl "$1" "$2" "Z"
}
|
|
|
|
|
|
|
|
|
|
#
# Set a global system tunable (32-bit value)
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable values
#
# "W" is the mdb 32-bit write format character used on SunOS; other
# platforms ignore it.
function set_tunable32
{
	set_tunable_impl "$1" "$2" "W"
}
|
|
|
|
|
|
|
|
|
|
#
# Internal helper for set_tunable32/set_tunable64: resolve the NAME
# declared in tunables.cfg and set the tunable via the platform's
# mechanism.
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 value to write
# $3 mdb write format character (W = 32-bit, Z = 64-bit); SunOS only
#
function set_tunable_impl
{
	typeset name="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"

	# Map the generic NAME to this platform's tunable name, as
	# declared in tunables.cfg.
	eval "typeset tunable=\$$name"
	case "$tunable" in
	UNSUPPORTED)
		log_unsupported "Tunable '$name' is unsupported on $UNAME"
		;;
	"")
		log_fail "Tunable '$name' must be added to tunables.cfg"
		;;
	esac

	# Both the value and the mdb format character are required.
	if [[ -z "$value" || -z "$mdb_cmd" ]]; then
		return 1
	fi

	case "$UNAME" in
	Linux)
		echo "$value" >"/sys/module/zfs/parameters/$tunable"
		;;
	FreeBSD)
		sysctl vfs.zfs.$tunable=$value
		;;
	SunOS)
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		;;
	esac
}
|
|
|
|
|
|
|
|
|
|
#
# Get a global system tunable
#
# $1 tunable name (use a NAME defined in tunables.cfg)
#
# Only $1 is forwarded, so the module argument of get_tunable_impl
# always takes its default ("zfs").
function get_tunable
{
	get_tunable_impl "$1"
}
|
|
|
|
|
|
|
|
|
|
#
# Internal helper for get_tunable: resolve the NAME declared in
# tunables.cfg and print the tunable's current value on this platform.
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 kernel module the tunable belongs to (default "zfs"; Linux only)
#
function get_tunable_impl
{
	typeset name="$1"
	typeset module="${2:-zfs}"

	# Map the generic NAME to this platform's tunable name, as
	# declared in tunables.cfg.
	eval "typeset tunable=\$$name"
	case "$tunable" in
	UNSUPPORTED)
		log_unsupported "Tunable '$name' is unsupported on $UNAME"
		;;
	"")
		log_fail "Tunable '$name' must be added to tunables.cfg"
		;;
	*)
		;;
	esac

	case "$UNAME" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		cat $zfs_tunables/$tunable
		;;
	FreeBSD)
		sysctl -n vfs.zfs.$tunable
		;;
	SunOS)
		# Only zfs-module tunables can be read here.  This must
		# be a string comparison: the previous "-eq" forced
		# arithmetic evaluation, under which any two non-numeric
		# strings compare equal (both evaluate to 0), so non-zfs
		# modules were never rejected.
		[[ "$module" == "zfs" ]] || return 1
		;;
	esac
}
|
2019-09-05 16:51:59 +00:00
|
|
|
|
|
|
|
|
|
#
# Compute MD5 digest for given file or stdin if no file given.
# Note: file path must not contain spaces
#
function md5digest
{
	typeset file=$1

	case "$UNAME" in
	FreeBSD)
		# FreeBSD's md5 -q already prints the bare digest.
		md5 -q $file
		;;
	*)
		# md5sum prints "<digest> <filename>"; keep the digest
		# field only.  With no file it digests stdin.
		typeset digest rest
		read -r digest rest < <(md5sum -b $file)
		echo $digest
		;;
	esac
}
|
|
|
|
|
|
|
|
|
|
#
# Compute the SHA256 digest of the given file, or of stdin when no file is
# supplied.
# Note: the file path must not contain spaces; the argument is expanded
# unquoted on purpose so an empty argument falls through to stdin.
#
function sha256digest
{
	typeset target=$1

	if [ "$UNAME" = "FreeBSD" ]; then
		sha256 -q $target
	else
		# sha256sum prints "<digest> *<name>"; keep only the digest.
		typeset digest _
		read -r digest _ < <(sha256sum -b $target)
		echo $digest
	fi
}
|
|
|
|
|
|
|
|
|
|
#
# Create a new non-ZFS filesystem on the given device, auto-answering the
# confirmation prompt that Linux newfs asks.
#
function new_fs #<args>
{
	if [ "$UNAME" = "FreeBSD" ]; then
		newfs "$@"
	else
		echo y | newfs -v "$@"
	fi
}
|
|
|
|
|
|
|
|
|
|
#
# Print the size in bytes of the given path, papering over the
# GNU/BSD stat(1) format-flag differences.
#
function stat_size #<path>
{
	typeset target=$1

	if [ "$UNAME" = "FreeBSD" ]; then
		stat -f %z "$target"
	else
		stat -c %s "$target"
	fi
}
|
2019-12-20 00:26:07 +00:00
|
|
|
|
|
2021-07-26 20:08:52 +00:00
|
|
|
|
#
# Print the change time (ctime, seconds since epoch) of the given path,
# papering over the GNU/BSD stat(1) format-flag differences.
#
function stat_ctime #<path>
{
	typeset target=$1

	if [ "$UNAME" = "FreeBSD" ]; then
		stat -f %c "$target"
	else
		stat -c %Z "$target"
	fi
}
|
|
|
|
|
|
|
|
|
|
#
# Print the creation (birth) time of the given path, papering over the
# GNU/BSD stat(1) format-flag differences.
# NOTE(review): GNU stat prints 0 when the filesystem does not record
# birth times — callers should be prepared for that.
#
function stat_crtime #<path>
{
	typeset target=$1

	if [ "$UNAME" = "FreeBSD" ]; then
		stat -f %B "$target"
	else
		stat -c %W "$target"
	fi
}
|
|
|
|
|
|
2021-12-18 00:18:37 +00:00
|
|
|
|
#
# Print the inode generation number of the given path.
# On Linux this relies on the test suite's getversion helper; elsewhere
# stat(1) exposes it directly via %v.
#
function stat_generation #<path>
{
	typeset target=$1

	if [ "$UNAME" = "Linux" ]; then
		getversion "${target}"
	else
		stat -f %v "${target}"
	fi
}
|
|
|
|
|
|
2019-12-20 00:26:07 +00:00
|
|
|
|
#
# Run a command as if it was being run in a TTY.
#
# Usage:
#
#	faketty command
#
# Implemented with script(1), whose invocation differs between the BSD
# and util-linux variants.
#
function faketty
{
	if is_freebsd; then
		script -q /dev/null env "$@"
		return
	fi
	script --return --quiet -c "$*" /dev/null
}
|
2020-01-09 17:31:17 +00:00
|
|
|
|
|
|
|
|
|
#
# Produce a random permutation of the integers in a given range
# (inclusive), one per line.
#
function range_shuffle # begin end
{
	typeset -i lo=$1
	typeset -i hi=$2

	seq "$lo" "$hi" | sort -R
}
|
2020-01-10 21:24:59 +00:00
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Cross-platform xattr helpers
|
|
|
|
|
#
|
|
|
|
|
|
|
|
|
|
#
# Print the value of the named user xattr on the given path
# (cross-platform wrapper over getextattr/attr).
#
function get_xattr # name path
{
	typeset attrname=$1
	typeset target=$2

	if [ "$UNAME" = "FreeBSD" ]; then
		getextattr -qq user "${attrname}" "${target}"
	else
		attr -qg "${attrname}" "${target}"
	fi
}
|
|
|
|
|
|
|
|
|
|
#
# Set the named user xattr on the given path to the given value
# (cross-platform wrapper over setextattr/attr).
#
function set_xattr # name value path
{
	typeset attrname=$1
	typeset attrval=$2
	typeset target=$3

	if [ "$UNAME" = "FreeBSD" ]; then
		setextattr user "${attrname}" "${attrval}" "${target}"
	else
		attr -qs "${attrname}" -V "${attrval}" "${target}"
	fi
}
|
|
|
|
|
|
|
|
|
|
#
# Set the named user xattr on the given path, reading the value from
# stdin (cross-platform wrapper over setextattr/attr).
# Note: the original signature comment said "name value"; the second
# argument is actually a path — the value comes from stdin.
#
function set_xattr_stdin # name path
{
	typeset attrname=$1
	typeset target=$2

	if [ "$UNAME" = "FreeBSD" ]; then
		setextattr -i user "${attrname}" "${target}"
	else
		attr -qs "${attrname}" "${target}"
	fi
}
|
|
|
|
|
|
|
|
|
|
#
# Remove the named user xattr from the given path
# (cross-platform wrapper over rmextattr/attr).
#
function rm_xattr # name path
{
	typeset attrname=$1
	typeset target=$2

	if [ "$UNAME" = "FreeBSD" ]; then
		rmextattr -q user "${attrname}" "${target}"
	else
		attr -qr "${attrname}" "${target}"
	fi
}
|
|
|
|
|
|
|
|
|
|
#
# List the user xattrs present on the given path
# (cross-platform wrapper over lsextattr/attr).
#
function ls_xattr # path
{
	typeset target=$1

	if [ "$UNAME" = "FreeBSD" ]; then
		lsextattr -qq user "${target}"
	else
		attr -ql "${target}"
	fi
}
|
2020-03-31 17:46:48 +00:00
|
|
|
|
|
2020-10-08 16:40:23 +00:00
|
|
|
|
#
# Print the named ZFS kernel statistic. On FreeBSD it lives under the
# kstat.zfs.misc sysctl tree (flags default to -n, i.e. value only); on
# Linux it is a file under /proc/spl/kstat/zfs. Returns non-zero on
# unsupported platforms or when the statistic does not exist.
#
function kstat # stat flags?
{
	typeset node=$1
	typeset flags=${2-"-n"}

	if [ "$UNAME" = "FreeBSD" ]; then
		sysctl $flags kstat.zfs.misc.$node
	elif [ "$UNAME" = "Linux" ]; then
		cat "/proc/spl/kstat/zfs/$node" 2>/dev/null
	else
		false
	fi
}
|
|
|
|
|
|
2020-03-31 17:46:48 +00:00
|
|
|
|
#
# Print the value of the named ARC statistic via kstat. Returns
# non-zero on unsupported platforms.
#
function get_arcstat # stat
{
	typeset field=$1

	if [ "$UNAME" = "FreeBSD" ]; then
		kstat arcstats.$field
	elif [ "$UNAME" = "Linux" ]; then
		# arcstats is a table on Linux; pick the value column of
		# the matching row.
		kstat arcstats | awk "/$field/"' { print $3 }'
	else
		false
	fi
}
|
2020-07-13 16:19:18 +00:00
|
|
|
|
|
2021-08-22 15:22:07 +00:00
|
|
|
|
#
# Punch a hole (deallocate a byte range) in the given file.
# Returns non-zero on unsupported platforms.
#
function punch_hole # offset length file
{
	typeset off=$1
	typeset len=$2
	typeset target=$3

	if [ "$UNAME" = "FreeBSD" ]; then
		truncate -d -o $off -l $len "$target"
	elif [ "$UNAME" = "Linux" ]; then
		fallocate --punch-hole --offset $off --length $len "$target"
	else
		false
	fi
}
|
|
|
|
|
|
2022-04-20 23:07:03 +00:00
|
|
|
|
#
# Zero out a byte range of the given file without changing its size.
# Only implemented on Linux (fallocate --zero-range); returns non-zero
# everywhere else.
#
function zero_range # offset length file
{
	typeset off=$1
	typeset len=$2
	typeset target=$3

	if [ "$UNAME" = "Linux" ]; then
		fallocate --zero-range --offset $off --length $len "$target"
	else
		false
	fi
}
|
|
|
|
|
|
2020-10-05 22:29:05 +00:00
|
|
|
|
#
# Wait for the specified arcstat to reach non-zero quiescence, i.e.
# until two samples taken 2 seconds apart are equal and non-zero.
# If echo is 1 echo the value after reaching quiescence, otherwise
# if echo is 0 print the arcstat we are waiting on.
#
function arcstat_quiescence # stat echo
{
	typeset stat=$1
	typeset echo=$2
	typeset first=true
	typeset prev cur

	if [[ $echo -eq 0 ]]; then
		echo "Waiting for arcstat $1 quiescence."
	fi

	# $first short-circuits the comparison on the initial pass,
	# before prev/cur have been sampled.
	while $first || [ $prev -ne $cur ] || [ $cur -eq 0 ]; do
		prev=$(get_arcstat $stat)
		sleep 2
		cur=$(get_arcstat $stat)
		first=false
	done

	if [[ $echo -eq 1 ]]; then
		echo $cur
	fi
}
|
|
|
|
|
|
|
|
|
|
# Wait for the arcstat to quiesce without echoing the final value.
function arcstat_quiescence_noecho # stat
{
	typeset field=$1
	arcstat_quiescence $field 0
}
|
|
|
|
|
|
|
|
|
|
# Wait for the arcstat to quiesce and echo the final value.
function arcstat_quiescence_echo # stat
{
	typeset field=$1
	arcstat_quiescence $field 1
}
|
|
|
|
|
|
2020-07-13 16:19:18 +00:00
|
|
|
|
#
# Given a list of pids, wait until all processes
# have completed and check their return status.
#
# Arguments: one or more pids of already-started background jobs.
# Outputs:   a diagnostic line on stdout for each failed child.
# Returns:   0 if every child exited 0, 1 otherwise.
#
function wait_for_children #children
{
	# Use typeset so rv/child/child_exit no longer leak into the
	# caller's scope (the original also copied "$@" into a global
	# 'children' array for no benefit).
	typeset -i rv=0
	typeset child child_exit

	for child in "$@"; do
		child_exit=0
		# Capture the child's status without aborting the loop.
		wait ${child} || child_exit=$?
		if [ $child_exit -ne 0 ]; then
			echo "child ${child} failed with ${child_exit}"
			rv=1
		fi
	done
	return $rv
}
|
2022-03-01 18:05:32 +00:00
|
|
|
|
|
|
|
|
|
#
# Compare two directory trees recursively in a manner similar to diff(1), but
# using rsync. If there are any discrepancies, a summary of the differences are
# output and a non-zero error is returned.
#
# If you're comparing a directory after a ZIL replay, you should set
# LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff which will cause
# directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime
# information).
#
# Returns: 0 when the trees match, 1 when they differ, 2 when either
# directory is missing (matching diff(1) semantics).
#
# NOTE(review): dir_a, dir_b, zil_replay, args, filter, diff and rv are not
# typeset and therefore leak into the caller's scope — confirm no caller
# depends on that before making them local.
#
function directory_diff # dir_a dir_b
{
	dir_a="$1"
	dir_b="$2"
	zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}"

	# If one of the directories doesn't exist, return 2. This is to match the
	# semantics of diff.
	if ! [ -d "$dir_a" -a -d "$dir_b" ]; then
		return 2
	fi

	# Run rsync with --dry-run --itemize-changes to get something akin to diff
	# output, but rsync is far more thorough in detecting differences (diff
	# doesn't compare file metadata, and cannot handle special files).
	#
	# Also make sure to filter out non-user.* xattrs when comparing. On
	# SELinux-enabled systems the copied tree will probably have different
	# SELinux labels.
	args=("-nicaAHX" '--filter=-x! user.*' "--delete")

	# NOTE: Quite a few rsync builds do not support --crtimes which would be
	# necessary to verify that creation times are being maintained properly.
	# Unfortunately because of this we cannot use it unconditionally but we can
	# check if this rsync build supports it and use it then. This check is
	# based on the same check in the rsync test suite (testsuite/crtimes.test).
	#
	# We check ctimes even with zil_replay=1 because the ZIL does store
	# creation times and we should make sure they match (if the creation times
	# do not match there is a "c" entry in one of the columns).
	if rsync --version | grep -q "[, ] crtimes"; then
		args+=("--crtimes")
	else
		log_note "This rsync package does not support --crtimes (-N)."
	fi

	# If we are testing a ZIL replay, we need to ignore timestamp changes.
	# Unfortunately --no-times doesn't do what we want -- it will still tell
	# you if the timestamps don't match but rsync will set the timestamps to
	# the current time (leading to an itemised change entry). It's simpler to
	# just filter out those lines.
	if [ "$zil_replay" -eq 0 ]; then
		filter=("cat")
	else
		# Different rsync versions have different numbers of columns. So just
		# require that aside from the first two, all other columns must be
		# blank (literal ".") or a timestamp field ("[tT]").
		filter=("grep" "-v" '^\..[.Tt]\+ ')
	fi

	# Trailing slashes make rsync compare the directories' contents
	# rather than the directories themselves.
	diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")"
	rv=0
	if [ -n "$diff" ]; then
		echo "$diff"
		rv=1
	fi
	return $rv
}
|
|
|
|
|
|
|
|
|
|
#
# Compare two directory trees recursively, without checking whether the mtimes
# match (creation times will be checked if the available rsync binary supports
# it). This is necessary for ZIL replay checks (because the ZIL does not
# contain mtimes and thus after a ZIL replay, mtimes won't match).
#
# This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>.
#
# Returns: same as directory_diff (0 match, 1 differ, 2 missing directory).
#
function replay_directory_diff # dir_a dir_b
{
	# The env assignment is scoped to this single invocation only.
	LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@"
}
|
2022-03-16 13:27:04 +00:00
|
|
|
|
|
|
|
|
|
#
# Put coredumps into $1/core.{basename}
#
# Output must be saved and passed to pop_coredump_pattern on cleanup
#
function push_coredump_pattern # dir
{
	typeset dir=$1

	ulimit -c unlimited
	if [ "$UNAME" = "Linux" ]; then
		# Emit the previous settings (pattern, then uses_pid) so
		# pop_coredump_pattern can restore them later.
		cat /proc/sys/kernel/core_pattern /proc/sys/kernel/core_uses_pid
		echo "$dir/core.%e" >/proc/sys/kernel/core_pattern &&
		    echo 0 >/proc/sys/kernel/core_uses_pid
	elif [ "$UNAME" = "FreeBSD" ]; then
		sysctl -n kern.corefile
		sysctl kern.corefile="$dir/core.%N" >/dev/null
	else
		# Nothing to output – set only for this shell
		coreadm -p "$dir/core.%f"
	fi
}
|
|
|
|
|
|
|
|
|
|
#
# Put coredumps back into the default location, restoring the settings
# previously saved by push_coredump_pattern (passed via the file $1).
# A missing or empty state file is a no-op.
#
function pop_coredump_pattern
{
	[ -s "$1" ] || return 0
	if [ "$UNAME" = "Linux" ]; then
		typeset pat pid
		# First saved line is the pattern, second is core_uses_pid.
		{ read -r pat; read -r pid; } < "$1"
		echo "$pat" >/proc/sys/kernel/core_pattern &&
		    echo "$pid" >/proc/sys/kernel/core_uses_pid
	elif [ "$UNAME" = "FreeBSD" ]; then
		sysctl kern.corefile="$(<"$1")" >/dev/null
	fi
}
|