Compare commits

No commits in common. "zfs-0.8-release" and "zfs-0.8.4" have entirely different histories.

80 changed files with 292 additions and 898 deletions

View File

@ -83,7 +83,6 @@ CONTRIBUTORS:
Christopher Voltz <cjunk@voltz.ws>
Chunwei Chen <david.chen@nutanix.com>
Clemens Fruhwirth <clemens@endorphin.org>
Coleman Kane <ckane@colemankane.org>
Colin Ian King <colin.king@canonical.com>
Craig Loomis <cloomis@astro.princeton.edu>
Craig Sanders <github@taz.net.au>

META
View File

@ -1,10 +1,10 @@
Meta: 1
Name: zfs
Branch: 1.0
Version: 0.8.6
Version: 0.8.4
Release: 1
Release-Tags: relext
License: CDDL
Author: OpenZFS on Linux
Linux-Maximum: 5.9
Linux-Maximum: 5.6
Linux-Minimum: 2.6.32

View File

@ -347,8 +347,9 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
zpool_vdev_offline(zhp, devname, B_TRUE);
} else if (!fmd_prop_get_int32(hdl, "spare_on_remove") ||
replace_with_spare(hdl, zhp, vdev) == B_FALSE) {
/* Could not handle with spare */
fmd_hdl_debug(hdl, "no spare for '%s'", devname);
/* Could not handle with spare: offline the device */
fmd_hdl_debug(hdl, "zpool_vdev_offline '%s'", devname);
zpool_vdev_offline(zhp, devname, B_TRUE);
}
free(devname);

View File

@ -8620,9 +8620,9 @@ zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
memset(str, ' ', 32);
(void) ctime_r((const time_t *)&tv[0], ctime_str);
(void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
(void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
(void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
(void) strncpy(str, ctime_str+4, 6); /* 'Jun 30' */
(void) strncpy(str+7, ctime_str+20, 4); /* '1993' */
(void) strncpy(str+12, ctime_str+11, 8); /* '21:49:08' */
(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
if (opts->scripted)
(void) printf(gettext("%s\t"), str);
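
As context for the hunk above: ctime_r() always produces a fixed-layout string such as "Wed Jun 30 21:49:08 1993\n", which is why both variants can copy fixed-width slices into fixed offsets of the output buffer. A minimal standalone sketch of that technique, assuming only standard C; the buffer sizes and the nanosecond value here are illustrative, not taken from the ZFS sources:

#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	long nsec = 123456789;			/* illustrative nanosecond part */
	char ctime_str[26];			/* ctime_r() needs at least 26 bytes */
	char str[64];

	memset(str, ' ', sizeof (str));
	(void) ctime_r(&now, ctime_str);	/* e.g. "Wed Jun 30 21:49:08 1993\n" */
	memcpy(str, ctime_str + 4, 6);		/* "Jun 30"   */
	memcpy(str + 7, ctime_str + 20, 4);	/* "1993"     */
	memcpy(str + 12, ctime_str + 11, 8);	/* "21:49:08" */
	snprintf(str + 20, sizeof (str) - 20, ".%09ld", nsec);
	printf("%s\n", str);
	return (0);
}

Either memcpy() or strncpy() works for the slices because they never contain a NUL; it is the bounded snprintf() at the end that terminates the string.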

View File

@ -11,7 +11,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKG_TRYGET], [
#include <linux/bio.h>
#include <linux/fs.h>
],[
struct blkcg_gq blkg __attribute__ ((unused)) = {};
struct blkcg_gq blkg __attribute__ ((unused));
bool rc __attribute__ ((unused));
rc = blkg_tryget(&blkg);
], [], [$ZFS_META_LICENSE])

View File

@ -1,62 +0,0 @@
dnl #
dnl # check_disk_change() was removed in 5.10
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_CHECK_DISK_CHANGE], [
ZFS_LINUX_TEST_SRC([check_disk_change], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
bool error;
error = check_disk_change(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_CHECK_DISK_CHANGE], [
AC_MSG_CHECKING([whether check_disk_change() exists])
ZFS_LINUX_TEST_RESULT([check_disk_change], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_CHECK_DISK_CHANGE, 1,
[check_disk_change() exists])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 5.10 API, check_disk_change() is removed, in favor of
dnl # bdev_check_media_change(), which doesn't force revalidation
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_CHECK_MEDIA_CHANGE], [
ZFS_LINUX_TEST_SRC([bdev_check_media_change], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
int error;
error = bdev_check_media_change(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_CHECK_MEDIA_CHANGE], [
AC_MSG_CHECKING([whether bdev_disk_changed() exists])
ZFS_LINUX_TEST_RESULT([bdev_check_media_change], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BDEV_CHECK_MEDIA_CHANGE, 1,
[bdev_check_media_change() exists])
], [
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_CHANGE], [
ZFS_AC_KERNEL_SRC_BLKDEV_CHECK_DISK_CHANGE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_CHECK_MEDIA_CHANGE
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_CHANGE], [
ZFS_AC_KERNEL_BLKDEV_CHECK_DISK_CHANGE
ZFS_AC_KERNEL_BLKDEV_BDEV_CHECK_MEDIA_CHANGE
])

View File

@ -6,7 +6,6 @@ dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH], [
ZFS_LINUX_TEST_SRC([blkdev_get_by_path], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
blkdev_get_by_path(NULL, 0, NULL);
])

View File

@ -5,7 +5,6 @@ dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_REREAD_PART], [
ZFS_LINUX_TEST_SRC([blkdev_reread_part], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
int error;

View File

@ -91,7 +91,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_CONFIG_DEBUG_LOCK_ALLOC], [
AC_DEFUN([ZFS_AC_KERNEL_CONFIG_DEBUG_LOCK_ALLOC], [
AC_MSG_CHECKING([whether mutex_lock() is GPL-only])
ZFS_LINUX_TEST_RESULT([config_debug_lock_alloc_license], [
ZFS_LINUX_TEST_RESULT([config_debug_lock_alloc], [
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)

View File

@ -94,6 +94,7 @@ AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_ZONE_PAGE_STATE_SANITY], [
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK([NR_FILE_PAGES])
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK([NR_INACTIVE_ANON])
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK([NR_INACTIVE_FILE])
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK([NR_SLAB_RECLAIMABLE])
AC_MSG_RESULT(yes)
])
@ -116,6 +117,8 @@ AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_PAGE_STATE], [
[node_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_ENUM_MEMBER([NR_INACTIVE_FILE],
[node_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_ENUM_MEMBER([NR_SLAB_RECLAIMABLE],
[node_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_ENUM_MEMBER([NR_FILE_PAGES],
[zone_stat_item], [$LINUX/include/linux/mmzone.h])
@ -123,6 +126,8 @@ AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_PAGE_STATE], [
[zone_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_ENUM_MEMBER([NR_INACTIVE_FILE],
[zone_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_ENUM_MEMBER([NR_SLAB_RECLAIMABLE],
[zone_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_GLOBAL_ZONE_PAGE_STATE_SANITY
])

View File

@ -5,7 +5,6 @@ dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_INVALIDATE_BDEV], [
ZFS_LINUX_TEST_SRC([invalidate_bdev], [
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
],[
struct block_device *bdev = NULL;
invalidate_bdev(bdev);

View File

@ -1,24 +0,0 @@
dnl #
dnl # 5.8 API,
dnl # __vmalloc PAGE_KERNEL removal
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL], [
ZFS_LINUX_TEST_SRC([__vmalloc], [
#include <linux/mm.h>
#include <linux/vmalloc.h>
],[
void *p __attribute__ ((unused));
p = __vmalloc(0, GFP_KERNEL, PAGE_KERNEL);
])
])
AC_DEFUN([ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL], [
AC_MSG_CHECKING([whether __vmalloc(ptr, flags, pageflags) is available])
ZFS_LINUX_TEST_RESULT([__vmalloc], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_VMALLOC_PAGE_KERNEL, 1, [__vmalloc page flags exists])
],[
AC_MSG_RESULT(no)
])
])

View File

@ -5,7 +5,6 @@ dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_LOOKUP_BDEV], [
ZFS_LINUX_TEST_SRC([lookup_bdev_1arg], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
lookup_bdev(NULL);
])

View File

@ -34,96 +34,75 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN], [
struct request_queue *q __attribute__ ((unused));
q = blk_alloc_queue(make_request, NUMA_NO_NODE);
])
ZFS_LINUX_TEST_SRC([block_device_operations_submit_bio], [
#include <linux/blkdev.h>
],[
struct block_device_operations o;
o.submit_bio = NULL;
])
])
AC_DEFUN([ZFS_AC_KERNEL_MAKE_REQUEST_FN], [
dnl # Checked as part of the blk_alloc_queue_request_fn test
dnl #
dnl # Linux 5.9 API Change
dnl # make_request_fn was moved into block_device_operations->submit_bio
dnl # Linux 5.7 API Change
dnl # blk_alloc_queue() expects request function.
dnl #
AC_MSG_CHECKING([whether submit_bio is member of struct block_device_operations])
ZFS_LINUX_TEST_RESULT([block_device_operations_submit_bio], [
AC_MSG_CHECKING([whether blk_alloc_queue() expects request function])
ZFS_LINUX_TEST_RESULT([blk_alloc_queue_request_fn], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS, 1,
[submit_bio is member of struct block_device_operations])
],[
dnl # Checked as part of the blk_alloc_queue_request_fn test
dnl #
dnl # Linux 5.7 API Change
dnl # blk_alloc_queue() expects request function.
dnl #
AC_MSG_CHECKING([whether blk_alloc_queue() expects request function])
ZFS_LINUX_TEST_RESULT([blk_alloc_queue_request_fn], [
AC_MSG_RESULT(yes)
AC_MSG_CHECKING([whether make_request_fn() returns blk_qc_t])
AC_MSG_RESULT(yes)
dnl # Checked as part of the blk_alloc_queue_request_fn test
AC_MSG_CHECKING([whether make_request_fn() returns blk_qc_t])
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN, 1,
[blk_alloc_queue() expects request function])
AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
[Noting that make_request_fn() returns blk_qc_t])
],[
AC_MSG_RESULT(no)
AC_DEFINE(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN, 1,
[blk_alloc_queue() expects request function])
AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
[Noting that make_request_fn() returns blk_qc_t])
dnl #
dnl # Linux 3.2 API Change
dnl # make_request_fn returns void.
dnl #
AC_MSG_CHECKING([whether make_request_fn() returns void])
ZFS_LINUX_TEST_RESULT([make_request_fn_void], [
AC_MSG_RESULT(yes)
AC_DEFINE(MAKE_REQUEST_FN_RET, void,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_VOID, 1,
[Noting that make_request_fn() returns void])
],[
AC_MSG_RESULT(no)
dnl #
dnl # Linux 3.2 API Change
dnl # make_request_fn returns void.
dnl # Linux 4.4 API Change
dnl # make_request_fn returns blk_qc_t.
dnl #
AC_MSG_CHECKING([whether make_request_fn() returns void])
ZFS_LINUX_TEST_RESULT([make_request_fn_void], [
AC_MSG_CHECKING(
[whether make_request_fn() returns blk_qc_t])
ZFS_LINUX_TEST_RESULT([make_request_fn_blk_qc_t], [
AC_MSG_RESULT(yes)
AC_DEFINE(MAKE_REQUEST_FN_RET, void,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_VOID, 1,
[Noting that make_request_fn() returns void])
AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
[Noting that make_request_fn() ]
[returns blk_qc_t])
],[
AC_MSG_RESULT(no)
dnl #
dnl # Linux 4.4 API Change
dnl # make_request_fn returns blk_qc_t.
dnl # Legacy API
dnl # make_request_fn returns int.
dnl #
AC_MSG_CHECKING(
[whether make_request_fn() returns blk_qc_t])
ZFS_LINUX_TEST_RESULT([make_request_fn_blk_qc_t], [
[whether make_request_fn() returns int])
ZFS_LINUX_TEST_RESULT([make_request_fn_int], [
AC_MSG_RESULT(yes)
AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
[Noting that make_request_fn() ]
[returns blk_qc_t])
AC_DEFINE(MAKE_REQUEST_FN_RET, int,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_INT,
1, [Noting that make_request_fn() ]
[returns int])
],[
AC_MSG_RESULT(no)
dnl #
dnl # Legacy API
dnl # make_request_fn returns int.
dnl #
AC_MSG_CHECKING(
[whether make_request_fn() returns int])
ZFS_LINUX_TEST_RESULT([make_request_fn_int], [
AC_MSG_RESULT(yes)
AC_DEFINE(MAKE_REQUEST_FN_RET, int,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_INT,
1, [Noting that make_request_fn() ]
[returns int])
],[
ZFS_LINUX_TEST_ERROR([make_request_fn])
])
ZFS_LINUX_TEST_ERROR([make_request_fn])
])
])
])

View File

@ -1,24 +1,3 @@
dnl #
dnl # Detect objtool functionality.
dnl #
dnl #
dnl # Kernel 5.10: linux/frame.h was renamed linux/objtool.h
dnl #
AC_DEFUN([ZFS_AC_KERNEL_OBJTOOL_HEADER], [
AC_MSG_CHECKING([whether objtool header is available])
ZFS_LINUX_TRY_COMPILE([
#include <linux/objtool.h>
],[
],[
AC_DEFINE(HAVE_KERNEL_OBJTOOL_HEADER, 1,
[kernel has linux/objtool.h])
AC_MSG_RESULT(linux/objtool.h)
],[
AC_MSG_RESULT(linux/frame.h)
])
])
dnl #
dnl # Check for objtool support.
dnl #
@ -27,24 +6,19 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_OBJTOOL], [
dnl # 4.6 API for compile-time stack validation
ZFS_LINUX_TEST_SRC([objtool], [
#undef __ASSEMBLY__
#include <asm/ptrace.h>
#include <asm/frame.h>
],[
#if !defined(FRAME_BEGIN)
#error "FRAME_BEGIN is not defined"
CTASSERT(1);
#endif
])
dnl # 4.6 API added STACK_FRAME_NON_STANDARD macro
ZFS_LINUX_TEST_SRC([stack_frame_non_standard], [
#ifdef HAVE_KERNEL_OBJTOOL_HEADER
#include <linux/objtool.h>
#else
#include <linux/frame.h>
#endif
],[
#if !defined(STACK_FRAME_NON_STANDARD)
#error "STACK_FRAME_NON_STANDARD is not defined."
CTASSERT(1);
#endif
])
])

View File

@ -12,7 +12,6 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
dnl # Sequential ZFS_LINUX_TRY_COMPILE tests
ZFS_AC_KERNEL_FPU_HEADER
ZFS_AC_KERNEL_OBJTOOL_HEADER
ZFS_AC_KERNEL_WAIT_QUEUE_ENTRY_T
ZFS_AC_KERNEL_MISC_MINOR
ZFS_AC_KERNEL_DECLARE_EVENT_CLASS
@ -46,7 +45,6 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_SCHED
ZFS_AC_KERNEL_SRC_USLEEP_RANGE
ZFS_AC_KERNEL_SRC_KMEM_CACHE
ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_SRC_WAIT
ZFS_AC_KERNEL_SRC_INODE_TIMES
ZFS_AC_KERNEL_SRC_INODE_LOCK
@ -144,7 +142,6 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_SRC_TOTALHIGH_PAGES
ZFS_AC_KERNEL_SRC_KSTRTOUL
ZFS_AC_KERNEL_SRC_BLKDEV_CHANGE
AC_MSG_CHECKING([for available kernel interfaces])
ZFS_LINUX_TEST_COMPILE_ALL([kabi])
@ -166,7 +163,6 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_SCHED
ZFS_AC_KERNEL_USLEEP_RANGE
ZFS_AC_KERNEL_KMEM_CACHE
ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_WAIT
ZFS_AC_KERNEL_INODE_TIMES
ZFS_AC_KERNEL_INODE_LOCK
@ -264,7 +260,6 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_TOTALHIGH_PAGES
ZFS_AC_KERNEL_KSTRTOUL
ZFS_AC_KERNEL_BLKDEV_CHANGE
])
dnl #

View File

@ -36,13 +36,11 @@ rm -rf "$KERNEL_DIR/include/zfs" "$KERNEL_DIR/fs/zfs"
cp --recursive include "$KERNEL_DIR/include/zfs"
cp --recursive module "$KERNEL_DIR/fs/zfs"
cp zfs_config.h "$KERNEL_DIR/include/zfs/"
rm "$KERNEL_DIR/include/zfs/.gitignore"
for MODULE in "${MODULES[@]}"
do
sed -i '/obj =/d' "$KERNEL_DIR/fs/zfs/$MODULE/Makefile"
sed -i '/src =/d' "$KERNEL_DIR/fs/zfs/$MODULE/Makefile"
sed -i "s|-I$PWD/module/|-I\$(srctree)/fs/zfs/|" "$KERNEL_DIR/fs/zfs/$MODULE/Makefile"
sed -i.bak '/obj =/d' "$KERNEL_DIR/fs/zfs/$MODULE/Makefile"
sed -i.bak '/src =/d' "$KERNEL_DIR/fs/zfs/$MODULE/Makefile"
done
cat > "$KERNEL_DIR/fs/zfs/Kconfig" <<"EOF"

View File

@ -38,6 +38,8 @@ do_fail() {
is_known() {
query="$1"
IFS=' '
# protect against special characters
set -f
for element in $2 ; do
if [ "$query" = "$element" ] ; then
return 0
@ -52,7 +54,8 @@ is_known() {
create_dependencies() {
unitfile="$1"
suffix="$2"
IFS=' '
# protect against special characters
set -f
for target in $3 ; do
target_dir="${dest_norm}/${target}.${suffix}/"
mkdir -p "${target_dir}"
@ -69,7 +72,6 @@ else
do_fail "zero or three arguments required"
fi
pools=$(zpool list -H -o name || true)
# All needed information about each ZFS is available from
# zfs list -H -t filesystem -o <properties>
@ -81,11 +83,11 @@ process_line() {
# zfs list -H -o name,...
# fields are tab separated
IFS="$(printf '\t')"
# protect against special characters in, e.g., mountpoints
set -f
# shellcheck disable=SC2086
set -- $1
dataset="${1}"
pool="${dataset%%/*}"
p_mountpoint="${2}"
p_canmount="${3}"
p_atime="${4}"
@ -113,30 +115,10 @@ process_line() {
wants="zfs-import.target"
requires=""
requiredmounts=""
bindsto=""
wantedby=""
requiredby=""
noauto="off"
# If the pool is already imported, zfs-import.target is not needed. This
# avoids a dependency loop on root-on-ZFS systems:
# systemd-random-seed.service After (via RequiresMountsFor) var-lib.mount
# After zfs-import.target After zfs-import-{cache,scan}.service After
# cryptsetup.service After systemd-random-seed.service.
#
# Pools are newline-separated and may contain spaces in their names.
# There is no better portable way to set IFS to just a newline. Using
# $(printf '\n') doesn't work because $(...) strips trailing newlines.
IFS="
"
for p in $pools ; do
if [ "$p" = "$pool" ] ; then
after=""
wants=""
break
fi
done
if [ -n "${p_systemd_after}" ] && \
[ "${p_systemd_after}" != "-" ] ; then
after="${p_systemd_after} ${after}"
@ -190,12 +172,6 @@ set -eu;\
keystatus=\"\$\$(@sbindir@/zfs get -H -o value keystatus \"${dataset}\")\";\
[ \"\$\$keystatus\" = \"unavailable\" ] || exit 0;\
${keyloadscript}'"
keyunloadcmd="\
/bin/sh -c '\
set -eu;\
keystatus=\"\$\$(@sbindir@/zfs get -H -o value keystatus \"${dataset}\")\";\
[ \"\$\$keystatus\" = \"available\" ] || exit 0;\
@sbindir@/zfs unload-key \"${dataset}\"'"
@ -215,23 +191,19 @@ Documentation=man:zfs-mount-generator(8)
DefaultDependencies=no
Wants=${wants}
After=${after}
Before=${before}
${requires}
${keymountdep}
[Service]
Type=oneshot
RemainAfterExit=yes
# This avoids a dependency loop involving systemd-journald.socket if this
# dataset is a parent of the root filesystem.
StandardOutput=null
StandardError=null
ExecStart=${keyloadcmd}
ExecStop=${keyunloadcmd}" > "${dest_norm}/${keyloadunit}"
ExecStop=@sbindir@/zfs unload-key '${dataset}'" > "${dest_norm}/${keyloadunit}"
fi
# Update the dependencies for the mount file to want the
# key-loading unit.
wants="${wants}"
bindsto="BindsTo=${keyloadunit}"
wants="${wants} ${keyloadunit}"
after="${after} ${keyloadunit}"
fi
@ -442,7 +414,6 @@ Documentation=man:zfs-mount-generator(8)
Before=${before}
After=${after}
Wants=${wants}
${bindsto}
${requires}
${requiredmounts}
@ -459,8 +430,6 @@ Options=defaults${opts},zfsutil" > "${dest_norm}/${mountfile}"
}
for cachefile in "${FSLIST}/"* ; do
# Disable glob expansion to protect against special characters when parsing.
set -f
# Sort cachefile's lines by canmount, "on" before "noauto"
# and feed each line into process_line
sort -t "$(printf '\t')" -k 3 -r "${cachefile}" | \

View File

@ -9,7 +9,6 @@ After=multipathd.target
After=systemd-remount-fs.service
Before=zfs-import.target
ConditionPathExists=@sysconfdir@/zfs/zpool.cache
ConditionPathIsDirectory=/sys/module/zfs
[Service]
Type=oneshot

View File

@ -8,7 +8,6 @@ After=cryptsetup.target
After=multipathd.target
Before=zfs-import.target
ConditionPathExists=!@sysconfdir@/zfs/zpool.cache
ConditionPathIsDirectory=/sys/module/zfs
[Service]
Type=oneshot

View File

@ -6,7 +6,7 @@ After=systemd-udev-settle.service
After=zfs-import.target
After=systemd-remount-fs.service
Before=local-fs.target
ConditionPathIsDirectory=/sys/module/zfs
Before=systemd-random-seed.service
[Service]
Type=oneshot

View File

@ -371,48 +371,12 @@ bio_set_bi_error(struct bio *bio, int error)
*
* For older kernels trigger a re-reading of the partition table by calling
* check_disk_change() which calls flush_disk() to invalidate the device.
*
* For newer kernels (as of 5.10), bdev_check_media_chage is used, in favor of
* check_disk_change(), with the modification that invalidation is no longer
* forced.
*/
#ifdef HAVE_CHECK_DISK_CHANGE
#define zfs_check_media_change(bdev) check_disk_change(bdev)
#ifdef HAVE_BLKDEV_REREAD_PART
#define vdev_bdev_reread_part(bdev) blkdev_reread_part(bdev)
#else
#define vdev_bdev_reread_part(bdev) check_disk_change(bdev)
#endif /* HAVE_BLKDEV_REREAD_PART */
#else
#ifdef HAVE_BDEV_CHECK_MEDIA_CHANGE
static inline int
zfs_check_media_change(struct block_device *bdev)
{
struct gendisk *gd = bdev->bd_disk;
const struct block_device_operations *bdo = gd->fops;
if (!bdev_check_media_change(bdev))
return (0);
/*
* Force revalidation, to mimic the old behavior of
* check_disk_change()
*/
if (bdo->revalidate_disk)
bdo->revalidate_disk(gd);
return (0);
}
#define vdev_bdev_reread_part(bdev) zfs_check_media_change(bdev)
#else
/*
* This is encountered if check_disk_change() and bdev_check_media_change()
* are not available in the kernel - likely due to an API change that needs
* to be chased down.
*/
#error "Unsupported kernel: no usable disk change check"
#endif /* HAVE_BDEV_CHECK_MEDIA_CHANGE */
#endif /* HAVE_CHECK_DISK_CHANGE */
/*
* 2.6.22 API change
@ -705,7 +669,6 @@ blk_generic_end_io_acct(struct request_queue *q, int rw,
#endif
}
#ifndef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static inline struct request_queue *
blk_generic_alloc_queue(make_request_fn make_request, int node_id)
{
@ -719,6 +682,5 @@ blk_generic_alloc_queue(make_request_fn make_request, int node_id)
return (q);
#endif
}
#endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
#endif /* _ZFS_BLKDEV_H */

View File

@ -35,6 +35,11 @@
#else
#define nr_inactive_file_pages() global_zone_page_state(NR_INACTIVE_FILE)
#endif
#if defined(ZFS_ENUM_NODE_STAT_ITEM_NR_SLAB_RECLAIMABLE)
#define nr_slab_reclaimable_pages() global_node_page_state(NR_SLAB_RECLAIMABLE)
#else
#define nr_slab_reclaimable_pages() global_zone_page_state(NR_SLAB_RECLAIMABLE)
#endif
#elif defined(ZFS_GLOBAL_NODE_PAGE_STATE)
@ -54,6 +59,11 @@
#else
#define nr_inactive_file_pages() global_page_state(NR_INACTIVE_FILE)
#endif
#if defined(ZFS_ENUM_NODE_STAT_ITEM_NR_SLAB_RECLAIMABLE)
#define nr_slab_reclaimable_pages() global_node_page_state(NR_SLAB_RECLAIMABLE)
#else
#define nr_slab_reclaimable_pages() global_page_state(NR_SLAB_RECLAIMABLE)
#endif
#else
@ -61,6 +71,7 @@
#define nr_file_pages() global_page_state(NR_FILE_PAGES)
#define nr_inactive_anon_pages() global_page_state(NR_INACTIVE_ANON)
#define nr_inactive_file_pages() global_page_state(NR_INACTIVE_FILE)
#define nr_slab_reclaimable_pages() global_page_state(NR_SLAB_RECLAIMABLE)
#endif /* ZFS_GLOBAL_ZONE_PAGE_STATE */

View File

@ -169,15 +169,6 @@ extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line);
extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line);
extern void spl_kmem_free(const void *ptr, size_t sz);
/*
* 5.8 API change, pgprot_t argument removed.
*/
#ifdef HAVE_VMALLOC_PAGE_KERNEL
#define spl_vmalloc(size, flags) __vmalloc(size, flags, PAGE_KERNEL)
#else
#define spl_vmalloc(size, flags) __vmalloc(size, flags)
#endif
/*
* The following functions are only available for internal use.
*/

View File

@ -26,7 +26,6 @@
#define _SPL_MUTEX_H
#include <sys/types.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>
#include <linux/compiler_compat.h>

View File

@ -47,6 +47,10 @@
#define membar_producer() smp_wmb()
#define physmem zfs_totalram_pages
#define freemem (nr_free_pages() + \
global_page_state(NR_INACTIVE_FILE) + \
global_page_state(NR_INACTIVE_ANON) + \
global_page_state(NR_SLAB_RECLAIMABLE))
#define xcopyin(from, to, size) copy_from_user(to, from, size)
#define xcopyout(from, to, size) copy_to_user(to, from, size)

View File

@ -175,7 +175,7 @@ typedef struct ddt_ops {
int (*ddt_op_count)(objset_t *os, uint64_t object, uint64_t *count);
} ddt_ops_t;
#define DDT_NAMELEN 107
#define DDT_NAMELEN 80
extern void ddt_object_name(ddt_t *ddt, enum ddt_type type,
enum ddt_class class, char *name);

View File

@ -23,13 +23,8 @@
extern "C" {
#endif
#if defined(__KERNEL__) && defined(HAVE_KERNEL_OBJTOOL) && \
defined(HAVE_STACK_FRAME_NON_STANDARD)
#if defined(HAVE_KERNEL_OBJTOOL_HEADER)
#include <linux/objtool.h>
#else
#if defined(__KERNEL__) && defined(HAVE_STACK_FRAME_NON_STANDARD)
#include <linux/frame.h>
#endif
#else
#define STACK_FRAME_NON_STANDARD(func)
#endif

View File

@ -39,7 +39,7 @@
#endif /* MNTTAB */
#define MNTTAB "/proc/self/mounts"
#define MNT_LINE_MAX 4108
#define MNT_LINE_MAX 4096
#define MNT_TOOLONG 1 /* entry exceeds MNT_LINE_MAX */
#define MNT_TOOMANY 2 /* too many fields in line */

View File

@ -8,7 +8,7 @@ VPATH = \
# Suppress unused but set variable warnings often due to ASSERTs
AM_CFLAGS += $(NO_UNUSED_BUT_SET_VARIABLE)
libzfs_pcdir = $(libdir)/pkgconfig
libzfs_pcdir = $(datarootdir)/pkgconfig
libzfs_pc_DATA = libzfs.pc libzfs_core.pc
DEFAULT_INCLUDES += \

View File

@ -330,7 +330,7 @@ aes_impl_init(void)
sizeof (aes_fastest_impl));
#endif
strlcpy(aes_fastest_impl.name, "fastest", AES_IMPL_NAME_MAX);
strcpy(aes_fastest_impl.name, "fastest");
/* Finish initialization */
atomic_swap_32(&icp_aes_impl, user_sel_impl);
@ -405,7 +405,7 @@ aes_impl_set(const char *val)
return (err);
}
#if defined(_KERNEL) && defined(__linux__)
#if defined(_KERNEL)
#include <linux/mod_compat.h>
static int

View File

@ -857,7 +857,7 @@ gcm_impl_init(void)
sizeof (gcm_fastest_impl));
}
strlcpy(gcm_fastest_impl.name, "fastest", GCM_IMPL_NAME_MAX);
strcpy(gcm_fastest_impl.name, "fastest");
#ifdef CAN_USE_GCM_ASM
/*
@ -969,7 +969,7 @@ gcm_impl_set(const char *val)
return (err);
}
#if defined(_KERNEL) && defined(__linux__)
#if defined(_KERNEL)
#include <linux/mod_compat.h>
static int

View File

@ -453,19 +453,17 @@ mod_hash_create_extended(
int sleep) /* whether to sleep for mem */
{
mod_hash_t *mod_hash;
size_t size;
ASSERT(hname && keycmp && hash_alg && vdtor && kdtor);
if ((mod_hash = kmem_zalloc(MH_SIZE(nchains), sleep)) == NULL)
return (NULL);
size = strlen(hname) + 1;
mod_hash->mh_name = kmem_alloc(size, sleep);
mod_hash->mh_name = kmem_alloc(strlen(hname) + 1, sleep);
if (mod_hash->mh_name == NULL) {
kmem_free(mod_hash, MH_SIZE(nchains));
return (NULL);
}
(void) strlcpy(mod_hash->mh_name, hname, size);
(void) strcpy(mod_hash->mh_name, hname);
rw_init(&mod_hash->mh_contents, NULL, RW_DEFAULT, NULL);
mod_hash->mh_sleep = sleep;

View File

@ -598,12 +598,10 @@ l_noret luaG_errormsg (lua_State *L) {
l_noret luaG_runerror (lua_State *L, const char *fmt, ...) {
L->runerror++;
va_list argp;
va_start(argp, fmt);
addinfo(L, luaO_pushvfstring(L, fmt, argp));
va_end(argp);
luaG_errormsg(L);
L->runerror--;
}
/* END CSTYLED */

View File

@ -29,24 +29,6 @@
/* Return the number of bytes available on the stack. */
#if defined (_KERNEL) && defined(__linux__)
#include <asm/current.h>
static intptr_t stack_remaining(void) {
char local;
return (intptr_t)(&local - (char *)current->stack);
}
#elif defined (_KERNEL) && defined(__FreeBSD__)
#include <sys/pcpu.h>
static intptr_t stack_remaining(void) {
char local;
return (intptr_t)(&local - (char *)curthread->td_kstack);
}
#else
static intptr_t stack_remaining(void) {
return INTPTR_MAX;
}
#endif
/*
** {======================================================
@ -455,13 +437,8 @@ void luaD_call (lua_State *L, StkId func, int nResults, int allowyield) {
if (L->nCcalls == LUAI_MAXCCALLS)
luaG_runerror(L, "C stack overflow");
else if (L->nCcalls >= (LUAI_MAXCCALLS + (LUAI_MAXCCALLS>>3)))
luaD_throw(L, LUA_ERRERR); /* error while handling stack error */
luaD_throw(L, LUA_ERRERR); /* error while handing stack error */
}
intptr_t remaining = stack_remaining();
if (L->runerror == 0 && remaining < LUAI_MINCSTACK)
luaG_runerror(L, "C stack overflow");
if (L->runerror != 0 && remaining < LUAI_MINCSTACK / 2)
luaD_throw(L, LUA_ERRERR); /* error while handling stack error */
if (!allowyield) L->nny++;
if (!luaD_precall(L, func, nResults)) /* is a Lua function? */
luaV_execute(L); /* call it */

View File

@ -122,12 +122,6 @@ typedef LUAI_UACNUMBER l_uacNumber;
#define LUAI_MAXCCALLS 20
#endif
/*
* Minimum amount of available stack space (in bytes) to make a C call. With
* gsub() recursion, the stack space between each luaD_call() is 1256 bytes.
*/
#define LUAI_MINCSTACK 4096
/*
** maximum number of upvalues in a closure (both C and Lua). (Value
** must fit in an unsigned char.)

View File

@ -214,7 +214,6 @@ static void preinit_state (lua_State *L, global_State *g) {
L->nny = 1;
L->status = LUA_OK;
L->errfunc = 0;
L->runerror = 0;
}

View File

@ -166,7 +166,6 @@ struct lua_State {
unsigned short nCcalls; /* number of nested C calls */
lu_byte hookmask;
lu_byte allowhook;
lu_byte runerror; /* handling a runtime error */
int basehookcount;
int hookcount;
lua_Hook hook;

View File

@ -853,9 +853,9 @@ static void addquoted (lua_State *L, luaL_Buffer *b, int arg) {
else if (*s == '\0' || iscntrl(uchar(*s))) {
char buff[10];
if (!isdigit(uchar(*(s+1))))
snprintf(buff, sizeof(buff), "\\%d", (int)uchar(*s));
sprintf(buff, "\\%d", (int)uchar(*s));
else
snprintf(buff, sizeof(buff), "\\%03d", (int)uchar(*s));
sprintf(buff, "\\%03d", (int)uchar(*s));
luaL_addstring(b, buff);
}
else
@ -890,11 +890,11 @@ static const char *scanformat (lua_State *L, const char *strfrmt, char *form) {
/*
** add length modifier into formats
*/
static void addlenmod (char *form, const char *lenmod, size_t size) {
static void addlenmod (char *form, const char *lenmod) {
size_t l = strlen(form);
size_t lm = strlen(lenmod);
char spec = form[l - 1];
strlcpy(form + l - 1, lenmod, size - (l - 1));
strcpy(form + l - 1, lenmod);
form[l + lm - 1] = spec;
form[l + lm] = '\0';
}
@ -931,7 +931,7 @@ static int str_format (lua_State *L) {
lua_Number diff = n - (lua_Number)ni;
luaL_argcheck(L, -1 < diff && diff < 1, arg,
"not a number in proper range");
addlenmod(form, LUA_INTFRMLEN, MAX_FORMAT);
addlenmod(form, LUA_INTFRMLEN);
nb = str_sprintf(buff, form, ni);
break;
}
@ -941,7 +941,7 @@ static int str_format (lua_State *L) {
lua_Number diff = n - (lua_Number)ni;
luaL_argcheck(L, -1 < diff && diff < 1, arg,
"not a non-negative number in proper range");
addlenmod(form, LUA_INTFRMLEN, MAX_FORMAT);
addlenmod(form, LUA_INTFRMLEN);
nb = str_sprintf(buff, form, ni);
break;
}
@ -951,7 +951,7 @@ static int str_format (lua_State *L) {
case 'a': case 'A':
#endif
case 'g': case 'G': {
addlenmod(form, LUA_FLTFRMLEN, MAX_FORMAT);
addlenmod(form, LUA_FLTFRMLEN);
nb = str_sprintf(buff, form, (LUA_FLTFRM_T)luaL_checknumber(L, arg));
break;
}

View File

@ -929,4 +929,32 @@ void luaV_execute (lua_State *L) {
}
}
/*
* this can live in SPL
*/
#if BITS_PER_LONG == 32
#if defined(_KERNEL) && !defined(SPL_HAS_MODDI3)
extern uint64_t __umoddi3(uint64_t dividend, uint64_t divisor);
/* 64-bit signed modulo for 32-bit machines. */
int64_t
__moddi3(int64_t n, int64_t d)
{
int64_t q;
boolean_t nn = B_FALSE;
if (n < 0) {
nn = B_TRUE;
n = -n;
}
if (d < 0)
d = -d;
q = __umoddi3(n, d);
return (nn ? -q : q);
}
EXPORT_SYMBOL(__moddi3);
#endif
#endif
/* END CSTYLED */
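
The __moddi3() in the hunk above reduces signed 64-bit modulo to an unsigned one and then re-applies the dividend's sign. A minimal userspace sketch of that idea, using the C '%' operator where the kernel build calls __umoddi3(); the function name moddi3_sketch and the test values are mine, not from the ZFS sources:

#include <stdint.h>
#include <stdio.h>

/* Same shape as __moddi3 above, but standalone: take absolute values,
 * do an unsigned modulo, then negate the result if the dividend was
 * negative (the sign of the divisor does not affect the result). */
static int64_t
moddi3_sketch(int64_t n, int64_t d)
{
	int negative = 0;

	if (n < 0) {
		negative = 1;
		n = -n;
	}
	if (d < 0)
		d = -d;

	uint64_t q = (uint64_t)n % (uint64_t)d;
	return (negative ? -(int64_t)q : (int64_t)q);
}

int main(void)
{
	printf("%lld\n", (long long)moddi3_sketch(-7, 3));	/* -1 */
	printf("%lld\n", (long long)moddi3_sketch(7, -3));	/*  1 */
	return (0);
}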

View File

@ -31,19 +31,12 @@
#if defined(__arm__) && !defined(__aarch64__)
#if defined(__thumb2__)
#define _FUNC_MODE .code 16; .thumb_func
#else
#define _FUNC_MODE .code 32
#endif
#define ENTRY(x) \
.text; \
.syntax unified; \
.align 2; \
.global x; \
.type x,#function; \
_FUNC_MODE; \
.code 32; \
x:
#define END(x) \
@ -56,23 +49,13 @@ x:
* setjump + longjmp
*/
ENTRY(setjmp)
#if defined(__thumb2__)
mov ip, sp
stmia r0, {r4-r12,r14}
#else
stmia r0, {r4-r14}
#endif
mov r0, #0x00000000
RET
END(setjmp)
ENTRY(longjmp)
#if defined(__thumb2__)
ldmia r0, {r4-r12,r14}
mov sp, ip
#else
ldmia r0, {r4-r14}
#endif
mov r0, #0x00000001
RET
END(longjmp)

View File

@ -56,7 +56,7 @@
#define ENTRY(name) \
.align 2 ; \
.type name,@function; \
.weak name; \
.globl name; \
name:
#else /* PPC64_ELF_ABI_v1 */
@ -65,8 +65,8 @@ name:
#define GLUE(a,b) XGLUE(a,b)
#define ENTRY(name) \
.align 2 ; \
.weak name; \
.weak GLUE(.,name); \
.globl name; \
.globl GLUE(.,name); \
.pushsection ".opd","aw"; \
name: \
.quad GLUE(.,name); \
@ -83,8 +83,8 @@ GLUE(.,name):
#define ENTRY(name) \
.text; \
.p2align 4; \
.weak name; \
.type name,@function; \
.globl name; \
.type name,@function; \
name:
#endif /* __powerpc64__ */

View File

@ -170,7 +170,6 @@ random_get_pseudo_bytes(uint8_t *ptr, size_t len)
EXPORT_SYMBOL(random_get_pseudo_bytes);
#if BITS_PER_LONG == 32
/*
* Support 64/64 => 64 division on a 32-bit platform. While the kernel
* provides a div64_u64() function for this we do not use it because the
@ -217,14 +216,6 @@ __div_u64(uint64_t u, uint32_t v)
return (u);
}
/*
* Turn off missing prototypes warning for these functions. They are
* replacements for libgcc-provided functions and will never be called
* directly.
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmissing-prototypes"
/*
* Implementation of 64-bit unsigned division for 32-bit machines.
*
@ -300,26 +291,6 @@ __umoddi3(uint64_t dividend, uint64_t divisor)
}
EXPORT_SYMBOL(__umoddi3);
/* 64-bit signed modulo for 32-bit machines. */
int64_t
__moddi3(int64_t n, int64_t d)
{
int64_t q;
boolean_t nn = B_FALSE;
if (n < 0) {
nn = B_TRUE;
n = -n;
}
if (d < 0)
d = -d;
q = __umoddi3(n, d);
return (nn ? -q : q);
}
EXPORT_SYMBOL(__moddi3);
/*
* Implementation of 64-bit unsigned division/modulo for 32-bit machines.
*/
@ -423,9 +394,6 @@ __aeabi_ldivmod(int64_t u, int64_t v)
}
EXPORT_SYMBOL(__aeabi_ldivmod);
#endif /* __arm || __arm__ */
#pragma GCC diagnostic pop
#endif /* BITS_PER_LONG */
/*

View File

@ -203,7 +203,7 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
ASSERT(ISP2(size));
ptr = (void *)__get_free_pages(lflags, get_order(size));
} else {
ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
ptr = __vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
}
/* Resulting allocated memory will be page aligned */
@ -1242,7 +1242,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
* allocation.
*
* However, this can't be applied to KVM_VMEM due to a bug that
* spl_vmalloc() doesn't honor gfp flags in page table allocation.
* __vmalloc() doesn't honor gfp flags in page table allocation.
*/
if (!(skc->skc_flags & KMC_VMEM)) {
rc = __spl_cache_grow(skc, flags | KM_NOSLEEP);

View File

@ -172,15 +172,16 @@ spl_kmem_alloc_impl(size_t size, int flags, int node)
* kmem_zalloc() callers.
*
* For vmem_alloc() and vmem_zalloc() callers it is permissible
* to use spl_vmalloc(). However, in general use of
* spl_vmalloc() is strongly discouraged because a global lock
* must be acquired. Contention on this lock can significantly
* to use __vmalloc(). However, in general use of __vmalloc()
* is strongly discouraged because a global lock must be
* acquired. Contention on this lock can significantly
* impact performance so frequently manipulating the virtual
* address space is strongly discouraged.
*/
if ((size > spl_kmem_alloc_max) || use_vmem) {
if (flags & KM_VMEM) {
ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
ptr = __vmalloc(size, lflags | __GFP_HIGHMEM,
PAGE_KERNEL);
} else {
return (NULL);
}

View File

@ -89,17 +89,7 @@ procfs_list_next_node(procfs_list_cursor_t *cursor, loff_t *pos)
cursor->cached_node = next_node;
cursor->cached_pos = NODE_ID(procfs_list, cursor->cached_node);
*pos = cursor->cached_pos;
} else {
/*
* seq_read() expects ->next() to update the position even
* when there are no more entries. Advance the position to
* prevent a warning from being logged.
*/
cursor->cached_node = NULL;
cursor->cached_pos++;
*pos = cursor->cached_pos;
}
return (next_node);
}
@ -115,8 +105,6 @@ procfs_list_seq_start(struct seq_file *f, loff_t *pos)
cursor->cached_node = SEQ_START_TOKEN;
cursor->cached_pos = 0;
return (SEQ_START_TOKEN);
} else if (cursor->cached_node == NULL) {
return (NULL);
}
/*

View File

@ -1098,20 +1098,14 @@ static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
enum arc_hdr_alloc_flags {
ARC_HDR_ALLOC_RDATA = 0x1,
ARC_HDR_DO_ADAPT = 0x2,
};
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *, boolean_t);
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *);
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *, boolean_t);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
static void arc_hdr_alloc_abd(arc_buf_hdr_t *, int);
static void arc_hdr_alloc_abd(arc_buf_hdr_t *, boolean_t);
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
static boolean_t arc_is_overflowing(void);
static void arc_buf_watch(arc_buf_t *);
@ -1986,7 +1980,7 @@ arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb)
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_ENCRYPTED(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
arc_hdr_alloc_abd(hdr, B_FALSE);
ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot,
B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv,
@ -2013,7 +2007,7 @@ arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb)
* and then loan a buffer from it, rather than allocating a
* linear buffer and wrapping it in an abd later.
*/
cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr, B_TRUE);
cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
@ -3318,11 +3312,9 @@ arc_buf_destroy_impl(arc_buf_t *buf)
}
static void
arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, boolean_t alloc_rdata)
{
uint64_t size;
boolean_t alloc_rdata = ((alloc_flags & ARC_HDR_ALLOC_RDATA) != 0);
boolean_t do_adapt = ((alloc_flags & ARC_HDR_DO_ADAPT) != 0);
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
ASSERT(HDR_HAS_L1HDR(hdr));
@ -3332,15 +3324,13 @@ arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
if (alloc_rdata) {
size = HDR_GET_PSIZE(hdr);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
do_adapt);
hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
ARCSTAT_INCR(arcstat_raw_size, size);
} else {
size = arc_hdr_size(hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
do_adapt);
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
}
@ -3392,7 +3382,6 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
arc_buf_contents_t type, boolean_t alloc_rdata)
{
arc_buf_hdr_t *hdr;
int flags = ARC_HDR_DO_ADAPT;
VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
if (protected) {
@ -3400,7 +3389,6 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
} else {
hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
}
flags |= alloc_rdata ? ARC_HDR_ALLOC_RDATA : 0;
ASSERT(HDR_EMPTY(hdr));
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
@ -3424,7 +3412,7 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
* the compressed or uncompressed data depending on the block
* it references and compressed arc enablement.
*/
arc_hdr_alloc_abd(hdr, flags);
arc_hdr_alloc_abd(hdr, alloc_rdata);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
return (hdr);
@ -4871,7 +4859,8 @@ arc_free_memory(void)
#else
return (ptob(nr_free_pages() +
nr_inactive_file_pages() +
nr_inactive_anon_pages()));
nr_inactive_anon_pages() +
nr_slab_reclaimable_pages()));
#endif /* CONFIG_HIGHMEM */
#else
@ -5519,12 +5508,11 @@ arc_is_overflowing(void)
}
static abd_t *
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
boolean_t do_adapt)
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, do_adapt);
arc_get_data_impl(hdr, size, tag);
if (type == ARC_BUFC_METADATA) {
return (abd_alloc(size, B_TRUE));
} else {
@ -5538,7 +5526,7 @@ arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, B_TRUE);
arc_get_data_impl(hdr, size, tag);
if (type == ARC_BUFC_METADATA) {
return (zio_buf_alloc(size));
} else {
@ -5554,14 +5542,12 @@ arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
* limit, we'll only signal the reclaim thread and continue on.
*/
static void
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
boolean_t do_adapt)
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
if (do_adapt)
arc_adapt(size, state);
arc_adapt(size, state);
/*
* If arc_size is currently overflowing, and has grown past our
@ -6361,7 +6347,6 @@ top:
boolean_t devw = B_FALSE;
uint64_t size;
abd_t *hdr_abd;
int alloc_flags = encrypted_read ? ARC_HDR_ALLOC_RDATA : 0;
/*
* Gracefully handle a damaged logical block size as a
@ -6440,9 +6425,8 @@ top:
* do this after we've called arc_access() to
* avoid hitting an assert in remove_reference().
*/
arc_adapt(arc_hdr_size(hdr), hdr->b_l1hdr.b_state);
arc_access(hdr, hash_lock);
arc_hdr_alloc_abd(hdr, alloc_flags);
arc_hdr_alloc_abd(hdr, encrypted_read);
}
if (encrypted_read) {
@ -6886,7 +6870,7 @@ arc_release(arc_buf_t *buf, void *tag)
if (arc_can_share(hdr, lastbuf)) {
arc_share_buf(hdr, lastbuf);
} else {
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
arc_hdr_alloc_abd(hdr, B_FALSE);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
buf->b_data, psize);
}
@ -7121,7 +7105,7 @@ arc_write_ready(zio_t *zio)
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(psize, >, 0);
ASSERT(ARC_BUF_COMPRESSED(buf));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT|ARC_HDR_ALLOC_RDATA);
arc_hdr_alloc_abd(hdr, B_TRUE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
/*
@ -7131,17 +7115,16 @@ arc_write_ready(zio_t *zio)
*/
if (BP_IS_ENCRYPTED(bp)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr,
ARC_HDR_DO_ADAPT|ARC_HDR_ALLOC_RDATA);
arc_hdr_alloc_abd(hdr, B_TRUE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
!ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
arc_hdr_alloc_abd(hdr, B_FALSE);
abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
} else {
ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
arc_hdr_alloc_abd(hdr, B_FALSE);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
}
@ -7896,8 +7879,8 @@ arc_init(void)
offsetof(arc_prune_t, p_node));
mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
arc_prune_taskq = taskq_create("arc_prune", boot_ncpus, defclsyspri,
boot_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
arc_prune_taskq = taskq_create("arc_prune", max_ncpus, defclsyspri,
max_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
@ -8436,8 +8419,7 @@ l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
* until arc_read_done().
*/
if (BP_IS_ENCRYPTED(bp)) {
abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
B_TRUE);
abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
zio_crypt_decode_params_bp(bp, salt, iv);
zio_crypt_decode_mac_bp(bp, mac);
@ -8473,8 +8455,7 @@ l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
B_TRUE);
abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),

View File

@ -253,7 +253,7 @@ void
ddt_object_name(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
char *name)
{
(void) snprintf(name, DDT_NAMELEN, DMU_POOL_DDT,
(void) sprintf(name, DMU_POOL_DDT,
zio_checksum_table[ddt->ddt_checksum].ci_name,
ddt_ops[type]->ddt_op_name, ddt_class_name[class]);
}

View File

@ -2623,7 +2623,6 @@ EXPORT_SYMBOL(dmu_object_set_blocksize);
EXPORT_SYMBOL(dmu_object_set_maxblkid);
EXPORT_SYMBOL(dmu_object_set_checksum);
EXPORT_SYMBOL(dmu_object_set_compress);
EXPORT_SYMBOL(dmu_offset_next);
EXPORT_SYMBOL(dmu_write_policy);
EXPORT_SYMBOL(dmu_sync);
EXPORT_SYMBOL(dmu_request_arcbuf);

View File

@ -1967,15 +1967,14 @@ do_userquota_update(objset_t *os, userquota_cache_t *cache, uint64_t used,
if (subtract)
delta = -delta;
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)user);
(void) sprintf(name, "%llx", (longlong_t)user);
userquota_update_cache(&cache->uqc_user_deltas, name, delta);
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)group);
(void) sprintf(name, "%llx", (longlong_t)group);
userquota_update_cache(&cache->uqc_group_deltas, name, delta);
if (dmu_objset_projectquota_enabled(os)) {
(void) snprintf(name, sizeof (name), "%llx",
(longlong_t)project);
(void) sprintf(name, "%llx", (longlong_t)project);
userquota_update_cache(&cache->uqc_project_deltas,
name, delta);
}
@ -2538,7 +2537,7 @@ dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
return (SET_ERROR(ENAMETOOLONG));
}
(void) strlcpy(name, attr.za_name, namelen);
(void) strcpy(name, attr.za_name);
if (idp)
*idp = attr.za_first_integer;
if (case_conflict)
@ -2583,7 +2582,7 @@ dmu_dir_list_next(objset_t *os, int namelen, char *name,
return (SET_ERROR(ENAMETOOLONG));
}
(void) strlcpy(name, attr.za_name, namelen);
(void) strcpy(name, attr.za_name);
if (idp)
*idp = attr.za_first_integer;
zap_cursor_advance(&cursor);

View File

@ -846,7 +846,7 @@ void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
if (ds == NULL) {
(void) strlcpy(name, "mos", ZFS_MAX_DATASET_NAME_LEN);
(void) strcpy(name, "mos");
} else {
dsl_dir_name(ds->ds_dir, name);
VERIFY0(dsl_dataset_get_snapname(ds));
@ -2161,12 +2161,9 @@ get_receive_resume_stats_impl(dsl_dataset_t *ds)
zio_cksum_t cksum;
fletcher_4_native_varsize(compressed, compressed_size, &cksum);
size_t alloc_size = compressed_size * 2 + 1;
str = kmem_alloc(alloc_size, KM_SLEEP);
str = kmem_alloc(compressed_size * 2 + 1, KM_SLEEP);
for (int i = 0; i < compressed_size; i++) {
size_t offset = i * 2;
(void) snprintf(str + offset, alloc_size - offset,
"%02x", compressed[i]);
(void) sprintf(str + i * 2, "%02x", compressed[i]);
}
str[compressed_size * 2] = '\0';
char *propval = kmem_asprintf("%u-%llx-%llx-%s",
@ -2174,7 +2171,7 @@ get_receive_resume_stats_impl(dsl_dataset_t *ds)
(longlong_t)cksum.zc_word[0],
(longlong_t)packed_size, str);
kmem_free(packed, packed_size);
kmem_free(str, alloc_size);
kmem_free(str, compressed_size * 2 + 1);
kmem_free(compressed, packed_size);
return (propval);
}
@ -3576,8 +3573,7 @@ dsl_dataset_promote(const char *name, char *conflsnap)
*/
snap_pair = nvlist_next_nvpair(ddpa.err_ds, NULL);
if (snap_pair != NULL && conflsnap != NULL)
(void) strlcpy(conflsnap, nvpair_name(snap_pair),
ZFS_MAX_DATASET_NAME_LEN);
(void) strcpy(conflsnap, nvpair_name(snap_pair));
fnvlist_free(ddpa.err_ds);
return (error);
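
For reference, the hex-encoding pattern in the get_receive_resume_stats_impl() hunk above writes two hex characters per input byte into a buffer of size count * 2 + 1. A standalone sketch of that pattern, assuming only standard C; the payload bytes and variable names are illustrative:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned char payload[] = { 0xde, 0xad, 0xbe, 0xef };
	size_t payload_size = sizeof (payload);
	size_t alloc_size = payload_size * 2 + 1;	/* 2 hex chars per byte + NUL */
	char *str = malloc(alloc_size);

	if (str == NULL)
		return (1);
	for (size_t i = 0; i < payload_size; i++) {
		size_t offset = i * 2;
		/* Each snprintf() writes two characters plus a NUL that the
		 * next iteration overwrites; the final NUL terminates str. */
		snprintf(str + offset, alloc_size - offset, "%02x", payload[i]);
	}
	printf("%s\n", str);	/* deadbeef */
	free(str);
	return (0);
}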

View File

@ -234,8 +234,7 @@ dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
if (err != 0)
goto errout;
} else {
(void) strlcpy(dd->dd_myname, spa_name(dp->dp_spa),
sizeof (dd->dd_myname));
(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
}
if (dsl_dir_is_clone(dd)) {
@ -395,7 +394,7 @@ getcomponent(const char *path, char *component, const char **nextp)
return (SET_ERROR(EINVAL));
if (strlen(path) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strlcpy(component, path, ZFS_MAX_DATASET_NAME_LEN);
(void) strcpy(component, path);
p = NULL;
} else if (p[0] == '/') {
if (p - path >= ZFS_MAX_DATASET_NAME_LEN)

View File

@ -222,9 +222,9 @@ dsl_pool_open_impl(spa_t *spa, uint64_t txg)
cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);
dp->dp_iput_taskq = taskq_create("z_iput", max_ncpus, defclsyspri,
boot_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
max_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
dp->dp_unlinked_drain_taskq = taskq_create("z_unlinked_drain",
boot_ncpus, defclsyspri, boot_ncpus, INT_MAX,
max_ncpus, defclsyspri, max_ncpus, INT_MAX,
TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
return (dp);

View File

@ -130,9 +130,8 @@ dsl_prop_get_dd(dsl_dir_t *dd, const char *propname,
if (inheriting) {
dsl_dir_name(dd, setpoint);
} else {
(void) strlcpy(setpoint,
ZPROP_SOURCE_VAL_RECVD,
MAXNAMELEN);
(void) strcpy(setpoint,
ZPROP_SOURCE_VAL_RECVD);
}
}
break;
@ -205,9 +204,8 @@ dsl_prop_get_ds(dsl_dataset_t *ds, const char *propname,
strfree(recvdstr);
if (err != ENOENT) {
if (setpoint != NULL && err == 0)
(void) strlcpy(setpoint,
ZPROP_SOURCE_VAL_RECVD,
MAXNAMELEN);
(void) strcpy(setpoint,
ZPROP_SOURCE_VAL_RECVD);
return (err);
}
}

View File

@ -358,7 +358,7 @@ scan_init(void)
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
char name[36];
(void) snprintf(name, sizeof (name), "sio_cache_%d", i);
(void) sprintf(name, "sio_cache_%d", i);
sio_cache[i] = kmem_cache_create(name,
(sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
0, NULL, NULL, NULL, NULL, NULL, 0);
@ -540,22 +540,6 @@ dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
zfs_dbgmsg("new-style scrub was modified "
"by old software; restarting in txg %llu",
(longlong_t)scn->scn_restart_txg);
} else if (dsl_scan_resilvering(dp)) {
/*
* If a resilver is in progress and there are already
* errors, restart it instead of finishing this scan and
* then restarting it. If there haven't been any errors
* then remember that the incore DTL is valid.
*/
if (scn->scn_phys.scn_errors > 0) {
scn->scn_restart_txg = txg;
zfs_dbgmsg("resilver can't excise DTL_MISSING "
"when finished; restarting in txg %llu",
(u_longlong_t)scn->scn_restart_txg);
} else {
/* it's safe to excise DTL when finished */
spa->spa_scrub_started = B_TRUE;
}
}
}
@ -897,6 +881,7 @@ dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
"errors=%llu", spa_get_errlog_size(spa));
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
spa->spa_scrub_started = B_FALSE;
spa->spa_scrub_active = B_FALSE;
/*
@ -923,12 +908,6 @@ dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
}
spa_errlog_rotate(spa);
/*
* Don't clear flag until after vdev_dtl_reassess to ensure that
* DTL_MISSING will get updated when possible.
*/
spa->spa_scrub_started = B_FALSE;
/*
* We may have finished replacing a device.
* Let the async thread assess this and handle the detach.

View File

@ -101,9 +101,9 @@ dsl_dataset_user_hold_check(void *arg, dmu_tx_t *tx)
size_t len = strlen(nvpair_name(pair)) +
strlen(fnvpair_value_string(pair));
char *nameval = kmem_zalloc(len + 2, KM_SLEEP);
(void) strlcpy(nameval, nvpair_name(pair), len + 2);
(void) strlcat(nameval, "@", len + 2);
(void) strlcat(nameval, fnvpair_value_string(pair), len + 2);
(void) strcpy(nameval, nvpair_name(pair));
(void) strcat(nameval, "@");
(void) strcat(nameval, fnvpair_value_string(pair));
fnvlist_add_string(tmp_holds, nameval, "");
kmem_free(nameval, len + 2);
}
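
Both sides of the hunk above size the scratch buffer the same way: strlen(name) + strlen(value) + 2, leaving room for one '@' and the terminating NUL. A standalone userspace sketch of that join, using snprintf() in place of the kernel's strlcpy()/strcat(); the function name join_hold_name and the sample strings are mine:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Join "<name>@<snap>" into a freshly allocated buffer sized exactly as in
 * the hunk above: strlen(name) + strlen(snap) + 2. */
static char *
join_hold_name(const char *name, const char *snap)
{
	size_t len = strlen(name) + strlen(snap);
	char *nameval = calloc(1, len + 2);

	if (nameval == NULL)
		return (NULL);
	snprintf(nameval, len + 2, "%s@%s", name, snap);
	return (nameval);
}

int main(void)
{
	char *s = join_hold_name("pool/fs", "mysnap");

	if (s != NULL) {
		printf("%s\n", s);	/* pool/fs@mysnap */
		free(s);
	}
	return (0);
}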

View File

@ -198,6 +198,14 @@ mmp_init(spa_t *spa)
cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
mmp->mmp_kstat_id = 1;
/*
* mmp_write_done() calculates mmp_delay based on prior mmp_delay and
* the elapsed time since the last write. For the first mmp write,
* there is no "last write", so we start with fake non-zero values.
*/
mmp->mmp_last_write = gethrtime();
mmp->mmp_delay = MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval));
}
void
@ -549,18 +557,6 @@ mmp_thread(void *arg)
mmp_thread_enter(mmp, &cpr);
/*
* There have been no MMP writes yet. Setting mmp_last_write here gives
* us one mmp_fail_ns period, which is consistent with the activity
* check duration, to try to land an MMP write before MMP suspends the
* pool (if so configured).
*/
mutex_enter(&mmp->mmp_io_lock);
mmp->mmp_last_write = gethrtime();
mmp->mmp_delay = MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval));
mutex_exit(&mmp->mmp_io_lock);
while (!mmp->mmp_thread_exiting) {
hrtime_t next_time = gethrtime() +
MSEC2NSEC(MMP_DEFAULT_INTERVAL);

View File

@ -6169,8 +6169,8 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
spa_strfree(oldvd->vdev_path);
oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
KM_SLEEP);
(void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5,
"%s/%s", newvd->vdev_path, "old");
(void) sprintf(oldvd->vdev_path, "%s/%s",
newvd->vdev_path, "old");
if (oldvd->vdev_devid != NULL) {
spa_strfree(oldvd->vdev_devid);
oldvd->vdev_devid = NULL;

View File

@ -453,7 +453,7 @@ txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
* Commit callback taskq hasn't been created yet.
*/
tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
boot_ncpus, defclsyspri, boot_ncpus, boot_ncpus * 2,
max_ncpus, defclsyspri, max_ncpus, max_ncpus * 2,
TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
}

View File

@ -2566,6 +2566,7 @@ vdev_dtl_should_excise(vdev_t *vd)
spa_t *spa = vd->vdev_spa;
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
ASSERT0(scn->scn_phys.scn_errors);
ASSERT0(vd->vdev_children);
if (vd->vdev_state < VDEV_STATE_DEGRADED)
@ -2616,7 +2617,6 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
if (vd->vdev_ops->vdev_op_leaf) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
boolean_t wasempty = B_TRUE;
mutex_enter(&vd->vdev_dtl_lock);
@ -2626,18 +2626,6 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
if (zfs_scan_ignore_errors && scn)
scn->scn_phys.scn_errors = 0;
if (scrub_txg != 0 &&
!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
wasempty = B_FALSE;
zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
"dtl:%llu/%llu errors:%llu",
(u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
(u_longlong_t)scrub_txg, spa->spa_scrub_started,
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd),
(u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
}
/*
* If we've completed a scan cleanly then determine
* if this vdev should remove any DTLs. We only want to
@ -2674,14 +2662,6 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
space_reftree_generate_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_destroy(&reftree);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd));
} else if (!wasempty) {
zfs_dbgmsg("DTL_MISSING is now empty");
}
}
range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
range_tree_walk(vd->vdev_dtl[DTL_MISSING],

View File

@ -471,16 +471,6 @@ vdev_submit_bio_impl(struct bio *bio)
#endif
}
/*
* preempt_schedule_notrace is GPL-only which breaks the ZFS build, so
* replace it with preempt_schedule under the following condition:
*/
#if defined(CONFIG_ARM64) && \
defined(CONFIG_PREEMPTION) && \
defined(CONFIG_BLK_CGROUP)
#define preempt_schedule_notrace(x) preempt_schedule(x)
#endif
#ifdef HAVE_BIO_SET_DEV
#if defined(CONFIG_BLK_CGROUP) && defined(HAVE_BIO_SET_DEV_GPL_ONLY)
/*
@ -848,7 +838,7 @@ vdev_disk_io_done(zio_t *zio)
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
if (zfs_check_media_change(vd->vd_bdev)) {
if (check_disk_change(vd->vd_bdev)) {
vdev_bdev_invalidate(vd->vd_bdev);
v->vdev_remove_wanted = B_TRUE;
spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);

View File

@ -1030,7 +1030,7 @@ zap_value_search(objset_t *os, uint64_t zapobj, uint64_t value, uint64_t mask,
(err = zap_cursor_retrieve(&zc, za)) == 0;
zap_cursor_advance(&zc)) {
if ((za->za_first_integer & mask) == (value & mask)) {
(void) strlcpy(name, za->za_name, MAXNAMELEN);
(void) strcpy(name, za->za_name);
break;
}
}

View File

@ -1602,8 +1602,7 @@ zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
za->za_integer_length = 8;
za->za_num_integers = 1;
za->za_first_integer = mzep->mze_value;
(void) strlcpy(za->za_name, mzep->mze_name,
sizeof (za->za_name));
(void) strcpy(za->za_name, mzep->mze_name);
zc->zc_hash = mze->mze_hash;
zc->zc_cd = mze->mze_cd;
err = 0;

View File

@ -81,13 +81,13 @@ get_objset_type_name(dsl_dataset_t *ds, char *str)
return (error);
switch (type) {
case ZFS_TYPE_SNAPSHOT:
(void) strlcpy(str, "snapshot", ZAP_MAXVALUELEN);
(void) strcpy(str, "snapshot");
break;
case ZFS_TYPE_FILESYSTEM:
(void) strlcpy(str, "filesystem", ZAP_MAXVALUELEN);
(void) strcpy(str, "filesystem");
break;
case ZFS_TYPE_VOLUME:
(void) strlcpy(str, "volume", ZAP_MAXVALUELEN);
(void) strcpy(str, "volume");
break;
default:
return (EINVAL);
@ -399,11 +399,11 @@ get_special_prop(lua_State *state, dsl_dataset_t *ds, const char *dsname,
break;
case ZFS_PROP_FILESYSTEM_COUNT:
error = dsl_dir_get_filesystem_count(ds->ds_dir, &numval);
(void) strlcpy(setpoint, "", ZFS_MAX_DATASET_NAME_LEN);
(void) strcpy(setpoint, "");
break;
case ZFS_PROP_SNAPSHOT_COUNT:
error = dsl_dir_get_snapshot_count(ds->ds_dir, &numval);
(void) strlcpy(setpoint, "", ZFS_MAX_DATASET_NAME_LEN);
(void) strcpy(setpoint, "");
break;
case ZFS_PROP_NUMCLONES:
numval = dsl_get_numclones(ds);
@ -445,8 +445,7 @@ get_special_prop(lua_State *state, dsl_dataset_t *ds, const char *dsname,
sizeof (numval), 1, &numval);
}
if (error == 0)
(void) strlcpy(setpoint, dsname,
ZFS_MAX_DATASET_NAME_LEN);
(void) strcpy(setpoint, dsname);
break;
case ZFS_PROP_VOLBLOCKSIZE: {
@ -767,10 +766,9 @@ parse_written_prop(const char *dataset_name, const char *prop_name,
ASSERT(zfs_prop_written(prop_name));
const char *name = prop_name + ZFS_WRITTEN_PROP_PREFIX_LEN;
if (strchr(name, '@') == NULL) {
(void) snprintf(snap_name, ZFS_MAX_DATASET_NAME_LEN, "%s@%s",
dataset_name, name);
(void) sprintf(snap_name, "%s@%s", dataset_name, name);
} else {
(void) strlcpy(snap_name, name, ZFS_MAX_DATASET_NAME_LEN);
(void) strcpy(snap_name, name);
}
}

View File

@ -31,7 +31,6 @@
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright (c) 2018 George Melikov. All Rights Reserved.
* Copyright (c) 2019 Datto, Inc. All rights reserved.
* Copyright (c) 2020 The MathWorks, Inc. All rights reserved.
*/
/*
@ -978,22 +977,6 @@ out:
return (error);
}
/*
* Flush everything out of the kernel's export table and such.
* This is needed because, once the snapshot is used over NFS, its
* entries in the svc_export and svc_expkey caches hold a reference
* to the snapshot mount point. There is no known way of flushing
* only the entries related to the snapshot.
*/
static void
exportfs_flush(void)
{
char *argv[] = { "/usr/sbin/exportfs", "-f", NULL };
char *envp[] = { NULL };
(void) call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}
/*
* Attempt to unmount a snapshot by making a call to user space.
* There is no assurance that this can or will succeed, is just a
@ -1016,8 +999,6 @@ zfsctl_snapshot_unmount(char *snapname, int flags)
}
rw_exit(&zfs_snapshot_lock);
exportfs_flush();
if (flags & MNT_FORCE)
argv[4] = "-fn";
argv[5] = se->se_path;
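The removed exportfs_flush() above and the tail of this hunk both follow the same pattern: hand the work off to userspace with call_usermodehelper(). A sketch of how the unmount side fits together, with the argv layout inferred from the argv[4]/argv[5] assignments shown in the hunk (everything else is assumed and illustrative):

/*
 * Sketch of the userspace-helper pattern; only the argv[4]/argv[5]
 * slots are taken from the hunk above, the rest of the argument
 * vector is an assumption for illustration.
 */
static int
snapshot_unmount_sketch(char *se_path, int flags)
{
	char *argv[] = { "/usr/bin/env", "umount", "-t", "zfs",
	    "-n", NULL, NULL };
	char *envp[] = { NULL };

	if (flags & MNT_FORCE)
		argv[4] = "-fn";	/* force, and skip mtab updates */
	argv[5] = se_path;

	return (call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC));
}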

View File

@ -2557,8 +2557,7 @@ zfs_prop_set_special(const char *dsname, zprop_source_t source,
zfs_cmd_t *zc;
zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP);
(void) strlcpy(zc->zc_name, dsname,
sizeof (zc->zc_name));
(void) strcpy(zc->zc_name, dsname);
(void) zfs_ioc_userspace_upgrade(zc);
(void) zfs_ioc_id_quota_upgrade(zc);
kmem_free(zc, sizeof (zfs_cmd_t));
@ -5070,7 +5069,7 @@ zfs_ioc_recv_new(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
strchr(snapname, '%'))
return (SET_ERROR(EINVAL));
(void) strlcpy(tofs, snapname, sizeof (tofs));
(void) strcpy(tofs, snapname);
tosnap = strchr(tofs, '@');
*tosnap++ = '\0';

View File

@ -203,13 +203,11 @@ zio_init(void)
if (align != 0) {
char name[36];
(void) snprintf(name, sizeof (name), "zio_buf_%lu",
(ulong_t)size);
(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL, cflags);
(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
(ulong_t)size);
(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
zio_data_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL,
data_alloc_arena, data_cflags);

View File

@ -1017,17 +1017,9 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
return (SET_ERROR(error));
}
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static blk_qc_t
zvol_submit_bio(struct bio *bio)
#else
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
#endif
{
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
struct request_queue *q = bio->bi_disk->queue;
#endif
zvol_state_t *zv = q->queuedata;
fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t offset = BIO_BI_SECTOR(bio) << 9;
@ -1147,8 +1139,7 @@ out:
spl_fstrans_unmark(cookie);
#ifdef HAVE_MAKE_REQUEST_FN_RET_INT
return (0);
#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC) || \
defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)
#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC)
return (BLK_QC_T_NONE);
#endif
}
@ -1450,7 +1441,7 @@ zvol_open(struct block_device *bdev, fmode_t flag)
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
zfs_check_media_change(bdev);
check_disk_change(bdev);
return (0);
@ -1685,9 +1676,6 @@ static struct block_device_operations zvol_ops = {
.revalidate_disk = zvol_revalidate_disk,
.getgeo = zvol_getgeo,
.owner = THIS_MODULE,
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
.submit_bio = zvol_submit_bio,
#endif
};
/*
@ -1715,11 +1703,7 @@ zvol_alloc(dev_t dev, const char *name)
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
zv->zv_queue = blk_alloc_queue(NUMA_NO_NODE);
#else
zv->zv_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
#endif
if (zv->zv_queue == NULL)
goto out_kmem;
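Taken together, the zvol hunks above track a single kernel interface change: in the HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS case kept on zfs-0.8-release, the bio entry point is exposed through struct block_device_operations and the queue is allocated bare, while older kernels bind a make_request-style callback to the queue at allocation time. A condensed sketch of the two shapes; the my_* names are placeholders, and blk_generic_alloc_queue() is the compat helper seen in the hunk:

/* Condensed sketch; my_* names are placeholders, not the zvol code. */
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static blk_qc_t my_submit_bio(struct bio *bio);

static struct block_device_operations my_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= my_submit_bio,	/* 5.9+: entry point lives here */
};
#else
static MAKE_REQUEST_FN_RET my_request(struct request_queue *q,
    struct bio *bio);
#endif

static struct request_queue *
my_alloc_queue(void)
{
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
	/* The queue itself no longer carries a request callback. */
	return (blk_alloc_queue(NUMA_NO_NODE));
#else
	/* Older kernels: the callback is bound when the queue is created. */
	return (blk_generic_alloc_queue(my_request, NUMA_NO_NODE));
#endif
}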

View File

@ -93,7 +93,7 @@ fi
CONFIG_H="/var/lib/dkms/%{module}/%{version}/*/*/%{module}_config.h"
SPEC_META_ALIAS="@PACKAGE@-@VERSION@-@RELEASE@"
DKMS_META_ALIAS=`cat $CONFIG_H 2>/dev/null |
awk -F'"' '/META_ALIAS\s+"/ { print $2; exit 0 }'`
awk -F'"' '/META_ALIAS/ { print $2; exit 0 }'`
if [ "$SPEC_META_ALIAS" = "$DKMS_META_ALIAS" ]; then
echo -e
echo -e "Uninstall of %{module} module ($SPEC_META_ALIAS) beginning:"

View File

@ -471,8 +471,8 @@ systemctl --system daemon-reload >/dev/null || true
%{_libdir}/libzfs*.so.*
%files -n libzfs2-devel
%{_libdir}/pkgconfig/libzfs.pc
%{_libdir}/pkgconfig/libzfs_core.pc
%{_datarootdir}/pkgconfig/libzfs.pc
%{_datarootdir}/pkgconfig/libzfs_core.pc
%{_libdir}/*.so
%{_includedir}/*
%doc AUTHORS COPYRIGHT LICENSE NOTICE README.md

View File

@ -76,7 +76,7 @@ tests = ['tst.args_to_lua', 'tst.divide_by_zero', 'tst.exists',
'tst.memory_limit', 'tst.nested_neg', 'tst.nested_pos', 'tst.nvlist_to_lua',
'tst.recursive_neg', 'tst.recursive_pos', 'tst.return_large',
'tst.return_nvlist_neg', 'tst.return_nvlist_pos',
'tst.return_recursive_table', 'tst.stack_gsub', 'tst.timeout']
'tst.return_recursive_table', 'tst.timeout']
tags = ['functional', 'channel_program', 'lua_core']
[tests/functional/channel_program/synctask_core]
@ -797,7 +797,7 @@ tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
tags = ['functional', 'reservation']
[tests/functional/resilver]
tests = ['resilver_restart_001', 'resilver_restart_002']
tests = ['resilver_restart_001']
tags = ['functional', 'resilver']
[tests/functional/rootpool]

View File

@ -274,20 +274,9 @@ maybe = {
'vdev_zaps/vdev_zaps_004_pos': ['FAIL', '6935'],
'write_dirs/setup': ['SKIP', disk_reason],
'zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos': ['FAIL', '5848'],
'alloc_class/alloc_class_009_pos': ['FAIL', known_reason],
'alloc_class/alloc_class_010_pos': ['FAIL', known_reason],
'cli_root/zfs_rename/zfs_rename_002_pos': ['FAIL', known_reason],
'cli_root/zpool_expand/zpool_expand_001_pos': ['FAIL', known_reason],
'cli_root/zpool_expand/zpool_expand_005_pos': ['FAIL', known_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['FAIL', known_reason],
'refreserv/refreserv_raidz': ['FAIL', known_reason],
'rsend/rsend_007_pos': ['FAIL', known_reason],
'rsend/rsend_010_pos': ['FAIL', known_reason],
'rsend/rsend_011_pos': ['FAIL', known_reason],
'snapshot/rollback_003_pos': ['FAIL', known_reason],
}
def usage(s):
print(s)
sys.exit(1)

View File

@ -21,7 +21,6 @@ dist_pkgdata_SCRIPTS = \
tst.return_nvlist_neg.ksh \
tst.return_nvlist_pos.ksh \
tst.return_recursive_table.ksh \
tst.stack_gsub.ksh \
tst.timeout.ksh
dist_pkgdata_DATA = \
@ -41,6 +40,4 @@ dist_pkgdata_DATA = \
tst.recursive.zcp \
tst.return_large.zcp \
tst.return_recursive_table.zcp \
tst.stack_gsub.err \
tst.stack_gsub.zcp \
tst.timeout.zcp

View File

@ -61,9 +61,6 @@ log_mustnot_checkerror_program "Memory limit exhausted" -m 1 $TESTPOOL - <<-EOF
return s
EOF
# Set the memlimit, in case it is a non-default value
log_must set_tunable32 zfs_lua_max_memlimit 100000000
log_mustnot_checkerror_program "Invalid instruction or memory limit" \
-m 200000000 $TESTPOOL - <<-EOF
return 1;

View File

@ -1,18 +0,0 @@
Channel program execution failed:
C stack overflow
stack traceback:
[C]: in function 'gsub'
[string "channel program"]:17: in function <[string "channel program"]:16>
[C]: in function 'gsub'
[string "channel program"]:17: in function <[string "channel program"]:16>
[C]: in function 'gsub'
[string "channel program"]:17: in function <[string "channel program"]:16>
[C]: in function 'gsub'
[string "channel program"]:17: in function <[string "channel program"]:16>
[C]: in function 'gsub'
[string "channel program"]:17: in function <[string "channel program"]:16>
[C]: in function 'gsub'
[string "channel program"]:17: in function <[string "channel program"]:16>
[C]: in function 'gsub'
[string "channel program"]:17: in function <[string "channel program"]:16>
(...tail calls...)

View File

@ -1,33 +0,0 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2020 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/channel_program/channel_common.kshlib
#
# DESCRIPTION:
# Overflowing the C stack using recursive gsub() should be handled
# gracefully. gsub() uses more stack space than typical, so it relies
# on LUAI_MINCSTACK to ensure that we don't overflow the Linux kernel's
# stack.
#
verify_runnable "global"
log_assert "recursive gsub() should be handled gracefully"
log_mustnot_program $TESTPOOL $ZCP_ROOT/lua_core/tst.stack_gsub.zcp
log_pass "recursive gsub() should be handled gracefully"

View File

@ -1,20 +0,0 @@
--
-- This file and its contents are supplied under the terms of the
-- Common Development and Distribution License ("CDDL"), version 1.0.
-- You may only use this file in accordance with the terms of version
-- 1.0 of the CDDL.
--
-- A full copy of the text of the CDDL should have accompanied this
-- source. A copy of the CDDL is also available via the Internet at
-- http://www.illumos.org/license/CDDL.
--
--
-- Copyright (c) 2020 by Delphix. All rights reserved.
--
function f(s)
return string.gsub(s, ".", f)
end
return f("foo")

View File

@ -25,29 +25,23 @@
#
# DESCRIPTION:
# Testing Fault Management Agent ZED Logic - Physically removed device is
# made unavail and onlined when reattached
# offlined and onlined when reattached
#
# STRATEGY:
# 1. Create a pool
# 2. Simulate physical removal of one device
# 3. Verify the device is unavailable
# 3. Verify the device is offlined
# 4. Reattach the device
# 5. Verify the device is onlined
# 6. Repeat the same tests with a spare device:
# zed will use the spare to handle the removed data device
# 7. Repeat the same tests again with a faulted spare device:
# the removed data device should be unavailable
# 6. Repeat the same tests with a spare device: zed will use the spare to handle
# the removed data device
# 7. Repeat the same tests again with a faulted spare device: zed should offline
# the removed data device if no spare is available
#
# NOTE: the use of 'block_device_wait' throughout the test helps avoid race
# conditions caused by mixing creation/removal events from partitioning the
# disk (zpool create) and events from physically removing it (remove_disk).
#
# NOTE: the test relies on 'zpool sync' to prompt the kmods to transition a
# vdev to the unavailable state. The ZED does receive a removal notification
# but only relies on it to activate a hot spare. Additional work is planned
# to extend an existing ioctl interface to allow the ZED to transition the
# vdev into a removed state.
#
verify_runnable "both"
if is_linux; then
@ -79,10 +73,13 @@ filedev3="$TEST_BASE_DIR/file-vdev-3"
sparedev="$TEST_BASE_DIR/file-vdev-spare"
removedev=$(get_debug_device)
typeset poolconfs=(
"mirror $filedev1 $removedev"
typeset poolconfs=("mirror $filedev1 $removedev"
"raidz $filedev1 $removedev"
"raidz2 $filedev1 $filedev2 $removedev"
"raidz3 $filedev1 $filedev2 $filedev3 $removedev"
"mirror $filedev1 $filedev2 special mirror $filedev3 $removedev"
"$filedev1 cache $removedev"
"mirror $filedev1 $filedev2 cache $removedev"
"raidz $filedev1 $filedev2 $filedev3 cache $removedev"
)
log_must truncate -s $SPA_MINDEVSIZE $filedev1
@ -94,18 +91,13 @@ for conf in "${poolconfs[@]}"
do
# 1. Create a pool
log_must zpool create -f $TESTPOOL $conf
block_device_wait ${DEV_DSKDIR}/${removedev}
mntpnt=$(get_prop mountpoint /$TESTPOOL) ||
log_fail "get_prop mountpoint /$TESTPOOL"
block_device_wait
# 2. Simulate physical removal of one device
remove_disk $removedev
log_must mkfile 1m $mntpnt/file
log_must zpool sync $TESTPOOL
# 3. Verify the device is unavailable.
log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"
# 3. Verify the device is offlined
log_must wait_vdev_state $TESTPOOL $removedev "OFFLINE"
# 4. Reattach the device
insert_disk $removedev
@ -115,8 +107,8 @@ do
# cleanup
destroy_pool $TESTPOOL
log_must parted "${DEV_DSKDIR}/${removedev}" -s -- mklabel msdos
block_device_wait ${DEV_DSKDIR}/${removedev}
log_must parted "/dev/${removedev}" -s -- mklabel msdos
block_device_wait
done
# 6. Repeat the same tests with a spare device: zed will use the spare to handle
@ -125,56 +117,19 @@ for conf in "${poolconfs[@]}"
do
# 1. Create a pool with a spare
log_must zpool create -f $TESTPOOL $conf
block_device_wait ${DEV_DSKDIR}/${removedev}
block_device_wait
log_must zpool add $TESTPOOL spare $sparedev
mntpnt=$(get_prop mountpoint /$TESTPOOL) ||
log_fail "get_prop mountpoint /$TESTPOOL"
# 2. Simulate physical removal of one device
remove_disk $removedev
log_must mkfile 1m $mntpnt/file
log_must zpool sync $TESTPOOL
# 3. Verify the device is handled by the spare.
log_must wait_hotspare_state $TESTPOOL $sparedev "INUSE"
log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"
# 4. Reattach the device
insert_disk $removedev
# 5. Verify the device is onlined
log_must wait_vdev_state $TESTPOOL $removedev "ONLINE"
# cleanup
destroy_pool $TESTPOOL
log_must parted "${DEV_DSKDIR}/${removedev}" -s -- mklabel msdos
block_device_wait ${DEV_DSKDIR}/${removedev}
done
# 7. Repeat the same tests again with a faulted spare device: zed should offline
# the removed data device if no spare is available
for conf in "${poolconfs[@]}"
do
# 1. Create a pool with a spare
log_must zpool create -f $TESTPOOL $conf
block_device_wait ${DEV_DSKDIR}/${removedev}
log_must zpool add $TESTPOOL spare $sparedev
mntpnt=$(get_prop mountpoint /$TESTPOOL) ||
log_fail "get_prop mountpoint /$TESTPOOL"
# 2. Fault the spare device making it unavailable
log_must zpool offline -f $TESTPOOL $sparedev
log_must wait_hotspare_state $TESTPOOL $sparedev "FAULTED"
# 3. Simulate physical removal of one device
remove_disk $removedev
log_must mkfile 1m $mntpnt/file
log_must zpool sync $TESTPOOL
# 4. Verify the device is unavailable
log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"
# 4. Verify the device is handled by the spare unless it is an l2arc disk,
# which can only be offlined
if [[ $(echo "$conf" | grep -c 'cache') -eq 0 ]]; then
log_must wait_hotspare_state $TESTPOOL $sparedev "INUSE"
else
log_must wait_vdev_state $TESTPOOL $removedev "OFFLINE"
fi
# 5. Reattach the device
insert_disk $removedev
@ -184,8 +139,39 @@ do
# cleanup
destroy_pool $TESTPOOL
log_must parted "${DEV_DSKDIR}/${removedev}" -s -- mklabel msdos
block_device_wait ${DEV_DSKDIR}/${removedev}
log_must parted "/dev/${removedev}" -s -- mklabel msdos
block_device_wait
done
# 7. Repeat the same tests again with a faulted spare device: zed should offline
# the removed data device if no spare is available
for conf in "${poolconfs[@]}"
do
# 1. Create a pool with a spare
log_must zpool create -f $TESTPOOL $conf
block_device_wait
log_must zpool add $TESTPOOL spare $sparedev
# 2. Fault the spare device making it unavailable
log_must zpool offline -f $TESTPOOL $sparedev
log_must wait_hotspare_state $TESTPOOL $sparedev "FAULTED"
# 3. Simulate physical removal of one device
remove_disk $removedev
# 4. Verify the device is offlined
log_must wait_vdev_state $TESTPOOL $removedev "OFFLINE"
# 5. Reattach the device
insert_disk $removedev
# 6. Verify the device is onlined
log_must wait_vdev_state $TESTPOOL $removedev "ONLINE"
# cleanup
destroy_pool $TESTPOOL
log_must parted "/dev/${removedev}" -s -- mklabel msdos
block_device_wait
done
log_pass "ZED detects physically removed devices"

View File

@ -2,8 +2,7 @@ pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/resilver
dist_pkgdata_SCRIPTS = \
setup.ksh \
cleanup.ksh \
resilver_restart_001.ksh \
resilver_restart_002.ksh
resilver_restart_001.ksh
dist_pkgdata_DATA = \
resilver.cfg

View File

@ -1,102 +0,0 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2020, Datto Inc. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/resilver/resilver.cfg
#
# DESCRIPTION:
# Testing that resilver completes when scan errors are encountered, but the
# relevant DTLs have not been lost.
#
# STRATEGY:
# 1. Create a pool (1k recordsize)
# 2. Create a 32m file (32k records)
# 3. Inject an error halfway through the file
# 4. Start a resilver, ensure the error is triggered and that the resilver
# does not restart after finishing
#
# NB: use legacy scanning to ensure scan of specific block causes error
#
function cleanup
{
log_must zinject -c all
destroy_pool $TESTPOOL
rm -f ${VDEV_FILES[@]} $SPARE_VDEV_FILE
log_must set_tunable32 zfs_scan_legacy $ORIG_SCAN_LEGACY
}
log_assert "Check for resilver restarts caused by scan errors"
ORIG_SCAN_LEGACY=$(get_tunable zfs_scan_legacy)
log_onexit cleanup
# use legacy scan to ensure injected error will be triggered
log_must set_tunable32 zfs_scan_legacy 1
# create the pool and a 32M file (32k blocks)
log_must truncate -s $VDEV_FILE_SIZE ${VDEV_FILES[0]} $SPARE_VDEV_FILE
log_must zpool create -f -O recordsize=1k $TESTPOOL ${VDEV_FILES[0]}
log_must dd if=/dev/urandom of=/$TESTPOOL/file bs=1M count=32 > /dev/null 2>&1
# determine objset/object
objset=$(zdb -d $TESTPOOL/ | sed -ne 's/.*ID \([0-9]*\).*/\1/p')
object=$(ls -i /$TESTPOOL/file | awk '{print $1}')
# inject event to cause error during resilver
log_must zinject -b `printf "%x:%x:0:3fff" $objset $object` $TESTPOOL
# clear events and start resilver
log_must zpool events -c
log_must zpool attach $TESTPOOL ${VDEV_FILES[0]} $SPARE_VDEV_FILE
log_note "waiting for read errors to start showing up"
for iter in {0..59}
do
zpool sync $TESTPOOL
err=$(zpool status $TESTPOOL | grep ${VDEV_FILES[0]} | awk '{print $3}')
(( $err > 0 )) && break
sleep 1
done
(( $err == 0 )) && log_fail "Unable to induce errors in resilver"
log_note "waiting for resilver to finish"
for iter in {0..59}
do
finish=$(zpool events | grep "sysevent.fs.zfs.resilver_finish" | wc -l)
(( $finish > 0 )) && break
sleep 1
done
(( $finish == 0 )) && log_fail "resilver took too long to finish"
# wait a few syncs to ensure that zfs does not restart the resilver
log_must zpool sync $TESTPOOL
log_must zpool sync $TESTPOOL
# check if resilver was restarted
start=$(zpool events | grep "sysevent.fs.zfs.resilver_start" | wc -l)
(( $start != 1 )) && log_fail "resilver restarted unnecessarily"
log_pass "Resilver did not restart unnecessarily from scan errors"