Merge pull request #169 from truenas/zfs-2.2-release-sync-cobia-release

Sync with Upstream zfs-2.2-release
Ameer Hamza 2023-10-03 00:38:00 +05:00 committed by GitHub
commit 8e57472551
GPG Key ID: 4AEE18F83AFDEB23
50 changed files with 529 additions and 129 deletions

META
View File

@@ -6,5 +6,5 @@ Release: rc4
 Release-Tags: relext
 License: CDDL
 Author: OpenZFS
-Linux-Maximum: 6.4
+Linux-Maximum: 6.5
 Linux-Minimum: 3.10

View File

@@ -374,6 +374,7 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
 /* Only autoreplace bad disks */
 if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
 (vs->vs_state != VDEV_STATE_FAULTED) &&
+(vs->vs_state != VDEV_STATE_REMOVED) &&
 (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
 zed_log_msg(LOG_INFO, " not autoreplacing since disk isn't in "
 "a bad state (currently %llu)", vs->vs_state);

View File

@@ -121,7 +121,7 @@ state_to_val()
 {
 state="$1"
 case "$state" in
-FAULTED|DEGRADED|UNAVAIL)
+FAULTED|DEGRADED|UNAVAIL|REMOVED)
 echo 1
 ;;
 ONLINE)

View File

@@ -132,6 +132,8 @@ static int zfs_do_zone(int argc, char **argv);
 static int zfs_do_unzone(int argc, char **argv);
 #endif
+static int zfs_do_help(int argc, char **argv);
 /*
 * Enable a reasonable set of defaults for libumem debugging on DEBUG builds.
 */
@@ -606,6 +608,9 @@ usage(boolean_t requested)
 (void) fprintf(fp,
 gettext("\nFor the delegated permission list, run: %s\n"),
 "zfs allow|unallow");
+(void) fprintf(fp,
+gettext("\nFor further help on a command or topic, "
+"run: %s\n"), "zfs help [<topic>]");
 }
 /*
@@ -8726,6 +8731,25 @@ zfs_do_version(int argc, char **argv)
 return (zfs_version_print() != 0);
 }
+/* Display documentation */
+static int
+zfs_do_help(int argc, char **argv)
+{
+char page[MAXNAMELEN];
+if (argc < 3 || strcmp(argv[2], "zfs") == 0)
+strcpy(page, "zfs");
+else if (strcmp(argv[2], "concepts") == 0 ||
+strcmp(argv[2], "props") == 0)
+snprintf(page, sizeof (page), "zfs%s", argv[2]);
+else
+snprintf(page, sizeof (page), "zfs-%s", argv[2]);
+execlp("man", "man", page, NULL);
+fprintf(stderr, "couldn't run man program: %s", strerror(errno));
+return (-1);
+}
 int
 main(int argc, char **argv)
 {
@@ -8781,6 +8805,12 @@ main(int argc, char **argv)
 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
 return (zfs_do_version(argc, argv));
+/*
+* Special case 'help'
+*/
+if (strcmp(cmdname, "help") == 0)
+return (zfs_do_help(argc, argv));
 if ((g_zfs = libzfs_init()) == NULL) {
 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
 return (1);

View File

@@ -126,6 +126,8 @@ static int zpool_do_version(int, char **);
 static int zpool_do_wait(int, char **);
+static int zpool_do_help(int argc, char **argv);
 static zpool_compat_status_t zpool_do_load_compat(
 const char *, boolean_t *);
@@ -538,6 +540,10 @@ usage(boolean_t requested)
 (void) fprintf(fp, "%s",
 get_usage(command_table[i].usage));
 }
+(void) fprintf(fp,
+gettext("\nFor further help on a command or topic, "
+"run: %s\n"), "zpool help [<topic>]");
 } else {
 (void) fprintf(fp, gettext("usage:\n"));
 (void) fprintf(fp, "%s", get_usage(current_command->usage));
@@ -11051,6 +11057,25 @@ zpool_do_version(int argc, char **argv)
 return (zfs_version_print() != 0);
 }
+/* Display documentation */
+static int
+zpool_do_help(int argc, char **argv)
+{
+char page[MAXNAMELEN];
+if (argc < 3 || strcmp(argv[2], "zpool") == 0)
+strcpy(page, "zpool");
+else if (strcmp(argv[2], "concepts") == 0 ||
+strcmp(argv[2], "props") == 0)
+snprintf(page, sizeof (page), "zpool%s", argv[2]);
+else
+snprintf(page, sizeof (page), "zpool-%s", argv[2]);
+execlp("man", "man", page, NULL);
+fprintf(stderr, "couldn't run man program: %s", strerror(errno));
+return (-1);
+}
 /*
 * Do zpool_load_compat() and print error message on failure
 */
@@ -11118,6 +11143,12 @@ main(int argc, char **argv)
 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
 return (zpool_do_version(argc, argv));
+/*
+* Special case 'help'
+*/
+if (strcmp(cmdname, "help") == 0)
+return (zpool_do_help(argc, argv));
 if ((g_zfs = libzfs_init()) == NULL) {
 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
 return (1);

View File

@@ -2457,8 +2457,7 @@ ztest_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
 zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
 object, offset, size, RL_READER);
-error = dmu_buf_hold(os, object, offset, zgd, &db,
-DMU_READ_NO_PREFETCH);
+error = dmu_buf_hold_noread(os, object, offset, zgd, &db);
 if (error == 0) {
 blkptr_t *bp = &lr->lr_blkptr;
@@ -3767,7 +3766,7 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
 else if (ashift > oldvd->vdev_top->vdev_ashift)
 expected_error = EDOM;
 else if (newvd_is_dspare && pvd != vdev_draid_spare_get_parent(newvd))
-expected_error = ENOTSUP;
+expected_error = EINVAL;
 else
 expected_error = 0;
@@ -6379,6 +6378,7 @@ ztest_reguid(ztest_ds_t *zd, uint64_t id)
 spa_t *spa = ztest_spa;
 uint64_t orig, load;
 int error;
+ztest_shared_t *zs = ztest_shared;
 if (ztest_opts.zo_mmp_test)
 return;
@@ -6388,6 +6388,7 @@ ztest_reguid(ztest_ds_t *zd, uint64_t id)
 (void) pthread_rwlock_wrlock(&ztest_name_lock);
 error = spa_change_guid(spa);
+zs->zs_guid = spa_guid(spa);
 (void) pthread_rwlock_unlock(&ztest_name_lock);
 if (error != 0)
@@ -6917,7 +6918,7 @@ ztest_trim(ztest_ds_t *zd, uint64_t id)
 * Verify pool integrity by running zdb.
 */
 static void
-ztest_run_zdb(const char *pool)
+ztest_run_zdb(uint64_t guid)
 {
 int status;
 char *bin;
@@ -6941,13 +6942,13 @@ ztest_run_zdb(const char *pool)
 free(set_gvars_args);
 size_t would = snprintf(zdb, len,
-"%s -bcc%s%s -G -d -Y -e -y %s -p %s %s",
+"%s -bcc%s%s -G -d -Y -e -y %s -p %s %"PRIu64,
 bin,
 ztest_opts.zo_verbose >= 3 ? "s" : "",
 ztest_opts.zo_verbose >= 4 ? "v" : "",
 set_gvars_args_joined,
 ztest_opts.zo_dir,
-pool);
+guid);
 ASSERT3U(would, <, len);
 umem_free(set_gvars_args_joined, strlen(set_gvars_args_joined) + 1);
@@ -7525,14 +7526,15 @@ ztest_import(ztest_shared_t *zs)
 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
 zs->zs_metaslab_sz =
 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
+zs->zs_guid = spa_guid(spa);
 spa_close(spa, FTAG);
 kernel_fini();
 if (!ztest_opts.zo_mmp_test) {
-ztest_run_zdb(ztest_opts.zo_pool);
+ztest_run_zdb(zs->zs_guid);
 ztest_freeze();
-ztest_run_zdb(ztest_opts.zo_pool);
+ztest_run_zdb(zs->zs_guid);
 }
 (void) pthread_rwlock_destroy(&ztest_name_lock);
@@ -7603,7 +7605,6 @@ ztest_run(ztest_shared_t *zs)
 dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
 dmu_objset_fast_stat(os, &dds);
 dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
-zs->zs_guid = dds.dds_guid;
 dmu_objset_disown(os, B_TRUE, FTAG);
 /*
@@ -7874,14 +7875,15 @@ ztest_init(ztest_shared_t *zs)
 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
 zs->zs_metaslab_sz =
 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
+zs->zs_guid = spa_guid(spa);
 spa_close(spa, FTAG);
 kernel_fini();
 if (!ztest_opts.zo_mmp_test) {
-ztest_run_zdb(ztest_opts.zo_pool);
+ztest_run_zdb(zs->zs_guid);
 ztest_freeze();
-ztest_run_zdb(ztest_opts.zo_pool);
+ztest_run_zdb(zs->zs_guid);
 }
 (void) pthread_rwlock_destroy(&ztest_name_lock);
@@ -8304,7 +8306,7 @@ main(int argc, char **argv)
 }
 if (!ztest_opts.zo_mmp_test)
-ztest_run_zdb(ztest_opts.zo_pool);
+ztest_run_zdb(zs->zs_guid);
 }
 if (ztest_opts.zo_verbose >= 1) {

View File

@@ -16,12 +16,63 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH], [
 ])
 ])
+dnl #
+dnl # 6.5.x API change,
+dnl # blkdev_get_by_path() takes 4 args
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH_4ARG], [
+ZFS_LINUX_TEST_SRC([blkdev_get_by_path_4arg], [
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+], [
+struct block_device *bdev __attribute__ ((unused)) = NULL;
+const char *path = "path";
+fmode_t mode = 0;
+void *holder = NULL;
+struct blk_holder_ops h;
+bdev = blkdev_get_by_path(path, mode, holder, &h);
+])
+])
 AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_GET_BY_PATH], [
-AC_MSG_CHECKING([whether blkdev_get_by_path() exists])
+AC_MSG_CHECKING([whether blkdev_get_by_path() exists and takes 3 args])
 ZFS_LINUX_TEST_RESULT([blkdev_get_by_path], [
 AC_MSG_RESULT(yes)
 ], [
-ZFS_LINUX_TEST_ERROR([blkdev_get_by_path()])
+AC_MSG_RESULT(no)
+AC_MSG_CHECKING([whether blkdev_get_by_path() exists and takes 4 args])
+ZFS_LINUX_TEST_RESULT([blkdev_get_by_path_4arg], [
+AC_DEFINE(HAVE_BLKDEV_GET_BY_PATH_4ARG, 1,
+[blkdev_get_by_path() exists and takes 4 args])
+AC_MSG_RESULT(yes)
+], [
+ZFS_LINUX_TEST_ERROR([blkdev_get_by_path()])
+])
+])
+])
+dnl #
+dnl # 6.5.x API change
+dnl # blk_mode_t was added as a type to supercede some places where fmode_t
+dnl # is used
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BLK_MODE_T], [
+ZFS_LINUX_TEST_SRC([blk_mode_t], [
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+], [
+blk_mode_t m __attribute((unused)) = (blk_mode_t)0;
+])
+])
+AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BLK_MODE_T], [
+AC_MSG_CHECKING([whether blk_mode_t is defined])
+ZFS_LINUX_TEST_RESULT([blk_mode_t], [
+AC_MSG_RESULT(yes)
+AC_DEFINE(HAVE_BLK_MODE_T, 1, [blk_mode_t is defined])
+], [
+AC_MSG_RESULT(no)
 ])
 ])
@@ -41,12 +92,35 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_PUT], [
 ])
 ])
+dnl #
+dnl # 6.5.x API change.
+dnl # blkdev_put() takes (void* holder) as arg 2
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_PUT_HOLDER], [
+ZFS_LINUX_TEST_SRC([blkdev_put_holder], [
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+], [
+struct block_device *bdev = NULL;
+void *holder = NULL;
+blkdev_put(bdev, holder);
+])
+])
 AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_PUT], [
 AC_MSG_CHECKING([whether blkdev_put() exists])
 ZFS_LINUX_TEST_RESULT([blkdev_put], [
 AC_MSG_RESULT(yes)
 ], [
-ZFS_LINUX_TEST_ERROR([blkdev_put()])
+AC_MSG_CHECKING([whether blkdev_put() accepts void* as arg 2])
+ZFS_LINUX_TEST_RESULT([blkdev_put_holder], [
+AC_MSG_RESULT(yes)
+AC_DEFINE(HAVE_BLKDEV_PUT_HOLDER, 1,
+[blkdev_put() accepts void* as arg 2])
+], [
+ZFS_LINUX_TEST_ERROR([blkdev_put()])
+])
 ])
 ])
@@ -495,7 +569,9 @@ AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BLK_STS_RESV_CONFLICT], [
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV], [
 ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH
+ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH_4ARG
 ZFS_AC_KERNEL_SRC_BLKDEV_PUT
+ZFS_AC_KERNEL_SRC_BLKDEV_PUT_HOLDER
 ZFS_AC_KERNEL_SRC_BLKDEV_REREAD_PART
 ZFS_AC_KERNEL_SRC_BLKDEV_INVALIDATE_BDEV
 ZFS_AC_KERNEL_SRC_BLKDEV_LOOKUP_BDEV
@@ -510,6 +586,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV], [
 ZFS_AC_KERNEL_SRC_BLKDEV_PART_TO_DEV
 ZFS_AC_KERNEL_SRC_BLKDEV_DISK_CHECK_MEDIA_CHANGE
 ZFS_AC_KERNEL_SRC_BLKDEV_BLK_STS_RESV_CONFLICT
+ZFS_AC_KERNEL_SRC_BLKDEV_BLK_MODE_T
 ])
 AC_DEFUN([ZFS_AC_KERNEL_BLKDEV], [
@@ -530,4 +607,5 @@ AC_DEFUN([ZFS_AC_KERNEL_BLKDEV], [
 ZFS_AC_KERNEL_BLKDEV_PART_TO_DEV
 ZFS_AC_KERNEL_BLKDEV_DISK_CHECK_MEDIA_CHANGE
 ZFS_AC_KERNEL_BLKDEV_BLK_STS_RESV_CONFLICT
+ZFS_AC_KERNEL_BLKDEV_BLK_MODE_T
 ])
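Note: a condensed illustration of how the two configure results above (HAVE_BLKDEV_GET_BY_PATH_4ARG and HAVE_BLKDEV_PUT_HOLDER) are typically consumed. This is a sketch only, with placeholder names (open_bdev_compat, close_bdev_compat); the actual wrappers added by this commit are vdev_blkdev_get_by_path()/vdev_blkdev_put() in the vdev_disk hunk further down.

    #include <linux/blkdev.h>

    #if !defined(HAVE_BLKDEV_GET_BY_PATH_4ARG)
    struct blk_holder_ops {};   /* dummy for pre-6.5 kernels, as in the vdev_disk hunk */
    #endif

    /* Placeholder wrapper: open a block device exclusively on either API. */
    static struct block_device *
    open_bdev_compat(const char *path, void *holder, const struct blk_holder_ops *hops)
    {
    #ifdef HAVE_BLKDEV_GET_BY_PATH_4ARG
        /* 6.5+: blk_mode_t open flags plus a struct blk_holder_ops pointer */
        return (blkdev_get_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE |
            BLK_OPEN_EXCL, holder, hops));
    #else
        /* older kernels: fmode_t flags, three arguments */
        return (blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE |
            FMODE_EXCL, holder));
    #endif
    }

    /* Placeholder wrapper: release it with the matching put() variant. */
    static void
    close_bdev_compat(struct block_device *bdev, void *holder)
    {
    #ifdef HAVE_BLKDEV_PUT_HOLDER
        blkdev_put(bdev, holder);   /* 6.5+: takes the holder, not the mode */
    #else
        blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
    #endif
    }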

View File

@@ -49,12 +49,42 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID], [
 ], [], [])
 ])
+dnl #
+dnl # 5.9.x API change
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG], [
+ZFS_LINUX_TEST_SRC([block_device_operations_release_void_1arg], [
+#include <linux/blkdev.h>
+void blk_release(struct gendisk *g) {
+(void) g;
+return;
+}
+static const struct block_device_operations
+bops __attribute__ ((unused)) = {
+.open = NULL,
+.release = blk_release,
+.ioctl = NULL,
+.compat_ioctl = NULL,
+};
+], [], [])
+])
 AC_DEFUN([ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID], [
-AC_MSG_CHECKING([whether bops->release() is void])
+AC_MSG_CHECKING([whether bops->release() is void and takes 2 args])
 ZFS_LINUX_TEST_RESULT([block_device_operations_release_void], [
 AC_MSG_RESULT(yes)
 ],[
-ZFS_LINUX_TEST_ERROR([bops->release()])
+AC_MSG_RESULT(no)
+AC_MSG_CHECKING([whether bops->release() is void and takes 1 arg])
+ZFS_LINUX_TEST_RESULT([block_device_operations_release_void_1arg], [
+AC_MSG_RESULT(yes)
+AC_DEFINE([HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG], [1],
+[Define if release() in block_device_operations takes 1 arg])
+],[
+ZFS_LINUX_TEST_ERROR([bops->release()])
+])
 ])
 ])
@@ -92,6 +122,7 @@ AC_DEFUN([ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK], [
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS], [
 ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_CHECK_EVENTS
 ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
+ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG
 ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
 ])
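Note: an illustrative (not part of the change) use of the new HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG result; my_release and my_bops are placeholder names. The same pattern is applied to zvol_release() in the zvol_os hunk further down.

    #include <linux/blkdev.h>

    #ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG
    static void
    my_release(struct gendisk *disk)                /* newer kernels: gendisk only */
    #else
    static void
    my_release(struct gendisk *disk, fmode_t mode)  /* older kernels: extra mode arg */
    #endif
    {
        /* per-open teardown goes here */
    }

    static const struct block_device_operations my_bops = {
        .release = my_release,
    };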

View File

@@ -0,0 +1,25 @@
+AC_DEFUN([ZFS_AC_KERNEL_SRC_COPY_SPLICE_READ], [
+dnl #
+dnl # Kernel 6.5 - generic_file_splice_read was removed in favor
+dnl # of copy_splice_read for the .splice_read member of the
+dnl # file_operations struct.
+dnl #
+ZFS_LINUX_TEST_SRC([has_copy_splice_read], [
+#include <linux/fs.h>
+struct file_operations fops __attribute__((unused)) = {
+.splice_read = copy_splice_read,
+};
+],[])
+])
+AC_DEFUN([ZFS_AC_KERNEL_COPY_SPLICE_READ], [
+AC_MSG_CHECKING([whether copy_splice_read() exists])
+ZFS_LINUX_TEST_RESULT([has_copy_splice_read], [
+AC_MSG_RESULT(yes)
+AC_DEFINE(HAVE_COPY_SPLICE_READ, 1,
+[copy_splice_read exists])
+],[
+AC_MSG_RESULT(no)
+])
+])
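Note: HAVE_COPY_SPLICE_READ is consumed in the zpl_file hunk further down; a minimal placeholder sketch (my_fops is illustrative):

    #include <linux/fs.h>

    static const struct file_operations my_fops = {
    #ifdef HAVE_COPY_SPLICE_READ
        .splice_read = copy_splice_read,            /* kernel 6.5+ */
    #else
        .splice_read = generic_file_splice_read,    /* removed in 6.5 */
    #endif
    };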

View File

@@ -0,0 +1,27 @@
+dnl #
+dnl # Linux 6.5 removes register_sysctl_table
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_REGISTER_SYSCTL_TABLE], [
+ZFS_LINUX_TEST_SRC([has_register_sysctl_table], [
+#include <linux/sysctl.h>
+static struct ctl_table dummy_table[] = {
+{}
+};
+],[
+struct ctl_table_header *h
+__attribute((unused)) = register_sysctl_table(dummy_table);
+])
+])
+AC_DEFUN([ZFS_AC_KERNEL_REGISTER_SYSCTL_TABLE], [
+AC_MSG_CHECKING([whether register_sysctl_table exists])
+ZFS_LINUX_TEST_RESULT([has_register_sysctl_table], [
+AC_MSG_RESULT([yes])
+AC_DEFINE(HAVE_REGISTER_SYSCTL_TABLE, 1,
+[register_sysctl_table exists])
+],[
+AC_MSG_RESULT([no])
+])
+])
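Note: a hedged sketch of how HAVE_REGISTER_SYSCTL_TABLE selects a registration path (my_table, my_dir, my_root, my_hostid, my_sysctl_register are placeholder names); the real conversion is in the spl-proc.c hunk further down.

    #include <linux/sysctl.h>
    #include <linux/errno.h>

    static unsigned long my_hostid;
    static struct ctl_table my_table[] = {
        {
            .procname = "hostid",
            .data = &my_hostid,
            .maxlen = sizeof (unsigned long),
            .mode = 0644,
            .proc_handler = proc_doulongvec_minmax,
        },
        {},
    };
    #ifdef HAVE_REGISTER_SYSCTL_TABLE
    /* old API: the directory chain is built from nested tables via .child */
    static struct ctl_table my_dir[] = {
        { .procname = "spl", .mode = 0555, .child = my_table, },
        {},
    };
    static struct ctl_table my_root[] = {
        { .procname = "kernel", .mode = 0555, .child = my_dir, },
        {},
    };
    #endif
    static struct ctl_table_header *my_header;

    static int
    my_sysctl_register(void)
    {
    #ifdef HAVE_REGISTER_SYSCTL_TABLE
        my_header = register_sysctl_table(my_root);
    #else
        /* 6.5+: the directory path is passed as a plain string */
        my_header = register_sysctl("kernel/spl", my_table);
    #endif
        return (my_header == NULL ? -ENOMEM : 0);
    }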

View File

@@ -6,8 +6,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_IOV_ITER], [
 #include <linux/fs.h>
 #include <linux/uio.h>
 ],[
-int type __attribute__ ((unused)) =
-ITER_IOVEC | ITER_KVEC | ITER_BVEC | ITER_PIPE;
+int type __attribute__ ((unused)) = ITER_KVEC;
 ])
 ZFS_LINUX_TEST_SRC([iov_iter_advance], [
@@ -93,6 +92,14 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_IOV_ITER], [
 struct iov_iter iter = { 0 };
 __attribute__((unused)) enum iter_type i = iov_iter_type(&iter);
 ])
+ZFS_LINUX_TEST_SRC([iter_iov], [
+#include <linux/fs.h>
+#include <linux/uio.h>
+],[
+struct iov_iter iter = { 0 };
+__attribute__((unused)) const struct iovec *iov = iter_iov(&iter);
+])
 ])
 AC_DEFUN([ZFS_AC_KERNEL_VFS_IOV_ITER], [
@@ -201,4 +208,19 @@ AC_DEFUN([ZFS_AC_KERNEL_VFS_IOV_ITER], [
 AC_DEFINE(HAVE_VFS_IOV_ITER, 1,
 [All required iov_iter interfaces are available])
 ])
+dnl #
+dnl # Kernel 6.5 introduces the iter_iov() function that returns the
+dnl # __iov member of an iov_iter*. The iov member was renamed to this
+dnl # __iov member, and is intended to be accessed via the helper
+dnl # function now.
+dnl #
+AC_MSG_CHECKING([whether iter_iov() is available])
+ZFS_LINUX_TEST_RESULT([iter_iov], [
+AC_MSG_RESULT(yes)
+AC_DEFINE(HAVE_ITER_IOV, 1,
+[iter_iov() is available])
+],[
+AC_MSG_RESULT(no)
+])
 ])

View File

@@ -160,6 +160,8 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
 ZFS_AC_KERNEL_SRC_FILEMAP
 ZFS_AC_KERNEL_SRC_WRITEPAGE_T
 ZFS_AC_KERNEL_SRC_RECLAIMED
+ZFS_AC_KERNEL_SRC_REGISTER_SYSCTL_TABLE
+ZFS_AC_KERNEL_SRC_COPY_SPLICE_READ
 case "$host_cpu" in
 powerpc*)
 ZFS_AC_KERNEL_SRC_CPU_HAS_FEATURE
@@ -299,6 +301,8 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
 ZFS_AC_KERNEL_FILEMAP
 ZFS_AC_KERNEL_WRITEPAGE_T
 ZFS_AC_KERNEL_RECLAIMED
+ZFS_AC_KERNEL_REGISTER_SYSCTL_TABLE
+ZFS_AC_KERNEL_COPY_SPLICE_READ
 case "$host_cpu" in
 powerpc*)
 ZFS_AC_KERNEL_CPU_HAS_FEATURE

View File

@@ -93,7 +93,6 @@ struct zfsvfs {
 zfs_teardown_lock_t z_teardown_lock;
 zfs_teardown_inactive_lock_t z_teardown_inactive_lock;
 list_t z_all_znodes; /* all vnodes in the fs */
-uint64_t z_nr_znodes; /* number of znodes in the fs */
 kmutex_t z_znodes_lock; /* lock for z_all_znodes */
 struct zfsctl_root *z_ctldir; /* .zfs directory pointer */
 boolean_t z_show_ctldir; /* expose .zfs in the root dir */

View File

@@ -347,6 +347,7 @@ zfs_check_media_change(struct block_device *bdev)
 #define vdev_bdev_reread_part(bdev) zfs_check_media_change(bdev)
 #elif defined(HAVE_DISK_CHECK_MEDIA_CHANGE)
 #define vdev_bdev_reread_part(bdev) disk_check_media_change(bdev->bd_disk)
+#define zfs_check_media_change(bdev) disk_check_media_change(bdev->bd_disk)
 #else
 /*
 * This is encountered if check_disk_change() and bdev_check_media_change()
@@ -397,6 +398,12 @@ vdev_lookup_bdev(const char *path, dev_t *dev)
 #endif
 }
+#if defined(HAVE_BLK_MODE_T)
+#define blk_mode_is_open_write(flag) ((flag) & BLK_OPEN_WRITE)
+#else
+#define blk_mode_is_open_write(flag) ((flag) & FMODE_WRITE)
+#endif
 /*
 * Kernels without bio_set_op_attrs use bi_rw for the bio flags.
 */
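Note: the blk_mode_is_open_write() macro added above lets callers test for a writable open without caring whether the kernel hands them blk_mode_t (BLK_OPEN_WRITE) or fmode_t (FMODE_WRITE). Illustrative caller sketch (check_append_write is a placeholder, and the flag types are simplified to unsigned int); the real call sites are the zfs_open(), zpl_common_open() and zvol_open() hunks further down.

    /* Reject writes to an append-only object unless O_APPEND was given. */
    static int
    check_append_write(unsigned int open_mode, unsigned int open_flag,
        boolean_t append_only)
    {
        if (blk_mode_is_open_write(open_mode) && append_only &&
            (open_flag & O_APPEND) == 0)
            return (EPERM);
        return (0);
    }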

View File

@@ -173,4 +173,16 @@ zfs_uio_iov_iter_init(zfs_uio_t *uio, struct iov_iter *iter, offset_t offset,
 }
 #endif
+#if defined(HAVE_ITER_IOV)
+#define zfs_uio_iter_iov(iter) iter_iov((iter))
+#else
+#define zfs_uio_iter_iov(iter) (iter)->iov
+#endif
+#if defined(HAVE_IOV_ITER_TYPE)
+#define zfs_uio_iov_iter_type(iter) iov_iter_type((iter))
+#else
+#define zfs_uio_iov_iter_type(iter) (iter)->type
+#endif
 #endif /* SPL_UIO_H */
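Note: the two compatibility macros added above let common code read the iovec array and the iterator type without touching iov_iter members that kernel 6.5 renamed or hid. Illustrative sketch (uio_from_iter is a placeholder name); the real user is zpl_uio_init() in the zpl_file hunk further down.

    /* Pick user vs. kernel address space from an iov_iter, old or new API. */
    static void
    uio_from_iter(zfs_uio_t *uio, struct iov_iter *to, loff_t pos,
        size_t count, size_t skip)
    {
        const struct iovec *iov = zfs_uio_iter_iov(to); /* iter_iov() or (to)->iov */
        zfs_uio_seg_t seg = (zfs_uio_iov_iter_type(to) & ITER_KVEC) ?
            UIO_SYSSPACE : UIO_USERSPACE;

        zfs_uio_iovec_init(uio, iov, to->nr_segs, pos, seg, count, skip);
    }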

View File

@@ -105,7 +105,6 @@ struct zfsvfs {
 rrmlock_t z_teardown_lock;
 krwlock_t z_teardown_inactive_lock;
 list_t z_all_znodes; /* all znodes in the fs */
-uint64_t z_nr_znodes; /* number of znodes in the fs */
 unsigned long z_rollback_time; /* last online rollback time */
 unsigned long z_snap_defer_time; /* last snapshot unmount deferral */
 kmutex_t z_znodes_lock; /* lock for z_all_znodes */

View File

@@ -29,7 +29,7 @@
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
-* Copyright (c) 2021, Klara Inc.
+* Copyright (c) 2021, 2023, Klara Inc.
 */
 #include <errno.h>
@@ -255,6 +255,7 @@ zpool_get_state_str(zpool_handle_t *zhp)
 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
 str = gettext("FAULTED");
 } else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
+status == ZPOOL_STATUS_IO_FAILURE_CONTINUE ||
 status == ZPOOL_STATUS_IO_FAILURE_MMP) {
 str = gettext("SUSPENDED");
 } else {

View File

@@ -650,10 +650,12 @@ send_worker(void *arg)
 unsigned int bufsiz = max_pipe_buffer(ctx->from);
 ssize_t rd;
-while ((rd = splice(ctx->from, NULL, ctx->to, NULL, bufsiz,
-SPLICE_F_MOVE | SPLICE_F_MORE)) > 0)
-;
+for (;;) {
+rd = splice(ctx->from, NULL, ctx->to, NULL, bufsiz,
+SPLICE_F_MOVE | SPLICE_F_MORE);
+if ((rd == -1 && errno != EINTR) || rd == 0)
+break;
+}
 int err = (rd == -1) ? errno : 0;
 close(ctx->from);
 return ((void *)(uintptr_t)err);

View File

@@ -203,11 +203,9 @@ For more information, see the
 section.
 .El
 .Pp
-Virtual devices cannot be nested, so a mirror or raidz virtual device can only
-contain files or disks.
-Mirrors of mirrors
-.Pq or other combinations
-are not allowed.
+Virtual devices cannot be nested arbitrarily.
+A mirror, raidz or draid virtual device can only be created with files or disks.
+Mirrors of mirrors or other such combinations are not allowed.
 .Pp
 A pool can have any number of virtual devices at the top of the configuration
 .Po known as

View File

@@ -169,4 +169,4 @@ gen-zstd-symbols:
 for obj in $(addprefix zstd/,$(ZSTD_UPSTREAM_OBJS)); do echo; echo "/* $${obj#zstd/}: */"; @OBJDUMP@ -t $$obj | awk '$$2 == "g" && !/ zfs_/ {print "#define\t" $$6 " zfs_" $$6}' | sort; done >> zstd/include/zstd_compat_wrapper.h
 check-zstd-symbols:
-@OBJDUMP@ -t $(addprefix zstd/,$(ZSTD_UPSTREAM_OBJS)) | awk '/file format/ {print} $$2 == "g" && !/ zfs_/ {++ret; print} END {exit ret}'
+@OBJDUMP@ -t $(addprefix zstd/,$(ZSTD_UPSTREAM_OBJS)) | awk '/file format/ {print} $$2 == "g" && (!/ zfs_/ && !/ __pfx_zfs_/) {++ret; print} END {exit ret}'

View File

@@ -1154,7 +1154,6 @@ zfsvfs_free(zfsvfs_t *zfsvfs)
 mutex_destroy(&zfsvfs->z_znodes_lock);
 mutex_destroy(&zfsvfs->z_lock);
-ASSERT3U(zfsvfs->z_nr_znodes, ==, 0);
 list_destroy(&zfsvfs->z_all_znodes);
 ZFS_TEARDOWN_DESTROY(zfsvfs);
 ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs);
@@ -1558,12 +1557,11 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
 * may add the parents of dir-based xattrs to the taskq
 * so we want to wait for these.
 *
-* We can safely read z_nr_znodes without locking because the
-* VFS has already blocked operations which add to the
-* z_all_znodes list and thus increment z_nr_znodes.
+* We can safely check z_all_znodes for being empty because the
+* VFS has already blocked operations which add to it.
 */
 int round = 0;
-while (zfsvfs->z_nr_znodes > 0) {
+while (!list_is_empty(&zfsvfs->z_all_znodes)) {
 taskq_wait_outstanding(dsl_pool_zrele_taskq(
 dmu_objset_pool(zfsvfs->z_os)), 0);
 if (++round > 1 && !unmounting)

View File

@@ -537,7 +537,6 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
 mutex_enter(&zfsvfs->z_znodes_lock);
 list_insert_tail(&zfsvfs->z_all_znodes, zp);
-zfsvfs->z_nr_znodes++;
 zp->z_zfsvfs = zfsvfs;
 mutex_exit(&zfsvfs->z_znodes_lock);
@@ -1286,7 +1285,6 @@ zfs_znode_free(znode_t *zp)
 mutex_enter(&zfsvfs->z_znodes_lock);
 POINTER_INVALIDATE(&zp->z_zfsvfs);
 list_remove(&zfsvfs->z_all_znodes, zp);
-zfsvfs->z_nr_znodes--;
 mutex_exit(&zfsvfs->z_znodes_lock);
 #if __FreeBSD_version >= 1300139

View File

@@ -47,6 +47,10 @@ static unsigned long table_min = 0;
 static unsigned long table_max = ~0;
 static struct ctl_table_header *spl_header = NULL;
+#ifndef HAVE_REGISTER_SYSCTL_TABLE
+static struct ctl_table_header *spl_kmem = NULL;
+static struct ctl_table_header *spl_kstat = NULL;
+#endif
 static struct proc_dir_entry *proc_spl = NULL;
 static struct proc_dir_entry *proc_spl_kmem = NULL;
 static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
@@ -624,6 +628,7 @@ static struct ctl_table spl_table[] = {
 .mode = 0644,
 .proc_handler = &proc_dohostid,
 },
+#ifdef HAVE_REGISTER_SYSCTL_TABLE
 {
 .procname = "kmem",
 .mode = 0555,
@@ -634,9 +639,11 @@ static struct ctl_table spl_table[] = {
 .mode = 0555,
 .child = spl_kstat_table,
 },
+#endif
 {},
 };
+#ifdef HAVE_REGISTER_SYSCTL_TABLE
 static struct ctl_table spl_dir[] = {
 {
 .procname = "spl",
@@ -648,21 +655,64 @@ static struct ctl_table spl_dir[] = {
 static struct ctl_table spl_root[] = {
 {
 .procname = "kernel",
 .mode = 0555,
 .child = spl_dir,
 },
 {}
 };
+#endif
+static void spl_proc_cleanup(void)
+{
+remove_proc_entry("kstat", proc_spl);
+remove_proc_entry("slab", proc_spl_kmem);
+remove_proc_entry("kmem", proc_spl);
+remove_proc_entry("taskq-all", proc_spl);
+remove_proc_entry("taskq", proc_spl);
+remove_proc_entry("spl", NULL);
+#ifndef HAVE_REGISTER_SYSCTL_TABLE
+if (spl_kstat) {
+unregister_sysctl_table(spl_kstat);
+spl_kstat = NULL;
+}
+if (spl_kmem) {
+unregister_sysctl_table(spl_kmem);
+spl_kmem = NULL;
+}
+#endif
+if (spl_header) {
+unregister_sysctl_table(spl_header);
+spl_header = NULL;
+}
+}
 int
 spl_proc_init(void)
 {
 int rc = 0;
+#ifdef HAVE_REGISTER_SYSCTL_TABLE
 spl_header = register_sysctl_table(spl_root);
 if (spl_header == NULL)
 return (-EUNATCH);
+#else
+spl_header = register_sysctl("kernel/spl", spl_table);
+if (spl_header == NULL)
+return (-EUNATCH);
+spl_kmem = register_sysctl("kernel/spl/kmem", spl_kmem_table);
+if (spl_kmem == NULL) {
+rc = -EUNATCH;
+goto out;
+}
+spl_kstat = register_sysctl("kernel/spl/kstat", spl_kstat_table);
+if (spl_kstat == NULL) {
+rc = -EUNATCH;
+goto out;
+}
+#endif
 proc_spl = proc_mkdir("spl", NULL);
 if (proc_spl == NULL) {
@@ -703,15 +753,8 @@ spl_proc_init(void)
 goto out;
 }
 out:
-if (rc) {
-remove_proc_entry("kstat", proc_spl);
-remove_proc_entry("slab", proc_spl_kmem);
-remove_proc_entry("kmem", proc_spl);
-remove_proc_entry("taskq-all", proc_spl);
-remove_proc_entry("taskq", proc_spl);
-remove_proc_entry("spl", NULL);
-unregister_sysctl_table(spl_header);
-}
+if (rc)
+spl_proc_cleanup();
 return (rc);
 }
@@ -719,13 +762,5 @@ out:
 void
 spl_proc_fini(void)
 {
-remove_proc_entry("kstat", proc_spl);
-remove_proc_entry("slab", proc_spl_kmem);
-remove_proc_entry("kmem", proc_spl);
-remove_proc_entry("taskq-all", proc_spl);
-remove_proc_entry("taskq", proc_spl);
-remove_proc_entry("spl", NULL);
-ASSERT(spl_header != NULL);
-unregister_sysctl_table(spl_header);
+spl_proc_cleanup();
 }

View File

@@ -80,9 +80,22 @@ typedef struct dio_request {
 static unsigned int zfs_vdev_failfast_mask = 1;
+#ifdef HAVE_BLK_MODE_T
+static blk_mode_t
+#else
 static fmode_t
+#endif
 vdev_bdev_mode(spa_mode_t spa_mode)
 {
+#ifdef HAVE_BLK_MODE_T
+blk_mode_t mode = 0;
+if (spa_mode & SPA_MODE_READ)
+mode |= BLK_OPEN_READ;
+if (spa_mode & SPA_MODE_WRITE)
+mode |= BLK_OPEN_WRITE;
+#else
 fmode_t mode = 0;
 if (spa_mode & SPA_MODE_READ)
@@ -90,6 +103,7 @@ vdev_bdev_mode(spa_mode_t spa_mode)
 if (spa_mode & SPA_MODE_WRITE)
 mode |= FMODE_WRITE;
+#endif
 return (mode);
 }
@@ -197,12 +211,47 @@ vdev_disk_kobj_evt_post(vdev_t *v)
 }
 }
+#if !defined(HAVE_BLKDEV_GET_BY_PATH_4ARG)
+/*
+ * Define a dummy struct blk_holder_ops for kernel versions
+ * prior to 6.5.
+ */
+struct blk_holder_ops {};
+#endif
+static struct block_device *
+vdev_blkdev_get_by_path(const char *path, spa_mode_t mode, void *holder,
+const struct blk_holder_ops *hops)
+{
+#ifdef HAVE_BLKDEV_GET_BY_PATH_4ARG
+return (blkdev_get_by_path(path,
+vdev_bdev_mode(mode) | BLK_OPEN_EXCL, holder, hops));
+#else
+return (blkdev_get_by_path(path,
+vdev_bdev_mode(mode) | FMODE_EXCL, holder));
+#endif
+}
+static void
+vdev_blkdev_put(struct block_device *bdev, spa_mode_t mode, void *holder)
+{
+#ifdef HAVE_BLKDEV_PUT_HOLDER
+return (blkdev_put(bdev, holder));
+#else
+return (blkdev_put(bdev, vdev_bdev_mode(mode) | FMODE_EXCL));
+#endif
+}
 static int
 vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
 uint64_t *logical_ashift, uint64_t *physical_ashift)
 {
 struct block_device *bdev;
+#ifdef HAVE_BLK_MODE_T
+blk_mode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
+#else
 fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
+#endif
 hrtime_t timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms);
 vdev_disk_t *vd;
@@ -252,15 +301,15 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
 reread_part = B_TRUE;
 }
-blkdev_put(bdev, mode | FMODE_EXCL);
+vdev_blkdev_put(bdev, mode, zfs_vdev_holder);
 }
 if (reread_part) {
-bdev = blkdev_get_by_path(disk_name, mode | FMODE_EXCL,
-zfs_vdev_holder);
+bdev = vdev_blkdev_get_by_path(disk_name, mode,
+zfs_vdev_holder, NULL);
 if (!IS_ERR(bdev)) {
 int error = vdev_bdev_reread_part(bdev);
-blkdev_put(bdev, mode | FMODE_EXCL);
+vdev_blkdev_put(bdev, mode, zfs_vdev_holder);
 if (error == 0) {
 timeout = MSEC2NSEC(
 zfs_vdev_open_timeout_ms * 2);
@@ -305,8 +354,8 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
 hrtime_t start = gethrtime();
 bdev = ERR_PTR(-ENXIO);
 while (IS_ERR(bdev) && ((gethrtime() - start) < timeout)) {
-bdev = blkdev_get_by_path(v->vdev_path, mode | FMODE_EXCL,
-zfs_vdev_holder);
+bdev = vdev_blkdev_get_by_path(v->vdev_path, mode,
+zfs_vdev_holder, NULL);
 if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
 /*
 * There is no point of waiting since device is removed
@@ -382,8 +431,8 @@ vdev_disk_close(vdev_t *v)
 return;
 if (vd->vd_bdev != NULL) {
-blkdev_put(vd->vd_bdev,
-vdev_bdev_mode(spa_mode(v->vdev_spa)) | FMODE_EXCL);
+vdev_blkdev_put(vd->vd_bdev, spa_mode(v->vdev_spa),
+zfs_vdev_holder);
 }
 rw_destroy(&vd->vd_lock);

View File

@@ -537,7 +537,6 @@ zfsctl_inode_alloc(zfsvfs_t *zfsvfs, uint64_t id,
 mutex_enter(&zfsvfs->z_znodes_lock);
 list_insert_tail(&zfsvfs->z_all_znodes, zp);
-zfsvfs->z_nr_znodes++;
 membar_producer();
 mutex_exit(&zfsvfs->z_znodes_lock);

View File

@@ -1348,12 +1348,11 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
 * may add the parents of dir-based xattrs to the taskq
 * so we want to wait for these.
 *
-* We can safely read z_nr_znodes without locking because the
-* VFS has already blocked operations which add to the
-* z_all_znodes list and thus increment z_nr_znodes.
+* We can safely check z_all_znodes for being empty because the
+* VFS has already blocked operations which add to it.
 */
 int round = 0;
-while (zfsvfs->z_nr_znodes > 0) {
+while (!list_is_empty(&zfsvfs->z_all_znodes)) {
 taskq_wait_outstanding(dsl_pool_zrele_taskq(
 dmu_objset_pool(zfsvfs->z_os)), 0);
 if (++round > 1 && !unmounting)

View File

@@ -186,7 +186,7 @@ zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
 return (error);
 /* Honor ZFS_APPENDONLY file attribute */
-if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
+if (blk_mode_is_open_write(mode) && (zp->z_pflags & ZFS_APPENDONLY) &&
 ((flag & O_APPEND) == 0)) {
 zfs_exit(zfsvfs, FTAG);
 return (SET_ERROR(EPERM));

View File

@@ -390,7 +390,6 @@ zfs_inode_destroy(struct inode *ip)
 mutex_enter(&zfsvfs->z_znodes_lock);
 if (list_link_active(&zp->z_link_node)) {
 list_remove(&zfsvfs->z_all_znodes, zp);
-zfsvfs->z_nr_znodes--;
 }
 mutex_exit(&zfsvfs->z_znodes_lock);
@@ -641,7 +640,6 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
 mutex_enter(&zfsvfs->z_znodes_lock);
 list_insert_tail(&zfsvfs->z_all_znodes, zp);
-zfsvfs->z_nr_znodes++;
 mutex_exit(&zfsvfs->z_znodes_lock);
 if (links > 0)

View File

@@ -42,7 +42,7 @@
 static int
 zpl_common_open(struct inode *ip, struct file *filp)
 {
-if (filp->f_mode & FMODE_WRITE)
+if (blk_mode_is_open_write(filp->f_mode))
 return (-EACCES);
 return (generic_file_open(ip, filp));

View File

@@ -301,15 +301,10 @@ zpl_uio_init(zfs_uio_t *uio, struct kiocb *kiocb, struct iov_iter *to,
 #if defined(HAVE_VFS_IOV_ITER)
 zfs_uio_iov_iter_init(uio, to, pos, count, skip);
 #else
-#ifdef HAVE_IOV_ITER_TYPE
-zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
-iov_iter_type(to) & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
-count, skip);
-#else
-zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
-to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
-count, skip);
-#endif
+zfs_uio_iovec_init(uio, zfs_uio_iter_iov(to), to->nr_segs, pos,
+zfs_uio_iov_iter_type(to) & ITER_KVEC ?
+UIO_SYSSPACE : UIO_USERSPACE,
+count, skip);
 #endif
 }
@@ -1328,7 +1323,11 @@ const struct file_operations zpl_file_operations = {
 .read_iter = zpl_iter_read,
 .write_iter = zpl_iter_write,
 #ifdef HAVE_VFS_IOV_ITER
+#ifdef HAVE_COPY_SPLICE_READ
+.splice_read = copy_splice_read,
+#else
 .splice_read = generic_file_splice_read,
+#endif
 .splice_write = iter_file_splice_write,
 #endif
 #else

View File

@@ -671,7 +671,11 @@ zvol_request(struct request_queue *q, struct bio *bio)
 }
 static int
+#ifdef HAVE_BLK_MODE_T
+zvol_open(struct gendisk *disk, blk_mode_t flag)
+#else
 zvol_open(struct block_device *bdev, fmode_t flag)
+#endif
 {
 zvol_state_t *zv;
 int error = 0;
@@ -686,10 +690,14 @@ retry:
 /*
 * Obtain a copy of private_data under the zvol_state_lock to make
 * sure that either the result of zvol free code path setting
-* bdev->bd_disk->private_data to NULL is observed, or zvol_os_free()
+* disk->private_data to NULL is observed, or zvol_os_free()
 * is not called on this zv because of the positive zv_open_count.
 */
+#ifdef HAVE_BLK_MODE_T
+zv = disk->private_data;
+#else
 zv = bdev->bd_disk->private_data;
+#endif
 if (zv == NULL) {
 rw_exit(&zvol_state_lock);
 return (SET_ERROR(-ENXIO));
@@ -769,14 +777,15 @@ retry:
 }
 }
-error = -zvol_first_open(zv, !(flag & FMODE_WRITE));
+error = -zvol_first_open(zv, !(blk_mode_is_open_write(flag)));
 if (drop_namespace)
 mutex_exit(&spa_namespace_lock);
 }
 if (error == 0) {
-if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
+if ((blk_mode_is_open_write(flag)) &&
+(zv->zv_flags & ZVOL_RDONLY)) {
 if (zv->zv_open_count == 0)
 zvol_last_close(zv);
@@ -791,14 +800,25 @@ retry:
 rw_exit(&zv->zv_suspend_lock);
 if (error == 0)
+#ifdef HAVE_BLK_MODE_T
+disk_check_media_change(disk);
+#else
 zfs_check_media_change(bdev);
+#endif
 return (error);
 }
 static void
-zvol_release(struct gendisk *disk, fmode_t mode)
+#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG
+zvol_release(struct gendisk *disk)
+#else
+zvol_release(struct gendisk *disk, fmode_t unused)
+#endif
 {
+#if !defined(HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG)
+(void) unused;
+#endif
 zvol_state_t *zv;
 boolean_t drop_suspend = B_TRUE;

View File

@@ -9103,15 +9103,16 @@ l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
 * write things before deciding to fail compression in nearly
 * every case.)
 */
-cabd = abd_alloc_for_io(size, ismd);
-tmp = abd_borrow_buf(cabd, size);
+uint64_t bufsize = MAX(size, asize);
+cabd = abd_alloc_for_io(bufsize, ismd);
+tmp = abd_borrow_buf(cabd, bufsize);
 psize = zio_compress_data(compress, to_write, &tmp, size,
 hdr->b_complevel);
 if (psize >= asize) {
 psize = HDR_GET_PSIZE(hdr);
-abd_return_buf_copy(cabd, tmp, size);
+abd_return_buf_copy(cabd, tmp, bufsize);
 HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
 to_write = cabd;
 abd_copy(to_write, hdr->b_l1hdr.b_pabd, psize);
@@ -9121,9 +9122,9 @@ l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
 }
 ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr));
 if (psize < asize)
-memset((char *)tmp + psize, 0, asize - psize);
+memset((char *)tmp + psize, 0, bufsize - psize);
 psize = HDR_GET_PSIZE(hdr);
-abd_return_buf_copy(cabd, tmp, size);
+abd_return_buf_copy(cabd, tmp, bufsize);
 to_write = cabd;
 }

View File

@@ -26,6 +26,7 @@
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+* Copyright (c) 2023 Hewlett Packard Enterprise Development LP.
 */
 #include <sys/dmu.h>
@@ -1358,30 +1359,23 @@ top_of_function:
 ext_quota = 0;
 if (used_on_disk >= quota) {
+if (retval == ENOSPC && (used_on_disk - quota) <
+dsl_pool_deferred_space(dd->dd_pool)) {
+retval = SET_ERROR(ERESTART);
+}
 /* Quota exceeded */
 mutex_exit(&dd->dd_lock);
 DMU_TX_STAT_BUMP(dmu_tx_quota);
 return (retval);
 } else if (used_on_disk + est_inflight >= quota + ext_quota) {
-if (est_inflight > 0 || used_on_disk < quota) {
-retval = SET_ERROR(ERESTART);
-} else {
-ASSERT3U(used_on_disk, >=, quota);
-if (retval == ENOSPC && (used_on_disk - quota) <
-dsl_pool_deferred_space(dd->dd_pool)) {
-retval = SET_ERROR(ERESTART);
-}
-}
 dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
-"quota=%lluK tr=%lluK err=%d\n",
+"quota=%lluK tr=%lluK\n",
 (u_longlong_t)used_on_disk>>10,
 (u_longlong_t)est_inflight>>10,
-(u_longlong_t)quota>>10, (u_longlong_t)asize>>10, retval);
+(u_longlong_t)quota>>10, (u_longlong_t)asize>>10);
 mutex_exit(&dd->dd_lock);
 DMU_TX_STAT_BUMP(dmu_tx_quota);
-return (retval);
+return (SET_ERROR(ERESTART));
 }
 /* We need to up our estimated delta before dropping dd_lock */

View File

@@ -3208,6 +3208,15 @@ metaslab_segment_weight(metaslab_t *msp)
 static boolean_t
 metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
 {
+/*
+* This case will usually but not always get caught by the checks below;
+* metaslabs can be loaded by various means, including the trim and
+* initialize code. Once that happens, without this check they are
+* allocatable even before they finish their first txg sync.
+*/
+if (unlikely(msp->ms_new))
+return (B_FALSE);
 /*
 * If the metaslab is loaded, ms_max_size is definitive and we can use
 * the fast check. If it's not, the ms_max_size is a lower bound (once

View File

@@ -930,12 +930,21 @@ spa_upgrade_errlog(spa_t *spa, dmu_tx_t *tx)
 if (spa->spa_errlog_last != 0) {
 sync_upgrade_errlog(spa, spa->spa_errlog_last, &newobj, tx);
 spa->spa_errlog_last = newobj;
+(void) zap_update(spa->spa_meta_objset,
+DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
+sizeof (uint64_t), 1, &spa->spa_errlog_last, tx);
 }
 if (spa->spa_errlog_scrub != 0) {
 sync_upgrade_errlog(spa, spa->spa_errlog_scrub, &newobj, tx);
 spa->spa_errlog_scrub = newobj;
+(void) zap_update(spa->spa_meta_objset,
+DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
+sizeof (uint64_t), 1, &spa->spa_errlog_scrub, tx);
 }
 mutex_exit(&spa->spa_errlog_lock);
 }

View File

@@ -27,6 +27,7 @@
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+* Copyright (c) 2023, Klara Inc.
 */
 #include <sys/zfs_context.h>
@@ -2756,8 +2757,7 @@ spa_state_to_name(spa_t *spa)
 vdev_state_t state = rvd->vdev_state;
 vdev_aux_t aux = rvd->vdev_stat.vs_aux;
-if (spa_suspended(spa) &&
-(spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE))
+if (spa_suspended(spa))
 return ("SUSPENDED");
 switch (state) {

View File

@@ -1550,7 +1550,16 @@ zil_lwb_write_done(zio_t *zio)
 lwb->lwb_state = LWB_STATE_WRITE_DONE;
 lwb->lwb_child_zio = NULL;
 lwb->lwb_write_zio = NULL;
+/*
+* If nlwb is not yet issued, zil_lwb_set_zio_dependency() is not
+* called for it yet, and when it will be, it won't be able to make
+* its write ZIO a parent this ZIO. In such case we can not defer
+* our flushes or below may be a race between the done callbacks.
+*/
 nlwb = list_next(&zilog->zl_lwb_list, lwb);
+if (nlwb && nlwb->lwb_state != LWB_STATE_ISSUED)
+nlwb = NULL;
 mutex_exit(&zilog->zl_lock);
 if (avl_numnodes(t) == 0)

View File

@@ -214,6 +214,7 @@ maybe = {
 'cli_root/zfs_get/zfs_get_009_pos': ['SKIP', 5479],
 'cli_root/zfs_rollback/zfs_rollback_001_pos': ['FAIL', known_reason],
 'cli_root/zfs_rollback/zfs_rollback_002_pos': ['FAIL', known_reason],
+'cli_root/zfs_share/zfs_share_concurrent_shares': ['FAIL', known_reason],
 'cli_root/zfs_snapshot/zfs_snapshot_002_neg': ['FAIL', known_reason],
 'cli_root/zfs_unshare/zfs_unshare_006_pos': ['SKIP', na_reason],
 'cli_root/zpool_add/zpool_add_004_pos': ['FAIL', known_reason],
@@ -259,10 +260,9 @@ if sys.platform.startswith('freebsd'):
 maybe.update({
 'cli_root/zfs_copies/zfs_copies_002_pos': ['FAIL', known_reason],
 'cli_root/zfs_inherit/zfs_inherit_001_neg': ['FAIL', known_reason],
-'cli_root/zfs_share/zfs_share_concurrent_shares':
-['FAIL', known_reason],
 'cli_root/zpool_import/zpool_import_012_pos': ['FAIL', known_reason],
 'delegate/zfs_allow_003_pos': ['FAIL', known_reason],
+'delegate/zfs_allow_010_pos': ['FAIL', known_reason],
 'inheritance/inherit_001_pos': ['FAIL', 11829],
 'resilver/resilver_restart_001': ['FAIL', known_reason],
 'pool_checkpoint/checkpoint_big_rewind': ['FAIL', 12622],

View File

@@ -1588,7 +1588,9 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
 functional/online_offline/setup.ksh \
 functional/pam/cleanup.ksh \
 functional/pam/pam_basic.ksh \
+functional/pam/pam_change_unmounted.ksh \
 functional/pam/pam_nounmount.ksh \
+functional/pam/pam_recursive.ksh \
 functional/pam/pam_short_password.ksh \
 functional/pam/setup.ksh \
 functional/pool_checkpoint/checkpoint_after_rewind.ksh \

View File

@@ -48,10 +48,10 @@ function cleanup
 log_onexit cleanup
-log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
 log_must set_tunable64 TXG_TIMEOUT 5000
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
 log_must dd if=/dev/urandom of=/$TESTPOOL/file bs=128K count=4
 log_must clonefile -f /$TESTPOOL/file /$TESTPOOL/clone 0 0 524288

View File

@@ -204,11 +204,11 @@ function histo_check_test_pool
 # 4096 blocksize count for asize. For verification we stick
 # to just lsize counts.
 #
-# The max_variance is hard-coded here at 12% to leave us some
-# margin. Testing has shown this normally to be in the range
-# of 2%-8%, but it may be as large as 11%.
+# Variances are expected since this test does not account for
+# metadata. The hardcoded limit here is empirical and should
+# not be construed as deterministic.
 ###################
-let max_variance=12
+let max_variance=15
 let fail_value=0
 let error_count=0
 log_note "Comparisons for ${pool}"

View File

@@ -117,7 +117,9 @@ usage:
 For the property list, run: zfs set|get
-For the delegated permission list, run: zfs allow|unallow")
+For the delegated permission list, run: zfs allow|unallow
+For further help on a command or topic, run: zfs help [<topic>]")
 cnt=0
 for cmd in ${neg_cmds[@]}; do
 log_mustnot zfs program $cmd $TESTPOOL $TESTZCP $TESTDS 2>&1

View File

@@ -60,7 +60,7 @@ log_must set_tunable64 VDEV_MIN_MS_COUNT 64
 # Minimum trim size is decreased to verify all trim sizes.
 typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
-log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 512
 log_must mkdir "$TESTDIR"
 log_must truncate -s $LARGESIZE "$LARGEFILE"

View File

@@ -52,7 +52,7 @@ LARGEFILE="$TESTDIR/largefile"
 # Reduce trim size to allow for tighter tolerance below when checking.
 typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
-log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 512
 log_must mkdir "$TESTDIR"
 log_must truncate -s $LARGESIZE "$LARGEFILE"

View File

@@ -17,6 +17,7 @@
 #
 # Copyright (c) 2014, 2016 by Delphix. All rights reserved.
 # Copyright (c) 2022 by Lawrence Livermore National Security, LLC.
+# Copyright (c) 2023 Hewlett Packard Enterprise Development LP.
 #
 . $STF_SUITE/include/libtest.shlib
@@ -51,11 +52,20 @@ log_must zfs create $TESTPOOL/$TESTFS
 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
 log_must zfs set compression=off $TESTPOOL/$TESTFS
-log_note "Writing files until ENOSPC."
+log_note "Writing Big(1G) files until ENOSPC."
 log_mustnot_expect "No space left on device" fio --name=test \
 --fallocate=none --rw=write --bs=1M --size=1G --numjobs=4 \
 --sync=1 --directory=$TESTDIR/ --group_reporting
+log_must rm $TESTDIR/test.*
+log_must test -z "$(ls -A $TESTDIR)"
+sync_pool $TESTPOOL true
+log_note "Writing small(10M) files until ENOSPC."
+log_mustnot_expect "No space left on device" fio --name=test \
+--fallocate=none --rw=write --bs=1M --size=10M --numjobs=200 \
+--sync=1 --directory=$TESTDIR/ --group_reporting
 log_must rm $TESTDIR/test.*
 log_must test -z "$(ls -A $TESTDIR)"

View File

@@ -57,7 +57,7 @@ log_onexit cleanup
 # Minimum trim size is decreased to verify all trim sizes.
 typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
-log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 512
 # Reduced TRIM_TXG_BATCH to make trimming more frequent.
 typeset trim_txg_batch=$(get_tunable TRIM_TXG_BATCH)

View File

@@ -54,7 +54,7 @@ log_onexit cleanup
 # Minimum trim size is decreased to verify all trim sizes.
 typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
-log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 512
 # Reduced TRIM_TXG_BATCH to make trimming more frequent.
 typeset trim_txg_batch=$(get_tunable TRIM_TXG_BATCH)

View File

@@ -55,7 +55,7 @@ log_onexit cleanup
 # Minimum trim size is decreased to verify all trim sizes.
 typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
-log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 512
 # Reduced TRIM_TXG_BATCH to make trimming more frequent.
 typeset trim_txg_batch=$(get_tunable TRIM_TXG_BATCH)

View File

@@ -57,7 +57,7 @@ log_onexit cleanup
 # Minimum trim size is decreased to verify all trim sizes.
 typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
-log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 512
 # Reduced TRIM_TXG_BATCH to make trimming more frequent.
 typeset trim_txg_batch=$(get_tunable TRIM_TXG_BATCH)

View File

@@ -54,7 +54,7 @@ log_onexit cleanup
 # Minimum trim size is decreased to verify all trim sizes.
 typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
-log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 512
 # Reduced TRIM_TXG_BATCH to make trimming more frequent.
 typeset trim_txg_batch=$(get_tunable TRIM_TXG_BATCH)