FreeBSD: Organize sysctls

FreeBSD had a few platform-specific ARC tunables in the wrong place:

- Move FreeBSD-specific ARC tunables into the same vfs.zfs.arc node as
  the rest of the ARC tunables.
- Move the handlers from arc_os.c to sysctl_os.c and add compat sysctls
  for the legacy names (the pattern is sketched below).
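
For example, the free_target tunable ends up with both spellings backed by a
single handler. This sketch is assembled from the hunks below; the
declarations themselves are verbatim from this commit, only the pairing here
is editorial:

    /* One handler serves both names. */
    int param_set_arc_free_target(SYSCTL_HANDLER_ARGS);

    /* Canonical name: vfs.zfs.arc.free_target (common macro, in arc_os.c). */
    ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, free_target,
        param_set_arc_free_target, 0, CTLFLAG_RW,
        "Desired number of free pages below which ARC triggers reclaim");

    /* Compat sysctl keeping the legacy name: vfs.zfs.arc_free_target
     * (in sysctl_os.c). */
    SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
        CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
        NULL, 0, param_set_arc_free_target, "IU",
        "Desired number of free pages below which ARC triggers reclaim"
        " (LEGACY)");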

While here, some additional cleanup:

- Most handlers are specific to a particular variable and don't need a
  pointer passed through the args (see the sketch after this list).
- Group blocks of related variables, handlers, and sysctl declarations
  into logical sections.
- Match variable types for temporaries in handlers with the type of the
  global variable.
- Remove leftover comments.
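
As a sketch of the first and third points, param_set_slop_shift no longer
round-trips through arg1; it reads and writes its one global directly, with
the temporary matching the global's int type. Both versions appear verbatim
in the sysctl_os.c diff below:

    /* Before: generic handler; the target variable arrives via arg1. */
    int
    param_set_slop_shift(SYSCTL_HANDLER_ARGS)
    {
        int val;
        int err;

        val = *(int *)arg1;
        err = sysctl_handle_int(oidp, &val, 0, req);
        if (err != 0 || req->newptr == NULL)
            return (err);
        if (val < 1 || val > 31)
            return (EINVAL);
        *(int *)arg1 = val;
        return (0);
    }

    /* After: specific to spa_slop_shift, so sysctl declarations pass NULL. */
    int
    param_set_slop_shift(SYSCTL_HANDLER_ARGS)
    {
        int val;
        int err;

        val = spa_slop_shift;
        err = sysctl_handle_int(oidp, &val, 0, req);
        if (err != 0 || req->newptr == NULL)
            return (err);
        if (val < 1 || val > 31)
            return (EINVAL);
        spa_slop_shift = val;
        return (0);
    }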

Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Signed-off-by: Ryan Moeller <ryan@iXsystems.com>
Closes #13756
Commit 7bb707ffaf (parent 4723eba8c0)
Authored by Ryan Moeller, 2022-08-09 09:05:47 +00:00; committed by Brian Behlendorf
3 changed files with 455 additions and 308 deletions

View File: include/os/freebsd/spl/sys/mod_os.h

@@ -59,15 +59,21 @@
 #define param_set_arc_long_args(var) \
 	CTLTYPE_ULONG, &var, 0, param_set_arc_long, "LU"
 
-#define param_set_arc_min_args(var) \
-	CTLTYPE_ULONG, &var, 0, param_set_arc_min, "LU"
-
-#define param_set_arc_max_args(var) \
-	CTLTYPE_ULONG, &var, 0, param_set_arc_max, "LU"
-
 #define param_set_arc_int_args(var) \
 	CTLTYPE_INT, &var, 0, param_set_arc_int, "I"
 
+#define param_set_arc_min_args(var) \
+	CTLTYPE_ULONG, NULL, 0, param_set_arc_min, "LU"
+
+#define param_set_arc_max_args(var) \
+	CTLTYPE_ULONG, NULL, 0, param_set_arc_max, "LU"
+
+#define param_set_arc_free_target_args(var) \
+	CTLTYPE_UINT, NULL, 0, param_set_arc_free_target, "IU"
+
+#define param_set_arc_no_grow_shift_args(var) \
+	CTLTYPE_INT, NULL, 0, param_set_arc_no_grow_shift, "I"
+
 #define param_set_deadman_failmode_args(var) \
 	CTLTYPE_STRING, NULL, 0, param_set_deadman_failmode, "A"
@@ -78,16 +84,16 @@
 	CTLTYPE_ULONG, NULL, 0, param_set_deadman_ziotime, "LU"
 
 #define param_set_multihost_interval_args(var) \
-	CTLTYPE_ULONG, &var, 0, param_set_multihost_interval, "LU"
+	CTLTYPE_ULONG, NULL, 0, param_set_multihost_interval, "LU"
 
 #define param_set_slop_shift_args(var) \
-	CTLTYPE_INT, &var, 0, param_set_slop_shift, "I"
+	CTLTYPE_INT, NULL, 0, param_set_slop_shift, "I"
 
 #define param_set_min_auto_ashift_args(var) \
-	CTLTYPE_U64, &var, 0, param_set_min_auto_ashift, "QU"
+	CTLTYPE_U64, NULL, 0, param_set_min_auto_ashift, "QU"
 
 #define param_set_max_auto_ashift_args(var) \
-	CTLTYPE_U64, &var, 0, param_set_max_auto_ashift, "QU"
+	CTLTYPE_U64, NULL, 0, param_set_max_auto_ashift, "QU"
 
 #define fletcher_4_param_set_args(var) \
 	CTLTYPE_STRING, NULL, 0, fletcher_4_param, "A"

View File: module/os/freebsd/zfs/arc_os.c

@@ -72,31 +72,14 @@ SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
  * We don't have a tunable for arc_free_target due to the dependency on
  * pagedaemon initialisation.
  */
-static int
-sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS)
-{
-	uint_t val;
-	int err;
-
-	val = zfs_arc_free_target;
-	err = sysctl_handle_int(oidp, &val, 0, req);
-	if (err != 0 || req->newptr == NULL)
-		return (err);
-
-	if (val < minfree)
-		return (EINVAL);
-	if (val > vm_cnt.v_page_count)
-		return (EINVAL);
-
-	zfs_arc_free_target = val;
-
-	return (0);
-}
-SYSCTL_DECL(_vfs_zfs);
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
-	CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof (uint_t),
-	sysctl_vfs_zfs_arc_free_target, "IU",
+int param_set_arc_free_target(SYSCTL_HANDLER_ARGS);
+ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, free_target,
+	param_set_arc_free_target, 0, CTLFLAG_RW,
 	"Desired number of free pages below which ARC triggers reclaim");
 
+int param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS);
+ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, no_grow_shift,
+	param_set_arc_no_grow_shift, 0, ZMOD_RW,
+	"log2(fraction of ARC which must be free to allow growing)");
+
 int64_t
 arc_available_memory(void)

View File: module/os/freebsd/zfs/sysctl_os.c

@@ -91,6 +91,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/arc_impl.h>
 #include <sys/dsl_pool.h>
+#include <sys/vmmeter.h>
 
 SYSCTL_DECL(_vfs_zfs);
 SYSCTL_NODE(_vfs_zfs, OID_AUTO, arc, CTLFLAG_RW, 0,
@@ -132,170 +133,8 @@ SYSCTL_DECL(_vfs_zfs_version);
 SYSCTL_CONST_STRING(_vfs_zfs_version, OID_AUTO, module, CTLFLAG_RD,
 	(ZFS_META_VERSION "-" ZFS_META_RELEASE), "OpenZFS module version");
 
-extern arc_state_t ARC_anon;
-extern arc_state_t ARC_mru;
-extern arc_state_t ARC_mru_ghost;
-extern arc_state_t ARC_mfu;
-extern arc_state_t ARC_mfu_ghost;
-extern arc_state_t ARC_l2c_only;
-
-/*
- * minimum lifespan of a prefetch block in clock ticks
- * (initialized in arc_init())
- */
-
 /* arc.c */
-
-int
-param_set_arc_max(SYSCTL_HANDLER_ARGS)
-{
-	uint64_t val;
-	int err;
-
-	val = zfs_arc_max;
-	err = sysctl_handle_long(oidp, &val, 0, req);
-	if (err != 0 || req->newptr == NULL)
-		return (SET_ERROR(err));
-
-	if (val != 0 && (val < MIN_ARC_MAX || val <= arc_c_min ||
-	    val >= arc_all_memory()))
-		return (SET_ERROR(EINVAL));
-
-	zfs_arc_max = val;
-	arc_tuning_update(B_TRUE);
-
-	/* Update the sysctl to the tuned value */
-	if (val != 0)
-		zfs_arc_max = arc_c_max;
-
-	return (0);
-}
-
-int
-param_set_arc_min(SYSCTL_HANDLER_ARGS)
-{
-	uint64_t val;
-	int err;
-
-	val = zfs_arc_min;
-	err = sysctl_handle_64(oidp, &val, 0, req);
-	if (err != 0 || req->newptr == NULL)
-		return (SET_ERROR(err));
-
-	if (val != 0 && (val < 2ULL << SPA_MAXBLOCKSHIFT || val > arc_c_max))
-		return (SET_ERROR(EINVAL));
-
-	zfs_arc_min = val;
-	arc_tuning_update(B_TRUE);
-
-	/* Update the sysctl to the tuned value */
-	if (val != 0)
-		zfs_arc_min = arc_c_min;
-
-	return (0);
-}
-
-/* legacy compat */
-extern uint64_t l2arc_write_max;	/* def max write size */
-extern uint64_t l2arc_write_boost;	/* extra warmup write */
-extern uint64_t l2arc_headroom;		/* # of dev writes */
-extern uint64_t l2arc_headroom_boost;
-extern uint64_t l2arc_feed_secs;	/* interval seconds */
-extern uint64_t l2arc_feed_min_ms;	/* min interval msecs */
-extern int l2arc_noprefetch;		/* don't cache prefetch bufs */
-extern int l2arc_feed_again;		/* turbo warmup */
-extern int l2arc_norw;			/* no reads during writes */
-
-/* BEGIN CSTYLED */
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
-	&l2arc_write_max, 0, "max write size (LEGACY)");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
-	&l2arc_write_boost, 0, "extra write during warmup (LEGACY)");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
-	&l2arc_headroom, 0, "number of dev writes (LEGACY)");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
-	&l2arc_feed_secs, 0, "interval seconds (LEGACY)");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
-	&l2arc_feed_min_ms, 0, "min interval milliseconds (LEGACY)");
-SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
-	&l2arc_noprefetch, 0, "don't cache prefetch bufs (LEGACY)");
-SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
-	&l2arc_feed_again, 0, "turbo warmup (LEGACY)");
-SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
-	&l2arc_norw, 0, "no reads during writes (LEGACY)");
-
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
-	&ARC_anon.arcs_size.rc_count, 0, "size of anonymous state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD,
-	&ARC_anon.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
-	"size of anonymous state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD,
-	&ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
-	"size of anonymous state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
-	&ARC_mru.arcs_size.rc_count, 0, "size of mru state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD,
-	&ARC_mru.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
-	"size of metadata in mru state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD,
-	&ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
-	"size of data in mru state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
-	&ARC_mru_ghost.arcs_size.rc_count, 0, "size of mru ghost state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD,
-	&ARC_mru_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
-	"size of metadata in mru ghost state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD,
-	&ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
-	"size of data in mru ghost state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
-	&ARC_mfu.arcs_size.rc_count, 0, "size of mfu state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD,
-	&ARC_mfu.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
-	"size of metadata in mfu state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD,
-	&ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
-	"size of data in mfu state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
-	&ARC_mfu_ghost.arcs_size.rc_count, 0, "size of mfu ghost state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD,
-	&ARC_mfu_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
-	"size of metadata in mfu ghost state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD,
-	&ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
-	"size of data in mfu ghost state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
-	&ARC_l2c_only.arcs_size.rc_count, 0, "size of mru state");
-/* END CSTYLED */
-
-static int
-sysctl_vfs_zfs_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
-{
-	int err, val;
-
-	val = arc_no_grow_shift;
-	err = sysctl_handle_int(oidp, &val, 0, req);
-	if (err != 0 || req->newptr == NULL)
-		return (err);
-
-	if (val < 0 || val >= arc_shrink_shift)
-		return (EINVAL);
-
-	arc_no_grow_shift = val;
-
-	return (0);
-}
-
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
-	CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, sizeof (int),
-	sysctl_vfs_zfs_arc_no_grow_shift, "I",
-	"log2(fraction of ARC which must be free to allow growing)");
-
 int
 param_set_arc_long(SYSCTL_HANDLER_ARGS)
 {
@@ -324,55 +163,319 @@ param_set_arc_int(SYSCTL_HANDLER_ARGS)
 	return (0);
 }
 
+int
+param_set_arc_max(SYSCTL_HANDLER_ARGS)
+{
+	unsigned long val;
+	int err;
+
+	val = zfs_arc_max;
+	err = sysctl_handle_long(oidp, &val, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (SET_ERROR(err));
+
+	if (val != 0 && (val < MIN_ARC_MAX || val <= arc_c_min ||
+	    val >= arc_all_memory()))
+		return (SET_ERROR(EINVAL));
+
+	zfs_arc_max = val;
+	arc_tuning_update(B_TRUE);
+
+	/* Update the sysctl to the tuned value */
+	if (val != 0)
+		zfs_arc_max = arc_c_max;
+
+	return (0);
+}
+
+/* BEGIN CSTYLED */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max,
+	CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	NULL, 0, param_set_arc_max, "LU",
+	"Maximum ARC size in bytes (LEGACY)");
+/* END CSTYLED */
+
+int
+param_set_arc_min(SYSCTL_HANDLER_ARGS)
+{
+	unsigned long val;
+	int err;
+
+	val = zfs_arc_min;
+	err = sysctl_handle_long(oidp, &val, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (SET_ERROR(err));
+
+	if (val != 0 && (val < 2ULL << SPA_MAXBLOCKSHIFT || val > arc_c_max))
+		return (SET_ERROR(EINVAL));
+
+	zfs_arc_min = val;
+	arc_tuning_update(B_TRUE);
+
+	/* Update the sysctl to the tuned value */
+	if (val != 0)
+		zfs_arc_min = arc_c_min;
+
+	return (0);
+}
+
 /* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min,
 	CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
-	&zfs_arc_min, sizeof (zfs_arc_min), param_set_arc_min, "LU",
-	"min arc size (LEGACY)");
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max,
-	CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
-	&zfs_arc_max, sizeof (zfs_arc_max), param_set_arc_max, "LU",
-	"max arc size (LEGACY)");
+	NULL, 0, param_set_arc_min, "LU",
+	"Minimum ARC size in bytes (LEGACY)");
+/* END CSTYLED */
+
+extern uint_t zfs_arc_free_target;
+
+static int
+param_set_arc_free_target(SYSCTL_HANDLER_ARGS)
+{
+	uint_t val;
+	int err;
+
+	val = zfs_arc_free_target;
+	err = sysctl_handle_int(oidp, &val, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (err);
+
+	if (val < minfree)
+		return (EINVAL);
+	if (val > vm_cnt.v_page_count)
+		return (EINVAL);
+
+	zfs_arc_free_target = val;
+
+	return (0);
+}
+
+/*
+ * NOTE: This sysctl is CTLFLAG_RW not CTLFLAG_RWTUN due to its dependency on
+ * pagedaemon initialization.
+ */
+/* BEGIN CSTYLED */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
+	CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+	NULL, 0, param_set_arc_free_target, "IU",
+	"Desired number of free pages below which ARC triggers reclaim"
+	" (LEGACY)");
+/* END CSTYLED */
+
+static int
+param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
+{
+	int err, val;
+
+	val = arc_no_grow_shift;
+	err = sysctl_handle_int(oidp, &val, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (err);
+
+	if (val < 0 || val >= arc_shrink_shift)
+		return (EINVAL);
+
+	arc_no_grow_shift = val;
+
+	return (0);
+}
+
+/* BEGIN CSTYLED */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
+	CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	NULL, 0, param_set_arc_no_grow_shift, "I",
+	"log2(fraction of ARC which must be free to allow growing) (LEGACY)");
+/* END CSTYLED */
+
+extern uint64_t l2arc_write_max;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max,
+	CTLFLAG_RWTUN, &l2arc_write_max, 0,
+	"Max write bytes per interval (LEGACY)");
+/* END CSTYLED */
+
+extern uint64_t l2arc_write_boost;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost,
+	CTLFLAG_RWTUN, &l2arc_write_boost, 0,
+	"Extra write bytes during device warmup (LEGACY)");
+/* END CSTYLED */
+
+extern uint64_t l2arc_headroom;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom,
+	CTLFLAG_RWTUN, &l2arc_headroom, 0,
+	"Number of max device writes to precache (LEGACY)");
+/* END CSTYLED */
+
+extern uint64_t l2arc_headroom_boost;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom_boost,
+	CTLFLAG_RWTUN, &l2arc_headroom_boost, 0,
+	"Compressed l2arc_headroom multiplier (LEGACY)");
+/* END CSTYLED */
+
+extern uint64_t l2arc_feed_secs;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs,
+	CTLFLAG_RWTUN, &l2arc_feed_secs, 0,
+	"Seconds between L2ARC writing (LEGACY)");
+/* END CSTYLED */
+
+extern uint64_t l2arc_feed_min_ms;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms,
+	CTLFLAG_RWTUN, &l2arc_feed_min_ms, 0,
+	"Min feed interval in milliseconds (LEGACY)");
+/* END CSTYLED */
+
+extern int l2arc_noprefetch;
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch,
+	CTLFLAG_RWTUN, &l2arc_noprefetch, 0,
+	"Skip caching prefetched buffers (LEGACY)");
+/* END CSTYLED */
+
+extern int l2arc_feed_again;
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again,
+	CTLFLAG_RWTUN, &l2arc_feed_again, 0,
+	"Turbo L2ARC warmup (LEGACY)");
+/* END CSTYLED */
+
+extern int l2arc_norw;
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw,
+	CTLFLAG_RWTUN, &l2arc_norw, 0,
+	"No reads during writes (LEGACY)");
+/* END CSTYLED */
+
+extern arc_state_t ARC_anon;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
+	&ARC_anon.arcs_size.rc_count, 0, "size of anonymous state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD,
+	&ARC_anon.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
+	"size of anonymous state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD,
+	&ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
+	"size of anonymous state");
+/* END CSTYLED */
+
+extern arc_state_t ARC_mru;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
+	&ARC_mru.arcs_size.rc_count, 0, "size of mru state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD,
+	&ARC_mru.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
+	"size of metadata in mru state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD,
+	&ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
+	"size of data in mru state");
+/* END CSTYLED */
+
+extern arc_state_t ARC_mru_ghost;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
+	&ARC_mru_ghost.arcs_size.rc_count, 0, "size of mru ghost state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD,
+	&ARC_mru_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
+	"size of metadata in mru ghost state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD,
+	&ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
+	"size of data in mru ghost state");
+/* END CSTYLED */
+
+extern arc_state_t ARC_mfu;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
+	&ARC_mfu.arcs_size.rc_count, 0, "size of mfu state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD,
+	&ARC_mfu.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
+	"size of metadata in mfu state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD,
+	&ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
+	"size of data in mfu state");
+/* END CSTYLED */
+
+extern arc_state_t ARC_mfu_ghost;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
+	&ARC_mfu_ghost.arcs_size.rc_count, 0, "size of mfu ghost state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD,
+	&ARC_mfu_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
+	"size of metadata in mfu ghost state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD,
+	&ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
+	"size of data in mfu ghost state");
+/* END CSTYLED */
+
+extern arc_state_t ARC_l2c_only;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
+	&ARC_l2c_only.arcs_size.rc_count, 0, "size of mru state");
 /* END CSTYLED */
 
 /* dbuf.c */
 
 /* dmu.c */
 
 /* dmu_zfetch.c */
 SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH (LEGACY)");
 
-/* max bytes to prefetch per stream (default 8MB) */
 extern uint32_t zfetch_max_distance;
-SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance, CTLFLAG_RWTUN,
-	&zfetch_max_distance, 0, "Max bytes to prefetch per stream (LEGACY)");
-
-/* max bytes to prefetch indirects for per stream (default 64MB) */
-extern uint32_t zfetch_max_idistance;
+
 /* BEGIN CSTYLED */
-SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_idistance, CTLFLAG_RWTUN,
-	&zfetch_max_idistance, 0,
+SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance,
+	CTLFLAG_RWTUN, &zfetch_max_distance, 0,
+	"Max bytes to prefetch per stream (LEGACY)");
+/* END CSTYLED */
+
+extern uint32_t zfetch_max_idistance;
+
+/* BEGIN CSTYLED */
+SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_idistance,
+	CTLFLAG_RWTUN, &zfetch_max_idistance, 0,
 	"Max bytes to prefetch indirects for per stream (LEGACY)");
 /* END CSTYLED */
 
 /* dsl_pool.c */
 
 /* dnode.c */
 extern int zfs_default_bs;
+
+/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs, OID_AUTO, default_bs, CTLFLAG_RWTUN,
 	&zfs_default_bs, 0, "Default dnode block shift");
+/* END CSTYLED */
 
 extern int zfs_default_ibs;
+
+/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs, OID_AUTO, default_ibs, CTLFLAG_RWTUN,
 	&zfs_default_ibs, 0, "Default dnode indirect block shift");
+/* END CSTYLED */
 
 /* dsl_scan.c */
 
 /* metaslab.c */
-/* BEGIN CSTYLED */
 /*
  * In pools where the log space map feature is not enabled we touch
  * multiple metaslabs (and their respective space maps) with each
@@ -382,10 +485,13 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, default_ibs, CTLFLAG_RWTUN,
  * is 8~16K.
  */
 extern int zfs_metaslab_sm_blksz_no_log;
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_no_log, CTLFLAG_RDTUN,
-	&zfs_metaslab_sm_blksz_no_log, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_no_log,
+	CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_no_log, 0,
 	"Block size for space map in pools with log space map disabled. "
 	"Power of 2 greater than 4096.");
+/* END CSTYLED */
 
 /*
  * When the log space map feature is enabled, we accumulate a lot of
@@ -393,10 +499,13 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_no_log, CTLFLAG_RDTUN,
  * from a bigger block size like 128K for the metaslab space maps.
  */
 extern int zfs_metaslab_sm_blksz_with_log;
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log, CTLFLAG_RDTUN,
-	&zfs_metaslab_sm_blksz_with_log, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log,
+	CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_with_log, 0,
 	"Block size for space map in pools with log space map enabled. "
 	"Power of 2 greater than 4096.");
+/* END CSTYLED */
 
 /*
  * The in-core space map representation is more compact than its on-disk form.
@@ -405,21 +514,30 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log, CTLFLAG_RDTUN,
  * Values should be greater than or equal to 100.
  */
 extern int zfs_condense_pct;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
-	&zfs_condense_pct, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct,
+	CTLFLAG_RWTUN, &zfs_condense_pct, 0,
 	"Condense on-disk spacemap when it is more than this many percents"
 	" of in-memory counterpart");
+/* END CSTYLED */
 
 extern int zfs_remove_max_segment;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, remove_max_segment, CTLFLAG_RWTUN,
-	&zfs_remove_max_segment, 0, "Largest contiguous segment ZFS will"
-	" attempt to allocate when removing a device");
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, remove_max_segment,
+	CTLFLAG_RWTUN, &zfs_remove_max_segment, 0,
+	"Largest contiguous segment ZFS will attempt to allocate when removing"
+	" a device");
+/* END CSTYLED */
 
 extern int zfs_removal_suspend_progress;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress, CTLFLAG_RWTUN,
-	&zfs_removal_suspend_progress, 0,
-	"Ensures certain actions can happen while in the middle of a removal");
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress,
+	CTLFLAG_RWTUN, &zfs_removal_suspend_progress, 0,
+	"Ensures certain actions can happen while in the middle of a removal");
+/* END CSTYLED */
 
 /*
  * Minimum size which forces the dynamic allocator to change
@@ -428,9 +546,13 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress, CTLFLAG_RWTUN,
  * aggressive strategy (i.e search by size rather than offset).
  */
 extern uint64_t metaslab_df_alloc_threshold;
-SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
-	&metaslab_df_alloc_threshold, 0, "Minimum size which forces the dynamic"
-	" allocator to change its allocation strategy");
+
+/* BEGIN CSTYLED */
+SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold,
+	CTLFLAG_RWTUN, &metaslab_df_alloc_threshold, 0,
+	"Minimum size which forces the dynamic allocator to change its"
+	" allocation strategy");
+/* END CSTYLED */
 
 /*
  * The minimum free space, in percent, which must be available
@@ -439,46 +561,84 @@ SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
  * switch to using best-fit allocations.
  */
 extern int metaslab_df_free_pct;
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
-	&metaslab_df_free_pct, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct,
+	CTLFLAG_RWTUN, &metaslab_df_free_pct, 0,
 	"The minimum free space, in percent, which must be available in a"
 	" space map to continue allocations in a first-fit fashion");
+/* END CSTYLED */
 
 /*
  * Percentage of all cpus that can be used by the metaslab taskq.
  */
 extern int metaslab_load_pct;
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
-	&metaslab_load_pct, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct,
+	CTLFLAG_RWTUN, &metaslab_load_pct, 0,
 	"Percentage of cpus that can be used by the metaslab taskq");
+/* END CSTYLED */
 
 /*
  * Max number of metaslabs per group to preload.
  */
 extern int metaslab_preload_limit;
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
-	&metaslab_preload_limit, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit,
+	CTLFLAG_RWTUN, &metaslab_preload_limit, 0,
 	"Max number of metaslabs per group to preload");
+/* END CSTYLED */
+
+/* mmp.c */
+
+int
+param_set_multihost_interval(SYSCTL_HANDLER_ARGS)
+{
+	int err;
+
+	err = sysctl_handle_long(oidp, &zfs_multihost_interval, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (err);
+
+	if (spa_mode_global != SPA_MODE_UNINIT)
+		mmp_signal_all_threads();
+
+	return (0);
+}
 
 /* spa.c */
 extern int zfs_ccw_retry_interval;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval, CTLFLAG_RWTUN,
-	&zfs_ccw_retry_interval, 0, "Configuration cache file write,"
-	" retry after failure, interval (seconds)");
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval,
+	CTLFLAG_RWTUN, &zfs_ccw_retry_interval, 0,
+	"Configuration cache file write, retry after failure, interval"
+	" (seconds)");
+/* END CSTYLED */
 
 extern uint64_t zfs_max_missing_tvds_cachefile;
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_cachefile, CTLFLAG_RWTUN,
-	&zfs_max_missing_tvds_cachefile, 0,
-	"allow importing pools with missing top-level vdevs in cache file");
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_cachefile,
+	CTLFLAG_RWTUN, &zfs_max_missing_tvds_cachefile, 0,
+	"Allow importing pools with missing top-level vdevs in cache file");
+/* END CSTYLED */
 
 extern uint64_t zfs_max_missing_tvds_scan;
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_scan, CTLFLAG_RWTUN,
-	&zfs_max_missing_tvds_scan, 0,
-	"allow importing pools with missing top-level vdevs during scan");
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_scan,
+	CTLFLAG_RWTUN, &zfs_max_missing_tvds_scan, 0,
+	"Allow importing pools with missing top-level vdevs during scan");
 /* END CSTYLED */
 
 /* spa_misc.c */
 extern int zfs_flags;
 static int
 sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS)
 {
@@ -566,14 +726,37 @@ param_set_deadman_failmode(SYSCTL_HANDLER_ARGS)
 	return (-param_set_deadman_failmode_common(buf));
 }
 
+int
+param_set_slop_shift(SYSCTL_HANDLER_ARGS)
+{
+	int val;
+	int err;
+
+	val = spa_slop_shift;
+	err = sysctl_handle_int(oidp, &val, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (err);
+
+	if (val < 1 || val > 31)
+		return (EINVAL);
+
+	spa_slop_shift = val;
+
+	return (0);
+}
+
 /* spacemap.c */
 extern int space_map_ibs;
+
+/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map_ibs, CTLFLAG_RWTUN,
 	&space_map_ibs, 0, "Space map indirect block shift");
+/* END CSTYLED */
 
 /* vdev.c */
 int
 param_set_min_auto_ashift(SYSCTL_HANDLER_ARGS)
 {
@@ -593,6 +776,14 @@ param_set_min_auto_ashift(SYSCTL_HANDLER_ARGS)
 	return (0);
 }
 
+/* BEGIN CSTYLED */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
+	CTLTYPE_U64 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	&zfs_vdev_min_auto_ashift, sizeof (zfs_vdev_min_auto_ashift),
+	param_set_min_auto_ashift, "QU",
+	"Min ashift used when creating new top-level vdev. (LEGACY)");
+/* END CSTYLED */
+
 int
 param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
 {
@@ -613,26 +804,25 @@ param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
 }
 
 /* BEGIN CSTYLED */
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
-	CTLTYPE_U64 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
-	&zfs_vdev_min_auto_ashift, sizeof (zfs_vdev_min_auto_ashift),
-	param_set_min_auto_ashift, "QU",
-	"Min ashift used when creating new top-level vdev. (LEGACY)");
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
 	CTLTYPE_U64 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
 	&zfs_vdev_max_auto_ashift, sizeof (zfs_vdev_max_auto_ashift),
 	param_set_max_auto_ashift, "QU",
 	"Max ashift used when optimizing for logical -> physical sector size on"
 	" new top-level vdevs. (LEGACY)");
+/* END CSTYLED */
 
 /*
  * Since the DTL space map of a vdev is not expected to have a lot of
  * entries, we default its block size to 4K.
  */
 extern int zfs_vdev_dtl_sm_blksz;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz, CTLFLAG_RDTUN,
-	&zfs_vdev_dtl_sm_blksz, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz,
+	CTLFLAG_RDTUN, &zfs_vdev_dtl_sm_blksz, 0,
 	"Block size for DTL space map. Power of 2 greater than 4096.");
+/* END CSTYLED */
 
 /*
  * vdev-wide space maps that have lots of entries written to them at
@@ -640,80 +830,48 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz, CTLFLAG_RDTUN,
  * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
  */
 extern int zfs_vdev_standard_sm_blksz;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz, CTLFLAG_RDTUN,
-	&zfs_vdev_standard_sm_blksz, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz,
+	CTLFLAG_RDTUN, &zfs_vdev_standard_sm_blksz, 0,
 	"Block size for standard space map. Power of 2 greater than 4096.");
 /* END CSTYLED */
 
 extern int vdev_validate_skip;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip, CTLFLAG_RDTUN,
-	&vdev_validate_skip, 0, "Enable to bypass vdev_validate().");
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip,
+	CTLFLAG_RDTUN, &vdev_validate_skip, 0,
+	"Enable to bypass vdev_validate().");
+/* END CSTYLED */
 
 /* vdev_cache.c */
 
 /* vdev_mirror.c */
-/*
- * The load configuration settings below are tuned by default for
- * the case where all devices are of the same rotational type.
- *
- * If there is a mixture of rotating and non-rotating media, setting
- * non_rotating_seek_inc to 0 may well provide better results as it
- * will direct more reads to the non-rotating vdevs which are more
- * likely to have a higher performance.
- */
 
 /* vdev_queue.c */
-/* BEGIN CSTYLED */
 extern uint32_t zfs_vdev_max_active;
-SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RWTUN,
-	&zfs_vdev_max_active, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight,
+	CTLFLAG_RWTUN, &zfs_vdev_max_active, 0,
 	"The maximum number of I/Os of all types active for each device."
 	" (LEGACY)");
-
-extern int zfs_vdev_def_queue_depth;
-SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, def_queue_depth, CTLFLAG_RWTUN,
-	&zfs_vdev_def_queue_depth, 0,
-	"Default queue depth for each allocator");
-
-SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, CTLFLAG_RDTUN,
-	&zio_exclude_metadata, 0,
-	"Exclude metadata buffers from dumps as well");
 /* END CSTYLED */
 
-int
-param_set_slop_shift(SYSCTL_HANDLER_ARGS)
-{
-	int val;
-	int err;
-
-	val = *(int *)arg1;
-
-	err = sysctl_handle_int(oidp, &val, 0, req);
-	if (err != 0 || req->newptr == NULL)
-		return (err);
-
-	if (val < 1 || val > 31)
-		return (EINVAL);
-
-	*(int *)arg1 = val;
-
-	return (0);
-}
-
-int
-param_set_multihost_interval(SYSCTL_HANDLER_ARGS)
-{
-	int err;
-
-	err = sysctl_handle_long(oidp, arg1, 0, req);
-	if (err != 0 || req->newptr == NULL)
-		return (err);
-
-	if (spa_mode_global != SPA_MODE_UNINIT)
-		mmp_signal_all_threads();
-
-	return (0);
-}
+extern int zfs_vdev_def_queue_depth;
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, def_queue_depth,
+	CTLFLAG_RWTUN, &zfs_vdev_def_queue_depth, 0,
+	"Default queue depth for each allocator");
+/* END CSTYLED */
+
+/* zio.c */
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata,
+	CTLFLAG_RDTUN, &zio_exclude_metadata, 0,
+	"Exclude metadata buffers from dumps as well");
+/* END CSTYLED */