Add missing ZFS tunables
This commit adds module options for all existing ZFS tunables. Ideally the average user should never need to modify any of these values. However, in practice you do sometimes need to tweak them for one reason or another, and in those cases it is nice not to have to resort to rebuilding from source. All tunables are visible to modinfo and the list is as follows:

$ modinfo module/zfs/zfs.ko
filename:       module/zfs/zfs.ko
license:        CDDL
author:         Sun Microsystems/Oracle, Lawrence Livermore National Laboratory
description:    ZFS
srcversion:     8EAB1D71DACE05B5AA61567
depends:        spl,znvpair,zcommon,zunicode,zavl
vermagic:       2.6.32-131.0.5.el6.x86_64 SMP mod_unload modversions
parm:           zvol_major:Major number for zvol device (uint)
parm:           zvol_threads:Number of threads for zvol device (uint)
parm:           zio_injection_enabled:Enable fault injection (int)
parm:           zio_bulk_flags:Additional flags to pass to bulk buffers (int)
parm:           zio_delay_max:Max zio millisec delay before posting event (int)
parm:           zio_requeue_io_start_cut_in_line:Prioritize requeued I/O (bool)
parm:           zil_replay_disable:Disable intent logging replay (int)
parm:           zfs_nocacheflush:Disable cache flushes (bool)
parm:           zfs_read_chunk_size:Bytes to read per chunk (long)
parm:           zfs_vdev_max_pending:Max pending per-vdev I/Os (int)
parm:           zfs_vdev_min_pending:Min pending per-vdev I/Os (int)
parm:           zfs_vdev_aggregation_limit:Max vdev I/O aggregation size (int)
parm:           zfs_vdev_time_shift:Deadline time shift for vdev I/O (int)
parm:           zfs_vdev_ramp_rate:Exponential I/O issue ramp-up rate (int)
parm:           zfs_vdev_read_gap_limit:Aggregate read I/O over gap (int)
parm:           zfs_vdev_write_gap_limit:Aggregate write I/O over gap (int)
parm:           zfs_vdev_scheduler:I/O scheduler (charp)
parm:           zfs_vdev_cache_max:Inflate reads small than max (int)
parm:           zfs_vdev_cache_size:Total size of the per-disk cache (int)
parm:           zfs_vdev_cache_bshift:Shift size to inflate reads too (int)
parm:           zfs_scrub_limit:Max scrub/resilver I/O per leaf vdev (int)
parm:           zfs_recover:Set to attempt to recover from fatal errors (int)
parm:           spa_config_path:SPA config file (/etc/zfs/zpool.cache) (charp)
parm:           zfs_zevent_len_max:Max event queue length (int)
parm:           zfs_zevent_cols:Max event column width (int)
parm:           zfs_zevent_console:Log events to the console (int)
parm:           zfs_top_maxinflight:Max I/Os per top-level (int)
parm:           zfs_resilver_delay:Number of ticks to delay resilver (int)
parm:           zfs_scrub_delay:Number of ticks to delay scrub (int)
parm:           zfs_scan_idle:Idle window in clock ticks (int)
parm:           zfs_scan_min_time_ms:Min millisecs to scrub per txg (int)
parm:           zfs_free_min_time_ms:Min millisecs to free per txg (int)
parm:           zfs_resilver_min_time_ms:Min millisecs to resilver per txg (int)
parm:           zfs_no_scrub_io:Set to disable scrub I/O (bool)
parm:           zfs_no_scrub_prefetch:Set to disable scrub prefetching (bool)
parm:           zfs_txg_timeout:Max seconds worth of delta per txg (int)
parm:           zfs_no_write_throttle:Disable write throttling (int)
parm:           zfs_write_limit_shift:log2(fraction of memory) per txg (int)
parm:           zfs_txg_synctime_ms:Target milliseconds between tgx sync (int)
parm:           zfs_write_limit_min:Min tgx write limit (ulong)
parm:           zfs_write_limit_max:Max tgx write limit (ulong)
parm:           zfs_write_limit_inflated:Inflated tgx write limit (ulong)
parm:           zfs_write_limit_override:Override tgx write limit (ulong)
parm:           zfs_prefetch_disable:Disable all ZFS prefetching (int)
parm:           zfetch_max_streams:Max number of streams per zfetch (uint)
parm:           zfetch_min_sec_reap:Min time before stream reclaim (uint)
parm:           zfetch_block_cap:Max number of blocks to fetch at a time (uint)
parm:           zfetch_array_rd_sz:Number of bytes in a array_read (ulong)
parm:           zfs_pd_blks_max:Max number of blocks to prefetch (int)
parm:           zfs_dedup_prefetch:Enable prefetching dedup-ed blks (int)
parm:           zfs_arc_min:Min arc size (ulong)
parm:           zfs_arc_max:Max arc size (ulong)
parm:           zfs_arc_meta_limit:Meta limit for arc size (ulong)
parm:           zfs_arc_reduce_dnlc_percent:Meta reclaim percentage (int)
parm:           zfs_arc_grow_retry:Seconds before growing arc size (int)
parm:           zfs_arc_shrink_shift:log2(fraction of arc to reclaim) (int)
parm:           zfs_arc_p_min_shift:arc_c shift to calc min/max arc_p (int)
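Every tunable in the list above is wired up the same way throughout the diff below: the value lives in an ordinary global variable and the module_param()/MODULE_PARM_DESC() macros register it with the module loader. A minimal sketch of that pattern, assuming a hypothetical module named "example" with a made-up parameter my_example_tunable (neither is part of this commit):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical tunable, for illustration only; not one of the ZFS parameters. */
static int my_example_tunable = 0;

/*
 * 0644 keeps the value visible and writable under
 * /sys/module/example/parameters/my_example_tunable once the module is
 * loaded; read-mostly values in this commit use 0444 instead.
 */
module_param(my_example_tunable, int, 0644);
MODULE_PARM_DESC(my_example_tunable, "Example tunable (illustration only)");

static int __init example_init(void)
{
	pr_info("example: my_example_tunable=%d\n", my_example_tunable);
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Parameters declared this way can be set at load time, for example with an options line in /etc/modprobe.d or directly on the modprobe command line, and those registered with 0644 permissions can also be changed at runtime through the files under /sys/module/<module>/parameters/.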
parent 8db77dd7ed
commit c409e4647f
@@ -32,7 +32,7 @@
 extern "C" {
 #endif
 
-extern uint64_t zfetch_array_rd_sz;
+extern unsigned long zfetch_array_rd_sz;
 
 struct dnode; /* so we can reference dnode */
 
@@ -43,7 +43,7 @@ typedef enum vdev_dtl_type {
 DTL_TYPES
 } vdev_dtl_type_t;
 
-extern boolean_t zfs_nocacheflush;
+extern int zfs_nocacheflush;
 
 extern int vdev_open(vdev_t *);
 extern void vdev_open_children(vdev_t *);
@@ -4767,15 +4767,25 @@ EXPORT_SYMBOL(arc_read);
 EXPORT_SYMBOL(arc_buf_remove_ref);
 EXPORT_SYMBOL(arc_getbuf_func);
 
-module_param(zfs_arc_min, ulong, 0644);
-MODULE_PARM_DESC(zfs_arc_min, "Minimum arc size");
+module_param(zfs_arc_min, ulong, 0444);
+MODULE_PARM_DESC(zfs_arc_min, "Min arc size");
 
-module_param(zfs_arc_max, ulong, 0644);
-MODULE_PARM_DESC(zfs_arc_max, "Maximum arc size");
+module_param(zfs_arc_max, ulong, 0444);
+MODULE_PARM_DESC(zfs_arc_max, "Max arc size");
 
-module_param(zfs_arc_meta_limit, ulong, 0644);
+module_param(zfs_arc_meta_limit, ulong, 0444);
 MODULE_PARM_DESC(zfs_arc_meta_limit, "Meta limit for arc size");
 
-module_param(arc_reduce_dnlc_percent, uint, 0644);
-MODULE_PARM_DESC(arc_reduce_dnlc_percent, "Meta reclaim percentage");
+module_param(zfs_arc_reduce_dnlc_percent, int, 0444);
+MODULE_PARM_DESC(zfs_arc_reduce_dnlc_percent, "Meta reclaim percentage");
+
+module_param(zfs_arc_grow_retry, int, 0444);
+MODULE_PARM_DESC(zfs_arc_grow_retry, "Seconds before growing arc size");
+
+module_param(zfs_arc_shrink_shift, int, 0444);
+MODULE_PARM_DESC(zfs_arc_shrink_shift, "log2(fraction of arc to reclaim)");
+
+module_param(zfs_arc_p_min_shift, int, 0444);
+MODULE_PARM_DESC(zfs_arc_p_min_shift, "arc_c shift to calc min/max arc_p");
+
 #endif
@@ -1187,3 +1187,8 @@ ddt_walk(spa_t *spa, ddt_bookmark_t *ddb, ddt_entry_t *dde)
 
 return (ENOENT);
 }
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zfs_dedup_prefetch, int, 0644);
+MODULE_PARM_DESC(zfs_dedup_prefetch,"Enable prefetching dedup-ed blks");
+#endif
@@ -571,4 +571,7 @@ traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
 #if defined(_KERNEL) && defined(HAVE_SPL)
 EXPORT_SYMBOL(traverse_dataset);
 EXPORT_SYMBOL(traverse_pool);
+
+module_param(zfs_pd_blks_max, int, 0644);
+MODULE_PARM_DESC(zfs_pd_blks_max, "Max number of blocks to prefetch");
 #endif
@@ -39,13 +39,13 @@
 int zfs_prefetch_disable = 0;
 
 /* max # of streams per zfetch */
-uint32_t zfetch_max_streams = 8;
+unsigned int zfetch_max_streams = 8;
 /* min time before stream reclaim */
-uint32_t zfetch_min_sec_reap = 2;
+unsigned int zfetch_min_sec_reap = 2;
 /* max number of blocks to fetch at a time */
-uint32_t zfetch_block_cap = 256;
+unsigned int zfetch_block_cap = 256;
 /* number of bytes in a array_read at which we stop prefetching (1Mb) */
-uint64_t zfetch_array_rd_sz = 1024 * 1024;
+unsigned long zfetch_array_rd_sz = 1024 * 1024;
 
 /* forward decls for static routines */
 static int dmu_zfetch_colinear(zfetch_t *, zstream_t *);

@@ -726,5 +726,17 @@ dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
 #if defined(_KERNEL) && defined(HAVE_SPL)
 module_param(zfs_prefetch_disable, int, 0644);
 MODULE_PARM_DESC(zfs_prefetch_disable, "Disable all ZFS prefetching");
+
+module_param(zfetch_max_streams, uint, 0644);
+MODULE_PARM_DESC(zfetch_max_streams, "Max number of streams per zfetch");
+
+module_param(zfetch_min_sec_reap, uint, 0644);
+MODULE_PARM_DESC(zfetch_min_sec_reap, "Min time before stream reclaim");
+
+module_param(zfetch_block_cap, uint, 0644);
+MODULE_PARM_DESC(zfetch_block_cap, "Max number of blocks to fetch at a time");
+
+module_param(zfetch_array_rd_sz, ulong, 0644);
+MODULE_PARM_DESC(zfetch_array_rd_sz, "Number of bytes in a array_read");
 #endif
 
@@ -44,10 +44,10 @@ int zfs_no_write_throttle = 0;
 int zfs_write_limit_shift = 3; /* 1/8th of physical memory */
 int zfs_txg_synctime_ms = 1000; /* target millisecs to sync a txg */
 
-uint64_t zfs_write_limit_min = 32 << 20; /* min write limit is 32MB */
-uint64_t zfs_write_limit_max = 0; /* max data payload per txg */
-uint64_t zfs_write_limit_inflated = 0;
-uint64_t zfs_write_limit_override = 0;
+unsigned long zfs_write_limit_min = 32 << 20; /* min write limit is 32MB */
+unsigned long zfs_write_limit_max = 0; /* max data payload per txg */
+unsigned long zfs_write_limit_inflated = 0;
+unsigned long zfs_write_limit_override = 0;
 
 kmutex_t zfs_write_limit_lock;
 

@@ -847,3 +847,26 @@ dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
 return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, NULL,
 tx, B_FALSE));
 }
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zfs_no_write_throttle, int, 0644);
+MODULE_PARM_DESC(zfs_no_write_throttle, "Disable write throttling");
+
+module_param(zfs_write_limit_shift, int, 0444);
+MODULE_PARM_DESC(zfs_write_limit_shift, "log2(fraction of memory) per txg");
+
+module_param(zfs_txg_synctime_ms, int, 0644);
+MODULE_PARM_DESC(zfs_txg_synctime_ms, "Target milliseconds between tgx sync");
+
+module_param(zfs_write_limit_min, ulong, 0444);
+MODULE_PARM_DESC(zfs_write_limit_min, "Min tgx write limit");
+
+module_param(zfs_write_limit_max, ulong, 0444);
+MODULE_PARM_DESC(zfs_write_limit_max, "Max tgx write limit");
+
+module_param(zfs_write_limit_inflated, ulong, 0444);
+MODULE_PARM_DESC(zfs_write_limit_inflated, "Inflated tgx write limit");
+
+module_param(zfs_write_limit_override, ulong, 0444);
+MODULE_PARM_DESC(zfs_write_limit_override, "Override tgx write limit");
+#endif
@@ -62,8 +62,8 @@ int zfs_scan_idle = 50; /* idle window in clock ticks */
 int zfs_scan_min_time_ms = 1000; /* min millisecs to scrub per txg */
 int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
 int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver per txg */
-boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
-boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable srub prefetching */
+int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
+int zfs_no_scrub_prefetch = B_FALSE; /* set to disable srub prefetching */
 enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
 int dsl_scan_delay_completion = B_FALSE; /* set to delay scan completion */
 

@@ -1785,3 +1785,35 @@ dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
 return (dsl_sync_task_do(dp, dsl_scan_setup_check,
 dsl_scan_setup_sync, dp->dp_scan, &func, 0));
 }
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zfs_top_maxinflight, int, 0644);
+MODULE_PARM_DESC(zfs_top_maxinflight, "Max I/Os per top-level");
+
+module_param(zfs_resilver_delay, int, 0644);
+MODULE_PARM_DESC(zfs_resilver_delay, "Number of ticks to delay resilver");
+
+module_param(zfs_scrub_delay, int, 0644);
+MODULE_PARM_DESC(zfs_scrub_delay, "Number of ticks to delay scrub");
+
+module_param(zfs_scan_idle, int, 0644);
+MODULE_PARM_DESC(zfs_scan_idle, "Idle window in clock ticks");
+
+module_param(zfs_scan_min_time_ms, int, 0644);
+MODULE_PARM_DESC(zfs_scan_min_time_ms, "Min millisecs to scrub per txg");
+
+module_param(zfs_free_min_time_ms, int, 0644);
+MODULE_PARM_DESC(zfs_free_min_time_ms, "Min millisecs to free per txg");
+
+module_param(zfs_resilver_min_time_ms, int, 0644);
+MODULE_PARM_DESC(zfs_resilver_min_time_ms, "Min millisecs to resilver per txg");
+
+module_param(zfs_no_scrub_io, int, 0644);
+MODULE_PARM_DESC(zfs_no_scrub_io, "Set to disable scrub I/O");
+
+module_param(zfs_no_scrub_prefetch, int, 0644);
+MODULE_PARM_DESC(zfs_no_scrub_prefetch, "Set to disable scrub prefetching");
+
+module_param(zfs_txg_timeout, int, 0644);
+MODULE_PARM_DESC(zfs_txg_timeout, "Max seconds worth of delta per txg");
+#endif
@@ -76,9 +76,9 @@
 #include <sys/time.h>
 #include <sys/zfs_ioctl.h>
 
-int zevent_len_max = 0;
-int zevent_cols = 80;
-int zevent_console = 0;
+int zfs_zevent_len_max = 0;
+int zfs_zevent_cols = 80;
+int zfs_zevent_console = 0;
 
 static int zevent_len_cur = 0;
 static int zevent_waiters = 0;

@@ -405,9 +405,9 @@ fm_nvprint(nvlist_t *nvl)
 console_printf("\n");
 
 if (nvlist_lookup_string(nvl, FM_CLASS, &class) == 0)
-c = fm_printf(0, c, zevent_cols, "%s", class);
+c = fm_printf(0, c, zfs_zevent_cols, "%s", class);
 
-if (fm_nvprintr(nvl, 0, c, zevent_cols) != 0)
+if (fm_nvprintr(nvl, 0, c, zfs_zevent_cols) != 0)
 console_printf("\n");
 
 console_printf("\n");

@@ -483,7 +483,7 @@ zfs_zevent_insert(zevent_t *ev)
 {
 mutex_enter(&zevent_lock);
 list_insert_head(&zevent_list, ev);
-if (zevent_len_cur >= zevent_len_max)
+if (zevent_len_cur >= zfs_zevent_len_max)
 zfs_zevent_drain(list_tail(&zevent_list));
 else
 zevent_len_cur++;

@@ -516,7 +516,7 @@ zfs_zevent_post(nvlist_t *nvl, nvlist_t *detector, zevent_cb_t *cb)
 return;
 }
 
-if (zevent_console)
+if (zfs_zevent_console)
 fm_nvprint(nvl);
 
 ev = zfs_zevent_alloc();

@@ -1488,8 +1488,8 @@ fm_init(void)
 zevent_len_cur = 0;
 zevent_flags = 0;
 
-if (zevent_len_max == 0)
-zevent_len_max = ERPT_MAX_ERRS * MAX(max_ncpus, 4);
+if (zfs_zevent_len_max == 0)
+zfs_zevent_len_max = ERPT_MAX_ERRS * MAX(max_ncpus, 4);
 
 /* Initialize zevent allocation and generation kstats */
 fm_ksp = kstat_create("zfs", 0, "fm", "misc", KSTAT_TYPE_NAMED,

@@ -1535,13 +1535,13 @@ fm_fini(void)
 }
 }
 
-module_param(zevent_len_max, int, 0644);
-MODULE_PARM_DESC(zevent_len_max, "Maximum event queue length");
+module_param(zfs_zevent_len_max, int, 0644);
+MODULE_PARM_DESC(zfs_zevent_len_max, "Max event queue length");
 
-module_param(zevent_cols, int, 0644);
-MODULE_PARM_DESC(zevent_cols, "Maximum event column width");
+module_param(zfs_zevent_cols, int, 0644);
+MODULE_PARM_DESC(zfs_zevent_cols, "Max event column width");
 
-module_param(zevent_console, int, 0644);
-MODULE_PARM_DESC(zevent_console, "Log events to the console");
+module_param(zfs_zevent_console, int, 0644);
+MODULE_PARM_DESC(zfs_zevent_console, "Log events to the console");
 
 #endif /* _KERNEL */
@@ -1758,4 +1758,7 @@ EXPORT_SYMBOL(spa_writeable);
 EXPORT_SYMBOL(spa_mode);
 
 EXPORT_SYMBOL(spa_namespace_lock);
+
+module_param(zfs_recover, int, 0644);
+MODULE_PARM_DESC(zfs_recover, "Set to attempt to recover from fatal errors");
 #endif
@@ -3174,4 +3174,7 @@ EXPORT_SYMBOL(vdev_degrade);
 EXPORT_SYMBOL(vdev_online);
 EXPORT_SYMBOL(vdev_offline);
 EXPORT_SYMBOL(vdev_clear);
+
+module_param(zfs_scrub_limit, int, 0644);
+MODULE_PARM_DESC(zfs_scrub_limit, "Max scrub/resilver I/O per leaf vdev");
 #endif
@@ -416,3 +416,14 @@ vdev_cache_stat_fini(void)
 vdc_ksp = NULL;
 }
 }
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zfs_vdev_cache_max, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_cache_max, "Inflate reads small than max");
+
+module_param(zfs_vdev_cache_size, int, 0444);
+MODULE_PARM_DESC(zfs_vdev_cache_size, "Total size of the per-disk cache");
+
+module_param(zfs_vdev_cache_bshift, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_cache_bshift, "Shift size to inflate reads too");
+#endif
@@ -763,4 +763,4 @@ vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
 }
 
 module_param(zfs_vdev_scheduler, charp, 0644);
-MODULE_PARM_DESC(zfs_vdev_scheduler, "IO Scheduler (noop)");
+MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
@@ -408,11 +408,23 @@ vdev_queue_io_done(zio_t *zio)
 
 #if defined(_KERNEL) && defined(HAVE_SPL)
 module_param(zfs_vdev_max_pending, int, 0644);
-MODULE_PARM_DESC(zfs_vdev_max_pending, "Maximum pending VDEV IO");
+MODULE_PARM_DESC(zfs_vdev_max_pending, "Max pending per-vdev I/Os");
 
 module_param(zfs_vdev_min_pending, int, 0644);
-MODULE_PARM_DESC(zfs_vdev_min_pending, "Minimum pending VDEV IO");
+MODULE_PARM_DESC(zfs_vdev_min_pending, "Min pending per-vdev I/Os");
 
 module_param(zfs_vdev_aggregation_limit, int, 0644);
-MODULE_PARM_DESC(zfs_vdev_aggregation_limit, "Maximum VDEV IO aggregation");
+MODULE_PARM_DESC(zfs_vdev_aggregation_limit, "Max vdev I/O aggregation size");
+
+module_param(zfs_vdev_time_shift, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_time_shift, "Deadline time shift for vdev I/O");
+
+module_param(zfs_vdev_ramp_rate, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_ramp_rate, "Exponential I/O issue ramp-up rate");
+
+module_param(zfs_vdev_read_gap_limit, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_read_gap_limit, "Aggregate read I/O over gap");
+
+module_param(zfs_vdev_write_gap_limit, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_write_gap_limit, "Aggregate write I/O over gap");
 #endif
@@ -335,7 +335,7 @@ mappedread(struct inode *ip, int nbytes, uio_t *uio)
 }
 #endif /* _KERNEL */
 
-offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
+unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
 
 /*
 * Read bytes from specified file into supplied buffer.

@@ -4716,3 +4716,8 @@ zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
 return (0);
 }
 #endif /* HAVE_UIO_ZEROCOPY */
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zfs_read_chunk_size, long, 0644);
+MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
+#endif
@@ -74,7 +74,7 @@ int zil_replay_disable = 0; /* disable intent logging replay */
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
-boolean_t zfs_nocacheflush = B_FALSE;
+int zfs_nocacheflush = 0;
 
 static kmem_cache_t *zil_lwb_cache;
 

@@ -1995,3 +1995,11 @@ zil_vdev_offline(const char *osname, void *arg)
 dmu_objset_rele(os, FTAG);
 return (error);
 }
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zil_replay_disable, int, 0644);
+MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay");
+
+module_param(zfs_nocacheflush, int, 0644);
+MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes");
+#endif
@@ -86,7 +86,7 @@ extern vmem_t *zio_alloc_arena;
 */
 #define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
 
-boolean_t zio_requeue_io_start_cut_in_line = B_TRUE;
+int zio_requeue_io_start_cut_in_line = 1;
 
 #ifdef ZFS_DEBUG
 int zio_buf_debug_limit = 16384;

@@ -3006,5 +3006,8 @@ module_param(zio_bulk_flags, int, 0644);
 MODULE_PARM_DESC(zio_bulk_flags, "Additional flags to pass to bulk buffers");
 
 module_param(zio_delay_max, int, 0644);
-MODULE_PARM_DESC(zio_delay_max, "Max zio delay before posting an event (ms)");
+MODULE_PARM_DESC(zio_delay_max, "Max zio millisec delay before posting event");
+
+module_param(zio_requeue_io_start_cut_in_line, int, 0644);
+MODULE_PARM_DESC(zio_requeue_io_start_cut_in_line, "Prioritize requeued I/O");
 #endif
@@ -46,7 +46,7 @@
 #include <sys/dmu_objset.h>
 #include <sys/fs/zfs.h>
 
-uint32_t zio_injection_enabled;
+uint32_t zio_injection_enabled = 0;
 
 typedef struct inject_handler {
 int zi_id;

@@ -513,3 +513,8 @@ zio_inject_fini(void)
 list_destroy(&inject_handlers);
 rw_destroy(&inject_lock);
 }
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zio_injection_enabled, int, 0644);
+MODULE_PARM_DESC(zio_injection_enabled, "Enable fault injection");
+#endif