Use cstyle -cpP in `make cstyle` check
Enable picky cstyle checks and resolve the new warnings. The vast
majority of the changes needed were to handle minor issues with
whitespace formatting. This patch contains no functional changes.

Non-whitespace changes are as follows:

* 8 times ; to { } in for/while loop
* fix missing ; in cmd/zed/agents/zfs_diagnosis.c
* comment (confim -> confirm)
* change endline , to ; in cmd/zpool/zpool_main.c
* a number of /* BEGIN CSTYLED */ /* END CSTYLED */ blocks
* /* CSTYLED */ markers
* change == 0 to !
* ulong to unsigned long in module/zfs/dsl_scan.c
* rearrangement of module_param lines in module/zfs/metaslab.c
* add { } block around statement after for_each_online_node

Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Reviewed-by: Håkan Johansson <f96hajo@chalmers.se>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #5465
commit 02730c333c (parent d57f03e40e)
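For context, the most common non-whitespace rewrite in the hunks below replaces a bare `;` after a for/while with an explicit empty `{ }` block, and one return statement trades an `== 0` comparison for `!`. A minimal, hedged C sketch of those two idioms follows; the helper names are hypothetical and are not taken from this patch.

#include <ctype.h>
#include <stdbool.h>
#include <string.h>

/* Advance past leading digits; the empty { } makes the no-op body explicit. */
static const char *
skip_digits(const char *s)
{
	for (; isdigit((unsigned char)*s); s++) { }
	return (s);
}

/* Negating the call result avoids spelling out the "== 0" comparison. */
static bool
names_match(const char *a, const char *b)
{
	return (!strcmp(a, b));
}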
@@ -43,7 +43,7 @@ checkstyle: cstyle shellcheck flake8
 cstyle:
 	@find ${top_srcdir} -name '*.[hc]' ! -name 'zfs_config.*' \
-		! -name '*.mod.c' -type f -exec scripts/cstyle.pl {} \+
+		! -name '*.mod.c' -type f -exec scripts/cstyle.pl -cpP {} \+
 
 shellcheck:
 	@if type shellcheck > /dev/null 2>&1; then \
@@ -53,7 +53,7 @@ static void sig_handler(int signo)
 	(void) sigaction(signo, &action, NULL);
 
 	if (rto_opts.rto_gdb)
-		if (system(gdb));
+		if (system(gdb)) { }
 
 	raise(signo);
 }

@@ -86,8 +86,7 @@ static void print_opts(raidz_test_opts_t *opts, boolean_t force)
 		    opts->rto_dcols, /* -d */
 		    ilog2(opts->rto_dsize), /* -s */
 		    opts->rto_sweep ? "yes" : "no", /* -S */
-		    verbose /* -v */
-		    );
+		    verbose); /* -v */
 	}
 }
 

@@ -115,8 +114,7 @@ static void usage(boolean_t requested)
 	    o->rto_dcols, /* -d */
 	    ilog2(o->rto_dsize), /* -s */
 	    rto_opts.rto_sweep ? "yes" : "no", /* -S */
-	    o->rto_v /* -d */
-	    );
+	    o->rto_v); /* -d */
 
 	exit(requested ? 0 : 1);
 }

@@ -476,16 +474,13 @@ run_rec_check_impl(raidz_test_opts_t *opts, raidz_map_t *rm, const int fn)
 		}
 	} else {
 		/* can reconstruct 3 failed data disk */
-		for (x0 = 0;
-		    x0 < opts->rto_dcols; x0++) {
+		for (x0 = 0; x0 < opts->rto_dcols; x0++) {
 			if (x0 >= rm->rm_cols - raidz_parity(rm))
 				continue;
-			for (x1 = x0 + 1;
-			    x1 < opts->rto_dcols; x1++) {
+			for (x1 = x0 + 1; x1 < opts->rto_dcols; x1++) {
 				if (x1 >= rm->rm_cols - raidz_parity(rm))
 					continue;
-				for (x2 = x1 + 1;
-				    x2 < opts->rto_dcols; x2++) {
+				for (x2 = x1 + 1; x2 < opts->rto_dcols; x2++) {
 					if (x2 >=
 					    rm->rm_cols - raidz_parity(rm))
 						continue;
@@ -378,7 +378,7 @@ zfs_case_solve(fmd_hdl_t *hdl, zfs_case_t *zcp, const char *faultname,
 	boolean_t serialize;
 	nvlist_t *fru = NULL;
 #ifdef _HAS_FMD_TOPO
-	nvlist_t *fmri
+	nvlist_t *fmri;
 	topo_hdl_t *thp;
 	int err;
 #endif
@@ -417,11 +417,11 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
 
 	/*
 	 * Note: on zfsonlinux statechange events are more than just
-	 * healthy ones so we need to confim the actual state value.
+	 * healthy ones so we need to confirm the actual state value.
 	 */
 	if (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
 	    nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE,
-	    &state) == 0 && state == VDEV_STATE_HEALTHY) {;
+	    &state) == 0 && state == VDEV_STATE_HEALTHY) {
 		zfs_vdev_repair(hdl, nvl);
 		return;
 	}
@@ -18,15 +18,10 @@
 typedef struct zed_strings zed_strings_t;
 
 zed_strings_t *zed_strings_create(void);
-
 void zed_strings_destroy(zed_strings_t *zsp);
-
 int zed_strings_add(zed_strings_t *zsp, const char *key, const char *s);
-
 const char *zed_strings_first(zed_strings_t *zsp);
-
 const char *zed_strings_next(zed_strings_t *zsp);
-
 int zed_strings_count(zed_strings_t *zsp);
 
 #endif /* !ZED_STRINGS_H */
@@ -517,9 +517,8 @@ run_one(cmd_args_t *args, uint32_t id, uint32_t T, uint32_t N,
 
 	dev_clear();
 
-	cmd_size =
-	    sizeof (zpios_cmd_t)
-	    + ((T + N + 1) * sizeof (zpios_stats_t));
+	cmd_size = sizeof (zpios_cmd_t) +
+	    ((T + N + 1) * sizeof (zpios_stats_t));
 	cmd = (zpios_cmd_t *)malloc(cmd_size);
 	if (cmd == NULL)
 		return (ENOMEM);
@@ -4201,7 +4201,7 @@ zpool_do_iostat(int argc, char **argv)
 			fprintf(stderr, " -%c", flag_to_arg[idx]);
 		}
 
-		fprintf(stderr, ". Try running a newer module.\n"),
+		fprintf(stderr, ". Try running a newer module.\n");
 		pool_list_free(list);
 
 		return (1);
@@ -1131,9 +1131,8 @@ ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
 
 	err = zfs_prop_index_to_string(prop, curval, &valname);
 	if (err)
-		(void) printf("%s %s = %llu at '%s'\n",
-		    osname, propname, (unsigned long long)curval,
-		    setpoint);
+		(void) printf("%s %s = %llu at '%s'\n", osname,
+		    propname, (unsigned long long)curval, setpoint);
 	else
 		(void) printf("%s %s = %s at '%s'\n",
 		    osname, propname, valname, setpoint);
@@ -42,7 +42,7 @@
  *     zfs_ace_hdr_t *, ...,
  *     uint32_t, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_ace_class,
 	TP_PROTO(znode_t *zn, zfs_ace_hdr_t *ace, uint32_t mask_matched),
 	TP_ARGS(zn, ace, mask_matched),

@@ -136,6 +136,7 @@ DECLARE_EVENT_CLASS(zfs_ace_class,
 	    __entry->z_type, __entry->z_flags, __entry->z_access_mask,
 	    __entry->mask_matched)
 );
+/* END CSTYLED */
 
 #define DEFINE_ACE_EVENT(name) \
 DEFINE_EVENT(zfs_ace_class, name, \
@@ -42,7 +42,7 @@
  * DTRACE_PROBE1(...,
  *     arc_buf_hdr_t *, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_arc_buf_hdr_class,
 	TP_PROTO(arc_buf_hdr_t *ab),
 	TP_ARGS(ab),

@@ -95,6 +95,7 @@ DECLARE_EVENT_CLASS(zfs_arc_buf_hdr_class,
 	    __entry->hdr_mfu_ghost_hits, __entry->hdr_l2_hits,
 	    __entry->hdr_refcount)
 );
+/* END CSTYLED */
 
 #define DEFINE_ARC_BUF_HDR_EVENT(name) \
 DEFINE_EVENT(zfs_arc_buf_hdr_class, name, \

@@ -117,7 +118,7 @@ DEFINE_ARC_BUF_HDR_EVENT(zfs_l2arc__miss);
  *     vdev_t *, ...,
  *     zio_t *, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_l2arc_rw_class,
 	TP_PROTO(vdev_t *vd, zio_t *zio),
 	TP_ARGS(vd, zio),

@@ -137,6 +138,7 @@ DECLARE_EVENT_CLASS(zfs_l2arc_rw_class,
 	    ZIO_TP_PRINTK_FMT, __entry->vdev_id, __entry->vdev_guid,
 	    __entry->vdev_state, ZIO_TP_PRINTK_ARGS)
 );
+/* END CSTYLED */
 
 #define DEFINE_L2ARC_RW_EVENT(name) \
 DEFINE_EVENT(zfs_l2arc_rw_class, name, \

@@ -153,7 +155,7 @@ DEFINE_L2ARC_RW_EVENT(zfs_l2arc__write);
  *     zio_t *, ...,
  *     l2arc_write_callback_t *, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_l2arc_iodone_class,
 	TP_PROTO(zio_t *zio, l2arc_write_callback_t *cb),
 	TP_ARGS(zio, cb),

@@ -161,6 +163,7 @@ DECLARE_EVENT_CLASS(zfs_l2arc_iodone_class,
 	TP_fast_assign(ZIO_TP_FAST_ASSIGN),
 	TP_printk(ZIO_TP_PRINTK_FMT, ZIO_TP_PRINTK_ARGS)
 );
+/* END CSTYLED */
 
 #define DEFINE_L2ARC_IODONE_EVENT(name) \
 DEFINE_EVENT(zfs_l2arc_iodone_class, name, \

@@ -178,7 +181,7 @@ DEFINE_L2ARC_IODONE_EVENT(zfs_l2arc__iodone);
  *     uint64_t,
  *     const zbookmark_phys_t *);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_arc_miss_class,
 	TP_PROTO(arc_buf_hdr_t *hdr,
 	    const blkptr_t *bp, uint64_t size, const zbookmark_phys_t *zb),

@@ -272,6 +275,7 @@ DECLARE_EVENT_CLASS(zfs_arc_miss_class,
 	    __entry->bp_lsize, __entry->zb_objset, __entry->zb_object,
 	    __entry->zb_level, __entry->zb_blkid)
 );
+/* END CSTYLED */
 
 #define DEFINE_ARC_MISS_EVENT(name) \
 DEFINE_EVENT(zfs_arc_miss_class, name, \

@@ -289,7 +293,7 @@ DEFINE_ARC_MISS_EVENT(zfs_arc__miss);
  *     uint64_t, ...,
  *     boolean_t, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_l2arc_evict_class,
 	TP_PROTO(l2arc_dev_t *dev,
 	    list_t *buflist, uint64_t taddr, boolean_t all),

@@ -330,6 +334,7 @@ DECLARE_EVENT_CLASS(zfs_l2arc_evict_class,
 	    __entry->l2ad_end, __entry->l2ad_first, __entry->l2ad_writing,
 	    __entry->taddr, __entry->all)
 );
+/* END CSTYLED */
 
 #define DEFINE_L2ARC_EVICT_EVENT(name) \
 DEFINE_EVENT(zfs_l2arc_evict_class, name, \
@@ -45,7 +45,7 @@
  *     int, ...,
  *     const char *, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_dprintf_class,
 	TP_PROTO(const char *file, const char *function, int line,
 	    const char *msg),

@@ -66,6 +66,7 @@ DECLARE_EVENT_CLASS(zfs_dprintf_class,
 	TP_printk("%s:%d:%s(): %s", __get_str(file), __entry->line,
 	    __get_str(function), __get_str(msg))
 );
+/* END CSTYLED */
 
 #define DEFINE_DPRINTF_EVENT(name) \
 DEFINE_EVENT(zfs_dprintf_class, name, \

@@ -83,7 +84,7 @@ DEFINE_DPRINTF_EVENT(zfs_zfs__dprintf);
  *     int, ...,
  *     uintptr_t, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_set_error_class,
 	TP_PROTO(const char *file, const char *function, int line,
 	    uintptr_t error),

@@ -104,6 +105,7 @@ DECLARE_EVENT_CLASS(zfs_set_error_class,
 	TP_printk("%s:%d:%s(): error 0x%lx", __get_str(file), __entry->line,
 	    __get_str(function), __entry->error)
 );
+/* END CSTYLED */
 
 #ifdef TP_CONDITION
 #define DEFINE_SET_ERROR_EVENT(name) \
@@ -41,7 +41,7 @@
  *     uint64_t, ...,
  *     uint64_t, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_delay_mintime_class,
 	TP_PROTO(dmu_tx_t *tx, uint64_t dirty, uint64_t min_tx_time),
 	TP_ARGS(tx, dirty, min_tx_time),

@@ -102,6 +102,7 @@ DECLARE_EVENT_CLASS(zfs_delay_mintime_class,
 #endif
 	    __entry->dirty, __entry->min_tx_time)
 );
+/* END CSTYLED */
 
 #define DEFINE_DELAY_MINTIME_EVENT(name) \
 DEFINE_EVENT(zfs_delay_mintime_class, name, \
@@ -41,7 +41,7 @@
  *     int64_t, ...,
  *     uint32_t, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_dnode_move_class,
 	TP_PROTO(dnode_t *dn, int64_t refcount, uint32_t dbufs),
 	TP_ARGS(dn, refcount, dbufs),

@@ -102,6 +102,7 @@ DECLARE_EVENT_CLASS(zfs_dnode_move_class,
 	    __entry->dn_maxblkid, __entry->dn_tx_holds, __entry->dn_holds,
 	    __entry->dn_have_spill, __entry->refcount, __entry->dbufs)
 );
+/* END CSTYLED */
 
 #define DEFINE_DNODE_MOVE_EVENT(name) \
 DEFINE_EVENT(zfs_dnode_move_class, name, \
@@ -41,7 +41,7 @@
  *     unsigned int, ...,
  *     void *, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_multilist_insert_remove_class,
 	TP_PROTO(multilist_t *ml, unsigned sublist_idx, void *obj),
 	TP_ARGS(ml, sublist_idx, obj),

@@ -60,6 +60,7 @@ DECLARE_EVENT_CLASS(zfs_multilist_insert_remove_class,
 	TP_printk("ml { offset %ld numsublists %llu sublistidx %u } ",
 	    __entry->ml_offset, __entry->ml_num_sublists, __entry->sublist_idx)
 );
+/* END CSTYLED */
 
 #define DEFINE_MULTILIST_INSERT_REMOVE_EVENT(name) \
 DEFINE_EVENT(zfs_multilist_insert_remove_class, name, \
@@ -40,7 +40,7 @@
  *     dsl_pool_t *, ...,
  *     uint64_t, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_txg_class,
 	TP_PROTO(dsl_pool_t *dp, uint64_t txg),
 	TP_ARGS(dp, txg),

@@ -52,6 +52,7 @@ DECLARE_EVENT_CLASS(zfs_txg_class,
 	),
 	TP_printk("txg %llu", __entry->txg)
 );
+/* END CSTYLED */
 
 #define DEFINE_TXG_EVENT(name) \
 DEFINE_EVENT(zfs_txg_class, name, \
@@ -39,7 +39,7 @@
  * DTRACE_PROBE1(...,
  *     zilog_t *, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_zil_class,
 	TP_PROTO(zilog_t *zilog),
 	TP_ARGS(zilog),

@@ -111,6 +111,7 @@ DECLARE_EVENT_CLASS(zfs_zil_class,
 	    __entry->zl_itx_list_sz, __entry->zl_cur_used,
 	    __entry->zl_replay_time, __entry->zl_replay_blks)
 );
+/* END CSTYLED */
 
 #define DEFINE_ZIL_EVENT(name) \
 DEFINE_EVENT(zfs_zil_class, name, \
@@ -36,6 +36,7 @@
 #include <sys/types.h>
 #include <sys/trace_common.h> /* For ZIO macros */
 
+/* BEGIN CSTYLED */
 TRACE_EVENT(zfs_zio__delay__miss,
 	TP_PROTO(zio_t *zio, hrtime_t now),
 	TP_ARGS(zio, now),

@@ -75,6 +76,7 @@ TRACE_EVENT(zfs_zio__delay__skip,
 	TP_fast_assign(ZIO_TP_FAST_ASSIGN),
 	TP_printk(ZIO_TP_PRINTK_FMT, ZIO_TP_PRINTK_ARGS)
 );
+/* END CSTYLED */
 
 #endif /* _TRACE_ZIO_H */
 
@@ -40,7 +40,7 @@
  *     zrlock_t *, ...,
  *     uint32_t, ...);
  */
-
+/* BEGIN CSTYLED */
 DECLARE_EVENT_CLASS(zfs_zrlock_class,
 	TP_PROTO(zrlock_t *zrl, uint32_t n),
 	TP_ARGS(zrl, n),

@@ -69,6 +69,7 @@ DECLARE_EVENT_CLASS(zfs_zrlock_class,
 	    __entry->refcount, __entry->n)
 #endif
 );
+/* END_CSTYLED */
 
 #define DEFINE_ZRLOCK_EVENT(name) \
 DEFINE_EVENT(zfs_zrlock_class, name, \
@@ -53,8 +53,8 @@ void vdev_raidz_math_init(void);
 void vdev_raidz_math_fini(void);
 struct raidz_impl_ops *vdev_raidz_math_get_ops(void);
 int vdev_raidz_math_generate(struct raidz_map *);
-int vdev_raidz_math_reconstruct(struct raidz_map *,
-    const int *, const int *, const int);
+int vdev_raidz_math_reconstruct(struct raidz_map *, const int *, const int *,
+    const int);
 int vdev_raidz_impl_set(const char *);
 
 #ifdef __cplusplus
@@ -148,8 +148,7 @@ static inline bool
 dir_emit(struct dir_context *ctx, const char *name, int namelen,
     uint64_t ino, unsigned type)
 {
-	return (ctx->actor(ctx->dirent, name, namelen, ctx->pos, ino, type)
-	    == 0);
+	return (!ctx->actor(ctx->dirent, name, namelen, ctx->pos, ino, type));
 }
 
 static inline bool
@@ -688,7 +688,8 @@ nfs_check_exportfs(void)
 	}
 
 	if (pid > 0) {
-		while ((rc = waitpid(pid, &status, 0)) <= 0 && errno == EINTR);
+		while ((rc = waitpid(pid, &status, 0)) <= 0 &&
+		    errno == EINTR) { }
 
 		if (rc <= 0) {
 			(void) close(nfs_exportfs_temp_fd);
@@ -39,7 +39,7 @@ pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER;
 /*
  * Theses are the void returning variants
  */
-
+/* BEGIN CSTYLED */
 #define ATOMIC_INC(name, type) \
 	void atomic_inc_##name(volatile type *target) \
 	{ \

@@ -381,6 +381,7 @@ ATOMIC_SWAP(32, uint32_t)
 ATOMIC_SWAP(uint, uint_t)
 ATOMIC_SWAP(ulong, ulong_t)
 ATOMIC_SWAP(64, uint64_t)
+/* END CSTYLED */
 
 void *
 atomic_swap_ptr(volatile void *target, void *bits)
@@ -53,7 +53,7 @@ getmntany(FILE *fp, struct mnttab *mgetp, struct mnttab *mrefp)
 	while (
 	    ((ret = _sol_getmntent(fp, mgetp)) == 0) && (
 	    DIFF(mnt_special) || DIFF(mnt_mountp) ||
-	    DIFF(mnt_fstype) || DIFF(mnt_mntopts)));
+	    DIFF(mnt_fstype) || DIFF(mnt_mntopts))) { }
 
 	return (ret);
 }
@@ -3416,12 +3416,12 @@ zfs_strip_partition(char *path)
 		d = part + 1;
 	} else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
 	    tmp[1] == 'd') {
-		for (d = &tmp[2]; isalpha(*d); part = ++d);
+		for (d = &tmp[2]; isalpha(*d); part = ++d) { }
 	} else if (strncmp("xvd", tmp, 3) == 0) {
-		for (d = &tmp[3]; isalpha(*d); part = ++d);
+		for (d = &tmp[3]; isalpha(*d); part = ++d) { }
 	}
 	if (part && d && *d != '\0') {
-		for (; isdigit(*d); d++);
+		for (; isdigit(*d); d++) { }
 		if (*d == '\0')
 			*part = '\0';
 	}
@@ -752,7 +752,7 @@ libzfs_run_process(const char *path, char *argv[], int flags)
 		int status;
 
 		while ((error = waitpid(pid, &status, 0)) == -1 &&
-		    errno == EINTR);
+		    errno == EINTR) { }
 		if (error < 0 || !WIFEXITED(status))
 			return (-1);
 
@@ -1596,7 +1596,6 @@ intel_aes_instructions_present(void)
 		if (memcmp((char *)(&ebx), "Genu", 4) == 0 &&
 		    memcmp((char *)(&edx), "ineI", 4) == 0 &&
 		    memcmp((char *)(&ecx), "ntel", 4) == 0) {
-
 			func = 1;
 			subfunc = 0;
 
@@ -726,7 +726,6 @@ intel_pclmulqdq_instruction_present(void)
 		if (memcmp((char *)(&ebx), "Genu", 4) == 0 &&
 		    memcmp((char *)(&edx), "ineI", 4) == 0 &&
 		    memcmp((char *)(&ecx), "ntel", 4) == 0) {
-
 			func = 1;
 			subfunc = 0;
 
@@ -67,7 +67,8 @@ static uint_t prov_tab_max = KCF_MAX_PROVIDERS;
 void
 kcf_prov_tab_destroy(void)
 {
-	if (prov_tab) kmem_free(prov_tab, prov_tab_max *
+	if (prov_tab)
+		kmem_free(prov_tab, prov_tab_max *
 		    sizeof (kcf_provider_desc_t *));
 }
 

@@ -701,12 +701,9 @@ kcf_prov_kstat_update(kstat_t *ksp, int rw)
 
 	ks_data = ksp->ks_data;
 
-	ks_data->ps_ops_total.value.ui64 =
-	    pd->pd_sched_info.ks_ndispatches;
-	ks_data->ps_ops_failed.value.ui64 =
-	    pd->pd_sched_info.ks_nfails;
-	ks_data->ps_ops_busy_rval.value.ui64 =
-	    pd->pd_sched_info.ks_nbusy_rval;
+	ks_data->ps_ops_total.value.ui64 = pd->pd_sched_info.ks_ndispatches;
+	ks_data->ps_ops_failed.value.ui64 = pd->pd_sched_info.ks_nfails;
+	ks_data->ps_ops_busy_rval.value.ui64 = pd->pd_sched_info.ks_nbusy_rval;
 	ks_data->ps_ops_passed.value.ui64 =
 	    pd->pd_sched_info.ks_ndispatches -
 	    pd->pd_sched_info.ks_nfails -
@@ -1486,8 +1486,8 @@ abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
 	else
 		pos = abd->abd_u.abd_scatter.abd_offset + off;
 
-	return ((pos + size + PAGESIZE - 1) >> PAGE_SHIFT)
-	    - (pos >> PAGE_SHIFT);
+	return ((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
+	    (pos >> PAGE_SHIFT);
 }
 
 /*

@@ -1537,6 +1537,7 @@ abd_scatter_bio_map_off(struct bio *bio, abd_t *abd,
 module_param(zfs_abd_scatter_enabled, int, 0644);
 MODULE_PARM_DESC(zfs_abd_scatter_enabled,
 	"Toggle whether ABD allocations must be linear.");
+/* CSTYLED */
 module_param(zfs_abd_scatter_max_order, uint, 0644);
 MODULE_PARM_DESC(zfs_abd_scatter_max_order,
 	"Maximum order allocation used for a scatter ABD.");
@@ -7688,6 +7688,7 @@ EXPORT_SYMBOL(arc_getbuf_func);
 EXPORT_SYMBOL(arc_add_prune_callback);
 EXPORT_SYMBOL(arc_remove_prune_callback);
 
+/* BEGIN CSTYLED */
 module_param(zfs_arc_min, ulong, 0644);
 MODULE_PARM_DESC(zfs_arc_min, "Min arc size");
 

@@ -7786,5 +7787,5 @@ MODULE_PARM_DESC(zfs_arc_dnode_limit_percent,
 module_param(zfs_arc_dnode_reduce_percent, ulong, 0644);
 MODULE_PARM_DESC(zfs_arc_dnode_reduce_percent,
 	"Percentage of excess dnodes to try to unpin");
-
+/* END CSTYLED */
 #endif
@@ -2686,8 +2686,7 @@ __dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
 
 		ASSERT3P(dh->dh_parent, ==, NULL);
 		dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
-		    dh->dh_fail_sparse, &dh->dh_parent,
-		    &dh->dh_bp, dh);
+		    dh->dh_fail_sparse, &dh->dh_parent, &dh->dh_bp, dh);
 		if (dh->dh_fail_sparse) {
 			if (dh->dh_err == 0 &&
 			    dh->dh_bp && BP_IS_HOLE(dh->dh_bp))

@@ -3884,7 +3883,7 @@ EXPORT_SYMBOL(dmu_buf_get_user);
 EXPORT_SYMBOL(dmu_buf_freeable);
 EXPORT_SYMBOL(dmu_buf_get_blkptr);
 
-
+/* BEGIN CSTYLED */
 module_param(dbuf_cache_max_bytes, ulong, 0644);
 MODULE_PARM_DESC(dbuf_cache_max_bytes,
 	"Maximum size in bytes of the dbuf cache.");

@@ -3902,5 +3901,5 @@ MODULE_PARM_DESC(dbuf_cache_lowater_pct,
 module_param(dbuf_cache_max_shift, int, 0644);
 MODULE_PARM_DESC(dbuf_cache_max_shift,
 	"Cap the size of the dbuf cache to log2 fraction of arc size.");
-
+/* END CSTYLED */
 #endif
@@ -336,6 +336,7 @@ dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data)
 }
 
 #if defined(_KERNEL) && defined(HAVE_SPL)
+/* BEGIN CSTYLED */
 module_param(zfs_prefetch_disable, int, 0644);
 MODULE_PARM_DESC(zfs_prefetch_disable, "Disable all ZFS prefetching");
 

@@ -351,4 +352,5 @@ MODULE_PARM_DESC(zfetch_max_distance,
 
 module_param(zfetch_array_rd_sz, ulong, 0644);
 MODULE_PARM_DESC(zfetch_array_rd_sz, "Number of bytes in a array_read");
+/* END CSTYLED */
 #endif
@@ -1087,6 +1087,7 @@ dsl_pool_config_held_writer(dsl_pool_t *dp)
 EXPORT_SYMBOL(dsl_pool_config_enter);
 EXPORT_SYMBOL(dsl_pool_config_exit);
 
+/* BEGIN CSTYLED */
 /* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
 module_param(zfs_dirty_data_max_percent, int, 0444);
 MODULE_PARM_DESC(zfs_dirty_data_max_percent, "percent of ram can be dirty");

@@ -1112,4 +1113,5 @@ MODULE_PARM_DESC(zfs_dirty_data_sync, "sync txg when this much dirty data");
 
 module_param(zfs_delay_scale, ulong, 0644);
 MODULE_PARM_DESC(zfs_delay_scale, "how quickly delay approaches infinity");
+/* END CSTYLED */
 #endif
@@ -73,7 +73,7 @@ int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
 enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
 int dsl_scan_delay_completion = B_FALSE; /* set to delay scan completion */
 /* max number of blocks to free in a single TXG */
-ulong zfs_free_max_blocks = 100000;
+unsigned long zfs_free_max_blocks = 100000;
 
 #define DSL_SCAN_IS_SCRUB_RESILVER(scn) \
 	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \

@@ -1985,6 +1985,7 @@ MODULE_PARM_DESC(zfs_no_scrub_io, "Set to disable scrub I/O");
 module_param(zfs_no_scrub_prefetch, int, 0644);
 MODULE_PARM_DESC(zfs_no_scrub_prefetch, "Set to disable scrub prefetching");
 
+/* CSTYLED */
 module_param(zfs_free_max_blocks, ulong, 0644);
 MODULE_PARM_DESC(zfs_free_max_blocks, "Max number of blocks freed in one txg");
 
@@ -2924,37 +2924,44 @@ metaslab_check_free(spa_t *spa, const blkptr_t *bp)
 }
 
 #if defined(_KERNEL) && defined(HAVE_SPL)
+/* CSTYLED */
 module_param(metaslab_aliquot, ulong, 0644);
-module_param(metaslab_debug_load, int, 0644);
-module_param(metaslab_debug_unload, int, 0644);
-module_param(metaslab_preload_enabled, int, 0644);
-module_param(zfs_mg_noalloc_threshold, int, 0644);
-module_param(zfs_mg_fragmentation_threshold, int, 0644);
-module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
-module_param(metaslab_fragmentation_factor_enabled, int, 0644);
-module_param(metaslab_lba_weighting_enabled, int, 0644);
-module_param(metaslab_bias_enabled, int, 0644);
-
 MODULE_PARM_DESC(metaslab_aliquot,
 	"allocation granularity (a.k.a. stripe size)");
+
+module_param(metaslab_debug_load, int, 0644);
 MODULE_PARM_DESC(metaslab_debug_load,
 	"load all metaslabs when pool is first opened");
+
+module_param(metaslab_debug_unload, int, 0644);
 MODULE_PARM_DESC(metaslab_debug_unload,
 	"prevent metaslabs from being unloaded");
+
+module_param(metaslab_preload_enabled, int, 0644);
 MODULE_PARM_DESC(metaslab_preload_enabled,
 	"preload potential metaslabs during reassessment");
+
+module_param(zfs_mg_noalloc_threshold, int, 0644);
 MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
 	"percentage of free space for metaslab group to allow allocation");
+
+module_param(zfs_mg_fragmentation_threshold, int, 0644);
 MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
 	"fragmentation for metaslab group to allow allocation");
+
+module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
 MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
 	"fragmentation for metaslab to allow allocation");
+
+module_param(metaslab_fragmentation_factor_enabled, int, 0644);
 MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
 	"use the fragmentation metric to prefer less fragmented metaslabs");
+
+module_param(metaslab_lba_weighting_enabled, int, 0644);
 MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
 	"prefer metaslabs with lower LBAs");
+
+module_param(metaslab_bias_enabled, int, 0644);
 MODULE_PARM_DESC(metaslab_bias_enabled,
 	"enable metaslab group biasing");
 #endif /* _KERNEL && HAVE_SPL */
@@ -6996,6 +6996,7 @@ module_param(spa_load_verify_data, int, 0644);
 MODULE_PARM_DESC(spa_load_verify_data,
 	"Set to traverse data on pool import");
 
+/* CSTYLED */
 module_param(zio_taskq_batch_pct, uint, 0444);
 MODULE_PARM_DESC(zio_taskq_batch_pct,
 	"Percentage of CPUs to run an IO worker thread");
@@ -2093,9 +2093,9 @@ EXPORT_SYMBOL(spa_has_slogs);
 EXPORT_SYMBOL(spa_is_root);
 EXPORT_SYMBOL(spa_writeable);
 EXPORT_SYMBOL(spa_mode);
 
 EXPORT_SYMBOL(spa_namespace_lock);
-
+/* BEGIN CSTYLED */
 module_param(zfs_flags, uint, 0644);
 MODULE_PARM_DESC(zfs_flags, "Set additional debugging flags");
 

@@ -2118,4 +2118,5 @@ MODULE_PARM_DESC(spa_asize_inflation,
 
 module_param(spa_slop_shift, int, 0644);
 MODULE_PARM_DESC(spa_slop_shift, "Reserved free space in pool");
+/* END CSTYLED */
 #endif
@@ -1222,8 +1222,9 @@ zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
 	defined(SHRINKER_NUMA_AWARE)
 	if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
 		*objects = 0;
-		for_each_online_node(sc.nid)
+		for_each_online_node(sc.nid) {
 			*objects += (*shrinker->scan_objects)(shrinker, &sc);
+		}
 	} else {
 		*objects = (*shrinker->scan_objects)(shrinker, &sc);
 	}
@@ -4932,6 +4932,7 @@ zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
 #endif /* HAVE_UIO_ZEROCOPY */
 
 #if defined(_KERNEL) && defined(HAVE_SPL)
+/* CSTYLED */
 module_param(zfs_delete_blocks, ulong, 0644);
 MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
 module_param(zfs_read_chunk_size, long, 0644);
@@ -575,9 +575,7 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
 
-	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 ||
-	    tmp_gen == 0) {
-
+	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0) {
 		if (hdl == NULL)
 			sa_handle_destroy(zp->z_sa_hdl);
 		zp->z_sa_hdl = NULL;

@@ -2142,6 +2140,7 @@ zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
 EXPORT_SYMBOL(zfs_create_fs);
 EXPORT_SYMBOL(zfs_obj_to_path);
 
+/* CSTYLED */
 module_param(zfs_object_mutex_size, uint, 0644);
 MODULE_PARM_DESC(zfs_object_mutex_size, "Size of znode hold array");
 #endif
@@ -2270,6 +2270,7 @@ MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay");
 module_param(zfs_nocacheflush, int, 0644);
 MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes");
 
+/* CSTYLED */
 module_param(zil_slog_limit, ulong, 0644);
 MODULE_PARM_DESC(zil_slog_limit, "Max commit bytes to separate log device");
 #endif
@@ -37,6 +37,7 @@ zpl_encode_fh(struct inode *ip, __u32 *fh, int *max_len, struct inode *parent)
 #else
 zpl_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len, int connectable)
 {
+	/* CSTYLED */
 	struct inode *ip = dentry->d_inode;
 #endif /* HAVE_ENCODE_FH_WITH_INODE */
 	fstrans_cookie_t cookie;
@@ -657,6 +657,7 @@ zpl_revalidate(struct dentry *dentry, struct nameidata *nd)
 zpl_revalidate(struct dentry *dentry, unsigned int flags)
 {
 #endif /* HAVE_D_REVALIDATE_NAMEIDATA */
+	/* CSTYLED */
 	zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
 	int error;
 
@@ -2087,6 +2087,7 @@ zvol_fini(void)
 	mutex_destroy(&zvol_state_lock);
 }
 
+/* BEGIN CSTYLED */
 module_param(zvol_inhibit_dev, uint, 0644);
 MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
 

@@ -2098,3 +2099,4 @@ MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
 
 module_param(zvol_prefetch_bytes, uint, 0644);
 MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
+/* END CSTYLED */
@@ -289,7 +289,8 @@ run_process(const char *path, char *argv[])
 	} else if (pid > 0) {
 		int status;
 
-		while ((rc = waitpid(pid, &status, 0)) == -1 && errno == EINTR);
+		while ((rc = waitpid(pid, &status, 0)) == -1 &&
+		    errno == EINTR) { }
 
 		if (rc < 0 || !WIFEXITED(status))
 			return (-1);

@@ -369,8 +370,8 @@ create_files(void)
 	file = malloc(PATH_MAX);
 	if (file == NULL) {
 		rc = ENOMEM;
-		ERROR("Error %d: malloc(%d) bytes for file name\n",
-		    rc, PATH_MAX);
+		ERROR("Error %d: malloc(%d) bytes for file name\n", rc,
+		    PATH_MAX);
 		goto out;
 	}
 

@@ -454,16 +455,16 @@ setxattrs(void)
 	value = malloc(XATTR_SIZE_MAX);
 	if (value == NULL) {
 		rc = ENOMEM;
-		ERROR("Error %d: malloc(%d) bytes for xattr value\n",
-		    rc, XATTR_SIZE_MAX);
+		ERROR("Error %d: malloc(%d) bytes for xattr value\n", rc,
+		    XATTR_SIZE_MAX);
 		goto out;
 	}
 
 	file = malloc(PATH_MAX);
 	if (file == NULL) {
 		rc = ENOMEM;
-		ERROR("Error %d: malloc(%d) bytes for file name\n",
-		    rc, PATH_MAX);
+		ERROR("Error %d: malloc(%d) bytes for file name\n", rc,
+		    PATH_MAX);
 		goto out;
 	}
 

@@ -525,16 +526,16 @@ getxattrs(void)
 	verify_value = malloc(XATTR_SIZE_MAX);
 	if (verify_value == NULL) {
 		rc = ENOMEM;
-		ERROR("Error %d: malloc(%d) bytes for xattr verify\n",
-		    rc, XATTR_SIZE_MAX);
+		ERROR("Error %d: malloc(%d) bytes for xattr verify\n", rc,
+		    XATTR_SIZE_MAX);
 		goto out;
 	}
 
 	value = malloc(XATTR_SIZE_MAX);
 	if (value == NULL) {
 		rc = ENOMEM;
-		ERROR("Error %d: malloc(%d) bytes for xattr value\n",
-		    rc, XATTR_SIZE_MAX);
+		ERROR("Error %d: malloc(%d) bytes for xattr value\n", rc,
+		    XATTR_SIZE_MAX);
 		goto out;
 	}
 

@@ -544,8 +545,8 @@ getxattrs(void)
 	file = malloc(PATH_MAX);
 	if (file == NULL) {
 		rc = ENOMEM;
-		ERROR("Error %d: malloc(%d) bytes for file name\n",
-		    rc, PATH_MAX);
+		ERROR("Error %d: malloc(%d) bytes for file name\n", rc,
+		    PATH_MAX);
 		goto out;
 	}
 