Use cstyle -cpP in `make cstyle` check

Enable picky cstyle checks and resolve the new warnings.  The vast
majority of the required changes address minor whitespace formatting
issues.  This patch contains no functional changes.

Non-whitespace changes are as follows (a short illustrative sketch
follows this list):

* replace 8 bare ; loop bodies with { } in for/while loops
* fix a missing ; in cmd/zed/agents/zfs_diagnosis.c
* fix a comment typo (confim -> confirm)
* change a line-ending , to ; in cmd/zpool/zpool_main.c
* a number of /* BEGIN CSTYLED */ ... /* END CSTYLED */ blocks
* /* CSTYLED */ markers
* change == 0 comparisons to !
* ulong to unsigned long in module/zfs/dsl_scan.c
* rearrangement of module_param lines in module/zfs/metaslab.c
* add a { } block around the statement after for_each_online_node
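
For illustration, a minimal C sketch of a few of the non-whitespace
patterns above; the function and identifiers are hypothetical and are
not taken from the patch itself:

/*
 * Illustrative only: shows the empty-loop-body, "== 0" to "!", and
 * cast-spacing fixes described in the list above.
 */
#include <string.h>

static int
context_is_empty(const char *context)
{
	const char *p = context;

	/* a bare ";" loop body becomes an explicit "{ }" block */
	for (; *p == ' '; p++) { }

	/* an "== 0" comparison rewritten as logical negation */
	if (!strcmp(p, "none"))
		return (1);

	/* no space between a cast and its operand */
	return ((int)strlen(p) == 0);
}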

Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Reviewed-by: Håkan Johansson <f96hajo@chalmers.se>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #5465
commit 02730c333c (parent d57f03e40e)
Brian Behlendorf, 2016-12-12 10:46:26 -08:00, committed by GitHub
96 changed files with 501 additions and 478 deletions

View File

@ -43,7 +43,7 @@ checkstyle: cstyle shellcheck flake8
cstyle:
@find ${top_srcdir} -name '*.[hc]' ! -name 'zfs_config.*' \
! -name '*.mod.c' -type f -exec scripts/cstyle.pl {} \+
! -name '*.mod.c' -type f -exec scripts/cstyle.pl -cpP {} \+
shellcheck:
@if type shellcheck > /dev/null 2>&1; then \
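
For reference, the stricter check can presumably also be run by hand on a
single file, mirroring the Makefile rule above:

scripts/cstyle.pl -cpP module/zfs/arc.c

The file path is only an example; the flag meanings are assumed from the
script's usage text rather than from this diff (-c checks continuation-line
indentation, -p enables the pickier checks, -P warns about non-POSIX type
names).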

View File

@ -367,7 +367,7 @@ zfs_selinux_setcontext(zfs_handle_t *zhp, zfs_prop_t zpt, const char *name,
if (zfs_prop_get(zhp, zpt, context, sizeof (context),
NULL, NULL, 0, B_FALSE) == 0) {
if (strcmp(context, "none") != 0)
append_mntopt(name, context, mntopts, mtabopt, B_TRUE);
append_mntopt(name, context, mntopts, mtabopt, B_TRUE);
}
}
@ -600,7 +600,7 @@ main(int argc, char **argv)
gettext("filesystem '%s' (v%d) is not "
"supported by this implementation of "
"ZFS (max v%d).\n"), dataset,
(int) zfs_version, (int) ZPL_VERSION);
(int)zfs_version, (int)ZPL_VERSION);
} else {
(void) fprintf(stderr,
gettext("filesystem '%s' mount "

View File

@ -93,7 +93,7 @@ run_gen_bench_impl(const char *impl)
start = gethrtime();
for (iter = 0; iter < iter_cnt; iter++)
vdev_raidz_generate_parity(rm_bench);
elapsed = NSEC2SEC((double) (gethrtime() - start));
elapsed = NSEC2SEC((double)(gethrtime() - start));
disksize = (1ULL << ds) / rto_opts.rto_dcols;
d_bw = (double)iter_cnt * (double)disksize;
@ -106,7 +106,7 @@ run_gen_bench_impl(const char *impl)
(1ULL<<ds),
d_bw,
d_bw * (double)(ncols),
(unsigned) iter_cnt);
(unsigned)iter_cnt);
vdev_raidz_map_free(rm_bench);
}
@ -164,7 +164,7 @@ run_rec_bench_impl(const char *impl)
continue;
rm_bench = vdev_raidz_map_alloc(&zio_bench,
BENCH_ASHIFT, ncols, PARITY_PQR);
BENCH_ASHIFT, ncols, PARITY_PQR);
/* estimate iteration count */
iter_cnt = (REC_BENCH_MEMORY);
@ -177,7 +177,7 @@ run_rec_bench_impl(const char *impl)
start = gethrtime();
for (iter = 0; iter < iter_cnt; iter++)
vdev_raidz_reconstruct(rm_bench, tgt[fn], nbad);
elapsed = NSEC2SEC((double) (gethrtime() - start));
elapsed = NSEC2SEC((double)(gethrtime() - start));
disksize = (1ULL << ds) / rto_opts.rto_dcols;
d_bw = (double)iter_cnt * (double)(disksize);
@ -190,7 +190,7 @@ run_rec_bench_impl(const char *impl)
(1ULL<<ds),
d_bw,
d_bw * (double)ncols,
(unsigned) iter_cnt);
(unsigned)iter_cnt);
vdev_raidz_map_free(rm_bench);
}

View File

@ -53,7 +53,7 @@ static void sig_handler(int signo)
(void) sigaction(signo, &action, NULL);
if (rto_opts.rto_gdb)
if (system(gdb));
if (system(gdb)) { }
raise(signo);
}
@ -86,8 +86,7 @@ static void print_opts(raidz_test_opts_t *opts, boolean_t force)
opts->rto_dcols, /* -d */
ilog2(opts->rto_dsize), /* -s */
opts->rto_sweep ? "yes" : "no", /* -S */
verbose /* -v */
);
verbose); /* -v */
}
}
@ -98,25 +97,24 @@ static void usage(boolean_t requested)
FILE *fp = requested ? stdout : stderr;
(void) fprintf(fp, "Usage:\n"
"\t[-a zio ashift (default: %zu)]\n"
"\t[-o zio offset, exponent radix 2 (default: %zu)]\n"
"\t[-d number of raidz data columns (default: %zu)]\n"
"\t[-s zio size, exponent radix 2 (default: %zu)]\n"
"\t[-S parameter sweep (default: %s)]\n"
"\t[-t timeout for parameter sweep test]\n"
"\t[-B benchmark all raidz implementations]\n"
"\t[-v increase verbosity (default: %zu)]\n"
"\t[-h (print help)]\n"
"\t[-T test the test, see if failure would be detected]\n"
"\t[-D debug (attach gdb on SIGSEGV)]\n"
"",
o->rto_ashift, /* -a */
ilog2(o->rto_offset), /* -o */
o->rto_dcols, /* -d */
ilog2(o->rto_dsize), /* -s */
rto_opts.rto_sweep ? "yes" : "no", /* -S */
o->rto_v /* -d */
);
"\t[-a zio ashift (default: %zu)]\n"
"\t[-o zio offset, exponent radix 2 (default: %zu)]\n"
"\t[-d number of raidz data columns (default: %zu)]\n"
"\t[-s zio size, exponent radix 2 (default: %zu)]\n"
"\t[-S parameter sweep (default: %s)]\n"
"\t[-t timeout for parameter sweep test]\n"
"\t[-B benchmark all raidz implementations]\n"
"\t[-v increase verbosity (default: %zu)]\n"
"\t[-h (print help)]\n"
"\t[-T test the test, see if failure would be detected]\n"
"\t[-D debug (attach gdb on SIGSEGV)]\n"
"",
o->rto_ashift, /* -a */
ilog2(o->rto_offset), /* -o */
o->rto_dcols, /* -d */
ilog2(o->rto_dsize), /* -s */
rto_opts.rto_sweep ? "yes" : "no", /* -S */
o->rto_v); /* -d */
exit(requested ? 0 : 1);
}
@ -227,7 +225,7 @@ static int
init_rand(void *data, size_t size, void *private)
{
int i;
int *dst = (int *) data;
int *dst = (int *)data;
for (i = 0; i < size / sizeof (int); i++)
dst[i] = rand_data[i];
@ -333,7 +331,7 @@ init_raidz_map(raidz_test_opts_t *opts, zio_t **zio, const int parity)
init_zio_abd(*zio);
rm = vdev_raidz_map_alloc(*zio, opts->rto_ashift,
total_ncols, parity);
total_ncols, parity);
VERIFY(rm);
/* Make sure code columns are destroyed */
@ -476,18 +474,15 @@ run_rec_check_impl(raidz_test_opts_t *opts, raidz_map_t *rm, const int fn)
}
} else {
/* can reconstruct 3 failed data disk */
for (x0 = 0;
x0 < opts->rto_dcols; x0++) {
for (x0 = 0; x0 < opts->rto_dcols; x0++) {
if (x0 >= rm->rm_cols - raidz_parity(rm))
continue;
for (x1 = x0 + 1;
x1 < opts->rto_dcols; x1++) {
for (x1 = x0 + 1; x1 < opts->rto_dcols; x1++) {
if (x1 >= rm->rm_cols - raidz_parity(rm))
continue;
for (x2 = x1 + 1;
x2 < opts->rto_dcols; x2++) {
for (x2 = x1 + 1; x2 < opts->rto_dcols; x2++) {
if (x2 >=
rm->rm_cols - raidz_parity(rm))
rm->rm_cols - raidz_parity(rm))
continue;
/* Check if should stop */
@ -504,7 +499,7 @@ run_rec_check_impl(raidz_test_opts_t *opts, raidz_map_t *rm, const int fn)
if (!opts->rto_sanity)
vdev_raidz_reconstruct(rm,
tgtidx, 3);
tgtidx, 3);
if (cmp_data(opts, rm) != 0) {
err++;
@ -555,7 +550,7 @@ run_rec_check(raidz_test_opts_t *opts)
for (fn = 0; fn < RAIDZ_REC_NUM; fn++) {
LOG(D_INFO, "\t\tTesting method [%s] ...",
raidz_rec_name[fn]);
raidz_rec_name[fn]);
if (run_rec_check_impl(opts, rm_test, fn) != 0) {
LOG(D_INFO, "[FAIL]\n");
@ -607,7 +602,7 @@ static void
sweep_thread(void *arg)
{
int err = 0;
raidz_test_opts_t *opts = (raidz_test_opts_t *) arg;
raidz_test_opts_t *opts = (raidz_test_opts_t *)arg;
VERIFY(opts != NULL);
err = run_test(opts);
@ -708,7 +703,7 @@ run_sweep(void)
opts->rto_v = 0; /* be quiet */
VERIFY3P(zk_thread_create(NULL, 0,
(thread_func_t) sweep_thread,
(thread_func_t)sweep_thread,
(void *) opts, TS_RUN, NULL, 0, 0,
PTHREAD_CREATE_JOINABLE), !=, NULL);
}
@ -765,7 +760,7 @@ main(int argc, char **argv)
kernel_init(FREAD);
/* setup random data because rand() is not reentrant */
rand_data = (int *) umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
rand_data = (int *)umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
srand((unsigned)time(NULL) * getpid());
for (i = 0; i < SPA_MAXBLOCKSIZE / sizeof (int); i++)
rand_data[i] = rand();

View File

@ -77,7 +77,7 @@ zdb_ot_name(dmu_object_type_t type)
if (type < DMU_OT_NUMTYPES)
return (dmu_ot[type].ot_name);
else if ((type & DMU_OT_NEWTYPE) &&
((type & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS))
((type & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS))
return (dmu_ot_byteswap[type & DMU_OT_BYTESWAP_MASK].ob_name);
else
return ("UNKNOWN");

View File

@ -378,7 +378,7 @@ zfs_case_solve(fmd_hdl_t *hdl, zfs_case_t *zcp, const char *faultname,
boolean_t serialize;
nvlist_t *fru = NULL;
#ifdef _HAS_FMD_TOPO
nvlist_t *fmri
nvlist_t *fmri;
topo_hdl_t *thp;
int err;
#endif

View File

@ -343,7 +343,7 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
list_insert_tail(&g_device_list, device);
zed_log_msg(LOG_INFO, " zpool_label_disk: async '%s' (%llu)",
leafname, (u_longlong_t) guid);
leafname, (u_longlong_t)guid);
return; /* resumes at EC_DEV_ADD.ESC_DISK for partition */
@ -373,7 +373,7 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
}
zed_log_msg(LOG_INFO, " zpool_label_disk: resume '%s' (%llu)",
physpath, (u_longlong_t) guid);
physpath, (u_longlong_t)guid);
(void) snprintf(devpath, sizeof (devpath), "%s%s",
DEV_BYID_PATH, new_devid);

View File

@ -417,11 +417,11 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
/*
* Note: on zfsonlinux statechange events are more than just
* healthy ones so we need to confim the actual state value.
* healthy ones so we need to confirm the actual state value.
*/
if (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE,
&state) == 0 && state == VDEV_STATE_HEALTHY) {;
&state) == 0 && state == VDEV_STATE_HEALTHY) {
zfs_vdev_repair(hdl, nvl);
return;
}

View File

@ -255,7 +255,7 @@ main(int argc, char *argv[])
zed_log_msg(LOG_NOTICE,
"ZFS Event Daemon %s-%s (PID %d)",
ZFS_META_VERSION, ZFS_META_RELEASE, (int) getpid());
ZFS_META_VERSION, ZFS_META_RELEASE, (int)getpid());
if (zed_conf_open_state(zcp) < 0)
exit(EXIT_FAILURE);

View File

@ -513,7 +513,7 @@ zed_conf_write_pid(struct zed_conf *zcp)
/*
* Write PID file.
*/
n = snprintf(buf, sizeof (buf), "%d\n", (int) getpid());
n = snprintf(buf, sizeof (buf), "%d\n", (int)getpid());
if ((n < 0) || (n >= sizeof (buf))) {
errno = ERANGE;
zed_log_msg(LOG_ERR, "Failed to write PID file \"%s\": %s",
@ -637,7 +637,7 @@ zed_conf_read_state(struct zed_conf *zcp, uint64_t *eidp, int64_t etime[])
"Failed to read state file: %s", strerror(errno));
return (-1);
}
if (lseek(zcp->state_fd, 0, SEEK_SET) == (off_t) -1) {
if (lseek(zcp->state_fd, 0, SEEK_SET) == (off_t)-1) {
zed_log_msg(LOG_WARNING,
"Failed to reposition state file offset: %s",
strerror(errno));
@ -687,7 +687,7 @@ zed_conf_write_state(struct zed_conf *zcp, uint64_t eid, int64_t etime[])
"Failed to write state file: %s", strerror(errno));
return (-1);
}
if (lseek(zcp->state_fd, 0, SEEK_SET) == (off_t) -1) {
if (lseek(zcp->state_fd, 0, SEEK_SET) == (off_t)-1) {
zed_log_msg(LOG_WARNING,
"Failed to reposition state file offset: %s",
strerror(errno));

View File

@ -283,7 +283,7 @@ zed_udev_monitor(void *arg)
if (strcmp(class, EC_DEV_STATUS) == 0 &&
udev_device_get_property_value(dev, "DM_UUID") &&
udev_device_get_property_value(dev, "MPATH_SBIN_PATH")) {
tmp = (char *) udev_device_get_devnode(dev);
tmp = (char *)udev_device_get_devnode(dev);
tmp2 = zfs_get_underlying_path(tmp);
if (tmp && tmp2 && (strcmp(tmp, tmp2) != 0)) {
/*

View File

@ -489,7 +489,7 @@ _zed_event_add_int64_array(uint64_t eid, zed_strings_t *zsp,
name = nvpair_name(nvp);
(void) nvpair_value_int64_array(nvp, &i64p, &nelem);
for (i = 0, p = buf; (i < nelem) && (buflen > 0); i++) {
n = snprintf(p, buflen, "%lld ", (u_longlong_t) i64p[i]);
n = snprintf(p, buflen, "%lld ", (u_longlong_t)i64p[i]);
if ((n < 0) || (n >= buflen))
return (_zed_event_add_array_err(eid, name));
p += n;
@ -521,7 +521,7 @@ _zed_event_add_uint64_array(uint64_t eid, zed_strings_t *zsp,
fmt = _zed_event_value_is_hex(name) ? "0x%.16llX " : "%llu ";
(void) nvpair_value_uint64_array(nvp, &u64p, &nelem);
for (i = 0, p = buf; (i < nelem) && (buflen > 0); i++) {
n = snprintf(p, buflen, fmt, (u_longlong_t) u64p[i]);
n = snprintf(p, buflen, fmt, (u_longlong_t)u64p[i]);
if ((n < 0) || (n >= buflen))
return (_zed_event_add_array_err(eid, name));
p += n;
@ -603,7 +603,7 @@ _zed_event_add_nvpair(uint64_t eid, zed_strings_t *zsp, nvpair_t *nvp)
_zed_event_add_var(eid, zsp, prefix, name, "%d", i8);
break;
case DATA_TYPE_INT8:
(void) nvpair_value_int8(nvp, (int8_t *) &i8);
(void) nvpair_value_int8(nvp, (int8_t *)&i8);
_zed_event_add_var(eid, zsp, prefix, name, "%d", i8);
break;
case DATA_TYPE_UINT8:
@ -611,7 +611,7 @@ _zed_event_add_nvpair(uint64_t eid, zed_strings_t *zsp, nvpair_t *nvp)
_zed_event_add_var(eid, zsp, prefix, name, "%u", i8);
break;
case DATA_TYPE_INT16:
(void) nvpair_value_int16(nvp, (int16_t *) &i16);
(void) nvpair_value_int16(nvp, (int16_t *)&i16);
_zed_event_add_var(eid, zsp, prefix, name, "%d", i16);
break;
case DATA_TYPE_UINT16:
@ -619,7 +619,7 @@ _zed_event_add_nvpair(uint64_t eid, zed_strings_t *zsp, nvpair_t *nvp)
_zed_event_add_var(eid, zsp, prefix, name, "%u", i16);
break;
case DATA_TYPE_INT32:
(void) nvpair_value_int32(nvp, (int32_t *) &i32);
(void) nvpair_value_int32(nvp, (int32_t *)&i32);
_zed_event_add_var(eid, zsp, prefix, name, "%d", i32);
break;
case DATA_TYPE_UINT32:
@ -627,15 +627,15 @@ _zed_event_add_nvpair(uint64_t eid, zed_strings_t *zsp, nvpair_t *nvp)
_zed_event_add_var(eid, zsp, prefix, name, "%u", i32);
break;
case DATA_TYPE_INT64:
(void) nvpair_value_int64(nvp, (int64_t *) &i64);
(void) nvpair_value_int64(nvp, (int64_t *)&i64);
_zed_event_add_var(eid, zsp, prefix, name,
"%lld", (longlong_t) i64);
"%lld", (longlong_t)i64);
break;
case DATA_TYPE_UINT64:
(void) nvpair_value_uint64(nvp, &i64);
_zed_event_add_var(eid, zsp, prefix, name,
(_zed_event_value_is_hex(name) ? "0x%.16llX" : "%llu"),
(u_longlong_t) i64);
(u_longlong_t)i64);
/*
* shadow readable strings for vdev state pairs
*/
@ -653,9 +653,9 @@ _zed_event_add_nvpair(uint64_t eid, zed_strings_t *zsp, nvpair_t *nvp)
_zed_event_add_var(eid, zsp, prefix, name, "%g", d);
break;
case DATA_TYPE_HRTIME:
(void) nvpair_value_hrtime(nvp, (hrtime_t *) &i64);
(void) nvpair_value_hrtime(nvp, (hrtime_t *)&i64);
_zed_event_add_var(eid, zsp, prefix, name,
"%llu", (u_longlong_t) i64);
"%llu", (u_longlong_t)i64);
break;
case DATA_TYPE_NVLIST:
_zed_event_add_var(eid, zsp, prefix, name,
@ -889,7 +889,7 @@ zed_event_service(struct zed_conf *zcp)
_zed_event_add_env_preserve(eid, zsp);
_zed_event_add_var(eid, zsp, ZED_VAR_PREFIX, "PID",
"%d", (int) getpid());
"%d", (int)getpid());
_zed_event_add_var(eid, zsp, ZED_VAR_PREFIX, "ZEDLET_DIR",
"%s", zcp->zedlet_dir);
subclass = _zed_event_get_subclass(class);

View File

@ -54,7 +54,7 @@ _zed_exec_create_env(zed_strings_t *zsp)
if (!buf)
return (NULL);
pp = (char **) buf;
pp = (char **)buf;
p = buf + (num_ptrs * sizeof (char *));
i = 0;
for (q = zed_strings_first(zsp); q; q = zed_strings_next(zsp)) {
@ -66,7 +66,7 @@ _zed_exec_create_env(zed_strings_t *zsp)
}
pp[i] = NULL;
assert(buf + buflen == p);
return ((char **) buf);
return ((char **)buf);
}
/*
@ -131,7 +131,7 @@ _zed_exec_fork_child(uint64_t eid, const char *dir, const char *prog,
*/
for (n = 0; n < 1000; n++) {
wpid = waitpid(pid, &status, WNOHANG);
if (wpid == (pid_t) -1) {
if (wpid == (pid_t)-1) {
if (errno == EINTR)
continue;
zed_log_msg(LOG_WARNING,

View File

@ -66,11 +66,11 @@ zed_log_pipe_open(void)
{
if ((_ctx.pipe_fd[0] != -1) || (_ctx.pipe_fd[1] != -1))
zed_log_die("Invalid use of zed_log_pipe_open in PID %d",
(int) getpid());
(int)getpid());
if (pipe(_ctx.pipe_fd) < 0)
zed_log_die("Failed to create daemonize pipe in PID %d: %s",
(int) getpid(), strerror(errno));
(int)getpid(), strerror(errno));
}
/*
@ -85,12 +85,12 @@ zed_log_pipe_close_reads(void)
if (_ctx.pipe_fd[0] < 0)
zed_log_die(
"Invalid use of zed_log_pipe_close_reads in PID %d",
(int) getpid());
(int)getpid());
if (close(_ctx.pipe_fd[0]) < 0)
zed_log_die(
"Failed to close reads on daemonize pipe in PID %d: %s",
(int) getpid(), strerror(errno));
(int)getpid(), strerror(errno));
_ctx.pipe_fd[0] = -1;
}
@ -110,12 +110,12 @@ zed_log_pipe_close_writes(void)
if (_ctx.pipe_fd[1] < 0)
zed_log_die(
"Invalid use of zed_log_pipe_close_writes in PID %d",
(int) getpid());
(int)getpid());
if (close(_ctx.pipe_fd[1]) < 0)
zed_log_die(
"Failed to close writes on daemonize pipe in PID %d: %s",
(int) getpid(), strerror(errno));
(int)getpid(), strerror(errno));
_ctx.pipe_fd[1] = -1;
}
@ -135,7 +135,7 @@ zed_log_pipe_wait(void)
if (_ctx.pipe_fd[0] < 0)
zed_log_die("Invalid use of zed_log_pipe_wait in PID %d",
(int) getpid());
(int)getpid());
for (;;) {
n = read(_ctx.pipe_fd[0], &c, sizeof (c));
@ -144,7 +144,7 @@ zed_log_pipe_wait(void)
continue;
zed_log_die(
"Failed to read from daemonize pipe in PID %d: %s",
(int) getpid(), strerror(errno));
(int)getpid(), strerror(errno));
}
if (n == 0) {
break;

View File

@ -206,7 +206,7 @@ zed_strings_first(zed_strings_t *zsp)
if (!zsp->iteratorp)
return (NULL);
return (((zed_strings_node_t *) zsp->iteratorp)->val);
return (((zed_strings_node_t *)zsp->iteratorp)->val);
}

View File

@ -17,16 +17,11 @@
typedef struct zed_strings zed_strings_t;
zed_strings_t * zed_strings_create(void);
zed_strings_t *zed_strings_create(void);
void zed_strings_destroy(zed_strings_t *zsp);
int zed_strings_add(zed_strings_t *zsp, const char *key, const char *s);
const char * zed_strings_first(zed_strings_t *zsp);
const char * zed_strings_next(zed_strings_t *zsp);
const char *zed_strings_first(zed_strings_t *zsp);
const char *zed_strings_next(zed_strings_t *zsp);
int zed_strings_count(zed_strings_t *zsp);
#endif /* !ZED_STRINGS_H */

View File

@ -2332,9 +2332,9 @@ compare_nums:
rc = (rv64 < lv64) ? 1 : -1;
} else {
if ((nvlist_lookup_string(lnvl, propname,
&lvstr) == ENOENT) ||
&lvstr) == ENOENT) ||
(nvlist_lookup_string(rnvl, propname,
&rvstr) == ENOENT)) {
&rvstr) == ENOENT)) {
goto compare_nums;
}
rc = strcmp(lvstr, rvstr);
@ -2561,12 +2561,12 @@ userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space)
if (!nvlist_exists(props, "used"))
(void) nvlist_add_uint64(props, "used", 0);
} else if (prop == ZFS_PROP_USEROBJUSED ||
prop == ZFS_PROP_GROUPOBJUSED) {
prop == ZFS_PROP_GROUPOBJUSED) {
propname = "objused";
if (!nvlist_exists(props, "objquota"))
(void) nvlist_add_uint64(props, "objquota", 0);
} else if (prop == ZFS_PROP_USEROBJQUOTA ||
prop == ZFS_PROP_GROUPOBJQUOTA) {
prop == ZFS_PROP_GROUPOBJQUOTA) {
propname = "objquota";
if (!nvlist_exists(props, "objused"))
(void) nvlist_add_uint64(props, "objused", 0);
@ -2637,7 +2637,7 @@ print_us_node(boolean_t scripted, boolean_t parsable, int *fields, int types,
case USFIELD_NAME:
if (type == DATA_TYPE_UINT64) {
(void) sprintf(valstr, "%llu",
(u_longlong_t) val64);
(u_longlong_t)val64);
strval = valstr;
}
break;
@ -2648,7 +2648,7 @@ print_us_node(boolean_t scripted, boolean_t parsable, int *fields, int types,
if (type == DATA_TYPE_UINT64) {
if (parsable) {
(void) sprintf(valstr, "%llu",
(u_longlong_t) val64);
(u_longlong_t)val64);
} else {
zfs_nicenum(val64, valstr,
sizeof (valstr));
@ -2672,9 +2672,9 @@ print_us_node(boolean_t scripted, boolean_t parsable, int *fields, int types,
if (scripted)
(void) printf("%s", strval);
else if (field == USFIELD_TYPE || field == USFIELD_NAME)
(void) printf("%-*s", (int) width[field], strval);
(void) printf("%-*s", (int)width[field], strval);
else
(void) printf("%*s", (int) width[field], strval);
(void) printf("%*s", (int)width[field], strval);
first = B_FALSE;
cfield++;
@ -2699,10 +2699,10 @@ print_us(boolean_t scripted, boolean_t parsable, int *fields, int types,
col = gettext(us_field_hdr[field]);
if (field == USFIELD_TYPE || field == USFIELD_NAME) {
(void) printf(first ? "%-*s" : " %-*s",
(int) width[field], col);
(int)width[field], col);
} else {
(void) printf(first ? "%*s" : " %*s",
(int) width[field], col);
(int)width[field], col);
}
first = B_FALSE;
cfield++;

View File

@ -1091,7 +1091,7 @@ main(int argc, char **argv)
record.zi_cmd = ZINJECT_DATA_FAULT;
if (translate_record(type, argv[0], range, level, &record, pool,
dataset) != 0) {
libzfs_fini(g_zfs);
libzfs_fini(g_zfs);
return (1);
}
if (!error)
@ -1105,7 +1105,7 @@ main(int argc, char **argv)
*/
if (dataset[0] != '\0' && domount) {
if ((zhp = zfs_open(g_zfs, dataset,
ZFS_TYPE_DATASET)) == NULL) {
ZFS_TYPE_DATASET)) == NULL) {
libzfs_fini(g_zfs);
return (1);
}

View File

@ -99,44 +99,44 @@ usage(void)
{
fprintf(stderr, "Usage: zpios\n");
fprintf(stderr,
" --threadcount -t =values\n"
" --threadcount_low -l =value\n"
" --threadcount_high -h =value\n"
" --threadcount_incr -e =value\n"
" --regioncount -n =values\n"
" --regioncount_low -i =value\n"
" --regioncount_high -j =value\n"
" --regioncount_incr -k =value\n"
" --offset -o =values\n"
" --offset_low -m =value\n"
" --offset_high -q =value\n"
" --offset_incr -r =value\n"
" --chunksize -c =values\n"
" --chunksize_low -a =value\n"
" --chunksize_high -b =value\n"
" --chunksize_incr -g =value\n"
" --regionsize -s =values\n"
" --regionsize_low -A =value\n"
" --regionsize_high -B =value\n"
" --regionsize_incr -C =value\n"
" --blocksize -S =values\n"
" --load -L =dmuio|ssf|fpp\n"
" --pool -p =pool name\n"
" --name -M =test name\n"
" --cleanup -x\n"
" --prerun -P =pre-command\n"
" --postrun -R =post-command\n"
" --log -G =log directory\n"
" --regionnoise -I =shift\n"
" --chunknoise -N =bytes\n"
" --threaddelay -T =jiffies\n"
" --verify -V\n"
" --zerocopy -z\n"
" --nowait -O\n"
" --noprefetch -f\n"
" --human-readable -H\n"
" --verbose -v =increase verbosity\n"
" --help -? =this help\n\n");
" --threadcount -t =values\n"
" --threadcount_low -l =value\n"
" --threadcount_high -h =value\n"
" --threadcount_incr -e =value\n"
" --regioncount -n =values\n"
" --regioncount_low -i =value\n"
" --regioncount_high -j =value\n"
" --regioncount_incr -k =value\n"
" --offset -o =values\n"
" --offset_low -m =value\n"
" --offset_high -q =value\n"
" --offset_incr -r =value\n"
" --chunksize -c =values\n"
" --chunksize_low -a =value\n"
" --chunksize_high -b =value\n"
" --chunksize_incr -g =value\n"
" --regionsize -s =values\n"
" --regionsize_low -A =value\n"
" --regionsize_high -B =value\n"
" --regionsize_incr -C =value\n"
" --blocksize -S =values\n"
" --load -L =dmuio|ssf|fpp\n"
" --pool -p =pool name\n"
" --name -M =test name\n"
" --cleanup -x\n"
" --prerun -P =pre-command\n"
" --postrun -R =post-command\n"
" --log -G =log directory\n"
" --regionnoise -I =shift\n"
" --chunknoise -N =bytes\n"
" --threaddelay -T =jiffies\n"
" --verify -V\n"
" --zerocopy -z\n"
" --nowait -O\n"
" --noprefetch -f\n"
" --human-readable -H\n"
" --verbose -v =increase verbosity\n"
" --help -? =this help\n\n");
return (0);
}
@ -517,9 +517,8 @@ run_one(cmd_args_t *args, uint32_t id, uint32_t T, uint32_t N,
dev_clear();
cmd_size =
sizeof (zpios_cmd_t)
+ ((T + N + 1) * sizeof (zpios_stats_t));
cmd_size = sizeof (zpios_cmd_t) +
((T + N + 1) * sizeof (zpios_stats_t));
cmd = (zpios_cmd_t *)malloc(cmd_size);
if (cmd == NULL)
return (ENOMEM);

View File

@ -143,7 +143,7 @@ regex_match(const char *string, char *pattern)
return (rc);
}
rc = regexec(&re, string, (size_t) 0, NULL, 0);
rc = regexec(&re, string, (size_t)0, NULL, 0);
regfree(&re);
return (rc);
@ -224,7 +224,7 @@ set_lhi(char *pattern, range_repeat_t *range, char *optarg,
if ((rc = regex_match(optarg, pattern))) {
fprintf(stderr, "Error: Wrong pattern in %s, '%s'\n",
arg, optarg);
arg, optarg);
return (rc);
}

View File

@ -346,7 +346,7 @@ vdev_run_cmd_thread(void *cb_cmd_data)
if (getline(&data->line, &len, fp) != -1) {
/* Success. Remove newline from the end, if necessary. */
if ((pos = strchr(data->line, '\n')) != NULL)
*pos = '\0';
*pos = '\0';
} else {
data->line = NULL;
}

View File

@ -2752,7 +2752,7 @@ print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
rw_column_width = (column_width * columns) +
(2 * (columns - 1));
text_start = (int) ((rw_column_width)/columns -
text_start = (int)((rw_column_width)/columns -
slen/columns);
printf(" "); /* Two spaces between columns */
@ -3090,7 +3090,7 @@ print_iostat_histo(struct stat_array *nva, unsigned int len,
}
if (cb->cb_scripted)
printf("%llu", (u_longlong_t) val);
printf("%llu", (u_longlong_t)val);
else
printf("%-*s", namewidth, buf);
@ -3567,7 +3567,7 @@ print_iostat(zpool_handle_t *zhp, void *data)
&oldnvroot) == 0);
ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
cb, 0);
cb, 0);
if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
!cb->cb_scripted && cb->cb_verbose && !cb->cb_vdev_names_count) {
print_iostat_separator(cb);
@ -4201,7 +4201,7 @@ zpool_do_iostat(int argc, char **argv)
fprintf(stderr, " -%c", flag_to_arg[idx]);
}
fprintf(stderr, ". Try running a newer module.\n"),
fprintf(stderr, ". Try running a newer module.\n");
pool_list_free(list);
return (1);
@ -6173,7 +6173,7 @@ typedef struct upgrade_cbdata {
static int
check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
{
int zfs_version = (int) zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int *count = (int *)unsupp_fs;
if (zfs_version > ZPL_VERSION) {
@ -6212,7 +6212,7 @@ upgrade_version(zpool_handle_t *zhp, uint64_t version)
if (unsupp_fs) {
(void) fprintf(stderr, gettext("Upgrade not performed due "
"to %d unsupported filesystems (max v%d).\n"),
unsupp_fs, (int) ZPL_VERSION);
unsupp_fs, (int)ZPL_VERSION);
return (1);
}
@ -6223,12 +6223,12 @@ upgrade_version(zpool_handle_t *zhp, uint64_t version)
if (version >= SPA_VERSION_FEATURES) {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to feature flags.\n"),
zpool_get_name(zhp), (u_longlong_t) oldversion);
zpool_get_name(zhp), (u_longlong_t)oldversion);
} else {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to version %llu.\n"),
zpool_get_name(zhp), (u_longlong_t) oldversion,
(u_longlong_t) version);
zpool_get_name(zhp), (u_longlong_t)oldversion,
(u_longlong_t)version);
}
return (0);
@ -6435,14 +6435,14 @@ upgrade_one(zpool_handle_t *zhp, void *data)
if (cur_version > cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using more current version '%llu'.\n\n"),
zpool_get_name(zhp), (u_longlong_t) cur_version);
zpool_get_name(zhp), (u_longlong_t)cur_version);
return (0);
}
if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using version %llu.\n\n"), zpool_get_name(zhp),
(u_longlong_t) cbp->cb_version);
(u_longlong_t)cbp->cb_version);
return (0);
}
@ -6629,7 +6629,7 @@ zpool_do_upgrade(int argc, char **argv)
} else {
(void) printf(gettext("All pools are already "
"formatted with version %llu or higher.\n"),
(u_longlong_t) cb.cb_version);
(u_longlong_t)cb.cb_version);
}
}
} else if (argc == 0) {
@ -6720,14 +6720,14 @@ get_history_one(zpool_handle_t *zhp, void *data)
}
(void) printf("%s [internal %s txg:%lld] %s", tbuf,
zfs_history_event_names[ievent],
(longlong_t) fnvlist_lookup_uint64(
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
if (!cb->internal)
continue;
(void) printf("%s [txg:%lld] %s", tbuf,
(longlong_t) fnvlist_lookup_uint64(
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {

View File

@ -89,7 +89,7 @@ typedef struct vdev_cmd_data_list
vdev_cmd_data_t *data; /* Array of vdevs */
} vdev_cmd_data_list_t;
vdev_cmd_data_list_t * all_pools_for_each_vdev_run(int argc, char **argv,
vdev_cmd_data_list_t *all_pools_for_each_vdev_run(int argc, char **argv,
char *cmd);
void free_vdev_cmd_data_list(vdev_cmd_data_list_t *vcdl);

View File

@ -1131,9 +1131,8 @@ ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
err = zfs_prop_index_to_string(prop, curval, &valname);
if (err)
(void) printf("%s %s = %llu at '%s'\n",
osname, propname, (unsigned long long)curval,
setpoint);
(void) printf("%s %s = %llu at '%s'\n", osname,
propname, (unsigned long long)curval, setpoint);
else
(void) printf("%s %s = %s at '%s'\n",
osname, propname, valname, setpoint);
@ -4479,7 +4478,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
!ztest_random(2)) != 0)
goto out;
object = od->od_object;
@ -4616,7 +4615,7 @@ ztest_fzap(ztest_ds_t *zd, uint64_t id)
ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
!ztest_random(2)) != 0)
goto out;
object = od->od_object;

View File

@ -208,8 +208,8 @@ xgetbv(uint32_t index)
uint32_t eax, edx;
/* xgetbv - instruction byte code */
__asm__ __volatile__(".byte 0x0f; .byte 0x01; .byte 0xd0"
: "=a" (eax), "=d" (edx)
: "c" (index));
: "=a" (eax), "=d" (edx)
: "c" (index));
return ((((uint64_t)edx)<<32) | (uint64_t)eax);
}
@ -229,13 +229,13 @@ __cpuid_check_feature(const cpuid_feature_desc_t *desc)
* are passed by value.
*/
__cpuid_count(desc->leaf, desc->subleaf,
r[EAX], r[EBX], r[ECX], r[EDX]);
r[EAX], r[EBX], r[ECX], r[EDX]);
return ((r[desc->reg] & desc->flag) == desc->flag);
}
return (B_FALSE);
}
#define CPUID_FEATURE_CHECK(name, id) \
#define CPUID_FEATURE_CHECK(name, id) \
static inline boolean_t \
__cpuid_has_ ## name(void) \
{ \

View File

@ -42,7 +42,7 @@
* zfs_ace_hdr_t *, ...,
* uint32_t, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_ace_class,
TP_PROTO(znode_t *zn, zfs_ace_hdr_t *ace, uint32_t mask_matched),
TP_ARGS(zn, ace, mask_matched),
@ -136,6 +136,7 @@ DECLARE_EVENT_CLASS(zfs_ace_class,
__entry->z_type, __entry->z_flags, __entry->z_access_mask,
__entry->mask_matched)
);
/* END CSTYLED */
#define DEFINE_ACE_EVENT(name) \
DEFINE_EVENT(zfs_ace_class, name, \

View File

@ -42,7 +42,7 @@
* DTRACE_PROBE1(...,
* arc_buf_hdr_t *, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_arc_buf_hdr_class,
TP_PROTO(arc_buf_hdr_t *ab),
TP_ARGS(ab),
@ -95,6 +95,7 @@ DECLARE_EVENT_CLASS(zfs_arc_buf_hdr_class,
__entry->hdr_mfu_ghost_hits, __entry->hdr_l2_hits,
__entry->hdr_refcount)
);
/* END CSTYLED */
#define DEFINE_ARC_BUF_HDR_EVENT(name) \
DEFINE_EVENT(zfs_arc_buf_hdr_class, name, \
@ -117,7 +118,7 @@ DEFINE_ARC_BUF_HDR_EVENT(zfs_l2arc__miss);
* vdev_t *, ...,
* zio_t *, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_l2arc_rw_class,
TP_PROTO(vdev_t *vd, zio_t *zio),
TP_ARGS(vd, zio),
@ -137,6 +138,7 @@ DECLARE_EVENT_CLASS(zfs_l2arc_rw_class,
ZIO_TP_PRINTK_FMT, __entry->vdev_id, __entry->vdev_guid,
__entry->vdev_state, ZIO_TP_PRINTK_ARGS)
);
/* END CSTYLED */
#define DEFINE_L2ARC_RW_EVENT(name) \
DEFINE_EVENT(zfs_l2arc_rw_class, name, \
@ -153,7 +155,7 @@ DEFINE_L2ARC_RW_EVENT(zfs_l2arc__write);
* zio_t *, ...,
* l2arc_write_callback_t *, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_l2arc_iodone_class,
TP_PROTO(zio_t *zio, l2arc_write_callback_t *cb),
TP_ARGS(zio, cb),
@ -161,6 +163,7 @@ DECLARE_EVENT_CLASS(zfs_l2arc_iodone_class,
TP_fast_assign(ZIO_TP_FAST_ASSIGN),
TP_printk(ZIO_TP_PRINTK_FMT, ZIO_TP_PRINTK_ARGS)
);
/* END CSTYLED */
#define DEFINE_L2ARC_IODONE_EVENT(name) \
DEFINE_EVENT(zfs_l2arc_iodone_class, name, \
@ -178,7 +181,7 @@ DEFINE_L2ARC_IODONE_EVENT(zfs_l2arc__iodone);
* uint64_t,
* const zbookmark_phys_t *);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_arc_miss_class,
TP_PROTO(arc_buf_hdr_t *hdr,
const blkptr_t *bp, uint64_t size, const zbookmark_phys_t *zb),
@ -272,6 +275,7 @@ DECLARE_EVENT_CLASS(zfs_arc_miss_class,
__entry->bp_lsize, __entry->zb_objset, __entry->zb_object,
__entry->zb_level, __entry->zb_blkid)
);
/* END CSTYLED */
#define DEFINE_ARC_MISS_EVENT(name) \
DEFINE_EVENT(zfs_arc_miss_class, name, \
@ -289,7 +293,7 @@ DEFINE_ARC_MISS_EVENT(zfs_arc__miss);
* uint64_t, ...,
* boolean_t, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_l2arc_evict_class,
TP_PROTO(l2arc_dev_t *dev,
list_t *buflist, uint64_t taddr, boolean_t all),
@ -330,6 +334,7 @@ DECLARE_EVENT_CLASS(zfs_l2arc_evict_class,
__entry->l2ad_end, __entry->l2ad_first, __entry->l2ad_writing,
__entry->taddr, __entry->all)
);
/* END CSTYLED */
#define DEFINE_L2ARC_EVICT_EVENT(name) \
DEFINE_EVENT(zfs_l2arc_evict_class, name, \

View File

@ -45,7 +45,7 @@
* int, ...,
* const char *, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_dprintf_class,
TP_PROTO(const char *file, const char *function, int line,
const char *msg),
@ -66,6 +66,7 @@ DECLARE_EVENT_CLASS(zfs_dprintf_class,
TP_printk("%s:%d:%s(): %s", __get_str(file), __entry->line,
__get_str(function), __get_str(msg))
);
/* END CSTYLED */
#define DEFINE_DPRINTF_EVENT(name) \
DEFINE_EVENT(zfs_dprintf_class, name, \
@ -83,7 +84,7 @@ DEFINE_DPRINTF_EVENT(zfs_zfs__dprintf);
* int, ...,
* uintptr_t, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_set_error_class,
TP_PROTO(const char *file, const char *function, int line,
uintptr_t error),
@ -104,6 +105,7 @@ DECLARE_EVENT_CLASS(zfs_set_error_class,
TP_printk("%s:%d:%s(): error 0x%lx", __get_str(file), __entry->line,
__get_str(function), __entry->error)
);
/* END CSTYLED */
#ifdef TP_CONDITION
#define DEFINE_SET_ERROR_EVENT(name) \

View File

@ -41,7 +41,7 @@
* uint64_t, ...,
* uint64_t, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_delay_mintime_class,
TP_PROTO(dmu_tx_t *tx, uint64_t dirty, uint64_t min_tx_time),
TP_ARGS(tx, dirty, min_tx_time),
@ -102,6 +102,7 @@ DECLARE_EVENT_CLASS(zfs_delay_mintime_class,
#endif
__entry->dirty, __entry->min_tx_time)
);
/* END CSTYLED */
#define DEFINE_DELAY_MINTIME_EVENT(name) \
DEFINE_EVENT(zfs_delay_mintime_class, name, \

View File

@ -41,7 +41,7 @@
* int64_t, ...,
* uint32_t, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_dnode_move_class,
TP_PROTO(dnode_t *dn, int64_t refcount, uint32_t dbufs),
TP_ARGS(dn, refcount, dbufs),
@ -102,6 +102,7 @@ DECLARE_EVENT_CLASS(zfs_dnode_move_class,
__entry->dn_maxblkid, __entry->dn_tx_holds, __entry->dn_holds,
__entry->dn_have_spill, __entry->refcount, __entry->dbufs)
);
/* END CSTYLED */
#define DEFINE_DNODE_MOVE_EVENT(name) \
DEFINE_EVENT(zfs_dnode_move_class, name, \

View File

@ -41,7 +41,7 @@
* unsigned int, ...,
* void *, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_multilist_insert_remove_class,
TP_PROTO(multilist_t *ml, unsigned sublist_idx, void *obj),
TP_ARGS(ml, sublist_idx, obj),
@ -60,6 +60,7 @@ DECLARE_EVENT_CLASS(zfs_multilist_insert_remove_class,
TP_printk("ml { offset %ld numsublists %llu sublistidx %u } ",
__entry->ml_offset, __entry->ml_num_sublists, __entry->sublist_idx)
);
/* END CSTYLED */
#define DEFINE_MULTILIST_INSERT_REMOVE_EVENT(name) \
DEFINE_EVENT(zfs_multilist_insert_remove_class, name, \

View File

@ -40,7 +40,7 @@
* dsl_pool_t *, ...,
* uint64_t, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_txg_class,
TP_PROTO(dsl_pool_t *dp, uint64_t txg),
TP_ARGS(dp, txg),
@ -52,6 +52,7 @@ DECLARE_EVENT_CLASS(zfs_txg_class,
),
TP_printk("txg %llu", __entry->txg)
);
/* END CSTYLED */
#define DEFINE_TXG_EVENT(name) \
DEFINE_EVENT(zfs_txg_class, name, \

View File

@ -39,7 +39,7 @@
* DTRACE_PROBE1(...,
* zilog_t *, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_zil_class,
TP_PROTO(zilog_t *zilog),
TP_ARGS(zilog),
@ -111,6 +111,7 @@ DECLARE_EVENT_CLASS(zfs_zil_class,
__entry->zl_itx_list_sz, __entry->zl_cur_used,
__entry->zl_replay_time, __entry->zl_replay_blks)
);
/* END CSTYLED */
#define DEFINE_ZIL_EVENT(name) \
DEFINE_EVENT(zfs_zil_class, name, \

View File

@ -36,6 +36,7 @@
#include <sys/types.h>
#include <sys/trace_common.h> /* For ZIO macros */
/* BEGIN CSTYLED */
TRACE_EVENT(zfs_zio__delay__miss,
TP_PROTO(zio_t *zio, hrtime_t now),
TP_ARGS(zio, now),
@ -75,6 +76,7 @@ TRACE_EVENT(zfs_zio__delay__skip,
TP_fast_assign(ZIO_TP_FAST_ASSIGN),
TP_printk(ZIO_TP_PRINTK_FMT, ZIO_TP_PRINTK_ARGS)
);
/* END CSTYLED */
#endif /* _TRACE_ZIO_H */

View File

@ -40,7 +40,7 @@
* zrlock_t *, ...,
* uint32_t, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_zrlock_class,
TP_PROTO(zrlock_t *zrl, uint32_t n),
TP_ARGS(zrl, n),
@ -69,6 +69,7 @@ DECLARE_EVENT_CLASS(zfs_zrlock_class,
__entry->refcount, __entry->n)
#endif
);
/* END_CSTYLED */
#define DEFINE_ZRLOCK_EVENT(name) \
DEFINE_EVENT(zfs_zrlock_class, name, \

View File

@ -40,22 +40,22 @@ struct kernel_param {};
/*
* vdev_raidz interface
*/
struct raidz_map * vdev_raidz_map_alloc(struct zio *, uint64_t, uint64_t,
struct raidz_map *vdev_raidz_map_alloc(struct zio *, uint64_t, uint64_t,
uint64_t);
void vdev_raidz_map_free(struct raidz_map *);
void vdev_raidz_generate_parity(struct raidz_map *);
int vdev_raidz_reconstruct(struct raidz_map *, const int *, int);
void vdev_raidz_map_free(struct raidz_map *);
void vdev_raidz_generate_parity(struct raidz_map *);
int vdev_raidz_reconstruct(struct raidz_map *, const int *, int);
/*
* vdev_raidz_math interface
*/
void vdev_raidz_math_init(void);
void vdev_raidz_math_fini(void);
struct raidz_impl_ops * vdev_raidz_math_get_ops(void);
int vdev_raidz_math_generate(struct raidz_map *);
int vdev_raidz_math_reconstruct(struct raidz_map *,
const int *, const int *, const int);
int vdev_raidz_impl_set(const char *);
void vdev_raidz_math_init(void);
void vdev_raidz_math_fini(void);
struct raidz_impl_ops *vdev_raidz_math_get_ops(void);
int vdev_raidz_math_generate(struct raidz_map *);
int vdev_raidz_math_reconstruct(struct raidz_map *, const int *, const int *,
const int);
int vdev_raidz_impl_set(const char *);
#ifdef __cplusplus
}

View File

@ -178,12 +178,12 @@ extern const raidz_impl_ops_t vdev_raidz_aarch64_neonx2_impl;
* @code parity the function produce
* @impl name of the implementation
*/
#define _RAIDZ_GEN_WRAP(code, impl) \
#define _RAIDZ_GEN_WRAP(code, impl) \
static void \
impl ## _gen_ ## code(void *rmp) \
{ \
raidz_map_t *rm = (raidz_map_t *) rmp; \
raidz_generate_## code ## _impl(rm); \
raidz_map_t *rm = (raidz_map_t *)rmp; \
raidz_generate_## code ## _impl(rm); \
}
/*
@ -192,11 +192,11 @@ impl ## _gen_ ## code(void *rmp) \
* @code parity the function produce
* @impl name of the implementation
*/
#define _RAIDZ_REC_WRAP(code, impl) \
static int \
#define _RAIDZ_REC_WRAP(code, impl) \
static int \
impl ## _rec_ ## code(void *rmp, const int *tgtidx) \
{ \
raidz_map_t *rm = (raidz_map_t *) rmp; \
raidz_map_t *rm = (raidz_map_t *)rmp; \
return (raidz_reconstruct_## code ## _impl(rm, tgtidx)); \
}
@ -295,7 +295,7 @@ vdev_raidz_exp2(const uint8_t a, const unsigned exp)
if (a == 0)
return (0);
return (vdev_raidz_pow2[(exp + (unsigned) vdev_raidz_log2[a]) % 255]);
return (vdev_raidz_pow2[(exp + (unsigned)vdev_raidz_log2[a]) % 255]);
}
/*
@ -318,9 +318,9 @@ gf_mul(const gf_t a, const gf_t b)
if (a == 0 || b == 0)
return (0);
logsum = (gf_log_t) vdev_raidz_log2[a] + (gf_log_t) vdev_raidz_log2[b];
logsum = (gf_log_t)vdev_raidz_log2[a] + (gf_log_t)vdev_raidz_log2[b];
return ((gf_t) vdev_raidz_pow2[logsum % 255]);
return ((gf_t)vdev_raidz_pow2[logsum % 255]);
}
static inline gf_t
@ -332,10 +332,10 @@ gf_div(const gf_t a, const gf_t b)
if (a == 0)
return (0);
logsum = (gf_log_t) 255 + (gf_log_t) vdev_raidz_log2[a] -
(gf_log_t) vdev_raidz_log2[b];
logsum = (gf_log_t)255 + (gf_log_t)vdev_raidz_log2[a] -
(gf_log_t)vdev_raidz_log2[b];
return ((gf_t) vdev_raidz_pow2[logsum % 255]);
return ((gf_t)vdev_raidz_pow2[logsum % 255]);
}
static inline gf_t
@ -345,9 +345,9 @@ gf_inv(const gf_t a)
ASSERT3U(a, >, 0);
logsum = (gf_log_t) 255 - (gf_log_t) vdev_raidz_log2[a];
logsum = (gf_log_t)255 - (gf_log_t)vdev_raidz_log2[a];
return ((gf_t) vdev_raidz_pow2[logsum]);
return ((gf_t)vdev_raidz_pow2[logsum]);
}
static inline gf_t
@ -360,7 +360,7 @@ static inline gf_t
gf_exp4(gf_log_t exp)
{
ASSERT3U(exp, <=, 255);
return ((gf_t) vdev_raidz_pow2[(2 * exp) % 255]);
return ((gf_t)vdev_raidz_pow2[(2 * exp) % 255]);
}
#ifdef __cplusplus

View File

@ -148,8 +148,7 @@ static inline bool
dir_emit(struct dir_context *ctx, const char *name, int namelen,
uint64_t ino, unsigned type)
{
return (ctx->actor(ctx->dirent, name, namelen, ctx->pos, ino, type)
== 0);
return (!ctx->actor(ctx->dirent, name, namelen, ctx->pos, ino, type));
}
static inline bool

View File

@ -498,7 +498,7 @@ sa_enable_share(sa_share_t share, char *protocol)
#ifdef DEBUG
fprintf(stderr, "sa_enable_share: share->sharepath=%s, protocol=%s\n",
impl_share->sharepath, protocol);
impl_share->sharepath, protocol);
#endif
assert(impl_share->handle != NULL);
@ -539,7 +539,7 @@ sa_disable_share(sa_share_t share, char *protocol)
#ifdef DEBUG
fprintf(stderr, "sa_disable_share: share->sharepath=%s, protocol=%s\n",
impl_share->sharepath, protocol);
impl_share->sharepath, protocol);
#endif
ret = SA_OK;
@ -697,7 +697,7 @@ sa_parse_legacy_options(sa_group_t group, char *options, char *proto)
#ifdef DEBUG
fprintf(stderr, "sa_parse_legacy_options: options=%s, proto=%s\n",
options, proto);
options, proto);
#endif
fstype = fstypes;

View File

@ -688,7 +688,8 @@ nfs_check_exportfs(void)
}
if (pid > 0) {
while ((rc = waitpid(pid, &status, 0)) <= 0 && errno == EINTR);
while ((rc = waitpid(pid, &status, 0)) <= 0 &&
errno == EINTR) { }
if (rc <= 0) {
(void) close(nfs_exportfs_temp_fd);

View File

@ -154,7 +154,7 @@ smb_retrieve_shares(void)
continue; /* Incomplete share definition */
else {
shares = (smb_share_t *)
malloc(sizeof (smb_share_t));
malloc(sizeof (smb_share_t));
if (shares == NULL) {
rc = SA_NO_MEMORY;
goto out;
@ -395,7 +395,7 @@ smb_update_shareopts(sa_share_impl_t impl_share, const char *resource,
old_shareopts = FSINFO(impl_share, smb_fstype)->shareopts;
if (FSINFO(impl_share, smb_fstype)->active && old_shareopts != NULL &&
strcmp(old_shareopts, shareopts) != 0) {
strcmp(old_shareopts, shareopts) != 0) {
needs_reshare = B_TRUE;
smb_disable_share(impl_share);
}

View File

@ -39,7 +39,7 @@ pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER;
/*
* Theses are the void returning variants
*/
/* BEGIN CSTYLED */
#define ATOMIC_INC(name, type) \
void atomic_inc_##name(volatile type *target) \
{ \
@ -381,6 +381,7 @@ ATOMIC_SWAP(32, uint32_t)
ATOMIC_SWAP(uint, uint_t)
ATOMIC_SWAP(ulong, ulong_t)
ATOMIC_SWAP(64, uint64_t)
/* END CSTYLED */
void *
atomic_swap_ptr(volatile void *target, void *bits)

View File

@ -53,7 +53,7 @@ getmntany(FILE *fp, struct mnttab *mgetp, struct mnttab *mrefp)
while (
((ret = _sol_getmntent(fp, mgetp)) == 0) && (
DIFF(mnt_special) || DIFF(mnt_mountp) ||
DIFF(mnt_fstype) || DIFF(mnt_mntopts)));
DIFF(mnt_fstype) || DIFF(mnt_mntopts))) { }
return (ret);
}
@ -86,7 +86,7 @@ getextmntent(FILE *fp, struct extmnttab *mp, int len)
int ret;
struct stat64 st;
ret = _sol_getmntent(fp, (struct mnttab *) mp);
ret = _sol_getmntent(fp, (struct mnttab *)mp);
if (ret == 0) {
if (stat64(mp->mnt_mountp, &st) != 0) {
mp->mnt_major = 0;

View File

@ -53,11 +53,11 @@ rwlock_init(rwlock_t *rwlp, int type, void *arg)
switch (type) {
case USYNC_THREAD:
VERIFY0(pthread_rwlockattr_setpshared(&attr,
PTHREAD_PROCESS_PRIVATE));
PTHREAD_PROCESS_PRIVATE));
break;
case USYNC_PROCESS:
VERIFY0(pthread_rwlockattr_setpshared(&attr,
PTHREAD_PROCESS_SHARED));
PTHREAD_PROCESS_SHARED));
break;
default:
VERIFY0(1);

View File

@ -1938,9 +1938,9 @@ get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src,
* the property is valid for the snapshot's head dataset type.
*/
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT &&
!zfs_prop_valid_for_type(prop, zhp->zfs_head_type, B_TRUE)) {
*val = zfs_prop_default_numeric(prop);
return (-1);
!zfs_prop_valid_for_type(prop, zhp->zfs_head_type, B_TRUE)) {
*val = zfs_prop_default_numeric(prop);
return (-1);
}
switch (prop) {
@ -2343,7 +2343,7 @@ zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
strftime(propbuf, proplen, "%a %b %e %k:%M %Y",
&t) == 0)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t) val);
(u_longlong_t)val);
}
break;
@ -2744,11 +2744,11 @@ userquota_propname_decode(const char *propname, boolean_t zoned,
*typep = type;
isuser = (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_USERUSED ||
type == ZFS_PROP_USEROBJQUOTA ||
type == ZFS_PROP_USEROBJUSED);
type == ZFS_PROP_USEROBJQUOTA ||
type == ZFS_PROP_USEROBJUSED);
isgroup = (type == ZFS_PROP_GROUPQUOTA || type == ZFS_PROP_GROUPUSED ||
type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_GROUPOBJUSED);
type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_GROUPOBJUSED);
cp = strchr(propname, '@') + 1;

View File

@ -200,7 +200,7 @@ boolean_t
udev_is_mpath(struct udev_device *dev)
{
return udev_device_get_property_value(dev, "DM_UUID") &&
udev_device_get_property_value(dev, "MPATH_SBIN_PATH");
udev_device_get_property_value(dev, "MPATH_SBIN_PATH");
}
/*

View File

@ -456,7 +456,7 @@ zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
*/
if (!(flags & MS_OVERLAY)) {
if (zfs_prop_get(zhp, ZFS_PROP_OVERLAY, overlay,
sizeof (overlay), NULL, NULL, 0, B_FALSE) == 0) {
sizeof (overlay), NULL, NULL, 0, B_FALSE) == 0) {
if (strcmp(overlay, "on") == 0) {
flags |= MS_OVERLAY;
}

View File

@ -308,7 +308,7 @@ zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
case ZPOOL_PROP_ASHIFT:
if (literal)
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
(u_longlong_t)intval);
else
(void) zfs_nicenum(intval, buf, len);
break;
@ -3416,12 +3416,12 @@ zfs_strip_partition(char *path)
d = part + 1;
} else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
tmp[1] == 'd') {
for (d = &tmp[2]; isalpha(*d); part = ++d);
for (d = &tmp[2]; isalpha(*d); part = ++d) { }
} else if (strncmp("xvd", tmp, 3) == 0) {
for (d = &tmp[3]; isalpha(*d); part = ++d);
for (d = &tmp[3]; isalpha(*d); part = ++d) { }
}
if (part && d && *d != '\0') {
for (; isdigit(*d); d++);
for (; isdigit(*d); d++) { }
if (*d == '\0')
*part = '\0';
}
@ -4210,7 +4210,7 @@ zpool_label_name(char *label_name, int label_size)
if (id == 0)
id = (((uint64_t)rand()) << 32) | (uint64_t)rand();
snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t) id);
snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
}
/*

View File

@ -2615,7 +2615,7 @@ again:
else
progress = B_TRUE;
sprintf(guidname, "%llu",
(u_longlong_t) parent_fromsnap_guid);
(u_longlong_t)parent_fromsnap_guid);
nvlist_add_boolean(deleted, guidname);
continue;
}
@ -2649,7 +2649,7 @@ again:
parent_fromsnap_guid != 0 &&
stream_parent_fromsnap_guid != parent_fromsnap_guid) {
sprintf(guidname, "%llu",
(u_longlong_t) parent_fromsnap_guid);
(u_longlong_t)parent_fromsnap_guid);
if (nvlist_exists(deleted, guidname)) {
progress = B_TRUE;
needagain = B_TRUE;

View File

@ -617,7 +617,7 @@ zfs_nicenum_format(uint64_t num, char *buf, size_t buflen,
double val;
if (format == ZFS_NICENUM_RAW) {
snprintf(buf, buflen, "%llu", (u_longlong_t) num);
snprintf(buf, buflen, "%llu", (u_longlong_t)num);
return;
}
@ -633,12 +633,12 @@ zfs_nicenum_format(uint64_t num, char *buf, size_t buflen,
if ((format == ZFS_NICENUM_TIME) && (num == 0)) {
(void) snprintf(buf, buflen, "-");
} else if ((index == 0) || ((num %
(uint64_t) powl(k_unit[format], index)) == 0)) {
(uint64_t)powl(k_unit[format], index)) == 0)) {
/*
* If this is an even multiple of the base, always display
* without any decimal precision.
*/
(void) snprintf(buf, buflen, "%llu%s", (u_longlong_t) n, u);
(void) snprintf(buf, buflen, "%llu%s", (u_longlong_t)n, u);
} else {
/*
@ -652,8 +652,8 @@ zfs_nicenum_format(uint64_t num, char *buf, size_t buflen,
*/
int i;
for (i = 2; i >= 0; i--) {
val = (double) num /
(uint64_t) powl(k_unit[format], index);
val = (double)num /
(uint64_t)powl(k_unit[format], index);
/*
* Don't print floating point values for time. Note,
@ -752,7 +752,7 @@ libzfs_run_process(const char *path, char *argv[], int flags)
int status;
while ((error = waitpid(pid, &status, 0)) == -1 &&
errno == EINTR);
errno == EINTR) { }
if (error < 0 || !WIFEXITED(status))
return (-1);

View File

@ -617,7 +617,7 @@ recv_impl(const char *snapname, nvlist_t *props, const char *origin,
fnvlist_add_string(innvl, "origin", origin);
fnvlist_add_byte_array(innvl, "begin_record",
(uchar_t *) &drr, sizeof (drr));
(uchar_t *)&drr, sizeof (drr));
fnvlist_add_int32(innvl, "input_fd", input_fd);

View File

@ -127,7 +127,7 @@ zk_thread_current(void)
void *
zk_thread_helper(void *arg)
{
kthread_t *kt = (kthread_t *) arg;
kthread_t *kt = (kthread_t *)arg;
VERIFY3S(pthread_setspecific(kthread_key, kt), ==, 0);
@ -137,7 +137,7 @@ zk_thread_helper(void *arg)
(void) setpriority(PRIO_PROCESS, 0, kt->t_pri);
kt->t_tid = pthread_self();
((thread_func_arg_t) kt->t_func)(kt->t_arg);
((thread_func_arg_t)kt->t_func)(kt->t_arg);
/* Unreachable, thread must exit with thread_exit() */
abort();
@ -916,7 +916,7 @@ __dprintf(const char *file, const char *func, int line, const char *fmt, ...)
if (dprintf_find_string("pid"))
(void) printf("%d ", getpid());
if (dprintf_find_string("tid"))
(void) printf("%u ", (uint_t) pthread_self());
(void) printf("%u ", (uint_t)pthread_self());
if (dprintf_find_string("cpu"))
(void) printf("%u ", getcpuid());
if (dprintf_find_string("time"))
@ -1490,7 +1490,7 @@ zfs_onexit_cb_data(minor_t minor, uint64_t action_handle, void **data)
fstrans_cookie_t
spl_fstrans_mark(void)
{
return ((fstrans_cookie_t) 0);
return ((fstrans_cookie_t)0);
}
void

View File

@ -1593,18 +1593,17 @@ intel_aes_instructions_present(void)
: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "a"(func), "c"(subfunc));
if (memcmp((char *) (&ebx), "Genu", 4) == 0 &&
memcmp((char *) (&edx), "ineI", 4) == 0 &&
memcmp((char *) (&ecx), "ntel", 4) == 0) {
if (memcmp((char *)(&ebx), "Genu", 4) == 0 &&
memcmp((char *)(&edx), "ineI", 4) == 0 &&
memcmp((char *)(&ecx), "ntel", 4) == 0) {
func = 1;
subfunc = 0;
/* check for aes-ni instruction set */
__asm__ __volatile__(
"cpuid"
: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "a"(func), "c"(subfunc));
"cpuid"
: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "a"(func), "c"(subfunc));
cached_result = !!(ecx & INTEL_AESNI_FLAG);
} else {

View File

@ -723,18 +723,17 @@ intel_pclmulqdq_instruction_present(void)
: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "a"(func), "c"(subfunc));
if (memcmp((char *) (&ebx), "Genu", 4) == 0 &&
memcmp((char *) (&edx), "ineI", 4) == 0 &&
memcmp((char *) (&ecx), "ntel", 4) == 0) {
if (memcmp((char *)(&ebx), "Genu", 4) == 0 &&
memcmp((char *)(&edx), "ineI", 4) == 0 &&
memcmp((char *)(&ecx), "ntel", 4) == 0) {
func = 1;
subfunc = 0;
/* check for aes-ni instruction set */
__asm__ __volatile__(
"cpuid"
: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "a"(func), "c"(subfunc));
"cpuid"
: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "a"(func), "c"(subfunc));
cached_result = !!(ecx & INTEL_PCLMULQDQ_FLAG);
} else {

View File

@ -67,8 +67,9 @@ static uint_t prov_tab_max = KCF_MAX_PROVIDERS;
void
kcf_prov_tab_destroy(void)
{
if (prov_tab) kmem_free(prov_tab, prov_tab_max *
sizeof (kcf_provider_desc_t *));
if (prov_tab)
kmem_free(prov_tab, prov_tab_max *
sizeof (kcf_provider_desc_t *));
}
/*

View File

@ -1062,7 +1062,7 @@ kcf_sched_destroy(void)
for (i = 0; i < REQID_TABLES; i++) {
if (kcf_reqid_table[i])
kmem_free(kcf_reqid_table[i],
sizeof (kcf_reqid_table_t));
sizeof (kcf_reqid_table_t));
}
if (gswq)

View File

@ -71,7 +71,7 @@ mod_install(struct modlinkage *modlp)
if (modlp->ml_rev != MODREV_1) {
cmn_err(CE_WARN, "mod_install: "
"modlinkage structure is not MODREV_1\n");
"modlinkage structure is not MODREV_1\n");
return (EINVAL);
}
linkpp = (struct modlmisc **)&modlp->ml_linkage[0];
@ -168,4 +168,4 @@ mod_info(struct modlinkage *modlp, struct modinfo *modinfop)
if (retval == 0)
return (1);
return (0);
}
}

View File

@ -701,16 +701,13 @@ kcf_prov_kstat_update(kstat_t *ksp, int rw)
ks_data = ksp->ks_data;
ks_data->ps_ops_total.value.ui64 =
pd->pd_sched_info.ks_ndispatches;
ks_data->ps_ops_failed.value.ui64 =
pd->pd_sched_info.ks_nfails;
ks_data->ps_ops_busy_rval.value.ui64 =
pd->pd_sched_info.ks_nbusy_rval;
ks_data->ps_ops_total.value.ui64 = pd->pd_sched_info.ks_ndispatches;
ks_data->ps_ops_failed.value.ui64 = pd->pd_sched_info.ks_nfails;
ks_data->ps_ops_busy_rval.value.ui64 = pd->pd_sched_info.ks_nbusy_rval;
ks_data->ps_ops_passed.value.ui64 =
pd->pd_sched_info.ks_ndispatches -
pd->pd_sched_info.ks_nfails -
pd->pd_sched_info.ks_nbusy_rval;
pd->pd_sched_info.ks_ndispatches -
pd->pd_sched_info.ks_nfails -
pd->pd_sched_info.ks_nbusy_rval;
return (0);
}

View File

@ -608,7 +608,7 @@ fletcher_4_kstat_data(char *buf, size_t size, void *data)
{
struct fletcher_4_kstat *fastest_stat =
&fletcher_4_stat_data[fletcher_4_supp_impls_cnt];
struct fletcher_4_kstat *curr_stat = (struct fletcher_4_kstat *) data;
struct fletcher_4_kstat *curr_stat = (struct fletcher_4_kstat *)data;
ssize_t off = 0;
if (curr_stat == fastest_stat) {
@ -623,9 +623,9 @@ fletcher_4_kstat_data(char *buf, size_t size, void *data)
off += snprintf(buf + off, size - off, "%-17s",
fletcher_4_supp_impls[id]->name);
off += snprintf(buf + off, size - off, "%-15llu",
(u_longlong_t) curr_stat->native);
(u_longlong_t)curr_stat->native);
off += snprintf(buf + off, size - off, "%-15llu\n",
(u_longlong_t) curr_stat->byteswap);
(u_longlong_t)curr_stat->byteswap);
}
return (0);
@ -723,7 +723,7 @@ fletcher_4_init(void)
/* move supported impl into fletcher_4_supp_impls */
for (i = 0, c = 0; i < ARRAY_SIZE(fletcher_4_impls); i++) {
curr_impl = (fletcher_4_ops_t *) fletcher_4_impls[i];
curr_impl = (fletcher_4_ops_t *)fletcher_4_impls[i];
if (curr_impl->valid && curr_impl->valid())
fletcher_4_supp_impls[c++] = curr_impl;
@ -754,7 +754,7 @@ fletcher_4_init(void)
/* install kstats for all implementations */
fletcher_4_kstat = kstat_create("zfs", 0, "fletcher_4_bench", "misc",
KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
if (fletcher_4_kstat != NULL) {
fletcher_4_kstat->ks_data = NULL;
fletcher_4_kstat->ks_ndata = UINT32_MAX;

View File

@ -193,7 +193,7 @@ uio_prefaultpages(ssize_t n, struct uio *uio)
*/
p = iov->iov_base + skip;
while (cnt) {
if (fuword8((uint8_t *) p, &tmp))
if (fuword8((uint8_t *)p, &tmp))
return;
incr = MIN(cnt, PAGESIZE);
p += incr;
@ -203,7 +203,7 @@ uio_prefaultpages(ssize_t n, struct uio *uio)
* touch the last byte in case it straddles a page.
*/
p--;
if (fuword8((uint8_t *) p, &tmp))
if (fuword8((uint8_t *)p, &tmp))
return;
}
}

View File

@ -407,7 +407,7 @@ struct page;
#define kpm_enable 1
#define abd_alloc_chunk(o) \
((struct page *) umem_alloc_aligned(PAGESIZE << (o), 64, KM_SLEEP))
((struct page *)umem_alloc_aligned(PAGESIZE << (o), 64, KM_SLEEP))
#define abd_free_chunk(chunk, o) umem_free(chunk, PAGESIZE << (o))
#define zfs_kmap_atomic(chunk, km) ((void *)chunk)
#define zfs_kunmap_atomic(addr, km) do { (void)(addr); } while (0)
@ -1486,8 +1486,8 @@ abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
else
pos = abd->abd_u.abd_scatter.abd_offset + off;
return ((pos + size + PAGESIZE - 1) >> PAGE_SHIFT)
- (pos >> PAGE_SHIFT);
return ((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
(pos >> PAGE_SHIFT);
}
/*
@ -1537,6 +1537,7 @@ abd_scatter_bio_map_off(struct bio *bio, abd_t *abd,
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
"Toggle whether ABD allocations must be linear.");
/* CSTYLED */
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
"Maximum order allocation used for a scatter ABD.");

View File

@ -1510,7 +1510,7 @@ arc_cksum_compute(arc_buf_t *buf)
void
arc_buf_sigsegv(int sig, siginfo_t *si, void *unused)
{
panic("Got SIGSEGV at address: 0x%lx\n", (long) si->si_addr);
panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
}
#endif
@ -7688,6 +7688,7 @@ EXPORT_SYMBOL(arc_getbuf_func);
EXPORT_SYMBOL(arc_add_prune_callback);
EXPORT_SYMBOL(arc_remove_prune_callback);
/* BEGIN CSTYLED */
module_param(zfs_arc_min, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_min, "Min arc size");
@ -7786,5 +7787,5 @@ MODULE_PARM_DESC(zfs_arc_dnode_limit_percent,
module_param(zfs_arc_dnode_reduce_percent, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_dnode_reduce_percent,
"Percentage of excess dnodes to try to unpin");
/* END CSTYLED */
#endif

View File

@ -790,7 +790,7 @@ dbuf_verify(dmu_buf_impl_t *db)
} else {
/* db is pointed to by an indirect block */
ASSERTV(int epb = db->db_parent->db.db_size >>
SPA_BLKPTRSHIFT);
SPA_BLKPTRSHIFT);
ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
ASSERT3U(db->db_parent->db.db_object, ==,
db->db.db_object);
@ -2686,8 +2686,7 @@ __dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
ASSERT3P(dh->dh_parent, ==, NULL);
dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
dh->dh_fail_sparse, &dh->dh_parent,
&dh->dh_bp, dh);
dh->dh_fail_sparse, &dh->dh_parent, &dh->dh_bp, dh);
if (dh->dh_fail_sparse) {
if (dh->dh_err == 0 &&
dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
@ -2701,7 +2700,7 @@ __dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
if (dh->dh_err && dh->dh_err != ENOENT)
return (dh->dh_err);
dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
dh->dh_parent, dh->dh_bp);
dh->dh_parent, dh->dh_bp);
}
if (dh->dh_fail_uncached && dh->dh_db->db_state != DB_CACHED) {
@ -2775,7 +2774,7 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
dh = kmem_alloc(sizeof (struct dbuf_hold_impl_data) *
DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP);
__dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse,
fail_uncached, tag, dbp, 0);
fail_uncached, tag, dbp, 0);
error = __dbuf_hold_impl(dh);
@ -3884,23 +3883,23 @@ EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_freeable);
EXPORT_SYMBOL(dmu_buf_get_blkptr);
/* BEGIN CSTYLED */
module_param(dbuf_cache_max_bytes, ulong, 0644);
MODULE_PARM_DESC(dbuf_cache_max_bytes,
"Maximum size in bytes of the dbuf cache.");
"Maximum size in bytes of the dbuf cache.");
module_param(dbuf_cache_hiwater_pct, uint, 0644);
MODULE_PARM_DESC(dbuf_cache_hiwater_pct,
"Percentage over dbuf_cache_max_bytes when dbufs \
much be evicted directly.");
"Percentage over dbuf_cache_max_bytes when dbufs \
much be evicted directly.");
module_param(dbuf_cache_lowater_pct, uint, 0644);
MODULE_PARM_DESC(dbuf_cache_lowater_pct,
"Percentage below dbuf_cache_max_bytes \
when the evict thread stop evicting dbufs.");
"Percentage below dbuf_cache_max_bytes \
when the evict thread stop evicting dbufs.");
module_param(dbuf_cache_max_shift, int, 0644);
MODULE_PARM_DESC(dbuf_cache_max_shift,
"Cap the size of the dbuf cache to log2 fraction of arc size.");
"Cap the size of the dbuf cache to log2 fraction of arc size.");
/* END CSTYLED */
#endif
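
Most of the dbuf.c pairs above are identical except for leading whitespace: the continuation-indent check enabled by -c appears to want wrapped argument lists and wrapped strings indented by a consistent four extra spaces rather than aligned under the opening parenthesis, which is why calls such as dbuf_create() and the MODULE_PARM_DESC strings are re-indented with no textual change. A rough sketch of the shape (tabs rendered as spaces here, helper names invented):

    #include <stddef.h>

    /* invented helpers, for layout only; not the real dbuf API */
    static int
    create_stub(int dn, int level, int blkid, void *parent, void *bp)
    {
        return (dn + level + blkid + (parent != NULL) + (bp != NULL));
    }

    static int
    hold_stub(int dn, int level, int blkid)
    {
        /* continuation line indented four spaces past the statement */
        return (create_stub(dn, level, blkid,
            NULL, NULL));
    }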

View File

@ -1591,7 +1591,7 @@ dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
8, 1, &one, tx));
8, 1, &one, tx));
}
if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_EMBED_DATA) {

View File

@ -336,6 +336,7 @@ dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data)
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/* BEGIN CSTYLED */
module_param(zfs_prefetch_disable, int, 0644);
MODULE_PARM_DESC(zfs_prefetch_disable, "Disable all ZFS prefetching");
@ -351,4 +352,5 @@ MODULE_PARM_DESC(zfetch_max_distance,
module_param(zfetch_array_rd_sz, ulong, 0644);
MODULE_PARM_DESC(zfetch_array_rd_sz, "Number of bytes in a array_read");
/* END CSTYLED */
#endif

View File

@ -632,7 +632,7 @@ dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
(bonustype == DMU_OT_SA && bonuslen == 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=,
DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
dn_slots = dn_slots > 0 ? dn_slots : DNODE_MIN_SLOTS;

View File

@ -1087,6 +1087,7 @@ dsl_pool_config_held_writer(dsl_pool_t *dp)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);
/* BEGIN CSTYLED */
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_percent, "percent of ram can be dirty");
@ -1112,4 +1113,5 @@ MODULE_PARM_DESC(zfs_dirty_data_sync, "sync txg when this much dirty data");
module_param(zfs_delay_scale, ulong, 0644);
MODULE_PARM_DESC(zfs_delay_scale, "how quickly delay approaches infinity");
/* END CSTYLED */
#endif

View File

@ -73,7 +73,7 @@ int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
int dsl_scan_delay_completion = B_FALSE; /* set to delay scan completion */
/* max number of blocks to free in a single TXG */
ulong zfs_free_max_blocks = 100000;
unsigned long zfs_free_max_blocks = 100000;
#define DSL_SCAN_IS_SCRUB_RESILVER(scn) \
((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
@ -1985,6 +1985,7 @@ MODULE_PARM_DESC(zfs_no_scrub_io, "Set to disable scrub I/O");
module_param(zfs_no_scrub_prefetch, int, 0644);
MODULE_PARM_DESC(zfs_no_scrub_prefetch, "Set to disable scrub prefetching");
/* CSTYLED */
module_param(zfs_free_max_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_free_max_blocks, "Max number of blocks freed in one txg");
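
This dsl_scan.c hunk makes two changes beyond whitespace: the declaration of zfs_free_max_blocks moves from the non-standard ulong typedef to unsigned long, and its module_param() line gains a CSTYLED marker so the stricter checks skip the macro invocation. The second argument of module_param() stays ulong because it is a parameter-type token pasted into the kernel's param_ops name, not a C type. A minimal, hypothetical module-parameter sketch of the same pattern, not taken from the commit:

    #include <linux/module.h>

    /* hypothetical tunable mirroring the zfs_free_max_blocks pattern */
    static unsigned long example_free_max_blocks = 100000;

    /* CSTYLED */
    module_param(example_free_max_blocks, ulong, 0644);
    MODULE_PARM_DESC(example_free_max_blocks,
        "Illustrative limit on blocks freed in one txg");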

View File

@ -431,7 +431,7 @@ zfs_zevent_alloc(void)
ev = kmem_zalloc(sizeof (zevent_t), KM_SLEEP);
list_create(&ev->ev_ze_list, sizeof (zfs_zevent_t),
offsetof(zfs_zevent_t, ze_node));
offsetof(zfs_zevent_t, ze_node));
list_link_init(&ev->ev_node);
return (ev);

View File

@ -64,7 +64,7 @@ gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
return (s_len);
}
return ((size_t) dstlen);
return ((size_t)dstlen);
}
/*ARGSUSED*/

View File

@ -1006,7 +1006,7 @@ void
lz4_init(void)
{
lz4_cache = kmem_cache_create("lz4_cache",
sizeof (struct refTables), 0, NULL, NULL, NULL, NULL, NULL, 0);
sizeof (struct refTables), 0, NULL, NULL, NULL, NULL, NULL, 0);
}
void

View File

@ -2924,37 +2924,44 @@ metaslab_check_free(spa_t *spa, const blkptr_t *bp)
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/* CSTYLED */
module_param(metaslab_aliquot, ulong, 0644);
module_param(metaslab_debug_load, int, 0644);
module_param(metaslab_debug_unload, int, 0644);
module_param(metaslab_preload_enabled, int, 0644);
module_param(zfs_mg_noalloc_threshold, int, 0644);
module_param(zfs_mg_fragmentation_threshold, int, 0644);
module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
module_param(metaslab_fragmentation_factor_enabled, int, 0644);
module_param(metaslab_lba_weighting_enabled, int, 0644);
module_param(metaslab_bias_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_aliquot,
"allocation granularity (a.k.a. stripe size)");
module_param(metaslab_debug_load, int, 0644);
MODULE_PARM_DESC(metaslab_debug_load,
"load all metaslabs when pool is first opened");
module_param(metaslab_debug_unload, int, 0644);
MODULE_PARM_DESC(metaslab_debug_unload,
"prevent metaslabs from being unloaded");
module_param(metaslab_preload_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_preload_enabled,
"preload potential metaslabs during reassessment");
module_param(zfs_mg_noalloc_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
"percentage of free space for metaslab group to allow allocation");
module_param(zfs_mg_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
"fragmentation for metaslab group to allow allocation");
module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
"fragmentation for metaslab to allow allocation");
module_param(metaslab_fragmentation_factor_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
"use the fragmentation metric to prefer less fragmented metaslabs");
module_param(metaslab_lba_weighting_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
"prefer metaslabs with lower LBAs");
module_param(metaslab_bias_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_bias_enabled,
"enable metaslab group biasing");
#endif /* _KERNEL && HAVE_SPL */

View File

@ -6996,6 +6996,7 @@ module_param(spa_load_verify_data, int, 0644);
MODULE_PARM_DESC(spa_load_verify_data,
"Set to traverse data on pool import");
/* CSTYLED */
module_param(zio_taskq_batch_pct, uint, 0444);
MODULE_PARM_DESC(zio_taskq_batch_pct,
"Percentage of CPUs to run an IO worker thread");

View File

@ -419,7 +419,7 @@ spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
*/
if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
VERIFY0(nvlist_lookup_string(spa->spa_config,
ZPOOL_CONFIG_POOL_NAME, &pool_name));
ZPOOL_CONFIG_POOL_NAME, &pool_name));
} else
pool_name = spa_name(spa);

View File

@ -2093,9 +2093,9 @@ EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_namespace_lock);
/* BEGIN CSTYLED */
module_param(zfs_flags, uint, 0644);
MODULE_PARM_DESC(zfs_flags, "Set additional debugging flags");
@ -2118,4 +2118,5 @@ MODULE_PARM_DESC(spa_asize_inflation,
module_param(spa_slop_shift, int, 0644);
MODULE_PARM_DESC(spa_slop_shift, "Reserved free space in pool");
/* END CSTYLED */
#endif

View File

@ -576,7 +576,7 @@ retry:
/* bio_alloc() with __GFP_WAIT never returns NULL */
dr->dr_bio[i] = bio_alloc(GFP_NOIO,
MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
BIO_MAX_PAGES));
BIO_MAX_PAGES));
if (unlikely(dr->dr_bio[i] == NULL)) {
vdev_disk_dio_free(dr);
return (ENOMEM);
@ -593,7 +593,7 @@ retry:
/* Remaining size is returned to become the new size */
bio_size = bio_map_abd_off(dr->dr_bio[i], zio->io_abd,
bio_size, abd_offset);
bio_size, abd_offset);
/* Advance in buffer and construct another bio if needed */
abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);

View File

@ -371,11 +371,11 @@ vdev_queue_init(vdev_t *vd)
avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
sizeof (zio_t), offsetof(struct zio, io_queue_node));
avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
vdev_queue_offset_compare, sizeof (zio_t),
offsetof(struct zio, io_offset_node));
vdev_queue_offset_compare, sizeof (zio_t),
offsetof(struct zio, io_offset_node));
avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
vdev_queue_offset_compare, sizeof (zio_t),
offsetof(struct zio, io_offset_node));
vdev_queue_offset_compare, sizeof (zio_t),
offsetof(struct zio, io_offset_node));
for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
int (*compfn) (const void *, const void *);
@ -390,7 +390,7 @@ vdev_queue_init(vdev_t *vd)
else
compfn = vdev_queue_offset_compare;
avl_create(vdev_queue_class_tree(vq, p), compfn,
sizeof (zio_t), offsetof(struct zio, io_queue_node));
sizeof (zio_t), offsetof(struct zio, io_queue_node));
}
vq->vq_lastoffset = 0;

View File

@ -124,10 +124,10 @@ vdev_raidz_math_get_ops()
break;
#endif
case IMPL_ORIGINAL:
ops = (raidz_impl_ops_t *) &vdev_raidz_original_impl;
ops = (raidz_impl_ops_t *)&vdev_raidz_original_impl;
break;
case IMPL_SCALAR:
ops = (raidz_impl_ops_t *) &vdev_raidz_scalar_impl;
ops = (raidz_impl_ops_t *)&vdev_raidz_scalar_impl;
break;
default:
ASSERT3U(impl, <, raidz_supp_impl_cnt);
@ -162,7 +162,7 @@ vdev_raidz_math_generate(raidz_map_t *rm)
default:
gen_parity = NULL;
cmn_err(CE_PANIC, "invalid RAID-Z configuration %d",
raidz_parity(rm));
raidz_parity(rm));
break;
}
@ -196,7 +196,7 @@ reconstruct_fun_pq_sel(raidz_map_t *rm, const int *parity_valid,
return (rm->rm_ops->rec[RAIDZ_REC_Q]);
}
} else if (nbaddata == 2 &&
parity_valid[CODE_P] && parity_valid[CODE_Q]) {
parity_valid[CODE_P] && parity_valid[CODE_Q]) {
return (rm->rm_ops->rec[RAIDZ_REC_PQ]);
}
return ((raidz_rec_f) NULL);
@ -223,8 +223,8 @@ reconstruct_fun_pqr_sel(raidz_map_t *rm, const int *parity_valid,
return (rm->rm_ops->rec[RAIDZ_REC_QR]);
}
} else if (nbaddata == 3 &&
parity_valid[CODE_P] && parity_valid[CODE_Q] &&
parity_valid[CODE_R]) {
parity_valid[CODE_P] && parity_valid[CODE_Q] &&
parity_valid[CODE_R]) {
return (rm->rm_ops->rec[RAIDZ_REC_PQR]);
}
return ((raidz_rec_f) NULL);
@ -300,8 +300,8 @@ raidz_math_kstat_headers(char *buf, size_t size)
static int
raidz_math_kstat_data(char *buf, size_t size, void *data)
{
raidz_impl_kstat_t * fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
raidz_impl_kstat_t * cstat = (raidz_impl_kstat_t *) data;
raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
raidz_impl_kstat_t *cstat = (raidz_impl_kstat_t *)data;
ssize_t off = 0;
int i;
@ -328,11 +328,11 @@ raidz_math_kstat_data(char *buf, size_t size, void *data)
for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++)
off += snprintf(buf + off, size - off, "%-16llu",
(u_longlong_t) cstat->gen[i]);
(u_longlong_t)cstat->gen[i]);
for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++)
off += snprintf(buf + off, size - off, "%-16llu",
(u_longlong_t) cstat->rec[i]);
(u_longlong_t)cstat->rec[i]);
}
(void) snprintf(buf + off, size - off, "\n");
@ -392,7 +392,7 @@ benchmark_raidz_impl(raidz_map_t *bench_rm, const int fn, benchmark_fn bench_fn)
uint64_t run_cnt, speed, best_speed = 0;
hrtime_t t_start, t_diff;
raidz_impl_ops_t *curr_impl;
raidz_impl_kstat_t * fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
int impl, i;
for (impl = 0; impl < raidz_supp_impl_cnt; impl++) {
@ -446,14 +446,14 @@ vdev_raidz_math_init(void)
/* move supported impl into raidz_supp_impl */
for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
curr_impl = (raidz_impl_ops_t *) raidz_all_maths[i];
curr_impl = (raidz_impl_ops_t *)raidz_all_maths[i];
/* initialize impl */
if (curr_impl->init)
curr_impl->init();
if (curr_impl->is_supported())
raidz_supp_impl[c++] = (raidz_impl_ops_t *) curr_impl;
raidz_supp_impl[c++] = (raidz_impl_ops_t *)curr_impl;
}
membar_producer(); /* complete raidz_supp_impl[] init */
raidz_supp_impl_cnt = c; /* number of supported impl */
@ -505,7 +505,7 @@ vdev_raidz_math_init(void)
/* install kstats for all impl */
raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench", "misc",
KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
if (raidz_math_kstat != NULL) {
raidz_math_kstat->ks_data = NULL;
@ -542,7 +542,7 @@ vdev_raidz_math_fini(void)
}
static const struct {
char *name;
char *name;
uint32_t sel;
} math_impl_opts[] = {
#if !defined(_KERNEL)
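
Besides the cast and continuation cleanups, the vdev_raidz_math.c pairs above drop the blank between the asterisk and the identifier in declarations, so raidz_impl_kstat_t * fstat becomes raidz_impl_kstat_t *fstat; the picky checks seem to treat the dangling asterisk as a style error. A short standalone illustration with invented names:

    #include <stddef.h>

    struct impl_stat {
        unsigned long gen;
        unsigned long rec;
    };

    static unsigned long
    sum_stats(const struct impl_stat *stats, size_t n)
    {
        /* "const struct impl_stat * s" is the form being cleaned up */
        const struct impl_stat *s;
        unsigned long total = 0;
        size_t i;

        for (i = 0; i < n; i++) {
            s = &stats[i];
            total += s->gen + s->rec;
        }
        return (total);
    }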

View File

@ -66,14 +66,14 @@ typedef struct v {
uint8_t b[ELEM_SIZE] __attribute__((aligned(ELEM_SIZE)));
} v_t;
#define PREFETCHNTA(ptr, offset) \
#define PREFETCHNTA(ptr, offset) \
{ \
__asm( \
"prefetchnta " #offset "(%[MEM])\n" \
: : [MEM] "r" (ptr)); \
}
#define PREFETCH(ptr, offset) \
#define PREFETCH(ptr, offset) \
{ \
__asm( \
"prefetcht0 " #offset "(%[MEM])\n" \
@ -142,7 +142,7 @@ typedef struct v {
} \
}
#define COPY(r...) \
#define COPY(r...) \
{ \
switch (REG_CNT(r)) { \
case 8: \
@ -162,7 +162,7 @@ typedef struct v {
} \
}
#define LOAD(src, r...) \
#define LOAD(src, r...) \
{ \
switch (REG_CNT(r)) { \
case 4: \
@ -184,7 +184,7 @@ typedef struct v {
} \
}
#define STORE(dst, r...) \
#define STORE(dst, r...) \
{ \
switch (REG_CNT(r)) { \
case 4: \
@ -211,8 +211,8 @@ typedef struct v {
__asm("vzeroupper"); \
}
#define MUL2_SETUP() \
{ \
#define MUL2_SETUP() \
{ \
__asm("vmovq %0, %%xmm14" :: "r"(0x1d1d1d1d1d1d1d1d)); \
__asm("vpbroadcastq %xmm14, %zmm14"); \
__asm("vmovq %0, %%xmm13" :: "r"(0x8080808080808080)); \
@ -222,7 +222,7 @@ typedef struct v {
__asm("vpxorq %zmm15, %zmm15 ,%zmm15"); \
}
#define _MUL2(r...) \
#define _MUL2(r...) \
{ \
switch (REG_CNT(r)) { \
case 2: \
@ -237,8 +237,8 @@ typedef struct v {
"vpsubq %zmm9, %zmm11, %zmm11\n" \
"vpsllq $1, %" VR0(r)", %" VR0(r) "\n" \
"vpsllq $1, %" VR1(r)", %" VR1(r) "\n" \
"vpandq %zmm10, %zmm14, %zmm10\n" \
"vpandq %zmm11, %zmm14, %zmm11\n" \
"vpandq %zmm10, %zmm14, %zmm10\n" \
"vpandq %zmm11, %zmm14, %zmm11\n" \
"vpternlogd $0x6c,%zmm12, %zmm10, %" VR0(r) "\n" \
"vpternlogd $0x6c,%zmm12, %zmm11, %" VR1(r)); \
break; \
@ -355,60 +355,60 @@ static const uint8_t __attribute__((aligned(32))) _mul_mask = 0x0F;
#define ADD_STRIDE 4
#define ADD_DEFINE() {}
#define ADD_D 0, 1, 2, 3
#define ADD_D 0, 1, 2, 3
#define MUL_STRIDE 4
#define MUL_DEFINE() {}
#define MUL_DEFINE() {}
#define MUL_D 0, 1, 2, 3
#define GEN_P_DEFINE() {}
#define GEN_P_STRIDE 4
#define GEN_P_P 0, 1, 2, 3
#define GEN_PQ_DEFINE() {}
#define GEN_PQ_DEFINE() {}
#define GEN_PQ_STRIDE 4
#define GEN_PQ_D 0, 1, 2, 3
#define GEN_PQ_P 4, 5, 6, 7
#define GEN_PQ_Q 20, 21, 22, 23
#define GEN_PQR_DEFINE() {}
#define GEN_PQR_DEFINE() {}
#define GEN_PQR_STRIDE 2
#define GEN_PQR_D 0, 1
#define GEN_PQR_P 2, 3
#define GEN_PQR_Q 4, 5
#define GEN_PQR_R 6, 7
#define REC_P_DEFINE() {}
#define REC_P_DEFINE() {}
#define REC_P_STRIDE 4
#define REC_P_X 0, 1, 2, 3
#define REC_Q_DEFINE() {}
#define REC_Q_DEFINE() {}
#define REC_Q_STRIDE 4
#define REC_Q_X 0, 1, 2, 3
#define REC_R_DEFINE() {}
#define REC_R_DEFINE() {}
#define REC_R_STRIDE 4
#define REC_R_X 0, 1, 2, 3
#define REC_PQ_DEFINE() {}
#define REC_PQ_DEFINE() {}
#define REC_PQ_STRIDE 4
#define REC_PQ_X 0, 1, 2, 3
#define REC_PQ_Y 4, 5, 6, 7
#define REC_PQ_D 20, 21, 22, 23
#define REC_PR_DEFINE() {}
#define REC_PR_DEFINE() {}
#define REC_PR_STRIDE 4
#define REC_PR_X 0, 1, 2, 3
#define REC_PR_Y 4, 5, 6, 7
#define REC_PR_D 20, 21, 22, 23
#define REC_QR_DEFINE() {}
#define REC_QR_DEFINE() {}
#define REC_QR_STRIDE 4
#define REC_QR_X 0, 1, 2, 3
#define REC_QR_Y 4, 5, 6, 7
#define REC_QR_D 20, 21, 22, 23
#define REC_PQR_DEFINE() {}
#define REC_PQR_DEFINE() {}
#define REC_PQR_STRIDE 2
#define REC_PQR_X 0, 1
#define REC_PQR_Y 2, 3
@ -428,8 +428,8 @@ static boolean_t
raidz_will_avx512bw_work(void)
{
return (zfs_avx_available() &&
zfs_avx512f_available() &&
zfs_avx512bw_available());
zfs_avx512f_available() &&
zfs_avx512bw_available());
}
const raidz_impl_ops_t vdev_raidz_avx512bw_impl = {

View File

@ -471,8 +471,8 @@ static boolean_t
raidz_will_avx512f_work(void)
{
return (zfs_avx_available() &&
zfs_avx2_available() &&
zfs_avx512f_available());
zfs_avx2_available() &&
zfs_avx512f_available());
}
const raidz_impl_ops_t vdev_raidz_avx512f_impl = {

View File

@ -158,7 +158,7 @@ raidz_rec_pqr_coeff(const raidz_map_t *rm, const int *tgtidx, unsigned *coeff)
static int
raidz_zero_abd_cb(void *dc, size_t dsize, void *private)
{
v_t *dst = (v_t *) dc;
v_t *dst = (v_t *)dc;
size_t i;
ZERO_DEFINE();
@ -193,8 +193,8 @@ raidz_zero_abd_cb(void *dc, size_t dsize, void *private)
static int
raidz_copy_abd_cb(void *dc, void *sc, size_t size, void *private)
{
v_t *dst = (v_t *) dc;
const v_t *src = (v_t *) sc;
v_t *dst = (v_t *)dc;
const v_t *src = (v_t *)sc;
size_t i;
COPY_DEFINE();
@ -232,8 +232,8 @@ raidz_copy_abd_cb(void *dc, void *sc, size_t size, void *private)
static int
raidz_add_abd_cb(void *dc, void *sc, size_t size, void *private)
{
v_t *dst = (v_t *) dc;
const v_t *src = (v_t *) sc;
v_t *dst = (v_t *)dc;
const v_t *src = (v_t *)sc;
size_t i;
ADD_DEFINE();
@ -270,8 +270,8 @@ raidz_add_abd_cb(void *dc, void *sc, size_t size, void *private)
static int
raidz_mul_abd_cb(void *dc, size_t size, void *private)
{
const unsigned mul = *((unsigned *) private);
v_t *d = (v_t *) dc;
const unsigned mul = *((unsigned *)private);
v_t *d = (v_t *)dc;
size_t i;
MUL_DEFINE();
@ -389,9 +389,9 @@ static void
raidz_gen_pq_add(void **c, const void *dc, const size_t csize,
const size_t dsize)
{
v_t *p = (v_t *) c[0];
v_t *q = (v_t *) c[1];
const v_t *d = (v_t *) dc;
v_t *p = (v_t *)c[0];
v_t *q = (v_t *)c[1];
const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const qend = q + (csize / sizeof (v_t));
@ -439,7 +439,7 @@ raidz_generate_pq_impl(raidz_map_t * const rm)
dsize = rm->rm_col[c].rc_size;
abd_raidz_gen_iterate(cabds, dabd, csize, dsize, 2,
raidz_gen_pq_add);
raidz_gen_pq_add);
}
raidz_math_end();
@ -459,10 +459,10 @@ static void
raidz_gen_pqr_add(void **c, const void *dc, const size_t csize,
const size_t dsize)
{
v_t *p = (v_t *) c[0];
v_t *q = (v_t *) c[1];
v_t *r = (v_t *) c[CODE_R];
const v_t *d = (v_t *) dc;
v_t *p = (v_t *)c[0];
v_t *q = (v_t *)c[1];
v_t *r = (v_t *)c[CODE_R];
const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const qend = q + (csize / sizeof (v_t));
@ -514,7 +514,7 @@ raidz_generate_pqr_impl(raidz_map_t * const rm)
dsize = rm->rm_col[c].rc_size;
abd_raidz_gen_iterate(cabds, dabd, csize, dsize, 3,
raidz_gen_pqr_add);
raidz_gen_pqr_add);
}
raidz_math_end();
@ -628,8 +628,8 @@ static void
raidz_syn_q_abd(void **xc, const void *dc, const size_t xsize,
const size_t dsize)
{
v_t *x = (v_t *) xc[TARGET_X];
const v_t *d = (v_t *) dc;
v_t *x = (v_t *)xc[TARGET_X];
const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const xend = x + (xsize / sizeof (v_t));
@ -719,8 +719,8 @@ static void
raidz_syn_r_abd(void **xc, const void *dc, const size_t tsize,
const size_t dsize)
{
v_t *x = (v_t *) xc[TARGET_X];
const v_t *d = (v_t *) dc;
v_t *x = (v_t *)xc[TARGET_X];
const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const xend = x + (tsize / sizeof (v_t));
@ -784,7 +784,7 @@ raidz_reconstruct_r_impl(raidz_map_t *rm, const int *tgtidx)
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 1,
raidz_syn_r_abd);
raidz_syn_r_abd);
}
/* add R to the syndrome */
@ -811,9 +811,9 @@ static void
raidz_syn_pq_abd(void **tc, const void *dc, const size_t tsize,
const size_t dsize)
{
v_t *x = (v_t *) tc[TARGET_X];
v_t *y = (v_t *) tc[TARGET_Y];
const v_t *d = (v_t *) dc;
v_t *x = (v_t *)tc[TARGET_X];
v_t *y = (v_t *)tc[TARGET_Y];
const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const yend = y + (tsize / sizeof (v_t));
@ -843,11 +843,11 @@ static void
raidz_rec_pq_abd(void **tc, const size_t tsize, void **c,
const unsigned *mul)
{
v_t *x = (v_t *) tc[TARGET_X];
v_t *y = (v_t *) tc[TARGET_Y];
v_t *x = (v_t *)tc[TARGET_X];
v_t *y = (v_t *)tc[TARGET_Y];
const v_t * const xend = x + (tsize / sizeof (v_t));
const v_t *p = (v_t *) c[CODE_P];
const v_t *q = (v_t *) c[CODE_Q];
const v_t *p = (v_t *)c[CODE_P];
const v_t *q = (v_t *)c[CODE_Q];
REC_PQ_DEFINE();
@ -939,7 +939,7 @@ raidz_reconstruct_pq_impl(raidz_map_t *rm, const int *tgtidx)
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 2,
raidz_syn_pq_abd);
raidz_syn_pq_abd);
}
abd_raidz_rec_iterate(cabds, tabds, xsize, 2, raidz_rec_pq_abd, coeff);
@ -969,9 +969,9 @@ static void
raidz_syn_pr_abd(void **c, const void *dc, const size_t tsize,
const size_t dsize)
{
v_t *x = (v_t *) c[TARGET_X];
v_t *y = (v_t *) c[TARGET_Y];
const v_t *d = (v_t *) dc;
v_t *x = (v_t *)c[TARGET_X];
v_t *y = (v_t *)c[TARGET_Y];
const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const yend = y + (tsize / sizeof (v_t));
@ -1001,11 +1001,11 @@ static void
raidz_rec_pr_abd(void **t, const size_t tsize, void **c,
const unsigned *mul)
{
v_t *x = (v_t *) t[TARGET_X];
v_t *y = (v_t *) t[TARGET_Y];
v_t *x = (v_t *)t[TARGET_X];
v_t *y = (v_t *)t[TARGET_Y];
const v_t * const xend = x + (tsize / sizeof (v_t));
const v_t *p = (v_t *) c[CODE_P];
const v_t *q = (v_t *) c[CODE_Q];
const v_t *p = (v_t *)c[CODE_P];
const v_t *q = (v_t *)c[CODE_Q];
REC_PR_DEFINE();
@ -1095,7 +1095,7 @@ raidz_reconstruct_pr_impl(raidz_map_t *rm, const int *tgtidx)
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 2,
raidz_syn_pr_abd);
raidz_syn_pr_abd);
}
abd_raidz_rec_iterate(cabds, tabds, xsize, 2, raidz_rec_pr_abd, coeff);
@ -1127,10 +1127,10 @@ static void
raidz_syn_qr_abd(void **c, const void *dc, const size_t tsize,
const size_t dsize)
{
v_t *x = (v_t *) c[TARGET_X];
v_t *y = (v_t *) c[TARGET_Y];
v_t *x = (v_t *)c[TARGET_X];
v_t *y = (v_t *)c[TARGET_Y];
const v_t * const xend = x + (tsize / sizeof (v_t));
const v_t *d = (v_t *) dc;
const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
SYN_QR_DEFINE();
@ -1161,11 +1161,11 @@ static void
raidz_rec_qr_abd(void **t, const size_t tsize, void **c,
const unsigned *mul)
{
v_t *x = (v_t *) t[TARGET_X];
v_t *y = (v_t *) t[TARGET_Y];
v_t *x = (v_t *)t[TARGET_X];
v_t *y = (v_t *)t[TARGET_Y];
const v_t * const xend = x + (tsize / sizeof (v_t));
const v_t *p = (v_t *) c[CODE_P];
const v_t *q = (v_t *) c[CODE_Q];
const v_t *p = (v_t *)c[CODE_P];
const v_t *q = (v_t *)c[CODE_Q];
REC_QR_DEFINE();
@ -1258,7 +1258,7 @@ raidz_reconstruct_qr_impl(raidz_map_t *rm, const int *tgtidx)
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 2,
raidz_syn_qr_abd);
raidz_syn_qr_abd);
}
abd_raidz_rec_iterate(cabds, tabds, xsize, 2, raidz_rec_qr_abd, coeff);
@ -1291,11 +1291,11 @@ static void
raidz_syn_pqr_abd(void **c, const void *dc, const size_t tsize,
const size_t dsize)
{
v_t *x = (v_t *) c[TARGET_X];
v_t *y = (v_t *) c[TARGET_Y];
v_t *z = (v_t *) c[TARGET_Z];
v_t *x = (v_t *)c[TARGET_X];
v_t *y = (v_t *)c[TARGET_Y];
v_t *z = (v_t *)c[TARGET_Z];
const v_t * const yend = y + (tsize / sizeof (v_t));
const v_t *d = (v_t *) dc;
const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
SYN_PQR_DEFINE();
@ -1328,13 +1328,13 @@ static void
raidz_rec_pqr_abd(void **t, const size_t tsize, void **c,
const unsigned * const mul)
{
v_t *x = (v_t *) t[TARGET_X];
v_t *y = (v_t *) t[TARGET_Y];
v_t *z = (v_t *) t[TARGET_Z];
v_t *x = (v_t *)t[TARGET_X];
v_t *y = (v_t *)t[TARGET_Y];
v_t *z = (v_t *)t[TARGET_Z];
const v_t * const xend = x + (tsize / sizeof (v_t));
const v_t *p = (v_t *) c[CODE_P];
const v_t *q = (v_t *) c[CODE_Q];
const v_t *r = (v_t *) c[CODE_R];
const v_t *p = (v_t *)c[CODE_P];
const v_t *q = (v_t *)c[CODE_Q];
const v_t *r = (v_t *)c[CODE_R];
REC_PQR_DEFINE();
@ -1451,7 +1451,7 @@ raidz_reconstruct_pqr_impl(raidz_map_t *rm, const int *tgtidx)
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 3,
raidz_syn_pqr_abd);
raidz_syn_pqr_abd);
}
abd_raidz_rec_iterate(cabds, tabds, xsize, 3, raidz_rec_pqr_abd, coeff);

View File

@ -864,7 +864,7 @@ zfsctl_snapdir_rename(struct inode *sdip, char *snm,
ZFS_MAX_DATASET_NAME_LEN, from);
if (error == 0)
error = zfsctl_snapshot_name(ITOZSB(tdip), tnm,
ZFS_MAX_DATASET_NAME_LEN, to);
ZFS_MAX_DATASET_NAME_LEN, to);
if (error == 0)
error = zfs_secpolicy_rename_perms(from, to, cr);
if (error != 0)

View File

@ -62,7 +62,7 @@ zfs_dbgmsg_data(char *buf, size_t size, void *data)
zfs_dbgmsg_t *zdm = (zfs_dbgmsg_t *)data;
(void) snprintf(buf, size, "%-12llu %-s\n",
(u_longlong_t) zdm->zdm_timestamp, zdm->zdm_msg);
(u_longlong_t)zdm->zdm_timestamp, zdm->zdm_msg);
return (0);
}

View File

@ -3883,7 +3883,7 @@ zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr)
* because GRUB doesn't support them.
*/
if (zfs_is_bootfs(dsname) &&
intval != ZFS_DNSIZE_LEGACY) {
intval != ZFS_DNSIZE_LEGACY) {
return (SET_ERROR(EDOM));
}
@ -4275,7 +4275,7 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, char *origin,
*read_bytes = off - input_fp->f_offset;
if (VOP_SEEK(input_fp->f_vnode, input_fp->f_offset, &off, NULL) == 0)
input_fp->f_offset = off;
input_fp->f_offset = off;
#ifdef DEBUG
if (zfs_ioc_recv_inject_err) {
@ -4463,7 +4463,7 @@ zfs_ioc_recv_new(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
return (error);
error = nvlist_lookup_byte_array(innvl, "begin_record",
(uchar_t **) &begin_record, &begin_record_size);
(uchar_t **)&begin_record, &begin_record_size);
if (error != 0 || begin_record_size != sizeof (*begin_record))
return (SET_ERROR(EINVAL));
@ -5356,7 +5356,7 @@ zfs_ioc_events_next(zfs_cmd_t *zc)
do {
error = zfs_zevent_next(ze, &event,
&zc->zc_nvlist_dst_size, &dropped);
&zc->zc_nvlist_dst_size, &dropped);
if (event != NULL) {
zc->zc_cookie = dropped;
error = put_nvlist(zc, event);
@ -5562,7 +5562,7 @@ zfs_ioc_send_new(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
off = fp->f_offset;
error = dmu_send(snapname, fromname, embedok, largeblockok, compressok,
fd, resumeobj, resumeoff, fp->f_vnode, &off);
fd, resumeobj, resumeoff, fp->f_vnode, &off);
if (VOP_SEEK(fp->f_vnode, fp->f_offset, &off, NULL) == 0)
fp->f_offset = off;
@ -5631,7 +5631,7 @@ zfs_ioc_send_space(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
if (error != 0)
goto out;
error = dmu_send_estimate(tosnap, fromsnap, compressok,
&space);
&space);
dsl_dataset_rele(fromsnap, FTAG);
} else if (strchr(fromname, '#') != NULL) {
/*

View File

@ -1222,8 +1222,9 @@ zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
defined(SHRINKER_NUMA_AWARE)
if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
*objects = 0;
for_each_online_node(sc.nid)
for_each_online_node(sc.nid) {
*objects += (*shrinker->scan_objects)(shrinker, &sc);
}
} else {
*objects = (*shrinker->scan_objects)(shrinker, &sc);
}
@ -1344,7 +1345,7 @@ zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
if (!unmounting) {
mutex_enter(&zsb->z_znodes_lock);
for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
zp = list_next(&zsb->z_all_znodes, zp)) {
zp = list_next(&zsb->z_all_znodes, zp)) {
if (zp->z_sa_hdl)
zfs_znode_dmu_fini(zp);
}
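
In the zfs_sb_prune() hunk above, the single statement following for_each_online_node() gains an explicit { } block. The change is still purely stylistic, presumably because the checker does not treat the iterator macro as a loop header and would otherwise warn about the indented statement after it; braces make the loop body unambiguous either way. A generic sketch with an invented stand-in macro, since for_each_online_node() itself needs kernel headers:

    /* invented stand-in for a kernel-style iterator macro */
    #define for_each_slot(i, n)    for ((i) = 0; (i) < (n); (i)++)

    static int
    count_objects(const int *per_slot, int nslots)
    {
        int i, objects = 0;

        for_each_slot(i, nslots) {
            objects += per_slot[i];
        }
        return (objects);
    }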

View File

@ -2626,7 +2626,7 @@ zfs_getattr_fast(struct inode *ip, struct kstat *sp)
if (zsb->z_issnap) {
if (ip->i_sb->s_root->d_inode == ip)
sp->ino = ZFSCTL_INO_SNAPDIRS -
dmu_objset_id(zsb->z_os);
dmu_objset_id(zsb->z_os);
}
ZFS_EXIT(zsb);
@ -4932,6 +4932,7 @@ zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
#endif /* HAVE_UIO_ZEROCOPY */
#if defined(_KERNEL) && defined(HAVE_SPL)
/* CSTYLED */
module_param(zfs_delete_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
module_param(zfs_read_chunk_size, long, 0644);

View File

@ -575,9 +575,7 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 ||
tmp_gen == 0) {
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
@ -2142,6 +2140,7 @@ zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
EXPORT_SYMBOL(zfs_create_fs);
EXPORT_SYMBOL(zfs_obj_to_path);
/* CSTYLED */
module_param(zfs_object_mutex_size, uint, 0644);
MODULE_PARM_DESC(zfs_object_mutex_size, "Size of znode hold array");
#endif

View File

@ -2270,6 +2270,7 @@ MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay");
module_param(zfs_nocacheflush, int, 0644);
MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes");
/* CSTYLED */
module_param(zil_slog_limit, ulong, 0644);
MODULE_PARM_DESC(zil_slog_limit, "Max commit bytes to separate log device");
#endif

View File

@ -1577,7 +1577,7 @@ zio_delay_interrupt(zio_t *zio)
* OpenZFS's timeout_generic().
*/
tid = taskq_dispatch_delay(system_taskq,
(task_func_t *) zio_interrupt,
(task_func_t *)zio_interrupt,
zio, TQ_NOSLEEP, expire_at_tick);
if (tid == TASKQID_INVALID) {
/*
@ -3802,9 +3802,9 @@ zio_done(zio_t *zio)
* device is currently unavailable.
*/
if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
!vdev_is_dead(zio->io_vd))
!vdev_is_dead(zio->io_vd))
zfs_ereport_post(FM_EREPORT_ZFS_IO, zio->io_spa,
zio->io_vd, zio, 0, 0);
zio->io_vd, zio, 0, 0);
if ((zio->io_error == EIO || !(zio->io_flags &
(ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&

View File

@ -37,6 +37,7 @@ zpl_encode_fh(struct inode *ip, __u32 *fh, int *max_len, struct inode *parent)
#else
zpl_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len, int connectable)
{
/* CSTYLED */
struct inode *ip = dentry->d_inode;
#endif /* HAVE_ENCODE_FH_WITH_INODE */
fstrans_cookie_t cookie;

View File

@ -314,7 +314,7 @@ zpl_mkdir(struct inode *dir, struct dentry *dentry, zpl_umode_t mode)
}
static int
zpl_rmdir(struct inode * dir, struct dentry *dentry)
zpl_rmdir(struct inode *dir, struct dentry *dentry)
{
cred_t *cr = CRED();
int error;
@ -379,7 +379,7 @@ zpl_setattr(struct dentry *dentry, struct iattr *ia)
if (vap->va_mask & ATTR_ATIME)
ip->i_atime = timespec_trunc(ia->ia_atime,
ip->i_sb->s_time_gran);
ip->i_sb->s_time_gran);
cookie = spl_fstrans_mark();
error = -zfs_setattr(ip, vap, 0, cr);
@ -657,6 +657,7 @@ zpl_revalidate(struct dentry *dentry, struct nameidata *nd)
zpl_revalidate(struct dentry *dentry, unsigned int flags)
{
#endif /* HAVE_D_REVALIDATE_NAMEIDATA */
/* CSTYLED */
zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
int error;

View File

@ -572,9 +572,9 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
return;
immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
? 0 : zvol_immediate_write_sz;
? 0 : zvol_immediate_write_sz;
slogging = spa_has_slogs(zilog->zl_spa) &&
(zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
(zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
while (size) {
itx_t *itx;
@ -1441,7 +1441,7 @@ zvol_create_minor_impl(const char *name)
if (len > 0) {
dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
ZIO_PRIORITY_SYNC_READ);
ZIO_PRIORITY_SYNC_READ);
}
zv->zv_objset = NULL;
@ -1539,7 +1539,7 @@ zvol_create_snap_minor_cb(const char *dsname, void *arg)
/* at this point, the dsname should name a snapshot */
if (strchr(dsname, '@') == 0) {
dprintf("zvol_create_snap_minor_cb(): "
"%s is not a shapshot name\n", dsname);
"%s is not a shapshot name\n", dsname);
} else {
minors_job_t *job;
char *n = strdup(dsname);
@ -1608,7 +1608,7 @@ zvol_create_minors_cb(const char *dsname, void *arg)
}
} else {
dprintf("zvol_create_minors_cb(): %s is not a zvol name\n",
dsname);
dsname);
}
return (0);
@ -1954,7 +1954,7 @@ zvol_set_snapdev_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
return (0);
(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
task, TQ_SLEEP);
task, TQ_SLEEP);
return (0);
}
@ -2087,6 +2087,7 @@ zvol_fini(void)
mutex_destroy(&zvol_state_lock);
}
/* BEGIN CSTYLED */
module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
@ -2098,3 +2099,4 @@ MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
module_param(zvol_prefetch_bytes, uint, 0644);
MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
/* END CSTYLED */

View File

@ -179,7 +179,7 @@ zpios_dmu_object_free(run_args_t *run_args, objset_t *os, uint64_t obj)
rc = dmu_tx_assign(tx, TXG_WAIT);
if (rc) {
zpios_print(run_args->file,
"dmu_tx_assign() failed: %d\n", rc);
"dmu_tx_assign() failed: %d\n", rc);
dmu_tx_abort(tx);
return (rc);
}
@ -187,7 +187,7 @@ zpios_dmu_object_free(run_args_t *run_args, objset_t *os, uint64_t obj)
rc = dmu_object_free(os, obj, tx);
if (rc) {
zpios_print(run_args->file,
"dmu_object_free() failed: %d\n", rc);
"dmu_object_free() failed: %d\n", rc);
dmu_tx_abort(tx);
return (rc);
}
@ -213,14 +213,14 @@ zpios_dmu_setup(run_args_t *run_args)
rc = dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL);
if (rc) {
zpios_print(run_args->file, "Error dmu_objset_create(%s, ...) "
"failed: %d\n", name, rc);
"failed: %d\n", name, rc);
goto out;
}
rc = dmu_objset_own(name, DMU_OST_OTHER, 0, zpios_tag, &os);
if (rc) {
zpios_print(run_args->file, "Error dmu_objset_own(%s, ...) "
"failed: %d\n", name, rc);
"failed: %d\n", name, rc);
goto out_destroy;
}
@ -229,7 +229,7 @@ zpios_dmu_setup(run_args_t *run_args)
if (obj == 0) {
rc = -EBADF;
zpios_print(run_args->file, "Error zpios_dmu_"
"object_create() failed, %d\n", rc);
"object_create() failed, %d\n", rc);
goto out_destroy;
}
}
@ -268,7 +268,7 @@ out_destroy:
rc2 = dsl_destroy_head(name);
if (rc2)
zpios_print(run_args->file, "Error dsl_destroy_head"
"(%s, ...) failed: %d\n", name, rc2);
"(%s, ...) failed: %d\n", name, rc2);
}
out:
t->stop = zpios_timespec_now();
@ -497,7 +497,7 @@ zpios_dmu_write(run_args_t *run_args, objset_t *os, uint64_t object,
continue;
}
zpios_print(run_args->file,
"Error in dmu_tx_assign(), %d", rc);
"Error in dmu_tx_assign(), %d", rc);
dmu_tx_abort(tx);
return (rc);
}
@ -588,7 +588,7 @@ zpios_thread_main(void *data)
if (rc) {
zpios_print(run_args->file, "IO error while doing "
"dmu_write(): %d\n", rc);
"dmu_write(): %d\n", rc);
break;
}
@ -651,13 +651,13 @@ zpios_thread_main(void *data)
t.start = zpios_timespec_now();
rc = zpios_dmu_read(run_args, obj.os, obj.obj,
offset, chunk_size, buf);
offset, chunk_size, buf);
t.stop = zpios_timespec_now();
t.delta = zpios_timespec_sub(t.stop, t.start);
if (rc) {
zpios_print(run_args->file, "IO error while doing "
"dmu_read(): %d\n", rc);
"dmu_read(): %d\n", rc);
break;
}
@ -928,7 +928,7 @@ zpios_open(struct inode *inode, struct file *file)
spin_lock_init(&info->info_lock);
info->info_size = ZPIOS_INFO_BUFFER_SIZE;
info->info_buffer =
(char *) vmem_alloc(ZPIOS_INFO_BUFFER_SIZE, KM_SLEEP);
(char *)vmem_alloc(ZPIOS_INFO_BUFFER_SIZE, KM_SLEEP);
info->info_head = info->info_buffer;
file->private_data = (void *)info;
@ -1035,7 +1035,7 @@ zpios_ioctl_cfg(struct file *file, unsigned long arg)
break;
default:
zpios_print(file, "Bad config command %d\n",
kcfg.cfg_cmd);
kcfg.cfg_cmd);
rc = -EINVAL;
break;
}
@ -1055,7 +1055,7 @@ zpios_ioctl_cmd(struct file *file, unsigned long arg)
rc = copy_from_user(kcmd, (zpios_cfg_t *)arg, sizeof (zpios_cmd_t));
if (rc) {
zpios_print(file, "Unable to copy command structure "
"from user to kernel memory, %d\n", rc);
"from user to kernel memory, %d\n", rc);
goto out_cmd;
}
@ -1074,7 +1074,7 @@ zpios_ioctl_cmd(struct file *file, unsigned long arg)
cmd_data_str)), kcmd->cmd_data_size);
if (rc) {
zpios_print(file, "Unable to copy data buffer "
"from user to kernel memory, %d\n", rc);
"from user to kernel memory, %d\n", rc);
goto out_data;
}
}
@ -1090,7 +1090,7 @@ zpios_ioctl_cmd(struct file *file, unsigned long arg)
cmd_data_str)), data, kcmd->cmd_data_size);
if (rc) {
zpios_print(file, "Unable to copy data buffer "
"from kernel to user memory, %d\n", rc);
"from kernel to user memory, %d\n", rc);
rc = -EFAULT;
}

View File

@ -65,7 +65,7 @@ main(int argc, char **argv)
cp1 = argv[1];
if (strlen(cp1) >= (sizeof (dirpath) - strlen("TMP_DIR"))) {
(void) printf("The string length of mount point is "
"too large\n");
"too large\n");
exit(-1);
}
(void) strcpy(&dirpath[0], (const char *)cp1);

View File

@ -215,7 +215,7 @@ main(int argc, char **argv)
(ssize_t)bytes) {
saverr = errno;
if (result < 0)
result = 0;
result = 0;
written += result;
(void) fprintf(stderr, gettext(
"%s: initialized %lu of %lu bytes: %s\n"),
@ -269,7 +269,7 @@ main(int argc, char **argv)
static void usage()
{
(void) fprintf(stderr, gettext(
"Usage: mkfile [-nv] <size>[g|k|b|m] <name1> [<name2>] ...\n"));
"Usage: mkfile [-nv] <size>[g|k|b|m] <name1> [<name2>] ...\n"));
exit(1);
/* NOTREACHED */
}

View File

@ -289,7 +289,8 @@ run_process(const char *path, char *argv[])
} else if (pid > 0) {
int status;
while ((rc = waitpid(pid, &status, 0)) == -1 && errno == EINTR);
while ((rc = waitpid(pid, &status, 0)) == -1 &&
errno == EINTR) { }
if (rc < 0 || !WIFEXITED(status))
return (-1);
@ -369,8 +370,8 @@ create_files(void)
file = malloc(PATH_MAX);
if (file == NULL) {
rc = ENOMEM;
ERROR("Error %d: malloc(%d) bytes for file name\n",
rc, PATH_MAX);
ERROR("Error %d: malloc(%d) bytes for file name\n", rc,
PATH_MAX);
goto out;
}
@ -392,7 +393,7 @@ create_files(void)
rc = open(file, O_CREAT, 0644);
if (rc == -1) {
ERROR("Error %d: open(%s, O_CREATE, 0644)\n",
errno, file);
errno, file);
rc = errno;
goto out;
}
@ -454,16 +455,16 @@ setxattrs(void)
value = malloc(XATTR_SIZE_MAX);
if (value == NULL) {
rc = ENOMEM;
ERROR("Error %d: malloc(%d) bytes for xattr value\n",
rc, XATTR_SIZE_MAX);
ERROR("Error %d: malloc(%d) bytes for xattr value\n", rc,
XATTR_SIZE_MAX);
goto out;
}
file = malloc(PATH_MAX);
if (file == NULL) {
rc = ENOMEM;
ERROR("Error %d: malloc(%d) bytes for file name\n",
rc, PATH_MAX);
ERROR("Error %d: malloc(%d) bytes for file name\n", rc,
PATH_MAX);
goto out;
}
@ -525,16 +526,16 @@ getxattrs(void)
verify_value = malloc(XATTR_SIZE_MAX);
if (verify_value == NULL) {
rc = ENOMEM;
ERROR("Error %d: malloc(%d) bytes for xattr verify\n",
rc, XATTR_SIZE_MAX);
ERROR("Error %d: malloc(%d) bytes for xattr verify\n", rc,
XATTR_SIZE_MAX);
goto out;
}
value = malloc(XATTR_SIZE_MAX);
if (value == NULL) {
rc = ENOMEM;
ERROR("Error %d: malloc(%d) bytes for xattr value\n",
rc, XATTR_SIZE_MAX);
ERROR("Error %d: malloc(%d) bytes for xattr value\n", rc,
XATTR_SIZE_MAX);
goto out;
}
@ -544,8 +545,8 @@ getxattrs(void)
file = malloc(PATH_MAX);
if (file == NULL) {
rc = ENOMEM;
ERROR("Error %d: malloc(%d) bytes for file name\n",
rc, PATH_MAX);
ERROR("Error %d: malloc(%d) bytes for file name\n", rc,
PATH_MAX);
goto out;
}
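
One more pattern appears in the test-utility hunks above: the waitpid()/EINTR retry loop that used to end in a bare semicolon now carries an explicit empty { } body, and the wrapped ERROR() calls are re-broken so the continuation lines carry less text. A standalone sketch of that retry idiom with the empty braces, assuming a POSIX environment:

    #include <errno.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    /* reap a child, retrying while the wait is interrupted by a signal */
    static int
    wait_for_child(pid_t pid)
    {
        int status;
        pid_t rc;

        while ((rc = waitpid(pid, &status, 0)) == -1 &&
            errno == EINTR) { }

        if (rc < 0 || !WIFEXITED(status))
            return (-1);
        return (WEXITSTATUS(status));
    }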