Mark functions as static

Mark functions that are used only within a single translation unit as
static. This covers only functions that also lack a prototype in any
header file.

Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
Closes #10470
Arvind Sankar, 2020-06-15 14:30:37 -04:00, committed by Brian Behlendorf
parent 1fa5c7af33
commit 65c7cc49bf
62 changed files with 157 additions and 176 deletions
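
Before the per-file hunks, a brief illustration of what the change accomplishes (not part of the commit; the file and function names below are hypothetical). Giving a function internal linkage with the static keyword keeps its symbol out of the global symbol table, prevents accidental collisions with same-named functions in other translation units, and lets the compiler warn when the function loses its last caller:

/* example.c -- a hypothetical translation unit */
#include <stdio.h>

/*
 * Called only from this file and not declared in any header,
 * so it can have internal linkage. The symbol is invisible to
 * the linker, and -Wunused-function can flag it if every
 * caller is later removed.
 */
static void
print_banner(void)
{
	(void) printf("hello\n");
}

int
main(void)
{
	print_banner();
	return (0);
}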


@ -113,7 +113,7 @@ run_gen_bench_impl(const char *impl)
}
}
void
static void
run_gen_bench(void)
{
char **impl_name;
@ -197,7 +197,7 @@ run_rec_bench_impl(const char *impl)
}
}
void
static void
run_rec_bench(void)
{
char **impl_name;


@ -533,7 +533,7 @@ zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}
void
static void
zfs_enable_ds(void *arg)
{
unavailpool_t *pool = (unavailpool_t *)arg;


@ -108,7 +108,7 @@ _zed_strings_node_destroy(zed_strings_node_t *np)
* If [key] is specified, it will be used to index the node; otherwise,
* the string [val] will be used.
*/
zed_strings_node_t *
static zed_strings_node_t *
_zed_strings_node_create(const char *key, const char *val)
{
zed_strings_node_t *np;


@ -443,7 +443,7 @@ safe_malloc(size_t size)
return (data);
}
void *
static void *
safe_realloc(void *data, size_t size)
{
void *newp;


@ -32,7 +32,7 @@
libzfs_handle_t *g_zfs;
void
static void
usage(int err)
{
fprintf(stderr, "Usage: [-v] zfs_ids_to_path <pool> <objset id> "


@ -613,7 +613,7 @@ register_handler(const char *pool, int flags, zinject_record_t *record,
return (0);
}
int
static int
perform_action(const char *pool, zinject_record_t *record, int cmd)
{
zfs_cmd_t zc = {"\0"};


@ -485,7 +485,7 @@ print_prop_cb(int prop, void *cb)
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
void
static void
usage(boolean_t requested)
{
FILE *fp = requested ? stdout : stderr;
@ -1689,7 +1689,7 @@ typedef struct export_cbdata {
/*
* Export one pool
*/
int
static int
zpool_export_one(zpool_handle_t *zhp, void *data)
{
export_cbdata_t *cb = data;
@ -3711,7 +3711,7 @@ default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
* If force_column_width is set, use it for the column width. If not set, use
* the default column width.
*/
void
static void
print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
{
@ -3783,7 +3783,7 @@ print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
* sdc - - 0 0 5 473 val1 val2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
*/
void
static void
print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
{
int i, j;
@ -4633,7 +4633,7 @@ refresh_iostat(zpool_handle_t *zhp, void *data)
/*
* Callback to print out the iostats for the given pool.
*/
int
static int
print_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
@ -5787,7 +5787,7 @@ print_one_column(zpool_prop_t prop, uint64_t value, const char *str,
* print static default line per vdev
* not compatible with '-o' <proplist> option
*/
void
static void
print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
list_cbdata_t *cb, int depth, boolean_t isspare)
{
@ -5954,7 +5954,7 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
/*
* Generic callback function to list a pool.
*/
int
static int
list_callback(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cbp = data;
@ -6858,7 +6858,7 @@ zpool_has_checkpoint(zpool_handle_t *zhp)
return (B_FALSE);
}
int
static int
scrub_callback(zpool_handle_t *zhp, void *data)
{
scrub_cbdata_t *cb = data;
@ -7583,7 +7583,7 @@ print_dedup_stats(nvlist_t *config)
* When given the '-v' option, we print out the complete config. If the '-e'
* option is specified, then we print out error rate information as well.
*/
int
static int
status_callback(zpool_handle_t *zhp, void *data)
{
status_cbdata_t *cbp = data;
@ -9448,7 +9448,7 @@ typedef struct set_cbdata {
boolean_t cb_any_successful;
} set_cbdata_t;
int
static int
set_callback(zpool_handle_t *zhp, void *data)
{
int error;
@ -9698,7 +9698,7 @@ print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
(void) fflush(stdout);
}
void *
static void *
wait_status_thread(void *arg)
{
wait_data_t *wd = (wait_data_t *)arg;


@ -1173,7 +1173,7 @@ is_grouping(const char *type, int *mindev, int *maxdev)
* Note: we don't bother freeing anything in the error paths
* because the program is just going to exit anyway.
*/
nvlist_t *
static nvlist_t *
construct_spec(nvlist_t *props, int argc, char **argv)
{
nvlist_t *nvroot, *nv, **top, **spares, **l2cache;


@ -1584,7 +1584,7 @@ ztest_bt_bonus(dmu_buf_t *db)
* helps ensure that all dnode traversal code properly skips the
* interior regions of large dnodes.
*/
void
static void
ztest_fill_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
objset_t *os, uint64_t gen)
{
@ -1603,7 +1603,7 @@ ztest_fill_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
* Verify that the unused area of a bonus buffer is filled with the
* expected tokens.
*/
void
static void
ztest_verify_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
objset_t *os, uint64_t gen)
{
@ -2259,7 +2259,7 @@ ztest_lr_alloc(size_t lrsize, char *name)
return (lr);
}
void
static void
ztest_lr_free(void *lr, size_t lrsize, char *name)
{
size_t namesize = name ? strlen(name) + 1 : 0;
@ -3609,7 +3609,7 @@ ztest_device_removal(ztest_ds_t *zd, uint64_t id)
/*
* Callback function which expands the physical size of the vdev.
*/
vdev_t *
static vdev_t *
grow_vdev(vdev_t *vd, void *arg)
{
spa_t *spa __maybe_unused = vd->vdev_spa;
@ -3638,7 +3638,7 @@ grow_vdev(vdev_t *vd, void *arg)
* Callback function which expands a given vdev by calling vdev_online().
*/
/* ARGSUSED */
vdev_t *
static vdev_t *
online_vdev(vdev_t *vd, void *arg)
{
spa_t *spa = vd->vdev_spa;
@ -3698,7 +3698,7 @@ online_vdev(vdev_t *vd, void *arg)
* If a NULL callback is passed, then we just return back the first
* leaf vdev we encounter.
*/
vdev_t *
static vdev_t *
vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
{
uint_t c;
@ -4151,7 +4151,7 @@ ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
/*
* Cleanup non-standard snapshots and clones.
*/
void
static void
ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
{
char *snap1name;
@ -4605,7 +4605,7 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
umem_free(od, size);
}
void
static void
compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
{
@ -7269,7 +7269,7 @@ ztest_run(ztest_shared_t *zs)
mutex_destroy(&ztest_checkpoint_lock);
}
void
static void
print_time(hrtime_t t, char *timebuf)
{
hrtime_t s = t / NANOSEC;


@ -29,20 +29,7 @@
#ifndef _SPL_CONSOLE_H
#define _SPL_CONSOLE_H
static inline void
console_vprintf(const char *fmt, va_list args)
{
vprintf(fmt, args);
}
static inline void
console_printf(const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
console_vprintf(fmt, args);
va_end(args);
}
#define console_vprintf vprintf
#define console_printf printf
#endif /* _SPL_CONSOLE_H */


@ -25,20 +25,7 @@
#ifndef _SPL_CONSOLE_H
#define _SPL_CONSOLE_H
void
console_vprintf(const char *fmt, va_list args)
{
vprintk(fmt, args);
}
void
console_printf(const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
console_vprintf(fmt, args);
va_end(args);
}
#define console_vprintf vprintk
#define console_printf printk
#endif /* _SPL_CONSOLE_H */
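
A side note on the two console.h hunks above: rather than being marked static, the trivial wrappers are deleted outright and replaced by macro aliases. A static inline function defined in a header is instantiated anew in every file that includes it, whereas a macro alias simply forwards to the function that already exists. A minimal sketch of the idiom, with hypothetical names:

#include <stdarg.h>
#include <stdio.h>

/* before: one inline wrapper per includer */
static inline void
log_vprintf(const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

/* after: a plain alias; no new function is emitted */
#define log_vprintf vprintf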


@ -806,7 +806,7 @@ libzfs_mnttab_init(libzfs_handle_t *hdl)
sizeof (mnttab_node_t), offsetof(mnttab_node_t, mtn_node));
}
int
static int
libzfs_mnttab_update(libzfs_handle_t *hdl)
{
struct mnttab entry;
@ -1620,7 +1620,7 @@ error:
return (NULL);
}
int
static int
zfs_add_synthetic_resv(zfs_handle_t *zhp, nvlist_t *nvl)
{
uint64_t old_volsize;
@ -2551,7 +2551,7 @@ struct get_clones_arg {
char buf[ZFS_MAX_DATASET_NAME_LEN];
};
int
static int
get_clones_cb(zfs_handle_t *zhp, void *arg)
{
struct get_clones_arg *gca = arg;
@ -3129,7 +3129,7 @@ zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop)
return (val);
}
int
static int
zfs_prop_set_int(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t val)
{
char buf[64];


@ -38,7 +38,7 @@
#include "libzfs_impl.h"
int
static int
zfs_iter_clones(zfs_handle_t *zhp, zfs_iter_f func, void *data)
{
nvlist_t *nvl = zfs_get_clones_nvl(zhp);


@ -822,7 +822,7 @@ zfs_unshare_smb(zfs_handle_t *zhp, const char *mountpoint)
/*
* Same as zfs_unmountall(), but for NFS and SMB unshares.
*/
int
static int
zfs_unshareall_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
{
prop_changelist_t *clp;


@ -445,7 +445,7 @@ bootfs_name_valid(const char *pool, const char *bootfs)
return (B_FALSE);
}
boolean_t
static boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
char bootfs[ZFS_MAX_DATASET_NAME_LEN];
@ -2141,7 +2141,7 @@ xlate_init_err(int err)
* Begin, suspend, or cancel the initialization (initializing of all free
* blocks) for the given vdevs in the given pool.
*/
int
static int
zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds, boolean_t wait)
{


@ -2296,7 +2296,7 @@ err_out:
return (err);
}
zfs_handle_t *
static zfs_handle_t *
name_to_dir_handle(libzfs_handle_t *hdl, const char *snapname)
{
char dirname[ZFS_MAX_DATASET_NAME_LEN];
@ -2877,7 +2877,7 @@ typedef struct guid_to_name_data {
uint64_t num_redact_snaps;
} guid_to_name_data_t;
boolean_t
static boolean_t
redact_snaps_match(zfs_handle_t *zhp, guid_to_name_data_t *gtnd)
{
uint64_t *bmark_snaps;


@ -1773,7 +1773,7 @@ typedef struct expand_data {
zfs_type_t type;
} expand_data_t;
int
static int
zprop_expand_list_cb(int prop, void *cb)
{
zprop_list_t *entry;


@ -318,7 +318,7 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
* This will only deal with decrypting the last block of the input that
* might not be a multiple of block length.
*/
void
static void
ccm_decrypt_incomplete_block(ccm_ctx_t *ctx,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
{
@ -573,7 +573,7 @@ ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
return (CRYPTO_SUCCESS);
}
int
static int
ccm_validate_args(CK_AES_CCM_PARAMS *ccm_param, boolean_t is_encrypt_init)
{
size_t macSize, nonceSize;
@ -758,11 +758,7 @@ encode_adata_len(ulong_t auth_data_len, uint8_t *encoded, size_t *encoded_len)
}
}
/*
* The following function should be call at encrypt or decrypt init time
* for AES CCM mode.
*/
int
static int
ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
unsigned char *auth_data, size_t auth_data_len, size_t block_size,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
@ -846,6 +842,10 @@ ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
return (CRYPTO_SUCCESS);
}
/*
* The following function should be call at encrypt or decrypt init time
* for AES CCM mode.
*/
int
ccm_init_ctx(ccm_ctx_t *ccm_ctx, char *param, int kmflag,
boolean_t is_encrypt_init, size_t block_size,


@ -518,11 +518,7 @@ gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
}
}
/*
* The following function is called at encrypt or decrypt init time
* for AES GCM mode.
*/
int
static int
gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
unsigned char *auth_data, size_t auth_data_len, size_t block_size,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
@ -574,6 +570,9 @@ gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
}
/*
* The following function is called at encrypt or decrypt init time
* for AES GCM mode.
*
* Init the GCM context struct. Handle the cycle and avx implementations here.
*/
int


@ -872,7 +872,7 @@ kcf_free_req(kcf_areq_node_t *areq)
* Utility routine to remove a request from the chain of requests
* hanging off a context.
*/
void
static void
kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
{
kcf_areq_node_t *cur, *prev;
@ -909,7 +909,7 @@ kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
*
* The caller must hold the queue lock and request lock (an_lock).
*/
void
static void
kcf_remove_node(kcf_areq_node_t *node)
{
kcf_areq_node_t *nextp = node->an_next;


@ -94,7 +94,7 @@ callb_cpr_t callb_cprinfo_safe = {
/*
* Init all callb tables in the system.
*/
void
static void
callb_init(void *dummy __unused)
{
callb_table.ct_busy = 0; /* mark table open for additions */
@ -102,7 +102,7 @@ callb_init(void *dummy __unused)
mutex_init(&callb_table.ct_lock, NULL, MUTEX_DEFAULT, NULL);
}
void
static void
callb_fini(void *dummy __unused)
{
callb_t *cp;


@ -648,7 +648,7 @@ zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt,
* While processing the ACL each ACE will be validated for correctness.
* ACE FUIDs will be created later.
*/
int
static int
zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size,
zfs_fuid_info_t **fuidp, cred_t *cr)


@ -602,7 +602,7 @@ zfsctl_root_getattr(struct vop_getattr_args *ap)
* When we lookup "." we still can be asked to lock it
* differently, can't we?
*/
int
static int
zfsctl_relock_dot(vnode_t *dvp, int ltype)
{
vref(dvp);
@ -624,7 +624,7 @@ zfsctl_relock_dot(vnode_t *dvp, int ltype)
/*
* Special case the handling of "..".
*/
int
static int
zfsctl_root_lookup(struct vop_lookup_args *ap)
{
struct componentname *cnp = ap->a_cnp;
@ -766,7 +766,7 @@ zfsctl_common_pathconf(struct vop_pathconf_args *ap)
/*
* Returns a trivial ACL
*/
int
static int
zfsctl_common_getacl(struct vop_getacl_args *ap)
{
int i;
@ -894,7 +894,7 @@ zfsctl_snapshot_vnode_setup(vnode_t *vp, void *arg)
* - the snapshot vnode is not covered, because the snapshot has been unmounted
* The last two states are transient and should be relatively short-lived.
*/
int
static int
zfsctl_snapdir_lookup(struct vop_lookup_args *ap)
{
vnode_t *dvp = ap->a_dvp;


@ -1250,7 +1250,7 @@ out:
return (error);
}
void
static void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
objset_t *os = zfsvfs->z_os;


@ -1251,7 +1251,7 @@ zfs_write_simple(znode_t *zp, const void *data, size_t len,
return (error);
}
void
static void
zfs_get_done(zgd_t *zgd, int error)
{
znode_t *zp = zgd->zgd_private;
@ -2052,7 +2052,7 @@ out:
}
int
static int
zfs_lookup_internal(znode_t *dzp, char *name, vnode_t **vpp,
struct componentname *cnp, int nameiop)
{
@ -4605,7 +4605,7 @@ zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
}
/*ARGSUSED*/
void
static void
zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
znode_t *zp = VTOZ(vp);
@ -5043,7 +5043,7 @@ struct vop_putpages_args {
};
#endif
int
static int
zfs_freebsd_putpages(struct vop_putpages_args *ap)
{
@ -5994,7 +5994,7 @@ struct vop_deleteextattr {
/*
* Vnode operation to remove a named attribute.
*/
int
static int
zfs_deleteextattr(struct vop_deleteextattr_args *ap)
{
zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
@ -6271,7 +6271,7 @@ struct vop_getacl_args {
};
#endif
int
static int
zfs_freebsd_getacl(struct vop_getacl_args *ap)
{
int error;
@ -6302,7 +6302,7 @@ struct vop_setacl_args {
};
#endif
int
static int
zfs_freebsd_setacl(struct vop_setacl_args *ap)
{
int error;
@ -6355,7 +6355,7 @@ struct vop_aclcheck_args {
};
#endif
int
static int
zfs_freebsd_aclcheck(struct vop_aclcheck_args *ap)
{


@ -1731,7 +1731,7 @@ zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
return (0);
}
void
static void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, void *tag)
{
sa_handle_destroy(hdl);


@ -543,7 +543,7 @@ spl_kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
#endif
}
int
static int
spl_getattr(struct file *filp, struct kstat *stat)
{
int rc;


@ -952,7 +952,7 @@ spa_iostats_trim_add(spa_t *spa, trim_type_t type,
}
}
int
static int
spa_iostats_update(kstat_t *ksp, int rw)
{
if (rw == KSTAT_WRITE) {


@ -648,7 +648,7 @@ zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt,
* While processing the ACL each ACE will be validated for correctness.
* ACE FUIDs will be created later.
*/
int
static int
zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, umode_t obj_mode, zfs_acl_t *aclp,
void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size,
zfs_fuid_info_t **fuidp, cred_t *cr)
@ -1189,7 +1189,7 @@ typedef struct trivial_acl {
uint32_t everyone; /* allow mask matching mode */
} trivial_acl_t;
void
static void
acl_trivial_access_masks(mode_t mode, boolean_t isdir, trivial_acl_t *masks)
{
uint32_t read_mask = ACE_READ_DATA;
@ -1262,7 +1262,7 @@ acl_trivial_access_masks(mode_t mode, boolean_t isdir, trivial_acl_t *masks)
* have read_acl denied, and write_owner/write_acl/write_attributes
* can only be owner@ entry.
*/
int
static int
ace_trivial_common(void *acep, int aclcnt,
uint64_t (*walk)(void *, uint64_t, int aclcnt,
uint16_t *, uint16_t *, uint32_t *))


@ -971,7 +971,7 @@ zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
}
void
static void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
objset_t *os = zfsvfs->z_os;
@ -1159,7 +1159,7 @@ zfs_statvfs(struct inode *ip, struct kstatfs *statp)
return (err);
}
int
static int
zfs_root(zfsvfs_t *zfsvfs, struct inode **ipp)
{
znode_t *rootzp;


@ -1041,7 +1041,7 @@ zfs_zrele_async(znode_t *zp)
}
/* ARGSUSED */
void
static void
zfs_get_done(zgd_t *zgd, int error)
{
znode_t *zp = zgd->zgd_private;


@ -249,7 +249,7 @@ zfs_znode_hold_compare(const void *a, const void *b)
return (TREE_CMP(zh_a->zh_obj, zh_b->zh_obj));
}
boolean_t
static boolean_t __maybe_unused
zfs_znode_held(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, search;
@ -451,7 +451,7 @@ zfs_inode_set_ops(zfsvfs_t *zfsvfs, struct inode *ip)
}
}
void
static void
zfs_set_inode_flags(znode_t *zp, struct inode *ip)
{
/*
@ -2013,7 +2013,7 @@ zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
return (0);
}
void
static void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, void *tag)
{
sa_handle_destroy(hdl);
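
One hunk in this file pairs static with __maybe_unused: zfs_znode_held() is referenced only from ASSERT()s, so in builds where assertions compile away, the newly static definition would otherwise trip -Wunused-function. The __maybe_unused annotation (in the kernel headers, a wrapper around __attribute__((unused))) suppresses exactly that warning; the zil.c hunks later in this commit use the same combination. A small userland sketch of the situation, with a hypothetical helper:

#include <assert.h>

/* Referenced only from assert(); unused when NDEBUG is set. */
static int __attribute__((unused))
count_is_sane(int count)
{
	return (count >= 0);
}

void
bump(int *count)
{
	assert(count_is_sane(*count));
	*count += 1;
}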


@ -651,7 +651,7 @@ zpl_readpages(struct file *filp, struct address_space *mapping,
(filler_t *)zpl_readpage, filp));
}
int
static int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
struct address_space *mapping = data;


@ -518,7 +518,7 @@ zpl_get_link_common(struct dentry *dentry, struct inode *ip, char **link)
}
#if defined(HAVE_GET_LINK_DELAYED)
const char *
static const char *
zpl_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *done)
{
@ -537,7 +537,7 @@ zpl_get_link(struct dentry *dentry, struct inode *inode,
return (link);
}
#elif defined(HAVE_GET_LINK_COOKIE)
const char *
static const char *
zpl_get_link(struct dentry *dentry, struct inode *inode, void **cookie)
{
char *link = NULL;
@ -553,7 +553,7 @@ zpl_get_link(struct dentry *dentry, struct inode *inode, void **cookie)
return (*cookie = link);
}
#elif defined(HAVE_FOLLOW_LINK_COOKIE)
const char *
static const char *
zpl_follow_link(struct dentry *dentry, void **cookie)
{
char *link = NULL;


@ -148,7 +148,7 @@ zpl_xattr_filldir(xattr_filldir_t *xf, const char *name, int name_len)
* Read as many directory entry names as will fit in to the provided buffer,
* or when no buffer is provided calculate the required buffer size.
*/
int
static int
zpl_xattr_readdir(struct inode *dxip, xattr_filldir_t *xf)
{
zap_cursor_t zc;
@ -920,6 +920,9 @@ xattr_handler_t zpl_xattr_security_handler = {
* attribute implemented by filesystems in the kernel." - xattr(7)
*/
#ifdef CONFIG_FS_POSIX_ACL
#ifndef HAVE_SET_ACL
static
#endif
int
zpl_set_acl(struct inode *ip, struct posix_acl *acl, int type)
{
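
The zpl_set_acl() hunk above takes a different route: the function must keep external linkage when the kernel's inode_operations declares a set_acl method (the HAVE_SET_ACL configure result), and becomes file-local only otherwise. The zfs_acl byteswap hunks further down use the same conditional-linkage idiom with #ifndef _KERNEL. A generic sketch, with a hypothetical configure macro and function:

/*
 * hook() needs external linkage only when the surrounding
 * framework declares and calls it (HAVE_HOOK defined by
 * configure); otherwise it stays private to this file.
 */
#ifndef HAVE_HOOK
static
#endif
int
hook(int arg)
{
	return (arg + 1);
}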


@ -640,7 +640,7 @@ zvol_revalidate_disk(struct gendisk *disk)
return (0);
}
int
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{


@ -1696,7 +1696,7 @@ arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
* which circumvent the regular disk->arc->l2arc path and instead come
* into being in the reverse order, i.e. l2arc->arc.
*/
arc_buf_hdr_t *
static arc_buf_hdr_t *
arc_buf_alloc_l2only(size_t size, arc_buf_contents_t type, l2arc_dev_t *dev,
dva_t dva, uint64_t daddr, int32_t psize, uint64_t birth,
enum zio_compress compress, boolean_t protected, boolean_t prefetch)
@ -7016,7 +7016,7 @@ arc_kstat_update(kstat_t *ksp, int rw)
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
unsigned int
static unsigned int
arc_state_multilist_index_func(multilist_t *ml, void *obj)
{
arc_buf_hdr_t *hdr = obj;


@ -588,7 +588,7 @@ dbuf_is_metadata(dmu_buf_impl_t *db)
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
unsigned int
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
dmu_buf_impl_t *db = obj;


@ -61,7 +61,7 @@ dbuf_stats_hash_table_headers(char *buf, size_t size)
return (0);
}
int
static int
__dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db)
{
arc_buf_info_t abi = { 0 };


@ -151,7 +151,7 @@ const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
{ zfs_acl_byteswap, "acl" }
};
int
static int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
void *tag, dmu_buf_t **dbp)
{


@ -392,7 +392,7 @@ dnode_hash(const objset_t *os, uint64_t obj)
return (crc);
}
unsigned int
static unsigned int
dnode_multilist_index_func(multilist_t *ml, void *obj)
{
dnode_t *dn = obj;


@ -343,7 +343,7 @@ dsl_dir_get_encryption_root_ddobj(dsl_dir_t *dd, uint64_t *rddobj)
DSL_CRYPTO_KEY_ROOT_DDOBJ, 8, 1, rddobj));
}
int
static int
dsl_dir_get_encryption_version(dsl_dir_t *dd, uint64_t *version)
{
*version = 0;
@ -2304,7 +2304,7 @@ dsl_crypto_recv_raw_key_sync(dsl_dataset_t *ds, nvlist_t *nvl, dmu_tx_t *tx)
iters, tx);
}
int
static int
dsl_crypto_recv_key_check(void *arg, dmu_tx_t *tx)
{
int ret;
@ -2345,7 +2345,7 @@ out:
return (ret);
}
void
static void
dsl_crypto_recv_key_sync(void *arg, dmu_tx_t *tx)
{
dsl_crypto_recv_key_arg_t *dcrka = arg;


@ -1684,7 +1684,7 @@ dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
int
static int
dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;


@ -1158,7 +1158,7 @@ dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
/*
* Create the pool-wide zap object for storing temporary snapshot holds.
*/
void
static void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
objset_t *mos = dp->dp_meta_objset;


@ -1598,7 +1598,7 @@ dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
scan_prefetch_ctx_rele(spc, FTAG);
}
void
static void
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *private)
{


@ -1480,7 +1480,7 @@ metaslab_largest_allocatable(metaslab_t *msp)
* Return the maximum contiguous segment within the unflushed frees of this
* metaslab.
*/
uint64_t
static uint64_t
metaslab_largest_unflushed_free(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
@ -1810,7 +1810,7 @@ metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
/*
* Wait for any in-progress metaslab loads to complete.
*/
void
static void
metaslab_load_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
@ -1824,7 +1824,7 @@ metaslab_load_wait(metaslab_t *msp)
/*
* Wait for any in-progress flushing to complete.
*/
void
static void
metaslab_flush_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
@ -3107,7 +3107,7 @@ metaslab_segment_weight(metaslab_t *msp)
* allocation based on the index encoded in its value. For space-based
* weights we rely on the entire weight (excluding the weight-type bit).
*/
boolean_t
static boolean_t
metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
{
/*
@ -3376,7 +3376,7 @@ metaslab_passivate(metaslab_t *msp, uint64_t weight)
* metaslab group. If we're in sync pass > 1, then we continue using this
* metaslab so that we don't dirty more block and cause more sync passes.
*/
void
static void
metaslab_segment_may_passivate(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
@ -4650,7 +4650,7 @@ find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
return (msp);
}
void
static void
metaslab_active_mask_verify(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
@ -5360,7 +5360,7 @@ typedef struct remap_blkptr_cb_arg {
void *rbca_cb_arg;
} remap_blkptr_cb_arg_t;
void
static void
remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{


@ -268,7 +268,7 @@ layout_hash_compare(const void *arg1, const void *arg2)
return (TREE_CMP(node1->lot_instance, node2->lot_instance));
}
boolean_t
static boolean_t
sa_layout_equal(sa_lot_t *tbf, sa_attr_type_t *attrs, int count)
{
int i;
@ -318,7 +318,7 @@ sa_get_spill(sa_handle_t *hdl)
*
* Operates on bulk array, first failure will abort further processing
*/
int
static int
sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
sa_data_op_t data_op, dmu_tx_t *tx)
{
@ -1156,7 +1156,7 @@ sa_tear_down(objset_t *os)
os->os_sa = NULL;
}
void
static void
sa_build_idx_tab(void *hdr, void *attr_addr, sa_attr_type_t attr,
uint16_t length, int length_idx, boolean_t var_length, void *userp)
{
@ -1220,7 +1220,7 @@ sa_attr_iter(objset_t *os, sa_hdr_phys_t *hdr, dmu_object_type_t type,
}
/*ARGSUSED*/
void
static void
sa_byteswap_cb(void *hdr, void *attr_addr, sa_attr_type_t attr,
uint16_t length, int length_idx, boolean_t variable_length, void *userp)
{
@ -1230,7 +1230,7 @@ sa_byteswap_cb(void *hdr, void *attr_addr, sa_attr_type_t attr,
sa_bswap_table[sa->sa_attr_table[attr].sa_byteswap](attr_addr, length);
}
void
static void
sa_byteswap(sa_handle_t *hdl, sa_buf_type_t buftype)
{
sa_hdr_phys_t *sa_hdr_phys = SA_GET_HDR(hdl, buftype);
@ -1462,7 +1462,7 @@ sa_buf_rele(dmu_buf_t *db, void *tag)
dmu_buf_rele(db, tag);
}
int
static int
sa_lookup_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count)
{
ASSERT(hdl);


@ -2264,7 +2264,7 @@ spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
}
/* ARGSUSED */
int
static int
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
@ -2492,7 +2492,7 @@ livelist_delete_sync(void *arg, dmu_tx_t *tx)
* the pool-wide livelist data.
*/
/* ARGSUSED */
void
static void
spa_livelist_delete_cb(void *arg, zthr_t *z)
{
spa_t *spa = arg;
@ -2546,7 +2546,7 @@ spa_livelist_delete_cb(void *arg, zthr_t *z)
}
}
void
static void
spa_start_livelist_destroy_thread(spa_t *spa)
{
ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
@ -2652,7 +2652,7 @@ out:
spa->spa_to_condense.syncing = B_FALSE;
}
void
static void
spa_livelist_condense_cb(void *arg, zthr_t *t)
{
while (zfs_livelist_condense_zthr_pause &&
@ -2747,7 +2747,7 @@ spa_livelist_condense_cb_check(void *arg, zthr_t *z)
return (B_FALSE);
}
void
static void
spa_start_livelist_condensing_thread(spa_t *spa)
{
spa->spa_to_condense.ds = NULL;
@ -7733,7 +7733,7 @@ spa_vdev_resilver_done(spa_t *spa)
/*
* Update the stored path or FRU for this vdev.
*/
int
static int
spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
boolean_t ispath)
{


@ -615,7 +615,7 @@ spa_deadman(void *arg)
MSEC_TO_TICK(zfs_deadman_checktime_ms));
}
int
static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
const spa_log_sm_t *a = va;
@ -944,7 +944,7 @@ spa_aux_compare(const void *a, const void *b)
return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}
void
static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
avl_index_t where;
@ -962,7 +962,7 @@ spa_aux_add(vdev_t *vd, avl_tree_t *avl)
}
}
void
static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search;
@ -982,7 +982,7 @@ spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
}
}
boolean_t
static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
spa_aux_t search, *found;
@ -1007,7 +1007,7 @@ spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
return (found != NULL);
}
void
static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search, *found;


@ -2857,7 +2857,7 @@ vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
}
}
void
static void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;


@ -966,7 +966,7 @@ typedef struct remap_segment {
list_node_t rs_node;
} remap_segment_t;
remap_segment_t *
static remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
@ -990,7 +990,7 @@ rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
* Finally, since we are doing an allocation, it is up to the caller to
* free the array allocated in this function.
*/
vdev_indirect_mapping_entry_phys_t *
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
uint64_t asize, uint64_t *copied_entries)
{
@ -1858,7 +1858,6 @@ vdev_ops_t vdev_indirect_ops = {
.vdev_op_leaf = B_FALSE /* leaf vdev */
};
EXPORT_SYMBOL(rs_alloc);
EXPORT_SYMBOL(spa_condense_fini);
EXPORT_SYMBOL(spa_start_indirect_condensing_thread);
EXPORT_SYMBOL(spa_condense_indirect_start_sync);
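
Note the companion change above: once rs_alloc() is static, its EXPORT_SYMBOL() entry has to go as well, since only symbols with external linkage can be exported to other kernel modules. The underlying linkage rule, sketched in plain C with hypothetical files:

/* file_a.c */
static int
helper(void)	/* internal linkage: invisible outside this file */
{
	return (0);
}

/* file_b.c -- fails to link: undefined reference to 'helper' */
extern int helper(void);

int
use_helper(void)
{
	return (helper());
}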


@ -1588,7 +1588,7 @@ vdev_uberblock_sync(zio_t *zio, uint64_t *good_writes,
}
/* Sync the uberblocks to all vdevs in svd[] */
int
static int
vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
{
spa_t *spa = svd[0]->vdev_spa;
@ -1705,7 +1705,7 @@ vdev_label_sync(zio_t *zio, uint64_t *good_writes,
nvlist_free(label);
}
int
static int
vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
{
list_t *dl = &spa->spa_config_dirty_list;


@ -212,7 +212,7 @@ int zfs_vdev_def_queue_depth = 32;
*/
int zfs_vdev_aggregate_trim = 0;
int
static int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
const zio_t *z1 = (const zio_t *)x1;
@ -244,7 +244,7 @@ vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
return (&vq->vq_trim_offset_tree);
}
int
static int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
const zio_t *z1 = (const zio_t *)x1;


@ -230,7 +230,7 @@ zap_name_alloc(zap_t *zap, const char *key, matchtype_t mt)
return (zn);
}
zap_name_t *
static zap_name_t *
zap_name_alloc_uint64(zap_t *zap, const uint64_t *key, int numints)
{
zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);


@ -30,6 +30,9 @@
#include <sys/zfs_sa.h>
#include <sys/zfs_acl.h>
#ifndef _KERNEL
static
#endif
void
zfs_oldace_byteswap(ace_t *ace, int ace_cnt)
{
@ -46,6 +49,9 @@ zfs_oldace_byteswap(ace_t *ace, int ace_cnt)
/*
* swap ace_t and ace_object_t
*/
#ifndef _KERNEL
static
#endif
void
zfs_ace_byteswap(void *buf, size_t size, boolean_t zfs_layout)
{


@ -735,13 +735,13 @@ zfs_secpolicy_send_new(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
ZFS_DELEG_PERM_SEND, cr));
}
int
static int
zfs_secpolicy_share(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (SET_ERROR(ENOTSUP));
}
int
static int
zfs_secpolicy_smb_acl(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (SET_ERROR(ENOTSUP));
@ -6421,7 +6421,7 @@ zfs_ioc_send_new(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
}
/* ARGSUSED */
int
static int
send_space_sum(objset_t *os, void *buf, int len, void *arg)
{
uint64_t *size = arg;
@ -6876,7 +6876,7 @@ zfs_ioctl_register_dataset_modify(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
DATASET_NAME, B_TRUE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
}
void
static void
zfs_ioctl_init(void)
{
zfs_ioctl_register("snapshot", ZFS_IOC_SNAPSHOT,
@ -7259,7 +7259,7 @@ zfs_check_input_nvpairs(nvlist_t *innvl, const zfs_ioc_vec_t *vec)
return (0);
}
int
static int
pool_status_check(const char *name, zfs_ioc_namecheck_t type,
zfs_ioc_poolcheck_t check)
{


@ -602,7 +602,7 @@ zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
* Called when we create in-memory log transactions so that we know
* to cleanup the itxs at the end of spa_sync().
*/
void
static void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
@ -628,7 +628,7 @@ zilog_dirty(zilog_t *zilog, uint64_t txg)
* dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
* state.
*/
boolean_t
static boolean_t __maybe_unused
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
@ -642,7 +642,7 @@ zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
* Determine if the zil is dirty. The zil is considered dirty if it has
* any pending itx records that have not been cleaned by zil_clean().
*/
boolean_t
static boolean_t
zilog_is_dirty(zilog_t *zilog)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;


@ -2083,7 +2083,7 @@ zio_execute(zio_t *zio)
* enough to allow zio_execute() to be called recursively. A minimum
* stack size of 16K is required to avoid needing to re-dispatch the zio.
*/
boolean_t
static boolean_t
zio_execute_stack_check(zio_t *zio)
{
#if !defined(HAVE_LARGE_STACKS)


@ -100,7 +100,7 @@ abd_checksum_off(abd_t *abd, uint64_t size,
}
/*ARGSUSED*/
void
static void
abd_fletcher_2_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
@ -110,7 +110,7 @@ abd_fletcher_2_native(abd_t *abd, uint64_t size,
}
/*ARGSUSED*/
void
static void
abd_fletcher_2_byteswap(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{


@ -46,7 +46,7 @@ static void usage(char *);
/*
* psudo-randomize the buffer
*/
void randomize_buffer(int block_size) {
static void randomize_buffer(int block_size) {
int i;
char rnd = rand() & 0xff;
for (i = 0; i < block_size; i++)


@ -918,7 +918,7 @@ enum zfs_ioc_ref {
* Canonical reference check of /dev/zfs ioctl numbers.
* These cannot change and new ioctl numbers must be appended.
*/
boolean_t
static boolean_t
validate_ioc_values(void)
{
boolean_t result = B_TRUE;


@ -28,7 +28,7 @@
#define BSZ 64
void
static void
fill_random(char *buf, int len)
{
int i;