Fix typos in module/zfs/

Reviewed-by: Matt Ahrens <matt@delphix.com>
Reviewed-by: Ryan Moeller <ryan@ixsystems.com>
Reviewed-by: Richard Laager <rlaager@wiktel.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Andrea Gelmini <andrea.gelmini@gelma.net>
Closes #9240
Commit 5097eb6ac9 (parent 6673ef3f6f)
Andrea Gelmini authored on 2019-09-03 02:56:41 +02:00; committed by Tony Hutter
49 changed files with 101 additions and 101 deletions


@@ -62,7 +62,7 @@
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, its simply
 * a matter of choosing a single page to evict. In our model, we
- * have variable sized cache blocks (rangeing from 512 bytes to
+ * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes). We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
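
The eviction scheme this comment describes is easy to picture with a small sketch. This is illustrative only — `cache_buf_t`, `evict_to_fit()`, and the list plumbing below are hypothetical stand-ins, not the real `arc_evict()` machinery:

```c
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical stand-in for a variable-sized ARC buffer. */
typedef struct cache_buf {
	struct cache_buf *prev;		/* toward the hot end of the list */
	uint64_t size;			/* anywhere from 512 bytes to 128K */
} cache_buf_t;

/*
 * Evict buffers from the cold end of the list until the space freed
 * approximates the size of the block that just missed.
 */
static uint64_t
evict_to_fit(cache_buf_t **tail, uint64_t needed)
{
	uint64_t freed = 0;

	while (*tail != NULL && freed < needed) {
		cache_buf_t *victim = *tail;

		*tail = victim->prev;
		freed += victim->size;
		free(victim);
	}
	return (freed);
}
```
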
@@ -262,7 +262,7 @@
 * The L1ARC has a slightly different system for storing encrypted data.
 * Raw (encrypted + possibly compressed) data has a few subtle differences from
 * data that is just compressed. The biggest difference is that it is not
- * possible to decrypt encrypted data (or visa versa) if the keys aren't loaded.
+ * possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded.
 * The other difference is that encryption cannot be treated as a suggestion.
 * If a caller would prefer compressed data, but they actually wind up with
 * uncompressed data the worst thing that could happen is there might be a
@@ -2152,7 +2152,7 @@ arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
 }

 /*
- * Adjust encrypted and authenticated headers to accomodate
+ * Adjust encrypted and authenticated headers to accommodate
 * the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are
 * allowed to fail decryption due to keys not being loaded
 * without being marked as an IO error.
@@ -2221,7 +2221,7 @@ arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
 if (arc_buf_is_shared(buf)) {
 ASSERT(ARC_BUF_COMPRESSED(buf));

- /* We need to give the buf it's own b_data */
+ /* We need to give the buf its own b_data */
 buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
 buf->b_data =
 arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
@@ -2837,7 +2837,7 @@ arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
 * sufficient to make this guarantee, however it's possible
 * (specifically in the rare L2ARC write race mentioned in
 * arc_buf_alloc_impl()) there will be an existing uncompressed buf that
- * is sharable, but wasn't at the time of its allocation. Rather than
+ * is shareable, but wasn't at the time of its allocation. Rather than
 * allow a new shared uncompressed buf to be created and then shuffle
 * the list around to make it the last element, this simply disallows
 * sharing if the new buf isn't the first to be added.
@@ -2896,7 +2896,7 @@ arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
 /*
 * Only honor requests for compressed bufs if the hdr is actually
- * compressed. This must be overriden if the buffer is encrypted since
+ * compressed. This must be overridden if the buffer is encrypted since
 * encrypted buffers cannot be decompressed.
 */
 if (encrypted) {
@@ -3200,7 +3200,7 @@ arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
 }

 /*
- * Free up buf->b_data and pull the arc_buf_t off of the the arc_buf_hdr_t's
+ * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
 * list and free it.
 */
 static void
@@ -3659,7 +3659,7 @@ arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
 /*
 * This function is used by the send / receive code to convert a newly
 * allocated arc_buf_t to one that is suitable for a raw encrypted write. It
- * is also used to allow the root objset block to be uupdated without altering
+ * is also used to allow the root objset block to be updated without altering
 * its embedded MACs. Both block types will always be uncompressed so we do not
 * have to worry about compression type or psize.
 */
@@ -6188,7 +6188,7 @@ top:
 /*
 * Determine if we have an L1 cache hit or a cache miss. For simplicity
- * we maintain encrypted data seperately from compressed / uncompressed
+ * we maintain encrypted data separately from compressed / uncompressed
 * data. If the user is requesting raw encrypted data and we don't have
 * that in the header we will read from disk to guarantee that we can
 * get it even if the encryption keys aren't loaded.


@@ -2275,7 +2275,7 @@ dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
 ASSERT(!zfs_refcount_is_zero(&db->db_holds));

 /*
- * Quick check for dirtyness. For already dirty blocks, this
+ * Quick check for dirtiness. For already dirty blocks, this
 * reduces runtime of this function by >90%, and overall performance
 * by 50% for some workloads (e.g. file deletion with indirect blocks
 * cached).
@@ -2791,7 +2791,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
 * Hold the dn_dbufs_mtx while we get the new dbuf
 * in the hash table *and* added to the dbufs list.
 * This prevents a possible deadlock with someone
- * trying to look up this dbuf before its added to the
+ * trying to look up this dbuf before it's added to the
 * dn_dbufs list.
 */
 mutex_enter(&dn->dn_dbufs_mtx);
@@ -3185,7 +3185,7 @@ dbuf_hold_impl_arg(struct dbuf_hold_arg *dh)
 ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));

 /*
- * If this buffer is currently syncing out, and we are are
+ * If this buffer is currently syncing out, and we are
 * still referencing it from db_data, we need to make a copy
 * of it in case we decide we want to dirty it again in this txg.
 */
@@ -3666,7 +3666,7 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
 /*
 * This buffer was allocated at a time when there was
 * no available blkptrs from the dnode, or it was
- * inappropriate to hook it in (i.e., nlevels mis-match).
+ * inappropriate to hook it in (i.e., nlevels mismatch).
 */
 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
 ASSERT(db->db_parent == NULL);


@@ -646,11 +646,11 @@ dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
 /*
 * Issue prefetch i/os for the given blocks. If level is greater than 0, the
- * indirect blocks prefeteched will be those that point to the blocks containing
+ * indirect blocks prefetched will be those that point to the blocks containing
 * the data starting at offset, and continuing to offset + len.
 *
 * Note that if the indirect blocks above the blocks being prefetched are not
- * in cache, they will be asychronously read in.
+ * in cache, they will be asynchronously read in.
 */
 void
 dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
@@ -2301,7 +2301,7 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
 * Determine dedup setting. If we are in dmu_sync(),
 * we won't actually dedup now because that's all
 * done in syncing context; but we do want to use the
- * dedup checkum. If the checksum is not strong
+ * dedup checksum. If the checksum is not strong
 * enough to ensure unique signatures, force
 * dedup_verify.
 */


@@ -1027,7 +1027,7 @@ dmu_objset_create_impl_dnstats(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
 /*
 * We don't want to have to increase the meta-dnode's nlevels
- * later, because then we could do it in quescing context while
+ * later, because then we could do it in quiescing context while
 * we are also accessing it in open context.
 *
 * This precaution is not necessary for the MOS (ds == NULL),
@@ -2742,7 +2742,7 @@ dmu_objset_find_dp_cb(void *arg)
 /*
 * We need to get a pool_config_lock here, as there are several
- * asssert(pool_config_held) down the stack. Getting a lock via
+ * assert(pool_config_held) down the stack. Getting a lock via
 * dsl_pool_config_enter is risky, as it might be stalled by a
 * pending writer. This would deadlock, as the write lock can
 * only be granted when our parent thread gives up the lock.


@@ -223,7 +223,7 @@ dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data)
 * can only read from blocks that we carefully ensure are on
 * concrete vdevs (or previously-loaded indirect vdevs). So we
 * can't allow the predictive prefetcher to attempt reads of other
- * blocks (e.g. of the MOS's dnode obejct).
+ * blocks (e.g. of the MOS's dnode object).
 */
 if (!spa_indirect_vdevs_loaded(spa))
 return;


@@ -1787,7 +1787,7 @@ dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
 dn->dn_indblkshift = ibs;
 dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
 }

- /* rele after we have fixed the blocksize in the dnode */
+ /* release after we have fixed the blocksize in the dnode */
 if (db)
 dbuf_rele(db, FTAG);


@@ -84,7 +84,7 @@ dsl_dataset_bmark_lookup(dsl_dataset_t *ds, const char *shortname,
 }

 /*
- * If later_ds is non-NULL, this will return EXDEV if the the specified bookmark
+ * If later_ds is non-NULL, this will return EXDEV if the specified bookmark
 * does not represents an earlier point in later_ds's timeline.
 *
 * Returns ENOENT if the dataset containing the bookmark does not exist.


@@ -227,7 +227,7 @@ dsl_crypto_params_create_nvlist(dcp_cmd_t cmd, nvlist_t *props,
 goto error;
 }

- /* if the user asked for the deault crypt, determine that now */
+ /* if the user asked for the default crypt, determine that now */
 if (dcp->cp_crypt == ZIO_CRYPT_ON)
 dcp->cp_crypt = ZIO_CRYPT_ON_VALUE;
@@ -1596,7 +1596,7 @@ spa_keystore_change_key(const char *dsname, dsl_crypto_params_t *dcp)
 /*
 * Perform the actual work in syncing context. The blocks modified
 * here could be calculated but it would require holding the pool
- * lock and tarversing all of the datasets that will have their keys
+ * lock and traversing all of the datasets that will have their keys
 * changed.
 */
 return (dsl_sync_task(dsname, spa_keystore_change_key_check,
@@ -1714,7 +1714,7 @@ dsl_dataset_promote_crypt_sync(dsl_dir_t *target, dsl_dir_t *origin,
 return;

 /*
- * If the target is being promoted to the encyrption root update the
+ * If the target is being promoted to the encryption root update the
 * DSL Crypto Key and keylocation to reflect that. We also need to
 * update the DSL Crypto Keys of all children inheritting their
 * encryption root to point to the new target. Otherwise, the check


@@ -363,7 +363,7 @@ load_zfeature(objset_t *mos, dsl_dataset_t *ds, spa_feature_t f)
 }

 /*
- * We have to release the fsid syncronously or we risk that a subsequent
+ * We have to release the fsid synchronously or we risk that a subsequent
 * mount of the same dataset will fail to unique_insert the fsid. This
 * failure would manifest itself as the fsid of this dataset changing
 * between mounts which makes NFS clients quite unhappy.
@@ -2076,7 +2076,7 @@ get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
 * We use nvlist_alloc() instead of fnvlist_alloc() because the
 * latter would allocate the list with NV_UNIQUE_NAME flag.
 * As a result, every time a clone name is appended to the list
- * it would be (linearly) searched for for a duplicate name.
+ * it would be (linearly) searched for a duplicate name.
 * We already know that all clone names must be unique and we
 * want avoid the quadratic complexity of double-checking that
 * because we can have a large number of clones.
@@ -2404,7 +2404,7 @@ dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
 int error;
 dsl_pool_t *dp = ds->ds_dir->dd_pool;

- /* Retrieve the mountpoint value stored in the zap opbject */
+ /* Retrieve the mountpoint value stored in the zap object */
 error = dsl_prop_get_ds(ds, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1,
 ZAP_MAXVALUELEN, value, source);
 if (error != 0) {
@@ -3635,7 +3635,7 @@ dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
 * The clone can't be too much over the head's refquota.
 *
 * To ensure that the entire refquota can be used, we allow one
- * transaction to exceed the the refquota. Therefore, this check
+ * transaction to exceed the refquota. Therefore, this check
 * needs to also allow for the space referenced to be more than the
 * refquota. The maximum amount of space that one transaction can use
 * on disk is DMU_MAX_ACCESS * spa_asize_inflation. Allowing this
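
As a hedged simplification of the check described above (the variable names below are illustrative, not copied from dsl_dataset.c), the allowed overshoot works out to:

```c
/*
 * One transaction may push the referenced space past the refquota by
 * at most DMU_MAX_ACCESS * spa_asize_inflation bytes of on-disk space.
 */
static boolean_t
refquota_exceeded(uint64_t referenced, uint64_t refquota, uint64_t slack)
{
	/* slack = DMU_MAX_ACCESS * spa_asize_inflation */
	return (refquota != 0 && referenced > refquota + slack);
}
```
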


@@ -632,7 +632,7 @@ dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
 /*
 * lzc_destroy_snaps() is documented to fill the errlist with
- * int32 values, so we need to covert the int64 values that are
+ * int32 values, so we need to convert the int64 values that are
 * returned from LUA.
 */
 int rv = 0;


@@ -96,7 +96,7 @@
 * limit set. If there is a limit at any initialized level up the tree, the
 * check must pass or the creation will fail. Likewise, when a filesystem or
 * snapshot is destroyed, the counts are recursively adjusted all the way up
- * the initizized nodes in the tree. Renaming a filesystem into different point
+ * the initialized nodes in the tree. Renaming a filesystem into different point
 * in the tree will first validate, then update the counts on each branch up to
 * the common ancestor. A receive will also validate the counts and then update
 * them.
@@ -1495,7 +1495,7 @@ dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
 * less than the amount specified.
 *
 * NOTE: The behavior of this function is identical to the Illumos / FreeBSD
- * version however it has been adjusted to use an iterative rather then
+ * version however it has been adjusted to use an iterative rather than
 * recursive algorithm to minimize stack usage.
 */
 void


@@ -1911,7 +1911,7 @@ dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
 /*
 * This debugging is commented out to conserve stack space. This
- * function is called recursively and the debugging addes several
+ * function is called recursively and the debugging adds several
 * bytes to the stack for each call. It can be commented back in
 * if required to debug an issue in dsl_scan_visitbp().
 *
@@ -3391,7 +3391,7 @@ dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
 /*
 * This is the primary entry point for scans that is called from syncing
 * context. Scans must happen entirely during syncing context so that we
- * cna guarantee that blocks we are currently scanning will not change out
+ * can guarantee that blocks we are currently scanning will not change out
 * from under us. While a scan is active, this function controls how quickly
 * transaction groups proceed, instead of the normal handling provided by
 * txg_sync_thread().
@@ -3995,7 +3995,7 @@ scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
 * As can be seen, at fill_ratio=3, the algorithm is slightly biased towards
 * extents that are more completely filled (in a 3:2 ratio) vs just larger.
 * Note that as an optimization, we replace multiplication and division by
- * 100 with bitshifting by 7 (which effecitvely multiplies and divides by 128).
+ * 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
 */
 static int
 ext_size_compare(const void *x, const void *y)
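
The shift trick mentioned in the last context line is worth a tiny worked example. This is a sketch, not the actual `ext_size_compare()` internals; `filled` and `segsize` are illustrative names:

```c
#include <stdint.h>

static uint64_t
fill_weight(uint64_t filled, uint64_t segsize)
{
	uint64_t exact = (filled * 100) / segsize;	/* true percentage */
	uint64_t cheap = (filled << 7) / segsize;	/* scaled by 128 */

	/*
	 * Both values differ only by a constant factor (128/100, up to
	 * integer truncation), so relative comparisons are preserved
	 * while the shift avoids a multiply.
	 */
	(void) exact;
	return (cheap);
}
```
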


@@ -143,7 +143,7 @@ dsl_sync_task(const char *pool, dsl_checkfunc_t *checkfunc,
 * For that reason, early synctasks can affect the process of writing dirty
 * changes to disk for the txg that they run and should be used with caution.
 * In addition, early synctasks should not dirty any metaslabs as this would
- * invalidate the precodition/invariant for subsequent early synctasks.
+ * invalidate the precondition/invariant for subsequent early synctasks.
 * [see dsl_pool_sync() and dsl_early_sync_task_verify()]
 */
 int


@@ -302,7 +302,7 @@ dsl_dataset_user_hold_sync(void *arg, dmu_tx_t *tx)
 * holds is nvl of snapname -> holdname
 * errlist will be filled in with snapname -> error
 *
- * The snaphosts must all be in the same pool.
+ * The snapshots must all be in the same pool.
 *
 * Holds for snapshots that don't exist will be skipped.
 *
@@ -556,9 +556,9 @@ dsl_dataset_user_release_sync(void *arg, dmu_tx_t *tx)
 * errlist will be filled in with snapname -> error
 *
 * If tmpdp is not NULL the names for holds should be the dsobj's of snapshots,
- * otherwise they should be the names of shapshots.
+ * otherwise they should be the names of snapshots.
 *
- * As a release may cause snapshots to be destroyed this trys to ensure they
+ * As a release may cause snapshots to be destroyed this tries to ensure they
 * aren't mounted.
 *
 * The release of non-existent holds are skipped.


@@ -31,7 +31,7 @@
 * Name-Value Pair Lists
 *
 * The embodiment of an FMA protocol element (event, fmri or authority) is a
- * name-value pair list (nvlist_t). FMA-specific nvlist construtor and
+ * name-value pair list (nvlist_t). FMA-specific nvlist constructor and
 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
 * to create an nvpair list using custom allocators. Callers may choose to
 * allocate either from the kernel memory allocator, or from a preallocated
@@ -784,7 +784,7 @@ zfs_zevent_destroy(zfs_zevent_t *ze)
 #endif /* _KERNEL */

 /*
- * Wrapppers for FM nvlist allocators
+ * Wrappers for FM nvlist allocators
 */
 /* ARGSUSED */
 static void *


@@ -87,12 +87,12 @@
 *
 * In this case, a weak guarantee is provided. Since the host which last had
 * the pool imported will suspend the pool if no mmp writes land within
- * fail_intervals * multihost_interval ms, the absense of writes during that
+ * fail_intervals * multihost_interval ms, the absence of writes during that
 * time means either the pool is not imported, or it is imported but the pool
 * is suspended and no further writes will occur.
 *
 * Note that resuming the suspended pool on the remote host would invalidate
- * this gurantee, and so it is not allowed.
+ * this guarantee, and so it is not allowed.
 *
 * The factor of 2 provides a conservative safety factor and derives from
 * MMP_IMPORT_SAFETY_FACTOR;
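
A worked example of the window described above, assuming the default tunables `multihost_interval = 1000` ms and `fail_intervals = 10` (illustrative values only; both are tunables):

```c
#include <stdint.h>

static uint64_t
mmp_min_import_wait_ms(uint64_t multihost_interval_ms, uint64_t fail_intervals)
{
	/* The last importer suspends after this much MMP-write silence. */
	uint64_t suspend_ms = fail_intervals * multihost_interval_ms; /* 10 s */

	/* The factor of 2 is the conservative safety factor noted above. */
	return (2 * suspend_ms);                                      /* 20 s */
}
```
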


@@ -70,7 +70,7 @@ static int
 priv_policy_user(const cred_t *cr, int capability, boolean_t all, int err)
 {
 /*
- * All priv_policy_user checks are preceeded by kuid/kgid_has_mapping()
+ * All priv_policy_user checks are preceded by kuid/kgid_has_mapping()
 * checks. If we cannot do them, we shouldn't be using ns_capable()
 * since we don't know whether the affected files are valid in our
 * namespace. Note that kuid_has_mapping() came after cred->user_ns, so


@@ -80,7 +80,7 @@ typedef struct qat_stats {
 * Number of fails in the QAT compression / decompression engine.
 * Note: when a QAT error happens, it doesn't necessarily indicate a
 * critical hardware issue. Sometimes it is because the output buffer
- * is not big enough. The compression job will be transfered to the
+ * is not big enough. The compression job will be transferred to the
 * gzip software implementation so the functionality of ZFS is not
 * impacted.
 */
@@ -113,7 +113,7 @@ typedef struct qat_stats {
 /*
 * Number of fails in the QAT encryption / decryption engine.
 * Note: when a QAT error happens, it doesn't necessarily indicate a
- * critical hardware issue. The encryption job will be transfered
+ * critical hardware issue. The encryption job will be transferred
 * to the software implementation so the functionality of ZFS is
 * not impacted.
 */
@@ -130,7 +130,7 @@ typedef struct qat_stats {
 /*
 * Number of fails in the QAT checksum engine.
 * Note: when a QAT error happens, it doesn't necessarily indicate a
- * critical hardware issue. The checksum job will be transfered to the
+ * critical hardware issue. The checksum job will be transferred to the
 * software implementation so the functionality of ZFS is not impacted.
 */
 kstat_named_t cksum_fails;


@@ -83,7 +83,7 @@
 * Layouts are simply an array of the attributes and their
 * ordering i.e. [0, 1, 4, 5, 2]
 *
- * Each distinct layout is given a unique layout number and that is whats
+ * Each distinct layout is given a unique layout number and that is what's
 * stored in the header at the beginning of the SA data buffer.
 *
 * A layout only covers a single dbuf (bonus or spill). If a set of
@@ -95,7 +95,7 @@
 * Adding a single attribute will cause the entire set of attributes to
 * be rewritten and could result in a new layout number being constructed
 * as part of the rewrite if no such layout exists for the new set of
- * attribues. The new attribute will be appended to the end of the already
+ * attributes. The new attribute will be appended to the end of the already
 * existing attributes.
 *
 * Both the attribute registration and attribute layout information are


@@ -2113,7 +2113,7 @@ spa_load_verify_done(zio_t *zio)
 }

 /*
- * Maximum number of inflight bytes is the log2 faction of the arc size.
+ * Maximum number of inflight bytes is the log2 fraction of the arc size.
 * By default, we set it to 1/16th of the arc.
 */
 int spa_load_verify_shift = 4;
@@ -2566,7 +2566,7 @@ spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
 } else if (MMP_VALID(ub)) {
 /*
- * zfs-0.7 compatability case
+ * zfs-0.7 compatibility case
 */
 import_delay = MAX(import_delay, (multihost_interval +
@@ -3861,7 +3861,7 @@ spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
 need_update = B_TRUE;

 /*
- * Update the config cache asychronously in case we're the
+ * Update the config cache asynchronously in case we're the
 * root pool, in which case the config cache isn't writable yet.
 */
 if (need_update)
@@ -4174,7 +4174,7 @@ spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
 return (error);

 /*
- * Redo the loading process process again with the
+ * Redo the loading process again with the
 * checkpointed uberblock.
 */
 spa_ld_prepare_for_reload(spa);
@@ -7923,7 +7923,7 @@ spa_sync_props(void *arg, dmu_tx_t *tx)
 case ZPOOL_PROP_READONLY:
 case ZPOOL_PROP_CACHEFILE:
 /*
- * 'readonly' and 'cachefile' are also non-persisitent
+ * 'readonly' and 'cachefile' are also non-persistent
 * properties.
 */
 break;
@@ -8734,7 +8734,7 @@ EXPORT_SYMBOL(spa_inject_delref);
 EXPORT_SYMBOL(spa_scan_stat_init);
 EXPORT_SYMBOL(spa_scan_get_stats);

- /* device maniion */
+ /* device manipulation */
 EXPORT_SYMBOL(spa_vdev_add);
 EXPORT_SYMBOL(spa_vdev_attach);
 EXPORT_SYMBOL(spa_vdev_detach);


@@ -31,7 +31,7 @@
 * and the current log. All errors seen are logged to the current log. When a
 * scrub completes, the current log becomes the last log, the last log is thrown
 * out, and the current log is reinitialized. This way, if an error is somehow
- * corrected, a new scrub will show that that it no longer exists, and will be
+ * corrected, a new scrub will show that it no longer exists, and will be
 * deleted from the log when the scrub completes.
 *
 * The log is stored using a ZAP object whose key is a string form of the


@@ -63,7 +63,7 @@
 * overwrite the original creation of the pool. 'sh_phys_max_off' is the
 * physical ending offset in bytes of the log. This tells you the length of
 * the buffer. 'sh_eof' is the logical EOF (in bytes). Whenever a record
- * is added, 'sh_eof' is incremented by the the size of the record.
+ * is added, 'sh_eof' is incremented by the size of the record.
 * 'sh_eof' is never decremented. 'sh_bof' is the logical BOF (in bytes).
 * This is where the consumer should start reading from after reading in
 * the 'zpool create' portion of the log.
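
The offset scheme reads more clearly as arithmetic. This is a hedged sketch: `sh_pool_create_len` (the length of the preserved 'zpool create' portion) is assumed here and does not appear in the excerpt above:

```c
#include <stdint.h>

/* Map a monotonically increasing logical offset into the ring buffer. */
static uint64_t
history_phys_off(uint64_t logical, uint64_t sh_pool_create_len,
    uint64_t sh_phys_max_off)
{
	if (logical < sh_pool_create_len)
		return (logical);	/* creation records never wrap */

	return (sh_pool_create_len + (logical - sh_pool_create_len) %
	    (sh_phys_max_off - sh_pool_create_len));
}
```
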


@@ -644,8 +644,8 @@ txg_quiesce_thread(void *arg)
 /*
 * Delay this thread by delay nanoseconds if we are still in the open
- * transaction group and there is already a waiting txg quiesing or quiesced.
- * Abort the delay if this txg stalls or enters the quiesing state.
+ * transaction group and there is already a waiting txg quiescing or quiesced.
+ * Abort the delay if this txg stalls or enters the quiescing state.
 */
 void
 txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
@@ -768,7 +768,7 @@ txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)
 /*
 * If there isn't a txg syncing or in the pipeline, push another txg through
- * the pipeline by queiscing the open txg.
+ * the pipeline by quiescing the open txg.
 */
 void
 txg_kick(dsl_pool_t *dp)


@@ -223,7 +223,7 @@ vdev_default_xlate(vdev_t *vd, const range_seg_t *in, range_seg_t *res)
 }

 /*
- * Derive the enumerated alloction bias from string input.
+ * Derive the enumerated allocation bias from string input.
 * String origin is either the per-vdev zap or zpool(1M).
 */
 static vdev_alloc_bias_t
@@ -1320,7 +1320,7 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
 #ifndef _KERNEL
 /*
- * To accomodate zdb_leak_init() fake indirect
+ * To accommodate zdb_leak_init() fake indirect
 * metaslabs, we allocate a metaslab group for
 * indirect vdevs which normally don't have one.
 */
@@ -4177,7 +4177,7 @@ vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
 * factor. We must calculate this here and not at the root vdev
 * because the root vdev's psize-to-asize is simply the max of its
- * childrens', thus not accurate enough for us.
+ * children's, thus not accurate enough for us.
 */
 dspace_delta = vdev_deflated_space(vd, space_delta);


@@ -46,7 +46,7 @@
 * terribly wasteful of bandwidth. A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region. Currently, only
- * metadata I/O is inflated. A futher enhancement could take advantage of
+ * metadata I/O is inflated. A further enhancement could take advantage of
 * more semantic information about the I/O. And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *


@@ -599,7 +599,7 @@ vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list)
 }

 /*
- * Stop initializing a device, with the resultant initialing state being
+ * Stop initializing a device, with the resultant initializing state being
 * tgt_state. For blocking behavior pass NULL for vd_list. Otherwise, when
 * a list_t is provided the stopping vdev is inserted in to the list. Callers
 * are then required to call vdev_initialize_stop_wait() to block for all the


@@ -485,7 +485,7 @@ vdev_mirror_preferred_child_randomize(zio_t *zio)
 /*
 * Try to find a vdev whose DTL doesn't contain the block we want to read
- * prefering vdevs based on determined load.
+ * preferring vdevs based on determined load.
 *
 * Try to find a child whose DTL doesn't contain the block we want to read.
 * If we can't, try the read on any vdev we haven't already tried.


@@ -893,7 +893,7 @@ vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
 * ZIO_PRIORITY_NOW is used by the vdev cache code and the aggregate zio
 * code to issue IOs without adding them to the vdev queue. In this
 * case, the zio is already going to be issued as quickly as possible
- * and so it doesn't need any reprioitization to help.
+ * and so it doesn't need any reprioritization to help.
 */
 if (zio->io_priority == ZIO_PRIORITY_NOW)
 return;


@@ -98,7 +98,7 @@
 * R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1
 * = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1
 *
- * We chose 1, 2, and 4 as our generators because 1 corresponds to the trival
+ * We chose 1, 2, and 4 as our generators because 1 corresponds to the trivial
 * XOR operation, and 2 and 4 can be computed quickly and generate linearly-
 * independent coefficients. (There are no additional coefficients that have
 * this property which is why the uncorrected Plank method breaks down.)
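
The Horner form in this hunk maps directly onto code. The scalar sketch below is illustrative only (the real vdev_raidz.c operates on 64-bit words and SIMD registers); it assumes the standard RAID-Z generator polynomial for GF(2^8), whose reduction constant is 0x1d:

```c
#include <stddef.h>
#include <stdint.h>

/* Double a value in GF(2^8): shift, then reduce if the high bit was set. */
static uint8_t
gf_mul2(uint8_t a)
{
	return ((uint8_t)(a << 1) ^ ((a & 0x80) ? 0x1d : 0x00));
}

/* R = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-1, with + being XOR. */
static uint8_t
raidz_r_parity(const uint8_t *d, size_t n)
{
	uint8_t r = 0;

	for (size_t i = 0; i < n; i++)
		r = gf_mul2(gf_mul2(r)) ^ d[i];	/* multiply by 4, add D_i */
	return (r);
}
```
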
@@ -447,7 +447,7 @@ vdev_raidz_map_alloc(zio_t *zio, uint64_t ashift, uint64_t dcols,
 /*
 * If all data stored spans all columns, there's a danger that parity
 * will always be on the same device and, since parity isn't read
- * during normal operation, that that device's I/O bandwidth won't be
+ * during normal operation, that device's I/O bandwidth won't be
 * used effectively. We therefore switch the parity every 1MB.
 *
 * ... at least that was, ostensibly, the theory. As a practical
@@ -2336,7 +2336,7 @@ vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded)
 /*
 * Determine if any portion of the provided block resides on a child vdev
 * with a dirty DTL and therefore needs to be resilvered. The function
- * assumes that at least one DTL is dirty which imples that full stripe
+ * assumes that at least one DTL is dirty which implies that full stripe
 * width blocks must be resilvered.
 */
 static boolean_t


@@ -42,7 +42,7 @@
 /*
 * Here we need registers not used otherwise.
 * They will be used in unused ASM for the case
- * with more registers than required... but GGC
+ * with more registers than required... but GCC
 * will still need to make sure the constraints
 * are correct, and duplicate constraints are illegal
 * ... and we use the "register" number as a name


@@ -66,7 +66,7 @@
 * consuming excessive system or running forever. If one of these limits is
 * hit, the channel program will be stopped immediately and return from
 * zcp_eval() with an error code. No attempt will be made to roll back or undo
- * any changes made by the channel program before the error occured.
+ * any changes made by the channel program before the error occurred.
 * Consumers invoking zcp_eval() from elsewhere in the kernel may pass a time
 * limit of 0, disabling the time limit.
 *
@@ -77,7 +77,7 @@
 * In place of a return value, an error message will also be returned in the
 * 'result' nvlist containing information about the error. No attempt will be
 * made to roll back or undo any changes made by the channel program before the
- * error occured.
+ * error occurred.
 *
 * 3. If an error occurs inside a ZFS library call which returns an error code,
 * the error is returned to the Lua script to be handled as desired.
@@ -160,7 +160,7 @@ zcp_argerror(lua_State *state, int narg, const char *msg, ...)
 * of a function call.
 *
 * If an error occurs, the cleanup function will be invoked exactly once and
- * then unreigstered.
+ * then unregistered.
 *
 * Returns the registered cleanup handler so the caller can deregister it
 * if no error occurs.


@@ -547,7 +547,7 @@ get_zap_prop(lua_State *state, dsl_dataset_t *ds, zfs_prop_t zfs_prop)
 error = dsl_prop_get_ds(ds, prop_name, sizeof (numval),
 1, &numval, setpoint);

- /* Fill in temorary value for prop, if applicable */
+ /* Fill in temporary value for prop, if applicable */
 (void) get_temporary_prop(ds, zfs_prop, &numval, setpoint);

 /* Push value to lua stack */
@@ -678,7 +678,7 @@ parse_userquota_prop(const char *prop_name, zfs_userquota_prop_t *type,
 if (strncmp(cp, "S-1-", 4) == 0) {
 /*
 * It's a numeric SID (eg "S-1-234-567-89") and we want to
- * seperate the domain id and the rid
+ * separate the domain id and the rid
 */
 int domain_len = strrchr(cp, '-') - cp;
 domain_val = kmem_alloc(domain_len + 1, KM_SLEEP);
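
The `strrchr()` split in this hunk is self-contained enough to demonstrate in userspace (the kernel code uses `kmem_alloc()` and nvlist plumbing instead; this standalone program is only an illustration):

```c
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *cp = "S-1-234-567-89";
	int domain_len = strrchr(cp, '-') - cp;

	printf("domain: %.*s\n", domain_len, cp);	/* S-1-234-567 */
	printf("rid:    %s\n", cp + domain_len + 1);	/* 89 */
	return (0);
}
```
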


@@ -435,7 +435,7 @@ static zcp_list_info_t zcp_system_props_list_info = {
 };

 /*
- * Get a list of all visble properties and their values for a given dataset.
+ * Get a list of all visible properties and their values for a given dataset.
 * Returned on the stack as a Lua table.
 */
 static int


@@ -44,7 +44,7 @@ zfs_oldace_byteswap(ace_t *ace, int ace_cnt)
 }

 /*
- * swap ace_t and ace_oject_t
+ * swap ace_t and ace_object_t
 */
 void
 zfs_ace_byteswap(void *buf, size_t size, boolean_t zfs_layout)
@@ -70,7 +70,7 @@ zfs_ace_byteswap(void *buf, size_t size, boolean_t zfs_layout)
 * larger than needed to hold the aces
 * present. As long as we do not do any
 * swapping beyond the end of our block we are
- * okay. It it safe to swap any non-ace data
+ * okay. It is safe to swap any non-ace data
 * within the block since it is just zeros.
 */
 if (ptr + sizeof (zfs_ace_hdr_t) > end) {


@@ -596,7 +596,7 @@ zfsctl_root(znode_t *zp)
 /*
 * Generate a long fid to indicate a snapdir. We encode whether snapdir is
- * already monunted in gen field. We do this because nfsd lookup will not
+ * already mounted in gen field. We do this because nfsd lookup will not
 * trigger automount. Next time the nfsd does fh_to_dentry, we will notice
 * this and do automount and return ESTALE to force nfsd revalidate and follow
 * mount.


@@ -55,7 +55,7 @@
 #include <sys/zfs_sa.h>

 /*
- * zfs_match_find() is used by zfs_dirent_lock() to peform zap lookups
+ * zfs_match_find() is used by zfs_dirent_lock() to perform zap lookups
 * of names after deciding which is the appropriate lookup interface.
 */
 static int
@@ -232,7 +232,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
 /*
 * Wait until there are no locks on this name.
 *
- * Don't grab the the lock if it is already held. However, cannot
+ * Don't grab the lock if it is already held. However, cannot
 * have both ZSHARED and ZHAVELOCK together.
 */
 ASSERT(!(flag & ZSHARED) || !(flag & ZHAVELOCK));


@@ -6877,7 +6877,7 @@ zfs_check_input_nvpairs(nvlist_t *innvl, const zfs_ioc_vec_t *vec)
 continue;

 if (nvl_keys[k].zkey_flags & ZK_WILDCARDLIST) {
- /* at least one non-optionial key is expected here */
+ /* at least one non-optional key is expected here */
 if (!required_keys_found)
 return (SET_ERROR(ZFS_ERR_IOC_ARG_REQUIRED));
 continue;


@@ -1476,7 +1476,7 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
 * "preferred" size.
 */

- /* Round up so we never have a filesytem using 0 blocks. */
+ /* Round up so we never have a filesystem using 0 blocks. */
 refdbytes = P2ROUNDUP(refdbytes, statp->f_bsize);
 statp->f_blocks = (refdbytes + availbytes) >> bshift;
 statp->f_bfree = availbytes >> bshift;
@@ -2396,7 +2396,7 @@ zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
 }

 /*
- * Return true if the coresponding vfs's unmounted flag is set.
+ * Return true if the corresponding vfs's unmounted flag is set.
 * Otherwise return false.
 * If this function returns true we know VFS unmount has been initiated.
 */


@@ -889,7 +889,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 * Clear Set-UID/Set-GID bits on successful write if not
 * privileged and at least one of the execute bits is set.
 *
- * It would be nice to to this after all writes have
+ * It would be nice to do this after all writes have
 * been done, but that would still expose the ISUID/ISGID
 * to another app after the partial write is committed.
 *
@@ -4638,7 +4638,7 @@ zfs_dirty_inode(struct inode *ip, int flags)
 #ifdef I_DIRTY_TIME

 /*
- * This is the lazytime semantic indroduced in Linux 4.0
+ * This is the lazytime semantic introduced in Linux 4.0
 * This flag will only be called from update_time when lazytime is set.
 * (Note, I_DIRTY_SYNC will also set if not lazytime)
 * Fortunately mtime and ctime are managed within ZFS itself, so we


@@ -787,7 +787,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
 }

 /*
- * No execs denied will be deterimed when zfs_mode_compute() is called.
+ * No execs denied will be determined when zfs_mode_compute() is called.
 */
 pflags |= acl_ids->z_aclp->z_hints &
 (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
@@ -1270,7 +1270,7 @@ zfs_rezget(znode_t *zp)
 * If the file has zero links, then it has been unlinked on the send
 * side and it must be in the received unlinked set.
 * We call zfs_znode_dmu_fini() now to prevent any accesses to the
- * stale data and to prevent automatical removal of the file in
+ * stale data and to prevent automatic removal of the file in
 * zfs_zinactive(). The file will be removed either when it is removed
 * on the send side and the next incremental stream is received or
 * when the unlinked set gets processed.


@@ -58,7 +58,7 @@
 *
 * In the event of a crash or power loss, the itxs contained by each
 * dataset's on-disk ZIL will be replayed when that dataset is first
- * instantiated (e.g. if the dataset is a normal fileystem, when it is
+ * instantiated (e.g. if the dataset is a normal filesystem, when it is
 * first mounted).
 *
 * As hinted at above, there is one ZIL per dataset (both the in-memory
@@ -2002,7 +2002,7 @@ zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
 /*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them. We should only do this after we
- * have written out the uberblocks (i.e. txg has been comitted) so that
+ * have written out the uberblocks (i.e. txg has been committed) so that
 * don't inadvertently clean out in-memory log records that would be required
 * by zil_commit().
 */


@@ -308,7 +308,7 @@ zio_checksum_template_init(enum zio_checksum checksum, spa_t *spa)
 mutex_exit(&spa->spa_cksum_tmpls_lock);
 }

- /* convenience function to update a checksum to accomodate an encryption MAC */
+ /* convenience function to update a checksum to accommodate an encryption MAC */
 static void
 zio_checksum_handle_crypt(zio_cksum_t *cksum, zio_cksum_t *saved, boolean_t xor)
 {


@@ -155,7 +155,7 @@ zio_decompress_data(enum zio_compress c, abd_t *src, void *dst,
 abd_return_buf(src, tmp, s_len);

 /*
- * Decompression shouldn't fail, because we've already verifyied
+ * Decompression shouldn't fail, because we've already verified
 * the checksum. However, for extra protection (e.g. against bitflips
 * in non-ECC RAM), we handle this error (and test it).
 */


@@ -369,7 +369,7 @@ error:
 /*
 * This function handles all encryption and decryption in zfs. When
 * encrypting it expects puio to reference the plaintext and cuio to
- * reference the cphertext. cuio must have enough space for the
+ * reference the ciphertext. cuio must have enough space for the
 * ciphertext + room for a MAC. datalen should be the length of the
 * plaintext / ciphertext alone.
 */
@@ -934,7 +934,7 @@ zio_crypt_bp_zero_nonportable_blkprop(blkptr_t *bp, uint64_t version)
 /*
 * At L0 we want to verify these fields to ensure that data blocks
- * can not be reinterpretted. For instance, we do not want an attacker
+ * can not be reinterpreted. For instance, we do not want an attacker
 * to trick us into returning raw lz4 compressed data to the user
 * by modifying the compression bits. At higher levels, we cannot
 * enforce this policy since raw sends do not convey any information


@@ -113,7 +113,7 @@ freq_triggered(uint32_t frequency)
 return (B_TRUE);

 /*
- * Note: we still handle legacy (unscaled) frequecy values
+ * Note: we still handle legacy (unscaled) frequency values
 */
 uint32_t maximum = (frequency <= 100) ? 100 : ZI_PERCENTAGE_MAX;


@@ -297,7 +297,7 @@ zpl_mount_impl(struct file_system_type *fs_type, int flags, zfs_mnt_t *zm)
 * The dsl pool lock must be released prior to calling sget().
 * It is possible sget() may block on the lock in grab_super()
 * while deactivate_super() holds that same lock and waits for
- * a txg sync. If the dsl_pool lock is held over over sget()
+ * a txg sync. If the dsl_pool lock is held over sget()
 * this can prevent the pool sync and cause a deadlock.
 */
 dsl_pool_rele(dmu_objset_pool(os), FTAG);


@@ -1997,7 +1997,7 @@ zvol_create_snap_minor_cb(const char *dsname, void *arg)
 /* at this point, the dsname should name a snapshot */
 if (strchr(dsname, '@') == 0) {
 dprintf("zvol_create_snap_minor_cb(): "
- "%s is not a shapshot name\n", dsname);
+ "%s is not a snapshot name\n", dsname);
 } else {
 minors_job_t *job;
 char *n = strdup(dsname);