Fix various typos

Correct an assortment of typos throughout the code base.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Signed-off-by: Andrea Gelmini <andrea.gelmini@gelma.net>
Closes #11774
Andrea Gelmini 2021-04-02 18:38:53 -07:00 committed by Brian Behlendorf
parent 35cce6ea63
commit ca7af7f675
57 changed files with 75 additions and 75 deletions

@@ -137,7 +137,7 @@ main(int argc, char **argv)
  }
  /*
- * we need just 4 bytes in native endianess
+ * we need just 4 bytes in native endianness
  * not using sethostid() because it may be missing or just a stub
  */
  uint32_t hostid = input_i;

@@ -1065,7 +1065,7 @@ zpool_do_add(int argc, char **argv)
  free(vname);
  }
  }
- /* And finaly the spares */
+ /* And finally the spares */
  if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
  &sparechild, &sparechildren) == 0 && sparechildren > 0) {
  hadspare = B_TRUE;

@@ -445,7 +445,7 @@ typedef struct replication_level {
  /*
  * N.B. For the purposes of comparing replication levels dRAID can be
- * considered functionally equivilant to raidz.
+ * considered functionally equivalent to raidz.
  */
  static boolean_t
  is_raidz_mirror(replication_level_t *a, replication_level_t *b,

@@ -5979,7 +5979,7 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
  vd0->vdev_resilver_txg != 0)) {
  /*
  * Make vd0 explicitly claim to be unreadable,
- * or unwriteable, or reach behind its back
+ * or unwritable, or reach behind its back
  * and close the underlying fd. We can do this if
  * maxfaults == 0 because we'll fail and reexecute,
  * and we can do it if maxfaults >= 2 because we'll

@@ -39,7 +39,7 @@ list_zvols() {
  [ "$volmode" = "none" ] && continue
  [ "$redacted" = "-" ] || continue
  #
- # We also also ignore partially received zvols if it is
+ # We also ignore partially received zvols if it is
  # not an incremental receive, as those won't even have a block
  # device minor node created yet.
  #

@@ -1,5 +1,5 @@
  #
- # Default rules for running cppcheck against the the user space components.
+ # Default rules for running cppcheck against the user space components.
  #
  PHONY += cppcheck

@@ -16,7 +16,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR_USERNS], [
  ])
  AC_DEFUN([ZFS_AC_KERNEL_GENERIC_FILLATTR_USERNS], [
- AC_MSG_CHECKING([whether generic_fillattr requres struct user_namespace*])
+ AC_MSG_CHECKING([whether generic_fillattr requires struct user_namespace*])
  ZFS_LINUX_TEST_RESULT([generic_fillattr_userns], [
  AC_MSG_RESULT([yes])
  AC_DEFINE(HAVE_GENERIC_FILLATTR_USERNS, 1,

@@ -14,13 +14,13 @@ PATH=/sbin:/bin:/usr/bin:/usr/sbin
  # Source function library
  if [ -f /etc/rc.d/init.d/functions ]; then
- # RedHat and derivates
+ # RedHat and derivatives
  . /etc/rc.d/init.d/functions
  elif [ -L /etc/init.d/functions.sh ]; then
  # Gentoo
  . /etc/init.d/functions.sh
  elif [ -f /lib/lsb/init-functions ]; then
- # LSB, Debian GNU/Linux and derivates
+ # LSB, Debian GNU/Linux and derivatives
  . /lib/lsb/init-functions
  fi

@@ -210,7 +210,7 @@ enum create { CRCREAT, CRMKNOD, CRMKDIR }; /* reason for create */
  *
  * The cc_caller_id is used to identify one or more callers who invoke
  * operations, possibly on behalf of others. For example, the NFS
- * server could have it's own cc_caller_id which can be detected by
+ * server could have its own cc_caller_id which can be detected by
  * vnode/vfs operations or (FEM) monitors on those operations. New
  * caller IDs are generated by fs_new_caller_id().
  */

@@ -261,7 +261,7 @@ bio_set_bi_error(struct bio *bio, int error)
  * For older kernels trigger a re-reading of the partition table by calling
  * check_disk_change() which calls flush_disk() to invalidate the device.
  *
- * For newer kernels (as of 5.10), bdev_check_media_chage is used, in favor of
+ * For newer kernels (as of 5.10), bdev_check_media_change is used, in favor of
  * check_disk_change(), with the modification that invalidation is no longer
  * forced.
  */

@@ -83,7 +83,7 @@ enum scope_prefix_types {
  /*
  * Declare a module parameter / sysctl node
  *
- * "scope_prefix" the part of the the sysctl / sysfs tree the node resides under
+ * "scope_prefix" the part of the sysctl / sysfs tree the node resides under
  * (currently a no-op on Linux)
  * "name_prefix" the part of the variable name that will be excluded from the
  * exported names on platforms with a hierarchical namespace

@@ -51,7 +51,7 @@ extern "C" {
  * dRAID permutation map.
  */
  typedef struct draid_map {
- uint64_t dm_children; /* # of permuation columns */
+ uint64_t dm_children; /* # of permutation columns */
  uint64_t dm_nperms; /* # of permutation rows */
  uint64_t dm_seed; /* dRAID map seed */
  uint64_t dm_checksum; /* Checksum of generated map */

@@ -501,7 +501,7 @@ typedef enum vbe_vers {
  * and is protected by an embedded checksum. By default, GRUB will
  * check if the boot filesystem supports storing the environment data
  * in a special location, and if so, will invoke filesystem specific
- * logic to retrieve it. This can be overriden by a variable, should
+ * logic to retrieve it. This can be overridden by a variable, should
  * the user so desire.
  */
  VB_RAW = 0,

@@ -60,7 +60,7 @@ typedef struct vdev_rebuild_phys {
  /*
  * The vdev_rebuild_t describes the current state and how a top-level vdev
  * should be rebuilt. The core elements are the top-vdev, the metaslab being
- * rebuilt, range tree containing the allocted extents and the on-disk state.
+ * rebuilt, range tree containing the allocated extents and the on-disk state.
  */
  typedef struct vdev_rebuild {
  vdev_t *vr_top_vdev; /* top-level vdev to rebuild */

@@ -5334,7 +5334,7 @@ zfs_get_holds(zfs_handle_t *zhp, nvlist_t **nvl)
  * 160k. Again, 128k is from SPA_OLD_MAXBLOCKSIZE and 160k is as calculated in
  * the 128k block example above.
  *
- * The situtation is slightly different for dRAID since the minimum allocation
+ * The situation is slightly different for dRAID since the minimum allocation
  * size is the full group width. The same 8K block above would be written as
  * follows in a dRAID group:
  *

@@ -83,7 +83,7 @@ lzbe_set_boot_device(const char *pool, lzbe_flags_t flag, const char *device)
  } else {
  /*
  * Use device name directly if it does start with
- * prefix "zfs:". Otherwise, add prefix and sufix.
+ * prefix "zfs:". Otherwise, add prefix and suffix.
  */
  if (strncmp(device, "zfs:", 4) == 0) {
  fnvlist_add_string(nv, OS_BOOTONCE, device);

@@ -1408,7 +1408,7 @@ discover_cached_paths(libpc_handle_t *hdl, nvlist_t *nv,
  /*
  * Once we have the path, we need to add the directory to
- * our directoy cache.
+ * our directory cache.
  */
  if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
  return (zpool_find_import_scan_dir(hdl, lock, cache,

@@ -234,7 +234,7 @@ Use \fB0\fR for no (default) and \fB1\fR for yes.
  .RS 12n
  Percent of ARC size allowed for L2ARC-only headers.
  Since L2ARC buffers are not evicted on memory pressure, too large amount of
- headers on system with irrationaly large L2ARC can render it slow or unusable.
+ headers on system with irrationally large L2ARC can render it slow or unusable.
  This parameter limits L2ARC writes and rebuild to achieve it.
  .sp
  Default value: \fB33\fR%.
@@ -387,7 +387,7 @@ Default value: \fB16,777,217\fR.
  .RS 12n
  When attempting to log the output nvlist of an ioctl in the on-disk history, the
  output will not be stored if it is larger than size (in bytes). This must be
- less then DMU_MAX_ACCESS (64MB). This applies primarily to
+ less than DMU_MAX_ACCESS (64MB). This applies primarily to
  zfs_ioc_channel_program().
  .sp
  Default value: \fB1MB\fR.
@@ -911,7 +911,7 @@ Default value: \fB8,388,608\fR (8MB).
  .RS 12n
  Max bytes to prefetch indirects for per stream.
  .sp
- Default vaule: \fB67,108,864\fR (64MB).
+ Default value: \fB67,108,864\fR (64MB).
  .RE
  .sp

@@ -168,7 +168,7 @@ of the requested feature set.
  .LP
  By convention, compatibility files in \fB/usr/share/zfs/compatibility.d\fR
  are provided by the distribution package, and include feature sets
- supported by important versions of popular distribtions, and feature
+ supported by important versions of popular distributions, and feature
  sets commonly supported at the start of each year. Compatibility files
  in \fB/etc/zfs/compatibility.d\fR, if present, will take precedence over
  files with the same name in \fB/usr/share/zfs/compatibility.d\fR.
@@ -1046,7 +1046,7 @@ DEPENDENCIES extensible_dataset
  \fBzstd\fR is a high-performance compression algorithm that features a
  combination of high compression ratios and high speed. Compared to \fBgzip\fR,
- \fBzstd\fR offers slighty better compression at much higher speeds. Compared
+ \fBzstd\fR offers slightly better compression at much higher speeds. Compared
  to \fBlz4\fR, \fBzstd\fR offers much better compression while being only
  modestly slower. Typically, \fBzstd\fR compression speed ranges from 250 to 500
  MB/s per thread and decompression speed is over 1 GB/s per thread.

@@ -678,7 +678,7 @@ This bookmark can then be used instead of snapshot in send streams.
  .Ed
  .It Sy Example 24 No Setting sharesmb Property Options on a ZFS File System
  The following example show how to share SMB filesystem through ZFS.
- Note that that a user and his/her password must be given.
+ Note that a user and his/her password must be given.
  .Bd -literal
  # smbmount //127.0.0.1/share_tmp /mnt/tmp \\
  -o user=workgroup/turbo,password=obrut,uid=1000

@@ -1008,7 +1008,7 @@ avl_destroy_nodes(avl_tree_t *tree, void **cookie)
  --tree->avl_numnodes;
  /*
- * If we just did a right child or there isn't one, go up to parent.
+ * If we just removed a right child or there isn't one, go up to parent.
  */
  if (child == 1 || parent->avl_child[1] == NULL) {
  node = parent;

@@ -1399,7 +1399,7 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
  }
  datap += done;
  }
- /* Decrypt remainder, which is less then chunk size, in one go. */
+ /* Decrypt remainder, which is less than chunk size, in one go. */
  kfpu_begin();
  if (bleft >= GCM_AVX_MIN_DECRYPT_BYTES) {
  done = aesni_gcm_decrypt(datap, datap, bleft,
@@ -1415,7 +1415,7 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
  ASSERT(bleft < GCM_AVX_MIN_DECRYPT_BYTES);
  /*
- * Now less then GCM_AVX_MIN_DECRYPT_BYTES bytes remain,
+ * Now less than GCM_AVX_MIN_DECRYPT_BYTES bytes remain,
  * decrypt them block by block.
  */
  while (bleft > 0) {

@@ -306,7 +306,7 @@ void
  abd_free_linear_page(abd_t *abd)
  {
  /*
- * FreeBSD does not have have scatter linear pages
+ * FreeBSD does not have scatter linear pages
  * so there is an error.
  */
  VERIFY(0);

@@ -3577,7 +3577,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
  /*
  * Create a new object for the symlink.
- * for version 4 ZPL datsets the symlink will be an SA attribute
+ * for version 4 ZPL datasets the symlink will be an SA attribute
  */
  zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

@@ -114,7 +114,7 @@
  * Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left
  * in plaintext for scrubbing and claiming, but the bonus buffers might contain
  * sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing
- * which which pieces of the block need to be encrypted. For more details about
+ * which pieces of the block need to be encrypted. For more details about
  * dnode authentication and encryption, see zio_crypt_init_uios_dnode().
  *
  * OBJECT SET AUTHENTICATION:

@@ -761,7 +761,7 @@ zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag)
  volsize = zv->zv_volsize;
  /*
  * uio_loffset == volsize isn't an error as
- * its required for EOF processing.
+ * it's required for EOF processing.
  */
  if (zfs_uio_resid(&uio) > 0 &&
  (zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize))

@@ -3140,7 +3140,7 @@ top:
  /*
  * Create a new object for the symlink.
- * for version 4 ZPL datsets the symlink will be an SA attribute
+ * for version 4 ZPL datasets the symlink will be an SA attribute
  */
  zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

@@ -217,7 +217,7 @@ zfs_znode_fini(void)
  * created or destroyed. This kind of locking would normally reside in the
  * znode itself but in this case that's impossible because the znode and SA
  * buffer may not yet exist. Therefore the locking is handled externally
- * with an array of mutexs and AVLs trees which contain per-object locks.
+ * with an array of mutexes and AVLs trees which contain per-object locks.
  *
  * In zfs_znode_hold_enter() a per-object lock is created as needed, inserted
  * in to the correct AVL tree and finally the per-object lock is held. In

@@ -115,7 +115,7 @@
  * Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left
  * in plaintext for scrubbing and claiming, but the bonus buffers might contain
  * sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing
- * which which pieces of the block need to be encrypted. For more details about
+ * which pieces of the block need to be encrypted. For more details about
  * dnode authentication and encryption, see zio_crypt_init_uios_dnode().
  *
  * OBJECT SET AUTHENTICATION:

@@ -884,7 +884,7 @@ do_decomp(size_t uv, uchar_t *u8s, uchar_t *s, int sz,
  * | B0| B1| ... | Bm|
  * +---+---+-...-+---+
  *
- * The first byte, B0, is always less then 0xF5 (U8_DECOMP_BOTH).
+ * The first byte, B0, is always less than 0xF5 (U8_DECOMP_BOTH).
  *
  * (2) Canonical decomposition mappings:
  *

@@ -26,7 +26,7 @@
  /*
  * This file is intended for functions that ought to be common between user
  * land (libzfs) and the kernel. When many common routines need to be shared
- * then a separate file should to be created.
+ * then a separate file should be created.
  */
  #if !defined(_KERNEL)

@@ -381,7 +381,7 @@ abd_gang_add_gang(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
  child = list_next(&ABD_GANG(cabd).abd_gang_chain, child)) {
  /*
  * We always pass B_FALSE for free_on_free as it is the
- * original child gang ABDs responsibilty to determine
+ * original child gang ABDs responsibility to determine
  * if any of its child ABDs should be free'd on the call
  * to abd_free().
  */

@@ -5036,7 +5036,7 @@ arc_reap_cb(void *arg, zthr_t *zthr)
  * memory in the system at a fraction of the arc_size (1/128th by
  * default). If oversubscribed (free_memory < 0) then reduce the
  * target arc_size by the deficit amount plus the fractional
- * amount. If free memory is positive but less then the fractional
+ * amount. If free memory is positive but less than the fractional
  * amount, reduce by what is needed to hit the fractional amount.
  */
  free_memory = arc_available_memory();

@@ -236,7 +236,7 @@ dsl_bookmark_create_check_impl(dsl_pool_t *dp,
  error = SET_ERROR(EEXIST);
  goto eholdnewbmds;
  default:
- /* dsl_bookmark_lookup_impl already did SET_ERRROR */
+ /* dsl_bookmark_lookup_impl already did SET_ERROR */
  goto eholdnewbmds;
  }
@@ -271,7 +271,7 @@ dsl_bookmark_create_check_impl(dsl_pool_t *dp,
  error = SET_ERROR(ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
  break;
  default:
- /* dsl_bookmark_lookup already did SET_ERRROR */
+ /* dsl_bookmark_lookup already did SET_ERROR */
  break;
  }
  } else {
@@ -536,7 +536,7 @@ dsl_bookmark_create_sync_impl_book(
  * Reasoning:
  * - The zbm_redaction_obj would be referred to by both source and new
  * bookmark, but would be destroyed once either source or new is
- * destroyed, resulting in use-after-free of the referrred object.
+ * destroyed, resulting in use-after-free of the referred object.
  * - User expectation when issuing the `zfs bookmark` command is that
  * a normal bookmark of the source is created
  *

@@ -6496,7 +6496,7 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
  /*
  * The virtual dRAID spares must be added after vdev tree is created
- * and the vdev guids are generated. The guid of their assoicated
+ * and the vdev guids are generated. The guid of their associated
  * dRAID is stored in the config and used when opening the spare.
  */
  if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,

@@ -1372,7 +1372,7 @@ vdev_metaslab_group_create(vdev_t *vd)
  /*
  * The spa ashift min/max only apply for the normal metaslab
- * class. Class destination is late binding so ashift boundry
+ * class. Class destination is late binding so ashift boundary
  * setting had to wait until now.
  */
  if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
@@ -2046,7 +2046,7 @@ vdev_open(vdev_t *vd)
  vd->vdev_max_asize = max_asize;
  /*
- * If the vdev_ashift was not overriden at creation time,
+ * If the vdev_ashift was not overridden at creation time,
  * then set it the logical ashift and optimize the ashift.
  */
  if (vd->vdev_ashift == 0) {
@@ -2116,7 +2116,7 @@ vdev_open(vdev_t *vd)
  }
  /*
- * Track the the minimum allocation size.
+ * Track the minimum allocation size.
  */
  if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
  vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
@@ -4570,7 +4570,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
  /*
  * Solely for the purposes of 'zpool iostat -lqrw'
- * reporting use the priority to catagorize the IO.
+ * reporting use the priority to categorize the IO.
  * Only the following are reported to user space:
  *
  * ZIO_PRIORITY_SYNC_READ,

@@ -1984,7 +1984,7 @@ raidz_reconstruct(zio_t *zio, int *ltgts, int ntgts, int nparity)
  * 2 4 5 first: increment to 3
  * 3 4 5 done
  *
- * This strategy works for dRAID but is less effecient when there are a large
+ * This strategy works for dRAID but is less efficient when there are a large
  * number of child vdevs and therefore permutations to check. Furthermore,
  * since the raidz_map_t rows likely do not overlap reconstruction would be
  * possible as long as there are no more than nparity data errors per row.

@@ -81,7 +81,7 @@
  * Advantages:
  *
  * - Sequential reconstruction is performed in LBA order which may be faster
- * than healing reconstruction particularly when using using HDDs (or
+ * than healing reconstruction particularly when using HDDs (or
  * especially with SMR devices). Only allocated capacity is resilvered.
  *
  * - Sequential reconstruction is not constrained by ZFS block boundaries.

@@ -233,7 +233,7 @@ unsigned long zfs_max_nvlist_src_size = 0;
  /*
  * When logging the output nvlist of an ioctl in the on-disk history, limit
- * the logged size to this many bytes. This must be less then DMU_MAX_ACCESS.
+ * the logged size to this many bytes. This must be less than DMU_MAX_ACCESS.
  * This applies primarily to zfs_ioc_channel_program().
  */
  unsigned long zfs_history_output_max = 1024 * 1024;

@@ -10,7 +10,7 @@ library, besides upgrading to a newer ZSTD release.
  Tree structure:
  * `zfs_zstd.c` is the actual `zzstd` kernel module.
- * `lib/` contains the the unmodified, [_"amalgamated"_](https://github.com/facebook/zstd/blob/dev/contrib/single_file_libs/README.md)
+ * `lib/` contains the unmodified, [_"amalgamated"_](https://github.com/facebook/zstd/blob/dev/contrib/single_file_libs/README.md)
  version of the `Zstandard` library, generated from our template file
  * `zstd-in.c` is our template file for generating the library
  * `include/`: This directory contains supplemental includes for platform

@@ -34,7 +34,7 @@
  /*
  * This wrapper fixes a problem, in case the ZFS filesystem driver, is compiled
- * staticly into the kernel.
+ * statically into the kernel.
  * This will cause a symbol collision with the older in-kernel zstd library.
  * The following macros will simply rename all local zstd symbols and references
  *

@@ -258,7 +258,7 @@ zstd_mempool_alloc(struct zstd_pool *zstd_mempool, size_t size)
  for (int i = 0; i < ZSTD_POOL_MAX; i++) {
  pool = &zstd_mempool[i];
  /*
- * This lock is simply a marker for a pool object beeing in use.
+ * This lock is simply a marker for a pool object being in use.
  * If it's already hold, it will be skipped.
  *
  * We need to create it before checking it to avoid race
@@ -488,7 +488,7 @@ zfs_zstd_decompress_level(void *s_start, void *d_start, size_t s_len,
  /*
  * NOTE: We ignore the ZSTD version for now. As soon as any
- * incompatibility occurrs, it has to be handled accordingly.
+ * incompatibility occurs, it has to be handled accordingly.
  * The version can be accessed via `hdr_copy.version`.
  */

@@ -12,7 +12,7 @@
  # as much functionality as possible while still executing relatively
  # quickly. The included tests should take no more than a few seconds
  # each to run at most. This provides a convenient way to sanity test a
- # change before commiting to a full test run which takes several hours.
+ # change before committing to a full test run which takes several hours.
  #
  # Approximate run time: 15 minutes
  #

@@ -626,7 +626,7 @@ eval_decluster(draid_map_t *map, double *worst_ratiop, double *avg_ratiop)
  uint64_t faults = nspares;
  /*
- * Score groupwidths up to 19. This value was choosen as the
+ * Score groupwidths up to 19. This value was chosen as the
  * largest reasonable width (16d+3p). dRAID pools may be still
  * be created with wider stripes but they are not considered in
  * this analysis in order to optimize for the most common cases.
@@ -727,7 +727,7 @@ eval_maps(uint64_t children, int passes, uint64_t *map_seed,
  * Consider maps with a lower worst_ratio to be of higher
  * quality. Some maps may have a lower avg_ratio but they
  * are discarded since they might include some particularly
- * imbalanced permuations. The average is tracked to in
+ * imbalanced permutations. The average is tracked to in
  * order to get a sense of the average permutation quality.
  */
  eval_decluster(map, &worst_ratio, &avg_ratio);
@@ -1194,8 +1194,8 @@ draid_dump(int argc, char *argv[])
  }
  /*
- * Print all of the mappings as a C formated draid_map_t array. This table
- * is found in the module/zcommon/zfs_draid.c file and is the definative
+ * Print all of the mappings as a C formatted draid_map_t array. This table
+ * is found in the module/zcommon/zfs_draid.c file and is the definitive
  * source for all mapping used by dRAID. It cannot be updated without
  * changing the dRAID on disk format.
  */

@@ -44,7 +44,7 @@ static unsigned char bigbuffer[BIGBUFFERSIZE];
  static void usage(char *);
  /*
- * psudo-randomize the buffer
+ * pseudo-randomize the buffer
  */
  static void randomize_buffer(int block_size) {
  int i;

@@ -19,7 +19,7 @@
  # snapshots from the same datasets
  #
  # STRATEGY
- # 1. Create multiple snapshots for the same datset
+ # 1. Create multiple snapshots for the same dataset
  # 2. Run zfs destroy for these snapshots for a mix of valid and
  # invalid snapshot names
  # 3. Run zfs destroy for snapshots from different datasets and

@@ -36,7 +36,7 @@ typeset VDEV_PREFIX="$TEST_BASE_DIR/filedev"
  # STRATEGY:
  # 1. Create different storage pools, use -n to add devices to the pool and
  # verify the output is as expected.
- # 2. Create a pool whith a hole vdev and verify it's not listed with add -n.
+ # 2. Create a pool with a hole vdev and verify it's not listed with add -n.
  #
  typeset -a dev=(
@@ -163,7 +163,7 @@ for (( i=0; i < ${#tests[@]}; i+=1 )); do
  log_must destroy_pool "$TESTPOOL"
  done
- # Make sure hole vdevs are skiped in output.
+ # Make sure hole vdevs are skipped in output.
  log_must eval "zpool create '$TESTPOOL' '${dev[0]}' log '${dev[1]}' \
  cache '${dev[2]}'"

@@ -74,7 +74,7 @@ else
  fi
  #
- # datsets ordered by checksum options (note, Orange, Carrot & Banana have the
+ # datasets ordered by checksum options (note, Orange, Carrot & Banana have the
  # same checksum options, so ZFS should revert to sorting them alphabetically by
  # name)
  #

@@ -30,7 +30,7 @@
  #
  # STRATEGY:
  # 1. Change HOME to /var/tmp
- # 2. Make a simple script that echos a key value pair
+ # 2. Make a simple script that echoes a key value pair
  # in /var/tmp/.zpool.d
  # 3. Make sure it can be run with -c
  # 4. Remove the script we created

@@ -30,7 +30,7 @@
  #
  # STRATEGY:
  # 1. Set ZPOOL_SCRIPTS_PATH to contain a couple of non-default dirs
- # 2. Make a simple script that echos a key value pair in each dir
+ # 2. Make a simple script that echoes a key value pair in each dir
  # 3. Make sure scripts can be run with -c
  # 4. Remove the scripts we created

@@ -30,7 +30,7 @@
  #
  # STRATEGY:
  # 1. Change HOME to /var/tmp
- # 2. Make a simple script that echos a key value pair
+ # 2. Make a simple script that echoes a key value pair
  # in /var/tmp/.zpool.d
  # 3. Make sure it can be run with -c
  # 4. Remove the script we created

@@ -30,7 +30,7 @@
  #
  # STRATEGY:
  # 1. Set ZPOOL_SCRIPTS_PATH to contain a couple of non-default dirs
- # 2. Make a simple script that echos a key value pair in each dir
+ # 2. Make a simple script that echoes a key value pair in each dir
  # 3. Make sure scripts can be run with -c
  # 4. Remove the scripts we created

@@ -30,7 +30,7 @@
  # STRATEGY:
  # 1. Create a pool
  # 2. Simulate physical removal of one device
- # 3. Verify the device is unvailable
+ # 3. Verify the device is unavailable
  # 4. Reattach the device
  # 5. Verify the device is onlined
  # 6. Repeat the same tests with a spare device:
@@ -104,7 +104,7 @@ do
  log_must mkfile 1m $mntpnt/file
  log_must zpool sync $TESTPOOL
- # 3. Verify the device is unvailable.
+ # 3. Verify the device is unavailable.
  log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"
  # 4. Reattach the device

@@ -44,7 +44,7 @@ function cleanup
  rm -f ${VDEV_FILES[@]}
  }
- log_assert "Verify attach/detech with multiple vdevs"
+ log_assert "Verify attach/detach with multiple vdevs"
  ORIG_SCAN_SUSPEND_PROGRESS=$(get_tunable SCAN_SUSPEND_PROGRESS)
@@ -79,7 +79,7 @@ for replace_mode in "healing" "sequential"; do
  ${VDEV_FILES[1]} ${VDEV_FILES[2]}
  log_must is_pool_resilvering $TESTPOOL1
- # Original vdev cannot be detached until there is sufficent redundancy.
+ # Original vdev cannot be detached until there is sufficient redundancy.
  log_mustnot zpool detach $TESTPOOL1 ${VDEV_FILES[0]}
  # Detach first vdev (resilver keeps running)
@@ -108,4 +108,4 @@ for replace_mode in "healing" "sequential"; do
  log_must zpool wait $TESTPOOL1
  done
- log_pass "Verify attach/detech with multiple vdevs"
+ log_pass "Verify attach/detach with multiple vdevs"

@@ -26,7 +26,7 @@
  # Strategy:
  # 1. For both healing and sequential resilvering replace:
  # a. Create a pool
- # b. Repalce a vdev with 'zpool replace' to resilver (-s) it.
+ # b. Replace a vdev with 'zpool replace' to resilver (-s) it.
  # c. Export the pool
  # d. Import the pool
  # e. Verify the 'zpool replace' resumed resilvering.

@@ -36,7 +36,7 @@
  # a. Replace a vdev with a spare & suspend resilver immediately
  # b. Verify resilver starts properly
  # c. Offline / online another vdev to introduce a new DTL range
- # d. Verify resilver restart restart or defer
+ # d. Verify resilver restart or defer
  # e. Inject read errors on vdev that was offlined / onlned
  # f. Verify that resilver did not restart
  # g. Unsuspend resilver and wait for it to finish

@@ -39,7 +39,7 @@
  # for a dataset. Unlike quotas however there should be no restrictions
  # on accessing space outside of the limits of the reservation (if the
  # space is available in the pool). Verify that in a filesystem with a
- # reservation set that its possible to create files both within the
+ # reservation set that it's possible to create files both within the
  # reserved space and also outside.
  #
  # STRATEGY: