#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

# This run file contains all of the common functional tests. When
# adding a new test, consider also adding it to the sanity.run file
# if the new test runs to completion in only a few seconds.
#
# Approximate run time: 4-5 hours
#
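
# A test group entry has the following shape; the group and test names
# below are purely illustrative and are not part of this suite:
#
# [tests/functional/example_group]
# tests = ['example_001_pos', 'example_002_neg']
# tags = ['functional', 'example_group']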

[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
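
# Note: individual test groups below may override these defaults, e.g.
# by setting 'pre =' and 'post =' to empty values to skip the shared
# setup/cleanup scripts, or by raising 'timeout' for long-running groups.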

[tests/functional/acl/off]
tests = ['dosmode', 'posixmode']
tags = ['functional', 'acl']

[tests/functional/alloc_class]
tests = ['alloc_class_001_pos', 'alloc_class_002_neg', 'alloc_class_003_pos',
    'alloc_class_004_pos', 'alloc_class_005_pos', 'alloc_class_006_pos',
    'alloc_class_007_pos', 'alloc_class_008_pos', 'alloc_class_009_pos',
    'alloc_class_010_pos', 'alloc_class_011_neg', 'alloc_class_012_pos',
    'alloc_class_013_pos']
tags = ['functional', 'alloc_class']

[tests/functional/append]
tests = ['file_append', 'threadsappend_001_pos']
tags = ['functional', 'append']

[tests/functional/arc]
tests = ['dbufstats_001_pos', 'dbufstats_002_pos', 'dbufstats_003_pos',
    'arcstats_runtime_tuning']
tags = ['functional', 'arc']

[tests/functional/atime]
tests = ['atime_001_pos', 'atime_002_neg', 'root_atime_off', 'root_atime_on']
tags = ['functional', 'atime']

[tests/functional/bootfs]
tests = ['bootfs_001_pos', 'bootfs_002_neg', 'bootfs_003_pos',
    'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_006_pos', 'bootfs_007_pos',
    'bootfs_008_pos']
tags = ['functional', 'bootfs']

[tests/functional/btree]
tests = ['btree_positive', 'btree_negative']
tags = ['functional', 'btree']
pre =
post =

[tests/functional/cache]
tests = ['cache_001_pos', 'cache_002_pos', 'cache_003_pos', 'cache_004_neg',
    'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg',
    'cache_009_pos', 'cache_010_pos', 'cache_011_pos', 'cache_012_pos']
tags = ['functional', 'cache']

[tests/functional/cachefile]
tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
    'cachefile_004_pos']
tags = ['functional', 'cachefile']

[tests/functional/casenorm]
tests = ['case_all_values', 'norm_all_values', 'mixed_create_failure',
    'sensitive_none_lookup', 'sensitive_none_delete',
    'sensitive_formd_lookup', 'sensitive_formd_delete',
    'insensitive_none_lookup', 'insensitive_none_delete',
    'insensitive_formd_lookup', 'insensitive_formd_delete',
    'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete',
    'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete']
tags = ['functional', 'casenorm']

[tests/functional/channel_program/lua_core]
tests = ['tst.args_to_lua', 'tst.divide_by_zero', 'tst.exists',
    'tst.integer_illegal', 'tst.integer_overflow', 'tst.language_functions_neg',
    'tst.language_functions_pos', 'tst.large_prog', 'tst.libraries',
    'tst.memory_limit', 'tst.nested_neg', 'tst.nested_pos', 'tst.nvlist_to_lua',
    'tst.recursive_neg', 'tst.recursive_pos', 'tst.return_large',
    'tst.return_nvlist_neg', 'tst.return_nvlist_pos',
    'tst.return_recursive_table', 'tst.stack_gsub', 'tst.timeout']
tags = ['functional', 'channel_program', 'lua_core']

[tests/functional/channel_program/synctask_core]
tests = ['tst.destroy_fs', 'tst.destroy_snap', 'tst.get_count_and_limit',
    'tst.get_index_props', 'tst.get_mountpoint', 'tst.get_neg',
    'tst.get_number_props', 'tst.get_string_props', 'tst.get_type',
    'tst.get_userquota', 'tst.get_written', 'tst.inherit', 'tst.list_bookmarks',
    'tst.list_children', 'tst.list_clones', 'tst.list_holds',
    'tst.list_snapshots', 'tst.list_system_props',
    'tst.list_user_props', 'tst.parse_args_neg', 'tst.promote_conflict',
    'tst.promote_multiple', 'tst.promote_simple', 'tst.rollback_mult',
    'tst.rollback_one', 'tst.set_props', 'tst.snapshot_destroy', 'tst.snapshot_neg',
    'tst.snapshot_recursive', 'tst.snapshot_simple',
    'tst.bookmark.create', 'tst.bookmark.copy',
    'tst.terminate_by_signal']
tags = ['functional', 'channel_program', 'synctask_core']

[tests/functional/checksum]
tests = ['run_edonr_test', 'run_sha2_test', 'run_skein_test', 'run_blake3_test',
    'filetest_001_pos', 'filetest_002_pos']
tags = ['functional', 'checksum']

[tests/functional/clean_mirror]
tests = ['clean_mirror_001_pos', 'clean_mirror_002_pos',
    'clean_mirror_003_pos', 'clean_mirror_004_pos']
tags = ['functional', 'clean_mirror']

[tests/functional/cli_root/zdb]
tests = ['zdb_002_pos', 'zdb_003_pos', 'zdb_004_pos', 'zdb_005_pos',
    'zdb_006_pos', 'zdb_args_neg', 'zdb_args_pos',
    'zdb_block_size_histogram', 'zdb_checksum', 'zdb_decompress',
    'zdb_display_block', 'zdb_label_checksum', 'zdb_object_range_neg',
    'zdb_object_range_pos', 'zdb_objset_id', 'zdb_decompress_zstd',
    'zdb_recover', 'zdb_recover_2']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']

[tests/functional/cli_root/zfs]
tests = ['zfs_001_neg', 'zfs_002_pos']
tags = ['functional', 'cli_root', 'zfs']

[tests/functional/cli_root/zfs_bookmark]
tests = ['zfs_bookmark_cliargs']
tags = ['functional', 'cli_root', 'zfs_bookmark']

[tests/functional/cli_root/zfs_change-key]
tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format',
    'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location',
    'zfs_change-key_pbkdf2iters', 'zfs_change-key_clones']
tags = ['functional', 'cli_root', 'zfs_change-key']

[tests/functional/cli_root/zfs_clone]
tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos',
    'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos',
    'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg',
    'zfs_clone_010_pos', 'zfs_clone_encrypted', 'zfs_clone_deeply_nested']
tags = ['functional', 'cli_root', 'zfs_clone']

[tests/functional/cli_root/zfs_copies]
tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_003_pos',
    'zfs_copies_004_neg', 'zfs_copies_005_neg', 'zfs_copies_006_pos']
tags = ['functional', 'cli_root', 'zfs_copies']

[tests/functional/cli_root/zfs_create]
tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
    'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos',
    'zfs_create_007_pos', 'zfs_create_008_neg', 'zfs_create_009_neg',
    'zfs_create_010_neg', 'zfs_create_011_pos', 'zfs_create_012_pos',
    'zfs_create_013_pos', 'zfs_create_014_pos', 'zfs_create_encrypted',
    'zfs_create_crypt_combos', 'zfs_create_dryrun', 'zfs_create_nomount',
    'zfs_create_verbose']
tags = ['functional', 'cli_root', 'zfs_create']

[tests/functional/cli_root/zfs_destroy]
tests = ['zfs_clone_livelist_condense_and_disable',
    'zfs_clone_livelist_condense_races', 'zfs_clone_livelist_dedup',
    'zfs_destroy_001_pos', 'zfs_destroy_002_pos', 'zfs_destroy_003_pos',
    'zfs_destroy_004_pos', 'zfs_destroy_005_neg', 'zfs_destroy_006_neg',
    'zfs_destroy_007_neg', 'zfs_destroy_008_pos', 'zfs_destroy_009_pos',
    'zfs_destroy_010_pos', 'zfs_destroy_011_pos', 'zfs_destroy_012_pos',
    'zfs_destroy_013_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos',
    'zfs_destroy_016_pos', 'zfs_destroy_clone_livelist',
    'zfs_destroy_dev_removal', 'zfs_destroy_dev_removal_condense']
tags = ['functional', 'cli_root', 'zfs_destroy']

[tests/functional/cli_root/zfs_diff]
tests = ['zfs_diff_changes', 'zfs_diff_cliargs', 'zfs_diff_timestamp',
    'zfs_diff_types', 'zfs_diff_encrypted', 'zfs_diff_mangle']
tags = ['functional', 'cli_root', 'zfs_diff']

[tests/functional/cli_root/zfs_get]
tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos',
    'zfs_get_004_pos', 'zfs_get_005_neg', 'zfs_get_006_neg', 'zfs_get_007_neg',
    'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg']
tags = ['functional', 'cli_root', 'zfs_get']

[tests/functional/cli_root/zfs_ids_to_path]
tests = ['zfs_ids_to_path_001_pos']
tags = ['functional', 'cli_root', 'zfs_ids_to_path']

[tests/functional/cli_root/zfs_inherit]
tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos',
    'zfs_inherit_mountpoint']
tags = ['functional', 'cli_root', 'zfs_inherit']

[tests/functional/cli_root/zfs_load-key]
tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file',
    'zfs_load-key_https', 'zfs_load-key_location', 'zfs_load-key_noop',
    'zfs_load-key_recursive']
tags = ['functional', 'cli_root', 'zfs_load-key']

[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
    'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
    'zfs_mount_009_neg', 'zfs_mount_010_neg', 'zfs_mount_011_neg',
    'zfs_mount_012_pos', 'zfs_mount_all_001_pos', 'zfs_mount_encrypted',
    'zfs_mount_remount', 'zfs_mount_all_fail', 'zfs_mount_all_mountpoints',
    'zfs_mount_test_race']
tags = ['functional', 'cli_root', 'zfs_mount']

[tests/functional/cli_root/zfs_program]
tests = ['zfs_program_json']
tags = ['functional', 'cli_root', 'zfs_program']

[tests/functional/cli_root/zfs_promote]
tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos',
    'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg',
    'zfs_promote_007_neg', 'zfs_promote_008_pos', 'zfs_promote_encryptionroot']
tags = ['functional', 'cli_root', 'zfs_promote']

[tests/functional/cli_root/zfs_property]
tests = ['zfs_written_property_001_pos']
tags = ['functional', 'cli_root', 'zfs_property']

[tests/functional/cli_root/zfs_receive]
tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
    'zfs_receive_004_neg', 'zfs_receive_005_neg', 'zfs_receive_006_pos',
    'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg',
    'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos',
    'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
    'zfs_receive_016_pos', 'receive-o-x_props_override',
    'receive-o-x_props_aliases',
    'zfs_receive_from_encrypted', 'zfs_receive_to_encrypted',
    'zfs_receive_raw', 'zfs_receive_raw_incremental', 'zfs_receive_-e',
    'zfs_receive_raw_-d', 'zfs_receive_from_zstd', 'zfs_receive_new_props',
    'zfs_receive_-wR-encrypted-mix', 'zfs_receive_corrective',
    'zfs_receive_compressed_corrective']
tags = ['functional', 'cli_root', 'zfs_receive']

[tests/functional/cli_root/zfs_rename]
tests = ['zfs_rename_001_pos', 'zfs_rename_002_pos', 'zfs_rename_003_pos',
    'zfs_rename_004_neg', 'zfs_rename_005_neg', 'zfs_rename_006_pos',
    'zfs_rename_007_pos', 'zfs_rename_008_pos', 'zfs_rename_009_neg',
    'zfs_rename_010_neg', 'zfs_rename_011_pos', 'zfs_rename_012_neg',
    'zfs_rename_013_pos', 'zfs_rename_014_neg', 'zfs_rename_encrypted_child',
    'zfs_rename_to_encrypted', 'zfs_rename_mountpoint', 'zfs_rename_nounmount']
tags = ['functional', 'cli_root', 'zfs_rename']

[tests/functional/cli_root/zfs_reservation]
tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
tags = ['functional', 'cli_root', 'zfs_reservation']

[tests/functional/cli_root/zfs_rollback]
tests = ['zfs_rollback_001_pos', 'zfs_rollback_002_pos',
    'zfs_rollback_003_neg', 'zfs_rollback_004_neg']
tags = ['functional', 'cli_root', 'zfs_rollback']

[tests/functional/cli_root/zfs_send]
tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
    'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_006_pos',
    'zfs_send_007_pos', 'zfs_send_encrypted', 'zfs_send_raw',
    'zfs_send_sparse', 'zfs_send-b', 'zfs_send_skip_missing']
tags = ['functional', 'cli_root', 'zfs_send']

[tests/functional/cli_root/zfs_set]
tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos',
    'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos',
    'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos',
    'mountpoint_002_pos', 'reservation_001_neg', 'user_property_002_pos',
    'share_mount_001_neg', 'snapdir_001_pos', 'onoffs_001_pos',
    'user_property_001_pos', 'user_property_003_neg', 'readonly_001_pos',
    'user_property_004_pos', 'version_001_neg', 'zfs_set_001_neg',
    'zfs_set_002_neg', 'zfs_set_003_neg', 'property_alias_001_pos',
    'mountpoint_003_pos', 'ro_props_001_pos', 'zfs_set_keylocation',
    'zfs_set_feature_activation']
tags = ['functional', 'cli_root', 'zfs_set']

[tests/functional/cli_root/zfs_share]
tests = ['zfs_share_001_pos', 'zfs_share_002_pos', 'zfs_share_003_pos',
    'zfs_share_004_pos', 'zfs_share_006_pos', 'zfs_share_008_neg',
    'zfs_share_010_neg', 'zfs_share_011_pos', 'zfs_share_concurrent_shares']
tags = ['functional', 'cli_root', 'zfs_share']

[tests/functional/cli_root/zfs_snapshot]
tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg',
    'zfs_snapshot_003_neg', 'zfs_snapshot_004_neg', 'zfs_snapshot_005_neg',
    'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 'zfs_snapshot_008_neg',
    'zfs_snapshot_009_pos']
tags = ['functional', 'cli_root', 'zfs_snapshot']

[tests/functional/cli_root/zfs_unload-key]
tests = ['zfs_unload-key', 'zfs_unload-key_all', 'zfs_unload-key_recursive']
tags = ['functional', 'cli_root', 'zfs_unload-key']

[tests/functional/cli_root/zfs_unmount]
tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos',
    'zfs_unmount_004_pos', 'zfs_unmount_005_pos', 'zfs_unmount_006_pos',
    'zfs_unmount_007_neg', 'zfs_unmount_008_neg', 'zfs_unmount_009_pos',
    'zfs_unmount_all_001_pos', 'zfs_unmount_nested', 'zfs_unmount_unload_keys']
tags = ['functional', 'cli_root', 'zfs_unmount']

[tests/functional/cli_root/zfs_unshare]
tests = ['zfs_unshare_001_pos', 'zfs_unshare_002_pos', 'zfs_unshare_003_pos',
    'zfs_unshare_004_neg', 'zfs_unshare_005_neg', 'zfs_unshare_006_pos',
    'zfs_unshare_007_pos', 'zfs_unshare_008_pos']
tags = ['functional', 'cli_root', 'zfs_unshare']

[tests/functional/cli_root/zfs_upgrade]
tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos',
    'zfs_upgrade_004_pos', 'zfs_upgrade_005_pos', 'zfs_upgrade_006_neg',
    'zfs_upgrade_007_neg']
tags = ['functional', 'cli_root', 'zfs_upgrade']

[tests/functional/cli_root/zfs_wait]
tests = ['zfs_wait_deleteq', 'zfs_wait_getsubopt']
tags = ['functional', 'cli_root', 'zfs_wait']

[tests/functional/cli_root/zhack]
tests = ['zhack_label_checksum']
pre =
post =
tags = ['functional', 'cli_root', 'zhack']

[tests/functional/cli_root/zpool]
tests = ['zpool_001_neg', 'zpool_002_pos', 'zpool_003_pos', 'zpool_colors']
tags = ['functional', 'cli_root', 'zpool']

[tests/functional/cli_root/zpool_add]
tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos',
    'zpool_add_004_pos', 'zpool_add_006_pos', 'zpool_add_007_neg',
    'zpool_add_008_neg', 'zpool_add_009_neg', 'zpool_add_010_pos',
    'add-o_ashift', 'add_prop_ashift', 'zpool_add_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_add']

[tests/functional/cli_root/zpool_attach]
tests = ['zpool_attach_001_neg', 'attach-o_ashift']
tags = ['functional', 'cli_root', 'zpool_attach']

[tests/functional/cli_root/zpool_clear]
tests = ['zpool_clear_001_pos', 'zpool_clear_002_neg', 'zpool_clear_003_neg',
    'zpool_clear_readonly']
tags = ['functional', 'cli_root', 'zpool_clear']

[tests/functional/cli_root/zpool_create]
tests = ['zpool_create_001_pos', 'zpool_create_002_pos',
    'zpool_create_003_pos', 'zpool_create_004_pos', 'zpool_create_005_pos',
    'zpool_create_006_pos', 'zpool_create_007_neg', 'zpool_create_008_pos',
    'zpool_create_009_neg', 'zpool_create_010_neg', 'zpool_create_011_neg',
    'zpool_create_012_neg', 'zpool_create_014_neg', 'zpool_create_015_neg',
    'zpool_create_017_neg', 'zpool_create_018_pos', 'zpool_create_019_pos',
    'zpool_create_020_pos', 'zpool_create_021_pos', 'zpool_create_022_pos',
    'zpool_create_023_neg', 'zpool_create_024_pos',
    'zpool_create_encrypted', 'zpool_create_crypt_combos',
    'zpool_create_draid_001_pos', 'zpool_create_draid_002_pos',
    'zpool_create_draid_003_pos', 'zpool_create_draid_004_pos',
    'zpool_create_features_001_pos', 'zpool_create_features_002_pos',
    'zpool_create_features_003_pos', 'zpool_create_features_004_neg',
    'zpool_create_features_005_pos', 'zpool_create_features_006_pos',
    'zpool_create_features_007_pos', 'zpool_create_features_008_pos',
    'zpool_create_features_009_pos', 'create-o_ashift',
    'zpool_create_tempname', 'zpool_create_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_create']

[tests/functional/cli_root/zpool_destroy]
tests = ['zpool_destroy_001_pos', 'zpool_destroy_002_pos',
    'zpool_destroy_003_neg']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_destroy']

[tests/functional/cli_root/zpool_detach]
tests = ['zpool_detach_001_neg']
tags = ['functional', 'cli_root', 'zpool_detach']

[tests/functional/cli_root/zpool_events]
tests = ['zpool_events_clear', 'zpool_events_cliargs', 'zpool_events_follow',
    'zpool_events_poolname', 'zpool_events_errors', 'zpool_events_duplicates',
    'zpool_events_clear_retained']
tags = ['functional', 'cli_root', 'zpool_events']

[tests/functional/cli_root/zpool_export]
tests = ['zpool_export_001_pos', 'zpool_export_002_pos',
    'zpool_export_003_neg', 'zpool_export_004_pos']
tags = ['functional', 'cli_root', 'zpool_export']

[tests/functional/cli_root/zpool_get]
tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos',
    'zpool_get_004_neg', 'zpool_get_005_pos']
tags = ['functional', 'cli_root', 'zpool_get']

[tests/functional/cli_root/zpool_history]
tests = ['zpool_history_001_neg', 'zpool_history_002_pos']
tags = ['functional', 'cli_root', 'zpool_history']

[tests/functional/cli_root/zpool_import]
tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
    'zpool_import_003_pos', 'zpool_import_004_pos', 'zpool_import_005_pos',
    'zpool_import_006_pos', 'zpool_import_007_pos', 'zpool_import_008_pos',
    'zpool_import_009_neg', 'zpool_import_010_pos', 'zpool_import_011_neg',
    'zpool_import_012_pos', 'zpool_import_013_neg', 'zpool_import_014_pos',
    'zpool_import_015_pos', 'zpool_import_016_pos', 'zpool_import_017_pos',
    'zpool_import_features_001_pos', 'zpool_import_features_002_neg',
    'zpool_import_features_003_pos', 'zpool_import_missing_001_pos',
    'zpool_import_missing_002_pos', 'zpool_import_missing_003_pos',
    'zpool_import_rename_001_pos', 'zpool_import_all_001_pos',
    'zpool_import_encrypted', 'zpool_import_encrypted_load',
    'zpool_import_errata3', 'zpool_import_errata4',
    'import_cachefile_device_added',
    'import_cachefile_device_removed',
    'import_cachefile_device_replaced',
    'import_cachefile_mirror_attached',
    'import_cachefile_mirror_detached',
    'import_cachefile_paths_changed',
    'import_cachefile_shared_device',
    'import_devices_missing',
    'import_paths_changed',
    'import_rewind_config_changed',
    'import_rewind_device_replaced']
tags = ['functional', 'cli_root', 'zpool_import']
timeout = 1200

[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported',
    'zpool_labelclear_removed', 'zpool_labelclear_valid']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_labelclear']

[tests/functional/cli_root/zpool_initialize]
tests = ['zpool_initialize_attach_detach_add_remove',
    'zpool_initialize_fault_export_import_online',
    'zpool_initialize_import_export',
    'zpool_initialize_offline_export_import_online',
    'zpool_initialize_online_offline',
    'zpool_initialize_split',
    'zpool_initialize_start_and_cancel_neg',
    'zpool_initialize_start_and_cancel_pos',
    'zpool_initialize_suspend_resume',
    'zpool_initialize_unsupported_vdevs',
    'zpool_initialize_verify_checksums',
    'zpool_initialize_verify_initialized']
pre =
tags = ['functional', 'cli_root', 'zpool_initialize']

[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg',
    'zpool_offline_003_pos']
tags = ['functional', 'cli_root', 'zpool_offline']

[tests/functional/cli_root/zpool_online]
tests = ['zpool_online_001_pos', 'zpool_online_002_neg']
tags = ['functional', 'cli_root', 'zpool_online']

[tests/functional/cli_root/zpool_remove]
tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
    'zpool_remove_003_pos']
tags = ['functional', 'cli_root', 'zpool_remove']

[tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg', 'replace-o_ashift', 'replace_prop_ashift']
tags = ['functional', 'cli_root', 'zpool_replace']

[tests/functional/cli_root/zpool_resilver]
tests = ['zpool_resilver_bad_args', 'zpool_resilver_restart']
tags = ['functional', 'cli_root', 'zpool_resilver']

[tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
    'zpool_scrub_004_pos', 'zpool_scrub_005_pos',
    'zpool_scrub_encrypted_unloaded', 'zpool_scrub_print_repairing',
    'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies']
tags = ['functional', 'cli_root', 'zpool_scrub']

[tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg',
    'zpool_set_ashift', 'zpool_set_features']
tags = ['functional', 'cli_root', 'zpool_set']

[tests/functional/cli_root/zpool_split]
tests = ['zpool_split_cliargs', 'zpool_split_devices',
    'zpool_split_encryption', 'zpool_split_props', 'zpool_split_vdevs',
    'zpool_split_resilver', 'zpool_split_indirect',
    'zpool_split_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_split']

[tests/functional/cli_root/zpool_status]
tests = ['zpool_status_001_pos', 'zpool_status_002_pos',
    'zpool_status_003_pos', 'zpool_status_004_pos',
    'zpool_status_005_pos', 'zpool_status_features_001_pos']
tags = ['functional', 'cli_root', 'zpool_status']

[tests/functional/cli_root/zpool_sync]
tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg']
tags = ['functional', 'cli_root', 'zpool_sync']

[tests/functional/cli_root/zpool_trim]
tests = ['zpool_trim_attach_detach_add_remove',
    'zpool_trim_fault_export_import_online',
    'zpool_trim_import_export', 'zpool_trim_multiple', 'zpool_trim_neg',
    'zpool_trim_offline_export_import_online', 'zpool_trim_online_offline',
    'zpool_trim_partial', 'zpool_trim_rate', 'zpool_trim_rate_neg',
    'zpool_trim_secure', 'zpool_trim_split', 'zpool_trim_start_and_cancel_neg',
    'zpool_trim_start_and_cancel_pos', 'zpool_trim_suspend_resume',
    'zpool_trim_unsupported_vdevs', 'zpool_trim_verify_checksums',
    'zpool_trim_verify_trimmed']
tags = ['functional', 'zpool_trim']

[tests/functional/cli_root/zpool_upgrade]
tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
    'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos',
    'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg',
    'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos',
    'zpool_upgrade_009_neg', 'zpool_upgrade_features_001_pos']
tags = ['functional', 'cli_root', 'zpool_upgrade']

[tests/functional/cli_root/zpool_wait]
tests = ['zpool_wait_discard', 'zpool_wait_freeing',
    'zpool_wait_initialize_basic', 'zpool_wait_initialize_cancel',
    'zpool_wait_initialize_flag', 'zpool_wait_multiple',
    'zpool_wait_no_activity', 'zpool_wait_remove', 'zpool_wait_remove_cancel',
    'zpool_wait_trim_basic', 'zpool_wait_trim_cancel', 'zpool_wait_trim_flag',
    'zpool_wait_usage']
tags = ['functional', 'cli_root', 'zpool_wait']

[tests/functional/cli_root/zpool_wait/scan]
tests = ['zpool_wait_replace_cancel', 'zpool_wait_rebuild',
    'zpool_wait_resilver', 'zpool_wait_scrub_cancel',
    'zpool_wait_replace', 'zpool_wait_scrub_basic', 'zpool_wait_scrub_flag']
tags = ['functional', 'cli_root', 'zpool_wait']

[tests/functional/cli_user/misc]
tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
    'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
    'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg',
    'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg',
    'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg',
    'zfs_share_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
    'zfs_unmount_001_neg', 'zfs_unshare_001_neg', 'zfs_upgrade_001_neg',
    'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg',
    'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg',
    'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
    'zpool_history_001_neg', 'zpool_import_001_neg', 'zpool_import_002_neg',
    'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg',
    'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
    'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
    'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege',
    'zilstat_001_pos']
user =
tags = ['functional', 'cli_user', 'misc']

[tests/functional/cli_user/zfs_list]
tests = ['zfs_list_001_pos', 'zfs_list_002_pos', 'zfs_list_003_pos',
    'zfs_list_004_neg', 'zfs_list_005_neg', 'zfs_list_007_pos',
    'zfs_list_008_neg']
user =
tags = ['functional', 'cli_user', 'zfs_list']

[tests/functional/cli_user/zpool_iostat]
tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos',
    'zpool_iostat_003_neg', 'zpool_iostat_004_pos',
    'zpool_iostat_005_pos', 'zpool_iostat_-c_disable',
    'zpool_iostat_-c_homedir', 'zpool_iostat_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_iostat']

[tests/functional/cli_user/zpool_list]
tests = ['zpool_list_001_pos', 'zpool_list_002_neg']
user =
tags = ['functional', 'cli_user', 'zpool_list']

[tests/functional/cli_user/zpool_status]
tests = ['zpool_status_003_pos', 'zpool_status_-c_disable',
    'zpool_status_-c_homedir', 'zpool_status_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_status']

[tests/functional/compression]
tests = ['compress_001_pos', 'compress_002_pos', 'compress_003_pos',
    'l2arc_compressed_arc', 'l2arc_compressed_arc_disabled',
    'l2arc_encrypted', 'l2arc_encrypted_no_compressed_arc']
tags = ['functional', 'compression']

[tests/functional/cp_files]
tests = ['cp_files_001_pos']
tags = ['functional', 'cp_files']

[tests/functional/crtime]
tests = ['crtime_001_pos']
tags = ['functional', 'crtime']

[tests/functional/ctime]
tests = ['ctime_001_pos']
tags = ['functional', 'ctime']

[tests/functional/deadman]
tests = ['deadman_ratelimit', 'deadman_sync', 'deadman_zio']
pre =
post =
tags = ['functional', 'deadman']

[tests/functional/delegate]
tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos', 'zfs_allow_003_pos',
    'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos',
    'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg',
    'zfs_allow_010_pos', 'zfs_allow_011_neg', 'zfs_allow_012_neg',
    'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos',
    'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos',
    'zfs_unallow_007_neg', 'zfs_unallow_008_neg']
tags = ['functional', 'delegate']

[tests/functional/exec]
tests = ['exec_001_pos', 'exec_002_neg']
tags = ['functional', 'exec']

[tests/functional/fallocate]
tests = ['fallocate_punch-hole']
tags = ['functional', 'fallocate']

[tests/functional/features/async_destroy]
tests = ['async_destroy_001_pos']
tags = ['functional', 'features', 'async_destroy']

[tests/functional/features/large_dnode]
tests = ['large_dnode_001_pos', 'large_dnode_003_pos', 'large_dnode_004_neg',
    'large_dnode_005_pos', 'large_dnode_007_neg', 'large_dnode_009_pos']
tags = ['functional', 'features', 'large_dnode']

[tests/functional/grow]
pre =
post =
tests = ['grow_pool_001_pos', 'grow_replicas_001_pos']
tags = ['functional', 'grow']

[tests/functional/history]
tests = ['history_001_pos', 'history_002_pos', 'history_003_pos',
    'history_004_pos', 'history_005_neg', 'history_006_neg',
    'history_007_pos', 'history_008_pos', 'history_009_pos',
    'history_010_pos']
tags = ['functional', 'history']

[tests/functional/hkdf]
pre =
post =
tests = ['hkdf_test']
tags = ['functional', 'hkdf']

[tests/functional/inheritance]
tests = ['inherit_001_pos']
pre =
tags = ['functional', 'inheritance']

[tests/functional/io]
tests = ['sync', 'psync', 'posixaio', 'mmap']
tags = ['functional', 'io']

[tests/functional/inuse]
tests = ['inuse_004_pos', 'inuse_005_pos', 'inuse_008_pos', 'inuse_009_pos']
post =
tags = ['functional', 'inuse']

[tests/functional/large_files]
tests = ['large_files_001_pos', 'large_files_002_pos']
tags = ['functional', 'large_files']

[tests/functional/limits]
tests = ['filesystem_count', 'filesystem_limit', 'snapshot_count',
    'snapshot_limit']
tags = ['functional', 'limits']

[tests/functional/link_count]
tests = ['link_count_001', 'link_count_root_inode']
tags = ['functional', 'link_count']

[tests/functional/migration]
tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
    'migration_004_pos', 'migration_005_pos', 'migration_006_pos',
    'migration_007_pos', 'migration_008_pos', 'migration_009_pos',
    'migration_010_pos', 'migration_011_pos', 'migration_012_pos']
tags = ['functional', 'migration']

[tests/functional/mmap]
tests = ['mmap_write_001_pos', 'mmap_read_001_pos', 'mmap_seek_001_pos',
    'mmap_sync_001_pos']
tags = ['functional', 'mmap']

[tests/functional/mount]
tests = ['umount_001', 'umountall_001']
tags = ['functional', 'mount']

[tests/functional/mv_files]
tests = ['mv_files_001_pos', 'mv_files_002_pos', 'random_creation']
tags = ['functional', 'mv_files']

[tests/functional/nestedfs]
tests = ['nestedfs_001_pos']
tags = ['functional', 'nestedfs']

[tests/functional/no_space]
tests = ['enospc_001_pos', 'enospc_002_pos', 'enospc_003_pos',
    'enospc_df', 'enospc_rm']
tags = ['functional', 'no_space']

[tests/functional/nopwrite]
tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative',
    'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync',
    'nopwrite_varying_compression', 'nopwrite_volume']
tags = ['functional', 'nopwrite']

[tests/functional/online_offline]
tests = ['online_offline_001_pos', 'online_offline_002_neg',
    'online_offline_003_neg']
tags = ['functional', 'online_offline']

[tests/functional/pool_checkpoint]
tests = ['checkpoint_after_rewind', 'checkpoint_big_rewind',
    'checkpoint_capacity', 'checkpoint_conf_change', 'checkpoint_discard',
    'checkpoint_discard_busy', 'checkpoint_discard_many',
    'checkpoint_indirect', 'checkpoint_invalid', 'checkpoint_lun_expsz',
    'checkpoint_open', 'checkpoint_removal', 'checkpoint_rewind',
    'checkpoint_ro_rewind', 'checkpoint_sm_scale', 'checkpoint_twice',
    'checkpoint_vdev_add', 'checkpoint_zdb', 'checkpoint_zhack_feat']
tags = ['functional', 'pool_checkpoint']
timeout = 1800

[tests/functional/pool_names]
tests = ['pool_names_001_pos', 'pool_names_002_neg']
pre =
post =
tags = ['functional', 'pool_names']

[tests/functional/poolversion]
tests = ['poolversion_001_pos', 'poolversion_002_pos']
tags = ['functional', 'poolversion']

[tests/functional/pyzfs]
tests = ['pyzfs_unittest']
pre =
post =
tags = ['functional', 'pyzfs']

[tests/functional/quota]
tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos',
    'quota_004_pos', 'quota_005_pos', 'quota_006_neg']
tags = ['functional', 'quota']

[tests/functional/redacted_send]
tests = ['redacted_compressed', 'redacted_contents', 'redacted_deleted',
    'redacted_disabled_feature', 'redacted_embedded', 'redacted_holes',
    'redacted_incrementals', 'redacted_largeblocks', 'redacted_many_clones',
    'redacted_mixed_recsize', 'redacted_mounts', 'redacted_negative',
    'redacted_origin', 'redacted_panic', 'redacted_props', 'redacted_resume',
    'redacted_size', 'redacted_volume']
tags = ['functional', 'redacted_send']

[tests/functional/raidz]

Distributed Spare (dRAID) Feature
This patch adds a new top-level vdev type called dRAID, which stands
for Distributed parity RAID. This pool configuration allows all dRAID
vdevs to participate when rebuilding to a distributed hot spare device.
This can substantially reduce the total time required to restore full
parity to a pool with a failed device.
A dRAID pool can be created using the new top-level `draid` type.
Like `raidz`, the desired redundancy is specified after the type:
`draid[1,2,3]`. No additional information is required to create the
pool and reasonable default values will be chosen based on the number
of child vdevs in the dRAID vdev.
zpool create <pool> draid[1,2,3] <vdevs...>
Unlike raidz, additional optional dRAID configuration values can be
provided as part of the draid type as colon-separated values. This
allows administrators to fully specify a layout for either performance
or capacity reasons. The supported options include:
zpool create <pool> \
draid[<parity>][:<data>d][:<children>c][:<spares>s] \
<vdevs...>
- draid[<parity>] - Parity level (default 1)
- draid[:<data>d] - Data devices per group (default 8)
- draid[:<children>c] - Expected number of child vdevs
- draid[:<spares>s] - Distributed hot spares (default 0)
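As a concrete sketch (device paths are placeholders), the 68-disk
layout from the `zpool status` example below could be created with:
```
# double parity, 8 data devices per group, 68 children, 2 distributed spares
zpool create slag7 draid2:8d:68c:2s /dev/disk/by-id/disk{00..67}
```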
Abbreviated example `zpool status` output for a 68-disk dRAID pool
with two distributed spares using special allocation classes.
```
  pool: tank
 state: ONLINE
config:

        NAME                  STATE     READ WRITE CKSUM
        slag7                 ONLINE       0     0     0
          draid2:8d:68c:2s-0  ONLINE       0     0     0
            L0                ONLINE       0     0     0
            L1                ONLINE       0     0     0
            ...
            U25               ONLINE       0     0     0
            U26               ONLINE       0     0     0
            spare-53          ONLINE       0     0     0
              U27             ONLINE       0     0     0
              draid2-0-0      ONLINE       0     0     0
            U28               ONLINE       0     0     0
            U29               ONLINE       0     0     0
            ...
            U42               ONLINE       0     0     0
            U43               ONLINE       0     0     0
        special
          mirror-1            ONLINE       0     0     0
            L5                ONLINE       0     0     0
            U5                ONLINE       0     0     0
          mirror-2            ONLINE       0     0     0
            L6                ONLINE       0     0     0
            U6                ONLINE       0     0     0
        spares
          draid2-0-0          INUSE     currently in use
          draid2-0-1          AVAIL
```
When adding test coverage for the new dRAID vdev type, the following
options were added to the ztest command. These options are leveraged
by zloop.sh to test a wide range of dRAID configurations.
-K draid|raidz|random - kind of RAID to test
-D <value> - dRAID data drives per group
-S <value> - dRAID distributed hot spares
-R <value> - RAID parity (raidz or dRAID)
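A hedged example of driving these options by hand (the values are
illustrative; ztest's other defaults are left in place):
```
# Exercise a dRAID layout: 8 data drives per group, 2 distributed
# spares, double parity.
ztest -K draid -D 8 -S 2 -R 2
```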
The zpool_create, zpool_import, redundancy, replacement, and fault
test groups have all been updated to provide test coverage for the
dRAID feature.
Co-authored-by: Isaac Huang <he.huang@intel.com>
Co-authored-by: Mark Maybee <mmaybee@cray.com>
Co-authored-by: Don Brady <don.brady@delphix.com>
Co-authored-by: Matthew Ahrens <mahrens@delphix.com>
Co-authored-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Mark Maybee <mmaybee@cray.com>
Reviewed-by: Matt Ahrens <matt@delphix.com>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #10102

tests = ['raidz_001_neg', 'raidz_002_pos', 'raidz_003_pos', 'raidz_004_pos']
tags = ['functional', 'raidz']

[tests/functional/redundancy]
tests = ['redundancy_draid', 'redundancy_draid1', 'redundancy_draid2',
    'redundancy_draid3', 'redundancy_draid_damaged1',
    'redundancy_draid_damaged2', 'redundancy_draid_spare1',
    'redundancy_draid_spare2', 'redundancy_draid_spare3', 'redundancy_mirror',
    'redundancy_raidz', 'redundancy_raidz1', 'redundancy_raidz2',
    'redundancy_raidz3', 'redundancy_stripe']
tags = ['functional', 'redundancy']
timeout = 1200

[tests/functional/refquota]
tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
    'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg',
    'refquota_007_neg', 'refquota_008_neg']
tags = ['functional', 'refquota']

[tests/functional/refreserv]
tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
    'refreserv_004_pos', 'refreserv_005_pos', 'refreserv_multi_raidz',
    'refreserv_raidz']
tags = ['functional', 'refreserv']

[tests/functional/removal]
pre =
tests = ['removal_all_vdev', 'removal_cancel', 'removal_check_space',
    'removal_condense_export', 'removal_multiple_indirection',
    'removal_nopwrite', 'removal_remap_deadlists',
    'removal_resume_export', 'removal_sanity', 'removal_with_add',
    'removal_with_create_fs', 'removal_with_dedup',
    'removal_with_errors', 'removal_with_export',
    'removal_with_ganging', 'removal_with_faulted',
    'removal_with_remove', 'removal_with_scrub', 'removal_with_send',
    'removal_with_send_recv', 'removal_with_snapshot',
    'removal_with_write', 'removal_with_zdb', 'remove_expanded',
    'remove_mirror', 'remove_mirror_sanity', 'remove_raidz',
    'remove_indirect', 'remove_attach_mirror']
tags = ['functional', 'removal']

[tests/functional/rename_dirs]
tests = ['rename_dirs_001_pos']
tags = ['functional', 'rename_dirs']

[tests/functional/replacement]
tests = ['attach_import', 'attach_multiple', 'attach_rebuild',
    'attach_resilver', 'detach', 'rebuild_disabled_feature',
    'rebuild_multiple', 'rebuild_raidz', 'replace_import', 'replace_rebuild',
    'replace_resilver', 'resilver_restart_001', 'resilver_restart_002',
    'scrub_cancel']
tags = ['functional', 'replacement']

[tests/functional/reservation]
tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
    'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos',
    'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos',
    'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos',
    'reservation_013_pos', 'reservation_014_pos', 'reservation_015_pos',
    'reservation_016_pos', 'reservation_017_pos', 'reservation_018_pos',
    'reservation_019_pos', 'reservation_020_pos', 'reservation_021_neg',
    'reservation_022_pos']
tags = ['functional', 'reservation']

[tests/functional/rootpool]
tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos']
tags = ['functional', 'rootpool']

[tests/functional/rsend]
tests = ['recv_dedup', 'recv_dedup_encrypted_zvol', 'rsend_001_pos',
    'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', 'rsend_005_pos',
    'rsend_006_pos', 'rsend_007_pos', 'rsend_008_pos', 'rsend_009_pos',
    'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos', 'rsend_013_pos',
    'rsend_014_pos', 'rsend_016_neg', 'rsend_019_pos', 'rsend_020_pos',
    'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos', 'rsend_025_pos',
    'rsend_026_neg', 'rsend_027_pos', 'rsend_028_neg', 'rsend_029_neg',
    'send-c_verify_ratio', 'send-c_verify_contents', 'send-c_props',
    'send-c_incremental', 'send-c_volume', 'send-c_zstreamdump',
    'send-c_lz4_disabled', 'send-c_recv_lz4_disabled',
    'send-c_mixed_compression', 'send-c_stream_size_estimate',
    'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize',

File incorrectly zeroed when receiving incremental stream that toggles -L
Background:
By increasing the recordsize property above the default of 128KB, a
filesystem may have "large" blocks. By default, a send stream of such a
filesystem does not contain large WRITE records; instead, it decreases
objects' block sizes to 128KB and splits the large blocks into 128KB
blocks, allowing the large-block filesystem to be received by a system
that does not support the `large_blocks` feature. A send stream
generated by `zfs send -L` (or `--large-block`) preserves the large
block size on the receiving system by using large WRITE records.
When receiving an incremental send stream for a filesystem with large
blocks, if the send stream's -L flag was toggled, a bug is encountered
in which the file's contents are incorrectly zeroed out. The contents
of any blocks that were not modified by this send stream will be lost.
"Toggled" means that the previous send used `-L` but this incremental
does not (-L to no-L), or that the previous send did not use `-L` but
this incremental does (no-L to -L).
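For context, a dataset acquires large blocks simply by raising
recordsize (a sketch; names are placeholders, and the pool must have
the large_blocks feature enabled):
```
zfs set recordsize=1M tank/fs       # new writes may use blocks up to 1MB
zfs send -L tank/fs@snap > stream   # -L preserves those large blocks
```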
Changes:
This commit addresses the problem with several changes to the semantics
of zfs send/receive:
1. "-L to no-L" incrementals are rejected. If the previous send used
`-L`, but this incremental does not use `-L`, the `zfs receive` will
fail with this error message:
    incremental send stream requires -L (--large-block), to match
    previous receive.
2. "no-L to -L" incrementals are handled correctly, preserving the
smaller (128KB) block size of any already-received files that used large
blocks on the sending system but were split by `zfs send` without the
`-L` flag.
3. A new send stream format flag is added, `SWITCH_TO_LARGE_BLOCKS`.
This feature indicates that we can correctly handle "no-L to -L"
incrementals. This flag is currently not set on any send streams. In
the future, we intend for incremental send streams of snapshots that
have large blocks to use `-L` by default, and these streams will also
have the `SWITCH_TO_LARGE_BLOCKS` feature set. This ensures that streams
from the default use of `zfs send` won't encounter the bug mentioned
above, because they can't be received by software with the bug.
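Illustrated with hypothetical dataset and snapshot names, the new
semantics look like this:
```
zfs send -L tank/fs@snap1 | zfs recv tank/copy            # full send with -L
zfs send -i @snap1 tank/fs@snap2 | zfs recv tank/copy     # rejected: -L to no-L
zfs send -L -i @snap1 tank/fs@snap2 | zfs recv tank/copy  # accepted
```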
Implementation notes:
To facilitate accessing the ZPL's generation number,
`zfs_space_delta_cb()` has been renamed to `zpl_get_file_info()` and
restructured to fill in a struct with ZPL-specific info including owner
and generation.
In the "no-L to -L" case, if this is a compressed send stream (from
`zfs send -cL`), large WRITE records that are being written to small
(128KB) blocksize files need to be decompressed so that they can be
written split up into multiple blocks. The zio pipeline will recompress
each smaller block individually.
A new test case, `send-L_toggle`, is added, which tests the "no-L to -L"
case and verifies that we get an error for the "-L to no-L" case.
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #6224
Closes #10383

    'send-c_recv_dedup', 'send-L_toggle', 'send_encrypted_hierarchy',
    'send_encrypted_props', 'send_encrypted_truncated_files',
    'send_freeobjects', 'send_realloc_files',
    'send_realloc_encrypted_files', 'send_spill_block', 'send_holds',
    'send_hole_birth', 'send_mixed_raw', 'send-wR_encrypted_zvol',
    'send_partial_dataset', 'send_invalid', 'send_doall',
    'send_raw_spill_block', 'send_raw_ashift']
tags = ['functional', 'rsend']

[tests/functional/scrub_mirror]
tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos',
    'scrub_mirror_003_pos', 'scrub_mirror_004_pos']
tags = ['functional', 'scrub_mirror']

[tests/functional/slog]
tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos',
    'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg',
    'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_012_neg',
    'slog_013_pos', 'slog_014_pos', 'slog_015_neg', 'slog_replay_fs_001',

log xattr=sa create/remove/update to ZIL
There are no specific synchronous semantics defined for xattrs. But
with xattr=on, the operations are logged to the ZIL and zil_commit()
is done if sync=always is set on the dataset. This provides sync
semantics for xattr=on with sync=always.
The xattr=sa implementation does not log to the ZIL, so even with
sync=always, xattrs are not guaranteed to be synced before the xattr
call returns to the caller. An xattr can therefore be lost if the
system crashes before the txg carrying the xattr transaction is synced.
This change adds xattr=sa logging to the ZIL on xattr
create/remove/update, and xattrs are synced to the ZIL (zil_commit()
done) for sync=always. This makes xattr=sa behavior similar to
xattr=on.
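A sketch of the configuration this change targets (pool and dataset
names are placeholders; zilsaxattr is the feature this commit
introduces):
```
zpool set feature@zilsaxattr=enabled tank  # enable; activation is per-dataset
zfs set xattr=sa tank/fs                   # store xattrs as system attributes
zfs set sync=always tank/fs                # xattr ops are now zil_commit()ed
```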
Implementation notes:
The actual logging is fairly straightforward and does not warrant
additional explanation.
However, it has been 14 years since we last added new TX types
to the ZIL [1], hence this is the first time we do it after the
introduction of zpool features. Therefore, here is an overview of the
feature activation and deactivation workflow:
1. The feature must be enabled. Otherwise, we don't log the new
record type. This ensures compatibility with older software.
2. The feature is activated per-dataset, since the ZIL is per-dataset.
3. If the feature is enabled and the dataset is not a zvol, any append
to the ZIL chain will activate the feature for the dataset. Likewise
for starting a new ZIL chain.
4. A dataset that doesn't have a ZIL chain has the feature deactivated.
We ensure (3) by activating on the first zil_commit() after the feature
was enabled. Since activating the feature requires waiting for txg
sync, the first zil_commit() after enabling the feature will be slower
than usual. The downside is that this is really a conservative
approximation: even if we never append a 'TX_SETSAXATTR' to the ZIL
chain, we pay the penalty for feature activation. The upside is that the
user is in control of when we pay the penalty, i.e., upon enabling the
feature.
We ensure (4) by hooking into zil_sync(), where ZIL destroy actually
happens.
One more piece on feature activation, since it's spread across
multiple functions:
zil_commit()
  zil_process_commit_list()
    if lwb == NULL // first zil_commit since zil_open
      zil_create()
        if no log block pointer in ZIL header:
          if feature enabled and not active:
            // CASE 1
            enable, COALESCE txg wait with dmu_tx that allocated the
            log block
        else // log block was allocated earlier than this zil_open
          if feature enabled and not active:
            // CASE 2
            enable, EXPLICIT txg wait
    else // already have an in-DRAM LWB
      if feature enabled and not active:
        // this happens when we enable the feature after zil_create
        // CASE 3
        enable, EXPLICIT txg wait
[1] https://github.com/illumos/illumos-gate/commit/da6c28aaf62fa55f0fdb8004aa40f88f23bf53f0
Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Christian Schwarz <christian.schwarz@nutanix.com>
Reviewed-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Reviewed-by: Ryan Moeller <freqlabs@FreeBSD.org>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Jitendra Patidar <jitendra.patidar@nutanix.com>
Closes #8768
Closes #9078

    'slog_replay_fs_002', 'slog_replay_volume', 'slog_016_pos']
tags = ['functional', 'slog']

[tests/functional/snapshot]
tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
    'rollback_003_pos', 'snapshot_001_pos', 'snapshot_002_pos',
    'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos',
    'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos',
    'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos',
    'snapshot_012_pos', 'snapshot_013_pos', 'snapshot_014_pos',
    'snapshot_017_pos', 'snapshot_018_pos']
tags = ['functional', 'snapshot']

[tests/functional/snapused]
tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos',
    'snapused_004_pos', 'snapused_005_pos']
tags = ['functional', 'snapused']

[tests/functional/sparse]
tests = ['sparse_001_pos']
tags = ['functional', 'sparse']

[tests/functional/stat]
tests = ['stat_001_pos']
tags = ['functional', 'stat']

[tests/functional/suid]
tests = ['suid_write_to_suid', 'suid_write_to_sgid', 'suid_write_to_suid_sgid',
    'suid_write_to_none', 'suid_write_zil_replay']
tags = ['functional', 'suid']

[tests/functional/trim]
tests = ['autotrim_integrity', 'autotrim_config', 'autotrim_trim_integrity',
    'trim_integrity', 'trim_config', 'trim_l2arc']
tags = ['functional', 'trim']

[tests/functional/truncate]
tests = ['truncate_001_pos', 'truncate_002_pos', 'truncate_timestamps']
tags = ['functional', 'truncate']

[tests/functional/upgrade]
tests = ['upgrade_userobj_001_pos', 'upgrade_readonly_pool']
tags = ['functional', 'upgrade']

[tests/functional/userquota]
tests = [
    'userquota_001_pos', 'userquota_002_pos', 'userquota_003_pos',
    'userquota_004_pos', 'userquota_005_neg', 'userquota_006_pos',
    'userquota_007_pos', 'userquota_008_pos', 'userquota_009_pos',
    'userquota_010_pos', 'userquota_011_pos', 'userquota_012_neg',
    'userspace_001_pos', 'userspace_002_pos', 'userspace_encrypted',
    'userspace_send_encrypted']
tags = ['functional', 'userquota']

[tests/functional/vdev_zaps]
tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos',
    'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos',
    'vdev_zaps_007_pos']
tags = ['functional', 'vdev_zaps']

[tests/functional/write_dirs]
tests = ['write_dirs_001_pos', 'write_dirs_002_pos']
tags = ['functional', 'write_dirs']

[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
    'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
    'xattr_011_pos', 'xattr_012_pos', 'xattr_013_pos', 'xattr_compat']
tags = ['functional', 'xattr']

[tests/functional/zvol/zvol_ENOSPC]
tests = ['zvol_ENOSPC_001_pos']
tags = ['functional', 'zvol', 'zvol_ENOSPC']

[tests/functional/zvol/zvol_cli]
tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg']
tags = ['functional', 'zvol', 'zvol_cli']

[tests/functional/zvol/zvol_misc]
tests = ['zvol_misc_002_pos', 'zvol_misc_hierarchy', 'zvol_misc_rename_inuse',
    'zvol_misc_snapdev', 'zvol_misc_trim', 'zvol_misc_volmode', 'zvol_misc_zil']
tags = ['functional', 'zvol', 'zvol_misc']

[tests/functional/zvol/zvol_stress]
tests = ['zvol_stress']
tags = ['functional', 'zvol', 'zvol_stress']

[tests/functional/zvol/zvol_swap]
tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos', 'zvol_swap_004_pos']
tags = ['functional', 'zvol', 'zvol_swap']

[tests/functional/libzfs]
tests = ['many_fds', 'libzfs_input']
tags = ['functional', 'libzfs']

[tests/functional/log_spacemap]
tests = ['log_spacemap_import_logs']
pre =
post =
tags = ['functional', 'log_spacemap']

Add L2ARC arcstats for MFU/MRU buffers and buffer content type
Currently the ARC state (MFU/MRU) of cached L2ARC buffers and their
content type are unknown. Knowing this information may prove beneficial
in adjusting the L2ARC caching policy.
This commit adds L2ARC arcstats that display the aligned size
(in bytes) of L2ARC buffers according to their content type
(data/metadata) and according to their ARC state (MRU/MFU or
prefetch). It also expands the existing evict_l2_eligible arcstat to
differentiate between MFU and MRU buffers.
L2ARC caches buffers from the MRU and MFU lists of the ARC. Upon
caching a buffer, its ARC state (MRU/MFU) is stored in the L2 header
(b_arcs_state). The l2_m{f,r}u_asize arcstats reflect the aligned size
(in bytes) of L2ARC buffers according to their ARC state (based on
b_arcs_state). We also account for the case where an L2ARC- and
ARC-cached MRU or MRU_ghost buffer transitions to MFU. The
l2_prefetch_asize arcstat reflects the aligned size (in bytes) of L2ARC
buffers that were cached while they had the prefetch flag set in ARC.
This is dynamically updated as the prefetch flag of L2ARC buffers
changes.
When buffers are evicted from ARC, if they are determined to be L2ARC
eligible then their logical size is recorded in
evict_l2_eligible_m{r,f}u arcstats according to their ARC state upon
eviction.
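On Linux, these counters can be read from the arcstats kstat; a sketch
(the stat names follow the description above):
```
# Show the new L2ARC size and eviction arcstats.
grep -E '^(l2_(mfu|mru|prefetch)_asize|evict_l2_eligible_m[fr]u) ' \
    /proc/spl/kstat/zfs/arcstats
```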
Persistent L2ARC:
When committing an L2ARC buffer to a log block (L2ARC metadata), its
b_arcs_state and prefetch flag are also stored. If the buffer changes
its arcstate or prefetch flag, this is reflected in the above arcstats.
However, the L2ARC metadata cannot currently be updated to reflect this
change.
Example: L2ARC caches an MRU buffer. L2ARC metadata and arcstats count
this as an MRU buffer. The buffer transitions to MFU. The arcstats are
updated to reflect this. Upon pool re-import or on/offlining the L2ARC
device the arcstats are cleared and the buffer will now be counted as an
MRU buffer, as the L2ARC metadata were not updated.
Bug fix:
- If l2arc_noprefetch is set, arc_read_done() clears the L2CACHE flag
of an ARC buffer. However, prefetches may be issued in a way that
arc_read_done() is bypassed. Instead, the related code is moved into
l2arc_write_eligible() to account for those cases too.
Also add a test and update manpages for l2arc_mfuonly module parameter,
and update the manpages and code comments for l2arc_noprefetch.
Move persist_l2arc tests to l2arc.
Reviewed-by: Ryan Moeller <freqlabs@FreeBSD.org>
Reviewed-by: Richard Elling <Richard.Elling@RichardElling.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: George Amanakis <gamanakis@gmail.com>
Closes #10743

[tests/functional/l2arc]
tests = ['l2arc_arcstats_pos', 'l2arc_mfuonly_pos', 'l2arc_l2miss_pos',
'persist_l2arc_001_pos', 'persist_l2arc_002_pos',
'persist_l2arc_003_neg', 'persist_l2arc_004_pos', 'persist_l2arc_005_pos']
tags = ['functional', 'l2arc']

[tests/functional/zpool_influxdb]
tests = ['zpool_influxdb']
tags = ['functional', 'zpool_influxdb']