#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

# This run file contains all of the common functional tests. When
# adding a new test consider also adding it to the sanity.run file
# if the new test runs to completion in only a few seconds.
#
# Approximate run time: 4-5 hours
#

[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']

[tests/functional/alloc_class]
tests = ['alloc_class_001_pos', 'alloc_class_002_neg', 'alloc_class_003_pos',
    'alloc_class_004_pos', 'alloc_class_005_pos', 'alloc_class_006_pos',
    'alloc_class_007_pos', 'alloc_class_008_pos', 'alloc_class_009_pos',
    'alloc_class_010_pos', 'alloc_class_011_neg', 'alloc_class_012_pos',
    'alloc_class_013_pos']
tags = ['functional', 'alloc_class']

[tests/functional/arc]
tests = ['dbufstats_001_pos', 'dbufstats_002_pos', 'dbufstats_003_pos',
    'arcstats_runtime_tuning']
tags = ['functional', 'arc']

[tests/functional/atime]
tests = ['atime_001_pos', 'atime_002_neg', 'root_atime_off', 'root_atime_on']
tags = ['functional', 'atime']

[tests/functional/bootfs]
tests = ['bootfs_001_pos', 'bootfs_002_neg', 'bootfs_003_pos',
    'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_006_pos', 'bootfs_007_pos',
    'bootfs_008_pos']
tags = ['functional', 'bootfs']

[tests/functional/btree]
tests = ['btree_positive', 'btree_negative']
tags = ['functional', 'btree']
pre =
post =

[tests/functional/cache]
tests = ['cache_001_pos', 'cache_002_pos', 'cache_003_pos', 'cache_004_neg',
    'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg',
    'cache_009_pos', 'cache_010_pos', 'cache_011_pos', 'cache_012_pos']
tags = ['functional', 'cache']

[tests/functional/cachefile]
tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
    'cachefile_004_pos']
tags = ['functional', 'cachefile']

[tests/functional/casenorm]
tests = ['case_all_values', 'norm_all_values', 'mixed_create_failure',
    'sensitive_none_lookup', 'sensitive_none_delete',
    'sensitive_formd_lookup', 'sensitive_formd_delete',
    'insensitive_none_lookup', 'insensitive_none_delete',
    'insensitive_formd_lookup', 'insensitive_formd_delete',
    'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete',
    'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete']
tags = ['functional', 'casenorm']

[tests/functional/channel_program/lua_core]
tests = ['tst.args_to_lua', 'tst.divide_by_zero', 'tst.exists',
    'tst.integer_illegal', 'tst.integer_overflow', 'tst.language_functions_neg',
    'tst.language_functions_pos', 'tst.large_prog', 'tst.libraries',
    'tst.memory_limit', 'tst.nested_neg', 'tst.nested_pos', 'tst.nvlist_to_lua',
    'tst.recursive_neg', 'tst.recursive_pos', 'tst.return_large',
    'tst.return_nvlist_neg', 'tst.return_nvlist_pos',
    'tst.return_recursive_table', 'tst.stack_gsub', 'tst.timeout']
tags = ['functional', 'channel_program', 'lua_core']

[tests/functional/channel_program/synctask_core]
tests = ['tst.destroy_fs', 'tst.destroy_snap', 'tst.get_count_and_limit',
    'tst.get_index_props', 'tst.get_mountpoint', 'tst.get_neg',
    'tst.get_number_props', 'tst.get_string_props', 'tst.get_type',
    'tst.get_userquota', 'tst.get_written', 'tst.inherit', 'tst.list_bookmarks',
    'tst.list_children', 'tst.list_clones', 'tst.list_holds',
    'tst.list_snapshots', 'tst.list_system_props',
    'tst.list_user_props', 'tst.parse_args_neg', 'tst.promote_conflict',
    'tst.promote_multiple', 'tst.promote_simple', 'tst.rollback_mult',
    'tst.rollback_one', 'tst.set_props', 'tst.snapshot_destroy', 'tst.snapshot_neg',
    'tst.snapshot_recursive', 'tst.snapshot_simple',
    'tst.bookmark.create', 'tst.bookmark.copy',
    'tst.terminate_by_signal']
tags = ['functional', 'channel_program', 'synctask_core']

[tests/functional/checksum]
tests = ['run_sha2_test', 'run_skein_test', 'filetest_001_pos',
    'filetest_002_pos']
tags = ['functional', 'checksum']

[tests/functional/clean_mirror]
tests = ['clean_mirror_001_pos', 'clean_mirror_002_pos',
    'clean_mirror_003_pos', 'clean_mirror_004_pos']
tags = ['functional', 'clean_mirror']

[tests/functional/cli_root/zdb]
tests = ['zdb_002_pos', 'zdb_003_pos', 'zdb_004_pos', 'zdb_005_pos',
    'zdb_006_pos', 'zdb_args_neg', 'zdb_args_pos',
    'zdb_block_size_histogram', 'zdb_checksum', 'zdb_decompress',
    'zdb_display_block', 'zdb_object_range_neg', 'zdb_object_range_pos',
    'zdb_objset_id', 'zdb_decompress_zstd']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']

[tests/functional/cli_root/zfs]
tests = ['zfs_001_neg', 'zfs_002_pos']
tags = ['functional', 'cli_root', 'zfs']

[tests/functional/cli_root/zfs_bookmark]
tests = ['zfs_bookmark_cliargs']
tags = ['functional', 'cli_root', 'zfs_bookmark']

[tests/functional/cli_root/zfs_change-key]
tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format',
    'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location',
    'zfs_change-key_pbkdf2iters', 'zfs_change-key_clones']
tags = ['functional', 'cli_root', 'zfs_change-key']

[tests/functional/cli_root/zfs_clone]
tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos',
    'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos',
    'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg',
    'zfs_clone_010_pos', 'zfs_clone_encrypted', 'zfs_clone_deeply_nested']
tags = ['functional', 'cli_root', 'zfs_clone']

[tests/functional/cli_root/zfs_copies]
tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_003_pos',
    'zfs_copies_004_neg', 'zfs_copies_005_neg', 'zfs_copies_006_pos']
tags = ['functional', 'cli_root', 'zfs_copies']

[tests/functional/cli_root/zfs_create]
tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
    'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos',
    'zfs_create_007_pos', 'zfs_create_008_neg', 'zfs_create_009_neg',
    'zfs_create_010_neg', 'zfs_create_011_pos', 'zfs_create_012_pos',
    'zfs_create_013_pos', 'zfs_create_014_pos', 'zfs_create_encrypted',
    'zfs_create_crypt_combos', 'zfs_create_dryrun', 'zfs_create_nomount',
    'zfs_create_verbose']
tags = ['functional', 'cli_root', 'zfs_create']

[tests/functional/cli_root/zfs_destroy]
tests = ['zfs_clone_livelist_condense_and_disable',
    'zfs_clone_livelist_condense_races', 'zfs_destroy_001_pos',
    'zfs_destroy_002_pos', 'zfs_destroy_003_pos',
    'zfs_destroy_004_pos', 'zfs_destroy_005_neg', 'zfs_destroy_006_neg',
    'zfs_destroy_007_neg', 'zfs_destroy_008_pos', 'zfs_destroy_009_pos',
    'zfs_destroy_010_pos', 'zfs_destroy_011_pos', 'zfs_destroy_012_pos',
    'zfs_destroy_013_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos',
    'zfs_destroy_016_pos', 'zfs_destroy_clone_livelist',
    'zfs_destroy_dev_removal', 'zfs_destroy_dev_removal_condense']
tags = ['functional', 'cli_root', 'zfs_destroy']

[tests/functional/cli_root/zfs_diff]
tests = ['zfs_diff_changes', 'zfs_diff_cliargs', 'zfs_diff_timestamp',
    'zfs_diff_types', 'zfs_diff_encrypted']
tags = ['functional', 'cli_root', 'zfs_diff']

[tests/functional/cli_root/zfs_get]
tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos',
    'zfs_get_004_pos', 'zfs_get_005_neg', 'zfs_get_006_neg', 'zfs_get_007_neg',
    'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg']
tags = ['functional', 'cli_root', 'zfs_get']

[tests/functional/cli_root/zfs_ids_to_path]
tests = ['zfs_ids_to_path_001_pos']
tags = ['functional', 'cli_root', 'zfs_ids_to_path']

[tests/functional/cli_root/zfs_inherit]
tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos',
    'zfs_inherit_mountpoint']
tags = ['functional', 'cli_root', 'zfs_inherit']

[tests/functional/cli_root/zfs_load-key]
tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file',
    'zfs_load-key_location', 'zfs_load-key_noop', 'zfs_load-key_recursive']
tags = ['functional', 'cli_root', 'zfs_load-key']

[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
    'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
    'zfs_mount_009_neg', 'zfs_mount_010_neg', 'zfs_mount_011_neg',
    'zfs_mount_012_pos', 'zfs_mount_all_001_pos', 'zfs_mount_encrypted',
    'zfs_mount_remount', 'zfs_mount_all_fail', 'zfs_mount_all_mountpoints',
    'zfs_mount_test_race']
tags = ['functional', 'cli_root', 'zfs_mount']

[tests/functional/cli_root/zfs_program]
tests = ['zfs_program_json']
tags = ['functional', 'cli_root', 'zfs_program']

[tests/functional/cli_root/zfs_promote]
tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos',
    'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg',
    'zfs_promote_007_neg', 'zfs_promote_008_pos', 'zfs_promote_encryptionroot']
tags = ['functional', 'cli_root', 'zfs_promote']

[tests/functional/cli_root/zfs_property]
tests = ['zfs_written_property_001_pos']
tags = ['functional', 'cli_root', 'zfs_property']

[tests/functional/cli_root/zfs_receive]
tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
    'zfs_receive_004_neg', 'zfs_receive_005_neg', 'zfs_receive_006_pos',
    'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg',
    'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos',
    'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
    'zfs_receive_016_pos', 'receive-o-x_props_override',
    'zfs_receive_from_encrypted', 'zfs_receive_to_encrypted',
    'zfs_receive_raw', 'zfs_receive_raw_incremental', 'zfs_receive_-e',
    'zfs_receive_raw_-d', 'zfs_receive_from_zstd', 'zfs_receive_new_props']
tags = ['functional', 'cli_root', 'zfs_receive']

[tests/functional/cli_root/zfs_rename]
tests = ['zfs_rename_001_pos', 'zfs_rename_002_pos', 'zfs_rename_003_pos',
    'zfs_rename_004_neg', 'zfs_rename_005_neg', 'zfs_rename_006_pos',
    'zfs_rename_007_pos', 'zfs_rename_008_pos', 'zfs_rename_009_neg',
    'zfs_rename_010_neg', 'zfs_rename_011_pos', 'zfs_rename_012_neg',
    'zfs_rename_013_pos', 'zfs_rename_014_neg', 'zfs_rename_encrypted_child',
    'zfs_rename_to_encrypted', 'zfs_rename_mountpoint', 'zfs_rename_nounmount']
tags = ['functional', 'cli_root', 'zfs_rename']

[tests/functional/cli_root/zfs_reservation]
tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
tags = ['functional', 'cli_root', 'zfs_reservation']

[tests/functional/cli_root/zfs_rollback]
tests = ['zfs_rollback_001_pos', 'zfs_rollback_002_pos',
    'zfs_rollback_003_neg', 'zfs_rollback_004_neg']
tags = ['functional', 'cli_root', 'zfs_rollback']

[tests/functional/cli_root/zfs_send]
tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
    'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_006_pos',
    'zfs_send_007_pos', 'zfs_send_encrypted', 'zfs_send_raw',
    'zfs_send_sparse', 'zfs_send-b']
tags = ['functional', 'cli_root', 'zfs_send']

[tests/functional/cli_root/zfs_set]
tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos',
    'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos',
    'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos',
    'mountpoint_002_pos', 'reservation_001_neg', 'user_property_002_pos',
    'share_mount_001_neg', 'snapdir_001_pos', 'onoffs_001_pos',
    'user_property_001_pos', 'user_property_003_neg', 'readonly_001_pos',
    'user_property_004_pos', 'version_001_neg', 'zfs_set_001_neg',
    'zfs_set_002_neg', 'zfs_set_003_neg', 'property_alias_001_pos',
    'mountpoint_003_pos', 'ro_props_001_pos', 'zfs_set_keylocation',
    'zfs_set_feature_activation']
tags = ['functional', 'cli_root', 'zfs_set']

[tests/functional/cli_root/zfs_share]
tests = ['zfs_share_001_pos', 'zfs_share_002_pos', 'zfs_share_003_pos',
    'zfs_share_004_pos', 'zfs_share_006_pos', 'zfs_share_008_neg',
    'zfs_share_010_neg', 'zfs_share_011_pos', 'zfs_share_concurrent_shares']
tags = ['functional', 'cli_root', 'zfs_share']

[tests/functional/cli_root/zfs_snapshot]
tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg',
    'zfs_snapshot_003_neg', 'zfs_snapshot_004_neg', 'zfs_snapshot_005_neg',
    'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 'zfs_snapshot_008_neg',
    'zfs_snapshot_009_pos']
tags = ['functional', 'cli_root', 'zfs_snapshot']

[tests/functional/cli_root/zfs_unload-key]
tests = ['zfs_unload-key', 'zfs_unload-key_all', 'zfs_unload-key_recursive']
tags = ['functional', 'cli_root', 'zfs_unload-key']

[tests/functional/cli_root/zfs_unmount]
tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos',
    'zfs_unmount_004_pos', 'zfs_unmount_005_pos', 'zfs_unmount_006_pos',
    'zfs_unmount_007_neg', 'zfs_unmount_008_neg', 'zfs_unmount_009_pos',
    'zfs_unmount_all_001_pos', 'zfs_unmount_nested', 'zfs_unmount_unload_keys']
tags = ['functional', 'cli_root', 'zfs_unmount']

[tests/functional/cli_root/zfs_unshare]
tests = ['zfs_unshare_001_pos', 'zfs_unshare_002_pos', 'zfs_unshare_003_pos',
    'zfs_unshare_004_neg', 'zfs_unshare_005_neg', 'zfs_unshare_006_pos',
    'zfs_unshare_007_pos']
tags = ['functional', 'cli_root', 'zfs_unshare']

[tests/functional/cli_root/zfs_upgrade]
tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos',
    'zfs_upgrade_004_pos', 'zfs_upgrade_005_pos', 'zfs_upgrade_006_neg',
    'zfs_upgrade_007_neg']
tags = ['functional', 'cli_root', 'zfs_upgrade']

[tests/functional/cli_root/zfs_wait]
tests = ['zfs_wait_deleteq']
tags = ['functional', 'cli_root', 'zfs_wait']

[tests/functional/cli_root/zpool]
tests = ['zpool_001_neg', 'zpool_002_pos', 'zpool_003_pos', 'zpool_colors']
tags = ['functional', 'cli_root', 'zpool']

[tests/functional/cli_root/zpool_add]
tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos',
    'zpool_add_004_pos', 'zpool_add_006_pos', 'zpool_add_007_neg',
    'zpool_add_008_neg', 'zpool_add_009_neg', 'zpool_add_010_pos',
    'add-o_ashift', 'add_prop_ashift', 'zpool_add_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_add']

[tests/functional/cli_root/zpool_attach]
tests = ['zpool_attach_001_neg', 'attach-o_ashift']
tags = ['functional', 'cli_root', 'zpool_attach']

[tests/functional/cli_root/zpool_clear]
tests = ['zpool_clear_001_pos', 'zpool_clear_002_neg', 'zpool_clear_003_neg',
    'zpool_clear_readonly']
tags = ['functional', 'cli_root', 'zpool_clear']

[tests/functional/cli_root/zpool_create]
tests = ['zpool_create_001_pos', 'zpool_create_002_pos',
    'zpool_create_003_pos', 'zpool_create_004_pos', 'zpool_create_005_pos',
    'zpool_create_006_pos', 'zpool_create_007_neg', 'zpool_create_008_pos',
    'zpool_create_009_neg', 'zpool_create_010_neg', 'zpool_create_011_neg',
    'zpool_create_012_neg', 'zpool_create_014_neg', 'zpool_create_015_neg',
    'zpool_create_017_neg', 'zpool_create_018_pos', 'zpool_create_019_pos',
    'zpool_create_020_pos', 'zpool_create_021_pos', 'zpool_create_022_pos',
    'zpool_create_023_neg', 'zpool_create_024_pos',
    'zpool_create_encrypted', 'zpool_create_crypt_combos',
    'zpool_create_draid_001_pos', 'zpool_create_draid_002_pos',
    'zpool_create_draid_003_pos', 'zpool_create_draid_004_pos',
    'zpool_create_features_001_pos', 'zpool_create_features_002_pos',
    'zpool_create_features_003_pos', 'zpool_create_features_004_neg',
    'zpool_create_features_005_pos',
    'create-o_ashift', 'zpool_create_tempname', 'zpool_create_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_create']

[tests/functional/cli_root/zpool_destroy]
tests = ['zpool_destroy_001_pos', 'zpool_destroy_002_pos',
    'zpool_destroy_003_neg']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_destroy']

[tests/functional/cli_root/zpool_detach]
tests = ['zpool_detach_001_neg']
tags = ['functional', 'cli_root', 'zpool_detach']

[tests/functional/cli_root/zpool_events]
tests = ['zpool_events_clear', 'zpool_events_cliargs', 'zpool_events_follow',
    'zpool_events_poolname', 'zpool_events_errors', 'zpool_events_duplicates']
tags = ['functional', 'cli_root', 'zpool_events']

[tests/functional/cli_root/zpool_export]
tests = ['zpool_export_001_pos', 'zpool_export_002_pos',
    'zpool_export_003_neg', 'zpool_export_004_pos']
tags = ['functional', 'cli_root', 'zpool_export']

[tests/functional/cli_root/zpool_get]
tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos',
    'zpool_get_004_neg', 'zpool_get_005_pos']
tags = ['functional', 'cli_root', 'zpool_get']

[tests/functional/cli_root/zpool_history]
tests = ['zpool_history_001_neg', 'zpool_history_002_pos']
tags = ['functional', 'cli_root', 'zpool_history']

[tests/functional/cli_root/zpool_import]
tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
    'zpool_import_003_pos', 'zpool_import_004_pos', 'zpool_import_005_pos',
    'zpool_import_006_pos', 'zpool_import_007_pos', 'zpool_import_008_pos',
    'zpool_import_009_neg', 'zpool_import_010_pos', 'zpool_import_011_neg',
    'zpool_import_012_pos', 'zpool_import_013_neg', 'zpool_import_014_pos',
    'zpool_import_015_pos', 'zpool_import_016_pos', 'zpool_import_017_pos',
    'zpool_import_features_001_pos', 'zpool_import_features_002_neg',
    'zpool_import_features_003_pos', 'zpool_import_missing_001_pos',
    'zpool_import_missing_002_pos', 'zpool_import_missing_003_pos',
    'zpool_import_rename_001_pos', 'zpool_import_all_001_pos',
    'zpool_import_encrypted', 'zpool_import_encrypted_load',
    'zpool_import_errata3', 'zpool_import_errata4',
    'import_cachefile_device_added',
    'import_cachefile_device_removed',
    'import_cachefile_device_replaced',
    'import_cachefile_mirror_attached',
    'import_cachefile_mirror_detached',
    'import_cachefile_shared_device',
    'import_devices_missing',
    'import_paths_changed',
    'import_rewind_config_changed',
    'import_rewind_device_replaced']
tags = ['functional', 'cli_root', 'zpool_import']
timeout = 1200

[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported',
    'zpool_labelclear_removed', 'zpool_labelclear_valid']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_labelclear']

[tests/functional/cli_root/zpool_initialize]
tests = ['zpool_initialize_attach_detach_add_remove',
    'zpool_initialize_import_export',
    'zpool_initialize_offline_export_import_online',
    'zpool_initialize_online_offline',
    'zpool_initialize_split',
    'zpool_initialize_start_and_cancel_neg',
    'zpool_initialize_start_and_cancel_pos',
    'zpool_initialize_suspend_resume',
    'zpool_initialize_unsupported_vdevs',
    'zpool_initialize_verify_checksums',
    'zpool_initialize_verify_initialized']
pre =
tags = ['functional', 'cli_root', 'zpool_initialize']

[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg',
    'zpool_offline_003_pos']
tags = ['functional', 'cli_root', 'zpool_offline']

[tests/functional/cli_root/zpool_online]
tests = ['zpool_online_001_pos', 'zpool_online_002_neg']
tags = ['functional', 'cli_root', 'zpool_online']

[tests/functional/cli_root/zpool_remove]
tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
    'zpool_remove_003_pos']
tags = ['functional', 'cli_root', 'zpool_remove']

[tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg', 'replace-o_ashift', 'replace_prop_ashift']
tags = ['functional', 'cli_root', 'zpool_replace']

[tests/functional/cli_root/zpool_resilver]
tests = ['zpool_resilver_bad_args', 'zpool_resilver_restart']
tags = ['functional', 'cli_root', 'zpool_resilver']

[tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
    'zpool_scrub_004_pos', 'zpool_scrub_005_pos',
    'zpool_scrub_encrypted_unloaded', 'zpool_scrub_print_repairing',
    'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies']
tags = ['functional', 'cli_root', 'zpool_scrub']

[tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg',
    'zpool_set_ashift', 'zpool_set_features']
tags = ['functional', 'cli_root', 'zpool_set']

[tests/functional/cli_root/zpool_split]
tests = ['zpool_split_cliargs', 'zpool_split_devices',
    'zpool_split_encryption', 'zpool_split_props', 'zpool_split_vdevs',
    'zpool_split_resilver', 'zpool_split_indirect',
    'zpool_split_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_split']

[tests/functional/cli_root/zpool_status]
tests = ['zpool_status_001_pos', 'zpool_status_002_pos']
tags = ['functional', 'cli_root', 'zpool_status']

[tests/functional/cli_root/zpool_sync]
tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg']
tags = ['functional', 'cli_root', 'zpool_sync']

[tests/functional/cli_root/zpool_trim]
tests = ['zpool_trim_attach_detach_add_remove',
    'zpool_trim_import_export', 'zpool_trim_multiple', 'zpool_trim_neg',
    'zpool_trim_offline_export_import_online', 'zpool_trim_online_offline',
    'zpool_trim_partial', 'zpool_trim_rate', 'zpool_trim_rate_neg',
    'zpool_trim_secure', 'zpool_trim_split', 'zpool_trim_start_and_cancel_neg',
    'zpool_trim_start_and_cancel_pos', 'zpool_trim_suspend_resume',
    'zpool_trim_unsupported_vdevs', 'zpool_trim_verify_checksums',
    'zpool_trim_verify_trimmed']
tags = ['functional', 'zpool_trim']

[tests/functional/cli_root/zpool_upgrade]
tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
    'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos',
    'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg',
    'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos',
    'zpool_upgrade_009_neg']
tags = ['functional', 'cli_root', 'zpool_upgrade']

[tests/functional/cli_root/zpool_wait]
tests = ['zpool_wait_discard', 'zpool_wait_freeing',
    'zpool_wait_initialize_basic', 'zpool_wait_initialize_cancel',
    'zpool_wait_initialize_flag', 'zpool_wait_multiple',
    'zpool_wait_no_activity', 'zpool_wait_remove', 'zpool_wait_remove_cancel',
    'zpool_wait_trim_basic', 'zpool_wait_trim_cancel', 'zpool_wait_trim_flag',
    'zpool_wait_usage']
tags = ['functional', 'cli_root', 'zpool_wait']

[tests/functional/cli_root/zpool_wait/scan]
tests = ['zpool_wait_replace_cancel', 'zpool_wait_rebuild',
    'zpool_wait_resilver', 'zpool_wait_scrub_cancel',
    'zpool_wait_replace', 'zpool_wait_scrub_basic', 'zpool_wait_scrub_flag']
tags = ['functional', 'cli_root', 'zpool_wait']

[tests/functional/cli_user/misc]
tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
    'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
    'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg',
    'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg',
    'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg',
    'zfs_share_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
    'zfs_unmount_001_neg', 'zfs_unshare_001_neg', 'zfs_upgrade_001_neg',
    'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg',
    'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg',
    'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
    'zpool_history_001_neg', 'zpool_import_001_neg', 'zpool_import_002_neg',
    'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg',
    'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
    'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
    'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege']
user =
tags = ['functional', 'cli_user', 'misc']

[tests/functional/cli_user/zfs_list]
tests = ['zfs_list_001_pos', 'zfs_list_002_pos', 'zfs_list_003_pos',
    'zfs_list_004_neg', 'zfs_list_007_pos', 'zfs_list_008_neg']
user =
tags = ['functional', 'cli_user', 'zfs_list']

[tests/functional/cli_user/zpool_iostat]
tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos',
    'zpool_iostat_003_neg', 'zpool_iostat_004_pos',
    'zpool_iostat_005_pos', 'zpool_iostat_-c_disable',
    'zpool_iostat_-c_homedir', 'zpool_iostat_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_iostat']

[tests/functional/cli_user/zpool_list]
tests = ['zpool_list_001_pos', 'zpool_list_002_neg']
user =
tags = ['functional', 'cli_user', 'zpool_list']

[tests/functional/cli_user/zpool_status]
tests = ['zpool_status_003_pos', 'zpool_status_-c_disable',
    'zpool_status_-c_homedir', 'zpool_status_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_status']

[tests/functional/compression]
tests = ['compress_001_pos', 'compress_002_pos', 'compress_003_pos',
    'l2arc_compressed_arc', 'l2arc_compressed_arc_disabled',
    'l2arc_encrypted', 'l2arc_encrypted_no_compressed_arc']
tags = ['functional', 'compression']

[tests/functional/cp_files]
tests = ['cp_files_001_pos']
tags = ['functional', 'cp_files']

[tests/functional/ctime]
tests = ['ctime_001_pos']
tags = ['functional', 'ctime']

[tests/functional/delegate]
tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos', 'zfs_allow_003_pos',
    'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos',
    'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg',
    'zfs_allow_010_pos', 'zfs_allow_011_neg', 'zfs_allow_012_neg',
    'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos',
    'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos',
    'zfs_unallow_007_neg', 'zfs_unallow_008_neg']
tags = ['functional', 'delegate']

[tests/functional/exec]
tests = ['exec_001_pos', 'exec_002_neg']
tags = ['functional', 'exec']

[tests/functional/features/async_destroy]
tests = ['async_destroy_001_pos']
tags = ['functional', 'features', 'async_destroy']

[tests/functional/features/large_dnode]
tests = ['large_dnode_001_pos', 'large_dnode_003_pos', 'large_dnode_004_neg',
    'large_dnode_005_pos', 'large_dnode_007_neg', 'large_dnode_009_pos']
tags = ['functional', 'features', 'large_dnode']

[tests/functional/grow]
pre =
post =
tests = ['grow_pool_001_pos', 'grow_replicas_001_pos']
tags = ['functional', 'grow']

[tests/functional/history]
tests = ['history_001_pos', 'history_002_pos', 'history_003_pos',
    'history_004_pos', 'history_005_neg', 'history_006_neg',
    'history_007_pos', 'history_008_pos', 'history_009_pos',
    'history_010_pos']
tags = ['functional', 'history']

[tests/functional/hkdf]
tests = ['run_hkdf_test']
tags = ['functional', 'hkdf']

[tests/functional/inheritance]
tests = ['inherit_001_pos']
pre =
tags = ['functional', 'inheritance']

[tests/functional/io]
tests = ['sync', 'psync', 'posixaio', 'mmap']
tags = ['functional', 'io']

[tests/functional/inuse]
tests = ['inuse_004_pos', 'inuse_005_pos', 'inuse_008_pos', 'inuse_009_pos']
post =
tags = ['functional', 'inuse']

[tests/functional/large_files]
tests = ['large_files_001_pos', 'large_files_002_pos']
tags = ['functional', 'large_files']

[tests/functional/largest_pool]
tests = ['largest_pool_001_pos']
pre =
post =
tags = ['functional', 'largest_pool']

[tests/functional/limits]
tests = ['filesystem_count', 'filesystem_limit', 'snapshot_count',
    'snapshot_limit']
tags = ['functional', 'limits']

[tests/functional/link_count]
tests = ['link_count_001', 'link_count_root_inode']
tags = ['functional', 'link_count']

[tests/functional/migration]
tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
    'migration_004_pos', 'migration_005_pos', 'migration_006_pos',
    'migration_007_pos', 'migration_008_pos', 'migration_009_pos',
    'migration_010_pos', 'migration_011_pos', 'migration_012_pos']
tags = ['functional', 'migration']

[tests/functional/mmap]
tests = ['mmap_write_001_pos', 'mmap_read_001_pos']
tags = ['functional', 'mmap']

[tests/functional/mount]
tests = ['umount_001', 'umountall_001']
tags = ['functional', 'mount']

[tests/functional/mv_files]
tests = ['mv_files_001_pos', 'mv_files_002_pos', 'random_creation']
tags = ['functional', 'mv_files']

[tests/functional/nestedfs]
tests = ['nestedfs_001_pos']
tags = ['functional', 'nestedfs']

[tests/functional/no_space]
tests = ['enospc_001_pos', 'enospc_002_pos', 'enospc_003_pos',
    'enospc_df']
tags = ['functional', 'no_space']

[tests/functional/nopwrite]
tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative',
    'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync',
    'nopwrite_varying_compression', 'nopwrite_volume']
tags = ['functional', 'nopwrite']

[tests/functional/online_offline]
tests = ['online_offline_001_pos', 'online_offline_002_neg',
    'online_offline_003_neg']
tags = ['functional', 'online_offline']

[tests/functional/pool_checkpoint]
tests = ['checkpoint_after_rewind', 'checkpoint_big_rewind',
    'checkpoint_capacity', 'checkpoint_conf_change', 'checkpoint_discard',
    'checkpoint_discard_busy', 'checkpoint_discard_many',
    'checkpoint_indirect', 'checkpoint_invalid', 'checkpoint_lun_expsz',
    'checkpoint_open', 'checkpoint_removal', 'checkpoint_rewind',
    'checkpoint_ro_rewind', 'checkpoint_sm_scale', 'checkpoint_twice',
    'checkpoint_vdev_add', 'checkpoint_zdb', 'checkpoint_zhack_feat']
tags = ['functional', 'pool_checkpoint']
timeout = 1800

[tests/functional/pool_names]
tests = ['pool_names_001_pos', 'pool_names_002_neg']
pre =
post =
tags = ['functional', 'pool_names']

[tests/functional/poolversion]
tests = ['poolversion_001_pos', 'poolversion_002_pos']
tags = ['functional', 'poolversion']

[tests/functional/pyzfs]
tests = ['pyzfs_unittest']
pre =
post =
tags = ['functional', 'pyzfs']

[tests/functional/quota]
tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos',
    'quota_004_pos', 'quota_005_pos', 'quota_006_neg']
tags = ['functional', 'quota']

[tests/functional/redacted_send]
tests = ['redacted_compressed', 'redacted_contents', 'redacted_deleted',
    'redacted_disabled_feature', 'redacted_embedded', 'redacted_holes',
    'redacted_incrementals', 'redacted_largeblocks', 'redacted_many_clones',
    'redacted_mixed_recsize', 'redacted_mounts', 'redacted_negative',
    'redacted_origin', 'redacted_props', 'redacted_resume', 'redacted_size',
    'redacted_volume']
tags = ['functional', 'redacted_send']

[tests/functional/raidz]
tests = ['raidz_001_neg', 'raidz_002_pos', 'raidz_003_pos', 'raidz_004_pos']
tags = ['functional', 'raidz']

[tests/functional/redundancy]
tests = ['redundancy_draid1', 'redundancy_draid2', 'redundancy_draid3',
    'redundancy_draid_spare1', 'redundancy_draid_spare2',
    'redundancy_draid_spare3', 'redundancy_mirror', 'redundancy_raidz',
    'redundancy_raidz1', 'redundancy_raidz2', 'redundancy_raidz3',
    'redundancy_stripe']
tags = ['functional', 'redundancy']

[tests/functional/refquota]
tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
    'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg',
    'refquota_007_neg', 'refquota_008_neg']
tags = ['functional', 'refquota']

[tests/functional/refreserv]
tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
    'refreserv_004_pos', 'refreserv_005_pos', 'refreserv_multi_raidz',
    'refreserv_raidz']
tags = ['functional', 'refreserv']

[tests/functional/removal]
pre =
tests = ['removal_all_vdev', 'removal_cancel', 'removal_check_space',
    'removal_condense_export', 'removal_multiple_indirection',
    'removal_nopwrite', 'removal_remap_deadlists',
    'removal_resume_export', 'removal_sanity', 'removal_with_add',
    'removal_with_create_fs', 'removal_with_dedup',
    'removal_with_errors', 'removal_with_export',
    'removal_with_ganging', 'removal_with_faulted',
    'removal_with_remove', 'removal_with_scrub', 'removal_with_send',
    'removal_with_send_recv', 'removal_with_snapshot',
    'removal_with_write', 'removal_with_zdb', 'remove_expanded',
    'remove_mirror', 'remove_mirror_sanity', 'remove_raidz',
    'remove_indirect', 'remove_attach_mirror']
tags = ['functional', 'removal']

[tests/functional/rename_dirs]
tests = ['rename_dirs_001_pos']
tags = ['functional', 'rename_dirs']

[tests/functional/replacement]
tests = ['attach_import', 'attach_multiple', 'attach_rebuild',
    'attach_resilver', 'detach', 'rebuild_disabled_feature',
    'rebuild_multiple', 'rebuild_raidz', 'replace_import', 'replace_rebuild',
    'replace_resilver', 'resilver_restart_001', 'resilver_restart_002',
    'scrub_cancel']
tags = ['functional', 'replacement']

[tests/functional/reservation]
|
|
|
|
tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
|
|
|
|
'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos',
|
|
|
|
'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos',
|
|
|
|
'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos',
|
|
|
|
'reservation_013_pos', 'reservation_014_pos', 'reservation_015_pos',
|
|
|
|
'reservation_016_pos', 'reservation_017_pos', 'reservation_018_pos',
|
|
|
|
'reservation_019_pos', 'reservation_020_pos', 'reservation_021_neg',
|
|
|
|
'reservation_022_pos']
|
|
|
|
tags = ['functional', 'reservation']
|
|
|
|
|
|
|
|
[tests/functional/rootpool]
|
|
|
|
tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos']
|
|
|
|
tags = ['functional', 'rootpool']
|
|
|
|
|
|
|
|
[tests/functional/rsend]
|
2020-04-23 17:06:57 +00:00
|
|
|
tests = ['recv_dedup', 'recv_dedup_encrypted_zvol', 'rsend_001_pos',
|
|
|
|
'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', 'rsend_005_pos',
|
|
|
|
'rsend_006_pos', 'rsend_007_pos', 'rsend_008_pos', 'rsend_009_pos',
|
|
|
|
'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos', 'rsend_013_pos',
|
|
|
|
'rsend_014_pos', 'rsend_016_neg', 'rsend_019_pos', 'rsend_020_pos',
|
2019-10-09 17:39:26 +00:00
|
|
|
'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos',
|
|
|
|
'send-c_verify_ratio', 'send-c_verify_contents', 'send-c_props',
|
|
|
|
'send-c_incremental', 'send-c_volume', 'send-c_zstreamdump',
|
|
|
|
'send-c_lz4_disabled', 'send-c_recv_lz4_disabled',
|
2020-04-23 17:06:57 +00:00
|
|
|
'send-c_mixed_compression', 'send-c_stream_size_estimate',
|
2019-10-09 17:39:26 +00:00
|
|
|
'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize',
|
File incorrectly zeroed when receiving incremental stream that toggles -L
Background:
By increasing the recordsize property above the default of 128KB, a
filesystem may have "large" blocks. By default, a send stream of such a
filesystem does not contain large WRITE records, instead it decreases
objects' block sizes to 128KB and splits the large blocks into 128KB
blocks, allowing the large-block filesystem to be received by a system
that does not support the `large_blocks` feature. A send stream
generated by `zfs send -L` (or `--large-block`) preserves the large
block size on the receiving system, by using large WRITE records.
When receiving an incremental send stream for a filesystem with large
blocks, if the send stream's -L flag was toggled, a bug is encountered
in which the file's contents are incorrectly zeroed out. The contents
of any blocks that were not modified by this send stream will be lost.
"Toggled" means that the previous send used `-L`, but this incremental
does not use `-L` (-L to no-L); or that the previous send did not use
`-L`, but this incremental does use `-L` (no-L to -L).
Changes:
This commit addresses the problem with several changes to the semantics
of zfs send/receive:
1. "-L to no-L" incrementals are rejected. If the previous send used
`-L`, but this incremental does not use `-L`, the `zfs receive` will
fail with this error message:
incremental send stream requires -L (--large-block), to match
previous receive.
2. "no-L to -L" incrementals are handled correctly, preserving the
smaller (128KB) block size of any already-received files that used large
blocks on the sending system but were split by `zfs send` without the
`-L` flag.
3. A new send stream format flag is added, `SWITCH_TO_LARGE_BLOCKS`.
This feature indicates that we can correctly handle "no-L to -L"
incrementals. This flag is currently not set on any send streams. In
the future, we intend for incremental send streams of snapshots that
have large blocks to use `-L` by default, and these streams will also
have the `SWITCH_TO_LARGE_BLOCKS` feature set. This ensures that streams
from the default use of `zfs send` won't encounter the bug mentioned
above, because they can't be received by software with the bug.
Implementation notes:
To facilitate accessing the ZPL's generation number,
`zfs_space_delta_cb()` has been renamed to `zpl_get_file_info()` and
restructured to fill in a struct with ZPL-specific info including owner
and generation.
In the "no-L to -L" case, if this is a compressed send stream (from
`zfs send -cL`), large WRITE records that are being written to small
(128KB) blocksize files need to be decompressed so that they can be
written split up into multiple blocks. The zio pipeline will recompress
each smaller block individually.
A new test case, `send-L_toggle`, is added, which tests the "no-L to -L"
case and verifies that we get an error for the "-L to no-L" case.
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #6224
Closes #10383
2020-06-09 17:41:01 +00:00
|
|
|
'send-c_recv_dedup', 'send-L_toggle', 'send_encrypted_hierarchy',
|
2019-10-09 17:39:26 +00:00
|
|
|
'send_encrypted_props', 'send_encrypted_truncated_files',
|
|
|
|
'send_freeobjects', 'send_realloc_files',
|
|
|
|
'send_realloc_encrypted_files', 'send_spill_block', 'send_holds',
|
2020-04-23 17:06:57 +00:00
|
|
|
'send_hole_birth', 'send_mixed_raw', 'send-wR_encrypted_zvol',
|
2020-09-30 20:19:49 +00:00
|
|
|
'send_partial_dataset', 'send_invalid']
|
2019-10-09 17:39:26 +00:00
|
|
|
tags = ['functional', 'rsend']
|
|
|
|
|
|
|
|
[tests/functional/scrub_mirror]
|
|
|
|
tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos',
|
|
|
|
'scrub_mirror_003_pos', 'scrub_mirror_004_pos']
|
|
|
|
tags = ['functional', 'scrub_mirror']
|
|
|
|
|
|
|
|
[tests/functional/slog]
|
|
|
|
tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos',
|
|
|
|
'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg',
|
|
|
|
'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_012_neg',
|
|
|
|
'slog_013_pos', 'slog_014_pos', 'slog_015_neg', 'slog_replay_fs_001',
|
|
|
|
'slog_replay_fs_002', 'slog_replay_volume']
|
|
|
|
tags = ['functional', 'slog']
|
|
|
|
|
|
|
|
[tests/functional/snapshot]
|
|
|
|
tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
|
|
|
|
'rollback_003_pos', 'snapshot_001_pos', 'snapshot_002_pos',
|
|
|
|
'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos',
|
|
|
|
'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos',
|
|
|
|
'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos',
|
|
|
|
'snapshot_012_pos', 'snapshot_013_pos', 'snapshot_014_pos',
|
|
|
|
'snapshot_017_pos']
|
|
|
|
tags = ['functional', 'snapshot']
|
|
|
|
|
|
|
|
[tests/functional/snapused]
|
|
|
|
tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos',
|
|
|
|
'snapused_004_pos', 'snapused_005_pos']
|
|
|
|
tags = ['functional', 'snapused']
|
|
|
|
|
|
|
|
[tests/functional/sparse]
|
|
|
|
tests = ['sparse_001_pos']
|
|
|
|
tags = ['functional', 'sparse']
|
|
|
|
|
|
|
|
[tests/functional/suid]
|
|
|
|
tests = ['suid_write_to_suid', 'suid_write_to_sgid', 'suid_write_to_suid_sgid',
|
|
|
|
'suid_write_to_none']
|
|
|
|
tags = ['functional', 'suid']
|
|
|
|
|
|
|
|
[tests/functional/threadsappend]
|
|
|
|
tests = ['threadsappend_001_pos']
|
|
|
|
tags = ['functional', 'threadsappend']
|
|
|
|
|
|
|
|
[tests/functional/trim]
|
|
|
|
tests = ['autotrim_integrity', 'autotrim_config', 'autotrim_trim_integrity',
|
2020-06-09 17:15:08 +00:00
|
|
|
'trim_integrity', 'trim_config', 'trim_l2arc']
|
2019-10-09 17:39:26 +00:00
|
|
|
tags = ['functional', 'trim']
|
|
|
|
|
|
|
|
[tests/functional/truncate]
|
|
|
|
tests = ['truncate_001_pos', 'truncate_002_pos', 'truncate_timestamps']
|
|
|
|
tags = ['functional', 'truncate']
|
|
|
|
|
|
|
|
[tests/functional/upgrade]
|
|
|
|
tests = ['upgrade_userobj_001_pos', 'upgrade_readonly_pool']
|
|
|
|
tags = ['functional', 'upgrade']
|
|
|
|
|
|
|
|
[tests/functional/userquota]
|
|
|
|
tests = [
|
|
|
|
'userquota_001_pos', 'userquota_002_pos', 'userquota_003_pos',
|
|
|
|
'userquota_004_pos', 'userquota_005_neg', 'userquota_006_pos',
|
|
|
|
'userquota_007_pos', 'userquota_008_pos', 'userquota_009_pos',
|
|
|
|
'userquota_010_pos', 'userquota_011_pos', 'userquota_012_neg',
|
2020-12-04 22:34:29 +00:00
|
|
|
'userspace_001_pos', 'userspace_002_pos', 'userspace_encrypted',
|
|
|
|
'userspace_send_encrypted']
|
2019-10-09 17:39:26 +00:00
|
|
|
tags = ['functional', 'userquota']
|
|
|
|
|
|
|
|
[tests/functional/vdev_zaps]
|
|
|
|
tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos',
|
|
|
|
'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos',
|
|
|
|
'vdev_zaps_007_pos']
|
|
|
|
tags = ['functional', 'vdev_zaps']
|
|
|
|
|
|
|
|
[tests/functional/write_dirs]
|
|
|
|
tests = ['write_dirs_001_pos', 'write_dirs_002_pos']
|
|
|
|
tags = ['functional', 'write_dirs']
|
|
|
|
|
2020-01-24 01:14:40 +00:00
|
|
|
[tests/functional/xattr]
|
|
|
|
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
|
|
|
|
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
|
|
|
|
'xattr_011_pos', 'xattr_012_pos', 'xattr_013_pos']
|
|
|
|
tags = ['functional', 'xattr']
|
|
|
|
|
2019-10-09 17:39:26 +00:00
|
|
|
[tests/functional/zvol/zvol_ENOSPC]
|
|
|
|
tests = ['zvol_ENOSPC_001_pos']
|
|
|
|
tags = ['functional', 'zvol', 'zvol_ENOSPC']
|
|
|
|
|
|
|
|
[tests/functional/zvol/zvol_cli]
|
|
|
|
tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg']
|
|
|
|
tags = ['functional', 'zvol', 'zvol_cli']
|
|
|
|
|
|
|
|
[tests/functional/zvol/zvol_misc]
|
2020-01-03 17:08:23 +00:00
|
|
|
tests = ['zvol_misc_002_pos', 'zvol_misc_hierarchy', 'zvol_misc_rename_inuse',
|
|
|
|
'zvol_misc_snapdev', 'zvol_misc_volmode', 'zvol_misc_zil']
|
2019-10-09 17:39:26 +00:00
|
|
|
tags = ['functional', 'zvol', 'zvol_misc']
|
|
|
|
|
|
|
|
[tests/functional/zvol/zvol_swap]
|
2020-01-27 21:29:25 +00:00
|
|
|
tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos', 'zvol_swap_004_pos']
|
2019-10-09 17:39:26 +00:00
|
|
|
tags = ['functional', 'zvol', 'zvol_swap']
|
|
|
|
|
|
|
|
[tests/functional/libzfs]
|
|
|
|
tests = ['many_fds', 'libzfs_input']
|
|
|
|
tags = ['functional', 'libzfs']
|
|
|
|
|
|
|
|
[tests/functional/log_spacemap]
|
|
|
|
tests = ['log_spacemap_import_logs']
|
|
|
|
pre =
|
|
|
|
post =
|
|
|
|
tags = ['functional', 'log_spacemap']
|
Add L2ARC arcstats for MFU/MRU buffers and buffer content type
Currently the ARC state (MFU/MRU) of cached L2ARC buffer and their
content type is unknown. Knowing this information may prove beneficial
in adjusting the L2ARC caching policy.
This commit adds L2ARC arcstats that display the aligned size
(in bytes) of L2ARC buffers according to their content type
(data/metadata) and according to their ARC state (MRU/MFU or
prefetch). It also expands the existing evict_l2_eligible arcstat to
differentiate between MFU and MRU buffers.
L2ARC caches buffers from the MRU and MFU lists of ARC. Upon caching a
buffer, its ARC state (MRU/MFU) is stored in the L2 header
(b_arcs_state). The l2_m{f,r}u_asize arcstats reflect the aligned size
(in bytes) of L2ARC buffers according to their ARC state (based on
b_arcs_state). We also account for the case where an L2ARC and ARC
cached MRU or MRU_ghost buffer transitions to MFU. The l2_prefetch_asize
reflects the aligned size (in bytes) of L2ARC buffers that were cached
while they had the prefetch flag set in ARC. This is dynamically updated
as the prefetch flag of L2ARC buffers changes.
When buffers are evicted from ARC, if they are determined to be L2ARC
eligible then their logical size is recorded in
evict_l2_eligible_m{r,f}u arcstats according to their ARC state upon
eviction.
Persistent L2ARC:
When committing an L2ARC buffer to a log block (L2ARC metadata) its
b_arcs_state and prefetch flag are also stored. If the buffer changes
its arcstate or prefetch flag this is reflected in the above arcstats.
However, the L2ARC metadata cannot currently be updated to reflect this
change.
Example: L2ARC caches an MRU buffer. L2ARC metadata and arcstats count
this as an MRU buffer. The buffer transitions to MFU. The arcstats are
updated to reflect this. Upon pool re-import or on/offlining the L2ARC
device the arcstats are cleared and the buffer will now be counted as an
MRU buffer, as the L2ARC metadata were not updated.
Bug fix:
- If l2arc_noprefetch is set, arc_read_done clears the L2CACHE flag of
an ARC buffer. However, prefetches may be issued in a way that
arc_read_done() is bypassed. Instead, move the related code in
l2arc_write_eligible() to account for those cases too.
Also add a test and update manpages for l2arc_mfuonly module parameter,
and update the manpages and code comments for l2arc_noprefetch.
Move persist_l2arc tests to l2arc.
Reviewed-by: Ryan Moeller <freqlabs@FreeBSD.org>
Reviewed-by: Richard Elling <Richard.Elling@RichardElling.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: George Amanakis <gamanakis@gmail.com>
Closes #10743
2020-09-14 17:10:44 +00:00
|
|
|
|
|
|
|
[tests/functional/l2arc]
|
2020-10-20 18:39:52 +00:00
|
|
|
tests = ['l2arc_arcstats_pos', 'l2arc_mfuonly_pos', 'l2arc_l2miss_pos',
|
Add L2ARC arcstats for MFU/MRU buffers and buffer content type
Currently the ARC state (MFU/MRU) of cached L2ARC buffer and their
content type is unknown. Knowing this information may prove beneficial
in adjusting the L2ARC caching policy.
This commit adds L2ARC arcstats that display the aligned size
(in bytes) of L2ARC buffers according to their content type
(data/metadata) and according to their ARC state (MRU/MFU or
prefetch). It also expands the existing evict_l2_eligible arcstat to
differentiate between MFU and MRU buffers.
L2ARC caches buffers from the MRU and MFU lists of ARC. Upon caching a
buffer, its ARC state (MRU/MFU) is stored in the L2 header
(b_arcs_state). The l2_m{f,r}u_asize arcstats reflect the aligned size
(in bytes) of L2ARC buffers according to their ARC state (based on
b_arcs_state). We also account for the case where an L2ARC and ARC
cached MRU or MRU_ghost buffer transitions to MFU. The l2_prefetch_asize
reflects the aligned size (in bytes) of L2ARC buffers that were cached
while they had the prefetch flag set in ARC. This is dynamically updated
as the prefetch flag of L2ARC buffers changes.
When buffers are evicted from ARC, if they are determined to be L2ARC
eligible then their logical size is recorded in
evict_l2_eligible_m{r,f}u arcstats according to their ARC state upon
eviction.
Persistent L2ARC:
When committing an L2ARC buffer to a log block (L2ARC metadata) its
b_arcs_state and prefetch flag are also stored. If the buffer changes
its arcstate or prefetch flag this is reflected in the above arcstats.
However, the L2ARC metadata cannot currently be updated to reflect this
change.
Example: L2ARC caches an MRU buffer. L2ARC metadata and arcstats count
this as an MRU buffer. The buffer transitions to MFU. The arcstats are
updated to reflect this. Upon pool re-import or on/offlining the L2ARC
device the arcstats are cleared and the buffer will now be counted as an
MRU buffer, as the L2ARC metadata were not updated.
Bug fix:
- If l2arc_noprefetch is set, arc_read_done clears the L2CACHE flag of
an ARC buffer. However, prefetches may be issued in a way that
arc_read_done() is bypassed. Instead, move the related code in
l2arc_write_eligible() to account for those cases too.
Also add a test and update manpages for l2arc_mfuonly module parameter,
and update the manpages and code comments for l2arc_noprefetch.
Move persist_l2arc tests to l2arc.
Reviewed-by: Ryan Moeller <freqlabs@FreeBSD.org>
Reviewed-by: Richard Elling <Richard.Elling@RichardElling.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: George Amanakis <gamanakis@gmail.com>
Closes #10743
2020-09-14 17:10:44 +00:00
|
|
|
'persist_l2arc_001_pos', 'persist_l2arc_002_pos',
|
|
|
|
'persist_l2arc_003_neg', 'persist_l2arc_004_pos', 'persist_l2arc_005_pos',
|
|
|
|
'persist_l2arc_006_pos', 'persist_l2arc_007_pos', 'persist_l2arc_008_pos']
|
|
|
|
tags = ['functional', 'l2arc']
|
|
|
|
|
2020-10-09 16:29:21 +00:00
|
|
|
[tests/functional/zpool_influxdb]
|
|
|
|
tests = ['zpool_influxdb']
|
|
|
|
tags = ['functional', 'zpool_influxdb']
|