ZTS: Adopt OpenZFS test analysis script

Adopt and extend the OpenZFS ZTS results analysis script for use
with ZFS on Linux.  This allows for automatic analysis of tests
which may be skipped for a variety of reasons or which are not
entirely reliable.

In addition to the list of 'known' failures, which have been updated
for ZFS on Linux, there is a new 'maybe' section.  This mapping
includes tests which might be correctly skipped depending on the
test environment.  This may be because of a missing dependency or
lack of required kernel support.  This list also includes tests
which normally pass but might on occasion fail for a harmless
reason.

The script was also extended to include a reason for why a given test
might be skipped or may fail.  The reason will be included after
the test in the "results other than PASS that are expected" section.
For failures it is preferable to set the reason to the GitHub issue
number and for skipped tests several generic reasons are available.
You may also specify a custom reason if needed.

All tests were added back in to the linux.run file even if they are
expected to fail.  There is value in running tests which may not
pass, the expected results for these tests has been encoded in
the new analysis script.

All tests which were disabled because they ran more slowly on a
32-bit system have been re-enabled.  Developers working on 32-bit
systems should assess what is reasonable for their environment.

The unnecessary dependency on physical block devices was removed for
the checksum, grow_pool, and grow_replicas test groups so they are
no longer skipped.  Updated the filetest_001_pos test case to run
properly now that it is enabled and moved the grow tests in to a
single directory.

Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #7638
This commit is contained in:
Brian Behlendorf 2018-06-20 14:03:13 -07:00 committed by GitHub
parent 1c38ac61e1
commit e4a3297a04
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
59 changed files with 587 additions and 591 deletions

View File

@ -271,8 +271,7 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/features/async_destroy/Makefile
tests/zfs-tests/tests/functional/features/large_dnode/Makefile
tests/zfs-tests/tests/functional/features/Makefile
tests/zfs-tests/tests/functional/grow_pool/Makefile
tests/zfs-tests/tests/functional/grow_replicas/Makefile
tests/zfs-tests/tests/functional/grow/Makefile
tests/zfs-tests/tests/functional/history/Makefile
tests/zfs-tests/tests/functional/hkdf/Makefile
tests/zfs-tests/tests/functional/inheritance/Makefile

View File

@ -16,5 +16,6 @@ export ZFS_SH=${ZFS_SH:-$SCRIPT_DIR/zfs.sh}
# Test Suite
export RUNFILE_DIR=${RUNFILE_DIR:-$ZTS_DIR/runfiles}
export TEST_RUNNER=${TEST_RUNNER:-$ZTS_DIR/test-runner/bin/test-runner.py}
export ZTS_REPORT=${ZTS_REPORT:-$ZTS_DIR/test-runner/bin/zts-report.py}
export STF_TOOLS=${STF_TOOLS:-$ZTS_DIR/test-runner}
export STF_SUITE=${STF_SUITE:-$ZTS_DIR/zfs-tests}

View File

@ -575,12 +575,31 @@ export __ZFS_POOL_EXCLUDE
export TESTFAIL_CALLBACKS
export PATH=$STF_PATH
RESULTS_FILE=$(mktemp -u -t zts-results.XXXX -p "$FILEDIR")
REPORT_FILE=$(mktemp -u -t zts-report.XXXX -p "$FILEDIR")
#
# Run all the tests as specified.
#
msg "${TEST_RUNNER} ${QUIET} -c ${RUNFILE} -T ${TAGS} -i ${STF_SUITE}" \
"-I ${ITERATIONS}"
${TEST_RUNNER} ${QUIET} -c "${RUNFILE}" -T "${TAGS}" -i "${STF_SUITE}" \
-I "${ITERATIONS}"
-I "${ITERATIONS}" 2>&1 | tee "$RESULTS_FILE"
#
# Analyze the results.
#
set -o pipefail
${ZTS_REPORT} "$RESULTS_FILE" | tee "$REPORT_FILE"
RESULT=$?
echo
set +o pipefail
RESULTS_DIR=$(awk '/^Log directory/ { print $3 }' "$RESULTS_FILE")
if [ -d "$RESULTS_DIR" ]; then
cat "$RESULTS_FILE" "$REPORT_FILE" >"$RESULTS_DIR/results"
fi
rm -f "$RESULTS_FILE" "$REPORT_FILE"
if [ ${#SINGLETEST[@]} -ne 0 ]; then
rm -f "$RUNFILE" &>/dev/null

View File

@ -20,11 +20,8 @@ post = cleanup
outputdir = /var/tmp/test_results
tags = ['functional']
# Update to use ZFS_ACL_* variables and user_run helper.
# posix_001_pos
# posix_002_pos
[tests/functional/acl/posix]
tests = ['posix_003_pos']
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos']
tags = ['functional', 'acl', 'posix']
[tests/functional/arc]
@ -52,14 +49,14 @@ tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
'cachefile_004_pos']
tags = ['functional', 'cachefile']
# 'sensitive_none_lookup', 'sensitive_none_delete',
# 'sensitive_formd_lookup', 'sensitive_formd_delete',
# 'insensitive_none_lookup', 'insensitive_none_delete',
# 'insensitive_formd_lookup', 'insensitive_formd_delete',
# 'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete',
# 'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete']
[tests/functional/casenorm]
tests = ['case_all_values', 'norm_all_values', 'mixed_create_failure']
tests = ['case_all_values', 'norm_all_values', 'mixed_create_failure',
'sensitive_none_lookup', 'sensitive_none_delete',
'sensitive_formd_lookup', 'sensitive_formd_delete',
'insensitive_none_lookup', 'insensitive_none_delete',
'insensitive_formd_lookup', 'insensitive_formd_delete',
'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete',
'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete']
tags = ['functional', 'casenorm']
[tests/functional/channel_program/lua_core]
@ -89,7 +86,8 @@ tests = ['chattr_001_pos', 'chattr_002_neg']
tags = ['functional', 'chattr']
[tests/functional/checksum]
tests = ['run_edonr_test', 'run_sha2_test', 'run_skein_test', 'filetest_001_pos']
tests = ['run_edonr_test', 'run_sha2_test', 'run_skein_test',
'filetest_001_pos']
tags = ['functional', 'checksum']
[tests/functional/clean_mirror]
@ -168,13 +166,13 @@ tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file',
'zfs_load-key_location', 'zfs_load-key_noop', 'zfs_load-key_recursive']
tags = ['functional', 'cli_root', 'zfs_load-key']
# zfs_mount_006_pos - https://github.com/zfsonlinux/zfs/issues/4990
[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
'zfs_mount_008_pos', 'zfs_mount_009_neg', 'zfs_mount_010_neg',
'zfs_mount_011_neg', 'zfs_mount_012_neg', 'zfs_mount_all_001_pos',
'zfs_mount_encrypted', 'zfs_mount_remount', 'zfs_multi_mount']
'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_006_pos',
'zfs_mount_007_pos', 'zfs_mount_008_pos', 'zfs_mount_009_neg',
'zfs_mount_010_neg', 'zfs_mount_011_neg', 'zfs_mount_012_neg',
'zfs_mount_all_001_pos', 'zfs_mount_encrypted', 'zfs_mount_remount',
'zfs_multi_mount']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_program]
@ -191,10 +189,9 @@ tags = ['functional', 'cli_root', 'zfs_promote']
tests = ['zfs_written_property_001_pos']
tags = ['functional', 'cli_root', 'zfs_property']
# zfs_receive_004_neg - Fails for OpenZFS on illumos
[tests/functional/cli_root/zfs_receive]
tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
'zfs_receive_005_neg', 'zfs_receive_006_pos',
'zfs_receive_004_neg', 'zfs_receive_005_neg', 'zfs_receive_006_pos',
'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg',
'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos',
'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
@ -207,12 +204,10 @@ tags = ['functional', 'cli_root', 'zfs_receive']
tests = ['zfs_remap_cliargs', 'zfs_remap_obsolete_counts']
tags = ['functional', 'cli_root', 'zfs_remap']
# zfs_rename_006_pos - https://github.com/zfsonlinux/zfs/issues/5647
# zfs_rename_009_neg - https://github.com/zfsonlinux/zfs/issues/5648
[tests/functional/cli_root/zfs_rename]
tests = ['zfs_rename_001_pos', 'zfs_rename_002_pos', 'zfs_rename_003_pos',
'zfs_rename_004_neg', 'zfs_rename_005_neg',
'zfs_rename_007_pos', 'zfs_rename_008_pos',
'zfs_rename_004_neg', 'zfs_rename_005_neg', 'zfs_rename_006_pos',
'zfs_rename_007_pos', 'zfs_rename_008_pos', 'zfs_rename_009_neg',
'zfs_rename_010_neg', 'zfs_rename_011_pos', 'zfs_rename_012_neg',
'zfs_rename_013_pos', 'zfs_rename_encrypted_child',
'zfs_rename_to_encrypted']
@ -355,7 +350,6 @@ tags = ['functional', 'cli_root', 'zpool_get']
tests = ['zpool_history_001_neg', 'zpool_history_002_pos']
tags = ['functional', 'cli_root', 'zpool_history']
# zpool_import_missing_003_pos - https://github.com/zfsonlinux/zfs/issues/6839
[tests/functional/cli_root/zpool_import]
tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
'zpool_import_003_pos', 'zpool_import_004_pos', 'zpool_import_005_pos',
@ -365,7 +359,7 @@ tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
'zpool_import_015_pos',
'zpool_import_features_001_pos', 'zpool_import_features_002_neg',
'zpool_import_features_003_pos', 'zpool_import_missing_001_pos',
'zpool_import_missing_002_pos',
'zpool_import_missing_002_pos', 'zpool_import_missing_003_pos',
'zpool_import_rename_001_pos', 'zpool_import_all_001_pos',
'zpool_import_encrypted', 'zpool_import_encrypted_load',
'zpool_import_errata3',
@ -379,7 +373,6 @@ tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
'import_paths_changed',
'import_rewind_config_changed',
'import_rewind_device_replaced']
tags = ['functional', 'cli_root', 'zpool_import']
[tests/functional/cli_root/zpool_labelclear]
@ -389,7 +382,8 @@ post =
tags = ['functional', 'cli_root', 'zpool_labelclear']
[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg', 'zpool_offline_003_pos']
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg',
'zpool_offline_003_pos']
tags = ['functional', 'cli_root', 'zpool_offline']
[tests/functional/cli_root/zpool_online]
@ -542,15 +536,11 @@ tests = ['large_dnode_001_pos', 'large_dnode_002_pos', 'large_dnode_003_pos',
'large_dnode_007_neg', 'large_dnode_008_pos', 'large_dnode_009_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/grow_pool]
tests = ['grow_pool_001_pos']
tags = ['functional', 'grow_pool']
[tests/functional/grow_replicas]
tests = ['grow_replicas_001_pos']
[tests/functional/grow]
pre =
post =
tags = ['functional', 'grow_replicas']
tests = ['grow_pool_001_pos', 'grow_replicas_001_pos']
tags = ['functional', 'grow']
[tests/functional/history]
tests = ['history_001_pos', 'history_002_pos', 'history_003_pos',
@ -686,10 +676,9 @@ tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg']
tags = ['functional', 'refquota']
# refreserv_004_pos - Fails for OpenZFS on illumos
[tests/functional/refreserv]
tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
'refreserv_005_pos']
'refreserv_004_pos', 'refreserv_005_pos']
tags = ['functional', 'refreserv']
[tests/functional/removal]
@ -714,28 +703,25 @@ tags = ['functional', 'rename_dirs']
tests = ['replacement_001_pos', 'replacement_002_pos', 'replacement_003_pos']
tags = ['functional', 'replacement']
# reservation_001_pos - https://github.com/zfsonlinux/zfs/issues/4445
# reservation_013_pos - https://github.com/zfsonlinux/zfs/issues/4444
# reservation_018_pos - https://github.com/zfsonlinux/zfs/issues/5642
[tests/functional/reservation]
tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos',
'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos',
'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos',
'reservation_013_pos', 'reservation_014_pos', 'reservation_015_pos',
'reservation_016_pos', 'reservation_017_pos', 'reservation_019_pos',
'reservation_020_pos', 'reservation_021_neg', 'reservation_022_pos']
'reservation_016_pos', 'reservation_017_pos', 'reservation_018_pos',
'reservation_019_pos', 'reservation_020_pos', 'reservation_021_neg',
'reservation_022_pos']
tags = ['functional', 'reservation']
[tests/functional/rootpool]
tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos']
tags = ['functional', 'rootpool']
# rsend_008_pos - https://github.com/zfsonlinux/zfs/issues/6066
[tests/functional/rsend]
tests = ['rsend_001_pos', 'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos',
'rsend_005_pos', 'rsend_006_pos', 'rsend_007_pos', 'rsend_009_pos',
'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos',
'rsend_005_pos', 'rsend_006_pos', 'rsend_007_pos', 'rsend_008_pos',
'rsend_009_pos', 'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos',
'rsend_013_pos', 'rsend_014_pos',
'rsend_019_pos', 'rsend_020_pos',
'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos',
@ -771,10 +757,9 @@ tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
'snapshot_015_pos', 'snapshot_016_pos', 'snapshot_017_pos']
tags = ['functional', 'snapshot']
# snapused_004_pos - https://github.com/zfsonlinux/zfs/issues/5513
[tests/functional/snapused]
tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos',
'snapused_005_pos']
'snapused_004_pos', 'snapused_005_pos']
tags = ['functional', 'snapused']
[tests/functional/sparse]
@ -812,10 +797,10 @@ tests = [
'groupspace_001_pos', 'groupspace_002_pos', 'groupspace_003_pos' ]
tags = ['functional', 'userquota']
# vdev_zaps_007_pos -- fails due to a pre-existing issue with zpool split
[tests/functional/vdev_zaps]
tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos',
'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos']
'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos',
'vdev_zaps_007_pos']
tags = ['functional', 'vdev_zaps']
[tests/functional/write_dirs]

View File

@ -1,3 +1,4 @@
pkgdatadir = $(datadir)/@PACKAGE@/test-runner/bin
dist_pkgdata_SCRIPTS = \
test-runner.py
test-runner.py \
zts-report.py

View File

@ -0,0 +1,369 @@
#!/usr/bin/python
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 by Delphix. All rights reserved.
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#
import os
import re
import sys
#
# This script parses the stdout of zfstest, which has this format:
#
# Test: /path/to/testa (run as root) [00:00] [PASS]
# Test: /path/to/testb (run as jkennedy) [00:00] [PASS]
# Test: /path/to/testc (run as root) [00:00] [FAIL]
# [...many more results...]
#
# Results Summary
# FAIL 22
# SKIP 32
# PASS 1156
#
# Running Time: 02:50:31
# Percent passed: 95.5%
# Log directory: /var/tmp/test_results/20180615T205926
#
#
# Common generic reasons for a test or test group to be skipped.
#
# Some test cases are known to fail in ways which are not harmful or dangerous.
# In these cases simply mark the test as a known failure until it can be
# updated and the issue resolved. Note that it's preferable to open a unique
# issue on the GitHub issue tracker for each test case failure.
#
known_reason = 'Known issue'
#
# Some tests require that a test user be able to execute the zfs utilities.
# This may not be possible when testing in-tree due to the default permissions
# on the user's home directory. When testing this can be resolved by granting
# group read access.
#
# chmod 0750 $HOME
#
exec_reason = 'Test user execute permissions required for utilities'
#
# Some tests require that the DISKS provided can be partitioned. This is
# normally not an issue because loop back devices are used for DISKS and they
# can be partitioned. There is one notable exception: the CentOS 6.x kernel is
# old enough that it does not support partitioning loop back devices.
#
disk_reason = 'Partitionable DISKS required'
#
# Some tests require a minimum python version of 3.5 and will be skipped when
# the default system version is too old. There may also be tests which require
# additional python modules be installed, for example python-cffi is required
# by the pyzfs tests.
#
python_reason = 'Python v3.5 or newer required'
python_deps_reason = 'Python modules missing: python-cffi'
#
# Some tests require the O_TMPFILE flag which was first introduced in the
# 3.11 kernel.
#
tmpfile_reason = 'Kernel O_TMPFILE support required'
#
# Some tests require that the NFS client and server utilities be installed.
#
share_reason = 'NFS client and server utilities required'
#
# Some tests require that the lsattr utility support the project id feature.
#
project_id_reason = 'lsattr with set/show project ID required'
#
# Some tests require that the kernel support user namespaces.
#
user_ns_reason = 'Kernel user namespace support required'
#
# Some rewind tests can fail since nothing guarantees that old MOS blocks
# are not overwritten. Snapshots protect datasets and data files but not
# the MOS. Reasonable efforts are made in the test case to increase the
# odds that some txgs will have their MOS data left untouched, but it is
# never a sure thing.
#
rewind_reason = 'Arbitrary pool rewind is not guaranteed'
#
# Some tests are not applicable to Linux or need to be updated to operate
# in the manner required by Linux. Any tests which are skipped for this
# reason will be suppressed in the final analysis output.
#
na_reason = "N/A on Linux"
summary = {
'total': float(0),
'passed': float(0),
'logfile': "Could not determine logfile location."
}
#
# These tests are known to fail, thus we use this list to prevent these
# failures from failing the job as a whole; only unexpected failures
# bubble up to cause this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
known = {
'acl/posix/posix_001_pos': ['FAIL', known_reason],
'acl/posix/posix_002_pos': ['FAIL', known_reason],
'casenorm/sensitive_none_lookup': ['FAIL', '7633'],
'casenorm/sensitive_none_delete': ['FAIL', '7633'],
'casenorm/sensitive_formd_lookup': ['FAIL', '7633'],
'casenorm/sensitive_formd_delete': ['FAIL', '7633'],
'casenorm/insensitive_none_lookup': ['FAIL', '7633'],
'casenorm/insensitive_none_delete': ['FAIL', '7633'],
'casenorm/insensitive_formd_lookup': ['FAIL', '7633'],
'casenorm/insensitive_formd_delete': ['FAIL', '7633'],
'casenorm/mixed_none_lookup': ['FAIL', '7633'],
'casenorm/mixed_none_lookup_ci': ['FAIL', '7633'],
'casenorm/mixed_none_delete': ['FAIL', '7633'],
'casenorm/mixed_formd_lookup': ['FAIL', '7633'],
'casenorm/mixed_formd_lookup_ci': ['FAIL', '7633'],
'casenorm/mixed_formd_delete': ['FAIL', '7633'],
'cli_root/zfs_mount/zfs_mount_006_pos': ['SKIP', '4990'],
'cli_root/zfs_receive/zfs_receive_004_neg': ['FAIL', known_reason],
'cli_root/zfs_unshare/zfs_unshare_002_pos': ['SKIP', na_reason],
'cli_root/zfs_unshare/zfs_unshare_006_pos': ['SKIP', na_reason],
'cli_root/zpool_create/zpool_create_016_pos': ['SKIP', na_reason],
'cli_root/zpool_expand/zpool_expand_001_pos': ['SKIP', '5771'],
'cli_root/zpool_expand/zpool_expand_003_neg': ['SKIP', '5771'],
'cli_user/misc/zfs_share_001_neg': ['SKIP', na_reason],
'cli_user/misc/zfs_unshare_001_neg': ['SKIP', na_reason],
'inuse/inuse_001_pos': ['SKIP', na_reason],
'inuse/inuse_003_pos': ['SKIP', na_reason],
'inuse/inuse_006_pos': ['SKIP', na_reason],
'inuse/inuse_007_pos': ['SKIP', na_reason],
'privilege/setup': ['SKIP', na_reason],
'refreserv/refreserv_004_pos': ['FAIL', known_reason],
'removal/removal_condense_export': ['SKIP', known_reason],
'removal/removal_with_zdb': ['SKIP', known_reason],
'rootpool/setup': ['SKIP', na_reason],
'rsend/rsend_008_pos': ['SKIP', '6066'],
'vdev_zaps/vdev_zaps_007_pos': ['FAIL', known_reason],
'xattr/xattr_008_pos': ['SKIP', na_reason],
'xattr/xattr_009_neg': ['SKIP', na_reason],
'xattr/xattr_010_neg': ['SKIP', na_reason],
'zvol/zvol_misc/zvol_misc_001_neg': ['SKIP', na_reason],
'zvol/zvol_misc/zvol_misc_003_neg': ['SKIP', na_reason],
'zvol/zvol_misc/zvol_misc_004_pos': ['SKIP', na_reason],
'zvol/zvol_misc/zvol_misc_005_neg': ['SKIP', na_reason],
'zvol/zvol_misc/zvol_misc_006_pos': ['SKIP', na_reason],
'zvol/zvol_swap/zvol_swap_003_pos': ['SKIP', na_reason],
'zvol/zvol_swap/zvol_swap_005_pos': ['SKIP', na_reason],
'zvol/zvol_swap/zvol_swap_006_pos': ['SKIP', na_reason],
}
#
# These tests may occasionally fail or be skipped. We want these failures
# to be reported but only unexpected failures should bubble up to cause
# this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
maybe = {
'cache/setup': ['SKIP', disk_reason],
'cache/cache_010_neg': ['FAIL', known_reason],
'chattr/setup': ['SKIP', exec_reason],
'clean_mirror/setup': ['SKIP', disk_reason],
'cli_root/zdb/zdb_006_pos': ['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_004_pos': ['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_009_pos': ['SKIP', '5479'],
'cli_root/zfs_receive/receive-o-x_props_override':
['FAIL', known_reason],
'cli_root/zfs_rename/zfs_rename_006_pos': ['FAIL', '5647'],
'cli_root/zfs_rename/zfs_rename_009_neg': ['FAIL', '5648'],
'cli_root/zfs_rollback/zfs_rollback_001_pos': ['FAIL', '6415'],
'cli_root/zfs_rollback/zfs_rollback_002_pos': ['FAIL', '6416'],
'cli_root/zfs_share/setup': ['SKIP', share_reason],
'cli_root/zfs_snapshot/zfs_snapshot_002_neg': ['FAIL', known_reason],
'cli_root/zfs_unshare/setup': ['SKIP', share_reason],
'cli_root/zpool_add/setup': ['SKIP', disk_reason],
'cli_root/zpool_add/zpool_add_004_pos': ['FAIL', known_reason],
'cli_root/zpool_create/setup': ['SKIP', disk_reason],
'cli_root/zpool_create/zpool_create_008_pos': ['FAIL', known_reason],
'cli_root/zpool_destroy/zpool_destroy_001_pos': ['SKIP', '6145'],
'cli_root/zpool_export/setup': ['SKIP', disk_reason],
'cli_root/zpool_import/setup': ['SKIP', disk_reason],
'cli_root/zpool_import/import_rewind_device_replaced':
['FAIL', rewind_reason],
'cli_root/zpool_import/import_rewind_config_changed':
['FAIL', rewind_reason],
'cli_root/zpool_import/zpool_import_missing_003_pos': ['SKIP', '6839'],
'cli_root/zpool_remove/setup': ['SKIP', disk_reason],
'cli_root/zpool_upgrade/zpool_upgrade_004_pos': ['FAIL', '6141'],
'cli_user/misc/arc_summary3_001_pos': ['SKIP', python_reason],
'delegate/setup': ['SKIP', exec_reason],
'fault/auto_online_001_pos': ['SKIP', disk_reason],
'fault/auto_replace_001_pos': ['SKIP', disk_reason],
'history/history_004_pos': ['FAIL', '7026'],
'history/history_005_neg': ['FAIL', '6680'],
'history/history_006_neg': ['FAIL', '5657'],
'history/history_008_pos': ['FAIL', known_reason],
'history/history_010_pos': ['SKIP', exec_reason],
'inuse/inuse_005_pos': ['SKIP', disk_reason],
'inuse/inuse_008_pos': ['SKIP', disk_reason],
'inuse/inuse_009_pos': ['SKIP', disk_reason],
'largest_pool/largest_pool_001_pos': ['FAIL', known_reason],
'pyzfs/pyzfs_unittest': ['SKIP', python_deps_reason],
'no_space/setup': ['SKIP', disk_reason],
'no_space/enospc_002_pos': ['FAIL', known_reason],
'projectquota/setup': ['SKIP', exec_reason],
'reservation/reservation_018_pos': ['FAIL', '5642'],
'rsend/rsend_019_pos': ['FAIL', '6086'],
'rsend/rsend_020_pos': ['FAIL', '6446'],
'rsend/rsend_021_pos': ['FAIL', '6446'],
'rsend/rsend_024_pos': ['FAIL', '5665'],
'rsend/send-c_volume': ['FAIL', '6087'],
'scrub_mirror/setup': ['SKIP', disk_reason],
'snapshot/clone_001_pos': ['FAIL', known_reason],
'snapused/snapused_004_pos': ['FAIL', '5513'],
'tmpfile/setup': ['SKIP', tmpfile_reason],
'threadsappend/threadsappend_001_pos': ['FAIL', '6136'],
'upgrade/upgrade_projectquota_001_pos': ['SKIP', project_id_reason],
'user_namespace/setup': ['SKIP', user_ns_reason],
'userquota/setup': ['SKIP', exec_reason],
'vdev_zaps/vdev_zaps_004_pos': ['FAIL', '6935'],
'write_dirs/setup': ['SKIP', disk_reason],
'zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos': ['FAIL', '5848'],
}
def usage(s):
    """Print the given usage/error message and exit with status 1."""
    # print() with a single argument is valid in both Python 2 and 3,
    # unlike the original 'print s' statement form.
    print(s)
    sys.exit(1)
def process_results(pathname):
try:
f = open(pathname)
except IOError, e:
print 'Error opening file: %s' % e
sys.exit(1)
prefix = '/zfs-tests/tests/functional/'
pattern = '^Test:\s*\S*%s(\S+)\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]'\
% prefix
pattern_log = '^\s*Log directory:\s*(\S*)'
d = {}
for l in f.readlines():
m = re.match(pattern, l)
if m and len(m.groups()) == 4:
summary['total'] += 1
if m.group(4) == "PASS":
summary['passed'] += 1
d[m.group(1)] = m.group(4)
continue
m = re.match(pattern_log, l)
if m:
summary['logfile'] = m.group(1)
return d
if __name__ == "__main__":
    # '!=' replaces the original "is not 2", which compared object
    # identity on an int (fragile and a SyntaxWarning on newer Pythons).
    if len(sys.argv) != 2:
        usage('usage: %s <pathname>' % sys.argv[0])

    results = process_results(sys.argv[1])

    if summary['total'] == 0:
        print("\n\nNo test results were found.")
        print("Log directory: %s" % summary['logfile'])
        sys.exit(0)

    expected = []
    unexpected = []

    # Classify every non-PASS result as either expected (listed in the
    # 'known' or 'maybe' tables) or unexpected.
    for test in results.keys():
        if results[test] == "PASS":
            continue

        setup = test.replace(os.path.basename(test), "setup")
        if results[test] == "SKIP" and test != setup:
            # When an entire group's setup is expected to SKIP, the
            # individual skipped tests in that group are expected too.
            if setup in known and known[setup][0] == "SKIP":
                continue
            if setup in maybe and maybe[setup][0] == "SKIP":
                continue

        if ((test not in known or results[test] not in known[test][0]) and
                (test not in maybe or results[test] not in maybe[test][0])):
            unexpected.append(test)
        else:
            expected.append(test)

    print("\nTests with results other than PASS that are expected:")
    issue_url = 'https://github.com/zfsonlinux/zfs/issues/'
    for test in sorted(expected):
        # Recompute this test's group 'setup' entry.  The original code
        # reused the stale value left over from the classification loop
        # above, so the group-skip suppression below could key off the
        # wrong test.
        setup = test.replace(os.path.basename(test), "setup")

        # Include the reason why the result is expected, given the
        # following:
        # 1. Suppress test results which set the "N/A on Linux" reason.
        # 2. Numerical reasons are assumed to be GitHub issue numbers.
        # 3. When an entire test group is skipped only report the setup
        #    reason.
        if test in known:
            if known[test][1] == na_reason:
                continue
            elif known[test][1].isdigit():
                expect = issue_url + known[test][1]
            else:
                expect = known[test][1]
        elif test in maybe:
            if maybe[test][1].isdigit():
                expect = issue_url + maybe[test][1]
            else:
                expect = maybe[test][1]
        elif setup in known and known[setup][0] == "SKIP" and setup != test:
            continue
        elif setup in maybe and maybe[setup][0] == "SKIP" and setup != test:
            continue
        else:
            expect = "UNKNOWN REASON"
        print("    %s %s (%s)" % (results[test], test, expect))

    print("\nTests with result of PASS that are unexpected:")
    for test in sorted(known.keys()):
        # We probably should not be silently ignoring the case
        # where "test" is not in "results".
        if test not in results or results[test] != "PASS":
            continue
        print("    %s %s (expected %s)" % (results[test], test,
                                           known[test][0]))

    print("\nTests with results other than PASS that are unexpected:")
    for test in sorted(unexpected):
        expect = "PASS" if test not in known else known[test][0]
        print("    %s %s (expected %s)" % (results[test], test, expect))

    if len(unexpected) == 0:
        sys.exit(0)
    else:
        sys.exit(1)

View File

@ -22,8 +22,7 @@ SUBDIRS = \
exec \
fault \
features \
grow_pool \
grow_replicas \
grow \
history \
hkdf \
inheritance \

View File

@ -44,11 +44,6 @@
verify_runnable "global"
# See issue: https://github.com/zfsonlinux/zfs/issues/6145
if is_linux; then
log_unsupported "Test case occasionally fails"
fi
function cleanup_testenv
{
cleanup

View File

@ -40,5 +40,17 @@ log_must add_group $QGROUP
log_must add_user $QGROUP $QUSER1
log_must add_user $QGROUP $QUSER2
#
# Verify the test user can execute the zfs utilities. This may not
# be possible due to default permissions on the user home directory.
# This can be resolved granting group read access.
#
# chmod 0750 $HOME
#
user_run $QUSER1 zfs list
if [ $? -ne 0 ]; then
log_unsupported "Test user $QUSER1 cannot execute zfs utilities"
fi
DISK=${DISKS%% *}
default_setup $DISK

View File

@ -35,16 +35,4 @@
verify_runnable "global"
df -F zfs -h | grep "$TESTFS " >/dev/null
[[ $? == 0 ]] && log_must zfs umount -f $TESTDIR
destroy_pool $TESTPOOL
# recreate and destroy a zpool over the disks to restore the partitions to
# normal
if [[ -n $SINGLE_DISK ]]; then
log_must cleanup_devices $MIRROR_PRIMARY
else
log_must cleanup_devices $MIRROR_PRIMARY $MIRROR_SECONDARY
fi
log_pass
default_cleanup

View File

@ -28,39 +28,4 @@
# Copyright (c) 2013 by Delphix. All rights reserved.
#
typeset -i NUMBER_OF_DISKS=0
for i in $DISKS; do
[[ -n $MIRROR_PRIMARY ]] && MIRROR_SECONDARY=$i
[[ -z $MIRROR_PRIMARY ]] && MIRROR_PRIMARY=$i
done
if [[ -z $MIRROR_SECONDARY ]]; then
# We need to repartition the single disk to two slices
SINGLE_DISK=$MIRROR_PRIMARY
MIRROR_SECONDARY=$MIRROR_PRIMARY
SIDE_PRIMARY_PART=0
SIDE_SECONDARY_PART=1
if is_linux; then
SIDE_PRIMARY=${SINGLE_DISK}p1
SIDE_SECONDARY=${SINGLE_DISK}p2
else
SIDE_PRIMARY=${SINGLE_DISK}s${SIDE_PRIMARY_PART}
SIDE_SECONDARY=${SINGLE_DISK}s${SIDE_SECONDARY_PART}
fi
else
SIDE_PRIMARY_PART=0
SIDE_SECONDARY_PART=0
if is_linux; then
SIDE_PRIMARY=${MIRROR_PRIMARY}p1
SIDE_SECONDARY=${MIRROR_SECONDARY}p1
else
SIDE_PRIMARY=${MIRROR_PRIMARY}s${SIDE_PRIMARY_PART}
SIDE_SECONDARY=${MIRROR_SECONDARY}s${SIDE_SECONDARY_PART}
fi
fi
export MIRROR_PRIMARY MIRROR_SECONDARY SINGLE_DISK SIDE_PRIMARY SIDE_SECONDARY
export MIRROR_MEGS=100
export MIRROR_SIZE=${MIRROR_MEGS}m # default mirror size
set -A CHECKSUM_TYPES "fletcher2" "fletcher4" "sha256" "sha512" "skein" "edonr"

View File

@ -22,6 +22,7 @@
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/include/properties.shlib
. $STF_SUITE/tests/functional/checksum/default.cfg
# DESCRIPTION:
# Sanity test to make sure checksum algorithms work.
@ -60,14 +61,9 @@ log_assert "Create and read back files with using different checksum algorithms"
log_onexit cleanup
FSSIZE=$(zpool list -Hp -o size $TESTPOOL)
WRITESZ=1048576
WRITECNT=$((($FSSIZE) / $WRITESZ ))
# Skip the first and last 4MB
SKIP=4127518
SKIPCNT=$((($SKIP / $WRITESZ )))
SKIPCNT=$((($SKIPCNT * 2)))
WRITECNT=$((($WRITECNT - $SKIPCNT)))
SKIPCNT=$(((4194304 / $WRITESZ) * 2))
WRITECNT=$((($MINVDEVSIZE / $WRITESZ) - $SKIPCNT))
# Get a list of vdevs in our pool
set -A array $(get_disklist_fullpath)
@ -75,19 +71,21 @@ set -A array $(get_disklist_fullpath)
# Get the first vdev, since we will corrupt it later
firstvdev=${array[0]}
# First test each checksum by writing a file using it, and confirm there's no
# errors.
for ((count = 0; count < ${#checksum_props[*]} ; count++)); do
i=${checksum_props[$count]}
zfs set checksum=$i $TESTPOOL
file_write -o overwrite -f $TESTDIR/test_$i -b $WRITESZ -c 5 -d R
done
zpool export $TESTPOOL
zpool import $TESTPOOL
zpool scrub $TESTPOOL
while is_pool_scrubbing $TESTPOOL; do
sleep 1
# Test each checksum by writing a file using it, confirm there are no errors.
typeset -i i=1
while [[ $i -lt ${#CHECKSUM_TYPES[*]} ]]; do
type=${CHECKSUM_TYPES[i]}
log_must zfs set checksum=$type $TESTPOOL
log_must file_write -o overwrite -f $TESTDIR/test_$type \
-b $WRITESZ -c 5 -d R
(( i = i + 1 ))
done
log_must zpool export $TESTPOOL
log_must zpool import $TESTPOOL
log_must zpool scrub $TESTPOOL
log_must wait_scrubbed $TESTPOOL
zpool status -P -v $TESTPOOL | grep $firstvdev | read -r name state rd wr cksum
log_assert "Normal file write test saw $cksum checksum errors"
log_must [ $cksum -eq 0 ]
@ -95,31 +93,32 @@ log_must [ $cksum -eq 0 ]
rm -fr $TESTDIR/*
log_assert "Test scrambling the disk and seeing checksum errors"
for ((count = 0; count < ${#checksum_props[*]} ; count++)); do
i=${checksum_props[$count]}
zfs set checksum=$i $TESTPOOL
file_write -o overwrite -f $TESTDIR/test_$i -b $WRITESZ -c 5 -d R
typeset -i j=1
while [[ $j -lt ${#CHECKSUM_TYPES[*]} ]]; do
type=${CHECKSUM_TYPES[$j]}
log_must zfs set checksum=$type $TESTPOOL
log_must file_write -o overwrite -f $TESTDIR/test_$type \
-b $WRITESZ -c 5 -d R
zpool export $TESTPOOL
log_must zpool export $TESTPOOL
# Scramble the data on the first vdev in our pool.
# Skip the first and last 16MB of data, then scramble the rest after that
#
file_write -o overwrite -f $firstvdev -s $SKIP -c $WRITECNT -b $WRITESZ -d R
# Scramble the data on the first vdev in our pool. Skip the first
# and last 16MB of data, then scramble the rest after that.
log_must dd if=/dev/zero of=$firstvdev bs=$WRITESZ skip=$SKIPCNT \
count=$WRITECNT
zpool import $TESTPOOL
log_must zpool import $TESTPOOL
log_must zpool scrub $TESTPOOL
log_must wait_scrubbed $TESTPOOL
i=${checksum_props[$count]}
zpool scrub $TESTPOOL
while is_pool_scrubbing $TESTPOOL; do
sleep 1
done
zpool status -P -v $TESTPOOL | grep $firstvdev | \
read -r name state rd wr cksum
zpool status -P -v $TESTPOOL | grep $firstvdev | read -r name state rd wr cksum
log_assert "Checksum '$i' caught $cksum checksum errors"
log_assert "Checksum '$type' caught $cksum checksum errors"
log_must [ $cksum -ne 0 ]
rm -f $TESTDIR/test_$i
zpool clear $TESTPOOL
rm -f $TESTDIR/test_$type
log_must zpool clear $TESTPOOL
(( j = j + 1 ))
done

View File

@ -30,22 +30,7 @@
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/checksum/default.cfg
verify_runnable "global"
if ! $(is_physical_device $DISKS) ; then
log_unsupported "This directory cannot be run on raw files."
fi
if [[ -n $SINGLE_DISK ]]; then
log_note "Partitioning a single disk ($SINGLE_DISK)"
else
log_note "Partitioning disks ($MIRROR_PRIMARY $MIRROR_SECONDARY)"
fi
log_must set_partition $SIDE_PRIMARY_PART "" $MIRROR_SIZE $MIRROR_PRIMARY
log_must set_partition $SIDE_SECONDARY_PART "" $MIRROR_SIZE $MIRROR_SECONDARY
default_mirror_setup $SIDE_PRIMARY $SIDE_SECONDARY
log_pass
default_mirror_setup $DISKS

View File

@ -43,11 +43,6 @@
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/6145
if is_linux; then
log_unsupported "Test case occasionally fails"
fi
function cleanup
{
[[ -e $propfile ]] && rm -f $propfile

View File

@ -45,7 +45,6 @@
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/5479
if is_kmemleak; then
log_unsupported "Test case runs slowly when kmemleak is enabled"
fi

View File

@ -49,6 +49,11 @@
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/4990
if is_linux; then
log_unsupported "Test case needs to be updated"
fi
function cleanup
{
log_must force_unmount $TESTPOOL/$TESTFS

View File

@ -27,6 +27,11 @@
. $STF_SUITE/include/libtest.shlib
share -s
if [ $? -ne 0 ]; then
log_unsupported "The NFS utilities are not installed"
fi
# Make sure NFS server is running before testing.
setup_nfs_server

View File

@ -27,6 +27,11 @@
. $STF_SUITE/include/libtest.shlib
share -s
if [ $? -ne 0 ]; then
log_unsupported "The NFS utilities are not installed"
fi
# Make sure NFS server is running before testing.
setup_nfs_server

View File

@ -45,11 +45,6 @@
verify_runnable "global"
# https://github.com/zfsonlinux/zfs/issues/6145
if is_linux; then
log_unsupported "Test case occasionally fails"
fi
function cleanup
{
poolexists $TESTPOOL && \

View File

@ -45,11 +45,6 @@
verify_runnable "global"
# https://github.com/zfsonlinux/zfs/issues/6145
if is_linux; then
log_unsupported "Test case occasionally fails"
fi
function cleanup
{
poolexists $TESTPOOL2 && destroy_pool $TESTPOOL2

View File

@ -50,7 +50,7 @@ verify_runnable "global"
# See issue: https://github.com/zfsonlinux/zfs/issues/5771
if is_linux; then
log_unsupported "Requires additional ZED support"
log_unsupported "Requires autoexpand property support"
fi
function cleanup

View File

@ -50,7 +50,7 @@ verify_runnable "global"
# See issue: https://github.com/zfsonlinux/zfs/issues/5771
if is_linux; then
log_unsupported "Requires additional ZED support"
log_unsupported "Requires autoexpand property support"
fi
function cleanup

View File

@ -59,6 +59,11 @@
verify_runnable "global"
# See issue: https://github.com/zfsonlinux/zfs/issues/6839
if is_linux; then
log_unsupported "Test case may be slow"
fi
set -A vdevs "" "mirror" "raidz"
function verify

View File

@ -44,11 +44,6 @@
verify_runnable "global"
# See issue: https://github.com/zfsonlinux/zfs/issues/5444
if is_32bit; then
log_unsupported "Test case fails on 32-bit systems"
fi
log_assert "When scrubbing, detach device should not break system."
log_must zpool scrub $TESTPOOL

View File

@ -43,11 +43,6 @@
verify_runnable "global"
# https://github.com/zfsonlinux/zfs/issues/6141
if is_linux; then
log_unsupported "Test case occasionally fails"
fi
function cleanup
{
for config in $CONFIGS; do

View File

@ -55,6 +55,18 @@ log_must add_group $OTHER_GROUP
log_must add_user $OTHER_GROUP $OTHER1
log_must add_user $OTHER_GROUP $OTHER2
#
# Verify the test user can execute the zfs utilities. This may not
# be possible due to default permissions on the user home directory.
# This can be resolved granting group read access.
#
# chmod 0750 $HOME
#
user_run $STAFF1 zfs list
if [ $? -ne 0 ]; then
log_unsupported "Test user $STAFF1 cannot execute zfs utilities"
fi
DISK=${DISKS%% *}
default_volume_setup $DISK
log_must chmod 777 $TESTDIR

View File

@ -48,10 +48,6 @@
verify_runnable "global"
if is_32bit; then
log_unsupported "Test case fails on 32-bit systems"
fi
log_assert "Setting devices=on on file system, the devices files in this file" \
"system can be used."
log_onexit cleanup

View File

@ -48,10 +48,6 @@
verify_runnable "global"
if is_32bit; then
log_unsupported "Test case fails on 32-bit systems"
fi
log_assert "Setting devices=off on file system, the devices files in this file"\
"system can not be used."
log_onexit cleanup

View File

@ -1,6 +1,7 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/grow_replicas
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/grow
dist_pkgdata_SCRIPTS = \
grow_pool_001_pos.ksh \
grow_replicas_001_pos.ksh
dist_pkgdata_DATA = \
grow_replicas.cfg
grow.cfg

View File

@ -1,4 +1,3 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
@ -21,33 +20,23 @@
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013, 2016 by Delphix. All rights reserved.
# Copyright (c) 2013 by Delphix. All rights reserved.
# Copyright 2016 Nexenta Systems, Inc.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/grow_pool/grow_pool.cfg
verify_runnable "global"
export DEVICE1="$TEST_BASE_DIR/device-1"
export DEVICE2="$TEST_BASE_DIR/device-2"
export DEVICE3="$TEST_BASE_DIR/device-3"
export DEVICE4="$TEST_BASE_DIR/device-4"
ismounted $TESTFS && \
log_must zfs umount $TESTDIR
destroy_pool "$TESTPOOL"
#
# Here we create & destroy a zpool using the disks
# because this resets the partitions to normal
#
if [[ -z $DISK ]]; then
create_pool ZZZ "$DISK0 $DISK1"
destroy_pool ZZZ
else
create_pool ZZZ "$DISK"
destroy_pool ZZZ
fi
log_pass
export BLOCK_SIZE=131072
export TESTFILE1=file$$.1
export SMALL_WRITE_COUNT=100
export WRITE_COUNT=4096000

View File

@ -30,7 +30,7 @@
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/grow_pool/grow_pool.cfg
. $STF_SUITE/tests/functional/grow/grow.cfg
#
# DESCRIPTION:
@ -46,13 +46,29 @@
verify_runnable "global"
function cleanup
{
destroy_pool $TESTPOOL
rm -f $DEVICE1 $DEVICE2 $DEVICE3
}
log_assert "A zpool may be increased in capacity by adding a disk"
log_onexit cleanup
truncate -s $SPA_MINDEVSIZE $DEVICE1 $DEVICE2
create_pool $TESTPOOL $pooltype $DEVICE1 $DEVICE2
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
log_must zfs set compression=off $TESTPOOL/$TESTFS
file_write -o create -f $TESTDIR/$TESTFILE1 \
-b $BLOCK_SIZE -c $WRITE_COUNT -d 0
typeset -i zret=$?
readonly ENOSPC=28
if [[ $zret -ne $ENOSPC ]]; then
log_fail "file_write completed w/o ENOSPC, aborting!!!"
fi
@ -61,14 +77,15 @@ if [[ ! -s $TESTDIR/$TESTFILE1 ]]; then
log_fail "$TESTDIR/$TESTFILE1 was not created"
fi
if [[ -n $DISK ]]; then
log_must zpool add $TESTPOOL $DISK"s"$SLICE1
else
log_must zpool add $TESTPOOL $DISK1
fi
truncate -s $SPA_MINDEVSIZE $DEVICE3
log_must zpool add $TESTPOOL $DEVICE3
log_must file_write -o append -f $TESTDIR/$TESTFILE1 \
-b $BLOCK_SIZE -c $SMALL_WRITE_COUNT -d 0
log_must zfs inherit compression $TESTPOOL/$TESTFS
log_must destroy_pool $TESTPOOL
rm -f $DEVICE1 $DEVICE2 $DEVICE3
log_pass "TESTPOOL successfully grown"

View File

@ -30,7 +30,8 @@
# Copyright 2016 Nexenta Systems, Inc.
#
. $STF_SUITE/tests/functional/grow_replicas/grow_replicas.cfg
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/grow/grow.cfg
# DESCRIPTION:
# A ZFS filesystem is limited by the amount of disk space
@ -44,18 +45,10 @@
verify_runnable "global"
if is_32bit; then
log_unsupported "Test case fails on 32-bit systems"
fi
if ! is_physical_device $DISKS; then
log_unsupported "This test case cannot be run on raw files"
fi
function cleanup
{
datasetexists $TESTPOOL && log_must destroy_pool $TESTPOOL
[[ -d $TESTDIR ]] && log_must rm -rf $TESTDIR
destroy_pool $TESTPOOL
rm -f $DEVICE1 $DEVICE2 $DEVICE3 $DEVICE4
}
log_assert "mirror/raidz pool may be increased in capacity by adding a disk"
@ -67,28 +60,13 @@ readonly ENOSPC=28
for pooltype in "mirror" "raidz"; do
log_note "Creating pool type: $pooltype"
if [[ -n $DISK ]]; then
log_note "No spare disks available. Using slices on $DISK"
for slice in $SLICES; do
log_must set_partition $slice "$cyl" $SIZE $DISK
cyl=$(get_endslice $DISK $slice)
done
create_pool $TESTPOOL $pooltype \
${DISK}${SLICE_PREFIX}${SLICE0} \
${DISK}${SLICE_PREFIX}${SLICE1}
else
log_must set_partition 0 "" $SIZE $DISK0
log_must set_partition 0 "" $SIZE $DISK1
create_pool $TESTPOOL $pooltype \
${DISK0}${SLICE_PREFIX}${SLICE0} \
${DISK1}${SLICE_PREFIX}${SLICE0}
fi
truncate -s $SPA_MINDEVSIZE $DEVICE1 $DEVICE2
create_pool $TESTPOOL $pooltype $DEVICE1 $DEVICE2
[[ -d $TESTDIR ]] && log_must rm -rf $TESTDIR
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
log_must zfs set compression=off $TESTPOOL/$TESTFS
file_write -o create -f $TESTDIR/$TESTFILE1 \
-b $BLOCK_SIZE -c $WRITE_COUNT -d 0
@ -98,23 +76,14 @@ for pooltype in "mirror" "raidz"; do
[[ ! -s $TESTDIR/$TESTFILE1 ]] && \
log_fail "$TESTDIR/$TESTFILE1 was not created"
# $DISK will be set if we're using slices on one disk
if [[ -n $DISK ]]; then
log_must zpool add $TESTPOOL $pooltype \
${DISK}${SLICE_PREFIX}${SLICE3} \
${DISK}${SLICE_PREFIX}${SLICE4}
else
[[ -z $DISK2 || -z $DISK3 ]] &&
log_unsupported "No spare disks available"
log_must zpool add $TESTPOOL $pooltype \
${DISK2}${SLICE_PREFIX}${SLICE0} \
${DISK3}${SLICE_PREFIX}${SLICE0}
fi
truncate -s $SPA_MINDEVSIZE $DEVICE3 $DEVICE4
log_must zpool add $TESTPOOL $pooltype $DEVICE3 $DEVICE4
log_must file_write -o append -f $TESTDIR/$TESTFILE1 \
-b $BLOCK_SIZE -c $SMALL_WRITE_COUNT -d 0
log_must destroy_pool $TESTPOOL
rm -f $DEVICE1 $DEVICE2 $DEVICE3 $DEVICE4
done
log_pass "mirror/raidz pool successfully grown"

View File

@ -1,8 +0,0 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/grow_pool
dist_pkgdata_SCRIPTS = \
setup.ksh \
cleanup.ksh \
grow_pool_001_pos.ksh
dist_pkgdata_DATA = \
grow_pool.cfg

View File

@ -1,76 +0,0 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
export DISKSARRAY=$DISKS
export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
function set_disks
{
set -A disk_array $(find_disks $DISKS)
typeset -i i=0
typeset -i limit=2
while (( i < limit )); do
if [[ -n ${disk_array[$i]} ]]; then
export DISK${i}="${disk_array[$i]}"
else
export DISK=${DISKS%% *}
return
fi
((i = i + 1))
done
export DISK=""
}
export BLOCK_SIZE=8192
set_disks
#
# Do not make SIZE too large as the three slices may exceed
# the size of the disk, and also slow down the test
# which involves filling until ENOSPC
#
export SIZE="100mb"
export SMALL_WRITE_COUNT=100
export TESTFILE1=file$$.1
export WRITE_COUNT=65536000
if is_linux; then
set_device_dir
set_slice_prefix
export SLICE=1
export SLICE0=1
export SLICE1=2
else
export SLICE_PREFIX="s"
export SLICE=0
export SLICE0=0
export SLICE1=1
fi

View File

@ -1,57 +0,0 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/grow_pool/grow_pool.cfg
verify_runnable "global"
if ! $(is_physical_device $DISKS) ; then
log_unsupported "This directory cannot be run on raw files."
fi
if [[ -n $DISK ]]; then
log_note "No spare disks available. Using slices on $DISK"
log_must zero_partitions $DISK
for i in $SLICE0 $SLICE1 ; do
log_must set_partition $i "$cyl" $SIZE $DISK
cyl=$(get_endslice $DISK $i)
done
tmp=$DISK"s"$SLICE0
else
log_must zero_partitions $DISK0
log_must zero_partitions $DISK1
log_must set_partition $SLICE "" $SIZE $DISK0
log_must set_partition $SLICE "" $SIZE $DISK1
tmp=$DISK0$SLICE_PREFIX$SLICE
fi
default_setup $tmp

View File

@ -1,82 +0,0 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013 by Delphix. All rights reserved.
# Copyright 2016 Nexenta Systems, Inc.
#
. $STF_SUITE/include/libtest.shlib
export DISKSARRAY=$DISKS
export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
function set_disks
{
set -A disk_array $(find_disks $DISKS)
typeset -i i=0
typeset -i limit=4
while (( i < limit )); do
if [[ -n ${disk_array[$i]} ]]; then
export DISK${i}="${disk_array[$i]}"
else
export DISK=${DISKS%% *}
return
fi
((i = i + 1))
done
export DISK=""
}
export BLOCK_SIZE=8192
set_disks
# Do not make SIZE too large as the three slices may exceed
# the size of the disk, and also slow down the test
# which involves filling until ENOSPC.
export SIZE="100mb"
export SMALL_WRITE_COUNT=100
export TESTFILE1=file$$.1
export WRITE_COUNT=65536000
if is_linux; then
export SLICES="0 1 2 3 4"
set_device_dir
set_slice_prefix
export SLICE0=1
export SLICE1=2
export SLICE2=3
export SLICE3=4
export SLICE4=5
else
export SLICES="0 1 3 4"
export SLICE_PREFIX="s"
export SLICE0=0
export SLICE1=1
export SLICE2=2
export SLICE3=3
export SLICE4=4
fi

View File

@ -31,11 +31,6 @@
. $STF_SUITE/include/libtest.shlib
# See issue: https://github.com/zfsonlinux/zfs/issues/7026
if is_linux; then
log_unsupported "Test case occasionally fails"
fi
#
# DESCRIPTION:
# 'zpool history' can cope with simultaneous commands.

View File

@ -45,11 +45,6 @@
verify_runnable "global"
# See issue: https://github.com/zfsonlinux/zfs/issues/5657
if is_linux; then
log_unsupported "Test case occasionally fails"
fi
function cleanup
{
if datasetexists $fs ; then

View File

@ -59,6 +59,18 @@ root_testfs=$TESTPOOL/$TESTFS1
add_group $HIST_GROUP
add_user $HIST_GROUP $HIST_USER
#
# Verify the test user can execute the zfs utilities. This may not
# be possible due to default permissions on the user home directory.
# This can be resolved granting group read access.
#
# chmod 0750 $HOME
#
user_run $HIST_USER zfs list
if [ $? -ne 0 ]; then
log_unsupported "Test user $HIST_USER cannot execute zfs utilities"
fi
run_and_verify "zfs create $root_testfs" "-l"
run_and_verify "zfs allow $HIST_GROUP snapshot,mount $root_testfs" "-l"
run_and_verify "zfs allow $HIST_USER destroy,mount $root_testfs" "-l"

View File

@ -31,10 +31,6 @@
. $STF_SUITE/include/libtest.shlib
if is_32bit; then
log_unsupported "Test case fails on 32-bit systems"
fi
#
# DESCRIPTION:
# Write a file to the allowable ZFS fs size.

View File

@ -46,11 +46,6 @@
verify_runnable "global"
# See issue: https://github.com/zfsonlinux/zfs/issues/6145
if is_linux; then
log_unsupported "Test case occasionally fails"
fi
#
# Parse the results of zpool & zfs creation with specified size
#

View File

@ -27,6 +27,12 @@
verify_runnable "global"
# Verify that the required dependencies for testing are installed.
python -c "import cffi" 2>/dev/null
if [ $? -eq 1 ]; then
log_unsupported "python-cffi not found by Python"
fi
# We don't just try to "import libzfs_core" because we want to skip these tests
# only if pyzfs was not installed due to missing, build-time, dependencies; if
# we cannot load "libzfs_core" due to other reasons, for instance an API/ABI
@ -34,8 +40,7 @@ verify_runnable "global"
python -c '
import pkgutil, sys
sys.exit(pkgutil.find_loader("libzfs_core") is None)'
if [ $? -eq 1 ]
then
if [ $? -eq 1 ]; then
log_unsupported "libzfs_core not found by Python"
fi

View File

@ -43,6 +43,9 @@
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/6066
log_unsupported "Occasionally hangs"
# Origin Clone
#
set -A dtst "$POOL" "$POOL/pclone" \

View File

@ -35,11 +35,6 @@
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/6086
if is_32bit; then
log_unsupported "Test case occasionally fails"
fi
log_assert "Verify resumability of a full and incremental ZFS send/receive " \
"in the presence of a corrupted stream"
log_onexit resume_cleanup $sendfs $streamfs

View File

@ -33,11 +33,6 @@
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/6446
if is_linux; then
log_unsupported "Test often hangs. Skipping."
fi
log_assert "Verify resumability of full ZFS send/receive with the -D " \
"(dedup) flag"

View File

@ -35,11 +35,6 @@
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/6446
if is_linux; then
log_unsupported "Test often hangs. Skipping."
fi
log_assert "Verify resumability of a full and incremental ZFS send/receive " \
"with the -e (embedded) flag"

View File

@ -38,11 +38,6 @@
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/5654
if is_32bit; then
log_unsupported "Test case fails on 32-bit systems"
fi
log_assert "Verify resumability of an incremental ZFS send/receive with ZFS " \
"bookmarks"

View File

@ -35,11 +35,6 @@
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/5665
if is_linux; then
log_unsupported "Test case hangs frequently."
fi
log_assert "Verify resumability of a full ZFS send/receive with the source " \
"filesystem unmounted"

View File

@ -35,11 +35,6 @@ function cleanup
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/6087
if is_32bit; then
log_unsupported "Test case occasionally fails on 32-bit systems"
fi
log_assert "Verify compressed send works with volumes"
log_onexit cleanup

View File

@ -49,11 +49,6 @@
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/6145
if is_linux; then
log_unsupported "Test case occasionally fails"
fi
# Setup array, 4 elements as a group, refer to:
# i+0: name of a snapshot
# i+1: mountpoint of the snapshot

View File

@ -45,11 +45,6 @@
verify_runnable "global"
# See issue: https://github.com/zfsonlinux/zfs/issues/5727
if is_32bit; then
log_unsupported "Test case slow on 32-bit systems"
fi
function cleanup
{
[[ -e $TESTDIR ]] && log_must rm -rf $TESTDIR/*

View File

@ -45,11 +45,6 @@
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/6136
if is_linux; then
log_unsupported "Test case occasionally fails"
fi
log_assert "Ensure multiple threads performing write appends to the same" \
"ZFS file succeed"

View File

@ -46,11 +46,6 @@
verify_runnable "global"
# See issue: https://github.com/zfsonlinux/zfs/issues/5727
if is_32bit; then
log_unsupported "Test case slow on 32-bit systems"
fi
function cleanup
{
[[ -e $TESTDIR ]] && log_must rm -rf $TESTDIR/*

View File

@ -43,7 +43,7 @@
verify_runnable "global"
if ! lsattr -pd > /dev/null 2>&1; then
log_unsupported "Current e2fsprogs does not support set/show project ID"
log_unsupported "Current lsattr does not support set/show project ID"
fi
log_assert "pool upgrade for projectquota should work"

View File

@ -31,11 +31,6 @@
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/vdev_zaps/vdev_zaps.kshlib
# See issue: https://github.com/zfsonlinux/zfs/issues/6935
if is_linux; then
log_unsupported "Test case occasionally fails"
fi
log_assert "Per-vdev ZAPs are transferred properly on attach/detach"
DISK=${DISKS%% *}

View File

@ -46,10 +46,6 @@
verify_runnable "both"
if is_32bit; then
log_unsupported "Test case occasionally fails on 32-bit systems"
fi
function cleanup
{
for file in `find $TESTDIR -type f`; do

View File

@ -46,10 +46,6 @@
verify_runnable "both"
if is_32bit; then
log_unsupported "Test case runs slowly on 32 bit"
fi
function cleanup
{
for file in `find $TESTDIR -type f`; do

View File

@ -47,11 +47,6 @@
verify_runnable "global"
# See issue: https://github.com/zfsonlinux/zfs/issues/5848
if is_32bit; then
log_unsupported "Test case runs slowly on 32 bit"
fi
function cleanup
{
rm -rf $TESTDIR/*

View File

@ -44,10 +44,6 @@
verify_runnable "global"
if is_32bit; then
log_unsupported "Test case runs slowly on 32 bit"
fi
volsize=$(zfs get -H -o value volsize $TESTPOOL/$TESTVOL)
function cleanup
@ -111,7 +107,6 @@ if [ $retval -ne 0 ] ; then
# e2fsprogs-1.43.3 (Fedora 25 and older): returns 4
# e2fsprogs-1.43.4 (Fedora 26): returns 8
#
# https://github.com/zfsonlinux/zfs/issues/6297
if [ $retval -ne 4 -a $retval -ne 8 ] ; then
log_fail "fsck exited with wrong value $retval"
fi