From 40abc63c4043a6b51a9ab656d27d88503cc775d8 Mon Sep 17 00:00:00 2001 From: Brian Behlendorf Date: Sun, 18 Apr 2021 21:58:36 -0700 Subject: [PATCH] ZTS: Improve redundancy test scripts - Add additional logging to provide more information about why the test failed. This includes logging more of the individual commands and the contents and differences of the record files on failure. - Updated get_vdevs() to properly exclude all top-level vdevs including raidz3 and draid[1-3]. - Replaced gnudd with dd. This is the only remaining place in the test suite where gnudd is used and it shouldn't be needed. - The refill_test_env function expects the pool as the first argument but never sets the pool variable. - Only fill the test pools to 50% of capacity instead of 75% to help speed up the tests. - Fix replace_missing_devs() calculation, MINDEVSIZE should be MINVDEVSIZE. - Fix damage_devs() so it overwrites almost all of the device so we're guaranteed to damage filesystem blocks. - redundancy_stripe.ksh should not use log_mustnot to check if the pool is healthy since the return value may be misinterpreted. Just perform a normal conditional check and log the failure. 
Reviewed-by: George Melikov Signed-off-by: Brian Behlendorf Closes #11906 --- .../functional/redundancy/redundancy.kshlib | 24 ++++++++++++------- .../redundancy/redundancy_draid3.ksh | 2 +- .../redundancy/redundancy_stripe.ksh | 4 +++- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib b/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib index 26ded8720d..baee8269b1 100644 --- a/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib +++ b/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib @@ -146,7 +146,7 @@ function setup_test_env typeset -i i=0 typeset file=$TESTDIR/file typeset -i limit - (( limit = $(get_prop available $pool) / 4 )) + (( limit = $(get_prop available $pool) / 2 )) while true ; do [[ $(get_prop available $pool) -lt $limit ]] && break @@ -162,6 +162,7 @@ function setup_test_env function refill_test_env { log_note "Re-filling the filesystem ..." + typeset pool=$1 typeset -i ret=0 typeset -i i=0 typeset mntpnt @@ -217,8 +218,13 @@ function is_data_valid { typeset pool=$1 + log_must zpool scrub -w $pool + record_data $pool $PST_RECORD_FILE if ! 
diff $PRE_RECORD_FILE $PST_RECORD_FILE > /dev/null 2>&1; then + log_must cat $PRE_RECORD_FILE + log_must cat $PST_RECORD_FILE + diff -u $PRE_RECORD_FILE $PST_RECORD_FILE return 1 fi @@ -237,7 +243,7 @@ function get_vdevs #pool cnt typeset -i cnt=$2 typeset all_devs=$(zpool iostat -v $pool | awk '{print $1}'| \ - egrep -v "^pool$|^capacity$|^mirror$|^raidz1$|^raidz2$|---" | \ + egrep -v "^pool$|^capacity$|^mirror$|^raidz1$|^raidz2$|^raidz3$|^draid1.*|^draid2.*|^draid3.*|---" | \ egrep -v "/old$|^$pool$") typeset -i i=0 typeset vdevs @@ -265,9 +271,9 @@ function replace_missing_devs typeset vdev for vdev in $@; do - log_must gnudd if=/dev/zero of=$vdev \ - bs=1024k count=$(($MINDEVSIZE / (1024 * 1024))) \ - oflag=fdatasync + log_must dd if=/dev/zero of=$vdev \ + bs=1024k count=$((MINVDEVSIZE / (1024 * 1024))) \ + conv=fdatasync log_must zpool replace -wf $pool $vdev $vdev done } @@ -286,19 +292,19 @@ function damage_devs typeset -i cnt=$2 typeset label="$3" typeset vdevs - typeset -i bs_count=$((64 * 1024)) + typeset -i bs_count=$(((MINVDEVSIZE / 1024) - 4096)) vdevs=$(get_vdevs $pool $cnt) typeset dev if [[ -n $label ]]; then for dev in $vdevs; do - dd if=/dev/zero of=$dev seek=512 bs=1024 \ + log_must dd if=/dev/zero of=$dev seek=512 bs=1024 \ count=$bs_count conv=notrunc >/dev/null 2>&1 done else for dev in $vdevs; do - dd if=/dev/zero of=$dev bs=1024 count=$bs_count \ - conv=notrunc >/dev/null 2>&1 + log_must dd if=/dev/zero of=$dev bs=1024 \ + count=$bs_count conv=notrunc >/dev/null 2>&1 done fi diff --git a/tests/zfs-tests/tests/functional/redundancy/redundancy_draid3.ksh b/tests/zfs-tests/tests/functional/redundancy/redundancy_draid3.ksh index bddd150d0c..d4c823ed9b 100755 --- a/tests/zfs-tests/tests/functional/redundancy/redundancy_draid3.ksh +++ b/tests/zfs-tests/tests/functional/redundancy/redundancy_draid3.ksh @@ -42,7 +42,7 @@ # 2. Create draid3 pool based on the virtual disk files. # 3. Fill the filesystem with directories and files. # 4. 
Record all the files and directories checksum information. -# 5. Damaged at most two of the virtual disk files. +# 5. Damaged at most three of the virtual disk files. # 6. Verify the data is correct to prove draid3 can withstand 3 devices # are failing. # diff --git a/tests/zfs-tests/tests/functional/redundancy/redundancy_stripe.ksh b/tests/zfs-tests/tests/functional/redundancy/redundancy_stripe.ksh index 7ee51051ea..b2c4a85feb 100755 --- a/tests/zfs-tests/tests/functional/redundancy/redundancy_stripe.ksh +++ b/tests/zfs-tests/tests/functional/redundancy/redundancy_stripe.ksh @@ -57,6 +57,8 @@ setup_test_env $TESTPOOL "" $cnt damage_devs $TESTPOOL 1 "keep_label" log_must zpool scrub -w $TESTPOOL -log_mustnot is_healthy $TESTPOOL +if is_healthy $TESTPOOL ; then + log_fail "$pool should not be healthy." +fi log_pass "Striped pool has no data redundancy as expected."