zts: test single-disk pool resumes properly after disk pull

A single-disk pool should suspend when its disk fails and hold the IO.
When the disk is returned, the pool should resume, and the held IO should
be reissued, leaving everything in good shape.

Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Don Brady <don.brady@klarasystems.com>
Rob Norris 2024-05-09 20:22:21 +10:00 committed by Tony Hutter
parent c950c5d369
commit 25c4271d2f
4 changed files with 105 additions and 1 deletion
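
In outline, the scenario the new test automates looks like this (an illustrative
sketch only; "tank" and $sd stand in for the test pool and the scsi_debug-backed
disk that the test itself creates):

    zpool create tank $sd
    echo offline > /sys/block/$sd/device/state   # simulate pulling the disk
    cp somefile /tank/file                       # appears to succeed, but only in memory
    cat /proc/spl/kstat/zfs/tank/state           # eventually reports SUSPENDED
    echo running > /sys/block/$sd/device/state   # return the disk
    zpool clear tank                             # reopen the vdev, resume, replay the IO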


@@ -121,7 +121,7 @@ tests = ['auto_offline_001_pos', 'auto_online_001_pos', 'auto_online_002_pos',
     'auto_replace_001_pos', 'auto_replace_002_pos', 'auto_spare_001_pos',
     'auto_spare_002_pos', 'auto_spare_multiple', 'auto_spare_ashift',
     'auto_spare_shared', 'decrypt_fault', 'decompress_fault',
-    'scrub_after_resilver', 'zpool_status_-s']
+    'scrub_after_resilver', 'suspend_resume_single', 'zpool_status_-s']
 tags = ['functional', 'fault']

 [tests/functional/features/large_dnode:Linux]


@@ -379,6 +379,7 @@ if os.environ.get('CI') == 'true':
         'fault/auto_replace_002_pos': ['SKIP', ci_reason],
         'fault/auto_spare_ashift': ['SKIP', ci_reason],
         'fault/auto_spare_shared': ['SKIP', ci_reason],
+        'fault/suspend_resume_single': ['SKIP', ci_reason],
         'procfs/pool_state': ['SKIP', ci_reason],
     })


@@ -1476,6 +1476,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
 	functional/fault/decompress_fault.ksh \
 	functional/fault/decrypt_fault.ksh \
 	functional/fault/scrub_after_resilver.ksh \
+	functional/fault/suspend_resume_single.ksh \
 	functional/fault/setup.ksh \
 	functional/fault/zpool_status_-s.ksh \
 	functional/features/async_destroy/async_destroy_001_pos.ksh \


@@ -0,0 +1,102 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2024, Klara Inc.
#

. $STF_SUITE/include/libtest.shlib

set -x

DATAFILE="$TMPDIR/datafile"

function cleanup
{
	destroy_pool $TESTPOOL
	unload_scsi_debug
	rm -f $DATAFILE
}

log_onexit cleanup

log_assert "ensure single-disk pool resumes properly after suspend and clear"

# create a file, and take a checksum, so we can compare later
log_must dd if=/dev/random of=$DATAFILE bs=128K count=1
typeset sum1=$(cat $DATAFILE | md5sum)

# make a debug device that we can "unplug"
load_scsi_debug 100 1 1 1 '512b'
sd=$(get_debug_device)

# create a single-device pool
log_must zpool create $TESTPOOL $sd
log_must zpool sync

# "pull" the disk
log_must eval "echo offline > /sys/block/$sd/device/state"

# copy data onto the pool. it'll appear to succeed, but only be in memory
log_must cp $DATAFILE /$TESTPOOL/file

# wait until sync starts, and the pool suspends
log_note "waiting for pool to suspend"
typeset -i tries=10
until [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) == "SUSPENDED" ]] ; do
	if ((tries-- == 0)); then
		log_fail "pool didn't suspend"
	fi
	sleep 1
done

# return the disk
log_must eval "echo running > /sys/block/$sd/device/state"

# clear the error states, which should reopen the vdev, get the pool back
# online, and replay the failed IO
log_must zpool clear $TESTPOOL

# wait a while for everything to sync out. if something is going to go wrong,
# this is where it will happen
log_note "giving pool time to settle and complete txg"
sleep 7

# if the pool suspended, then everything is bad
if [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) == "SUSPENDED" ]] ; then
	log_fail "pool suspended"
fi

# export the pool, to make sure it exports clean, and also to clear the file
# out of the cache
log_must zpool export $TESTPOOL

# import the pool
log_must zpool import $TESTPOOL

# sum the file we wrote earlier
typeset sum2=$(cat /$TESTPOOL/file | md5sum)

# make sure the checksums match
log_must test "$sum1" = "$sum2"

log_pass "single-disk pool resumes properly after disk suspend and clear"
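
With the runfile, Makefile, and CI skip list updated above, the new case can also
be run on its own through the ZTS wrapper, along the lines of the following (the
exact invocation depends on how the suite is installed):

    ./scripts/zfs-tests.sh -t functional/fault/suspend_resume_single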