Emit history events for 'zpool create'
History commands and events were being suppressed for the 'zpool create'
command since the history object did not yet exist. Create the object
earlier so this history doesn't get lost.

Split the pool_destroy event into pool_destroy and pool_export so they
may be distinguished.

Updated events_001_pos and events_002_pos test cases. They now check for
the expected history events and were reworked to be more reliable.

Reviewed-by: Nathaniel Clark <nathaniel.l.clark@intel.com>
Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #6712
Closes #6486
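As a quick sanity check of the new behavior, the following illustrative shell session (pool and device names are hypothetical examples, not taken from the change itself) shows where the previously missing records should now appear:

# 'zpool create' should now be recorded in the pool history and should
# generate pool_create/history_event sysevents; 'zpool export' should
# generate pool_export instead of pool_destroy.
zpool create tank mirror /dev/sdb /dev/sdc
zpool history tank | grep 'zpool create'
zpool events | grep -E 'pool_create|history_event'
zpool export tank
zpool events | grep pool_export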
parent 1cfdb0e6e4
commit d5e024cba2
@@ -1005,7 +1005,8 @@ extern int spa_history_get(spa_t *spa, uint64_t *offset, uint64_t *len_read,
     char *his_buf);
 extern int spa_history_log(spa_t *spa, const char *his_buf);
 extern int spa_history_log_nvl(spa_t *spa, nvlist_t *nvl);
-extern void spa_history_log_version(spa_t *spa, const char *operation);
+extern void spa_history_log_version(spa_t *spa, const char *operation,
+    dmu_tx_t *tx);
 extern void spa_history_log_internal(spa_t *spa, const char *operation,
     dmu_tx_t *tx, const char *fmt, ...);
 extern void spa_history_log_internal_ds(struct dsl_dataset *ds, const char *op,
@@ -101,6 +101,7 @@ extern "C" {
 #define ESC_ZFS_POOL_CREATE "pool_create"
 #define ESC_ZFS_POOL_DESTROY "pool_destroy"
 #define ESC_ZFS_POOL_IMPORT "pool_import"
+#define ESC_ZFS_POOL_EXPORT "pool_export"
 #define ESC_ZFS_VDEV_ADD "vdev_add"
 #define ESC_ZFS_VDEV_ATTACH "vdev_attach"
 #define ESC_ZFS_VDEV_CLEAR "vdev_clear"
@@ -3265,7 +3265,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
     * Log the fact that we booted up (so that we can detect if
     * we rebooted in the middle of an operation).
     */
-    spa_history_log_version(spa, "open");
+    spa_history_log_version(spa, "open", NULL);

    /*
     * Delete any inconsistent datasets.
@@ -4183,6 +4183,15 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,

    tx = dmu_tx_create_assigned(dp, txg);

+    /*
+     * Create the pool's history object.
+     */
+    if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
+        spa_history_create_obj(spa, tx);
+
+    spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
+    spa_history_log_version(spa, "create", tx);
+
    /*
     * Create the pool config object.
     */
@@ -4228,12 +4237,6 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
    VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
        spa->spa_meta_objset, obj));

-    /*
-     * Create the pool's history object.
-     */
-    if (version >= SPA_VERSION_ZPOOL_HISTORY)
-        spa_history_create_obj(spa, tx);
-
    /*
     * Generate some random noise for salted checksums to operate on.
     */
@@ -4278,9 +4281,6 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
    VERIFY0(spa_keystore_remove_mapping(spa, root_dsobj, FTAG));

    spa_config_sync(spa, B_FALSE, B_TRUE);
-    spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
-
-    spa_history_log_version(spa, "create");

    /*
     * Don't count references from objsets that are already closed
@@ -4467,7 +4467,7 @@ spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
     */
    spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);

-    spa_history_log_version(spa, "import");
+    spa_history_log_version(spa, "import", NULL);

    spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);

@@ -4670,7 +4670,10 @@ spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
    }

 export_spa:
-    spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
+    if (new_state == POOL_STATE_DESTROYED)
+        spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
+    else if (new_state == POOL_STATE_EXPORTED)
+        spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);

    if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
        spa_unload(spa);
@@ -89,17 +89,17 @@ spa_history_create_obj(spa_t *spa, dmu_tx_t *tx)
    spa_history_phys_t *shpp;
    objset_t *mos = spa->spa_meta_objset;

-    ASSERT(spa->spa_history == 0);
+    ASSERT0(spa->spa_history);
    spa->spa_history = dmu_object_alloc(mos, DMU_OT_SPA_HISTORY,
        SPA_OLD_MAXBLOCKSIZE, DMU_OT_SPA_HISTORY_OFFSETS,
        sizeof (spa_history_phys_t), tx);

-    VERIFY(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
+    VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_HISTORY, sizeof (uint64_t), 1,
-        &spa->spa_history, tx) == 0);
+        &spa->spa_history, tx));

-    VERIFY(0 == dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
-    ASSERT(dbp->db_size >= sizeof (spa_history_phys_t));
+    VERIFY0(dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
+    ASSERT3U(dbp->db_size, >=, sizeof (spa_history_phys_t));

    shpp = dbp->db_data;
    dmu_buf_will_dirty(dbp, tx);
@@ -530,7 +530,7 @@ log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
     * initialized yet, so don't bother logging the internal events.
     * Likewise if the pool is not writeable.
     */
-    if (tx->tx_txg == TXG_INITIAL || !spa_writeable(spa)) {
+    if (spa_is_initializing(spa) || !spa_writeable(spa)) {
        fnvlist_free(nvl);
        return;
    }
@@ -616,11 +616,11 @@ spa_history_log_internal_dd(dsl_dir_t *dd, const char *operation,
 }

 void
-spa_history_log_version(spa_t *spa, const char *operation)
+spa_history_log_version(spa_t *spa, const char *operation, dmu_tx_t *tx)
 {
    utsname_t *u = utsname();

-    spa_history_log_internal(spa, operation, NULL,
+    spa_history_log_internal(spa, operation, tx,
        "pool version %llu; software version %llu/%llu; uts %s %s %s %s",
        (u_longlong_t)spa_version(spa), SPA_VERSION, ZPL_VERSION,
        u->nodename, u->release, u->version, u->machine);
@@ -179,6 +179,8 @@ if is_linux; then
    DEV_MPATHDIR="/dev/mapper"

    ZEDLET_DIR="/var/tmp/zed"
+    ZED_LOG="$ZEDLET_DIR/zed.log"
+    ZED_DEBUG_LOG="$ZEDLET_DIR/zed.debug.log"
    VDEVID_CONF="$ZEDLET_DIR/vdev_id.conf"
    VDEVID_CONF_ETC="/etc/zfs/vdev_id.conf"

@@ -200,4 +202,4 @@ else
 fi
 export unpack_opts pack_opts verbose unpack_preserve pack_preserve \
    ZVOL_DEVDIR ZVOL_RDEVDIR NEWFS_DEFAULT_FS DEV_RDSKDIR DEV_MPATHDIR \
-    ZEDLET_DIR VDEVID_CONF VDEVID_CONF_ETC
+    ZEDLET_DIR ZED_LOG ZED_DEBUG_LOG VDEVID_CONF VDEVID_CONF_ETC
@@ -3361,11 +3361,14 @@ function zed_setup

    # Customize the zed.rc file to enable the full debug log.
    log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
-    echo "ZED_DEBUG_LOG=$ZEDLET_DIR/zed.debug.log" >>$ZEDLET_DIR/zed.rc
+    echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc

+    # Scripts must only be user writable.
+    saved_umask=$(umask)
+    log_must umask 0022
    log_must cp ${ZEDLET_LIBEXEC_DIR}/all-syslog.sh $ZEDLET_DIR
    log_must cp ${ZEDLET_LIBEXEC_DIR}/all-debug.sh $ZEDLET_DIR
-    log_must touch $ZEDLET_DIR/zed.debug.log
+    log_must umask $saved_umask
 }

 #
@@ -3381,10 +3384,9 @@ function zed_cleanup
    log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
    log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
    log_must rm -f ${ZEDLET_DIR}/all-debug.sh
-    log_must rm -f ${ZEDLET_DIR}/zed.pid
-    log_must rm -f ${ZEDLET_DIR}/zedlog
-    log_must rm -f ${ZEDLET_DIR}/zed.debug.log
    log_must rm -f ${ZEDLET_DIR}/state
+    log_must rm -f $ZED_LOG
+    log_must rm -f $ZED_DEBUG_LOG
    log_must rm -f $VDEVID_CONF_ETC
    log_must rm -f $VDEVID_CONF
    rmdir $ZEDLET_DIR
@@ -3412,9 +3414,10 @@ function zed_start

    log_note "Starting ZED"
    # run ZED in the background and redirect foreground logging
-    # output to zedlog
+    # output to $ZED_LOG.
+    log_must truncate -s 0 $ZED_DEBUG_LOG
    log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid" \
-        "-s $ZEDLET_DIR/state 2>${ZEDLET_DIR}/zedlog &"
+        "-s $ZEDLET_DIR/state 2>$ZED_LOG &"

    return 0
 }
@@ -3431,7 +3434,9 @@ function zed_stop
    log_note "Stopping ZED"
    if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
        zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
-        log_must kill $zedpid
+        kill $zedpid
+        wait $zedpid
+        rm -f ${ZEDLET_DIR}/zed.pid
    fi

    return 0
@@ -55,14 +55,22 @@ log_assert "Verify zpool sub-commands generate expected events"
 log_onexit cleanup

 log_must truncate -s $MINVDEVSIZE $VDEV1 $VDEV2 $VDEV3 $VDEV4

+log_must zpool events -c
 log_must zed_start

 # Create a mirrored pool with two devices.
 run_and_verify -p "$MPOOL" \
-    -e "sysevent.fs.zfs.config_sync" \
+    -e "sysevent.fs.zfs.pool_create" \
+    -e "sysevent.fs.zfs.history_event" \
+    -e "sysevent.fs.zfs.config_sync" \
    "zpool create $MPOOL mirror $VDEV1 $VDEV2"

+# Set a pool property.
+run_and_verify -p "$MPOOL" \
+    -e "sysevent.fs.zfs.history_event" \
+    "zpool set comment=string $MPOOL"
+
 # Add a cache device then remove it.
 run_and_verify -p "$MPOOL" \
    -e "sysevent.fs.zfs.config_sync" \
@@ -86,15 +94,22 @@ run_and_verify -p "$MPOOL"\
    -e "resource.fs.zfs.statechange" \
    -e "sysevent.fs.zfs.config_sync" \
    "zpool offline $MPOOL $VDEV1"
-run_and_verify -p "$MPOOL" \
+run_and_verify -p "$MPOOL" -d 10 \
    -e "resource.fs.zfs.statechange" \
    -e "sysevent.fs.zfs.vdev_online" \
+    -e "sysevent.fs.zfs.resilver_start" \
+    -e "sysevent.fs.zfs.resilver_finish" \
+    -e "sysevent.fs.zfs.history_event" \
    -e "sysevent.fs.zfs.config_sync" \
    "zpool online $MPOOL $VDEV1"

 # Attach then detach a device from the mirror.
-run_and_verify -p "$MPOOL" \
+run_and_verify -p "$MPOOL" -d 10 \
    -e "sysevent.fs.zfs.vdev_attach" \
+    -e "sysevent.fs.zfs.resilver_start" \
+    -e "sysevent.fs.zfs.resilver_finish" \
+    -e "sysevent.fs.zfs.history_event" \
+    -e "sysevent.fs.zfs.config_sync" \
    "zpool attach $MPOOL $VDEV1 $VDEV4"
 run_and_verify -p "$MPOOL" \
    -e "sysevent.fs.zfs.vdev_remove" \
@@ -102,26 +117,30 @@ run_and_verify -p "$MPOOL" \
    "zpool detach $MPOOL $VDEV4"

 # Replace a device
-run_and_verify -p "$MPOOL" \
+run_and_verify -p "$MPOOL" -d 10 \
    -e "sysevent.fs.zfs.vdev_attach" \
+    -e "sysevent.fs.zfs.resilver_start" \
+    -e "sysevent.fs.zfs.resilver_finish" \
    -e "sysevent.fs.zfs.vdev_remove" \
+    -e "sysevent.fs.zfs.history_event" \
    -e "sysevent.fs.zfs.config_sync" \
    "zpool replace -f $MPOOL $VDEV1 $VDEV4"

 # Scrub a pool.
-run_and_verify -p "$MPOOL" \
+run_and_verify -p "$MPOOL" -d 10 \
    -e "sysevent.fs.zfs.scrub_start" \
    -e "sysevent.fs.zfs.scrub_finish" \
+    -e "sysevent.fs.zfs.history_event" \
    "zpool scrub $MPOOL"

-# Export then import a pool (may change to a pool_export event)
+# Export then import a pool
 run_and_verify -p "$MPOOL" \
-    -e "sysevent.fs.zfs.pool_destroy" \
+    -e "sysevent.fs.zfs.pool_export" \
    -e "sysevent.fs.zfs.config_sync" \
    "zpool export $MPOOL"
 run_and_verify -p "$MPOOL" \
    -e "sysevent.fs.zfs.pool_import" \
    -e "sysevent.fs.zfs.history_event" \
    -e "sysevent.fs.zfs.config_sync" \
    "zpool import -d $TEST_BASE_DIR $MPOOL"

@@ -29,7 +29,7 @@
 # Verify ZED handles missed events from a pool when starting.
 #
 # STRATEGY:
-# 1. Create a pool and generate some events.
+# 1. Clear the events and create a pool to generate some events.
 # 2. Start the ZED and verify it handles missed events.
 # 3. Stop the ZED
 # 4. Generate additional events.
@@ -51,7 +51,6 @@ function cleanup
    done

    log_must rm -f $TMP_EVENTS_ZED $TMP_EVENTS_ZED
-    log_must rm -f $ZEDLET_DIR/zed.debug.log.old
    log_must zed_stop
 }

@@ -61,24 +60,25 @@ log_onexit cleanup
 log_must truncate -s $MINVDEVSIZE $VDEV1 $VDEV2

 # 1. Create a pool and generate some events.
-log_must cp -f $ZEDLET_DIR/zed.debug.log $ZEDLET_DIR/zed.debug.log.old
+log_must truncate -s 0 $ZED_DEBUG_LOG
+log_must zpool events -c
 log_must zpool create $MPOOL mirror $VDEV1 $VDEV2

 # 2. Start the ZED and verify it handles missed events.
 log_must zed_start
-log_must sleep 1
-diff $ZEDLET_DIR/zed.debug.log.old $ZEDLET_DIR/zed.debug.log | \
-    grep "^> " | sed 's/^> //g' >$TMP_EVENTS_ZED
-log_must awk -v event="sysevent.fs.zfs.pool_create" \
+log_must file_wait $ZED_DEBUG_LOG
+log_must cp $ZED_DEBUG_LOG $TMP_EVENTS_ZED
+
+awk -v event="sysevent.fs.zfs.pool_create" \
    'BEGIN{FS="\n"; RS=""} $0 ~ event { print $0 }' \
    $TMP_EVENTS_ZED >$TMP_EVENT_ZED
 log_must grep -q "^ZEVENT_POOL=$MPOOL" $TMP_EVENT_ZED

 # 3. Stop the ZED
 zed_stop
+log_must truncate -s 0 $ZED_DEBUG_LOG

 # 4. Generate additional events.
-log_must cp -f $ZEDLET_DIR/zed.debug.log $ZEDLET_DIR/zed.debug.log.old
 log_must zpool offline $MPOOL $VDEV1
 log_must zpool online $MPOOL $VDEV1
 log_must zpool scrub $MPOOL
@@ -90,9 +90,8 @@ done

 # 5. Start the ZED and verify it only handled the new missed events.
 log_must zed_start
-log_must sleep 20
-diff $ZEDLET_DIR/zed.debug.log.old $ZEDLET_DIR/zed.debug.log | \
-    grep "^> " | sed 's/^> //g' >$TMP_EVENTS_ZED
+log_must file_wait $ZED_DEBUG_LOG 15
+log_must cp $ZED_DEBUG_LOG $TMP_EVENTS_ZED

 log_mustnot grep -q "sysevent.fs.zfs.pool_create" $TMP_EVENTS_ZED
 log_must grep -q "sysevent.fs.zfs.vdev_online" $TMP_EVENTS_ZED
@@ -27,13 +27,39 @@
 . $STF_SUITE/include/libtest.shlib
 . $STF_SUITE/tests/functional/events/events.cfg

+#
+# Wait for up to 'timeout' seconds for the 'file' to settle, i.e.
+# not be updated for a period of 'delay' seconds.
+#
+function file_wait # file delay timeout
+{
+    file=$1
+    delay=${2:-3}
+    timeout=${3:-120}
+
+    SECONDS=0
+
+    while [ $(( $(date +%s) - $(stat -c %Y $file) )) -lt $delay ]; do
+        if [[ $SECONDS -gt $timeout ]]; then
+            return 1
+        fi
+
+        sleep 1
+    done
+
+    return 0;
+}
+
 function run_and_verify
 {
-    typeset event pool
+    typeset delay event pool zedlog
    set -A events

-    while getopts "e:p:z:" opt; do
+    while getopts "d:e:p:z:" opt; do
        case $opt in
+            d)
+                delay=$OPTARG
+                ;;
            e)
                events[${#events[*]}+1]=$OPTARG
                ;;
@@ -48,7 +74,8 @@ function run_and_verify
    shift $(($OPTIND - 1))

    pool=${pool:-$TESTPOOL}
-    zedlog=${zedlog:-$ZEDLET_DIR/zed.debug.log}
+    delay=${delay:-3}
+    zedlog=${zedlog:-$ZED_DEBUG_LOG}
    fullcmd="$1"
    cmd=$(echo $fullcmd | awk '{print $1}')
    subcmd=$(echo $fullcmd | awk '{print $2}')
@@ -57,21 +84,28 @@ function run_and_verify
    [[ $cmd == "zpool" || $cmd == "zfs" ]] || \
        log_fail "run_and_verify called with \"$cmd ($fullcmd)\""

-    # Run the command as provided and collect the new events.
-    log_must zpool events -c
-    if [[ -f $zedlog ]]; then
-        cp -f $zedlog $zedlog.old
-    fi
+    log_note "Checking events for command: '$fullcmd'"
+
+    # Remove any previous events from the logs.
+    log_must zpool events -c
+    log_must truncate -s 0 $zedlog
+
+    # Run the command as provided.
    log_must eval "$fullcmd"

-    log_must zpool events > $TMP_EVENTS 2>/dev/null
-    log_must zpool events -v > $TMP_EVENTS_FULL 2>/dev/null
-    if [[ -f $zedlog ]]; then
-        sleep 5 # Brief delay for the ZED to handle the event.
-        diff $zedlog.old $zedlog | grep "^> " | sed 's/^> //g' \
-            >$TMP_EVENTS_ZED
-    fi
+    # Collect the new events and verify there are some.
+    log_must zpool sync -f
+    log_must file_wait $zedlog $delay
+    log_must cp $zedlog $TMP_EVENTS_ZED
+    log_must eval "zpool events >$TMP_EVENTS 2>/dev/null"
+    log_must eval "zpool events -v > $TMP_EVENTS_FULL 2>/dev/null"
+
+    log_must test -s $TMP_EVENTS
+    log_must test -s $TMP_EVENTS_FULL
+    log_must test -s $TMP_EVENTS_ZED
+
    log_note "Events generated:"
    cat $TMP_EVENTS
+
    # Verify all the expected events appear in the log.
    for event in ${events[*]}; do
@@ -80,19 +114,18 @@ function run_and_verify
        log_must grep -q "$event" $TMP_EVENTS

        # Verify the event is in the verbose output with pool name.
-        log_must awk -v event="$event" \
+        awk -v event="$event" \
            'BEGIN{FS="\n"; RS=""} $0 ~ event { print $0 }' \
            $TMP_EVENTS_FULL >$TMP_EVENT_FULL
        log_must grep -q "pool = \"$pool\"" $TMP_EVENT_FULL

        # Verify the event was received by the ZED and logged.
-        log_must awk -v event="$event" \
+        awk -v event="$event" \
            'BEGIN{FS="\n"; RS=""} $0 ~ event { print $0 }' \
            $TMP_EVENTS_ZED >$TMP_EVENT_ZED
        log_must grep -q "^ZEVENT_POOL=$pool" $TMP_EVENT_ZED
-
    done

-    log_must rm -f $TMP_EVENTS $TMP_EVENTS_FULL $TMP_EVENT_FULL \
-        $TMP_EVENTS_ZED $TMP_EVENT_ZED $zedlog.old
+    rm -f $TMP_EVENTS $TMP_EVENTS_FULL $TMP_EVENT_FULL \
+        $TMP_EVENTS_ZED $TMP_EVENT_ZED
 }
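For reference, a hypothetical usage sketch of the helpers introduced above (the values shown are examples, not taken from the tests):

# Wait until $ZED_DEBUG_LOG has not been modified for 10 seconds,
# giving up after the default 120 second timeout.
log_must file_wait $ZED_DEBUG_LOG 10

# Give commands that trigger a resilver extra settle time via -d.
run_and_verify -p "$MPOOL" -d 10 \
    -e "sysevent.fs.zfs.resilver_finish" \
    "zpool online $MPOOL $VDEV1"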