From d34636151514829ed9bbd330bbe7dad79a1c817f Mon Sep 17 00:00:00 2001
From: Paul Dagnelie
Date: Wed, 1 Dec 2021 09:38:53 -0800
Subject: [PATCH] Add zfs-test facility to automatically rerun failing tests

This was a project proposed as part of the Quality theme for the
hackathon for the 2021 OpenZFS Developer Summit. The idea is to improve
the usability of the automated tests that run when a PR is created by
automatically rerunning failing tests, making flaky tests less
impactful.

Reviewed-by: John Kennedy
Reviewed-by: Tony Nguyen
Signed-off-by: Paul Dagnelie
Closes #12740
---
 .github/workflows/zfs-tests-functional.yml |  2 +-
 .github/workflows/zfs-tests-sanity.yml     |  2 +-
 scripts/zfs-tests.sh                       | 38 +++++++++-
 tests/test-runner/bin/test-runner.py.in    | 85 +++++++++++++++++----
 tests/test-runner/bin/zts-report.py.in     | 46 ++++++++++--
 5 files changed, 144 insertions(+), 29 deletions(-)

diff --git a/.github/workflows/zfs-tests-functional.yml b/.github/workflows/zfs-tests-functional.yml
index 584ad2785c..51328918e1 100644
--- a/.github/workflows/zfs-tests-functional.yml
+++ b/.github/workflows/zfs-tests-functional.yml
@@ -63,7 +63,7 @@ jobs:
         sudo rm -rf "$AGENT_TOOLSDIRECTORY"
     - name: Tests
       run: |
-        /usr/share/zfs/zfs-tests.sh -v -s 3G
+        /usr/share/zfs/zfs-tests.sh -vR -s 3G
     - name: Prepare artifacts
       if: failure()
       run: |
diff --git a/.github/workflows/zfs-tests-sanity.yml b/.github/workflows/zfs-tests-sanity.yml
index c4d6af222b..48ebaae09c 100644
--- a/.github/workflows/zfs-tests-sanity.yml
+++ b/.github/workflows/zfs-tests-sanity.yml
@@ -59,7 +59,7 @@ jobs:
         sudo rm -rf "$AGENT_TOOLSDIRECTORY"
     - name: Tests
       run: |
-        /usr/share/zfs/zfs-tests.sh -v -s 3G -r sanity
+        /usr/share/zfs/zfs-tests.sh -vR -s 3G -r sanity
     - name: Prepare artifacts
       if: failure()
       run: |
diff --git a/scripts/zfs-tests.sh b/scripts/zfs-tests.sh
index ac28788582..60499e09e2 100755
--- a/scripts/zfs-tests.sh
+++ b/scripts/zfs-tests.sh
@@ -21,6 +21,10 @@
 # CDDL HEADER END
 #
 
+#
+# Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
+#
+
 BASE_DIR=$(dirname "$0")
 SCRIPT_COMMON=common.sh
 if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
@@ -48,6 +52,7 @@ ITERATIONS=1
 ZFS_DBGMSG="$STF_SUITE/callbacks/zfs_dbgmsg.ksh"
 ZFS_DMESG="$STF_SUITE/callbacks/zfs_dmesg.ksh"
 UNAME=$(uname -s)
+RERUN=""
 
 # Override some defaults if on FreeBSD
 if [ "$UNAME" = "FreeBSD" ] ; then
@@ -322,6 +327,7 @@ OPTIONS:
     -f          Use files only, disables block device tests
     -S          Enable stack tracer (negative performance impact)
     -c          Only create and populate constrained path
+    -R          Automatically rerun failing tests
     -n NFSFILE  Use the nfsfile to determine the NFS configuration
     -I NUM      Number of iterations
     -d DIR      Use DIR for files and loopback devices
@@ -348,7 +354,7 @@ $0 -x
 EOF
 }
 
-while getopts 'hvqxkfScn:d:s:r:?t:T:u:I:' OPTION; do
+while getopts 'hvqxkfScRn:d:s:r:?t:T:u:I:' OPTION; do
     case $OPTION in
     h)
         usage
@@ -376,6 +382,9 @@ while getopts 'hvqxkfScn:d:s:r:?t:T:u:I:' OPTION; do
         constrain_path
         exit
         ;;
+    R)
+        RERUN="yes"
+        ;;
     n)
         nfsfile=$OPTARG
         [ -f "$nfsfile" ] || fail "Cannot read file: $nfsfile"
@@ -694,12 +703,35 @@ ${TEST_RUNNER} ${QUIET:+-q} \
     -i "${STF_SUITE}" \
     -I "${ITERATIONS}" \
     2>&1 | tee "$RESULTS_FILE"
-
 #
 # Analyze the results.
 #
-${ZTS_REPORT} "$RESULTS_FILE" >"$REPORT_FILE"
+${ZTS_REPORT} ${RERUN:+--no-maybes} "$RESULTS_FILE" >"$REPORT_FILE"
 RESULT=$?
+
+if [ "$RESULT" -eq "2" ] && [ -n "$RERUN" ]; then
+    MAYBES="$($ZTS_REPORT --list-maybes)"
+    TEMP_RESULTS_FILE=$(mktemp -u -t zts-results-tmp.XXXXX -p "$FILEDIR")
+    TEST_LIST=$(mktemp -u -t test-list.XXXXX -p "$FILEDIR")
+    grep "^Test:.*\[FAIL\]" "$RESULTS_FILE" >"$TEMP_RESULTS_FILE"
+    for test_name in $MAYBES; do
+        grep "$test_name " "$TEMP_RESULTS_FILE" >>"$TEST_LIST"
+    done
+    ${TEST_RUNNER} ${QUIET:+-q} \
+        -c "${RUNFILES}" \
+        -T "${TAGS}" \
+        -i "${STF_SUITE}" \
+        -I "${ITERATIONS}" \
+        -l "${TEST_LIST}" \
+        2>&1 | tee "$RESULTS_FILE"
+    #
+    # Analyze the results.
+    #
+    ${ZTS_REPORT} --no-maybes "$RESULTS_FILE" >"$REPORT_FILE"
+    RESULT=$?
+fi
+
+
 cat "$REPORT_FILE"
 
 RESULTS_DIR=$(awk '/^Log directory/ { print $3 }' "$RESULTS_FILE")
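The shell changes above are the heart of the facility: when -R is set,
the first zts-report pass runs with --no-maybes, and its exit status 2
(meaning every unexpected failure is on the known-flaky "maybe" list)
triggers a single rerun of exactly those tests, whose report then
becomes the final result. A minimal way to exercise this locally,
assuming the suite is installed under /usr/share/zfs as in the workflow
files above (a sketch, not part of the patch):

    # Run the sanity suite, automatically rerunning known-flaky failures:
    /usr/share/zfs/zfs-tests.sh -vR -s 3G -r sanity

    # Print the "maybe" list that drives the rerun decision (the
    # installed path of zts-report.py is assumed here):
    /usr/share/zfs/test-runner/bin/zts-report.py --list-maybes
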
diff --git a/tests/test-runner/bin/test-runner.py.in b/tests/test-runner/bin/test-runner.py.in
index bbabf247c1..d32e05c453 100755
--- a/tests/test-runner/bin/test-runner.py.in
+++ b/tests/test-runner/bin/test-runner.py.in
@@ -27,6 +27,7 @@ except ImportError:
 import os
 import sys
 import ctypes
+import re
 
 from datetime import datetime
 from optparse import OptionParser
@@ -495,6 +496,9 @@ Tags: %s
                self.timeout, self.user, self.pre, pre_user, self.post,
                post_user, self.failsafe, failsafe_user, self.tags)
 
+    def filter(self, keeplist):
+        self.tests = [x for x in self.tests if x in keeplist]
+
     def verify(self):
         """
         Check the pre/post/failsafe scripts, user and tests in this TestGroup.
@@ -656,6 +660,24 @@ class TestRun(object):
 
             testgroup.verify()
 
+    def filter(self, keeplist):
+        for group in list(self.testgroups.keys()):
+            if group not in keeplist:
+                del self.testgroups[group]
+                continue
+
+            g = self.testgroups[group]
+
+            if g.pre and os.path.basename(g.pre) in keeplist[group]:
+                continue
+
+            g.filter(keeplist[group])
+
+        for test in list(self.tests.keys()):
+            directory, base = os.path.split(test)
+            if directory not in keeplist or base not in keeplist[directory]:
+                del self.tests[test]
+
     def read(self, options):
         """
         Read in the specified runfiles, and apply the TestRun properties
@@ -743,10 +765,18 @@ class TestRun(object):
 
         for test in sorted(self.tests.keys()):
             config.add_section(test)
+            for prop in Test.props:
+                if prop not in self.props:
+                    config.set(test, prop,
+                               getattr(self.tests[test], prop))
 
         for testgroup in sorted(self.testgroups.keys()):
             config.add_section(testgroup)
             config.set(testgroup, 'tests', self.testgroups[testgroup].tests)
+            for prop in TestGroup.props:
+                if prop not in self.props:
+                    config.set(testgroup, prop,
+                               getattr(self.testgroups[testgroup], prop))
 
         try:
             with open(options.template, 'w') as f:
@@ -796,7 +826,7 @@ class TestRun(object):
             return
 
         global LOG_FILE_OBJ
-        if options.cmd != 'wrconfig':
+        if not options.template:
             try:
                 old = os.umask(0)
                 os.makedirs(self.outputdir, mode=0o777)
@@ -939,17 +969,37 @@ def find_tests(testrun, options):
             testrun.addtest(p, options)
 
 
+def filter_tests(testrun, options):
+    try:
+        fh = open(options.logfile, "r")
+    except Exception as e:
+        fail('%s' % e)
+
+    failed = {}
+    while True:
+        line = fh.readline()
+        if not line:
+            break
+        m = re.match(r'Test: .*(tests/.*)/(\S+).*\[FAIL\]', line)
+        if not m:
+            continue
+        group, test = m.group(1, 2)
+        try:
+            failed[group].append(test)
+        except KeyError:
+            failed[group] = [test]
+    fh.close()
+
+    testrun.filter(failed)
+
+
 def fail(retstr, ret=1):
     print('%s: %s' % (sys.argv[0], retstr))
     exit(ret)
 
 
 def options_cb(option, opt_str, value, parser):
-    path_options = ['outputdir', 'template', 'testdir']
-
-    if option.dest == 'runfiles' and '-w' in parser.rargs or \
-       option.dest == 'template' and '-c' in parser.rargs:
-        fail('-c and -w are mutually exclusive.')
+    path_options = ['outputdir', 'template', 'testdir', 'logfile']
 
     if opt_str in parser.rargs:
         fail('%s may only be specified once.' % opt_str)
@@ -957,8 +1007,6 @@ def options_cb(option, opt_str, value, parser):
     if option.dest == 'runfiles':
         parser.values.cmd = 'rdconfig'
         value = set(os.path.abspath(p) for p in value.split(','))
-    if option.dest == 'template':
-        parser.values.cmd = 'wrconfig'
     if option.dest == 'tags':
         value = [x.strip() for x in value.split(',')]
 
@@ -975,6 +1023,10 @@ def parse_args():
                       help='Specify tests to run via config files.')
     parser.add_option('-d', action='store_true', default=False,
                       dest='dryrun', help='Dry run. Print tests, but take no other action.')
+    parser.add_option('-l', action='callback', callback=options_cb,
+                      default=None, dest='logfile', metavar='logfile',
+                      type='string',
+                      help='Read logfile and re-run tests which failed.')
     parser.add_option('-g', action='store_true', default=False,
                       dest='do_groups', help='Make directories TestGroups.')
     parser.add_option('-o', action='callback', callback=options_cb,
@@ -1021,9 +1073,6 @@ def parse_args():
                       help='Number of times to run the test run.')
     (options, pathnames) = parser.parse_args()
 
-    if not options.runfiles and not options.template:
-        options.cmd = 'runtests'
-
     if options.runfiles and len(pathnames):
         fail('Extraneous arguments.')
 
@@ -1034,18 +1083,20 @@ def parse_args():
 
 def main():
     options = parse_args()
+
     testrun = TestRun(options)
 
-    if options.cmd == 'runtests':
-        find_tests(testrun, options)
-    elif options.cmd == 'rdconfig':
+    if options.runfiles:
         testrun.read(options)
-    elif options.cmd == 'wrconfig':
+    else:
         find_tests(testrun, options)
+
+    if options.logfile:
+        filter_tests(testrun, options)
+
+    if options.template:
         testrun.write(options)
         exit(0)
-    else:
-        fail('Unknown command specified')
 
     testrun.complete_outputdirs()
     testrun.run(options)
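The new -l mode is what the rerun invokes: filter_tests() scans a prior
results log for lines marked [FAIL], recovers each test's group
directory and base name with the regex shown above, and prunes the
TestRun down to just those entries; if a group's pre script is among
the failures, the whole group is kept and rerun. For example, a
hypothetical log line of the form

    Test: /usr/share/zfs/zfs-tests/tests/functional/somedir/some_test (run as root) [00:00] [FAIL]

yields group "tests/functional/somedir" and test "some_test". A
standalone rerun of previous failures might look like this (runfile and
log paths are illustrative only, not from the patch):

    test-runner.py -c /usr/share/zfs/runfiles/sanity.run \
        -l /var/tmp/test_results/20211201T000000/log
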
diff --git a/tests/test-runner/bin/zts-report.py.in b/tests/test-runner/bin/zts-report.py.in
index d540294975..cbbcb9641b 100755
--- a/tests/test-runner/bin/zts-report.py.in
+++ b/tests/test-runner/bin/zts-report.py.in
@@ -21,6 +21,7 @@
 import os
 import re
 import sys
+import argparse
 
 #
 # This script parses the stdout of zfstest, which has this format:
@@ -370,10 +371,33 @@ def process_results(pathname):
     return d
 
 
+class ListMaybesAction(argparse.Action):
+    def __init__(self,
+                 option_strings,
+                 dest="SUPPRESS",
+                 default="SUPPRESS",
+                 help="list flaky tests and exit"):
+        super(ListMaybesAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            default=default,
+            nargs=0,
+            help=help)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        for test in maybe:
+            print(test)
+        sys.exit(0)
+
+
 if __name__ == "__main__":
-    if len(sys.argv) != 2:
-        usage('usage: %s <pathname>' % sys.argv[0])
-    results = process_results(sys.argv[1])
+    parser = argparse.ArgumentParser(description='Analyze ZTS logs')
+    parser.add_argument('logfile')
+    parser.add_argument('--list-maybes', action=ListMaybesAction)
+    parser.add_argument('--no-maybes', action='store_false', dest='maybes')
+    args = parser.parse_args()
+
+    results = process_results(args.logfile)
 
     if summary['total'] == 0:
         print("\n\nNo test results were found.")
@@ -382,6 +406,7 @@ if __name__ == "__main__":
 
     expected = []
     unexpected = []
+    all_maybes = True
 
     for test in list(results.keys()):
         if results[test] == "PASS":
@@ -394,11 +419,16 @@ if __name__ == "__main__":
         if setup in maybe and maybe[setup][0] == "SKIP":
             continue
 
-        if ((test not in known or results[test] not in known[test][0]) and
-                (test not in maybe or results[test] not in maybe[test][0])):
-            unexpected.append(test)
-        else:
+        if (test in known and results[test] in known[test][0]):
             expected.append(test)
+        elif test in maybe and results[test] in maybe[test][0]:
+            if results[test] == 'SKIP' or args.maybes:
+                expected.append(test)
+            elif not args.maybes:
+                unexpected.append(test)
+        else:
+            unexpected.append(test)
+            all_maybes = False
 
     print("\nTests with results other than PASS that are expected:")
     for test in sorted(expected):
@@ -444,5 +474,7 @@ if __name__ == "__main__":
 
     if len(unexpected) == 0:
         sys.exit(0)
+    elif not args.maybes and all_maybes:
+        sys.exit(2)
     else:
         sys.exit(1)
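
Taken together, zts-report's new argparse interface defines the
three-way contract that zfs-tests.sh keys off: by default, failures on
the "maybe" list still count as expected; with --no-maybes they become
unexpected, and exit status 2 signals that every unexpected failure was
a maybe, i.e. a rerun is worthwhile. A sketch of how a caller can
consume this (the results file name is a placeholder):

    zts-report.py --list-maybes             # print the flaky list, exit 0
    zts-report.py results.txt               # flaky failures count as expected
    zts-report.py --no-maybes results.txt   # flaky failures are unexpected
    case $? in
        0) echo "clean run" ;;
        2) echo "only known-flaky tests failed; rerunning is worthwhile" ;;
        *) echo "genuine failures" ;;
    esac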