#!/bin/bash
#
# Usage: zpios
#        --threadcount       -t    =values
#        --threadcount_low   -l    =value
#        --threadcount_high  -h    =value
#        --threadcount_incr  -e    =value
#        --regioncount       -n    =values
#        --regioncount_low   -i    =value
#        --regioncount_high  -j    =value
#        --regioncount_incr  -k    =value
#        --offset            -o    =values
#        --offset_low        -m    =value
#        --offset_high       -q    =value
#        --offset_incr       -r    =value
#        --chunksize         -c    =values
#        --chunksize_low     -a    =value
#        --chunksize_high    -b    =value
#        --chunksize_incr    -g    =value
#        --regionsize        -s    =values
#        --regionsize_low    -A    =value
#        --regionsize_high   -B    =value
#        --regionsize_incr   -C    =value
#        --load              -L    =dmuio|ssf|fpp
#        --pool              -p    =pool name
#        --name              -M    =test name
#        --cleanup           -x
#        --prerun            -P    =pre-command
#        --postrun           -R    =post-command
#        --log               -G    =log directory
#        --regionnoise       -I    =shift
#        --chunknoise        -N    =bytes
#        --threaddelay       -T    =jiffies
#        --verify            -V
#        --zerocopy          -z
#        --nowait            -O
#        --human-readable    -H
#        --verbose           -v    =increase verbosity
#        --help              -?    =this help
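#
# Example (illustrative only; the pool name 'tank', the test name, and the
# sweep values below are hypothetical): run a dmuio load against an existing
# pool, sweeping the thread count from 4 to 16 in steps of 4:
#
#   zpios --load=dmuio --pool=tank --name=sweep-example \
#         --threadcount_low=4 --threadcount_high=16 --threadcount_incr=4 \
#         --regioncount=1024 --regionsize=4M --chunksize=1M --offset=4M \
#         --cleanup --human-readable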
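
# Default zpios command line: a dmuio load run against the caller-provided
# pool ${ZPOOL_NAME}, labeled with ${ZPOOL_CONFIG}, plus any extra flags
# passed in through ${ZPIOS_OPTIONS}.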
ZPIOS_CMD="${ZPIOS} \
|
2009-01-20 23:54:50 +00:00
|
|
|
--load=dmuio \
|
2009-02-19 22:51:40 +00:00
|
|
|
--pool=${ZPOOL_NAME} \
|
    --name=${ZPOOL_CONFIG} \
    --threadcount=256 \
    --regioncount=65536 \
    --regionsize=4M \
    --chunksize=1M \
    --offset=4M \
    --cleanup \
    --human-readable \
    ${ZPIOS_OPTIONS}"
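
# Run the zpios command built above.  When VERBOSE is set the --verbose
# flag is appended and the full command line is echoed first; any failure
# aborts with exit status 1.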
zpios_start() {
    if [ ${VERBOSE} ]; then
        ZPIOS_CMD="${ZPIOS_CMD} --verbose"
        echo ${ZPIOS_CMD}
    fi

    ${ZPIOS_CMD} || exit 1
}
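
# No teardown is needed for zpios itself; in verbose mode just emit a
# trailing newline to keep the surrounding output readable.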
zpios_stop() {
    [ ${VERBOSE} ] && echo
}
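
# A minimal usage sketch, assuming this file is sourced by a test wrapper
# that has already created or imported the pool and exported the variables
# noted above (the file name below is illustrative):
#
#   . ./zpios-common.sh
#   zpios_start
#   zpios_stop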