Merge branch 'linux-zpios' into refs/top-bases/linux-zfs-branch

Brian Behlendorf 2009-02-06 15:02:31 -08:00
commit f8154e7251
2 changed files with 196 additions and 75 deletions

View File

@@ -1,83 +1,115 @@
 #!/bin/bash
 #
 # Wrapper script for easily running a survey of zpios based tests
 #
-prog=survey.sh
-. ../.script-config
-
-LOG=/home/`whoami`/zpios-logs/`uname -r`/zpios-`date +%Y%m%d`/
-mkdir -p ${LOG}
-
-# Apply all tunings described below to generate some best case
-# numbers for what is achievable with some more elbow grease.
-NAME="prefetch+zerocopy+checksum+pending1024+kmem"
-echo "----------------------- ${NAME} ------------------------------"
-./zpios.sh \
-        "" \
-        "zfs_prefetch_disable=1 zfs_vdev_max_pending=1024 zio_bulk_flags=0x100" \
-        "--zerocopy" \
-        ${LOG}/${NAME}/ \
-        "${CMDDIR}/zfs/zfs set checksum=off lustre" | \
-        tee ${LOG}/${NAME}.txt
-
-# Baseline number for an out of the box config with no manual tuning.
-# Ideally, we will want things to be automatically tuned and for this
-# number to approach the tweaked-out results above.
-NAME="baseline"
-echo "----------------------- ${NAME} ------------------------------"
-./zpios.sh \
-        "" \
-        "" \
-        "" \
-        ${LOG}/${NAME}/ | \
-        tee ${LOG}/${NAME}.txt
+. ./common.sh
+PROG=zpios-survey.sh
+
+usage() {
+cat << EOF
+USAGE:
+$0 [hvp] <-c config> <-t test>
+
+DESCRIPTION:
+        Helper script for easy zpios survey benchmarking.
+
+OPTIONS:
+        -h      Show this message
+        -v      Verbose
+        -p      Enable profiling
+        -c      Zpool configuration
+        -t      Zpios test
+        -l      Zpios survey log
+
+EOF
+}
+
+print_header() {
+        echo
+        echo "================================================================"
+        echo "Test: $1"
+        echo
+}
+
+# Baseline performance for an out of the box config with no manual tuning.
+# Ideally, we want everything to be automatically tuned for your system and
+# for this to perform reasonably well.
+zpios_survey_base() {
+        TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+baseline"
+        print_header ${TEST_NAME}
+
+        ./zfs.sh ${VERBOSE_FLAG} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zpios.sh ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zfs.sh -u ${VERBOSE_FLAG} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+}
 # Disable ZFS's prefetching.  For some reason that is still not clear to me,
 # the current prefetching policy is quite bad for a random workload.
-# Allow the algorithm to detect a random workload and not do anything
-# may be the way to address this issue.
-NAME="prefetch"
-echo "----------------------- ${NAME} ------------------------------"
-./zpios.sh \
-        "" \
-        "zfs_prefetch_disable=1" \
-        "" \
-        ${LOG}/${NAME}/ | \
-        tee ${LOG}/${NAME}.txt
+# Allowing the algorithm to detect a random workload and not do
+# anything may be the way to address this issue.
+zpios_survey_prefetch() {
+        TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+prefetch"
+        print_header ${TEST_NAME}
+
+        ./zfs.sh ${VERBOSE_FLAG} \
+                zfs="zfs_prefetch_disable=1" | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zpios.sh ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zfs.sh -u ${VERBOSE_FLAG} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+}
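The prefetch case drives the tuning through zfs.sh, but zfs_prefetch_disable is an ordinary module parameter, so the same setting can be applied by hand when loading the module yourself. A minimal sketch, assuming the zfs module is loaded directly with modprobe rather than through the helper scripts:

    # Load zfs with prefetching disabled (illustrative; zfs.sh normally
    # loads the full spl/zfs module stack in the correct order for you).
    sudo modprobe zfs zfs_prefetch_disable=1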
-# As expected, simulating a zerocopy IO path improves performance
-# by freeing up lots of CPU which is wasted moving data between buffers.
-NAME="zerocopy"
-echo "----------------------- ${NAME} ------------------------------"
-./zpios.sh \
-        "" \
-        "" \
-        "--zerocopy" \
-        ${LOG}/${NAME}/ | \
-        tee ${LOG}/${NAME}.txt
+# Simulating a zerocopy IO path should improve performance by freeing up
+# lots of CPU which is wasted moving data between buffers.
+zpios_survey_zerocopy() {
+        TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+zerocopy"
+        print_header ${TEST_NAME}
+
+        ./zfs.sh ${VERBOSE_FLAG} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zpios.sh ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} \
+                -o "--zerocopy" | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zfs.sh -u ${VERBOSE_FLAG} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+}
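Unlike the module tunings, --zerocopy is an option of the zpios command itself, which is why this case forwards it with zpios.sh's new -o flag instead of going through zfs.sh. An illustrative standalone invocation (the config and test names are placeholders, not names shipped with this commit):

    # Run a single zpios pass with the zerocopy simulation enabled.
    sudo ./zpios.sh -v -c <zpool-config> -t <zpios-test> -o "--zerocopy"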
 # Disabling checksumming should show some (if small) improvement
 # simply due to freeing up a modest amount of CPU.
-NAME="checksum"
-echo "----------------------- ${NAME} ------------------------------"
-./zpios.sh \
-        "" \
-        "" \
-        "" \
-        ${LOG}/${NAME}/ \
-        "${CMDDIR}/zfs/zfs set checksum=off lustre" | \
-        tee ${LOG}/${NAME}.txt
+zpios_survey_checksum() {
+        TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+checksum"
+        print_header ${TEST_NAME}
+
+        ./zfs.sh ${VERBOSE_FLAG} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zpios.sh ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} \
+                -s "set checksum=off" | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zfs.sh -u ${VERBOSE_FLAG} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+}
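The checksum case forwards "set checksum=off" through the new -s flag so the property is applied to the freshly created pool; the removed survey.sh did the same thing directly with the zfs utility. The equivalent manual step on an existing dataset looks like this (the pool name is a placeholder):

    zfs set checksum=off <pool>     # disable checksumming for the benchmark
    zfs get checksum <pool>         # confirm the property is now off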
 # Increasing the pending IO depth also seems to improve things, likely
-# at the expense of latency.  This should be exported more because I'm
-# seeing a much bigger impact there that I would have expected.  There
+# at the expense of latency.  This should be explored more because I'm
+# seeing a much bigger impact there than I would have expected.  There
 # may be some low hanging fruit to be found here.
-NAME="pending"
-echo "----------------------- ${NAME} ------------------------------"
-./zpios.sh \
-        "" \
-        "zfs_vdev_max_pending=1024" \
-        "" \
-        ${LOG}/${NAME}/ | \
-        tee ${LOG}/${NAME}.txt
+zpios_survey_pending() {
+        TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+pending"
+        print_header ${TEST_NAME}
+
+        ./zfs.sh ${VERBOSE_FLAG} \
+                zfs="zfs_vdev_max_pending=1024" | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zpios.sh ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zfs.sh -u ${VERBOSE_FLAG} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+}
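zfs_vdev_max_pending caps the number of in-flight IOs per vdev, so raising it trades latency for throughput. On a loaded module the value in effect can usually be inspected through sysfs, assuming the parameter is exported there (a sketch, not guaranteed on every kernel or module build):

    # Check the vdev queue depth currently in effect for the loaded module.
    cat /sys/module/zfs/parameters/zfs_vdev_max_pending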
 # To avoid memory fragmentation issues our slab implementation can be
 # based on a virtual address space.  Interestingly, we take a pretty
@@ -92,11 +124,84 @@ echo "----------------------- ${NAME} ------------------------------"
 #
 # 0x100 = KMC_KMEM - Force kmem_* based slab
 # 0x200 = KMC_VMEM - Force vmem_* based slab
-NAME="kmem"
-echo "----------------------- ${NAME} ------------------------------"
-./zpios.sh \
-        "" \
-        "zio_bulk_flags=0x100" \
-        "" \
-        ${LOG}/${NAME}/ | \
-        tee ${LOG}/${NAME}.txt
+zpios_survey_kmem() {
+        TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+kmem"
+        print_header ${TEST_NAME}
+
+        ./zfs.sh ${VERBOSE_FLAG} \
+                zfs="zio_bulk_flags=0x100" | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zpios.sh ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zfs.sh -u ${VERBOSE_FLAG} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+}
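The 0x100 and 0x200 values come straight from the comment above; using the same zfs.sh convention the survey relies on, the two slab backings can be compared directly:

    # Force kmem_* based slabs for the zio bulk caches, then vmem_* based.
    sudo ./zfs.sh zfs="zio_bulk_flags=0x100" && sudo ./zfs.sh -u
    sudo ./zfs.sh zfs="zio_bulk_flags=0x200" && sudo ./zfs.sh -u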
+# Apply all possible tunings concurrently to get a best case number.
+zpios_survey_all() {
+        TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+all"
+        print_header ${TEST_NAME}
+
+        ./zfs.sh ${VERBOSE_FLAG} \
+                zfs="zfs_prefetch_disable=1" \
+                zfs="zfs_vdev_max_pending=1024" \
+                zfs="zio_bulk_flags=0x100" | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zpios.sh ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} \
+                -o "--zerocopy" \
+                -s "set checksum=off" | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+        ./zfs.sh -u ${VERBOSE_FLAG} | \
+                tee -a ${ZPIOS_SURVEY_LOG}
+}
+
+PROFILE=
+ZPOOL_NAME=zpios-survey
+ZPOOL_CONFIG=zpool-config.sh
+ZPIOS_TEST=zpios-test.sh
+ZPIOS_SURVEY_LOG=/dev/null
+
+while getopts 'hvpc:t:l:' OPTION; do
+        case $OPTION in
+        h)
+                usage
+                exit 1
+                ;;
+        v)
+                VERBOSE=1
+                VERBOSE_FLAG="-v"
+                ;;
+        p)
+                PROFILE=1
+                PROFILE_FLAG="-p"
+                ;;
+        c)
+                ZPOOL_CONFIG=${OPTARG}
+                ;;
+        t)
+                ZPIOS_TEST=${OPTARG}
+                ;;
+        l)
+                ZPIOS_SURVEY_LOG=${OPTARG}
+                ;;
+        ?)
+                usage
+                exit
+                ;;
+        esac
+done
+
+if [ $(id -u) != 0 ]; then
+        die "Must run as root"
+fi
+
+zpios_survey_base
+zpios_survey_prefetch
+zpios_survey_zerocopy
+zpios_survey_checksum
+zpios_survey_pending
+zpios_survey_kmem
+zpios_survey_all
+
+exit 0
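Putting it together, a typical run of the new wrapper selects a pool configuration and a test by name and tees everything into one survey log. The names below are placeholders for whatever exists under the zpool-config and zpios-test directories; the log path is arbitrary:

    sudo ./zpios-survey.sh -v \
            -c <zpool-config> \
            -t <zpios-test> \
            -l /tmp/zpios-survey-`date +%Y%m%d`.log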

View File

@@ -28,6 +28,9 @@ OPTIONS:
         -p      Enable profiling
         -c      Zpool configuration
         -t      Zpios test
+        -o      Additional zpios options
+        -l      Additional zpool options
+        -s      Additional zfs options
 
 EOF
 }
@@ -115,8 +118,11 @@ PROFILE=
 ZPOOL_CONFIG=zpool-config.sh
 ZPIOS_TEST=zpios-test.sh
 ZPOOL_NAME=zpios
+ZPIOS_OPTIONS=
+ZPOOL_OPTIONS=""
+ZFS_OPTIONS=""
 
-while getopts 'hvpc:t:' OPTION; do
+while getopts 'hvpc:t:o:l:s:' OPTION; do
         case $OPTION in
         h)
                 usage
@@ -135,6 +141,15 @@ while getopts 'hvpc:t:' OPTION; do
         t)
                 ZPIOS_TEST=${TOPDIR}/scripts/zpios-test/${OPTARG}.sh
                 ;;
+        o)
+                ZPIOS_OPTIONS=${OPTARG}
+                ;;
+        l) # Passed through to zpool-create.sh
+                ZPOOL_OPTIONS=${OPTARG}
+                ;;
+        s) # Passed through to zpool-create.sh
+                ZFS_OPTIONS=${OPTARG}
+                ;;
         ?)
                 usage
                 exit
@@ -170,7 +185,8 @@ if [ ${VERBOSE} ]; then
 fi
 
 # Create the zpool configuration
-./zpool-create.sh ${VERBOSE_FLAG} -p ${ZPOOL_NAME} -c ${ZPOOL_CONFIG} || exit 1
+./zpool-create.sh ${VERBOSE_FLAG} -p ${ZPOOL_NAME} -c ${ZPOOL_CONFIG} \
+        -l "${ZPOOL_OPTIONS}" -s "${ZFS_OPTIONS}" || exit 1
 
 if [ $PROFILE ]; then
         ZPIOS_CMD="${ZPIOS_CMD} --log=${PROFILE_ZPIOS_LOG}"
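The three new flags give callers one place to reach each layer: -o is handed to the zpios command, while -l and -s are forwarded to zpool-create.sh as extra zpool and zfs options. Mirroring the survey's "all" case, a combined invocation might look like this (config and test names are placeholders):

    sudo ./zpios.sh -v -c <zpool-config> -t <zpios-test> \
            -o "--zerocopy" \
            -s "set checksum=off"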