Add ZFS perf test for dbuf cache

This change adds a test for sequential reads out of the dbuf cache.
It's essentially a copy of sequential_reads_cached, using a smaller
data set. The sequential read tests are renamed to differentiate them.

Authored by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Paul Dagnelie <pcd@delphix.com>
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: John Wren Kennedy <john.kennedy@delphix.com>
Closes #7225

commit e086e717c3 (parent d699aaef09)
Author: John Wren Kennedy
Date:   2018-02-28 10:38:37 -08:00
Committer: Brian Behlendorf

7 changed files with 118 additions and 5 deletions

@@ -25,8 +25,8 @@ outputdir = /var/tmp/test_results
 tags = ['perf']
 
 [tests/perf/regression]
-tests = ['sequential_writes', 'sequential_reads', 'sequential_reads_cached',
-    'sequential_reads_cached_clone', 'random_reads', 'random_writes',
-    'random_readwrite']
+tests = ['sequential_writes', 'sequential_reads', 'sequential_reads_arc_cached',
+    'sequential_reads_arc_cached_clone', 'sequential_reads_dbuf_cached',
+    'random_reads', 'random_writes', 'random_readwrite']
 post =
 tags = ['perf', 'regression']
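
For reference, this runfile is what the suite's test-runner consumes. A trimmed section in the same format (a hedged sketch, not part of this commit) would run only the new case:

[tests/perf/regression]
tests = ['sequential_reads_dbuf_cached']
post =
tags = ['perf', 'regression']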

@@ -44,6 +44,7 @@ export SYSTEM_FILES='arp
 	file
 	find
 	fio
+	free
 	getconf
 	getent
 	getfacl
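
The perf scripts execute under the constrained PATH that zfs-tests.sh assembles from this whitelist, so listing free here is what lets a test script invoke it at all. A hedged illustration only (the actual caller is not shown in this diff, and mem_bytes is a hypothetical variable):

# Hypothetical sketch: only whitelisted commands resolve under the
# constrained PATH, so this line would fail if 'free' were not listed.
typeset mem_bytes=$(free -b | awk '/^Mem:/ {print $2}')
echo "physmem $mem_bytes"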

@@ -188,6 +188,24 @@ function get_max_arc_size
 	echo $max_arc_size
 }
 
+function get_max_dbuf_cache_size
+{
+	typeset -l max_dbuf_cache_size
+
+	if is_linux; then
+		max_dbuf_cache_size=$(get_tunable dbuf_cache_max_bytes)
+	else
+		max_dbuf_cache_size=$(dtrace -qn 'BEGIN {
+		    printf("%u\n", `dbuf_cache_max_bytes);
+		    exit(0);
+		}')
+
+		[[ $? -eq 0 ]] || log_fail "get_max_dbuf_cache_size failed"
+	fi
+
+	echo $max_dbuf_cache_size
+}
+
 # Create a file with some information about how this system is configured.
 function get_system_config
 {
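
On Linux the new function defers to the suite's get_tunable helper. A minimal sketch of what that lookup amounts to, assuming ZFS module parameters are exposed under the usual /sys/module/zfs/parameters path (this is not the suite's actual helper, which lives in the test library):

# Minimal sketch under the stated assumption: each ZFS module
# parameter is readable as a file under /sys/module/zfs/parameters.
function read_zfs_tunable # <tunable>
{
	typeset tunable=$1
	typeset param=/sys/module/zfs/parameters/$tunable

	[[ -r $param ]] || return 1
	cat $param
}

With that, read_zfs_tunable dbuf_cache_max_bytes would print the dbuf cache ceiling in bytes, which the new test scales by 3/4 to size its working set.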

@@ -3,8 +3,9 @@ dist_pkgdata_SCRIPTS = \
 	random_reads.ksh \
 	random_readwrite.ksh \
 	random_writes.ksh \
-	sequential_reads_cached_clone.ksh \
-	sequential_reads_cached.ksh \
+	sequential_reads_arc_cached_clone.ksh \
+	sequential_reads_arc_cached.ksh \
+	sequential_reads_dbuf_cached.ksh \
 	sequential_reads.ksh \
 	sequential_writes.ksh \
 	setup.ksh

@@ -0,0 +1,93 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2016 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the sequential_reads job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read from are created prior to the first fio run, and used
# for all fio runs. The ARC is not cleared to ensure that all data is cached.
#
# This is basically a copy of the sequential_reads_arc_cached test case, but
# with a smaller dataset so that we can fit everything into the decompressed,
# linear space in the dbuf cache.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib

function cleanup
{
	# kill fio and iostat
	pkill ${fio##*/}
	pkill ${iostat##*/}
	log_must_busy zfs destroy $TESTFS
	log_must_busy zpool destroy $PERFPOOL
}

trap "log_fail \"Measure IO stats during sequential read load\"" SIGTERM

log_onexit cleanup

export TESTFS=$PERFPOOL/testfs
recreate_perfpool
log_must zfs create $PERF_FS_OPTS $TESTFS

# Ensure the working set can be cached in the dbuf cache.
export TOTAL_SIZE=$(($(get_max_dbuf_cache_size) * 3 / 4))

# Variables for use by fio.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
	export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
	export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
	export PERF_NTHREADS=${PERF_NTHREADS:-'64'}
	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
	export PERF_IOSIZES=${PERF_IOSIZES:-'64k'}
fi

# Layout the files to be used by the read tests. Create as many files as the
# largest number of threads. An fio run with fewer threads will use a subset
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))

log_must fio $FIO_SCRIPTS/mkfiles.fio

# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
	export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
	    "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "vmstat 1"
	    "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1" "iostat")
else
	export collect_scripts=("kstat zfs:0 1" "kstat" "vmstat -T d 1" "vmstat"
	    "mpstat -T d 1" "mpstat" "iostat -T d -xcnz 1" "iostat"
	    "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
	    "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
	    "dtrace -s $PERF_SCRIPTS/profile.d" "profile")
fi

log_note "Sequential cached reads with $PERF_RUNTYPE settings"
do_fio_run sequential_reads.fio false false
log_pass "Measure IO stats during sequential cached read load"
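
To make the sizing arithmetic concrete, here is a worked example with hypothetical numbers: a 512 MiB dbuf cache ceiling and the nightly thread count of 64.

# Hypothetical values; the arithmetic mirrors the script above.
$ dbuf_max=$((512 * 1024 * 1024))   # stand-in for get_max_dbuf_cache_size
$ echo $((dbuf_max * 3 / 4))        # TOTAL_SIZE: 3/4 of the cache, 384 MiB
402653184
$ echo $((dbuf_max * 3 / 4 / 64))   # FILE_SIZE with NUMJOBS=64: 6 MiB
6291456

Capping TOTAL_SIZE at 3/4 of the ceiling presumably leaves headroom so the whole working set stays resident in the dbuf cache for the duration of the reads.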