OpenZFS - Performance regression suite for zfstest

Author: John Wren Kennedy <john.kennedy@delphix.com>
Reviewed by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: Paul Dagnelie <pcd@delphix.com>
Reviewed by: Don Brady <don.brady@intel.com>
Reviewed by: Richard Elling <Richard.Elling@RichardElling.com>
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: David Quigley <david.quigley@intel.com>
Approved by: Richard Lowe <richlowe@richlowe.net>
Ported-by: Don Brady <don.brady@intel.com>

OpenZFS-issue: https://www.illumos.org/issues/6950
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/dcbf3bd6
Delphix-commit: https://github.com/delphix/delphix-os/commit/978ed49
Closes #4929

ZFS Test Suite Performance Regression Tests

This was pulled into OpenZFS via the compressed arc feature and was
separated out in zfsonlinux as a separate pull request from PR-4768.
It originally came in as QA-4903 in Delphix-OS from John Kennedy.

Expected Usage:

$ DISKS="sdb sdc sdd" zfs-tests.sh -r perf-regression.run

Porting Notes:
1. Added assertions in the setup script to make sure required tools
   (fio, mpstat, ...) are present.
2. For the config.json generation in perf.shlib, arcstats and other
   binaries are used instead of dtrace to query the values.
3. For the perf data collection:
   - use "zpool iostat -lpvyL" instead of the io.d dtrace script
    (currently not collecting zfs_read/write latency stats)
   - mpstat and iostat take different arguments
   - prefetch_io.sh is a placeholder that uses arcstats instead of
     dtrace
4. Build machines require the fio, mdadm and sysstat packages (YMMV).

Future Work:
   - Need a way to measure zfs_read and zfs_write latencies per pool.
   - Need tools to take two sets of output and display/graph the
     differences
   - Bring over additional regression tests from Delphix
This commit is contained in:
John Wren Kennedy 2016-08-03 21:26:15 +00:00 committed by Brian Behlendorf
parent 7050a65d5c
commit 679d73e98b
31 changed files with 1360 additions and 10 deletions

View File

@ -26,10 +26,12 @@ AC_DEFUN([ZFS_AC_CONFIG_USER_COMMANDS_COMMON], [
AC_PATH_TOOL(DU, du, "")
AC_PATH_TOOL(ECHO, echo, "")
AC_PATH_TOOL(EGREP, egrep, "")
AC_PATH_TOOL(FALSE, false, "")
AC_PATH_TOOL(FDISK, fdisk, "")
AC_PATH_TOOL(FGREP, fgrep, "")
AC_PATH_TOOL(FILE, file, "")
AC_PATH_TOOL(FIND, find, "")
AC_PATH_TOOL(FIO, fio, "")
AC_PATH_TOOL(FSCK, fsck, "")
AC_PATH_TOOL(GNUDD, dd, "")
AC_PATH_TOOL(GETCONF, getconf, "")
@ -74,6 +76,7 @@ AC_DEFUN([ZFS_AC_CONFIG_USER_COMMANDS_COMMON], [
AC_PATH_TOOL(SYNC, sync, "")
AC_PATH_TOOL(TAIL, tail, "")
AC_PATH_TOOL(TAR, tar, "")
AC_PATH_TOOL(TIMEOUT, timeout, "")
AC_PATH_TOOL(TOUCH, touch, "")
AC_PATH_TOOL(TR, tr, "")
AC_PATH_TOOL(TRUNCATE, truncate, "")
@ -96,14 +99,21 @@ dnl # These commands may take different command line arguments.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_USER_COMMANDS_LINUX], [
AC_PATH_TOOL(BLOCKDEV, blockdev, "")
AC_PATH_TOOL(CHACL, chacl, "")
AC_PATH_TOOL(COMPRESS, gzip, "")
AC_PATH_TOOL(FORMAT, parted, "")
AC_PATH_TOOL(FREE, free, "")
AC_PATH_TOOL(GETFACL, getfacl, "")
AC_PATH_TOOL(IOSTAT, iostat, "")
AC_PATH_TOOL(LOCKFS, lsof, "")
AC_PATH_TOOL(LSBLK, lsblk, "")
AC_PATH_TOOL(MODUNLOAD, rmmod, "")
AC_PATH_TOOL(MPSTAT, mpstat, "")
AC_PATH_TOOL(NEWFS, mke2fs, "")
AC_PATH_TOOL(NPROC, nproc, "")
AC_PATH_TOOL(PFEXEC, sudo, "")
AC_PATH_TOOL(READLINK, readlink, "")
AC_PATH_TOOL(SETFACL, setfacl, "")
AC_PATH_TOOL(SHARE, exportfs, "")
AC_PATH_TOOL(SWAP, swapon, "")
AC_PATH_TOOL(SWAPADD, swapon, "")
@ -112,10 +122,7 @@ AC_DEFUN([ZFS_AC_CONFIG_USER_COMMANDS_LINUX], [
AC_PATH_TOOL(UFSRESTORE, restore, "")
AC_PATH_TOOL(UNCOMPRESS, gunzip, "")
AC_PATH_TOOL(UNSHARE, exportfs, "")
AC_PATH_TOOL(GETFACL, getfacl, "")
AC_PATH_TOOL(SETFACL, setfacl, "")
AC_PATH_TOOL(CHACL, chacl, "")
AC_PATH_TOOL(NPROC, nproc, "")
AC_PATH_TOOL(VMSTAT, vmstat, "")
PAGESIZE=$($GETCONF PAGESIZE)
AC_SUBST(PAGESIZE)

View File

@ -280,6 +280,10 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/zvol/zvol_ENOSPC/Makefile
tests/zfs-tests/tests/functional/zvol/zvol_misc/Makefile
tests/zfs-tests/tests/functional/zvol/zvol_swap/Makefile
tests/zfs-tests/tests/perf/Makefile
tests/zfs-tests/tests/perf/fio/Makefile
tests/zfs-tests/tests/perf/regression/Makefile
tests/zfs-tests/tests/perf/scripts/Makefile
tests/zfs-tests/tests/stress/Makefile
rpm/Makefile
rpm/redhat/Makefile

View File

@ -174,6 +174,9 @@ Requires: lsscsi
Requires: mdadm
Requires: bc
Requires: ksh
Requires: fio
Requires: acl
Requires: sysstat
%description test
This package contains test infrastructure and support scripts for

View File

@ -13,3 +13,9 @@ s:usr/src/lib/libzfs/common:lib/libzfs:g
s:usr/src/lib/libzfs_core/common:lib/libzfs_core:g
s:lib/libzpool/common/sys:include/sys:g
s:lib/libzpool/common:lib/libzpool:g
s:usr/src/test/zfs-tests/include:tests/zfs-tests/include:g
s:usr/src/test/zfs-tests/runfiles:tests/runfiles:g
s:usr/src/test/zfs-tests/tests/functional:tests/zfs-tests/tests/functional:g
s:usr/src/test/zfs-tests/tests/perf:tests/zfs-tests/tests/perf:g
s:usr/src/test/test-runner/cmd/run.py:tests/test-runner/cmd/test-runner.py:g

View File

@ -0,0 +1,30 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 0
post_user = root
post = cleanup
outputdir = /var/tmp/test_results
[tests/perf/regression]
tests = ['sequential_writes', 'sequential_reads', 'sequential_reads_cached',
'sequential_reads_cached_clone', 'random_reads', 'random_writes',
'random_readwrite']
post =

View File

@ -26,6 +26,7 @@
from subprocess import PIPE
from subprocess import Popen
from sys import argv
from sys import maxint
from sys import exit
from threading import Timer
from time import time
@ -121,13 +122,16 @@ class Cmd(object):
def __init__(self, pathname, outputdir=None, timeout=None, user=None):
self.pathname = pathname
self.outputdir = outputdir or 'BASEDIR'
self.timeout = timeout or 60
self.timeout = timeout
self.user = user or ''
self.killed = False
self.result = Result()
if self.timeout == None:
self.timeout = 60
def __str__(self):
return "Pathname: %s\nOutputdir: %s\nTimeout: %s\nUser: %s\n" % (
return "Pathname: %s\nOutputdir: %s\nTimeout: %d\nUser: %s\n" % (
self.pathname, self.outputdir, self.timeout, self.user)
def kill_cmd(self, proc):
@ -213,6 +217,9 @@ def run(self, options):
self.result.starttime = time()
proc = Popen(privcmd, stdout=PIPE, stderr=PIPE)
# Allow a special timeout value of 0 to mean infinity
if int(self.timeout) == 0:
self.timeout = maxint
t = Timer(int(self.timeout), self.kill_cmd, [proc])
try:
@ -301,7 +308,7 @@ def __str__(self):
pre_user = ' (as %s)' % (self.pre_user)
if len(self.post_user):
post_user = ' (as %s)' % (self.post_user)
return "Pathname: %s\nOutputdir: %s\nTimeout: %s\nPre: %s%s\nPost: " \
return "Pathname: %s\nOutputdir: %s\nTimeout: %d\nPre: %s%s\nPost: " \
"%s%s\nUser: %s\n" % (self.pathname, self.outputdir,
self.timeout, self.pre, pre_user, self.post, post_user,
self.user)
@ -376,7 +383,7 @@ def __str__(self):
pre_user = ' (as %s)' % (self.pre_user)
if len(self.post_user):
post_user = ' (as %s)' % (self.post_user)
return "Pathname: %s\nOutputdir: %s\nTests: %s\nTimeout: %s\n" \
return "Pathname: %s\nOutputdir: %s\nTests: %s\nTimeout: %d\n" \
"Pre: %s%s\nPost: %s%s\nUser: %s\n" % (self.pathname,
self.outputdir, self.tests, self.timeout, self.pre, pre_user,
self.post, post_user, self.user)

View File

@ -27,11 +27,14 @@ export DU="@DU@"
export DUMPADM="@DUMPADM@"
export ECHO="@ECHO@"
export EGREP="@EGREP@"
export FALSE="@FALSE@"
export FDISK="@FDISK@"
export FGREP="@FGREP@"
export FILE="@FILE@"
export FIND="@FIND@"
export FIO="@FIO@"
export FORMAT="@FORMAT@"
export FREE="@FREE@"
export FSCK="@FSCK@"
export GETENT="@GETENT@"
export GETFACL="@GETFACL@"
@ -44,6 +47,7 @@ export GROUPMOD="@GROUPMOD@"
export HEAD="@HEAD@"
export HOSTNAME="@HOSTNAME@"
export ID="@ID@"
export IOSTAT="@IOSTAT@"
export ISAINFO="@ISAINFO@"
export KILL="@KILL@"
export KSH="@KSH@"
@ -61,6 +65,7 @@ export MNTTAB="@MNTTAB@"
export MODINFO="@MODINFO@"
export MODUNLOAD="@MODUNLOAD@"
export MOUNT="@MOUNT@"
export MPSTAT="@MPSTAT@"
export MV="@MV@"
export NAWK="@AWK@"
export NEWFS="@NEWFS@"
@ -98,6 +103,7 @@ export SWAPADD="@SWAPADD@"
export SYNC="@SYNC@"
export TAIL="@TAIL@"
export TAR="@TAR@"
export TIMEOUT="@TIMEOUT@"
export TOUCH="@TOUCH@"
export TR="@TR@"
export TRUNCATE="@TRUNCATE@"
@ -115,6 +121,7 @@ export UNSHARE="@UNSHARE@"
export USERADD="@USERADD@"
export USERDEL="@USERDEL@"
export USERMOD="@USERMOD@"
export VMSTAT="@VMSTAT@"
export WAIT="@WAIT@"
export WC="@WC@"
export ZONEADM="@ZONEADM@"

View File

@ -104,12 +104,15 @@ export COMPRESSION_PROP=on
export CHECKSUM_PROP=on
# some common variables used by test scripts :
export FIO_SCRIPTS=$STF_SUITE/tests/perf/fio
export PERF_SCRIPTS=$STF_SUITE/tests/perf/scripts
# some test pool names
export TESTPOOL=testpool.$$
export TESTPOOL1=testpool1.$$
export TESTPOOL2=testpool2.$$
export TESTPOOL3=testpool3.$$
export PERFPOOL=perfpool
# some test file system names
export TESTFS=testfs.$$

View File

@ -2809,6 +2809,30 @@ function vdevs_in_pool
return 0;
}
#
# Print the numerically largest of the positional arguments.
#
# $@ one or more integer values
#
function get_max
{
	# NOTE(review): in ksh93 'typeset -l' means lowercase, not long
	# integer -- harmless for numeric input, kept for compatibility.
	typeset -l i max=$1
	shift

	for i in "$@"; do
		# Direct arithmetic expansion; the original's
		# "$(echo $((...)))" spawned a subshell for no benefit.
		max=$((max > i ? max : i))
	done

	echo $max
}
#
# Print the numerically smallest of the positional arguments.
#
# $@ one or more integer values
#
function get_min
{
	# NOTE(review): in ksh93 'typeset -l' means lowercase, not long
	# integer -- harmless for numeric input, kept for compatibility.
	typeset -l i min=$1
	shift

	for i in "$@"; do
		# Direct arithmetic expansion; the original's
		# "$(echo $((...)))" spawned a subshell for no benefit.
		min=$((min < i ? min : i))
	done

	echo $min
}
#
# Wait for newly created block devices to have their minors created.
#

View File

@ -1 +1 @@
SUBDIRS = functional stress
SUBDIRS = functional perf stress

View File

@ -25,7 +25,7 @@
#
#
# Copyright (c) 2013 by Delphix. All rights reserved.
# Copyright (c) 2013, 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/clean_mirror/default.cfg
@ -36,6 +36,32 @@
# the contents of the mirror.
# This code is sourced into each of these test cases.
#
# Synchronize all the data in pool
#
# $1 pool name
#
# Flushes outstanding writes with sync(1), then starts a scrub and
# blocks until it completes, so the on-disk state is fully settled
# before the caller proceeds.  Fails the test if the scrub cannot be
# started, or if a resilver completes instead of a scrub (which would
# indicate unexpected device-repair activity on the pool).
function sync_pool #pool
{
typeset pool=$1

# Flush dirty data and give the transaction group a moment to commit.
log_must $SYNC
log_must $SLEEP 2
# Flush all the pool data.
typeset -i ret
$ZPOOL scrub $pool >/dev/null 2>&1
ret=$?
(( $ret != 0 )) && \
log_fail "$ZPOOL scrub $pool failed."

# Poll until the scrub reports complete.
while ! is_pool_scrubbed $pool; do
if is_pool_resilvered $pool ; then
log_fail "$pool should not be resilver completed."
fi
log_must $SLEEP 2
done
}
function overwrite_verify_mirror
{
typeset AFFECTED_DEVICE=$1
@ -60,6 +86,12 @@ function overwrite_verify_mirror
atfile=0
#
# Flush out the cache so that we ensure we're reading from disk.
#
log_must $ZPOOL export $TESTPOOL
log_must $ZPOOL import $TESTPOOL
typeset -i failedcount=0
while (( atfile < FILE_COUNT )); do
files[$atfile]=$TESTDIR/file.$atfile
@ -75,4 +107,6 @@ function overwrite_verify_mirror
log_fail "of the $FILE_COUNT files $failedcount did not " \
"have the same checksum before and after."
fi
sync_pool $TESTPOOL
}

View File

@ -0,0 +1,7 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/perf
dist_pkgdata_SCRIPTS = perf.shlib
SUBDIRS = \
fio \
regression \
scripts

View File

@ -0,0 +1,8 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/perf/fio
dist_pkgdata_SCRIPTS = \
mkfiles.fio \
random_reads.fio \
random_readwrite.fio \
random_writes.fio \
sequential_reads.fio \
sequential_writes.fio

View File

@ -0,0 +1,30 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
[global]
filename_format=file$jobnum
group_reporting=1
fallocate=0
ioengine=psync
bs=1024k
rw=write
thread=1
directory=/${TESTFS}
numjobs=${NUMJOBS}
filesize=${FILE_SIZE}
buffer_compress_percentage=33
buffer_compress_chunk=4096
[job]

View File

@ -0,0 +1,31 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
[global]
filename_format=file$jobnum
group_reporting=1
fallocate=0
overwrite=0
thread=1
rw=randread
time_based=1
directory=/${TESTFS}
runtime=${RUNTIME}
bs=${BLOCKSIZE}
ioengine=psync
sync=${SYNC_TYPE}
numjobs=${NUMJOBS}
[job]

View File

@ -0,0 +1,35 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
[global]
filename_format=file$jobnum
nrfiles=16
group_reporting=1
fallocate=0
overwrite=0
thread=1
rw=randrw
rwmixread=80
time_based=1
directory=/${TESTFS}
runtime=${RUNTIME}
bssplit=4k/50:8k/30:128k/10:1m/10
ioengine=psync
sync=${SYNC_TYPE}
numjobs=${NUMJOBS}
buffer_compress_percentage=33
buffer_compress_chunk=4096
[job]

View File

@ -0,0 +1,33 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
[global]
filename_format=file$jobnum
group_reporting=1
fallocate=0
thread=1
rw=randwrite
time_based=1
directory=/${TESTFS}
runtime=${RUNTIME}
bs=${BLOCKSIZE}
ioengine=psync
sync=${SYNC_TYPE}
numjobs=${NUMJOBS}
filesize=${FILESIZE}
buffer_compress_percentage=33
buffer_compress_chunk=4096
[job]

View File

@ -0,0 +1,31 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
[global]
filename_format=file$jobnum
group_reporting=1
fallocate=0
overwrite=0
thread=1
rw=read
time_based=1
directory=/${TESTFS}
runtime=${RUNTIME}
bs=${BLOCKSIZE}
ioengine=psync
sync=${SYNC_TYPE}
numjobs=${NUMJOBS}
[job]

View File

@ -0,0 +1,33 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
[global]
filename_format=file$jobnum
group_reporting=1
fallocate=0
thread=1
rw=write
time_based=1
directory=/${TESTFS}
runtime=${RUNTIME}
bs=${BLOCKSIZE}
ioengine=psync
sync=${SYNC_TYPE}
numjobs=${NUMJOBS}
filesize=${FILESIZE}
buffer_compress_percentage=33
buffer_compress_chunk=4096
[job]

View File

@ -0,0 +1,308 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
# Copyright (c) 2016, Intel Corporation.
#
. $STF_SUITE/include/libtest.shlib
# If neither is specified, do a nightly run.
[[ -z $PERF_REGRESSION_WEEKLY ]] && export PERF_REGRESSION_NIGHTLY=1
# Default runtime for each type of test run.
export PERF_RUNTIME_WEEKLY=$((30 * 60))
export PERF_RUNTIME_NIGHTLY=$((10 * 60))
# Default fs creation options
export PERF_FS_OPTS=${PERF_FS_OPTS:-'-o recsize=8k -o compress=lz4' \
' -o checksum=sha256 -o redundant_metadata=most'}
#
# Map a numeric sync flag to its human-readable label used in log
# messages and output filenames: 0 -> "async", 1 -> "sync".  Any other
# value yields an empty string.
#
function get_sync_str
{
	typeset flag=$1
	typeset label=''

	if [[ $flag -eq 1 ]]; then
		label='sync'
	elif [[ $flag -eq 0 ]]; then
		label='async'
	fi
	$ECHO $label
}
#
# This function will run fio in a loop, according to the .fio file passed
# in and a number of environment variables. The following variables can be
# set before launching zfstest to override the defaults.
#
# PERF_RUNTIME: The time in seconds each fio invocation should run.
# PERF_RUNTYPE: A human readable tag that appears in logs. The defaults are
# nightly and weekly.
# PERF_NTHREADS: A list of how many threads each fio invocation will use.
# PERF_SYNC_TYPES: Whether to use (O_SYNC) or not. 1 is sync IO, 0 is async IO.
# PERF_IOSIZES: A list of blocksizes in which each fio invocation will do IO.
# PERF_COLLECT_SCRIPTS: A comma delimited list of 'command args, logfile_tag'
# pairs that will be added to the scripts specified in each test.
#
#
# $1 fio job file, relative to $FIO_SCRIPTS
# $2 command executed to decide whether to recreate the pool and
#    dataset before each run (presumably $TRUE/$FALSE -- it is run as
#    a command, not string-compared; confirm against callers)
# $3 command executed to decide whether to flush the ARC via pool
#    export/import before each run (same convention as $2)
#
function do_fio_run
{
typeset script=$1
typeset do_recreate=$2
typeset clear_cache=$3
typeset threads sync iosize

# Run fio once for every combination of thread count, sync type and
# IO size listed in the PERF_* environment variables.
for threads in $PERF_NTHREADS; do
for sync in $PERF_SYNC_TYPES; do
for iosize in $PERF_IOSIZES; do
log_note "Running with $threads" \
"$(get_sync_str $sync) threads, $iosize ios"

if $do_recreate; then
recreate_perfpool
log_must $ZFS create $PERF_FS_OPTS \
$TESTFS
fi

if $clear_cache; then
# Clear the ARC
$ZPOOL export $PERFPOOL
$ZPOOL import $PERFPOOL
fi

# Variables consumed by the .fio job files via ${VAR} expansion.
export RUNTIME=$PERF_RUNTIME
export FILESIZE=$((TOTAL_SIZE / threads))
export NUMJOBS=$threads
export SYNC_TYPE=$sync
export BLOCKSIZE=$iosize
$SYNC

# Start the data collection
do_collect_scripts $threads $sync $iosize

# Start the load
log_must $FIO $FIO_SCRIPTS/$script
done
done
done
}
#
# This function iterates through the value pairs in $PERF_COLLECT_SCRIPTS.
# The script at index N is launched in the background, with its output
# redirected to a logfile containing the tag specified at index N + 1.
#
#
# $1 thread count   (used only to label the output files)
# $2 sync flag      (0/1; used only to label the output files)
# $3 IO size        (used only to label the output files)
#
# Expects the caller to have exported the 'collect_scripts' array of
# (command, logfile-tag) pairs, and PERF_RUNTIME to bound how long
# each collector runs.
function do_collect_scripts
{
typeset threads=$1
typeset sync=$2
typeset iosize=$3

[[ -n $collect_scripts ]] || log_fail "No data collection scripts."
[[ -n $PERF_RUNTIME ]] || log_fail "No runtime specified."

# This will be part of the output filename.
typeset sync_str=$(get_sync_str $sync)
typeset suffix="$sync_str.$iosize-ios.$threads-threads"

# Add in user supplied scripts and logfiles, if any.
# PERF_COLLECT_SCRIPTS is comma-delimited; strip leading blanks from
# each entry before appending it to the array.
typeset oIFS=$IFS
IFS=','
for item in $PERF_COLLECT_SCRIPTS; do
collect_scripts+=($($ECHO $item | $SED 's/^ *//g'))
done
IFS=$oIFS

# Walk the array two entries at a time: index N is the collector
# command, index N+1 its logfile tag.  Each collector runs in the
# background, bounded by timeout(1).
typeset idx=0
while [[ $idx -lt "${#collect_scripts[@]}" ]]; do
typeset logbase="$(get_perf_output_dir)/$($BASENAME \
$SUDO_COMMAND)"
typeset outfile="$logbase.${collect_scripts[$idx + 1]}.$suffix"

$TIMEOUT $PERF_RUNTIME ${collect_scripts[$idx]} >$outfile 2>&1 &
((idx += 2))
done

# Need to explicitly return 0 because timeout(1) will kill
# a child process and cause us to return non-zero.
return 0
}
# Print the directory that receives performance data collected while
# the system is under load, creating it on first use.
function get_perf_output_dir
{
	typeset perfdir="$(pwd)/perf_data"

	if [[ ! -d $perfdir ]]; then
		$MKDIR -p $perfdir
	fi
	$ECHO $perfdir
}
#
# Destroy and create the pool used for performance tests. The
# PERFPOOL_CREATE_CMD variable allows users to test with a custom pool
# configuration by specifying the pool creation command in their environment.
# If PERFPOOL_CREATE_CMD is empty, a pool using all available disks is created.
#
function recreate_perfpool
{
[[ -n $PERFPOOL ]] || log_fail "The \$PERFPOOL variable isn't set."

# Tear down any pool left over from a previous run before building
# a fresh one, so each test starts from a known-empty state.
poolexists $PERFPOOL && destroy_pool $PERFPOOL

if [[ -n $PERFPOOL_CREATE_CMD ]]; then
# Honor the user-supplied pool creation command verbatim.
log_must $PERFPOOL_CREATE_CMD
else
# Default: a flat pool across every disk in $DISKS.
log_must eval "$ZPOOL create -f $PERFPOOL $DISKS"
fi
}
#
# Print the maximum ARC size (the arcstat c_max value) in bytes.
# On Linux the value is read from /proc arcstats; on illumos it is
# queried with dtrace.  Fails the test if the query fails.
#
function get_max_arc_size
{
	# Declare first, assign separately: combining "typeset var=$(cmd)"
	# makes $? reflect typeset's (always 0) status, so the error
	# check below would never fire.
	typeset -l max_arc_size
	if is_linux; then
		max_arc_size=$($AWK '$1 == "c_max" { print $3 }' \
		    /proc/spl/kstat/zfs/arcstats)
	else
		max_arc_size=$(dtrace -qn 'BEGIN {
		    printf("%u\n", `arc_stats.arcstat_c_max.value.ui64);
		    exit(0);
		}')
	fi

	[[ $? -eq 0 ]] || log_fail "get_max_arc_size failed"

	$ECHO $max_arc_size
}
# Create a file with some information about how this system is configured.
#
# $1 filename, created under $PERF_DATA_DIR
#
# Appends a JSON-style description of the host (cpu count, physical
# memory, max ARC size, hostname, kernel version, disk inventory and,
# on Linux, the ZFS module tunables).
# NOTE(review): output is appended with '>>', so the result is only
# well-formed JSON when the file did not previously exist.
function get_system_config
{
typeset config=$PERF_DATA_DIR/$1

$ECHO "{" >>$config
# Basic host facts: Linux reads them from standard utilities, while
# illumos pulls them from kernel state via dtrace.
if is_linux; then
$ECHO " \"ncpus\": \"$($NPROC --all)\"," >>$config
$ECHO " \"physmem\": \"$($FREE -b | \
$AWK '$1 == "Mem:" { print $2 }')\"," >>$config
$ECHO " \"c_max\": \"$(get_max_arc_size)\"," >>$config
$ECHO " \"hostname\": \"$($UNAME -n)\"," >>$config
$ECHO " \"kernel version\": \"$($UNAME -sr)\"," >>$config
else
$DTRACE -qn 'BEGIN{
printf(" \"ncpus\": %d,\n", `ncpus);
printf(" \"physmem\": %u,\n", `physmem * `_pagesize);
printf(" \"c_max\": %u,\n", `arc_stats.arcstat_c_max.value.ui64);
printf(" \"kmem_flags\": \"0x%x\",", `kmem_flags);
exit(0)}' >>$config
$ECHO " \"hostname\": \"$($UNAME -n)\"," >>$config
$ECHO " \"kernel version\": \"$($UNAME -v)\"," >>$config
fi
# Disk inventory, and on Linux the current ZFS module tunables.
if is_linux; then
$LSBLK -dino NAME,SIZE | $AWK 'BEGIN {
printf(" \"disks\": {\n"); first = 1}
{disk = $1} {size = $2;
if (first != 1) {printf(",\n")} else {first = 0}
printf(" \"%s\": \"%s\"", disk, size)}
END {printf("\n },\n")}' >>$config

# Dump a fixed list of module parameters; the first entry is treated
# specially only to get the JSON comma separators right.
zfs_tunables="/sys/module/zfs/parameters"
printf " \"tunables\": {\n" >>$config
for tunable in \
zfs_arc_max \
zfs_arc_meta_limit \
zfs_arc_sys_free \
zfs_dirty_data_max \
zfs_flags \
zfs_prefetch_disable \
zfs_txg_timeout \
zfs_vdev_aggregation_limit \
zfs_vdev_async_read_max_active \
zfs_vdev_async_write_max_active \
zfs_vdev_sync_read_max_active \
zfs_vdev_sync_write_max_active \
zio_delay_max
do
if [ "$tunable" != "zfs_arc_max" ]
then
printf ",\n" >>$config
fi
printf " \"$tunable\": \"$(cat $zfs_tunables/$tunable)\"" \
>>$config
done
printf "\n }\n" >>$config
else
$IOSTAT -En | $AWK 'BEGIN {
printf(" \"disks\": {\n"); first = 1}
/^c/ {disk = $1}
/^Size: [^0]/ {size = $2;
if (first != 1) {printf(",\n")} else {first = 0}
printf(" \"%s\": \"%s\"", disk, size)}
END {printf("\n },\n")}' >>$config

# Capture any "set foo = bar" tunable overrides from /etc/system.
$SED -n 's/^set \(.*\)[ ]=[ ]\(.*\)/\1=\2/p' /etc/system | \
$AWK -F= 'BEGIN {printf(" \"system\": {\n"); first = 1}
{if (first != 1) {printf(",\n")} else {first = 0};
printf(" \"%s\": %s", $1, $2)}
END {printf("\n }\n")}' >>$config
fi
$ECHO "}" >>$config
}
#
# Print the number of fio jobs to run: one per CPU, scaled back to
# three quarters of the CPUs when the machine has more than eight.
#
function num_jobs_by_cpu
{
	typeset cpu_count

	if is_linux; then
		cpu_count=$($NPROC --all)
	else
		cpu_count=$($PSRINFO | $WC -l)
	fi

	typeset jobs=$cpu_count
	if [[ $cpu_count -gt 8 ]]; then
		jobs=$($ECHO "$cpu_count * 3 / 4" | $BC)
	fi
	$ECHO $jobs
}
#
# On illumos this looks like: ":sd3:sd4:sd1:sd2:"
#
#
# Print a colon-delimited list of the LUNs backing 'pool', e.g.
# ":sd3:sd4:sd1:sd2:" on illumos or ":sda:sdb:" on Linux.
#
# $1 pool name
#
function pool_to_lun_list
{
	typeset pool=$1
	typeset ctd ctds devname lun
	typeset lun_list=':'

	if is_linux; then
		ctds=$($ZPOOL list -HLv $pool | \
		    $AWK '/sd[a-z]*|loop[0-9]*|dm-[0-9]*/ {print $1}')

		for ctd in $ctds; do
			lun_list="$lun_list$ctd:"
		done
	else
		ctds=$($ZPOOL list -v $pool |
		    $AWK '/c[0-9]*t[0-9a-fA-F]*d[0-9]*/ {print $1}')

		for ctd in $ctds; do
			# Get the device name as it appears in /etc/path_to_inst
			devname=$($READLINK -f /dev/dsk/${ctd}s0 | $SED -n \
			    's/\/devices\([^:]*\):.*/\1/p')
			# Add a string composed of the driver name and instance
			# number to the list for comparison with dev_statname.
			lun=$($SED 's/"//g' /etc/path_to_inst | $GREP \
			    $devname | $AWK '{print $3$2}')
			# Bug fix: accumulate into lun_list -- the original
			# assigned to an unused 'un_list' variable, silently
			# dropping every illumos LUN from the result.
			lun_list="$lun_list$lun:"
		done
	fi
	$ECHO $lun_list
}
# Create a perf_data directory to hold performance statistics and
# configuration information.
export PERF_DATA_DIR=$(get_perf_output_dir)
[[ -f $PERF_DATA_DIR/config.json ]] || get_system_config config.json

View File

@ -0,0 +1,10 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/perf/regression
dist_pkgdata_SCRIPTS = \
random_reads.ksh \
random_readwrite.ksh \
random_writes.ksh \
sequential_reads_cached_clone.ksh \
sequential_reads_cached.ksh \
sequential_reads.ksh \
sequential_writes.ksh \
setup.ksh

View File

@ -0,0 +1,83 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the random_reads job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read from are created prior to the first fio run, and used
# for all fio runs. The ARC is cleared with `zinject -a` prior to each run
# so reads will go to disk.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
# Destroy the dataset created for this test; registered via log_onexit
# so it runs regardless of how the test exits.
function cleanup
{
log_must $ZFS destroy $TESTFS
}
log_assert "Measure IO stats during random read load"
log_onexit cleanup
export TESTFS=$PERFPOOL/testfs
recreate_perfpool
log_must $ZFS create $PERF_FS_OPTS $TESTFS
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
# Variables for use by fio.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
fi
# Layout the files to be used by the read tests. Create as many files as the
# largest number of threads. An fio run with fewer threads will use a subset
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
log_must $FIO $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"$VMSTAT 1" "vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1"
"iostat")
else
export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"$VMSTAT 1" "vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat")
fi
log_note "Random reads with $PERF_RUNTYPE settings"
do_fio_run random_reads.fio $FALSE $TRUE
log_pass "Measure IO stats during random read load"

View File

@ -0,0 +1,83 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the random_readwrite job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read and write from are created prior to the first fio run,
# and used for all fio runs. The ARC is cleared with `zinject -a` prior to
# each run so reads will go to disk.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
# Destroy the dataset created for this test; registered via log_onexit
# so it runs regardless of how the test exits.
function cleanup
{
log_must $ZFS destroy $TESTFS
}
log_assert "Measure IO stats during random read-write load"
log_onexit cleanup
export TESTFS=$PERFPOOL/testfs
recreate_perfpool
log_must $ZFS create $PERF_FS_OPTS $TESTFS
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
# Variables for use by fio.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
export PERF_IOSIZES='' # bssplit used instead
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES='' # bssplit used instead
fi
# Layout the files to be used by the readwrite tests. Create as many files
# as the largest number of threads. An fio run with fewer threads will use
# a subset of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
log_must $FIO $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"$VMSTAT 1" "vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1"
"iostat")
else
export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"$VMSTAT 1" "vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat")
fi
log_note "Random reads and writes with $PERF_RUNTYPE settings"
do_fio_run random_readwrite.fio $FALSE $TRUE
log_pass "Measure IO stats during random read and write load"

View File

@ -0,0 +1,75 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the random_writes job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# Prior to each fio run the dataset is recreated, and fio writes new files
# into an otherwise empty pool.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
# Destroy the dataset created for this test; registered via log_onexit
# so it runs regardless of how the test exits.
function cleanup
{
log_must $ZFS destroy $TESTFS
}
log_assert "Measure IO stats during random write load"
log_onexit cleanup
export TESTFS=$PERFPOOL/testfs
recreate_perfpool
log_must $ZFS create $PERF_FS_OPTS $TESTFS
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
# Variables for use by fio.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
fi
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"$VMSTAT 1" "vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1"
"iostat")
else
export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"$VMSTAT 1" "vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat")
fi
log_note "Random writes with $PERF_RUNTYPE settings"
do_fio_run random_writes.fio $TRUE $FALSE
log_pass "Measure IO stats during random write load"

View File

@ -0,0 +1,84 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the sequential_reads job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read from are created prior to the first fio run, and used
# for all fio runs. The ARC is cleared with `zinject -a` prior to each run
# so reads will go to disk.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
# Destroy the dataset created below when the test exits (registered with
# log_onexit).
function cleanup
{
log_must $ZFS destroy $TESTFS
}
log_assert "Measure IO stats during sequential read load"
log_onexit cleanup
# All fio I/O for this test goes to a dedicated filesystem in the perf pool.
export TESTFS=$PERFPOOL/testfs
recreate_perfpool
log_must $ZFS create $PERF_FS_OPTS $TESTFS
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
# Variables for use by fio.
# Each ${VAR:-default} lets a value already present in the environment
# override the weekly/nightly defaults below.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
fi
# Layout the files to be used by the read tests. Create as many files as the
# largest number of threads. An fio run with fewer threads will use a subset
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
log_must $FIO $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
# On Linux, "zpool iostat" and prefetch_io.sh stand in for the dtrace io.d
# and prefetch_io.d scripts used on other platforms.
if is_linux; then
export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "$VMSTAT 1"
"vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1" "iostat")
else
export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch" "$VMSTAT 1"
"vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat")
fi
log_note "Sequential reads with $PERF_RUNTYPE settings"
# NOTE(review): the two boolean arguments presumably select "recreate the
# dataset between runs" and "clear the ARC before each run" -- confirm
# against do_fio_run in perf.shlib.
do_fio_run sequential_reads.fio $FALSE $TRUE
log_pass "Measure IO stats during sequential read load"

View File

@ -0,0 +1,83 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the sequential_reads job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read from are created prior to the first fio run, and used
# for all fio runs. The ARC is not cleared to ensure that all data is cached.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
# Destroy the dataset created below when the test exits (registered with
# log_onexit).
function cleanup
{
log_must $ZFS destroy $TESTFS
}
log_assert "Measure IO stats during sequential read load"
log_onexit cleanup
# All fio I/O for this test goes to a dedicated filesystem in the perf pool.
export TESTFS=$PERFPOOL/testfs
recreate_perfpool
log_must $ZFS create $PERF_FS_OPTS $TESTFS
# Make sure the working set can be cached in the arc. Aim for 1/2 of arc.
export TOTAL_SIZE=$(($(get_max_arc_size) / 2))
# Variables for use by fio.
# Each ${VAR:-default} lets a value already present in the environment
# override the weekly/nightly defaults below.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
fi
# Layout the files to be used by the read tests. Create as many files as the
# largest number of threads. An fio run with fewer threads will use a subset
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
log_must $FIO $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
# On Linux, "zpool iostat" and prefetch_io.sh stand in for the dtrace io.d
# and prefetch_io.d scripts used on other platforms.
if is_linux; then
export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "$VMSTAT 1"
"vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1" "iostat")
else
export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch" "$VMSTAT 1"
"vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat")
fi
log_note "Sequential cached reads with $PERF_RUNTYPE settings"
# NOTE(review): both booleans are FALSE here -- presumably "do not recreate
# the dataset" and "do not clear the ARC", so the working set stays cached.
# Confirm against do_fio_run in perf.shlib.
do_fio_run sequential_reads.fio $FALSE $FALSE
log_pass "Measure IO stats during sequential cached read load"

View File

@ -0,0 +1,99 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the sequential_reads job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read from are created prior to the first fio run, and used
# for all fio runs. This test will exercise cached read performance from
# a clone filesystem. The data is initially cached in the ARC and then
# a snapshot and clone are created. All the performance runs are then
# initiated against the clone filesystem to exercise the performance of
# reads when the ARC has to create another buffer from a different dataset.
# It will also exercise the need to evict the duplicate buffer once the last
# reference on that buffer is released.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
# NOTE(review): TESTFS is reassigned to the clone below, so this destroys
# only the clone; the snapshot and the original testfs are presumably torn
# down when recreate_perfpool rebuilds the pool on the next run -- confirm.
function cleanup
{
log_must $ZFS destroy $TESTFS
}
log_assert "Measure IO stats during sequential read load"
log_onexit cleanup
# All fio I/O for this test goes to a dedicated filesystem in the perf pool.
export TESTFS=$PERFPOOL/testfs
recreate_perfpool
log_must $ZFS create $PERF_FS_OPTS $TESTFS
# Make sure the working set can be cached in the arc. Aim for 1/2 of arc.
export TOTAL_SIZE=$(($(get_max_arc_size) / 2))
# Variables for use by fio.
# Each ${VAR:-default} lets a value already present in the environment
# override the weekly/nightly defaults below.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
fi
# Layout the files to be used by the read tests. Create as many files as the
# largest number of threads. An fio run with fewer threads will use a subset
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
log_must $FIO $FIO_SCRIPTS/mkfiles.fio
# TESTSNAP and TESTCLONE are presumably defined by the sourced libraries.
log_note "Creating snapshot, $TESTSNAP, of $TESTFS"
create_snapshot $TESTFS $TESTSNAP
log_note "Creating clone, $PERFPOOL/$TESTCLONE, from $TESTFS@$TESTSNAP"
create_clone $TESTFS@$TESTSNAP $PERFPOOL/$TESTCLONE
#
# Reset the TESTFS to point to the clone
#
export TESTFS=$PERFPOOL/$TESTCLONE
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
# On Linux, "zpool iostat" and prefetch_io.sh stand in for the dtrace io.d
# and prefetch_io.d scripts used on other platforms.
if is_linux; then
export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "$VMSTAT 1"
"vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1" "iostat")
else
export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch" "$VMSTAT 1"
"vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat")
fi
log_note "Sequential cached reads from $TESTFS with $PERF_RUNTYPE settings"
# NOTE(review): both booleans are FALSE here -- presumably "do not recreate
# the dataset" and "do not clear the ARC". Confirm against do_fio_run.
do_fio_run sequential_reads.fio $FALSE $FALSE
log_pass "Measure IO stats during sequential cached read load"

View File

@ -0,0 +1,75 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the sequential_writes job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# Prior to each fio run the dataset is recreated, and fio writes new files
# into an otherwise empty pool.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
# Destroy the dataset created below when the test exits. Define cleanup
# before registering it with log_onexit, so the exit trap can never fire
# while the function is still undefined; this also matches the ordering
# used by the other perf regression scripts.
function cleanup
{
log_must $ZFS destroy $TESTFS
}
log_assert "Measure IO stats during sequential write load"
log_onexit cleanup
# All fio I/O for this test goes to a dedicated filesystem in the perf pool.
export TESTFS=$PERFPOOL/testfs
recreate_perfpool
log_must $ZFS create $PERF_FS_OPTS $TESTFS
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
# Variables for use by fio.
# Each ${VAR:-default} lets a value already present in the environment
# override the weekly/nightly defaults below.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
fi
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
# On Linux, "zpool iostat" stands in for the dtrace io.d script used on
# other platforms, and the stat tools take different flags.
if is_linux; then
export collect_scripts=("$ZPOOL iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"$VMSTAT 1" "vmstat" "$MPSTAT -P ALL 1" "mpstat" "$IOSTAT -dxyz 1"
"iostat")
else
export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"$VMSTAT 1" "vmstat" "$MPSTAT 1" "mpstat" "$IOSTAT -xcnz 1" "iostat")
fi
log_note "Sequential writes with $PERF_RUNTYPE settings"
# NOTE(review): the two boolean arguments presumably select "recreate the
# dataset between runs" and "clear the ARC before each run" -- confirm
# against do_fio_run in perf.shlib.
do_fio_run sequential_writes.fio $TRUE $FALSE
log_pass "Measure IO stats during sequential write load"

View File

@ -0,0 +1,30 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# The perf regression tests depend on several external statistics
# gathering tools. Each tool's path is resolved by the test framework;
# an empty variable means the tool was not found, so abort early.
#
if [[ -z $FIO ]]; then
	log_fail "Missing fio"
fi
if [[ -z $FREE ]]; then
	log_fail "Missing free"
fi
if [[ -z $IOSTAT ]]; then
	log_fail "Missing iostat"
fi
if [[ -z $LSBLK ]]; then
	log_fail "Missing lsblk"
fi
if [[ -z $MPSTAT ]]; then
	log_fail "Missing mpstat"
fi
if [[ -z $VMSTAT ]]; then
	log_fail "Missing vmstat"
fi
# The suite must run in the global zone and needs at least three disks.
verify_runnable "global"
verify_disk_count "$DISKS" 3
log_pass

View File

@ -0,0 +1,2 @@
# Install the perf collection helper scripts into the shared
# zfs-tests data directory.
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/perf/scripts
dist_pkgdata_SCRIPTS = prefetch_io.sh

View File

@ -0,0 +1,85 @@
#!/bin/bash
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2016 by Intel, Corp.
#
#
# Linux platform placeholder for collecting prefetch I/O stats
# TBD if we can add additional kstats to achieve the desired results
#
# Root of the ZFS kstats on Linux; arcstats under this directory supplies
# every counter sampled below.
zfs_kstats="/proc/spl/kstat/zfs"
# Allow the environment to override the tool paths; fall back to PATH lookup.
AWK=${AWK:-awk}
DATE=${DATE:-date}
#
# Print the total number of prefetch I/Os (data + metadata misses) from
# the arcstats kstat file. Each arcstats line is "<name> <type> <value>",
# so both counters are summed in a single awk pass (the original spawned
# two awk processes). "+ 0" forces a numeric result even when a counter
# line is absent, and "typeset -i" (integer) replaces the original
# "typeset -l", which is the lowercase attribute, not integer.
#
function get_prefetch_ios
{
	typeset -i total_misses=$($AWK '
	    $1 == "prefetch_data_misses" ||
	    $1 == "prefetch_metadata_misses" { total += $3 }
	    END { print total + 0 }' "$zfs_kstats/arcstats")
	echo $total_misses
}
#
# Print the current value of the demand_hit_predictive_prefetch arcstats
# counter. Uses $(...) instead of backticks, quotes the kstat path, and
# declares the variable with "typeset -i" (integer) rather than the
# original "typeset -l" (lowercase attribute), which also yields 0 if
# the counter line is missing.
#
function get_prefetched_demand_reads
{
	typeset -i demand_reads=$($AWK \
	    '$1 == "demand_hit_predictive_prefetch" { print $3 }' \
	    "$zfs_kstats/arcstats")
	echo $demand_reads
}
#
# Print the current value of the sync_wait_for_async arcstats counter.
# Uses $(...) instead of backticks, quotes the kstat path, and declares
# the variable with "typeset -i" (integer) rather than the original
# "typeset -l" (lowercase attribute), which also yields 0 if the counter
# line is missing.
#
function get_sync_wait_for_async
{
	typeset -i sync_wait=$($AWK \
	    '$1 == "sync_wait_for_async" { print $3 }' \
	    "$zfs_kstats/arcstats")
	echo $sync_wait
}
#
# Sample the prefetch counters once per interval, printing the delta of
# each counter since the previous sample, until the collector is killed.
# The first line of each sample is an epoch timestamp.
#
if [[ $# -ne 2 ]]; then
	echo "Usage: `basename $0` <poolname> interval" >&2
	exit 1
fi
poolname=$1
interval=$2

# Baseline readings; every loop iteration reports the change since these.
prev_ios=$(get_prefetch_ios)
prev_demand=$(get_prefetched_demand_reads)
prev_syncwait=$(get_sync_wait_for_async)

while :; do
	cur_ios=$(get_prefetch_ios)
	printf "%u\n%-24s\t%u\n" $($DATE +%s) "prefetch_ios" \
	    $((cur_ios - prev_ios))
	prev_ios=$cur_ios

	cur_demand=$(get_prefetched_demand_reads)
	printf "%-24s\t%u\n" "prefetched_demand_reads" \
	    $((cur_demand - prev_demand))
	prev_demand=$cur_demand

	cur_syncwait=$(get_sync_wait_for_async)
	printf "%-24s\t%u\n" "sync_wait_for_async" \
	    $((cur_syncwait - prev_syncwait))
	prev_syncwait=$cur_syncwait

	sleep $interval
done