numam-spdk/test/lib/nvme/nvme.sh
Jim Harris 8de75f8107 nvme/perf: add software-based latency tracking
The latency tracking is done with ranges of bucket arrays.
The bucket for any given I/O is determined solely by TSC
deltas - any translation to microseconds is only done after
the test is finished and statistics are printed.
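
That deferred translation amounts to scaling the TSC delta by the
timestamp counter frequency.  A minimal sketch, assuming SPDK's
spdk_get_ticks_hz() for the tick rate (the helper name here is
illustrative, not part of the perf tool):

    #include <stdint.h>
    #include "spdk/env.h"

    /* Convert a raw TSC delta to microseconds.  Called only while
     * printing the final statistics, never on the I/O path. */
    static double
    tsc_to_usec(uint64_t tsc_delta)
    {
            return tsc_delta * 1000000.0 / spdk_get_ticks_hz();
    }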

Each range has a number of buckets determined by a
NUM_BUCKETS_PER_RANGE value, which is currently set to 128.
The buckets in ranges 0 and 1 each map to a single TSC delta.
The buckets in each subsequent range cover twice as many TSC
deltas as those in the previous range (a sketch of this mapping
follows the table):

Range 0:  1 TSC each - 128 buckets cover deltas    0 to  127
Range 1:  1 TSC each - 128 buckets cover deltas  128 to  255
Range 2:  2 TSC each - 128 buckets cover deltas  256 to  511
Range 3:  4 TSC each - 128 buckets cover deltas  512 to 1023
Range 4:  8 TSC each - 128 buckets cover deltas 1024 to 2047
Range 5: 16 TSC each - 128 buckets cover deltas 2048 to 4095
etc.
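
The (range, bucket) pair for a delta therefore falls out of the
position of the delta's highest set bit, since range r (for r >= 1)
covers deltas [128 * 2^(r-1), 128 * 2^r).  The sketch below shows one
way to compute it; it is illustrative only, not SPDK's actual code,
and assumes GCC/Clang's __builtin_clzll:

    #include <stdint.h>

    #define NUM_BUCKETS_PER_RANGE 128

    /* Map a TSC delta to its (range, bucket) pair. */
    static void
    get_sw_latency_bucket(uint64_t tsc_delta, uint32_t *range,
                          uint32_t *bucket)
    {
            if (tsc_delta < 2 * NUM_BUCKETS_PER_RANGE) {
                    /* Ranges 0 and 1: one TSC delta per bucket. */
                    *range = (uint32_t)(tsc_delta / NUM_BUCKETS_PER_RANGE);
                    *bucket = (uint32_t)(tsc_delta % NUM_BUCKETS_PER_RANGE);
                    return;
            }

            /* Range r >= 2 covers deltas [2^(r+6), 2^(r+7)), so r
             * falls out of floor(log2(delta)); each of its 128
             * buckets spans 2^(r-1) TSC deltas. */
            uint32_t msb = 63 - (uint32_t)__builtin_clzll(tsc_delta);
            *range = msb - 6;
            *bucket = (uint32_t)((tsc_delta - (1ULL << msb)) >> (*range - 1));
    }

With this layout, recording a completion costs a few integer
operations per I/O, and each range needs only a 128-entry array of
counters.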

While here, rename some variables and update usage messages to
distinguish the existing latency tracking, done via vendor-specific
NVMe log pages on Intel NVMe SSDs, from the newly added latency
tracking done in software.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I299f1c1f6dbfa7ea0e73085f7a685e71fc687a2b
2017-05-17 09:49:27 -07:00

#!/usr/bin/env bash
set -e
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/scripts/autotest_common.sh
function linux_iter_pci {
	# Print the domain:bus:device.function address of each PCI
	# device matching the given class code.
	lspci -mm -n -D | grep $1 | tr -d '"' | awk -F " " '{print $1}'
}
timing_enter nvme
if [ $RUN_NIGHTLY -eq 1 ]; then
	timing_enter aer
	$testdir/aer/aer
	timing_exit aer

	timing_enter reset
	$testdir/reset/reset -q 64 -w write -s 4096 -t 2
	timing_exit reset
fi
timing_enter identify
$rootdir/examples/nvme/identify/identify
# 0108 is the PCI class code for NVMe controllers.
for bdf in $(linux_iter_pci 0108); do
	$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:${bdf}"
done
timing_exit identify
timing_enter perf
$rootdir/examples/nvme/perf/perf -q 128 -w read -s 12288 -t 1 -L
timing_exit perf
timing_enter reserve
$rootdir/examples/nvme/reserve/reserve
timing_exit reserve
timing_enter hello_world
$rootdir/examples/nvme/hello_world/hello_world
timing_exit hello_world
timing_enter overhead
$rootdir/test/lib/nvme/overhead/overhead -s 4096 -t 1
timing_exit overhead
if [ -d /usr/src/fio ]; then
	timing_enter fio_plugin
	# Run the fio plugin example against the first NVMe device only.
	for bdf in $(linux_iter_pci 0108); do
		/usr/src/fio/fio $rootdir/examples/nvme/fio_plugin/example_config.fio --filename="trtype=PCIe traddr=${bdf//:/.} ns=1"
		break
	done
	timing_exit fio_plugin
fi
timing_enter arbitration
$rootdir/examples/nvme/arbitration/arbitration -t 3
timing_exit arbitration
if [ $(uname -s) = Linux ]; then
	timing_enter multi_process
	# Start a long-running primary process, then exercise secondary
	# processes against it.  -i 0 puts all of the processes in the
	# same shared memory group.
	$rootdir/examples/nvme/arbitration/arbitration -i 0 -s 4096 -t 10 -c 0xf &
	pid=$!
	sleep 3

	# Abruptly kill a secondary process mid-run; the primary should
	# keep running.
	$rootdir/examples/nvme/perf/perf -i 0 -q 1 -w randread -s 4096 -t 10 -c 0x10 &
	sleep 1
	kill -9 $!

	count=0
	while [ $count -le 2 ]; do
		$rootdir/examples/nvme/perf/perf -i 0 -q 1 -w read -s 4096 -t 1 -c 0xf
		count=$(($count + 1))
	done

	# Run secondary perf and identify processes concurrently with
	# the primary, each perf pinned to its own core.
	count=0
	while [ $count -le 1 ]; do
		core=$((1 << (($count + 4))))
		printf -v hexcore "0x%x" "$core"
		$rootdir/examples/nvme/perf/perf -i 0 -q 128 -w read -s 4096 -t 1 -c $hexcore &
		$rootdir/examples/nvme/identify/identify -i 0 &
		count=$(($count + 1))
	done

	wait $pid
	timing_exit multi_process
fi
timing_enter sgl
$testdir/sgl/sgl
timing_exit sgl
timing_enter e2edp
$testdir/e2edp/nvme_dp
timing_exit e2edp
timing_exit nvme