test/nvme: enable BPF traces for bdevperf performance tests

Enable BPF traces only for bdevperf for now, as it's
easiest to do thanks to "-z" option (wait for RPC before
starting the workload).

Support for other performance tools will come later — their
workloads start immediately, so enabling traces for them will
need more thought.

Change-Id: I4a1cab358f4f3fcf1c838ed2dcd06b595c79f3dc
Signed-off-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8687
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Karol Latecki 2021-07-06 13:14:28 +02:00 committed by Tomasz Zawadzki
parent ed5f83391b
commit 16e90dbded
2 changed files with 46 additions and 2 deletions

View File

@@ -407,12 +407,49 @@ function run_nvme_fio() {
}
# Run a bdevperf workload, optionally with BPF traces attached.
# Globals read: bdevperf_dir, rootdir, testdir, IODEPTH, BLK_SIZE, RW, MIX,
#   RUNTIME, CPUS_ALLOWED, MAIN_CORE, BPFTRACES, TMP_BPF_FILE.
# bdevperf is started with "-z" so it waits for an RPC before running the
# workload; that window is used to attach the bpftrace scripts first.
function run_bdevperf() {
	echo "** Running bdevperf test, this can take a while, depending on the run-time setting."
	local bdevperf_rpc
	local bdevperf_pid
	local rpc_socket
	local bpf_script_cmd
	local bpf_script_pid
	local main_core_param=""

	bdevperf_rpc="$rootdir/test/bdev/bdevperf/bdevperf.py"
	rpc_socket="/var/tmp/spdk.sock"

	if [[ -n $MAIN_CORE ]]; then
		main_core_param="-p ${MAIN_CORE}"
	fi

	# Start bdevperf in the background; "-z" makes it idle until the
	# perform_tests RPC is issued below.
	$bdevperf_dir/bdevperf --json $testdir/bdev.conf -q $IODEPTH -o $BLK_SIZE -w $RW -M $MIX -t $RUNTIME -m "[$CPUS_ALLOWED]" -r "$rpc_socket" $main_core_param -z &
	bdevperf_pid=$!
	waitforlisten $bdevperf_pid

	if [[ ${#BPFTRACES[@]} -gt 0 ]]; then
		echo "INFO: Enabling BPF Traces ${BPFTRACES[*]}"
		# Build the bpftrace.sh command as an array so trace script paths
		# with spaces survive word-splitting.
		bpf_script_cmd=("$rootdir/scripts/bpftrace.sh")
		bpf_script_cmd+=("$bdevperf_pid")
		for trace in "${BPFTRACES[@]}"; do
			bpf_script_cmd+=("$rootdir/scripts/bpf/$trace")
		done

		BPF_OUTFILE=$TMP_BPF_FILE "${bpf_script_cmd[@]}" &
		bpf_script_pid=$!
		# Give bpftrace a moment to attach its probes before the workload starts.
		sleep 3
	fi

	# Kick off the actual workload via RPC.
	PYTHONPATH=$PYTHONPATH:$rootdir/scripts $bdevperf_rpc -s "$rpc_socket" perform_tests

	# Using "-z" option causes bdevperf to NOT exit automatically after running the test,
	# so we need to stop it ourselves.
	kill -s SIGINT $bdevperf_pid
	wait $bdevperf_pid
	# bpf_script_pid is only set when traces were enabled; empty evaluates to 0.
	if ((bpf_script_pid)); then
		wait $bpf_script_pid
	fi
	sleep 1
}

View File

@@ -36,6 +36,7 @@ FIO_BIN=$CONFIG_FIO_SOURCE_DIR/fio
FIO_FNAME_STRATEGY="group"
TMP_RESULT_FILE=$testdir/result.json
MAIN_CORE=""
TMP_BPF_FILE=$testdir/bpftraces.txt
PLUGIN="nvme"
DISKCFG=""
BDEV_CACHE=""
@@ -47,6 +48,7 @@ PRECONDITIONING=true
CPUFREQ=""
PERFTOP=false
DPDKMEM=false
BPFTRACES=()
DATE="$(date +'%m_%d_%Y_%H%M%S')"
function usage() {
@@ -111,6 +113,9 @@ function usage() {
echo "Other options:"
echo " --perftop Run perftop measurements on the same CPU cores as specified in --cpu-allowed option."
echo " --dpdk-mem-stats Dump DPDK memory stats during the test."
echo " --bpf-traces=LIST Comma delimited list of .bt scripts for enabling BPF traces."
echo " List of .bt scripts available in spdk/scripts/bpf."
echo " Only for spdk-perf-bdev"
set -x
}
@@ -163,6 +168,7 @@ while getopts 'h-:' optchar; do
cpu-frequency=*) CPUFREQ="${OPTARG#*=}" ;;
perftop) PERFTOP=true ;;
dpdk-mem-stats) DPDKMEM=true ;;
bpf-traces=*) IFS="," read -r -a BPFTRACES <<< "${OPTARG#*=}" ;;
latency-log) LATENCY_LOG=true ;;
main-core=*) MAIN_CORE="${OPTARG#*=}" ;;
*)
@@ -304,6 +310,7 @@ for ((j = 0; j < REPEAT_NO; j++)); do
iops_disks=$(bc "$iops_disks + $iops")
bw=$(bc "$bw + $bandwidth")
cp $TMP_RESULT_FILE $result_dir/perf_results_${MIX}_${PLUGIN}_${NO_CORES}cpus_${DATE}_${k}_disks_${j}.output
[[ -f $TMP_BPF_FILE ]] && mv $TMP_BPF_FILE $result_dir/bpftraces_${MIX}_${PLUGIN}_${NO_CORES}cpus_${DATE}_${k}_disks_${j}.txt
elif [ $PLUGIN = "spdk-perf-nvme" ]; then
run_nvmeperf $DISKNO > $TMP_RESULT_FILE
read -r iops bandwidth mean_lat min_lat max_lat <<< $(get_nvmeperf_results)