test/nvme_perf: add option for alternative fio job layout
Allow for an alternative fio job layout.

"group" (default) creates X fio job sections for X assigned CPUs and distributes the available NVMe drives between the sections. For example, a 4-CPU test with 24 NVMes results in 4 fio job sections with 6 NVMes each and a scaled iodepth.

"split" creates a separate job section for each available NVMe, but still pins it to the desired CPU thread using the cpus_allowed option. This provides an alternative way of testing io_uring, which fails to run some cases with fixedbufs=true and high iodepth values (caused by the system-wide default UIO_MAXIOV=1024).

Signed-off-by: Karol Latecki <karol.latecki@intel.com>
Change-Id: Iab835596a59f6437b067d1117208e5abd6c8d36b
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/5727
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Michal Berger <michalx.berger@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Parent: 7985c34ca0
Commit: ec1494e92b
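As a rough illustration of the two layouts (a hypothetical 2-CPU / 4-NVMe setup with IODEPTH=128; the PCIe addresses are made up, and the section names follow the filename${i} and filename${m}-${core} patterns used by create_fio_config below), the generated job sections look roughly like this:

    # "group" (default): one section per CPU, iodepth scaled by disks per section
    [filename0]
    iodepth=256
    cpus_allowed=0
    filename=trtype=PCIe traddr=0000.1a.00.0 ns=1
    filename=trtype=PCIe traddr=0000.1b.00.0 ns=1

    [filename1]
    iodepth=256
    cpus_allowed=1
    filename=trtype=PCIe traddr=0000.2a.00.0 ns=1
    filename=trtype=PCIe traddr=0000.2b.00.0 ns=1

    # "split": one section per NVMe, still pinned to a CPU, iodepth left unscaled
    [filename0-0]
    iodepth=128
    cpus_allowed=0
    filename=trtype=PCIe traddr=0000.1a.00.0 ns=1

    [filename1-0]
    iodepth=128
    cpus_allowed=0
    filename=trtype=PCIe traddr=0000.1b.00.0 ns=1

    # ... and likewise filename0-1 / filename1-1 for the second CPU

Keeping the per-job iodepth at the per-device value is what makes the "split" layout usable with io_uring's fixedbufs=true under the default UIO_MAXIOV limit, as described in the commit message.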
@@ -260,19 +260,28 @@ function create_fio_config() {
         # SPDK fio plugin supports submitting/completing I/Os to multiple SSDs from a single thread.
         # Therefore, the per thread queue depth is set to the desired IODEPTH/device X the number of devices per thread.
         QD=$IODEPTH
-        if [[ "$NOIOSCALING" = false ]]; then
+        if [[ "$NOIOSCALING" == false ]]; then
             QD=$((IODEPTH * total_disks_per_core))
         fi
 
-        fio_job_section+=("")
-        fio_job_section+=("[filename${i}]")
-        fio_job_section+=("iodepth=$QD")
-        fio_job_section+=("cpus_allowed=${cores[$i]} #CPU NUMA Node ${cores_numa[$i]}")
+        if [[ "$FIO_FNAME_STRATEGY" == "group" ]]; then
+            fio_job_section+=("")
+            fio_job_section+=("[filename${i}]")
+            fio_job_section+=("iodepth=$QD")
+            fio_job_section+=("cpus_allowed=${cores[$i]} #CPU NUMA Node ${cores_numa[$i]}")
+        fi
 
         while [[ "$m" -lt "$total_disks_per_core" ]]; do
             # Try to add disks to job section if it's NUMA node matches NUMA
             # for currently selected CPU
             if [[ "${disks_numa[$n]}" == "$core_numa" ]]; then
+                if [[ "$FIO_FNAME_STRATEGY" == "split" ]]; then
+                    fio_job_section+=("")
+                    fio_job_section+=("[filename${m}-${cores[$i]}]")
+                    fio_job_section+=("iodepth=$QD")
+                    fio_job_section+=("cpus_allowed=${cores[$i]} #CPU NUMA Node ${cores_numa[$i]}")
+                fi
+
                 if [[ "$plugin" == "spdk-plugin-nvme" ]]; then
                     fio_job_section+=("filename=trtype=PCIe traddr=${disks[$n]//:/.} ns=1 #NVMe NUMA Node ${disks_numa[$n]}")
                 elif [[ "$plugin" == "spdk-plugin-bdev" ]]; then
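To make the iodepth scaling above concrete, here is a minimal standalone sketch with hypothetical values (not part of the test script): with the default "group" strategy one job section drives several disks, so its queue depth is multiplied, while the "split" strategy sets NOIOSCALING=true and leaves the depth at the per-device value.

    #!/usr/bin/env bash
    # Hypothetical numbers: 24 NVMes spread over 4 CPUs -> 6 disks per job section.
    IODEPTH=128
    total_disks_per_core=6
    NOIOSCALING=false      # the "split" strategy flips this to true

    QD=$IODEPTH
    if [[ "$NOIOSCALING" == false ]]; then
        # One fio thread submits/completes I/O for all disks in its section,
        # so the job-level iodepth is the per-device depth times the disk count.
        QD=$((IODEPTH * total_disks_per_core))
    fi
    echo "iodepth=$QD"     # prints iodepth=768 for "group", 128 for "split"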
@@ -33,6 +33,7 @@ LATENCY_LOG=false
 IO_BATCH_SUBMIT=0
 IO_BATCH_COMPLETE=0
 FIO_BIN=$CONFIG_FIO_SOURCE_DIR/fio
+FIO_FNAME_STRATEGY="group"
 TMP_RESULT_FILE=$testdir/result.json
 PLUGIN="nvme"
 DISKCFG=""
@@ -76,6 +77,8 @@ function usage() {
     echo " --io-batch-complete=INT Value for iodepth_batch_complete fio option [default=$IO_BATCH_COMPLETE]"
     echo " --fio-bin=PATH Path to fio binary. [default=$FIO_BIN]"
     echo " Applicable only for fio-based tests."
+    echo " --fio-fname-strategy=STR Use 'group' to group filenames under job section with common CPU or"
+    echo " use 'split' to create a separate fio job section for each filename [default=$FIO_FNAME_STRATEGY]"
     echo
     echo "Test setup parameters:"
     echo " --driver=STR Selects tool used for testing. Choices available:"
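A hedged usage example of the new flag (the script name and the other option values are illustrative assumptions, not taken from the commit):

    # Run the perf test with one fio job section per NVMe; selecting "split"
    # also disables iodepth scaling, as handled in the option parsing below.
    ./run_perf.sh --driver=spdk-plugin-nvme --fio-bin=/usr/src/fio/fio \
        --fio-fname-strategy=split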
@@ -130,6 +133,12 @@ while getopts 'h-:' optchar; do
             io-batch-submit=*) IO_BATCH_SUBMIT="${OPTARG#*=}" ;;
             io-batch-complete=*) IO_BATCH_COMPLETE="${OPTARG#*=}" ;;
             fio-bin=*) FIO_BIN="${OPTARG#*=}" ;;
+            fio-fname-strategy=*)
+                FIO_FNAME_STRATEGY="${OPTARG#*=}"
+                if [[ "$FIO_FNAME_STRATEGY" == "split" ]]; then
+                    NOIOSCALING=true
+                fi
+                ;;
             driver=*) PLUGIN="${OPTARG#*=}" ;;
             disk-config=*)
                 DISKCFG="${OPTARG#*=}"
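For reference, a minimal self-contained sketch (assumed names, not the actual SPDK script) of the getopts 'h-:' long-option pattern the parser above relies on: a word like "--fio-fname-strategy=split" arrives as OPTARG="fio-fname-strategy=split", and the value is extracted with ${OPTARG#*=}.

    #!/usr/bin/env bash
    # Standalone sketch mirroring the structure of the option parsing above.
    FIO_FNAME_STRATEGY="group"
    NOIOSCALING=false
    while getopts 'h-:' optchar; do
        case "$optchar" in
            -)
                case "$OPTARG" in
                    fio-fname-strategy=*)
                        FIO_FNAME_STRATEGY="${OPTARG#*=}"
                        # "split" creates one job per disk, so per-job iodepth
                        # scaling is switched off to keep the depth per device constant.
                        if [[ "$FIO_FNAME_STRATEGY" == "split" ]]; then
                            NOIOSCALING=true
                        fi
                        ;;
                esac
                ;;
            h) echo "usage: $0 [--fio-fname-strategy=group|split]" ;;
        esac
    done
    echo "strategy=$FIO_FNAME_STRATEGY noioscaling=$NOIOSCALING"

Running this sketch as "./sketch.sh --fio-fname-strategy=split" prints "strategy=split noioscaling=true".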