test/scheduler: Use separate cgroup for the tests

Two cgroups are created prior to running the scheduler tests:
  - /cpuset/spdk
  - /cpuset/all

/cpuset/spdk is the cgroup dedicated to the tests, i.e., to the SPDK
processes executed along the way. Its resources consist of the cpus
that are initially picked up by isolate_cores.sh.
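
Roughly speaking (the $spdk_cpus_csv and $spdk_cpus_mems names come from
the diff below; the cgroup-v1 cpuset path is an assumption), dedicating
those cpus boils down to:

	# Restrict the spdk cgroup to the isolated cpus and their memory nodes
	echo "$spdk_cpus_csv" > /sys/fs/cgroup/cpuset/spdk/cpuset.cpus
	echo "$spdk_cpus_mems" > /sys/fs/cgroup/cpuset/spdk/cpuset.mems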

/cpuset/all is the "dummy" cgroup that holds most of the remaining
processes that run on the target system - "most" since not every
process (especially kernel threads) can be migrated between cgroups.
This cgroup's resources include all the online cpus except those
selected for /cpuset/spdk.
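
This is what the move_cgroup_procs "/cpuset" "/cpuset/all" call in the
diff below boils down to; a sketch assuming the cgroup-v1 cpuset layout,
with failed writes (e.g. for pinned kernel threads) simply skipped:

	# Move every task attached to the parent cpuset into /cpuset/all;
	# kernel threads that cannot change affinity reject the move.
	while read -r pid; do
		echo "$pid" > /sys/fs/cgroup/cpuset/all/cgroup.procs 2> /dev/null || true
	done < /sys/fs/cgroup/cpuset/cgroup.procs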

This should lower the noise on the cpus dedicated to SPDK and make
sure that the load on each of them is generated exclusively by SPDK.

Fixes issue #1950

Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ic45149f55052ff03bead0b9bea086f95c87ea75d
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10584
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Michal Berger 2021-12-07 11:51:36 +01:00 committed by Tomasz Zawadzki
parent 1fd9dccf78
commit 977a53573f
3 changed files with 29 additions and 1 deletion


@@ -142,5 +142,11 @@ kill_in_cgroup() {
	fi
}

remove_cpuset_cgroup() {
	if ((cgroup_version == 2)); then
		remove_cgroup /cpuset
	fi
}

declare -r sysfs_cgroup=/sys/fs/cgroup
cgroup_version=$(check_cgroup)
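
check_cgroup() and remove_cgroup() are helpers this hunk relies on but
does not show; a simplified sketch of their likely shape (an assumption,
not the actual implementation):

	check_cgroup() {
		# The unified (v2) hierarchy exposes cgroup.controllers at its root
		if [[ -e $sysfs_cgroup/cgroup.controllers ]]; then
			echo 2
		else
			echo 1
		fi
	}

	remove_cgroup() {
		# A cgroup directory can be rmdir'ed once no tasks are attached to it
		rmdir "$sysfs_cgroup$1"
	}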


@@ -379,7 +379,7 @@ exec_under_dynamic_scheduler() {
	if [[ -e /proc/$spdk_pid/status ]]; then
		killprocess "$spdk_pid"
	fi
	"$@" --wait-for-rpc &
	exec_in_cgroup "/cpuset/spdk" "$@" --wait-for-rpc &
	spdk_pid=$!
	# Give some time for the app to init itself
	waitforlisten "$spdk_pid"
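
exec_in_cgroup() is another helper used here but defined outside of this
hunk; conceptually it attaches the calling shell to the given cgroup and
then execs the target command, so the SPDK app inherits the cgroup
membership. A minimal sketch, assuming the same /sys/fs/cgroup layout as
above:

	exec_in_cgroup() {
		# Join the cgroup first, then replace this shell with the command
		local cgroup=$1
		shift
		echo "$BASHPID" > "/sys/fs/cgroup$cgroup/cgroup.procs"
		exec "$@"
	}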


@@ -3,9 +3,21 @@ xtrace_disable
source "$testdir/common.sh"

restore_cgroups() {
	xtrace_disable
	kill_in_cgroup "/cpuset/spdk"
	remove_cgroup "/cpuset/spdk"
	remove_cgroup "/cpuset/all"
	remove_cpuset_cgroup
	xtrace_restore
}

trap "restore_cgroups" EXIT

# Number of cpus to include in the mask
NUM_CPUS=${NUM_CPUS:-8}

init_cpuset_cgroup

map_cpus

# Build core mask. Avoid all CPUs that may be offline and skip cpu0
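
init_cpuset_cgroup() is likewise defined outside of this diff; on cgroup
v2 it presumably has to carve out the /cpuset subgroup and enable the
cpuset controller for it, roughly along these lines (a sketch under those
assumptions, not the actual helper):

	init_cpuset_cgroup() {
		if ((cgroup_version == 2)); then
			# v2 keeps a single hierarchy, so create a dedicated /cpuset subgroup
			create_cgroup /cpuset
			# Allow /cpuset and its children to manage cpus and memory nodes
			echo "+cpuset" > "$sysfs_cgroup/cgroup.subtree_control"
			echo "+cpuset" > "$sysfs_cgroup/cpuset/cgroup.subtree_control"
		fi
	}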
@@ -33,6 +45,16 @@ filter_allowed_list
all_cpus=("${allowed[@]}")
all_cpus_csv=$(fold_array_onto_string "${all_cpus[@]}")
all_cpumask=$(mask_cpus "${all_cpus[@]}")
all_cpus_mems=0

# Pin spdk cores to a new cgroup
create_cgroup "/cpuset/spdk"
create_cgroup "/cpuset/all"
set_cgroup_attr "/cpuset/spdk" cpuset.cpus "$spdk_cpus_csv"
set_cgroup_attr "/cpuset/spdk" cpuset.mems "$spdk_cpus_mems"
set_cgroup_attr "/cpuset/all" cpuset.cpus "$all_cpus_csv"
set_cgroup_attr "/cpuset/all" cpuset.mems "$all_cpus_mems"
move_cgroup_procs "/cpuset" "/cpuset/all"

export \
	"spdk_cpumask=$spdk_cpumask" \