scheduler_dynamic: move thread to least busy core

Even when all cores are already doing too much work to fit
another thread, active threads should still be balanced
across all cores.

When the current core is overloaded, place the thread on
another core that is less busy.

The core limit is set to 95% to catch only cores that are
effectively fully busy.
Decreasing that value would make spreading out the threads
more aggressive.
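
For illustration, the check reduces to the following arithmetic
(a minimal standalone sketch; the tick counts are made up):

#include <stdbool.h>
#include <stdint.h>

#define SCHEDULER_CORE_LIMIT 95

/* Same arithmetic as the patch: true when busy time makes up at
 * least 95% of the total (busy + idle) time measured on a core. */
static bool
over_core_limit(uint64_t busy, uint64_t idle)
{
	if (busy == 0) {
		return false;
	}
	return busy * 100 / (busy + idle) >= SCHEDULER_CORE_LIMIT;
}

/* over_core_limit(960, 40) -> true (96%),
 * over_core_limit(940, 60) -> false (94%). */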

Changed thread load in one of the unit tests to reflect the
95% limit.

Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: I3b3bc5f7fbd22725441fa811d61446950000cc46
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8113
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Krzysztof Karas <krzysztof.karas@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Maciej Szwed <maciej.szwed@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>

@@ -54,6 +54,7 @@ static struct core_stats *g_cores;
 #define SCHEDULER_THREAD_BUSY 100
 #define SCHEDULER_LOAD_LIMIT 50
+#define SCHEDULER_CORE_LIMIT 95
 
 static uint32_t
 _get_next_target_core(void)
 {
@@ -126,6 +127,33 @@ _move_thread(struct spdk_lw_thread *lw_thread, uint32_t dst_core)
 	lw_thread->lcore = dst_core;
 }
 
+static bool
+_is_core_over_limit(uint32_t core_id)
+{
+	struct core_stats *core = &g_cores[core_id];
+	uint64_t busy, idle;
+
+	/* Core with no or single thread cannot be over the limit. */
+	if (core->thread_count <= 1) {
+		return false;
+	}
+
+	busy = core->busy;
+	idle = core->idle;
+
+	/* No work was done, exit before possible division by 0. */
+	if (busy == 0) {
+		return false;
+	}
+
+	/* Work done was less than the limit */
+	if (busy * 100 / (busy + idle) < SCHEDULER_CORE_LIMIT) {
+		return false;
+	}
+
+	return true;
+}
+
 static bool
 _can_core_fit_thread(struct spdk_lw_thread *lw_thread, uint32_t dst_core)
 {
@@ -159,6 +187,7 @@ _find_optimal_core(struct spdk_lw_thread *lw_thread)
 	uint32_t i;
 	uint32_t target_lcore;
 	uint32_t current_lcore = lw_thread->lcore;
+	uint32_t least_busy_lcore = lw_thread->lcore;
 	struct spdk_thread *thread = spdk_thread_get_from_ctx(lw_thread);
 	struct spdk_cpuset *cpumask = spdk_thread_get_cpumask(thread);
@@ -171,6 +200,11 @@ _find_optimal_core(struct spdk_lw_thread *lw_thread)
 			continue;
 		}
 
+		/* Search for least busy core. */
+		if (g_cores[target_lcore].busy < g_cores[least_busy_lcore].busy) {
+			least_busy_lcore = target_lcore;
+		}
+
 		/* Skip cores that cannot fit the thread and current one. */
 		if (!_can_core_fit_thread(lw_thread, target_lcore) || target_lcore == current_lcore) {
 			continue;
@@ -179,6 +213,12 @@ _find_optimal_core(struct spdk_lw_thread *lw_thread)
 		return target_lcore;
 	}
 
+	/* For cores over the limit, place the thread on least busy core
+	 * to balance threads. */
+	if (_is_core_over_limit(current_lcore)) {
+		return least_busy_lcore;
+	}
+
 	/* If no better core is found, remain on the same one. */
 	return current_lcore;
 }
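
Taken together, _find_optimal_core() now has a two-stage behavior:
prefer a core that can fit the thread outright, otherwise fall back
to the least busy core when the current one is over the limit. The
sketch below models that flow with a hypothetical three-core
snapshot (the fake_core harness is simplified for illustration and
is not the SPDK core_stats structure):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CORE_LIMIT 95

/* Simplified stand-in for the scheduler's per-core stats. */
struct fake_core {
	uint64_t busy;
	uint64_t idle;
	uint32_t thread_count;
};

static bool
over_limit(const struct fake_core *c)
{
	/* Mirrors _is_core_over_limit(): single-thread or fully idle
	 * cores are never over the limit. */
	if (c->thread_count <= 1 || c->busy == 0) {
		return false;
	}
	return c->busy * 100 / (c->busy + c->idle) >= CORE_LIMIT;
}

int
main(void)
{
	/* Hypothetical snapshot: core 0 is saturated, cores 1-2 are not. */
	struct fake_core cores[3] = {
		{ .busy = 980, .idle = 20, .thread_count = 3 },
		{ .busy = 700, .idle = 300, .thread_count = 2 },
		{ .busy = 400, .idle = 600, .thread_count = 1 },
	};
	uint32_t current = 0;
	uint32_t least_busy = current;

	/* Mirrors the loop in _find_optimal_core(): keep track of the
	 * least busy core even if no core can fit the thread. */
	for (uint32_t i = 0; i < 3; i++) {
		if (cores[i].busy < cores[least_busy].busy) {
			least_busy = i;
		}
	}

	/* New fallback: core 0 is at 98% (over the 95% limit), so the
	 * thread moves to core 2 instead of staying put. */
	printf("move to core %u\n",
	       over_limit(&cores[current]) ? least_busy : current);
	return 0;
}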


@@ -894,7 +894,7 @@ test_governor(void)
 
 	/* TEST 3 */
 	/* Make second thread very busy so that it will be moved to second core */
 	spdk_set_thread(thread[1]);
-	busy = spdk_poller_register(poller_run_busy, (void *)1000, 0);
+	busy = spdk_poller_register(poller_run_busy, (void *)2000, 0);
 	_reactor_run(reactor);
 	spdk_poller_unregister(&busy);
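
A rough sense of why a load of 1000 stopped being enough under the
new limit (the idle figure below is invented purely for
illustration; the poller argument is treated as simulated busy
time):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical idle time accrued by the mocked reactor. */
	uint64_t idle = 100;
	uint64_t loads[2] = { 1000, 2000 };

	for (int i = 0; i < 2; i++) {
		uint64_t pct = loads[i] * 100 / (loads[i] + idle);
		/* Prints: 1000 -> 90% (under), 2000 -> 95% (over). */
		printf("busy=%llu -> %llu%% (%s the 95%% limit)\n",
		       (unsigned long long)loads[i],
		       (unsigned long long)pct,
		       pct >= 95 ? "over" : "under");
	}
	return 0;
}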