subsystem/nvmf: remove cpu_mask hint when creating poll group

Passing cpu_mask hints that match only a single core was
useful to prevent accidents when doing round-robin
with the 'static' scheduler.
In practice this is not required for the 'static' scheduler;
the threads will be spread out over all reactors anyway.
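
For illustration, this is the pattern being removed versus what the
code does after this patch (a minimal sketch mirroring the hunks
below; surrounding declarations and error handling omitted):

    /* Before: pin each poll group thread to the core it is named after. */
    struct spdk_cpuset tmp_cpumask = {};

    spdk_cpuset_zero(&tmp_cpumask);
    spdk_cpuset_set_cpu(&tmp_cpumask, i, true);
    thread = spdk_thread_create(thread_name, &tmp_cpumask);

    /* After: no cpu_mask hint, so the scheduler is free to place
     * the thread on any reactor.
     */
    thread = spdk_thread_create(thread_name, NULL);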

Such hints hinder other schedulers, which try to respect the
cpu_mask and thus would never move these threads to another
reactor, preventing them from bunching up less used threads
on a single reactor.

A drawback of this patch is that the poll group names will no
longer match the cores they run on.

Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: I5fb308362dd045228ea9fcca24f988388854c054
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7028
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: <dongx.yi@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Tomasz Zawadzki 2021-03-23 07:10:10 -04:00
parent d20f41881b
commit 47c4304d83
3 changed files with 4 additions and 12 deletions

@@ -166,7 +166,7 @@ DEPDIRS-event_vmd := event vmd $(JSON_LIBS) log thread
 DEPDIRS-event_bdev := event bdev event_accel event_vmd event_sock
 DEPDIRS-event_nbd := event nbd event_bdev
-DEPDIRS-event_nvmf := event nvmf event_bdev event_sock $(BDEV_DEPS_THREAD)
+DEPDIRS-event_nvmf := event nvmf event_bdev event_sock thread log bdev $(JSON_LIBS)
 DEPDIRS-event_scsi := event scsi event_bdev
 DEPDIRS-event_iscsi := event iscsi event_scsi event_sock

@@ -38,7 +38,6 @@
 #include "spdk/log.h"
 #include "spdk/nvme.h"
 #include "spdk/nvmf_cmd.h"
-#include "spdk/util.h"

 enum nvmf_tgt_state {
 	NVMF_TGT_INIT_NONE = 0,
@@ -186,7 +185,6 @@ nvmf_tgt_create_poll_group(void *ctx)
 static void
 nvmf_tgt_create_poll_groups(void)
 {
-	struct spdk_cpuset tmp_cpumask = {};
 	uint32_t i;
 	char thread_name[32];
 	struct spdk_thread *thread;
@@ -195,11 +193,9 @@ nvmf_tgt_create_poll_groups(void)
 	assert(g_tgt_init_thread != NULL);

 	SPDK_ENV_FOREACH_CORE(i) {
-		spdk_cpuset_zero(&tmp_cpumask);
-		spdk_cpuset_set_cpu(&tmp_cpumask, i, true);
 		snprintf(thread_name, sizeof(thread_name), "nvmf_tgt_poll_group_%u", i);

-		thread = spdk_thread_create(thread_name, &tmp_cpumask);
+		thread = spdk_thread_create(thread_name, NULL);
 		assert(thread != NULL);

 		spdk_thread_send_msg(thread, nvmf_tgt_create_poll_group, NULL);
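
With a NULL cpu_mask, spdk_thread_create() leaves the new thread
eligible for any core in the application's cpumask rather than a
single one. A small sketch of how the resulting mask could be
inspected (a hypothetical helper; spdk_thread_get_cpumask() and
spdk_cpuset_fmt() from the public thread/cpuset headers are assumed):

    #include "spdk/thread.h"
    #include "spdk/cpuset.h"
    #include "spdk/log.h"

    /* Log the affinity mask a poll group thread ended up with. */
    static void
    log_thread_cpumask(struct spdk_thread *thread)
    {
    	struct spdk_cpuset *mask = spdk_thread_get_cpumask(thread);

    	/* Without a hint, this defaults to the full app cpumask. */
    	SPDK_NOTICELOG("thread %s cpumask: %s\n",
    		       spdk_thread_get_name(thread), spdk_cpuset_fmt(mask));
    }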

@@ -38,13 +38,10 @@ idle() {
 	# The expectation here is that when SPDK app is idle the following is true:
 	# - all threads are assigned to main lcore
 	# - threads are not being moved between lcores
-	# - each thread has a mask pinned to a single cpu
-	local all_set

 	xtrace_disable
 	while ((samples++ < 5)); do
-		all_set=0 cpusmask=0
+		cpusmask=0
 		reactor_framework=$(rpc_cmd framework_get_reactors | jq -r '.reactors[]')
 		threads=($(
 			jq -r "select(.lcore == $spdk_main_core) | .lw_threads[].name" <<< "$reactor_framework"
@@ -56,10 +53,9 @@ idle() {
 		done

 		printf 'SPDK cpumask: %x Threads cpumask: %x\n' "$spdk_cpusmask" "$cpusmask"
-		thread_stats && ((cpusmask == spdk_cpusmask)) && all_set=1
+		thread_stats
 	done

-	((all_set == 1))
 	xtrace_restore
 }