nvmf: allow poll groups to run on a subset of cores

In order to avoid latency imbalances, the user can specify a CPU mask
on which the poll groups should run. This change adds the data structures
needed to restrict poll groups to that set of CPU cores.

Change-Id: Iaf69d75da2fc6fed350d97d11027ce09e9432210
Signed-off-by: Yuri <yuriy.kirichok@hpe.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/5610
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Michael Haeuptle <michaelhaeuptle@gmail.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
This commit is contained in:
Yuri 2021-05-21 17:09:00 +00:00 committed by Tomasz Zawadzki
parent e0c9c640c6
commit d63fbc8a8e
3 changed files with 20 additions and 4 deletions

View File

@ -167,7 +167,7 @@ DEPDIRS-event_vmd := init vmd $(JSON_LIBS) log thread
DEPDIRS-event_bdev := init bdev event_accel event_vmd event_sock
DEPDIRS-event_nbd := init nbd event_bdev
DEPDIRS-event_nvmf := init nvmf event_bdev event_sock thread log bdev $(JSON_LIBS)
DEPDIRS-event_nvmf := init nvmf event_bdev event_sock thread log bdev util $(JSON_LIBS)
DEPDIRS-event_scsi := init scsi event_bdev
DEPDIRS-event_iscsi := init iscsi event_scsi event_sock

View File

@ -61,4 +61,6 @@ extern uint16_t g_spdk_nvmf_tgt_crdt[3];
extern struct spdk_nvmf_tgt *g_spdk_nvmf_tgt;
extern struct spdk_cpuset *g_poll_groups_mask;
#endif

View File

@ -64,6 +64,7 @@ struct spdk_nvmf_tgt_conf g_spdk_nvmf_tgt_conf = {
.admin_passthru.identify_ctrlr = false
};
struct spdk_cpuset *g_poll_groups_mask = NULL;
struct spdk_nvmf_tgt *g_spdk_nvmf_tgt = NULL;
uint32_t g_spdk_nvmf_tgt_max_subsystems = 0;
uint16_t g_spdk_nvmf_tgt_crdt[3] = {0, 0, 0};
@ -150,6 +151,16 @@ nvmf_tgt_destroy_poll_groups(void)
}
}
static uint32_t
nvmf_get_cpuset_count(void)
{
	/* Number of poll groups to create: the size of the user-supplied
	 * CPU mask when one was given, otherwise one per available core. */
	return g_poll_groups_mask ? spdk_cpuset_count(g_poll_groups_mask) :
	       spdk_env_get_core_count();
}
static void
nvmf_tgt_create_poll_group_done(void *ctx)
{
@ -157,9 +168,9 @@ nvmf_tgt_create_poll_group_done(void *ctx)
TAILQ_INSERT_TAIL(&g_poll_groups, pg, link);
assert(g_num_poll_groups < spdk_env_get_core_count());
assert(g_num_poll_groups < nvmf_get_cpuset_count());
if (++g_num_poll_groups == spdk_env_get_core_count()) {
if (++g_num_poll_groups == nvmf_get_cpuset_count()) {
g_tgt_state = NVMF_TGT_INIT_START_SUBSYSTEMS;
nvmf_tgt_advance_state();
}
@ -195,9 +206,12 @@ nvmf_tgt_create_poll_groups(void)
assert(g_tgt_init_thread != NULL);
SPDK_ENV_FOREACH_CORE(i) {
if (g_poll_groups_mask && !spdk_cpuset_get_cpu(g_poll_groups_mask, i)) {
continue;
}
snprintf(thread_name, sizeof(thread_name), "nvmf_tgt_poll_group_%u", i);
thread = spdk_thread_create(thread_name, NULL);
thread = spdk_thread_create(thread_name, g_poll_groups_mask);
assert(thread != NULL);
spdk_thread_send_msg(thread, nvmf_tgt_create_poll_group, NULL);