nvmf: Only run on the master lcore.

Temporarily, run only on the master lcore. This enables some
interim refactoring that is required to move to a truly scalable
threading model.

Change-Id: I13a2e03107a27f8ec18b023b15f653d374a137b5
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
commit 72a7fd3cff
parent 9113e6a949
Author: Ben Walker
Date: 2016-07-07 15:47:51 -07:00


@@ -373,68 +373,5 @@ spdk_nvmf_conn_do_work(void *arg)
 static int
 nvmf_allocate_reactor(uint64_t cpumask)
 {
-    int i, selected_core;
-    enum rte_lcore_state_t state;
-    int master_lcore = rte_get_master_lcore();
-    int32_t num_pollers, min_pollers;
-
-    cpumask &= spdk_app_get_core_mask();
-    if (cpumask == 0) {
-        return 0;
-    }
-
-    min_pollers = INT_MAX;
-    selected_core = 0;
-
-    /* we use u64 as CPU core mask */
-    for (i = 0; i < RTE_MAX_LCORE && i < 64; i++) {
-        if (!((1ULL << i) & cpumask)) {
-            continue;
-        }
-
-        /*
-         * DPDK returns WAIT for the master lcore instead of RUNNING.
-         * So we always treat the reactor on master core as RUNNING.
-         */
-        if (i == master_lcore) {
-            state = RUNNING;
-        } else {
-            state = rte_eal_get_lcore_state(i);
-        }
-        if (state == FINISHED) {
-            rte_eal_wait_lcore(i);
-        }
-
-        switch (state) {
-        case WAIT:
-        case FINISHED:
-            /* Idle cores have 0 pollers */
-            if (0 < min_pollers) {
-                selected_core = i;
-                min_pollers = 0;
-            }
-            break;
-        case RUNNING:
-            /* This lcore is running, check how many pollers it already has */
-            num_pollers = rte_atomic32_read(&g_num_connections[i]);
-
-            /* Fill each lcore to target minimum, else select least loaded lcore */
-            if (num_pollers < (SPDK_NVMF_DEFAULT_NUM_SESSIONS_PER_LCORE *
-                               g_nvmf_tgt.MaxConnectionsPerSession)) {
-                /* If fewer than the target number of session connections
-                 * exist then add to this lcore
-                 */
-                return i;
-            } else if (num_pollers < min_pollers) {
-                /* Track the lcore that has the minimum number of pollers
-                 * to be used if no lcores have already met our criteria
-                 */
-                selected_core = i;
-                min_pollers = num_pollers;
-            }
-            break;
-        }
-    }
-
-    return selected_core;
+    return rte_get_master_lcore();
 }
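
For reference, the loop being deleted implemented a simple placement
heuristic: fill each allowed lcore up to a target poller count, and
otherwise fall back to the least-loaded lcore. Below is a minimal
standalone sketch of that heuristic; the poller-count array, target
value, and core count are illustrative stand-ins, not SPDK's actual
g_num_connections counters or DPDK lcore-state queries.

/*
 * Sketch of the least-loaded core selection removed by this commit.
 * NUM_CORES, TARGET_POLLERS_PER_CORE, and the pollers array are
 * hypothetical stand-ins for illustration only.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_CORES 4
#define TARGET_POLLERS_PER_CORE 8 /* stands in for sessions * connections */

static int
allocate_core(uint64_t cpumask, const int32_t *num_pollers)
{
    int i, selected_core = 0;
    int32_t min_pollers = INT_MAX;

    for (i = 0; i < NUM_CORES && i < 64; i++) {
        if (!((1ULL << i) & cpumask)) {
            continue; /* core not in the allowed mask */
        }
        if (num_pollers[i] < TARGET_POLLERS_PER_CORE) {
            /* Below the fill target: place the new connection here. */
            return i;
        }
        if (num_pollers[i] < min_pollers) {
            /* Otherwise remember the least-loaded core seen so far. */
            selected_core = i;
            min_pollers = num_pollers[i];
        }
    }
    return selected_core;
}

int
main(void)
{
    int32_t pollers[NUM_CORES] = {9, 12, 8, 10}; /* all at or above target */

    /* Cores 0-3 allowed; core 2 has the fewest pollers, so it is chosen. */
    printf("selected core: %d\n", allocate_core(0xFULL, pollers));
    return 0;
}

After this commit, that balancing is bypassed entirely and every
connection is placed on the master lcore, which simplifies the code
while the threading model is restructured.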