 - Bail out of tdq_idled if !smp_started or idle stealing is disabled.  This
   fixes a bug on UP machines with SMP kernels where the idle thread
   constantly switches after trying to steal work from the local cpu.
 - Make the idle stealing code more robust against self selection.
 - Prefer to steal from the cpu with the highest load that has at least one
   transferable thread.  Before, we selected the cpu with the highest
   transferable count, which excludes bound threads.  (A user-space sketch of
   the new selection rule follows after this list.)
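
The selection-policy change in the last item can be illustrated with a small
user-space sketch.  Everything in it is hypothetical (the fake_tdq type,
pick_victim(), and the sample numbers are invented for illustration); the real
code reads the per-CPU tdq fields under the scheduler locks.  Under the old
rule (highest transferable count) the example picks cpu 1; under the new rule
it picks cpu 2, whose bound threads still count toward its load.

/*
 * Hypothetical sketch of the new victim-selection rule: skip queues with
 * no transferable threads, then prefer the highest total load.  fake_tdq
 * and the numbers below are invented; this is not the kernel code.
 */
#include <stdio.h>

struct fake_tdq {
	int	load;		/* all runnable threads, including bound */
	int	transferable;	/* threads that may migrate */
};

static int
pick_victim(const struct fake_tdq *tdq, int ncpu, int self, int steal_thresh)
{
	int cpu, highcpu, highload;

	highcpu = -1;
	highload = 0;
	for (cpu = 0; cpu < ncpu; cpu++) {
		if (cpu == self)		/* never steal from ourselves */
			continue;
		if (tdq[cpu].transferable == 0)	/* nothing we could take */
			continue;
		if (tdq[cpu].load < highload)
			continue;
		highload = tdq[cpu].load;
		highcpu = cpu;
	}
	if (highcpu == -1 || highload < steal_thresh)
		return (-1);
	return (highcpu);
}

int
main(void)
{
	/* cpu 1 has the most transferable threads, cpu 2 the highest load. */
	struct fake_tdq tdq[3] = {
		{ .load = 0, .transferable = 0 },	/* ourselves, idle */
		{ .load = 3, .transferable = 3 },
		{ .load = 5, .transferable = 1 },	/* four bound threads */
	};

	printf("steal from cpu %d\n", pick_victim(tdq, 3, 0, 2));
	return (0);
}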

Collaborated with:	csjp
Approved by:		re
Jeff Roberson 2007-10-08 23:50:39 +00:00
parent 05dc0eb204
commit 88f530cc25
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=172484

sys/kern/sched_ule.c

@@ -740,9 +740,10 @@ tdq_idled(struct tdq *tdq)
 	struct tdq *steal;
 	int highload;
 	int highcpu;
-	int load;
 	int cpu;
 
+	if (smp_started == 0 || steal_idle == 0)
+		return (1);
 	/* We don't want to be preempted while we're iterating over tdqs */
 	spinlock_enter();
 	tdg = tdq->tdq_group;
@@ -762,29 +763,34 @@ tdq_idled(struct tdq *tdq)
 		}
 		TDQ_UNLOCK(tdq);
 	}
+	/*
+	 * Find the least loaded CPU with a transferable thread and attempt
+	 * to steal it. We make a lockless pass and then verify that the
+	 * thread is still available after locking.
+	 */
 	for (;;) {
-		if (steal_idle == 0)
-			break;
 		highcpu = 0;
 		highload = 0;
 		for (cpu = 0; cpu <= mp_maxid; cpu++) {
 			if (CPU_ABSENT(cpu))
 				continue;
-			load = TDQ_CPU(cpu)->tdq_transferable;
-			if (load < highload)
+			steal = TDQ_CPU(cpu);
+			if (steal->tdq_transferable == 0)
 				continue;
-			highload = load;
+			if (steal->tdq_load < highload)
+				continue;
+			highload = steal->tdq_load;
 			highcpu = cpu;
 		}
 		if (highload < steal_thresh)
 			break;
 		steal = TDQ_CPU(highcpu);
 		if (steal == tdq)
			break;
 		tdq_lock_pair(tdq, steal);
-		if (steal->tdq_transferable >= steal_thresh)
+		if (steal->tdq_load >= steal_thresh && steal->tdq_transferable)
 			goto steal;
 		tdq_unlock_pair(tdq, steal);
 		break;
 	}
 	spinlock_exit();
 	return (1);
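
The comment added in the second hunk describes the overall pattern: make a
lockless pass to pick a victim, then take both queue locks and verify that the
work is still there before stealing.  The standalone sketch below shows that
pattern with pthread mutexes and a toy queue type; toy_q, try_steal(), and the
sample values are invented, and the fixed lock ordering merely stands in for
what tdq_lock_pair() provides in the kernel.

/*
 * Hypothetical sketch of "lockless pass, then verify under the locks".
 * Not kernel code: toy_q stands in for struct tdq and lock_pair() for
 * tdq_lock_pair().
 */
#include <pthread.h>
#include <stdio.h>

struct toy_q {
	pthread_mutex_t	lock;
	int		load;		/* all runnable threads */
	int		transferable;	/* threads allowed to migrate */
};

/* Always lock the lower-addressed queue first to avoid deadlock. */
static void
lock_pair(struct toy_q *a, struct toy_q *b)
{
	if (a < b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void
unlock_pair(struct toy_q *a, struct toy_q *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

static int
try_steal(struct toy_q *self, struct toy_q *q, int nq, int steal_thresh)
{
	struct toy_q *victim;
	int i, stolen;

	/* Lockless pass: these reads may race with updates; that is fine. */
	victim = NULL;
	for (i = 0; i < nq; i++) {
		if (&q[i] == self || q[i].transferable == 0)
			continue;
		if (victim == NULL || q[i].load > victim->load)
			victim = &q[i];
	}
	if (victim == NULL || victim->load < steal_thresh)
		return (0);

	/* Verify under both locks before actually moving anything. */
	stolen = 0;
	lock_pair(self, victim);
	if (victim->load >= steal_thresh && victim->transferable > 0) {
		victim->load--;
		victim->transferable--;
		self->load++;
		self->transferable++;
		stolen = 1;
	}
	unlock_pair(self, victim);
	return (stolen);
}

int
main(void)
{
	struct toy_q q[2] = {
		{ .lock = PTHREAD_MUTEX_INITIALIZER },
		{ .lock = PTHREAD_MUTEX_INITIALIZER, .load = 4, .transferable = 2 },
	};

	printf("stole a thread: %d\n", try_steal(&q[0], q, 2, 2));
	return (0);
}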