MFi386: revision 225936
Add some improvements in the idle table callbacks:
- Replace instances of the inline assembly "hlt" instruction with
  calls to the halt() function.
- In cpu_idle_mwait(), avoid races in the sched_runnable() check by
  using the same pattern used in cpu_idle_hlt() with the 'hlt'
  instruction.
- Add comments explaining the logic behind the pattern used in
  cpu_idle_hlt() and the other idle callbacks.
parent 5166b1fbe8
commit 09713cafdf
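All of the callbacks below rely on the same x86 property: sti sets IF immediately, but pending interrupts are only delivered after the instruction that follows sti. A minimal sketch of the resulting race-free idle pattern, illustrative rather than verbatim kernel code (my_idle() is hypothetical; disable_intr(), enable_intr() and sched_runnable() are stand-ins for the kernel routines seen in the diff):

/* Stand-ins for the kernel primitives; illustrative only. */
static inline void disable_intr(void) { __asm__ __volatile__("cli"); }
static inline void enable_intr(void)  { __asm__ __volatile__("sti"); }
extern int sched_runnable(void);	/* is any thread ready to run? */

static void
my_idle(void)
{
	/*
	 * With interrupts disabled, no wakeup can slip in between
	 * the runnable check and the halt.
	 */
	disable_intr();
	if (sched_runnable()) {
		/* A thread is already pending; do not halt. */
		enable_intr();
		return;
	}
	/*
	 * "sti; hlt": hlt is guaranteed to execute before any
	 * pending interrupt is delivered, and with IF already set
	 * the first interrupt wakes the CPU back out of hlt.
	 */
	__asm__ __volatile__("sti; hlt");
}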
@@ -1117,7 +1117,7 @@ void
 cpu_halt(void)
 {
 	for (;;)
-		__asm__ ("hlt");
+		halt();
 }
 
 static int	idle_mwait = 1;	/* Use MONITOR/MWAIT for short idle. */
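The first hunk is purely cosmetic: halt() is the machine-dependent inline wrapper for the same instruction. On FreeBSD/amd64 it lives in cpufunc.h and is essentially the following (a sketch, not a verbatim quote):

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}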
@@ -1136,9 +1136,22 @@ cpu_idle_hlt(int busy)
 
 	state = (int *)PCPU_PTR(monitorbuf);
 	*state = STATE_SLEEPING;
+
 	/*
-	 * We must absolutely guarantee that hlt is the next instruction
-	 * after sti or we introduce a timing window.
+	 * Since we may be in a critical section from cpu_idle(), if
+	 * an interrupt fires during that critical section we may have
+	 * a pending preemption.  If the CPU halts, then that thread
+	 * may not execute until a later interrupt awakens the CPU.
+	 * To handle this race, check for a runnable thread after
+	 * disabling interrupts and immediately return if one is
+	 * found.  Also, we must absolutely guarantee that hlt is
+	 * the next instruction after sti.  This ensures that any
+	 * interrupt that fires after the call to disable_intr() will
+	 * immediately awaken the CPU from hlt.  Finally, note that
+	 * on x86 this works because sti enables interrupt delivery
+	 * only after the instruction that follows it executes, while
+	 * IF is set immediately, allowing the hlt instruction to
+	 * acknowledge the interrupt.
 	 */
 	disable_intr();
 	if (sched_runnable())
@@ -1164,11 +1177,19 @@ cpu_idle_mwait(int busy)
 
 	state = (int *)PCPU_PTR(monitorbuf);
 	*state = STATE_MWAIT;
-	if (!sched_runnable()) {
-		cpu_monitor(state, 0, 0);
-		if (*state == STATE_MWAIT)
-			cpu_mwait(0, MWAIT_C1);
+
+	/* See comments in cpu_idle_hlt(). */
+	disable_intr();
+	if (sched_runnable()) {
+		enable_intr();
+		*state = STATE_RUNNING;
+		return;
 	}
+	cpu_monitor(state, 0, 0);
+	if (*state == STATE_MWAIT)
+		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
+	else
+		enable_intr();
 	*state = STATE_RUNNING;
 }
 
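The *state handshake in cpu_idle_mwait() only works because the waker stores to the monitored line. A hedged sketch of the other side of the protocol, modeled on what cpu_idle_wakeup() does in this area of the tree (my_idle_wakeup() is hypothetical, and pcpu_find()/pc_monitorbuf are assumptions about the per-CPU accessors, not quoted code):

/*
 * Waker side, sketched: a store to the per-CPU monitorbuf both
 * fails the STATE_MWAIT re-check and fires the address armed by
 * cpu_monitor(), so a CPU sleeping in mwait falls through.
 */
static int
my_idle_wakeup(int cpu)
{
	int *state;

	state = (int *)pcpu_find(cpu)->pc_monitorbuf;	/* assumed accessor */
	if (*state == STATE_SLEEPING)	/* hlt sleeper: needs a real IPI */
		return (0);
	if (*state == STATE_MWAIT)	/* the store itself is the wakeup */
		*state = STATE_RUNNING;
	return (1);
}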
@@ -1180,6 +1201,12 @@ cpu_idle_spin(int busy)
 
 	state = (int *)PCPU_PTR(monitorbuf);
 	*state = STATE_RUNNING;
+
+	/*
+	 * The sched_runnable() check is racy, but since it happens
+	 * inside a loop, missing it once has little impact, if any
+	 * (and it is much better than not checking at all).
+	 */
 	for (i = 0; i < 1000; i++) {
 		if (sched_runnable())
 			return;