Axe the idle_event eventhandler, and add an MD cpu_idle function used

for things such as halting CPUs, idling CPUs, etc.

Discussed with:	msmith
This commit is contained in:
John Baldwin 2000-10-19 07:47:16 +00:00
parent de2c745ce3
commit dc13e6dfbb
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=67308
4 changed files with 12 additions and 26 deletions

View File

@@ -994,7 +994,6 @@ cpu_halt(void)
* the !SMP case, as there is no clean way to ensure that a CPU will be
* woken when there is work available for it.
*/
#ifndef SMP
static int cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
&cpu_idle_hlt, 0, "Idle loop HLT enable");
@@ -1005,9 +1004,10 @@ SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
* the time between calling hlt and the next interrupt even though there
* is a runnable process.
*/
static void
cpu_idle(void *junk, int count)
void
cpu_idle(void)
{
#ifndef SMP
if (cpu_idle_hlt) {
disable_intr();
if (procrunnable())
@@ -1017,16 +1017,9 @@ cpu_idle(void *junk, int count)
__asm __volatile("hlt");
}
}
#endif
}
static void cpu_idle_register(void *junk)
{
EVENTHANDLER_FAST_REGISTER(idle_event, cpu_idle, NULL, IDLE_PRI_LAST);
}
SYSINIT(cpu_idle_register, SI_SUB_SCHED_IDLE, SI_ORDER_SECOND,
cpu_idle_register, NULL)
#endif /* !SMP */
/*
* Clear registers on exec
*/

View File

@@ -994,7 +994,6 @@ cpu_halt(void)
* the !SMP case, as there is no clean way to ensure that a CPU will be
* woken when there is work available for it.
*/
#ifndef SMP
static int cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
&cpu_idle_hlt, 0, "Idle loop HLT enable");
@@ -1005,9 +1004,10 @@ SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
* the time between calling hlt and the next interrupt even though there
* is a runnable process.
*/
static void
cpu_idle(void *junk, int count)
void
cpu_idle(void)
{
#ifndef SMP
if (cpu_idle_hlt) {
disable_intr();
if (procrunnable())
@@ -1017,16 +1017,9 @@ cpu_idle(void *junk, int count)
__asm __volatile("hlt");
}
}
#endif
}
static void cpu_idle_register(void *junk)
{
EVENTHANDLER_FAST_REGISTER(idle_event, cpu_idle, NULL, IDLE_PRI_LAST);
}
SYSINIT(cpu_idle_register, SI_SUB_SCHED_IDLE, SI_ORDER_SECOND,
cpu_idle_register, NULL)
#endif /* !SMP */
/*
* Clear registers on exec
*/

View File

@@ -40,8 +40,6 @@ SYSINIT(idle_setup, SI_SUB_SCHED_IDLE, SI_ORDER_FIRST, idle_setup, NULL)
static void idle_proc(void *dummy);
EVENTHANDLER_FAST_DEFINE(idle_event, idle_eventhandler_t);
/*
* setup per-cpu idle process contexts
*/
@@ -102,8 +100,9 @@ idle_proc(void *dummy)
if (vm_page_zero_idle() != 0)
continue;
/* call out to any cpu-becoming-idle events */
EVENTHANDLER_FAST_INVOKE(idle_event, 0);
#ifdef __i386__
cpu_idle();
#endif
}
mtx_enter(&sched_lock, MTX_SPIN);

View File

@@ -517,6 +517,7 @@ int suser_xxx __P((const struct ucred *cred, const struct proc *proc,
void remrunqueue __P((struct proc *));
void cpu_switch __P((void));
void cpu_throw __P((void)) __dead2;
void cpu_idle __P((void));
void unsleep __P((struct proc *));
void cpu_exit __P((struct proc *)) __dead2;