Use low-level constructs borrowed from interrupt threads to wait for
work in proc0.
Remove the TDP_WAKEPROC0 workaround.
This commit is contained in:
Stephan Uphoff 2005-05-23 23:01:53 +00:00
parent 64b5fbaa04
commit d13ec71369
5 changed files with 47 additions and 27 deletions

View File

@ -503,15 +503,7 @@ exit1(struct thread *td, int rv)
critical_enter();
mtx_unlock_spin(&sched_lock);
wakeup(p->p_pptr);
/*
* XXX hack, swap in parent process, please see TDP_WAKEPROC0
* code, because TDP_WAKEPROC0 is only useful if thread is
* leaving critical region, but here we never leave and
* thread_exit() will call cpu_throw(), TDP_WAKEPROC0 is never
* cleared.
*/
if (p->p_pptr->p_sflag & PS_SWAPINREQ)
wakeup(&proc0);
PROC_UNLOCK(p->p_pptr);
mtx_lock_spin(&sched_lock);
critical_exit();

View File

@ -593,15 +593,9 @@ critical_exit(void)
td = curthread;
KASSERT(td->td_critnest != 0,
("critical_exit: td_critnest == 0"));
if (td->td_critnest == 1) {
if (td->td_pflags & TDP_WAKEPROC0) {
td->td_pflags &= ~TDP_WAKEPROC0;
wakeup(&proc0);
}
td->td_critnest = 0;
#ifdef PREEMPTION
if (td->td_critnest == 1) {
td->td_critnest = 0;
mtx_assert(&sched_lock, MA_NOTOWNED);
if (td->td_owepreempt) {
td->td_critnest = 1;
@ -610,12 +604,11 @@ critical_exit(void)
mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
}
} else
#endif
} else {
td->td_critnest--;
}
CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
(long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

View File

@ -410,11 +410,10 @@ setrunnable(struct thread *td)
p->p_sflag |= PS_SWAPINREQ;
/*
* due to a LOR between sched_lock and
* the sleepqueue chain locks, delay
* wakeup proc0 until thread leaves
* critical region.
* the sleepqueue chain locks, use
* lower level scheduling functions.
*/
curthread->td_pflags |= TDP_WAKEPROC0;
kick_proc0();
}
} else
sched_wakeup(td);

View File

@ -370,7 +370,7 @@ struct thread {
#define TDP_SA 0x00000080 /* A scheduler activation based thread. */
#define TDP_UNUSED8 0x00000100 /* --available -- */
#define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */
#define TDP_WAKEPROC0 0x00000400 /* Wants caller to wakeup(&proc0) */
#define TDP_UNUSED10 0x00000400 /* --available -- */
#define TDP_CAN_UNBIND 0x00000800 /* Only temporarily bound. */
#define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */
#define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */
@ -848,6 +848,7 @@ void fork_exit(void (*)(void *, struct trapframe *), void *,
struct trapframe *);
void fork_return(struct thread *, struct trapframe *);
int inferior(struct proc *p);
void kick_proc0(void);
int leavepgrp(struct proc *p);
int maybe_preempt(struct thread *td);
void mi_switch(int flags, struct thread *newtd);

View File

@ -113,6 +113,10 @@ SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)
static void swapout(struct proc *);
#endif
static volatile int proc0_rescan;
/*
* MPSAFE
*
@ -596,6 +600,9 @@ scheduler(dummy)
loop:
if (vm_page_count_min()) {
VM_WAIT;
mtx_lock_spin(&sched_lock);
proc0_rescan = 0;
mtx_unlock_spin(&sched_lock);
goto loop;
}
@ -641,7 +648,13 @@ scheduler(dummy)
* Nothing to do, back to sleep.
*/
if ((p = pp) == NULL) {
tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
mtx_lock_spin(&sched_lock);
if (!proc0_rescan) {
TD_SET_IWAIT(&thread0);
mi_switch(SW_VOL, NULL);
}
proc0_rescan = 0;
mtx_unlock_spin(&sched_lock);
goto loop;
}
PROC_LOCK(p);
@ -653,6 +666,9 @@ scheduler(dummy)
*/
if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
PROC_UNLOCK(p);
mtx_lock_spin(&sched_lock);
proc0_rescan = 0;
mtx_unlock_spin(&sched_lock);
goto loop;
}
@ -668,10 +684,29 @@ scheduler(dummy)
PROC_UNLOCK(p);
mtx_lock_spin(&sched_lock);
p->p_swtime = 0;
proc0_rescan = 0;
mtx_unlock_spin(&sched_lock);
goto loop;
}
/*
 * Wake thread0 (proc0) so its scheduler loop rescans for work.
 *
 * Borrows the interrupt-thread wakeup mechanism: if thread0 is parked
 * in the interrupt-wait state (TD_AWAITING_INTR), clear that state and
 * place it directly on the run queue with SRQ_INTR.  Otherwise thread0
 * is already running or queued, so just set the proc0_rescan flag,
 * which the scheduler loop checks before going back to sleep.
 *
 * NOTE(review): TD_CLR_IWAIT/setrunqueue manipulate scheduler state, so
 * callers presumably hold sched_lock — confirm at each call site.
 */
void kick_proc0(void)
{
	/* proc0's single thread. */
	struct thread *td = &thread0;
	if (TD_AWAITING_INTR(td)) {
		/* thread0 is idle in IWAIT: make it runnable directly. */
		CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, 0);
		TD_CLR_IWAIT(td);
		setrunqueue(td, SRQ_INTR);
	} else {
		/*
		 * thread0 is not sleeping in IWAIT; flag a rescan so its
		 * loop runs again instead of blocking.
		 */
		proc0_rescan = 1;
		CTR2(KTR_INTR, "%s: state %d",
		__func__, td->td_state);
	}
}
#ifndef NO_SWAPPING
/*