- When newtd is specified to sched_switch() it was not being initialized
   properly.  We have to temporarily unlock the TDQ lock so we can lock
   the thread and add it to the run queue.  This is used only for KSE.
 - When we add a thread from the tdq_move() via sched_balance() we need to
   ipi the target if it's sitting in the idle thread or it'll never run.

Reported by:	Rene Landan
Approved by:	re
This commit is contained in:
Jeff Roberson 2007-07-19 19:51:45 +00:00
parent 40e4089d34
commit 08c9a16c4f

View File

@ -683,6 +683,7 @@ tdq_move(struct tdq *from, struct tdq *to)
ts->ts_cpu = cpu;
td->td_lock = TDQ_LOCKPTR(to);
tdq_add(to, td, SRQ_YIELDING);
tdq_notify(ts);
}
/*
@ -1656,6 +1657,26 @@ sched_unlend_user_prio(struct thread *td, u_char prio)
sched_lend_user_prio(td, prio);
}
/*
 * Add the thread passed as 'newtd' to the run queue before selecting
 * the next thread to run. This is only used for KSE.
 */
static void
sched_switchin(struct tdq *tdq, struct thread *td)
{
#ifdef SMP
/*
 * We cannot take the thread lock while holding the TDQ lock (lock
 * order).  Enter a critical/spinlock section first so we stay pinned
 * with interrupts disabled while the TDQ lock is temporarily dropped,
 * then acquire the thread's lock and re-take the TDQ lock via
 * sched_setcpu(), which also assigns the thread to this queue's CPU.
 * NOTE(review): assumes sched_setcpu() returns with the tdq lock held
 * again — confirm against its definition elsewhere in this file.
 */
spinlock_enter();
TDQ_UNLOCK(tdq);
thread_lock(td);
spinlock_exit();
sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
#else
/* UP: no cross-CPU migration possible; just point the thread at our lock. */
td->td_lock = TDQ_LOCKPTR(tdq);
#endif
/* Queue the thread so the upcoming choosethread() can consider it. */
tdq_add(tdq, td, SRQ_YIELDING);
/* The thread must now be protected by this TDQ's lock in either case. */
MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
}
/*
* Block a thread for switching. Similar to thread_block() but does not
* bump the spin count.
@ -1750,14 +1771,11 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
*/
TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
/*
* If KSE assigned a new thread just add it here and pick the best one.
* If KSE assigned a new thread just add it here and let choosethread
* select the best one.
*/
if (newtd != NULL) {
/* XXX This is bogus. What if the thread is locked elsewhere? */
td->td_lock = TDQ_LOCKPTR(tdq);
td->td_sched->ts_cpu = cpuid;
tdq_add(tdq, td, SRQ_YIELDING);
}
if (newtd != NULL)
sched_switchin(tdq, newtd);
newtd = choosethread();
/*
* Call the MD code to switch contexts if necessary.