Don't do IPIs on behalf of interrupt threads.
Just punt straight on through to the preemption code.
Make a KASSERT out of a condition that can no longer occur.

MFC after:	1 week
parent 7a31cc1039
commit 6a574b2afc
@@ -502,8 +502,8 @@ maybe_preempt(struct thread *td)
 	 * to the new thread.
 	 */
 	ctd = curthread;
-	if (ctd->td_kse == NULL || ctd->td_kse->ke_thread != ctd)
-		return (0);
+	KASSERT ((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
+	    ("thread has no (or wrong) sched-private part."));
 	pri = td->td_priority;
 	cpri = ctd->td_priority;
 	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
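The first hunk replaces a defensive bail-out with an invariant check: KASSERT() panics with the supplied message on kernels built with INVARIANTS and compiles away otherwise, so the scheduler now asserts, rather than tolerates, a consistent td_kse back-pointer. A minimal userland sketch of the before/after shape, using simplified hypothetical types and assert(3) standing in for KASSERT():

#include <assert.h>
#include <stddef.h>

struct kse;
struct thread {
	struct kse *td_kse;		/* scheduler-private state, simplified */
};
struct kse {
	struct thread *ke_thread;	/* back-pointer to the owning thread */
};

/* Before: silently decline to preempt when the state looks inconsistent. */
static int
can_preempt_old(struct thread *ctd)
{
	if (ctd->td_kse == NULL || ctd->td_kse->ke_thread != ctd)
		return (0);
	return (1);
}

/* After: the inconsistent state can no longer arise, so assert it. */
static int
can_preempt_new(struct thread *ctd)
{
	assert(ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd);
	return (1);
}

int
main(void)
{
	struct thread td;
	struct kse ke;

	ke.ke_thread = &td;
	td.td_kse = &ke;
	return (!(can_preempt_old(&td) && can_preempt_new(&td)));
}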
@@ -949,9 +949,10 @@ sched_add(struct thread *td, int flags)
 	 * the thread is unpinned
 	 * or pinned to another cpu,
 	 * and there are other available and idle CPUs.
-	 * if we are idle, then skip straight to preemption.
+	 * if we are idle, or it's an interrupt,
+	 * then skip straight to preemption.
 	 */
-	if ( (! idle) &&
+	if ( (! idle) && ((flags & SRQ_INTR) == 0) &&
 	    (idle_cpus_mask & ~(hlt_cpus_mask | me)) &&
 	    ( KSE_CAN_MIGRATE(ke) ||
 	      ke->ke_runq != &runq_pcpu[PCPU_GET(cpuid)])) {
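The second hunk adds ((flags & SRQ_INTR) == 0) to the IPI branch, so an enqueue performed on behalf of an interrupt thread no longer forwards the work to an idle CPU and instead falls straight through to the local preemption check. A standalone sketch of that decision, with the CPU masks reduced to plain bit sets and an SRQ_INTR value assumed for illustration:

#include <stdbool.h>
#include <stdio.h>

#define SRQ_INTR 0x0004	/* assumed to mirror the kernel flag; illustrative */

/*
 * Decide between "kick an idle CPU with an IPI" and "fall through to
 * the local preemption check", mimicking the reworked sched_add() test.
 */
static const char *
place_thread(bool idle, int flags, unsigned idle_cpus, unsigned hlt_cpus,
    unsigned me)
{
	if (!idle && (flags & SRQ_INTR) == 0 &&
	    (idle_cpus & ~(hlt_cpus | me)) != 0)
		return ("forward to an idle CPU via IPI");
	return ("fall through to maybe_preempt()");
}

int
main(void)
{
	/* Interrupt-time enqueue: skips the IPI even with CPU 1 idle. */
	printf("%s\n", place_thread(false, SRQ_INTR, 0x2, 0x0, 0x1));
	/* Ordinary enqueue: still forwards the thread to the idle CPU. */
	printf("%s\n", place_thread(false, 0, 0x2, 0x0, 0x1));
	return (0);
}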