diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index a42c1534f89e..534f59cd5a42 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -816,12 +816,7 @@ sched_class(struct thread *td, int class)
 static void
 sched_priority(struct thread *td, u_char prio)
 {
-	struct thread *newtd;
-	struct runq *rq;
-	u_char orig_pri;
-#ifdef SMP
-	struct thread *cputd;
-#endif
+
 	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
 	    "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
 	    sched_tdname(curthread));
@@ -837,43 +832,10 @@ sched_priority(struct thread *td, u_char prio)
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	if (td->td_priority == prio)
 		return;
-	orig_pri = td->td_priority;
 	td->td_priority = prio;
 	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
 		sched_rem(td);
 		sched_add(td, SRQ_BORING);
-	} else if (orig_pri < prio && TD_IS_RUNNING(td)) {
-		/*
-		 * If we have decreased the priority of a running thread, we
-		 * have to check if it should be preempted.
-		 */
-		rq = &runq;
-		newtd = runq_choose(&runq);
-#ifdef SMP
-		cputd = runq_choose(&runq_pcpu[td->td_oncpu]);
-		if (newtd == NULL ||
-		    (cputd != NULL && cputd->td_priority < td->td_priority))
-			newtd = cputd;
-#endif
-
-		if (newtd != NULL && newtd->td_priority < prio
-#ifndef FULL_PREEMPTION
-		    && (newtd->td_priority <= PRI_MAX_ITHD ||
-		    prio >= PRI_MIN_IDLE)
-#endif
-		    ) {
-			if (td == curthread)
-				/*
-				 * Don't reschedule the thread here as it may
-				 * be losing priority because it has released a
-				 * mutex, and in that case we need it to finish
-				 * releasing the lock before it gets preempted.
-				 */
-				td->td_owepreempt = 1;
-			else
-				kick_other_cpu(newtd->td_priority,
-				    td->td_oncpu);
-		}
 	}
 }
 
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 93644469ce53..b650f24db9e5 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -319,7 +319,7 @@ static void tdq_add(struct tdq *, struct thread *, int);
 #ifdef SMP
 static int tdq_move(struct tdq *, struct tdq *);
 static int tdq_idled(struct tdq *);
-static void tdq_notify(struct tdq *, int);
+static void tdq_notify(struct tdq *, struct thread *);
 static struct thread *tdq_steal(struct tdq *, int);
 static struct thread *runq_steal(struct runq *, int);
 static int sched_pickcpu(struct thread *, int);
@@ -1040,14 +1040,16 @@ tdq_idled(struct tdq *tdq)
  * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
  */
 static void
-tdq_notify(struct tdq *tdq, int pri)
+tdq_notify(struct tdq *tdq, struct thread *td)
 {
 	struct thread *ctd;
+	int pri;
 	int cpu;
 
 	if (tdq->tdq_ipipending)
 		return;
-	cpu = TDQ_ID(tdq);
+	cpu = td_get_sched(td)->ts_cpu;
+	pri = td->td_priority;
 	ctd = pcpu_find(cpu)->pc_curthread;
 	if (!sched_shouldpreempt(pri, ctd->td_priority, 1))
 		return;
@@ -1673,22 +1675,6 @@ sched_pctcpu_update(struct td_sched *ts, int run)
 	ts->ts_ltick = t;
 }
 
-static void
-sched_check_preempt(struct tdq *tdq, struct thread *td)
-{
-
-	KASSERT(TD_IS_RUNNING(td), ("thread is not running"));
-	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	KASSERT(tdq == TDQ_CPU(td->td_sched->ts_cpu),
-	    ("tdq does not contain td"));
-
-	if (tdq == TDQ_SELF()) {
-		if (sched_shouldpreempt(tdq->tdq_lowpri, td->td_priority, 0))
-			td->td_owepreempt = 1;
-	} else
-		tdq_notify(tdq, tdq->tdq_lowpri);
-}
-
 /*
  * Adjust the priority of a thread.  Move it to the appropriate run-queue
  * if necessary.  This is the back-end for several priority related
@@ -1740,9 +1726,6 @@ sched_thread_priority(struct thread *td, u_char prio)
 			tdq->tdq_lowpri = prio;
 		else if (tdq->tdq_lowpri == oldpri)
 			tdq_setlowpri(tdq, td);
-
-		if (oldpri < prio)
-			sched_check_preempt(tdq, td);
 		return;
 	}
 	td->td_priority = prio;
@@ -1871,7 +1854,7 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
 	 */
 	tdq_lock_pair(tdn, tdq);
 	tdq_add(tdn, td, flags);
-	tdq_notify(tdn, td->td_priority);
+	tdq_notify(tdn, td);
 	TDQ_UNLOCK(tdn);
 	spinlock_exit();
 #endif
@@ -2446,7 +2429,7 @@ sched_add(struct thread *td, int flags)
 	tdq = sched_setcpu(td, cpu, flags);
 	tdq_add(tdq, td, flags);
 	if (cpu != PCPU_GET(cpuid)) {
-		tdq_notify(tdq, td->td_priority);
+		tdq_notify(tdq, td);
 		return;
 	}
 #else
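
For reference, the sched_4bsd hunk deletes the check that ran whenever a running
thread's priority was lowered. The standalone sketch below models only the shape
of that decision; runq_choose_sketch, check_preempt_sketch, and the flat array
standing in for the run queue are hypothetical simplifications, not the kernel's
runq_choose()/kick_other_cpu() machinery. The detail it preserves is the
curthread case: the removed code set td_owepreempt rather than rescheduling
immediately, so a thread whose priority dropped because it released a mutex can
finish the release before it is preempted.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct thread {
	int td_priority;	/* lower value = more urgent, as in FreeBSD */
	bool td_owepreempt;	/* request preemption at the next safe point */
};

/* Hypothetical stand-in for runq_choose(): best waiter, or NULL if empty. */
static struct thread *
runq_choose_sketch(struct thread **q, size_t n)
{
	struct thread *best = NULL;

	for (size_t i = 0; i < n; i++)
		if (q[i] != NULL && (best == NULL ||
		    q[i]->td_priority < best->td_priority))
			best = q[i];
	return (best);
}

/* Called after td's priority was lowered to 'prio' while it was running. */
static void
check_preempt_sketch(struct thread *td, int prio, bool is_curthread,
    struct thread **q, size_t n)
{
	struct thread *newtd = runq_choose_sketch(q, n);

	if (newtd == NULL || newtd->td_priority >= prio)
		return;			/* no waiter beats the new priority */
	if (is_curthread)
		td->td_owepreempt = true;	/* defer: may be mid-unlock */
	else
		printf("kick remote cpu, waiter pri %d\n", newtd->td_priority);
}

int
main(void)
{
	struct thread self = { .td_priority = 100, .td_owepreempt = false };
	struct thread waiter = { .td_priority = 90, .td_owepreempt = false };
	struct thread *q[] = { &waiter };

	check_preempt_sketch(&self, self.td_priority, true, q, 1);
	printf("owepreempt=%d\n", self.td_owepreempt);	/* prints 1 */
	return (0);
}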
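On the ULE side, the diff changes tdq_notify() back to taking the thread itself,
so both the target CPU (from ts_cpu) and the priority (from td_priority) are
derived from the thread being queued, and an IPI is sent only when
sched_shouldpreempt() says the queued thread beats whatever is running there.
A minimal userspace sketch of that shape follows; the per-CPU priority array and
should_preempt_sketch() are simplified stand-ins for pcpu_find() and
sched_shouldpreempt(), not the real implementations.

#include <stdbool.h>
#include <stdio.h>

#define NCPU 4

struct thread {
	int td_priority;	/* lower value = more urgent */
	int ts_cpu;		/* CPU the thread was queued on */
};

/* Hypothetical per-CPU state: priority of the currently running thread. */
static int cpu_curpri[NCPU] = { 120, 80, 200, 160 };

/* Simplified stand-in for sched_shouldpreempt(pri, cpri, remote). */
static bool
should_preempt_sketch(int pri, int cpri)
{
	return (pri < cpri);
}

/* Mirrors the restored tdq_notify() shape: cpu and pri both come from td. */
static void
notify_sketch(const struct thread *td)
{
	int cpu = td->ts_cpu;
	int pri = td->td_priority;

	if (!should_preempt_sketch(pri, cpu_curpri[cpu]))
		return;
	printf("IPI cpu %d: queued pri %d beats running pri %d\n",
	    cpu, pri, cpu_curpri[cpu]);
}

int
main(void)
{
	struct thread td = { .td_priority = 100, .ts_cpu = 2 };

	notify_sketch(&td);	/* fires: 100 < 200 */
	td.ts_cpu = 1;
	notify_sketch(&td);	/* silent: 100 > 80 */
	return (0);
}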