Check for preemption after lowering a thread's priority

When a high-priority thread is waiting for a mutex held by a
low-priority thread, it temporarily lends its priority to the
low-priority thread to prevent priority inversion.  When the mutex
is released, the lent priority is revoked and the low-priority
thread goes back to its original priority.
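
For illustration only (not part of this change), the userspace
analogue of this lending is a priority-inheritance mutex.  A minimal
sketch, assuming a POSIX threads environment:

	/*
	 * Illustrative sketch: PTHREAD_PRIO_INHERIT asks the kernel to
	 * lend a blocked waiter's priority to the current lock holder
	 * and to revoke it when the mutex is released.
	 */
	#include <pthread.h>

	static pthread_mutex_t m;

	static void *
	holder(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&m);
		/* While a higher-priority thread blocks on 'm', this
		   thread runs at the waiter's priority. */
		pthread_mutex_unlock(&m);	/* lent priority revoked */
		return (NULL);
	}

	int
	main(void)
	{
		pthread_mutexattr_t attr;
		pthread_t td;

		pthread_mutexattr_init(&attr);
		pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
		pthread_mutex_init(&m, &attr);

		pthread_create(&td, NULL, holder, NULL);
		pthread_mutex_lock(&m);
		pthread_mutex_unlock(&m);
		pthread_join(td, NULL);
		return (0);
	}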

When the priority of that thread is lowered (through a call to
sched_priority()), the scheduler was not checking whether
there is now a higher-priority thread in the run queue.  This can
cause threads with real-time priority to be starved in the run
queue while the low-priority thread finishes its quantum.

Fix this by explicitly checking whether preemption is necessary
when a thread's priority is lowered.
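
The check mirrors the existing preemption policy.  As a rough
standalone sketch (the priority cutoffs below are illustrative, not
authoritative; a lower numeric value means a higher priority):

	#include <stdbool.h>

	#define	EX_PRI_MAX_ITHD		47	/* illustrative cutoff */
	#define	EX_PRI_MIN_IDLE		224	/* illustrative cutoff */

	/*
	 * Should the running thread, just demoted to 'new_pri', yield
	 * to the best queued thread at 'best_runq_pri'?
	 */
	static bool
	should_preempt_after_drop(int best_runq_pri, int new_pri)
	{
		if (best_runq_pri >= new_pri)
			return (false);		/* nothing better queued */
	#ifndef FULL_PREEMPTION
		/* Only preempt for interrupt-thread priorities, or when
		   the demoted thread is now idle-class. */
		return (best_runq_pri <= EX_PRI_MAX_ITHD ||
		    new_pri >= EX_PRI_MIN_IDLE);
	#else
		return (true);
	#endif
	}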

Sponsored by: Dell EMC Isilon
Obtained from: Sandvine Inc
Differential Revision:	https://reviews.freebsd.org/D9518
Reviewed by: Jeff Roberson (ule)
MFC after: 1 month
Ryan Stone	2017-02-16 19:41:13 +00:00
commit 09ae7c4814 (parent c53cc7187c)
Notes: svn2git 2020-12-20 02:59:44 +00:00
       svn path=/head/; revision=313814
2 changed files with 63 additions and 8 deletions

sys/kern/sched_4bsd.c

@@ -816,7 +816,12 @@ sched_class(struct thread *td, int class)
static void
sched_priority(struct thread *td, u_char prio)
{
	struct thread *newtd;
	struct runq *rq;
	u_char orig_pri;
#ifdef SMP
	struct thread *cputd;
#endif

	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
	    "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
@@ -832,10 +837,43 @@ sched_priority(struct thread *td, u_char prio)
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority == prio)
		return;
	orig_pri = td->td_priority;
	td->td_priority = prio;
	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
		sched_rem(td);
		sched_add(td, SRQ_BORING);
	} else if (orig_pri < prio && TD_IS_RUNNING(td)) {
		/*
		 * If we have decreased the priority of a running thread, we
		 * have to check if it should be preempted.
		 */
		rq = &runq;
		newtd = runq_choose(&runq);
#ifdef SMP
		cputd = runq_choose(&runq_pcpu[td->td_oncpu]);
		if (newtd == NULL ||
		    (cputd != NULL && cputd->td_priority < td->td_priority))
			newtd = cputd;
#endif
		if (newtd != NULL && newtd->td_priority < prio
#ifndef FULL_PREEMPTION
		    && (newtd->td_priority <= PRI_MAX_ITHD ||
		    prio >= PRI_MIN_IDLE)
#endif
		    ) {
			if (td == curthread)
				/*
				 * Don't reschedule the thread here as it may
				 * be losing priority because it has released a
				 * mutex, and in that case we need it to finish
				 * releasing the lock before it gets preempted.
				 */
				td->td_owepreempt = 1;
			else
				kick_other_cpu(newtd->td_priority,
				    td->td_oncpu);
		}
	}
}

sys/kern/sched_ule.c

@@ -319,7 +319,7 @@ static void tdq_add(struct tdq *, struct thread *, int);
#ifdef SMP
static int tdq_move(struct tdq *, struct tdq *);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct tdq *, struct thread *);
static void tdq_notify(struct tdq *, int);
static struct thread *tdq_steal(struct tdq *, int);
static struct thread *runq_steal(struct runq *, int);
static int sched_pickcpu(struct thread *, int);
@@ -1040,16 +1040,14 @@ tdq_idled(struct tdq *tdq)
 * Notify a remote cpu of new work. Sends an IPI if criteria are met.
 */
static void
tdq_notify(struct tdq *tdq, struct thread *td)
tdq_notify(struct tdq *tdq, int pri)
{
	struct thread *ctd;
	int pri;
	int cpu;

	if (tdq->tdq_ipipending)
		return;
	cpu = td_get_sched(td)->ts_cpu;
	pri = td->td_priority;
	cpu = TDQ_ID(tdq);
	ctd = pcpu_find(cpu)->pc_curthread;
	if (!sched_shouldpreempt(pri, ctd->td_priority, 1))
		return;
@@ -1675,6 +1673,22 @@ sched_pctcpu_update(struct td_sched *ts, int run)
	ts->ts_ltick = t;
}

static void
sched_check_preempt(struct tdq *tdq, struct thread *td)
{

	KASSERT(TD_IS_RUNNING(td), ("thread is not running"));
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	KASSERT(tdq == TDQ_CPU(td->td_sched->ts_cpu),
	    ("tdq does not contain td"));

	if (tdq == TDQ_SELF()) {
		if (sched_shouldpreempt(tdq->tdq_lowpri, td->td_priority, 0))
			td->td_owepreempt = 1;
	} else
		tdq_notify(tdq, tdq->tdq_lowpri);
}

/*
 * Adjust the priority of a thread. Move it to the appropriate run-queue
 * if necessary. This is the back-end for several priority related
@@ -1726,6 +1740,9 @@ sched_thread_priority(struct thread *td, u_char prio)
			tdq->tdq_lowpri = prio;
		else if (tdq->tdq_lowpri == oldpri)
			tdq_setlowpri(tdq, td);
		if (oldpri < prio)
			sched_check_preempt(tdq, td);
		return;
	}
	td->td_priority = prio;
@@ -1854,7 +1871,7 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
	 */
	tdq_lock_pair(tdn, tdq);
	tdq_add(tdn, td, flags);
	tdq_notify(tdn, td);
	tdq_notify(tdn, td->td_priority);
	TDQ_UNLOCK(tdn);
	spinlock_exit();
#endif
@@ -2429,7 +2446,7 @@ sched_add(struct thread *td, int flags)
	tdq = sched_setcpu(td, cpu, flags);
	tdq_add(tdq, td, flags);
	if (cpu != PCPU_GET(cpuid)) {
		tdq_notify(tdq, td);
		tdq_notify(tdq, td->td_priority);
		return;
	}
#else