- Use a better algorithm in sched_pctcpu_update()
  Contributed by:	Thomaswuerfl@gmx.de
- In sched_prio(), adjust the run queue for threads which may need to move
  to the current queue due to priority propagation.
- In sched_switch(), fix style bug introduced when the KSE support went in.
  Columns are 80 chars wide, not 90.
- In sched_switch(), fix the comparison in the idle case and explicitly
  re-initialize the runq in the not propagated case.
- Remove dead code in sched_clock().
- In sched_clock(), if we're an IDLE class td set NEEDRESCHED so that
  threads that have become runnable will get a chance to.
- In sched_runnable(), if we're not the IDLETD, we should not consider
  curthread when examining the load.  This mimics the 4BSD behavior of
  returning 0 when the only runnable thread is running.
- In sched_userret(), remove the code for setting NEEDRESCHED entirely.
  This is not necessary and is not implemented in 4BSD.
- Use the correct comparison in sched_add() when checking to see if an idle
  prio task has had its priority temporarily elevated.
parent 6a0e476222
commit 3f741ca117
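The first hunk below replaces the decaying while loop in sched_interact_update() with a single-step scaling of kg_runtime and kg_slptime. The following is a minimal standalone sketch of that calculation; the demo struct, the demo value of SCHED_SLP_RUN_MAX, and the demo_* names are illustrative assumptions, not the kernel's definitions.

/*
 * Standalone sketch of the single-step decay used by the new
 * sched_interact_update().  SCHED_SLP_RUN_MAX and struct demo_ksegrp
 * are assumed demo stand-ins, not the kernel's.
 */
#include <stdio.h>

#define SCHED_SLP_RUN_MAX	1048576		/* assumed demo cap */

struct demo_ksegrp {
        unsigned long kg_runtime;
        unsigned long kg_slptime;
};

static void
demo_interact_update(struct demo_ksegrp *kg)
{
        int ratio;

        if ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
                /*
                 * Scale both components back toward the cap in a single
                 * step (targeting roughly 15/16 of SCHED_SLP_RUN_MAX)
                 * instead of repeatedly multiplying by 4/5 in a loop.
                 */
                ratio = (SCHED_SLP_RUN_MAX * 15) / (kg->kg_runtime +
                    kg->kg_slptime);
                kg->kg_runtime = (kg->kg_runtime * ratio) / 16;
                kg->kg_slptime = (kg->kg_slptime * ratio) / 16;
        }
}

int
main(void)
{
        struct demo_ksegrp kg = { 700000, 500000 };

        demo_interact_update(&kg);
        printf("runtime %lu slptime %lu sum %lu (cap %d)\n",
            kg.kg_runtime, kg.kg_slptime,
            kg.kg_runtime + kg.kg_slptime, SCHED_SLP_RUN_MAX);
        return (0);
}

Run as an ordinary userland program, this prints a runtime/slptime pair whose sum has been pulled back under the cap in one pass, which is the state the old loop only reached after several iterations.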
@@ -674,10 +674,13 @@ sched_slice(struct kse *ke)
 static void
 sched_interact_update(struct ksegrp *kg)
 {
-        /* XXX Fixme, use a linear algorithm and not a while loop. */
-        while ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
-                kg->kg_runtime = (kg->kg_runtime / 5) * 4;
-                kg->kg_slptime = (kg->kg_slptime / 5) * 4;
+        int ratio;
+
+        if ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
+                ratio = ((SCHED_SLP_RUN_MAX * 15) / (kg->kg_runtime +
+                    kg->kg_slptime ));
+                kg->kg_runtime = (kg->kg_runtime * ratio) / 16;
+                kg->kg_slptime = (kg->kg_slptime * ratio) / 16;
         }
 }
 
@@ -775,13 +778,28 @@ sched_pickcpu(void)
 void
 sched_prio(struct thread *td, u_char prio)
 {
+        struct kse *ke;
+
+        ke = td->td_kse;
         mtx_assert(&sched_lock, MA_OWNED);
         if (TD_ON_RUNQ(td)) {
+                /*
+                 * If the priority has been elevated due to priority
+                 * propagation, we may have to move ourselves to a new
+                 * queue. We still call adjustrunqueue below in case kse
+                 * needs to fix things up.
+                 */
+                if ((td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
+                    prio < td->td_ksegrp->kg_user_pri) ||
+                    (td->td_ksegrp->kg_pri_class == PRI_IDLE &&
+                    prio < PRI_MIN_IDLE)) {
+                        runq_remove(ke->ke_runq, ke);
+                        ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
+                        runq_add(ke->ke_runq, ke);
+                }
                 adjustrunqueue(td, prio);
-        } else {
+        } else
                 td->td_priority = prio;
-        }
 }
 
 void
@@ -806,12 +824,16 @@ sched_switch(struct thread *td)
                 setrunqueue(td);
         } else {
                 /*
-                 * This queue is always correct except for idle threads which
-                 * have a higher priority due to priority propagation.
+                 * This queue is always correct except for idle threads
+                 * which have a higher priority due to priority
+                 * propagation.
                  */
-                if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE &&
-                    ke->ke_thread->td_priority > PRI_MIN_IDLE)
-                        ke->ke_runq = KSEQ_SELF()->ksq_curr;
+                if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) {
+                        if (td->td_priority < PRI_MIN_IDLE)
+                                ke->ke_runq = KSEQ_SELF()->ksq_curr;
+                        else
+                                ke->ke_runq = &KSEQ_SELF()->ksq_idle;
+                }
                 runq_add(ke->ke_runq, ke);
                 /* setrunqueue(td); */
         }
@@ -1017,9 +1039,6 @@ sched_clock(struct thread *td)
         struct kseq *kseq;
         struct ksegrp *kg;
         struct kse *ke;
-#if 0
-        struct kse *nke;
-#endif
 
         /*
          * sched_setup() apparently happens prior to stathz being set. We
@@ -1057,28 +1076,18 @@ sched_clock(struct thread *td)
         CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
             ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
 
+        /*
+         * Idle tasks should always resched.
+         */
+        if (kg->kg_pri_class == PRI_IDLE) {
+                td->td_flags |= TDF_NEEDRESCHED;
+                return;
+        }
         /*
          * We only do slicing code for TIMESHARE ksegrps.
          */
         if (kg->kg_pri_class != PRI_TIMESHARE)
                 return;
-        /*
-         * Check for a higher priority task on the run queue. This can happen
-         * on SMP if another processor woke up a process on our runq.
-         */
-        kseq = KSEQ_SELF();
-#if 0
-        if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq, 0)) != NULL) {
-                if (sched_strict &&
-                    nke->ke_thread->td_priority < td->td_priority)
-                        td->td_flags |= TDF_NEEDRESCHED;
-                else if (nke->ke_thread->td_priority <
-                    td->td_priority SCHED_PRIO_SLOP)
-
-                if (nke->ke_thread->td_priority < td->td_priority)
-                        td->td_flags |= TDF_NEEDRESCHED;
-        }
-#endif
         /*
          * We used a tick charge it to the ksegrp so that we can compute our
          * interactivity.
@@ -1090,6 +1099,7 @@ sched_clock(struct thread *td)
          * We used up one time slice.
          */
         ke->ke_slice--;
+        kseq = KSEQ_SELF();
 #ifdef SMP
         kseq->ksq_rslices--;
 #endif
@@ -1121,8 +1131,12 @@ sched_runnable(void)
         mtx_lock_spin(&sched_lock);
         kseq = KSEQ_SELF();
 
-        if (kseq->ksq_load)
-                goto out;
+        if ((curthread->td_flags & TDF_IDLETD) != 0) {
+                if (kseq->ksq_load > 0)
+                        goto out;
+        } else
+                if (kseq->ksq_load - 1 > 0)
+                        goto out;
 #ifdef SMP
         /*
          * For SMP we may steal other processor's KSEs. Just search until we
@@ -1150,32 +1164,12 @@ void
 sched_userret(struct thread *td)
 {
         struct ksegrp *kg;
-#if 0
-        struct kseq *kseq;
-        struct kse *ke;
-#endif
 
         kg = td->td_ksegrp;
 
         if (td->td_priority != kg->kg_user_pri) {
                 mtx_lock_spin(&sched_lock);
                 td->td_priority = kg->kg_user_pri;
-                /*
-                 * This optimization is temporarily disabled because it
-                 * breaks priority propagation.
-                 */
-#if 0
-                kseq = KSEQ_SELF();
-                if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
-#ifdef SMP
-                    kseq->ksq_load > kseq->ksq_cpus &&
-#else
-                    kseq->ksq_load > 1 &&
-#endif
-                    (ke = kseq_choose(kseq, 0)) != NULL &&
-                    ke->ke_thread->td_priority < td->td_priority)
-#endif
-                        curthread->td_flags |= TDF_NEEDRESCHED;
                 mtx_unlock_spin(&sched_lock);
         }
 }
@@ -1267,7 +1261,7 @@ sched_add(struct thread *td)
                 /*
                  * This is for priority prop.
                  */
-                if (ke->ke_thread->td_priority > PRI_MIN_IDLE)
+                if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
                         ke->ke_runq = kseq->ksq_curr;
                 else
                         ke->ke_runq = &kseq->ksq_idle;
|
Loading…
x
Reference in New Issue
Block a user