Tidy up the scheduler's code for changing the priority of a thread.

Logically pretty much a NOP.
julian 2002-10-14 20:34:31 +00:00
parent dd5ddc932f
commit 7314f176a2
5 changed files with 65 additions and 58 deletions
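
In outline, every call site that used to change a runnable thread's priority by open-coding remrunqueue(), a store to td_priority, and setrunqueue() now makes a single sched_prio() call, and the scheduler picks the cheapest way to honour it. Below is a minimal userland sketch of the two calling conventions; the struct, the on_runq flag, and the helper bodies are stand-ins for illustration, not the kernel's real API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for struct thread with just the fields this sketch needs. */
    struct thread {
        int  td_priority;
        bool on_runq;
    };

    /* Stubs for the kernel's run-queue primitives. */
    static void remrunqueue(struct thread *td) { td->on_runq = false; }
    static void setrunqueue(struct thread *td) { td->on_runq = true; }

    /* Old style: dequeue, store, re-enqueue, open-coded at each call site. */
    static void
    change_pri_old(struct thread *td, int newpri)
    {
        remrunqueue(td);
        td->td_priority = newpri;
        setrunqueue(td);
    }

    /* New style: one entry point owns the decision, as sched_prio() now does. */
    static void
    change_pri_new(struct thread *td, int newpri)
    {
        if (td->on_runq)
            change_pri_old(td, newpri); /* requeue path */
        else
            td->td_priority = newpri;   /* just store it */
    }

    int
    main(void)
    {
        struct thread td = { 120, true };

        change_pri_new(&td, 100);
        printf("priority %d, on runq: %d\n", td.td_priority, (int)td.on_runq);
        return (0);
    }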

View File

@@ -277,13 +277,14 @@ kse_reassign(struct kse *ke)
 	CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke);
 }
 
+#if 0
 /*
  * Remove a thread from its KSEGRP's run queue.
  * This in turn may remove it from a KSE if it was already assigned
  * to one, possibly causing a new thread to be assigned to the KSE
  * and the KSE getting a new priority (unless it's a BOUND thread/KSE pair).
  */
-void
+static void
 remrunqueue(struct thread *td)
 {
 	struct thread *td2, *td3;
@@ -325,6 +326,51 @@ remrunqueue(struct thread *td)
 		kse_reassign(ke);
 	}
 }
+#endif
+
+/*
+ * Change the priority of a thread that is on the run queue.
+ */
+void
+adjustrunqueue(struct thread *td, int newpri)
+{
+	struct ksegrp *kg;
+	struct kse *ke;
+
+	mtx_assert(&sched_lock, MA_OWNED);
+	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
+	/*
+	 * If it's a bound thread/KSE pair, take the shortcut. All non-KSE
+	 * threads are BOUND.
+	 */
+	ke = td->td_kse;
+	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
+	if ((td->td_flags & TDF_UNBOUND) == 0) {
+		/* We only care about the kse in the run queue. */
+		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
+			sched_rem(ke);
+			td->td_priority = newpri;
+			sched_add(ke);
+		}
+		return;
+	}
+	/*
+	 * An unbound thread. This is not optimised yet.
+	 */
+	kg = td->td_ksegrp;
+	kg->kg_runnable--;
+	TD_SET_CAN_RUN(td);
+	if (ke) {
+		if (kg->kg_last_assigned == td) {
+			kg->kg_last_assigned =
+			    TAILQ_PREV(td, threadqueue, td_runq);
+		}
+		sched_rem(ke);
+	}
+	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
+	td->td_priority = newpri;
+	setrunqueue(td);
+}
 
 void
 setrunqueue(struct thread *td)
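
The bound-thread shortcut above is the heart of the tidy-up: the run queues hash several adjacent priority levels into one bucket (RQ_PPQ priorities per queue; with 64 queues over 256 priority levels that works out to four in this era's sys/runq.h), so a dequeue/re-enqueue is only needed when newpri / RQ_PPQ differs from the KSE's current ke_rqindex. A self-contained sketch of that test, with the constants reproduced purely for illustration:

    #include <stdio.h>

    #define RQ_NQS  64              /* number of run queues */
    #define RQ_PPQ  (256 / RQ_NQS)  /* priorities hashed per queue: 4 */

    /*
     * True when a priority change lands a thread in a different bucket,
     * i.e. when the scheduler really has to dequeue and re-enqueue it.
     */
    static int
    needs_requeue(int oldpri, int newpri)
    {
        return (oldpri / RQ_PPQ != newpri / RQ_PPQ);
    }

    int
    main(void)
    {
        printf("130 -> 131: %d\n", needs_requeue(130, 131)); /* 0: both bucket 32 */
        printf("130 -> 120: %d\n", needs_requeue(130, 120)); /* 1: bucket 32 vs 30 */
        return (0);
    }

The unbound case cannot take this shortcut because the thread's position in the KSEGRP queue and the kg_last_assigned hint both depend on its priority, so it is pulled off the queue, re-prioritized, and pushed back through setrunqueue().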
@@ -662,18 +708,6 @@ runq_remove(struct runq *rq, struct kse *ke)
 	}
 }
 
-#if 0
-static void
-runq_readjust(struct runq *rq, struct kse *ke)
-{
-
-	if (ke->ke_rqindex != (ke->ke_thread->td_priority / RQ_PPQ)) {
-		runq_remove(rq, ke);
-		runq_add(rq, ke);
-	}
-}
-#endif
-
 #if 0
 void
 panc(char *string1, char *string2)
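
The runq_readjust() helper deleted here encoded the same ke_rqindex comparison that adjustrunqueue()'s bound-thread path now performs directly via sched_rem()/sched_add(); it was already fenced off by #if 0 and had no remaining callers.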

View File

@@ -187,10 +187,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 			td->td_kse->ke_flags |= KEF_NEEDRESCHED;
 		} else if (TD_ON_RUNQ(td)) {
 			if (td->td_priority > kg->kg_user_pri) {
-				remrunqueue(td);
-				td->td_priority =
-				    kg->kg_user_pri;
-				setrunqueue(td);
+				sched_prio(td, kg->kg_user_pri);
 			}
 		}
 	}
@@ -220,10 +217,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 			td->td_kse->ke_flags |= KEF_NEEDRESCHED;
 		} else if (TD_ON_RUNQ(td)) {
 			if (td->td_priority > kg->kg_user_pri) {
-				remrunqueue(td);
-				td->td_priority =
-				    kg->kg_user_pri;
-				setrunqueue(td);
+				sched_prio(td, kg->kg_user_pri);
 			}
 		}
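
Both hunks in this file guard the call so that only threads currently running at a weaker priority than their user priority are touched. BSD priorities are inverted, a numerically larger td_priority being a weaker priority, so the replacement sched_prio() call is always a boost back up to kg_user_pri, never a demotion. A small sketch of the guard, using hypothetical stand-in names:

    #include <stdio.h>

    /* BSD convention: a lower number is a stronger priority. */
    static int
    clamp_to_user_pri(int td_priority, int kg_user_pri)
    {
        /* Act only when the thread sits numerically above (i.e. weaker
         * than) its user priority, mirroring the guard in the diff. */
        if (td_priority > kg_user_pri)
            td_priority = kg_user_pri;
        return (td_priority);
    }

    int
    main(void)
    {
        printf("%d\n", clamp_to_user_pri(140, 120)); /* boosted to 120 */
        printf("%d\n", clamp_to_user_pri(100, 120)); /* left at 100 */
        return (0);
    }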

View File

@@ -316,30 +316,8 @@ schedcpu(void *arg)
 		kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
 		resetpriority(kg);
 		FOREACH_THREAD_IN_GROUP(kg, td) {
-			int changedqueue;
 			if (td->td_priority >= PUSER) {
-				/*
-				 * Only change the priority
-				 * of threads that are still at their
-				 * user priority.
-				 * XXXKSE This is problematic
-				 * as we may need to re-order
-				 * the threads on the KSEG list.
-				 */
-				changedqueue =
-				    ((td->td_priority / RQ_PPQ) !=
-				    (kg->kg_user_pri / RQ_PPQ));
-
-				td->td_priority = kg->kg_user_pri;
-				if (changedqueue && TD_ON_RUNQ(td)) {
-					/* this could be optimised */
-					remrunqueue(td);
-					td->td_priority =
-					    kg->kg_user_pri;
-					setrunqueue(td);
-				} else {
-					td->td_priority = kg->kg_user_pri;
-				}
+				sched_prio(td, kg->kg_user_pri);
 			}
 		}
 	} /* end of ksegrp loop */
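
For context, the surrounding loop is schedcpu()'s once-per-second pass: decay_cpu() shrinks each group's estcpu by the classic 4BSD load factor 2*load / (2*load + 1), resetpriority() folds estcpu and nice into kg_user_pri, and the only per-thread work left after this commit is the sched_prio() call. A floating-point sketch of that decay, assuming the traditional 4BSD formulation (the kernel does the same thing in fixed point):

    #include <stdio.h>

    /* Traditional 4BSD decay: estcpu *= 2*load / (2*load + 1). */
    static double
    decay_estcpu(double estcpu, double load)
    {
        double loadfac = 2.0 * load;

        return (estcpu * loadfac / (loadfac + 1.0));
    }

    int
    main(void)
    {
        double estcpu = 100.0;

        /* With a load average of 1.0, estcpu decays to 2/3 each second. */
        for (int i = 1; i <= 5; i++) {
            estcpu = decay_estcpu(estcpu, 1.0);
            printf("after %d s: %.2f\n", i, estcpu);
        }
        return (0);
    }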
@@ -491,14 +469,20 @@ sched_nice(struct ksegrp *kg, int nice)
 	resetpriority(kg);
 }
 
+/*
+ * Adjust the priority of a thread.
+ * This may include moving the thread within the KSEGRP,
+ * changing the assignment of a kse to the thread,
+ * and moving a KSE in the system run queue.
+ */
 void
 sched_prio(struct thread *td, u_char prio)
 {
-	td->td_priority = prio;
 	if (TD_ON_RUNQ(td)) {
-		remrunqueue(td);
-		setrunqueue(td);
+		adjustrunqueue(td, prio);
+	} else {
+		td->td_priority = prio;
 	}
 }
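
Note the asymmetry the rewrite makes explicit: only a thread actually sitting on a run queue goes through adjustrunqueue(); for a sleeping or currently running thread, td_priority is simply stored, and the queues are fixed up whenever it next becomes runnable. The old body stored the priority unconditionally and then did a full remrunqueue()/setrunqueue() cycle even when the thread would have landed back in the same queue.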
@@ -527,6 +511,7 @@ sched_switchout(struct thread *td)
 	KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
 	td->td_lastcpu = ke->ke_oncpu;
 	td->td_last_kse = ke;
+	ke->ke_oncpu = NOCPU;
 	ke->ke_flags &= ~KEF_NEEDRESCHED;
 	/*

View File

@@ -187,10 +187,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 			td->td_kse->ke_flags |= KEF_NEEDRESCHED;
 		} else if (TD_ON_RUNQ(td)) {
 			if (td->td_priority > kg->kg_user_pri) {
-				remrunqueue(td);
-				td->td_priority =
-				    kg->kg_user_pri;
-				setrunqueue(td);
+				sched_prio(td, kg->kg_user_pri);
 			}
 		}
 	}
@@ -220,10 +217,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 			td->td_kse->ke_flags |= KEF_NEEDRESCHED;
 		} else if (TD_ON_RUNQ(td)) {
 			if (td->td_priority > kg->kg_user_pri) {
-				remrunqueue(td);
-				td->td_priority =
-				    kg->kg_user_pri;
-				setrunqueue(td);
+				sched_prio(td, kg->kg_user_pri);
 			}
 		}

View File

@@ -268,8 +268,8 @@ struct thread {
 #define	td_startzero td_flags
 	int	td_flags;	/* (j) TDF_* flags. */
 	int	td_inhibitors;	/* (j) Why can not run */
-	struct	kse *td_last_kse; /* Where it wants to be if possible. */
-	struct	kse *td_kse;	/* Current KSE if running. */
+	struct	kse *td_last_kse; /* (j) Previous value of td_kse */
+	struct	kse *td_kse;	/* (j) Current KSE if running. */
 	int	td_dupfd;	/* (k) Ret value from fdopen. XXX */
 	void	*td_wchan;	/* (j) Sleep address. */
 	const char *td_wmesg;	/* (j) Reason for sleep. */
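
The two comment edits are more than cosmetic: (j) is proc.h's locking annotation for fields protected by sched_lock, which is consistent with the mtx_assert(&sched_lock, MA_OWNED) at the top of the new adjustrunqueue().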
@@ -844,6 +844,7 @@ struct proc *pfind(pid_t);	/* Find process by id. */
 struct pgrp *pgfind(pid_t);	/* Find process group by id. */
 struct proc *zpfind(pid_t);	/* Find zombie process by id. */
+void	adjustrunqueue(struct thread *, int newpri);
 void	ast(struct trapframe *framep);
 struct thread *choosethread(void);
 int	cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
@@ -871,7 +872,6 @@ void	threadinit(void);
 void	proc_linkup(struct proc *p, struct ksegrp *kg,
 	    struct kse *ke, struct thread *td);
 void	proc_reparent(struct proc *child, struct proc *newparent);
-void	remrunqueue(struct thread *);
 int	securelevel_ge(struct ucred *cr, int level);
 int	securelevel_gt(struct ucred *cr, int level);
 void	setrunnable(struct thread *);