updatepri() works on a ksegrp (where the scheduling parameters are), so
directly give it the ksegrp instead of the thread.  The only thing it used
to use in the thread was the ksegrp.

Reviewed by:	julian

Author: Peter Wemm
Date:   2002-08-28 23:45:15 +00:00
Commit: d13947c3b0
Parent: 97e2d5fc53
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=102544

3 changed files with 24 additions and 21 deletions
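
As a quick orientation before the diff: the change makes every wakeup path hoist td->td_ksegrp into a local kg and hand that ksegrp to updatepri() directly. The sketch below is not part of the commit; the stripped-down structs, the stand-in updatepri() body, and the wake_one_thread() helper are hypothetical illustrations, and only the field and function names mirror the diff that follows.

/*
 * Hypothetical, stripped-down sketch of the new updatepri() calling
 * convention.  These structs are NOT the real FreeBSD definitions;
 * they carry only the fields this commit touches.
 */
struct ksegrp {
        unsigned int kg_slptime;        /* time asleep, scheduler input */
        unsigned int kg_estcpu;         /* estimated recent cpu usage */
};

struct thread {
        struct ksegrp *td_ksegrp;       /* scheduling parameters live here */
};

/* New prototype: takes the ksegrp directly (previously: struct thread *). */
void    updatepri(struct ksegrp *kg);

/* Stand-in body; the real one decays kg_estcpu by the load factor. */
void
updatepri(struct ksegrp *kg)
{
        kg->kg_estcpu = 0;
}

/* Hypothetical caller mirroring the pattern at each wakeup site. */
static void
wake_one_thread(struct thread *td)
{
        struct ksegrp *kg = td->td_ksegrp;      /* hoist once */

        if (kg->kg_slptime > 1)
                updatepri(kg);                  /* pass the ksegrp, not the thread */
        kg->kg_slptime = 0;
}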


@@ -507,6 +507,7 @@ static __inline void
 cv_wakeup(struct cv *cvp)
 {
         struct thread *td;
+        struct ksegrp *kg;
 
         mtx_assert(&sched_lock, MA_OWNED);
         td = TAILQ_FIRST(&cvp->cv_waitq);
@@ -519,9 +520,10 @@ cv_wakeup(struct cv *cvp)
                 /* OPTIMIZED EXPANSION OF setrunnable(td); */
                 CTR3(KTR_PROC, "cv_signal: thread %p (pid %d, %s)",
                     td, td->td_proc->p_pid, td->td_proc->p_comm);
-                if (td->td_ksegrp->kg_slptime > 1) /* XXXKSE */
-                        updatepri(td);
-                td->td_ksegrp->kg_slptime = 0;
+                kg = td->td_ksegrp;
+                if (kg->kg_slptime > 1) /* XXXKSE */
+                        updatepri(kg);
+                kg->kg_slptime = 0;
                 if (td->td_proc->p_sflag & PS_INMEM) {
                         setrunqueue(td);
                         maybe_resched(td);


@@ -376,16 +376,11 @@ schedcpu(arg)
  * least six times the loadfactor will decay p_estcpu to zero.
  */
 void
-updatepri(td)
-        register struct thread *td;
+updatepri(struct ksegrp *kg)
 {
-        register struct ksegrp *kg;
         register unsigned int newcpu;
         register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
 
-        if (td == NULL)
-                return;
-        kg = td->td_ksegrp;
         newcpu = kg->kg_estcpu;
         if (kg->kg_slptime > 5 * loadfac)
                 kg->kg_estcpu = 0;
@@ -395,7 +390,7 @@ updatepri(td)
                         newcpu = decay_cpu(loadfac, newcpu);
                 kg->kg_estcpu = newcpu;
         }
-        resetpriority(td->td_ksegrp);
+        resetpriority(kg);
 }
 
 /*
@@ -705,6 +700,7 @@ wakeup(ident)
         register struct slpquehead *qp;
         register struct thread *td;
         struct thread *ntd;
+        struct ksegrp *kg;
         struct proc *p;
 
         mtx_lock_spin(&sched_lock);
@@ -720,9 +716,10 @@ wakeup(ident)
                         /* OPTIMIZED EXPANSION OF setrunnable(p); */
                         CTR3(KTR_PROC, "wakeup: thread %p (pid %d, %s)",
                             td, p->p_pid, p->p_comm);
-                        if (td->td_ksegrp->kg_slptime > 1)
-                                updatepri(td);
-                        td->td_ksegrp->kg_slptime = 0;
+                        kg = td->td_ksegrp;
+                        if (kg->kg_slptime > 1)
+                                updatepri(kg);
+                        kg->kg_slptime = 0;
                         if (p->p_sflag & PS_INMEM) {
                                 setrunqueue(td);
                                 maybe_resched(td);
@@ -754,6 +751,7 @@ wakeup_one(ident)
         register struct thread *td;
         register struct proc *p;
         struct thread *ntd;
+        struct ksegrp *kg;
 
         mtx_lock_spin(&sched_lock);
         qp = &slpque[LOOKUP(ident)];
@@ -768,9 +766,10 @@ wakeup_one(ident)
                         /* OPTIMIZED EXPANSION OF setrunnable(p); */
                         CTR3(KTR_PROC,"wakeup1: thread %p (pid %d, %s)",
                             td, p->p_pid, p->p_comm);
-                        if (td->td_ksegrp->kg_slptime > 1)
-                                updatepri(td);
-                        td->td_ksegrp->kg_slptime = 0;
+                        kg = td->td_ksegrp;
+                        if (kg->kg_slptime > 1)
+                                updatepri(kg);
+                        kg->kg_slptime = 0;
                         if (p->p_sflag & PS_INMEM) {
                                 setrunqueue(td);
                                 maybe_resched(td);
@@ -794,7 +793,7 @@ wakeup_one(ident)
  * The machine independent parts of mi_switch().
  */
 void
-mi_switch()
+mi_switch(void)
 {
         struct bintime new_switchtime;
         struct thread *td = curthread;  /* XXX */
@@ -931,6 +930,7 @@ void
 setrunnable(struct thread *td)
 {
         struct proc *p = td->td_proc;
+        struct ksegrp *kg;
 
         mtx_assert(&sched_lock, MA_OWNED);
         switch (p->p_state) {
@@ -960,9 +960,10 @@ setrunnable(struct thread *td)
         case TDS_RUNQ:  /* not yet had time to suspend */
                 break;
         }
-        if (td->td_ksegrp->kg_slptime > 1)
-                updatepri(td);
-        td->td_ksegrp->kg_slptime = 0;
+        kg = td->td_ksegrp;
+        if (kg->kg_slptime > 1)
+                updatepri(kg);
+        kg->kg_slptime = 0;
         if ((p->p_sflag & PS_INMEM) == 0) {
                 td->td_state = TDS_SWAPPED;
                 if ((p->p_sflag & PS_SWAPPINGIN) == 0) {


@@ -829,7 +829,7 @@ void cpu_idle(void);
 void cpu_switch(void);
 void cpu_throw(void) __dead2;
 void unsleep(struct thread *);
-void updatepri(struct thread *);
+void updatepri(struct ksegrp *);
 void userret(struct thread *, struct trapframe *, u_int);
 void maybe_resched(struct thread *);