In a threaded world, different priorities become properties of
different entities. Make it so.

Reviewed by:	jhb@freebsd.org (John Baldwin)
parent 7146f7f115
commit 2c1007663f
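
In short: the old per-group "struct priority kg_pri" goes away. As a reader's sketch of the resulting field layout (the field names and lock annotations are taken from the sys/proc.h hunks below; member types of the retired struct priority are assumptions, and unrelated members are elided):

	/* Before: every priority value hung off the ksegrp. */
	struct priority {
		char	pri_class;	/* scheduling class */
		char	pri_level;	/* active priority */
		char	pri_native;	/* base (pre-propagation) priority */
		char	pri_user;	/* priority when in userland */
	};	/* embedded as kg_pri in struct ksegrp */

	/* After: each value lives with the entity it describes. */
	struct ksegrp {
		char	kg_pri_class;	/* (j) scheduling class */
		char	kg_user_pri;	/* (j) priority when in userland */
		/* ... */
	};
	struct thread {
		u_char	td_base_pri;	/* (j) thread base kernel priority */
		u_char	td_priority;	/* (j) thread active priority */
		/* ... */
	};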
@@ -326,10 +326,10 @@ proc0_init(void *dummy __unused)
 	p->p_sflag = PS_INMEM;
 	p->p_stat = SRUN;
 	p->p_ksegrp.kg_nice = NZERO;
-	p->p_ksegrp.kg_pri.pri_class = PRI_TIMESHARE;
-	p->p_ksegrp.kg_pri.pri_level = PVM;
-	p->p_ksegrp.kg_pri.pri_native = PUSER;
-	p->p_ksegrp.kg_pri.pri_user = PUSER;
+	kg->kg_pri_class = PRI_TIMESHARE;
+	kg->kg_user_pri = PUSER;
+	td->td_priority = PVM;
+	td->td_base_pri = PUSER;
 
 	p->p_peers = 0;
 	p->p_leader = p;
@@ -177,7 +177,7 @@ cv_waitq_add(struct cv *cvp, struct thread *td)
 	td->td_wmesg = cvp->cv_description;
 	td->td_kse->ke_slptime = 0;		/* XXXKSE */
 	td->td_ksegrp->kg_slptime = 0;		/* XXXKSE */
-	td->td_ksegrp->kg_pri.pri_native = td->td_ksegrp->kg_pri.pri_level;
+	td->td_base_pri = td->td_priority;
 	CTR3(KTR_PROC, "cv_waitq_add: thread %p (pid %d, %s)", td,
 	    td->td_proc->p_pid, td->td_proc->p_comm);
 	TAILQ_INSERT_TAIL(&cvp->cv_waitq, td, td_slpq);
@@ -487,7 +487,7 @@ cv_wakeup(struct cv *cvp)
 	td->td_proc->p_stat = SRUN;
 	if (td->td_proc->p_sflag & PS_INMEM) {
 		setrunqueue(td);
-		maybe_resched(td->td_ksegrp);
+		maybe_resched(td);
 	} else {
 		td->td_proc->p_sflag |= PS_SWAPINREQ;
 		wakeup(&proc0);			/* XXXKSE */
@@ -131,14 +131,14 @@ ithread_update(struct ithd *ithd)
 	strncpy(p->p_comm, ithd->it_name, sizeof(ithd->it_name));
 	ih = TAILQ_FIRST(&ithd->it_handlers);
 	if (ih == NULL) {
-		td->td_ksegrp->kg_pri.pri_level = PRI_MAX_ITHD;
+		td->td_priority = PRI_MAX_ITHD;
 		ithd->it_flags &= ~IT_ENTROPY;
 		return;
 	}
 
 	entropy = 0;
-	td->td_ksegrp->kg_pri.pri_level = ih->ih_pri;
-	td->td_ksegrp->kg_pri.pri_native = ih->ih_pri;
+	td->td_priority = ih->ih_pri;
+	td->td_base_pri = ih->ih_pri;
 	TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
 		if (strlen(p->p_comm) + strlen(ih->ih_name) + 1 <
 		    sizeof(p->p_comm)) {
@@ -198,8 +198,8 @@ ithread_create(struct ithd **ithread, int vector, int flags,
 		return (error);
 	}
 	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
-	td->td_ksegrp->kg_pri.pri_class = PRI_ITHD;
-	td->td_ksegrp->kg_pri.pri_level = PRI_MAX_ITHD;
+	td->td_ksegrp->kg_pri_class = PRI_ITHD;
+	td->td_priority = PRI_MAX_ITHD;
 	p->p_stat = SWAIT;
 	ithd->it_td = td;
 	td->td_ithd = ithd;
@@ -67,8 +67,6 @@
 #define	mtx_owner(m)	(mtx_unowned((m)) ? NULL \
 			    : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
 
-#define	SET_PRIO(td, pri)	(td)->td_ksegrp->kg_pri.pri_level = (pri)
-
 /*
  * Lock classes for sleep and spin mutexes.
  */
@@ -90,7 +88,7 @@ static void
 propagate_priority(struct thread *td)
 {
 	struct ksegrp *kg = td->td_ksegrp;
-	int pri = kg->kg_pri.pri_level;
+	int pri = td->td_priority;
 	struct mtx *m = td->td_blocked;
 
 	mtx_assert(&sched_lock, MA_OWNED);
@@ -112,13 +110,13 @@ propagate_priority(struct thread *td)
 
 		MPASS(td->td_proc->p_magic == P_MAGIC);
 		KASSERT(td->td_proc->p_stat != SSLEEP, ("sleeping thread owns a mutex"));
-		if (kg->kg_pri.pri_level <= pri) /* lower is higher priority */
+		if (td->td_priority <= pri) /* lower is higher priority */
 			return;
 
 		/*
 		 * Bump this thread's priority.
 		 */
-		SET_PRIO(td, pri);
+		td->td_priority = pri;
 
 		/*
 		 * If lock holder is actually running, just bump priority.
@@ -174,7 +172,7 @@ propagate_priority(struct thread *td)
 		}
 
 		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
-		if (td1->td_ksegrp->kg_pri.pri_level <= pri) {
+		if (td1->td_priority <= pri) {
 			continue;
 		}
 
@@ -188,7 +186,7 @@ propagate_priority(struct thread *td)
 		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
 		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
 			MPASS(td1->td_proc->p_magic == P_MAGIC);
-			if (td1->td_ksegrp->kg_pri.pri_level > pri)
+			if (td1->td_priority > pri)
 				break;
 		}
 
@@ -327,8 +325,8 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 			MPASS(td1 != NULL);
 			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
 
-			if (td1->td_ksegrp->kg_pri.pri_level < kg->kg_pri.pri_level)
-				SET_PRIO(td, td1->td_ksegrp->kg_pri.pri_level);
+			if (td1->td_priority < td->td_priority)
+				td->td_priority = td1->td_priority;
 			mtx_unlock_spin(&sched_lock);
 			return;
 		}
@@ -377,7 +375,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
 		} else {
 			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
-				if (td1->td_ksegrp->kg_pri.pri_level > kg->kg_pri.pri_level)
+				if (td1->td_priority > td->td_priority)
 					break;
 			if (td1)
 				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
@@ -499,14 +497,14 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
 
 	pri = PRI_MAX;
 	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
 		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_ksegrp->kg_pri.pri_level;
+		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
 		if (cp < pri)
 			pri = cp;
 	}
 
-	if (pri > kg->kg_pri.pri_native)
-		pri = kg->kg_pri.pri_native;
-	SET_PRIO(td, pri);
+	if (pri > td->td_base_pri)
+		pri = td->td_base_pri;
+	td->td_priority = pri;
 
 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
 		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
@@ -516,7 +514,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
 	td1->td_proc->p_stat = SRUN;
 	setrunqueue(td1);
 
-	if (td->td_critnest == 1 && td1->td_ksegrp->kg_pri.pri_level < pri) {
+	if (td->td_critnest == 1 && td1->td_priority < pri) {
 #ifdef notyet
 		if (td->td_ithd != NULL) {
 			struct ithd *it = td->td_ithd;
@@ -446,7 +446,7 @@ poll_idle(void)
 	rtp.type = RTP_PRIO_IDLE;
 	mtx_lock_spin(&sched_lock);
-	rtp_to_pri(&rtp, &td->td_ksegrp->kg_pri);
-	pri = td->td_ksegrp->kg_pri.pri_level;
+	rtp_to_pri(&rtp, td->td_ksegrp);
+	pri = td->td_priority;
 	mtx_unlock_spin(&sched_lock);
 
 	for (;;) {
@@ -532,7 +532,6 @@ fill_kinfo_proc(p, kp)
 		kp->ki_dsize = vm->vm_dsize;
 		kp->ki_ssize = vm->vm_ssize;
 	}
-	td = FIRST_THREAD_IN_PROC(p);
 	if ((p->p_sflag & PS_INMEM) && p->p_stats) {
 		kp->ki_start = p->p_stats->p_start;
 		kp->ki_rusage = p->p_stats->p_ru;
@@ -556,11 +555,14 @@ fill_kinfo_proc(p, kp)
 	/* vvv XXXKSE */
 	kp->ki_runtime = p->p_runtime;
 	kp->ki_pctcpu = p->p_kse.ke_pctcpu;
-	kp->ki_estcpu = p->p_ksegrp.kg_estcpu;
-	kp->ki_slptime = p->p_ksegrp.kg_slptime;
+	kp->ki_estcpu = td->td_ksegrp->kg_estcpu;
+	kp->ki_slptime = td->td_ksegrp->kg_slptime;
 	kp->ki_wchan = td->td_wchan;
-	kp->ki_pri = p->p_ksegrp.kg_pri;
-	kp->ki_nice = p->p_ksegrp.kg_nice;
+	kp->ki_pri.pri_level = td->td_priority;
+	kp->ki_pri.pri_user = td->td_ksegrp->kg_user_pri;
+	kp->ki_pri.pri_class = td->td_ksegrp->kg_pri_class;
+	kp->ki_pri.pri_native = td->td_base_pri;
+	kp->ki_nice = td->td_ksegrp->kg_nice;
 	kp->ki_rqindex = p->p_kse.ke_rqindex;
 	kp->ki_oncpu = p->p_kse.ke_oncpu;
 	kp->ki_lastcpu = td->td_lastcpu;
@@ -289,7 +289,7 @@ rtprio(td, uap)
 		if ((error = p_cansee(curp, p)))
 			break;
 		mtx_lock_spin(&sched_lock);
-		pri_to_rtp(&p->p_ksegrp.kg_pri /* XXXKSE */ , &rtp);
+		pri_to_rtp(&p->p_ksegrp /* XXXKSE */ , &rtp);
 		mtx_unlock_spin(&sched_lock);
 		error = copyout(&rtp, uap->rtp, sizeof(struct rtprio));
 		break;
@@ -321,7 +321,7 @@ rtprio(td, uap)
 			}
 		}
 		mtx_lock_spin(&sched_lock);
-		error = rtp_to_pri(&rtp, &p->p_ksegrp.kg_pri);
+		error = rtp_to_pri(&rtp, &p->p_ksegrp);
 		mtx_unlock_spin(&sched_lock);
 		break;
 	default:
@@ -335,48 +335,50 @@ done2:
 }
 
 int
-rtp_to_pri(struct rtprio *rtp, struct priority *pri)
+rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
 {
 
 	if (rtp->prio > RTP_PRIO_MAX)
 		return (EINVAL);
 	switch (RTP_PRIO_BASE(rtp->type)) {
 	case RTP_PRIO_REALTIME:
-		pri->pri_level = PRI_MIN_REALTIME + rtp->prio;
+		kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio;
 		break;
 	case RTP_PRIO_NORMAL:
-		pri->pri_level = PRI_MIN_TIMESHARE + rtp->prio;
+		kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
 		break;
 	case RTP_PRIO_IDLE:
-		pri->pri_level = PRI_MIN_IDLE + rtp->prio;
+		kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio;
 		break;
 	default:
 		return (EINVAL);
 	}
-	pri->pri_class = rtp->type;
-	pri->pri_native = pri->pri_level;
-	pri->pri_user = pri->pri_level;
+	kg->kg_pri_class = rtp->type;
+	if (curthread->td_ksegrp == kg) {
+		curthread->td_base_pri = kg->kg_user_pri;
+		curthread->td_priority = kg->kg_user_pri; /* XXX dubious */
+	}
 	return (0);
 }
 
 void
-pri_to_rtp(struct priority *pri, struct rtprio *rtp)
+pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
 {
 
-	switch (PRI_BASE(pri->pri_class)) {
+	switch (PRI_BASE(kg->kg_pri_class)) {
 	case PRI_REALTIME:
-		rtp->prio = pri->pri_level - PRI_MIN_REALTIME;
+		rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME;
 		break;
 	case PRI_TIMESHARE:
-		rtp->prio = pri->pri_level - PRI_MIN_TIMESHARE;
+		rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE;
 		break;
 	case PRI_IDLE:
-		rtp->prio = pri->pri_level - PRI_MIN_IDLE;
+		rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE;
 		break;
 	default:
 		break;
 	}
-	rtp->type = pri->pri_class;
+	rtp->type = kg->kg_pri_class;
 }
 
 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
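
Aside: rtp_to_pri() and pri_to_rtp() now convert between a ksegrp and an rtprio directly. A minimal usage sketch, modeled on the callers elsewhere in this diff (nothing here beyond what those callers do; sched_lock is the scheduler spin mutex assumed to be available in scope):

	struct rtprio rtp;
	struct ksegrp *kg = curthread->td_ksegrp;
	int error;

	/* Read the group's scheduling class and user priority. */
	mtx_lock_spin(&sched_lock);
	pri_to_rtp(kg, &rtp);
	mtx_unlock_spin(&sched_lock);

	/* Write a new one back; rtp_to_pri() returns EINVAL on bad input. */
	rtp.type = RTP_PRIO_NORMAL;
	rtp.prio = 0;
	mtx_lock_spin(&sched_lock);
	error = rtp_to_pri(&rtp, kg);
	mtx_unlock_spin(&sched_lock);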
@@ -1492,10 +1492,9 @@ runfast:
 	 * Maybe just one would be enough?
 	 */
 	mtx_lock_spin(&sched_lock);
-	FOREACH_KSEGRP_IN_PROC(p, kg) {
-		if (kg->kg_pri.pri_level > PUSER) {
-			kg->kg_pri.pri_level = PUSER;
-		}
+	if (FIRST_THREAD_IN_PROC(p)->td_priority > PUSER) {
+		FIRST_THREAD_IN_PROC(p)->td_priority = PUSER;
+	}
 
 run:
 	/* If we jump here, sched_lock has to be owned. */
@@ -388,7 +388,7 @@ uio_yield()
 	td = curthread;
 	mtx_lock_spin(&sched_lock);
 	DROP_GIANT();
-	td->td_ksegrp->kg_pri.pri_level = td->td_ksegrp->kg_pri.pri_user;
+	td->td_priority = td->td_ksegrp->kg_user_pri; /* XXXKSE */
 	setrunqueue(td);
 	td->td_proc->p_stats->p_ru.ru_nivcsw++;
 	mi_switch();
@@ -181,7 +181,6 @@ runq_add(struct runq *rq, struct kse *ke)
 	struct rqhead *rqh;
 	int pri;
 
-	struct ksegrp *kg = ke->ke_ksegrp;
 #ifdef INVARIANTS
 	struct proc *p = ke->ke_proc;
 #endif
@@ -192,12 +191,12 @@ runq_add(struct runq *rq, struct kse *ke)
 	    p, p->p_comm));
 	KASSERT(runq_find(rq, ke) == 0,
 	    ("runq_add: proc %p (%s) already in run queue", ke, p->p_comm));
-	pri = kg->kg_pri.pri_level / RQ_PPQ;
+	pri = ke->ke_thread->td_priority / RQ_PPQ;
 	ke->ke_rqindex = pri;
 	runq_setbit(rq, pri);
 	rqh = &rq->rq_queues[pri];
 	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
-	    ke->ke_proc, kg->kg_pri.pri_level, pri, rqh);
+	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
 	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
 	ke->ke_flags |= KEF_ONRUNQ;
 }
@@ -279,9 +278,6 @@ runq_init(struct runq *rq)
 void
 runq_remove(struct runq *rq, struct kse *ke)
 {
-#ifdef KTR
-	struct ksegrp *kg = ke->ke_ksegrp;
-#endif
 	struct rqhead *rqh;
 	int pri;
 
@@ -291,7 +287,7 @@ runq_remove(struct runq *rq, struct kse *ke)
 	pri = ke->ke_rqindex;
 	rqh = &rq->rq_queues[pri];
 	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
-	    ke, kg->kg_pri.pri_level, pri, rqh);
+	    ke, ke->ke_thread->td_priority, pri, rqh);
 	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
 	TAILQ_REMOVE(rqh, ke, ke_procq);
 	if (TAILQ_EMPTY(rqh)) {
@@ -120,12 +120,11 @@ SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
  * schedulers into account.
  */
 void
-maybe_resched(kg)
-	struct ksegrp *kg;
+maybe_resched(struct thread *td)
 {
 
 	mtx_assert(&sched_lock, MA_OWNED);
-	if (kg->kg_pri.pri_level < curthread->td_ksegrp->kg_pri.pri_level)
+	if (td->td_priority < curthread->td_priority)
 		curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
 }
 
@@ -257,10 +256,11 @@ schedcpu(arg)
 	void *arg;
 {
 	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
-	register struct proc *p;
-	register struct kse *ke;
-	register struct ksegrp *kg;
-	register int realstathz;
+	struct thread *td;
+	struct proc *p;
+	struct kse *ke;
+	struct ksegrp *kg;
+	int realstathz;
 	int awake;
 
 	realstathz = stathz ? stathz : hz;
@@ -321,15 +321,16 @@ schedcpu(arg)
 			}
 			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
 			resetpriority(kg);
-			if (kg->kg_pri.pri_level >= PUSER &&
+			td = FIRST_THREAD_IN_PROC(p);
+			if (td->td_priority >= PUSER &&
 			    (p->p_sflag & PS_INMEM)) {
 				int changedqueue =
-				    ((kg->kg_pri.pri_level / RQ_PPQ) !=
-				    (kg->kg_pri.pri_user / RQ_PPQ));
+				    ((td->td_priority / RQ_PPQ) !=
+				    (kg->kg_user_pri / RQ_PPQ));
 
-				kg->kg_pri.pri_level = kg->kg_pri.pri_user;
+				td->td_priority = kg->kg_user_pri;
 				FOREACH_KSE_IN_GROUP(kg, ke) {
-					if ((ke->ke_oncpu == NOCPU) && /* idle */
+					if ((ke->ke_oncpu == NOCPU) &&
 					    (p->p_stat == SRUN) && /* XXXKSE */
 					    changedqueue) {
 						remrunqueue(ke->ke_thread);
@@ -459,7 +460,7 @@ msleep(ident, mtx, priority, wmesg, timo)
 	td->td_wmesg = wmesg;
 	td->td_kse->ke_slptime = 0;	/* XXXKSE */
 	td->td_ksegrp->kg_slptime = 0;
-	td->td_ksegrp->kg_pri.pri_level = priority & PRIMASK;
+	td->td_priority = priority & PRIMASK;
 	CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
 	    td, p->p_pid, p->p_comm, wmesg, ident);
 	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
@@ -628,7 +629,7 @@ restart:
 			td->td_proc->p_stat = SRUN;
 			if (p->p_sflag & PS_INMEM) {
 				setrunqueue(td);
-				maybe_resched(td->td_ksegrp);
+				maybe_resched(td);
 			} else {
 				p->p_sflag |= PS_SWAPINREQ;
 				wakeup((caddr_t)&proc0);
@@ -673,7 +674,7 @@ wakeup_one(ident)
 			td->td_proc->p_stat = SRUN;
 			if (p->p_sflag & PS_INMEM) {
 				setrunqueue(td);
-				maybe_resched(td->td_ksegrp);
+				maybe_resched(td);
 				break;
 			} else {
 				p->p_sflag |= PS_SWAPINREQ;
@@ -829,7 +830,7 @@ setrunnable(struct thread *td)
 		wakeup((caddr_t)&proc0);
 	} else {
 		setrunqueue(td);
-		maybe_resched(td->td_ksegrp);
+		maybe_resched(td);
 	}
 	mtx_unlock_spin(&sched_lock);
 }
@@ -844,16 +845,19 @@ resetpriority(kg)
 	register struct ksegrp *kg;
 {
 	register unsigned int newpriority;
+	struct thread *td;
 
 	mtx_lock_spin(&sched_lock);
-	if (kg->kg_pri.pri_class == PRI_TIMESHARE) {
+	if (kg->kg_pri_class == PRI_TIMESHARE) {
 		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
 		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
 		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
 		    PRI_MAX_TIMESHARE);
-		kg->kg_pri.pri_user = newpriority;
+		kg->kg_user_pri = newpriority;
 	}
+	FOREACH_THREAD_IN_GROUP(kg, td) {
+		maybe_resched(td);
+	}
-	maybe_resched(kg);
 	mtx_unlock_spin(&sched_lock);
 }
 
@@ -943,8 +947,8 @@ schedclock(td)
 		kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
 		if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
 			resetpriority(td->td_ksegrp);
-			if (kg->kg_pri.pri_level >= PUSER)
-				kg->kg_pri.pri_level = kg->kg_pri.pri_user;
+			if (td->td_priority >= PUSER)
+				td->td_priority = kg->kg_user_pri;
 		}
 	} else {
 		panic("schedclock");
@@ -961,7 +965,7 @@ yield(struct thread *td, struct yield_args *uap)
 
 	mtx_assert(&Giant, MA_NOTOWNED);
 	mtx_lock_spin(&sched_lock);
-	kg->kg_pri.pri_level = PRI_MAX_TIMESHARE;
+	td->td_priority = PRI_MAX_TIMESHARE;
 	setrunqueue(td);
 	kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
 	mi_switch();
@@ -100,7 +100,7 @@ getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
 	int e = 0;
 
 	mtx_lock_spin(&sched_lock);
-	pri_to_rtp(&td->td_ksegrp->kg_pri, &rtp);
+	pri_to_rtp(td->td_ksegrp, &rtp);
 	mtx_unlock_spin(&sched_lock);
 	switch (rtp.type)
 	{
@@ -145,7 +145,7 @@ int ksched_getparam(register_t *ret, struct ksched *ksched,
 	struct rtprio rtp;
 
 	mtx_lock_spin(&sched_lock);
-	pri_to_rtp(&td->td_ksegrp->kg_pri, &rtp);
+	pri_to_rtp(td->td_ksegrp, &rtp);
 	mtx_unlock_spin(&sched_lock);
 	if (RTP_PRIO_IS_REALTIME(rtp.type))
 		param->sched_priority = rtpprio_to_p4prio(rtp.prio);
@@ -165,6 +165,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 {
 	int e = 0;
 	struct rtprio rtp;
+	struct ksegrp *kg = td->td_ksegrp;
 
 	switch(policy)
 	{
@@ -179,7 +180,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 		    ? RTP_PRIO_FIFO : RTP_PRIO_REALTIME;
 
 		mtx_lock_spin(&sched_lock);
-		rtp_to_pri(&rtp, &td->td_ksegrp->kg_pri);
+		rtp_to_pri(&rtp, kg);
 		td->td_last_kse->ke_flags |= KEF_NEEDRESCHED; /* XXXKSE */
 		mtx_unlock_spin(&sched_lock);
 	}
@@ -194,7 +195,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 		rtp.type = RTP_PRIO_NORMAL;
 		rtp.prio = p4prio_to_rtpprio(param->sched_priority);
 		mtx_lock_spin(&sched_lock);
-		rtp_to_pri(&rtp, &td->td_ksegrp->kg_pri);
+		rtp_to_pri(&rtp, kg);
 
 		/* XXX Simply revert to whatever we had for last
 		 * normal scheduler priorities.
@@ -80,7 +80,7 @@ userret(td, frame, oticks)
 		mtx_unlock(&Giant);
 
 	mtx_lock_spin(&sched_lock);
-	kg->kg_pri.pri_level = kg->kg_pri.pri_user;
+	td->td_priority = kg->kg_user_pri;
 	if (ke->ke_flags & KEF_NEEDRESCHED) {
 		DROP_GIANT();
 		setrunqueue(td);
@@ -267,6 +267,8 @@ struct thread {
 /* XXXKSE p_md is in the "on your own" section in old struct proc */
 	struct mdthread td_md;		/* (k) Any machine-dependent fields. */
 	register_t	td_retval[2];	/* (k) Syscall aux returns. */
+	u_char		td_base_pri;	/* (j) Thread base kernel priority */
+	u_char		td_priority;	/* (j) Thread active priority */
 #define	td_endcopy td_pcb
 
 	struct ucred	*td_ucred;	/* (k) Reference to credentials. */
@@ -335,12 +337,13 @@ struct ksegrp {
 	TAILQ_HEAD(, thread) kg_slpq;	/* (td_runq) NONRUNNABLE threads. */
 
 #define	kg_startzero kg_estcpu
-	u_int		kg_slptime;	/* (j) How long completely blocked. */
 	u_int		kg_estcpu;	/* Sum of the same field in KSEs. */
-#define	kg_endzero kg_pri
+	u_int		kg_slptime;	/* (j) How long completely blocked. */
+#define	kg_endzero kg_pri_class
 
 #define	kg_startcopy kg_endzero
-	struct priority	kg_pri;		/* (j) Process priority. */
+	char		kg_pri_class;	/* (j) */
+	char		kg_user_pri;	/* (j) priority when in userland */
 	char		kg_nice;	/* (j?/k?) Process "nice" value. */
 	struct rtprio	kg_rtprio;	/* (j) Realtime priority. */
 #define	kg_endcopy kg_runnable
@@ -700,7 +703,7 @@ void	cpu_throw __P((void)) __dead2;
 void	unsleep __P((struct thread *));
 void	updatepri __P((struct thread *));
 void	userret __P((struct thread *, struct trapframe *, u_int));
-void	maybe_resched __P((struct ksegrp *));
+void	maybe_resched __P((struct thread *));
 
 void	cpu_exit __P((struct thread *));
 void	exit1 __P((struct thread *, int)) __dead2;
@@ -75,8 +75,9 @@ struct rtprio {
 };
 
 #ifdef _KERNEL
-int	rtp_to_pri(struct rtprio *, struct priority *);
-void	pri_to_rtp(struct priority *, struct rtprio *);
+struct ksegrp;
+int	rtp_to_pri(struct rtprio *, struct ksegrp *);
+void	pri_to_rtp(struct ksegrp *, struct rtprio *);
 #endif
 #endif
 
@@ -1280,7 +1280,7 @@ retry:
 		VOP_UNLOCK(vp, 0, td);
 		if (error != EWOULDBLOCK)
 			break;
-		tsleep(vp, td->td_ksegrp->kg_pri.pri_user, "nap", 1);
+		tsleep(vp, td->td_ksegrp->kg_user_pri, "nap", 1);
 		goto retry;
 	}
 	indiroff = (lbn - NDADDR) % NINDIR(fs);
@@ -1308,7 +1308,7 @@ retry:
 		VOP_UNLOCK(vp, 0, td);
 		if (error != EWOULDBLOCK)
 			break;
-		tsleep(vp, td->td_ksegrp->kg_pri.pri_user, "nap", 1);
+		tsleep(vp, td->td_ksegrp->kg_user_pri, "nap", 1);
 		goto retry;
 	}
 #ifdef DEBUG
@@ -485,6 +485,7 @@ int action;
 {
 	struct proc *p;
 	struct ksegrp *kg;
+	struct thread *td;
 	struct proc *outp, *outp2;
 	int outpri, outpri2;
 	int didswap = 0;
@@ -531,7 +532,7 @@ retry:
 		 * Check all the thread groups..
 		 */
 		FOREACH_KSEGRP_IN_PROC(p, kg) {
-			if (PRI_IS_REALTIME(kg->kg_pri.pri_class)) {
+			if (PRI_IS_REALTIME(kg->kg_pri_class)) {
 				mtx_unlock_spin(&sched_lock);
 				PROC_UNLOCK(p);
 				goto nextproc;
@@ -543,7 +544,7 @@ retry:
 			 * Also guarantee swap_idle_threshold1
 			 * time in memory.
 			 */
-			if (((kg->kg_pri.pri_level) < PSOCK) ||
+			if (((FIRST_THREAD_IN_PROC(p)->td_priority) < PSOCK) ||
 			    (kg->kg_slptime < swap_idle_threshold1)) {
 				mtx_unlock_spin(&sched_lock);
 				PROC_UNLOCK(p);
@@ -114,7 +114,8 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 		case SSTOP:
 			kg = &p->p_ksegrp;	/* XXXKSE */
 			if (p->p_sflag & PS_INMEM) {
-				if (kg->kg_pri.pri_level <= PZERO)
+				if (FIRST_THREAD_IN_PROC(p)->td_priority
+				    <= PZERO)
 					totalp->t_dw++;
 				else if (kg->kg_slptime < maxslp)
 					totalp->t_sl++;
@@ -119,7 +119,7 @@ vm_pagezero(void)
 	rtp.prio = RTP_PRIO_MAX;
 	rtp.type = RTP_PRIO_IDLE;
 	mtx_lock_spin(&sched_lock);
-	rtp_to_pri(&rtp, &td->td_ksegrp->kg_pri);
+	rtp_to_pri(&rtp, td->td_ksegrp);
 	mtx_unlock_spin(&sched_lock);
 
 	for (;;) {