 - Restore runq to manipulating threads directly by putting runq links and
   rqindex back in struct thread.
 - Compile kern_switch.c independently again and stop #include'ing it from
   schedulers.
 - Remove the ts_thread backpointers and convert most code to go from
   struct thread to struct td_sched.
 - Clean up the ts_flags #define garbage that was causing us to sometimes
   do things that expanded to td->td_sched->ts_thread->td_flags in 4BSD.
 - Export the kern.sched sysctl node in sysctl.h.
Author: Jeff Roberson
Date:   2008-03-20 05:51:16 +00:00
Commit: 9727e63745 (parent 788ef8117c)
7 changed files with 209 additions and 252 deletions
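
The heart of the change, as a condensed sketch (not the verbatim headers): the
run-queue linkage and index move out of the per-scheduler td_sched and back
into struct thread, so the runq code and both schedulers handle threads
directly instead of chasing the old ts_thread backpointer.

	struct thread {
		...
		TAILQ_ENTRY(thread) td_runq;	/* (t) Run queue. */
		...
		u_char		td_rqindex;	/* (t) Run queue index. */
		...
		struct td_sched	*td_sched;	/* Scheduler-private data. */
	};

	/* Old 4BSD flag access, hidden behind the ts_flags macro: */
	/*	td->td_sched->ts_thread->td_flags */
	/* New flag access, with the run-queue state on the thread: */
	/*	td->td_flags */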

sys/conf/files

@@ -1485,6 +1485,7 @@ kern/kern_sema.c standard
 kern/kern_shutdown.c standard
 kern/kern_sig.c standard
 kern/kern_subr.c standard
+kern/kern_switch.c standard
 kern/kern_sx.c standard
 kern/kern_synch.c standard
 kern/kern_syscalls.c standard

sys/kern/kern_switch.c

@@ -30,7 +30,6 @@ __FBSDID("$FreeBSD$");
 #include "opt_sched.h"
 
-#ifndef KERN_SWITCH_INCLUDE
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kdb.h>
@@ -41,10 +40,8 @@ __FBSDID("$FreeBSD$");
 #include <sys/proc.h>
 #include <sys/queue.h>
 #include <sys/sched.h>
-#else  /* KERN_SWITCH_INCLUDE */
 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
 #include <sys/smp.h>
 #endif
 #include <sys/sysctl.h>
 #include <machine/cpu.h>
@@ -299,39 +296,39 @@ runq_setbit(struct runq *rq, int pri)
  * corresponding status bit.
  */
 void
-runq_add(struct runq *rq, struct td_sched *ts, int flags)
+runq_add(struct runq *rq, struct thread *td, int flags)
 {
 	struct rqhead *rqh;
 	int pri;
 
-	pri = ts->ts_thread->td_priority / RQ_PPQ;
-	ts->ts_rqindex = pri;
+	pri = td->td_priority / RQ_PPQ;
+	td->td_rqindex = pri;
 	runq_setbit(rq, pri);
 	rqh = &rq->rq_queues[pri];
-	CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
-	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
+	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
+	    td, td->td_priority, pri, rqh);
 	if (flags & SRQ_PREEMPTED) {
-		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
+		TAILQ_INSERT_HEAD(rqh, td, td_runq);
 	} else {
-		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
+		TAILQ_INSERT_TAIL(rqh, td, td_runq);
 	}
 }
 
 void
-runq_add_pri(struct runq *rq, struct td_sched *ts, u_char pri, int flags)
+runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
 {
 	struct rqhead *rqh;
 
 	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
-	ts->ts_rqindex = pri;
+	td->td_rqindex = pri;
 	runq_setbit(rq, pri);
 	rqh = &rq->rq_queues[pri];
-	CTR5(KTR_RUNQ, "runq_add_pri: td=%p ke=%p pri=%d idx=%d rqh=%p",
-	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
+	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
+	    td, td->td_priority, pri, rqh);
 	if (flags & SRQ_PREEMPTED) {
-		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
+		TAILQ_INSERT_HEAD(rqh, td, td_runq);
 	} else {
-		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
+		TAILQ_INSERT_TAIL(rqh, td, td_runq);
 	}
 }
 /*
@@ -360,11 +357,11 @@ runq_check(struct runq *rq)
 /*
  * Find the highest priority process on the run queue.
  */
-struct td_sched *
+struct thread *
 runq_choose_fuzz(struct runq *rq, int fuzz)
 {
 	struct rqhead *rqh;
-	struct td_sched *ts;
+	struct thread *td;
 	int pri;
 
 	while ((pri = runq_findbit(rq)) != -1) {
@@ -377,22 +374,22 @@ runq_choose_fuzz(struct runq *rq, int fuzz)
 			 */
 			int count = fuzz;
 			int cpu = PCPU_GET(cpuid);
-			struct td_sched *ts2;
-			ts2 = ts = TAILQ_FIRST(rqh);
+			struct thread *td2;
+			td2 = td = TAILQ_FIRST(rqh);
 
-			while (count-- && ts2) {
-				if (ts->ts_thread->td_lastcpu == cpu) {
-					ts = ts2;
+			while (count-- && td2) {
+				if (td->td_lastcpu == cpu) {
+					td = td2;
 					break;
 				}
-				ts2 = TAILQ_NEXT(ts2, ts_procq);
+				td2 = TAILQ_NEXT(td2, td_runq);
 			}
 		} else
-			ts = TAILQ_FIRST(rqh);
-		KASSERT(ts != NULL, ("runq_choose_fuzz: no proc on busy queue"));
+			td = TAILQ_FIRST(rqh);
+		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
 		CTR3(KTR_RUNQ,
-		    "runq_choose_fuzz: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
-		return (ts);
+		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
+		return (td);
 	}
 	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);
@@ -402,43 +399,43 @@ runq_choose_fuzz(struct runq *rq, int fuzz)
 /*
  * Find the highest priority process on the run queue.
  */
-struct td_sched *
+struct thread *
 runq_choose(struct runq *rq)
 {
 	struct rqhead *rqh;
-	struct td_sched *ts;
+	struct thread *td;
 	int pri;
 
 	while ((pri = runq_findbit(rq)) != -1) {
 		rqh = &rq->rq_queues[pri];
-		ts = TAILQ_FIRST(rqh);
-		KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
+		td = TAILQ_FIRST(rqh);
+		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
 		CTR3(KTR_RUNQ,
-		    "runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
-		return (ts);
+		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
+		return (td);
 	}
-	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
+	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);
 
 	return (NULL);
 }
 
-struct td_sched *
+struct thread *
 runq_choose_from(struct runq *rq, u_char idx)
 {
 	struct rqhead *rqh;
-	struct td_sched *ts;
+	struct thread *td;
 	int pri;
 
 	if ((pri = runq_findbit_from(rq, idx)) != -1) {
 		rqh = &rq->rq_queues[pri];
-		ts = TAILQ_FIRST(rqh);
-		KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
+		td = TAILQ_FIRST(rqh);
+		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
 		CTR4(KTR_RUNQ,
-		    "runq_choose_from: pri=%d td_sched=%p idx=%d rqh=%p",
-		    pri, ts, ts->ts_rqindex, rqh);
-		return (ts);
+		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
+		    pri, td, td->td_rqindex, rqh);
+		return (td);
 	}
-	CTR1(KTR_RUNQ, "runq_choose_from: idleproc pri=%d", pri);
+	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);
 
 	return (NULL);
 }
@@ -448,36 +445,26 @@ runq_choose_from(struct runq *rq, u_char idx)
  * Caller must set state afterwards.
  */
 void
-runq_remove(struct runq *rq, struct td_sched *ts)
+runq_remove(struct runq *rq, struct thread *td)
 {
 
-	runq_remove_idx(rq, ts, NULL);
+	runq_remove_idx(rq, td, NULL);
 }
 
 void
-runq_remove_idx(struct runq *rq, struct td_sched *ts, u_char *idx)
+runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
 {
 	struct rqhead *rqh;
 	u_char pri;
 
-	KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
+	KASSERT(td->td_flags & TDF_INMEM,
 		("runq_remove_idx: thread swapped out"));
-	pri = ts->ts_rqindex;
+	pri = td->td_rqindex;
 	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
 	rqh = &rq->rq_queues[pri];
-	CTR5(KTR_RUNQ, "runq_remove_idx: td=%p, ts=%p pri=%d %d rqh=%p",
-	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
-	{
-		struct td_sched *nts;
-
-		TAILQ_FOREACH(nts, rqh, ts_procq)
-			if (nts == ts)
-				break;
-		if (ts != nts)
-			panic("runq_remove_idx: ts %p not on rqindex %d",
-			    ts, pri);
-	}
-	TAILQ_REMOVE(rqh, ts, ts_procq);
+	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
+	    td, td->td_priority, pri, rqh);
+	TAILQ_REMOVE(rqh, td, td_runq);
 	if (TAILQ_EMPTY(rqh)) {
 		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
 		runq_clrbit(rq, pri);
@@ -485,5 +472,3 @@ runq_remove_idx(struct runq *rq, struct td_sched *ts, u_char *idx)
 		*idx = (pri + 1) % RQ_NQS;
 	}
 }
-#endif /* KERN_SWITCH_INCLUDE */
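
For reference, a minimal sketch of how a scheduler drives the reworked
interface above (example_enqueue() and example_dequeue() are hypothetical
helpers, not part of this commit): runq_add() and runq_remove() now take the
thread itself, and runq_choose() hands one back.

	static void
	example_enqueue(struct runq *rq, struct thread *td)
	{

		/* Was runq_add(rq, td->td_sched, 0) before this commit. */
		runq_add(rq, td, 0);
	}

	static struct thread *
	example_dequeue(struct runq *rq)
	{
		struct thread *td;

		/* Formerly returned a struct td_sched * to dereference. */
		td = runq_choose(rq);
		if (td != NULL)
			runq_remove(rq, td);	/* Caller sets state after. */
		return (td);
	}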

sys/kern/sched_4bsd.c

@@ -81,10 +81,7 @@ __FBSDID("$FreeBSD$");
  * the requirements of this scheduler
  */
 struct td_sched {
-	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
-	struct thread	*ts_thread;	/* (*) Active associated thread. */
 	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
-	u_char		ts_rqindex;	/* (j) Run queue index. */
 	int		ts_cpticks;	/* (j) Ticks of cpu time. */
 	int		ts_slptime;	/* (j) Seconds !RUNNING. */
 	struct runq	*ts_runq;	/* runq the thread is currently on */
@@ -92,13 +89,7 @@ struct td_sched {
 /* flags kept in td_flags */
 #define TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
-#define TDF_EXIT	TDF_SCHED1	/* thread is being killed. */
-#define TDF_BOUND	TDF_SCHED2
-
-#define ts_flags	ts_thread->td_flags
-#define TSF_DIDRUN	TDF_DIDRUN /* thread actually ran. */
-#define TSF_EXIT	TDF_EXIT /* thread is being killed. */
-#define TSF_BOUND	TDF_BOUND /* stuck to one CPU */
+#define TDF_BOUND	TDF_SCHED1	/* Bound to one CPU. */
 
 #define SKE_RUNQ_PCPU(ts)						\
     ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
@@ -299,8 +290,6 @@ maybe_preempt(struct thread *td)
 	 */
 	ctd = curthread;
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	KASSERT ((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
-	    ("thread has no (or wrong) sched-private part."));
 	KASSERT((td->td_inhibitors == 0),
 		("maybe_preempt: trying to run inhibited thread"));
 	pri = td->td_priority;
@@ -462,13 +451,13 @@ schedcpu(void)
 			 */
 			if (TD_ON_RUNQ(td)) {
 				awake = 1;
-				ts->ts_flags &= ~TSF_DIDRUN;
+				td->td_flags &= ~TDF_DIDRUN;
 			} else if (TD_IS_RUNNING(td)) {
 				awake = 1;
-				/* Do not clear TSF_DIDRUN */
-			} else if (ts->ts_flags & TSF_DIDRUN) {
+				/* Do not clear TDF_DIDRUN */
+			} else if (td->td_flags & TDF_DIDRUN) {
 				awake = 1;
-				ts->ts_flags &= ~TSF_DIDRUN;
+				td->td_flags &= ~TDF_DIDRUN;
 			}
 
 			/*
@@ -636,7 +625,6 @@ schedinit(void)
 	proc0.p_sched = NULL; /* XXX */
 	thread0.td_sched = &td_sched0;
 	thread0.td_lock = &sched_lock;
-	td_sched0.ts_thread = &thread0;
 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
 }
@@ -740,7 +728,6 @@ sched_fork_thread(struct thread *td, struct thread *childtd)
 	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
 	ts = childtd->td_sched;
 	bzero(ts, sizeof(*ts));
-	ts->ts_thread = childtd;
 }
 
 void
@@ -779,8 +766,7 @@ sched_priority(struct thread *td, u_char prio)
 	if (td->td_priority == prio)
 		return;
 	td->td_priority = prio;
-	if (TD_ON_RUNQ(td) &&
-	    td->td_sched->ts_rqindex != (prio / RQ_PPQ)) {
+	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
 		sched_rem(td);
 		sched_add(td, SRQ_BORING);
 	}
@@ -961,7 +947,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 		 */
 		KASSERT((newtd->td_inhibitors == 0),
 			("trying to run inhibited thread"));
-		newtd->td_sched->ts_flags |= TSF_DIDRUN;
+		newtd->td_flags |= TDF_DIDRUN;
 		TD_SET_RUNNING(newtd);
 		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
 			sched_load_add();
@@ -1186,7 +1172,7 @@ sched_add(struct thread *td, int flags)
 		single_cpu = 1;
 		CTR3(KTR_RUNQ,
 		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
-	} else if ((ts)->ts_flags & TSF_BOUND) {
+	} else if ((td)->td_flags & TDF_BOUND) {
 		/* Find CPU from bound runq */
 		KASSERT(SKE_RUNQ_PCPU(ts),("sched_add: bound td_sched not on cpu runq"));
 		cpu = ts->ts_runq - &runq_pcpu[0];
@@ -1223,7 +1209,7 @@ sched_add(struct thread *td, int flags)
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_add();
-	runq_add(ts->ts_runq, ts, flags);
+	runq_add(ts->ts_runq, td, flags);
 }
 #else /* SMP */
 {
@@ -1268,7 +1254,7 @@ sched_add(struct thread *td, int flags)
 	}
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_add();
-	runq_add(ts->ts_runq, ts, flags);
+	runq_add(ts->ts_runq, td, flags);
 	maybe_resched(td);
 }
 #endif /* SMP */
@@ -1290,7 +1276,7 @@ sched_rem(struct thread *td)
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_rem();
-	runq_remove(ts->ts_runq, ts);
+	runq_remove(ts->ts_runq, td);
 	TD_SET_CAN_RUN(td);
 }
@@ -1301,40 +1287,40 @@ sched_rem(struct thread *td)
 struct thread *
 sched_choose(void)
 {
-	struct td_sched *ts;
+	struct thread *td;
 	struct runq *rq;
 
 	mtx_assert(&sched_lock,  MA_OWNED);
 #ifdef SMP
-	struct td_sched *kecpu;
+	struct thread *tdcpu;
 
 	rq = &runq;
-	ts = runq_choose_fuzz(&runq, runq_fuzz);
-	kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
+	td = runq_choose_fuzz(&runq, runq_fuzz);
+	tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
 
-	if (ts == NULL ||
-	    (kecpu != NULL &&
-	     kecpu->ts_thread->td_priority < ts->ts_thread->td_priority)) {
-		CTR2(KTR_RUNQ, "choosing td_sched %p from pcpu runq %d", kecpu,
+	if (td == NULL ||
+	    (tdcpu != NULL &&
+	     tdcpu->td_priority < td->td_priority)) {
+		CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
 		     PCPU_GET(cpuid));
-		ts = kecpu;
+		td = tdcpu;
 		rq = &runq_pcpu[PCPU_GET(cpuid)];
 	} else {
-		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", ts);
+		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
 	}
 
 #else
 	rq = &runq;
-	ts = runq_choose(&runq);
+	td = runq_choose(&runq);
 #endif
 
-	if (ts) {
-		runq_remove(rq, ts);
-		ts->ts_flags |= TSF_DIDRUN;
+	if (td) {
+		runq_remove(rq, td);
+		td->td_flags |= TDF_DIDRUN;
 
-		KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
+		KASSERT(td->td_flags & TDF_INMEM,
 		    ("sched_choose: thread swapped out"));
-		return (ts->ts_thread);
+		return (td);
 	}
 	return (PCPU_GET(idlethread));
 }
@@ -1383,7 +1369,7 @@ sched_bind(struct thread *td, int cpu)
 	ts = td->td_sched;
 
-	ts->ts_flags |= TSF_BOUND;
+	td->td_flags |= TDF_BOUND;
 #ifdef SMP
 	ts->ts_runq = &runq_pcpu[cpu];
 	if (PCPU_GET(cpuid) == cpu)
@@ -1397,14 +1383,14 @@ void
 sched_unbind(struct thread* td)
 {
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	td->td_sched->ts_flags &= ~TSF_BOUND;
+	td->td_flags &= ~TDF_BOUND;
 }
 
 int
 sched_is_bound(struct thread *td)
 {
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	return (td->td_sched->ts_flags & TSF_BOUND);
+	return (td->td_flags & TDF_BOUND);
 }
 
 void
@@ -1515,6 +1501,3 @@ void
 sched_affinity(struct thread *td)
 {
 }
-#define KERN_SWITCH_INCLUDE 1
-#include "kern/kern_switch.c"

sys/kern/sched_ule.c

@@ -83,11 +83,8 @@ __FBSDID("$FreeBSD$");
  * by the thread lock.
  */
 struct td_sched {
-	TAILQ_ENTRY(td_sched) ts_procq;	/* Run queue. */
-	struct thread	*ts_thread;	/* Active associated thread. */
 	struct runq	*ts_runq;	/* Run-queue we're queued on. */
 	short		ts_flags;	/* TSF_* flags. */
-	u_char		ts_rqindex;	/* Run queue index. */
 	u_char		ts_cpu;		/* CPU that we have affinity for. */
 	int		ts_rltick;	/* Real last tick, for affinity. */
 	int		ts_slice;	/* Ticks of slice remaining. */
@@ -258,12 +255,12 @@ static void sched_interact_fork(struct thread *);
 static void sched_pctcpu_update(struct td_sched *);
 
 /* Operations on per processor queues */
-static struct td_sched * tdq_choose(struct tdq *);
+static struct thread *tdq_choose(struct tdq *);
 static void tdq_setup(struct tdq *);
-static void tdq_load_add(struct tdq *, struct td_sched *);
-static void tdq_load_rem(struct tdq *, struct td_sched *);
-static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
-static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
+static void tdq_load_add(struct tdq *, struct thread *);
+static void tdq_load_rem(struct tdq *, struct thread *);
+static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
+static __inline void tdq_runq_rem(struct tdq *, struct thread *);
 static inline int sched_shouldpreempt(int, int, int);
 void tdq_print(int cpu);
 static void runq_print(struct runq *rq);
@@ -271,13 +268,13 @@ static void tdq_add(struct tdq *, struct thread *, int);
 #ifdef SMP
 static int tdq_move(struct tdq *, struct tdq *);
 static int tdq_idled(struct tdq *);
-static void tdq_notify(struct tdq *, struct td_sched *);
-static struct td_sched *tdq_steal(struct tdq *, int);
-static struct td_sched *runq_steal(struct runq *, int);
-static int sched_pickcpu(struct td_sched *, int);
+static void tdq_notify(struct tdq *, struct thread *);
+static struct thread *tdq_steal(struct tdq *, int);
+static struct thread *runq_steal(struct runq *, int);
+static int sched_pickcpu(struct thread *, int);
 static void sched_balance(void);
 static int sched_balance_pair(struct tdq *, struct tdq *);
-static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
+static inline struct tdq *sched_setcpu(struct thread *, int, int);
 static inline struct mtx *thread_block_switch(struct thread *);
 static inline void thread_unblock_switch(struct thread *, struct mtx *);
 static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
@@ -297,7 +294,7 @@ static void
 runq_print(struct runq *rq)
 {
 	struct rqhead *rqh;
-	struct td_sched *ts;
+	struct thread *td;
 	int pri;
 	int j;
 	int i;
@@ -309,9 +306,10 @@ runq_print(struct runq *rq)
 		if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
 			pri = j + (i << RQB_L2BPW);
 			rqh = &rq->rq_queues[pri];
-			TAILQ_FOREACH(ts, rqh, ts_procq) {
+			TAILQ_FOREACH(td, rqh, td_runq) {
 				printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
-				    ts->ts_thread, ts->ts_thread->td_name, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
+				    td, td->td_name, td->td_priority,
+				    td->td_rqindex, pri);
 			}
 		}
 	}
@@ -383,19 +381,21 @@ sched_shouldpreempt(int pri, int cpri, int remote)
  * queue position for timeshare threads.
  */
 static __inline void
-tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
+tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
 {
+	struct td_sched *ts;
 	u_char pri;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
 
-	TD_SET_RUNQ(ts->ts_thread);
-	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
+	pri = td->td_priority;
+	ts = td->td_sched;
+	TD_SET_RUNQ(td);
+	if (THREAD_CAN_MIGRATE(td)) {
 		tdq->tdq_transferable++;
 		ts->ts_flags |= TSF_XFERABLE;
 	}
-	pri = ts->ts_thread->td_priority;
 	if (pri <= PRI_MAX_REALTIME) {
 		ts->ts_runq = &tdq->tdq_realtime;
 	} else if (pri <= PRI_MAX_TIMESHARE) {
@@ -419,11 +419,11 @@ tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
 			pri = (unsigned char)(pri - 1) % RQ_NQS;
 		} else
 			pri = tdq->tdq_ridx;
-		runq_add_pri(ts->ts_runq, ts, pri, flags);
+		runq_add_pri(ts->ts_runq, td, pri, flags);
 		return;
 	} else
 		ts->ts_runq = &tdq->tdq_idle;
-	runq_add(ts->ts_runq, ts, flags);
+	runq_add(ts->ts_runq, td, flags);
 }
 
 /*
@@ -432,22 +432,25 @@ tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
  * transferable count does not reflect them.
 */
 static __inline void
-tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
+tdq_runq_rem(struct tdq *tdq, struct thread *td)
 {
+	struct td_sched *ts;
+
+	ts = td->td_sched;
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	KASSERT(ts->ts_runq != NULL,
-	    ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
+	    ("tdq_runq_remove: thread %p null ts_runq", td));
 	if (ts->ts_flags & TSF_XFERABLE) {
 		tdq->tdq_transferable--;
 		ts->ts_flags &= ~TSF_XFERABLE;
 	}
 	if (ts->ts_runq == &tdq->tdq_timeshare) {
 		if (tdq->tdq_idx != tdq->tdq_ridx)
-			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
+			runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
 		else
-			runq_remove_idx(ts->ts_runq, ts, NULL);
+			runq_remove_idx(ts->ts_runq, td, NULL);
 	} else
-		runq_remove(ts->ts_runq, ts);
+		runq_remove(ts->ts_runq, td);
 }
 
 /*
@@ -455,17 +458,18 @@ tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
  * for this thread to the referenced thread queue.
 */
 static void
-tdq_load_add(struct tdq *tdq, struct td_sched *ts)
+tdq_load_add(struct tdq *tdq, struct thread *td)
 {
+	struct td_sched *ts;
 	int class;
 
+	ts = td->td_sched;
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
-	class = PRI_BASE(ts->ts_thread->td_pri_class);
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
+	class = PRI_BASE(td->td_pri_class);
 	tdq->tdq_load++;
 	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
-	if (class != PRI_ITHD &&
-	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
+	if (class != PRI_ITHD && (td->td_proc->p_flag & P_NOLOAD) == 0)
 		tdq->tdq_sysload++;
 }
 
@@ -474,15 +478,16 @@ tdq_load_add(struct tdq *tdq, struct td_sched *ts)
  * exiting.
 */
 static void
-tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
+tdq_load_rem(struct tdq *tdq, struct thread *td)
 {
+	struct td_sched *ts;
 	int class;
 
-	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
+	ts = td->td_sched;
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	class = PRI_BASE(ts->ts_thread->td_pri_class);
-	if (class != PRI_ITHD &&
-	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
+	class = PRI_BASE(td->td_pri_class);
+	if (class != PRI_ITHD && (td->td_proc->p_flag & P_NOLOAD) == 0)
 		tdq->tdq_sysload--;
 	KASSERT(tdq->tdq_load != 0,
 	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
@@ -497,16 +502,13 @@ tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
 static void
 tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
 {
-	struct td_sched *ts;
 	struct thread *td;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	if (ctd == NULL)
 		ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
-	ts = tdq_choose(tdq);
-	if (ts)
-		td = ts->ts_thread;
-	if (ts == NULL || td->td_priority > ctd->td_priority)
+	td = tdq_choose(tdq);
+	if (td == NULL || td->td_priority > ctd->td_priority)
 		tdq->tdq_lowpri = ctd->td_priority;
 	else
 		tdq->tdq_lowpri = td->td_priority;
@@ -847,10 +849,10 @@ tdq_move(struct tdq *from, struct tdq *to)
 	tdq = from;
 	cpu = TDQ_ID(to);
-	ts = tdq_steal(tdq, cpu);
-	if (ts == NULL)
+	td = tdq_steal(tdq, cpu);
+	if (td == NULL)
 		return (0);
-	td = ts->ts_thread;
+	ts = td->td_sched;
 	/*
 	 * Although the run queue is locked the thread may be blocked.  Lock
 	 * it to clear this and acquire the run-queue lock.
@@ -926,7 +928,7 @@ tdq_idled(struct tdq *tdq)
 * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
 */
 static void
-tdq_notify(struct tdq *tdq, struct td_sched *ts)
+tdq_notify(struct tdq *tdq, struct thread *td)
 {
 	int cpri;
 	int pri;
@@ -934,8 +936,8 @@ tdq_notify(struct tdq *tdq, struct td_sched *ts)
 	if (tdq->tdq_ipipending)
 		return;
-	cpu = ts->ts_cpu;
-	pri = ts->ts_thread->td_priority;
+	cpu = td->td_sched->ts_cpu;
+	pri = td->td_priority;
 	cpri = pcpu_find(cpu)->pc_curthread->td_priority;
 	if (!sched_shouldpreempt(pri, cpri, 1))
 		return;
@@ -947,12 +949,12 @@ tdq_notify(struct tdq *tdq, struct td_sched *ts)
 * Steals load from a timeshare queue.  Honors the rotating queue head
 * index.
 */
-static struct td_sched *
+static struct thread *
 runq_steal_from(struct runq *rq, int cpu, u_char start)
 {
-	struct td_sched *ts;
 	struct rqbits *rqb;
 	struct rqhead *rqh;
+	struct thread *td;
 	int first;
 	int bit;
 	int pri;
@@ -976,10 +978,10 @@ again:
 			pri = RQB_FFS(rqb->rqb_bits[i]);
 			pri += (i << RQB_L2BPW);
 			rqh = &rq->rq_queues[pri];
-			TAILQ_FOREACH(ts, rqh, ts_procq) {
-				if (first && THREAD_CAN_MIGRATE(ts->ts_thread) &&
-				    THREAD_CAN_SCHED(ts->ts_thread, cpu))
-					return (ts);
+			TAILQ_FOREACH(td, rqh, td_runq) {
+				if (first && THREAD_CAN_MIGRATE(td) &&
+				    THREAD_CAN_SCHED(td, cpu))
+					return (td);
 				first = 1;
 			}
 		}
@@ -994,12 +996,12 @@ again:
 /*
 * Steals load from a standard linear queue.
 */
-static struct td_sched *
+static struct thread *
 runq_steal(struct runq *rq, int cpu)
 {
 	struct rqhead *rqh;
 	struct rqbits *rqb;
-	struct td_sched *ts;
+	struct thread *td;
 	int word;
 	int bit;
@@ -1011,10 +1013,10 @@ runq_steal(struct runq *rq, int cpu)
 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
 				continue;
 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
-			TAILQ_FOREACH(ts, rqh, ts_procq)
-				if (THREAD_CAN_MIGRATE(ts->ts_thread) &&
-				    THREAD_CAN_SCHED(ts->ts_thread, cpu))
-					return (ts);
+			TAILQ_FOREACH(td, rqh, td_runq)
+				if (THREAD_CAN_MIGRATE(td) &&
+				    THREAD_CAN_SCHED(td, cpu))
+					return (td);
 		}
 	}
 	return (NULL);
@@ -1023,17 +1025,17 @@ runq_steal(struct runq *rq, int cpu)
 /*
 * Attempt to steal a thread in priority order from a thread queue.
 */
-static struct td_sched *
+static struct thread *
 tdq_steal(struct tdq *tdq, int cpu)
 {
-	struct td_sched *ts;
+	struct thread *td;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	if ((ts = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
-		return (ts);
-	if ((ts = runq_steal_from(&tdq->tdq_timeshare, cpu, tdq->tdq_ridx))
-	    != NULL)
-		return (ts);
+	if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
+		return (td);
+	if ((td = runq_steal_from(&tdq->tdq_timeshare,
+	    cpu, tdq->tdq_ridx)) != NULL)
+		return (td);
 	return (runq_steal(&tdq->tdq_idle, cpu));
 }
@@ -1042,18 +1044,17 @@ tdq_steal(struct tdq *tdq, int cpu)
 * current lock and returns with the assigned queue locked.
 */
 static inline struct tdq *
-sched_setcpu(struct td_sched *ts, int cpu, int flags)
+sched_setcpu(struct thread *td, int cpu, int flags)
 {
-	struct thread *td;
 	struct tdq *tdq;
 
-	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	tdq = TDQ_CPU(cpu);
-	td = ts->ts_thread;
-	ts->ts_cpu = cpu;
-	/* If the lock matches just return the queue. */
+	td->td_sched->ts_cpu = cpu;
+	/*
+	 * If the lock matches just return the queue.
+	 */
 	if (td->td_lock == TDQ_LOCKPTR(tdq))
 		return (tdq);
 #ifdef notyet
@@ -1079,10 +1080,10 @@ sched_setcpu(struct td_sched *ts, int cpu, int flags)
 }
 
 static int
-sched_pickcpu(struct td_sched *ts, int flags)
+sched_pickcpu(struct thread *td, int flags)
 {
 	struct cpu_group *cg;
-	struct thread *td;
+	struct td_sched *ts;
 	struct tdq *tdq;
 	cpumask_t mask;
 	int self;
@@ -1090,7 +1091,7 @@ sched_pickcpu(struct td_sched *ts, int flags)
 	int cpu;
 
 	self = PCPU_GET(cpuid);
-	td = ts->ts_thread;
+	ts = td->td_sched;
 	if (smp_started == 0)
 		return (self);
 	/*
@@ -1144,29 +1145,28 @@ sched_pickcpu(struct td_sched *ts, int flags)
 /*
 * Pick the highest priority task we have and return it.
 */
-static struct td_sched *
+static struct thread *
 tdq_choose(struct tdq *tdq)
 {
-	struct td_sched *ts;
+	struct thread *td;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	ts = runq_choose(&tdq->tdq_realtime);
-	if (ts != NULL)
-		return (ts);
-	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
-	if (ts != NULL) {
-		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
+	td = runq_choose(&tdq->tdq_realtime);
+	if (td != NULL)
+		return (td);
+	td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
+	if (td != NULL) {
+		KASSERT(td->td_priority >= PRI_MIN_TIMESHARE,
 		    ("tdq_choose: Invalid priority on timeshare queue %d",
-		    ts->ts_thread->td_priority));
-		return (ts);
+		    td->td_priority));
+		return (td);
 	}
-	ts = runq_choose(&tdq->tdq_idle);
-	if (ts != NULL) {
-		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
+	td = runq_choose(&tdq->tdq_idle);
+	if (td != NULL) {
+		KASSERT(td->td_priority >= PRI_MIN_IDLE,
 		    ("tdq_choose: Invalid priority on idle queue %d",
-		    ts->ts_thread->td_priority));
-		return (ts);
+		    td->td_priority));
+		return (td);
 	}
 
 	return (NULL);
@@ -1238,7 +1238,7 @@ sched_setup(void *dummy)
 	/* Add thread0's load since it's running. */
 	TDQ_LOCK(tdq);
 	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
-	tdq_load_add(tdq, &td_sched0);
+	tdq_load_add(tdq, &thread0);
 	tdq->tdq_lowpri = thread0.td_priority;
 	TDQ_UNLOCK(tdq);
 }
@@ -1455,7 +1455,6 @@ schedinit(void)
 	thread0.td_sched = &td_sched0;
 	td_sched0.ts_ltick = ticks;
 	td_sched0.ts_ftick = ticks;
-	td_sched0.ts_thread = &thread0;
 	td_sched0.ts_slice = sched_slice;
 }
@@ -1683,7 +1682,7 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
 	tdn = TDQ_CPU(td->td_sched->ts_cpu);
 #ifdef SMP
-	tdq_load_rem(tdq, td->td_sched);
+	tdq_load_rem(tdq, td);
 	/*
 	 * Do the lock dance required to avoid LOR.  We grab an extra
 	 * spinlock nesting to prevent preemption while we're
@@ -1693,7 +1692,7 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
 	thread_block_switch(td);	/* This releases the lock on tdq. */
 	TDQ_LOCK(tdn);
 	tdq_add(tdn, td, flags);
-	tdq_notify(tdn, td->td_sched);
+	tdq_notify(tdn, td);
 	/*
 	 * After we unlock tdn the new cpu still can't switch into this
 	 * thread until we've unblocked it in cpu_switch().  The lock
@@ -1759,14 +1758,14 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
 		    SRQ_OURSELF|SRQ_YIELDING;
 		if (ts->ts_cpu == cpuid)
-			tdq_runq_add(tdq, ts, srqflag);
+			tdq_runq_add(tdq, td, srqflag);
 		else
 			mtx = sched_switch_migrate(tdq, td, srqflag);
 	} else {
 		/* This thread must be going to sleep. */
 		TDQ_LOCK(tdq);
 		mtx = thread_block_switch(td);
-		tdq_load_rem(tdq, ts);
+		tdq_load_rem(tdq, td);
 	}
 	/*
 	 * We enter here with the thread blocked and assigned to the
@@ -1912,7 +1911,6 @@ sched_fork_thread(struct thread *td, struct thread *child)
 	ts2 = child->td_sched;
 	child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
 	child->td_cpuset = cpuset_ref(td->td_cpuset);
-	ts2->ts_thread = child;
 	ts2->ts_cpu = ts->ts_cpu;
 	ts2->ts_flags = 0;
 	/*
@@ -2137,16 +2135,16 @@ out:
 struct thread *
 sched_choose(void)
 {
-	struct td_sched *ts;
+	struct thread *td;
 	struct tdq *tdq;
 
 	tdq = TDQ_SELF();
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	ts = tdq_choose(tdq);
-	if (ts) {
-		ts->ts_ltick = ticks;
-		tdq_runq_rem(tdq, ts);
-		return (ts->ts_thread);
+	td = tdq_choose(tdq);
+	if (td) {
+		td->td_sched->ts_ltick = ticks;
+		tdq_runq_rem(tdq, td);
+		return (td);
 	}
 	return (PCPU_GET(idlethread));
 }
@@ -2184,7 +2182,6 @@ sched_setpreempt(struct thread *td)
 void
 tdq_add(struct tdq *tdq, struct thread *td, int flags)
 {
-	struct td_sched *ts;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	KASSERT((td->td_inhibitors == 0),
@@ -2194,11 +2191,10 @@ tdq_add(struct tdq *tdq, struct thread *td, int flags)
 	KASSERT(td->td_flags & TDF_INMEM,
 	    ("sched_add: thread swapped out"));
 
-	ts = td->td_sched;
 	if (td->td_priority < tdq->tdq_lowpri)
 		tdq->tdq_lowpri = td->td_priority;
-	tdq_runq_add(tdq, ts, flags);
-	tdq_load_add(tdq, ts);
+	tdq_runq_add(tdq, td, flags);
+	tdq_load_add(tdq, td);
 }
 
 /*
@@ -2210,7 +2206,6 @@ sched_add(struct thread *td, int flags)
 {
 	struct tdq *tdq;
 #ifdef SMP
-	struct td_sched *ts;
 	int cpu;
 #endif
 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
@@ -2228,12 +2223,11 @@ sched_add(struct thread *td, int flags)
 	 * Pick the destination cpu and if it isn't ours transfer to the
 	 * target cpu.
 	 */
-	ts = td->td_sched;
-	cpu = sched_pickcpu(ts, flags);
-	tdq = sched_setcpu(ts, cpu, flags);
+	cpu = sched_pickcpu(td, flags);
+	tdq = sched_setcpu(td, cpu, flags);
 	tdq_add(tdq, td, flags);
 	if (cpu != PCPU_GET(cpuid)) {
-		tdq_notify(tdq, ts);
+		tdq_notify(tdq, td);
 		return;
 	}
 #else
@@ -2259,19 +2253,17 @@ void
 sched_rem(struct thread *td)
 {
 	struct tdq *tdq;
-	struct td_sched *ts;
 
 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
 	    td, td->td_name, td->td_priority, curthread,
 	    curthread->td_name);
-	ts = td->td_sched;
-	tdq = TDQ_CPU(ts->ts_cpu);
+	tdq = TDQ_CPU(td->td_sched->ts_cpu);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
 	KASSERT(TD_ON_RUNQ(td),
 	    ("sched_rem: thread not on run queue"));
-	tdq_runq_rem(tdq, ts);
-	tdq_load_rem(tdq, ts);
+	tdq_runq_rem(tdq, td);
+	tdq_load_rem(tdq, td);
 	TD_SET_CAN_RUN(td);
 	if (td->td_priority == tdq->tdq_lowpri)
 		tdq_setlowpri(tdq, NULL);
@@ -2331,7 +2323,7 @@ sched_affinity(struct thread *td)
 	 * an ipi to force the issue.
 	 */
 	cpu = ts->ts_cpu;
-	ts->ts_cpu = sched_pickcpu(ts, 0);
+	ts->ts_cpu = sched_pickcpu(td, 0);
 	if (cpu != PCPU_GET(cpuid))
 		ipi_selected(1 << cpu, IPI_PREEMPT);
 #endif
@@ -2463,7 +2455,7 @@ sched_throw(struct thread *td)
 		spinlock_exit();
 	} else {
 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
-		tdq_load_rem(tdq, td->td_sched);
+		tdq_load_rem(tdq, td);
 		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
 	}
 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
@@ -2501,8 +2493,7 @@ sched_fork_exit(struct thread *td)
 	    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
 }
 
-static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
-    "Scheduler");
+SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
     "Scheduler name");
 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
@@ -2532,7 +2523,3 @@ SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
 /* ps compat.  All cpu percentages from ULE are weighted. */
 static int ccpu = 0;
 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
-
-#define KERN_SWITCH_INCLUDE 1
-#include "kern/kern_switch.c"

sys/sys/proc.h

@@ -174,8 +174,7 @@ struct thread {
 	struct mtx	*volatile td_lock; /* replaces sched lock */
 	struct proc	*td_proc;	/* (*) Associated process. */
 	TAILQ_ENTRY(thread) td_plist;	/* (*) All threads in this proc. */
-
-	/* The two queues below should someday be merged. */
+	TAILQ_ENTRY(thread) td_runq;	/* (t) Run queue. */
 	TAILQ_ENTRY(thread) td_slpq;	/* (t) Sleep queue. */
 	TAILQ_ENTRY(thread) td_lockq;	/* (t) Lock queue. */
 	struct cpuset	*td_cpuset;	/* (t) CPU affinity mask. */
@@ -233,6 +232,7 @@ struct thread {
 /* Copied during fork1() or thread_sched_upcall(). */
 #define	td_startcopy td_endzero
+	u_char		td_rqindex;	/* (t) Run queue index. */
 	u_char		td_base_pri;	/* (t) Thread base kernel priority. */
 	u_char		td_priority;	/* (t) Thread active priority. */
 	u_char		td_pri_class;	/* (t) Scheduling class. */

sys/sys/runq.h

@@ -31,7 +31,7 @@
 #include <machine/runq.h>
 
-struct td_sched;
+struct thread;
 
 /*
  * Run queue parameters.
@@ -43,7 +43,7 @@ struct td_sched;
 /*
  * Head of run queues.
  */
-TAILQ_HEAD(rqhead, td_sched);
+TAILQ_HEAD(rqhead, thread);
 
 /*
  * Bit array which maintains the status of a run queue.  When a queue is
@@ -62,14 +62,14 @@ struct runq {
 	struct	rqhead rq_queues[RQ_NQS];
 };
 
-void	runq_add(struct runq *, struct td_sched *, int);
-void	runq_add_pri(struct runq *, struct td_sched *, u_char, int);
+void	runq_add(struct runq *, struct thread *, int);
+void	runq_add_pri(struct runq *, struct thread *, u_char, int);
 int	runq_check(struct runq *);
-struct	td_sched *runq_choose(struct runq *);
-struct	td_sched *runq_choose_from(struct runq *, u_char);
-struct	td_sched *runq_choose_fuzz(struct runq *, int);
+struct	thread *runq_choose(struct runq *);
+struct	thread *runq_choose_from(struct runq *, u_char);
+struct	thread *runq_choose_fuzz(struct runq *, int);
 void	runq_init(struct runq *);
-void	runq_remove(struct runq *, struct td_sched *);
-void	runq_remove_idx(struct runq *, struct td_sched *, u_char *);
+void	runq_remove(struct runq *, struct thread *);
+void	runq_remove_idx(struct runq *, struct thread *, u_char *);
 
 #endif
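
A small sketch of what the new rqhead declaration means in practice
(example_scan() is a hypothetical helper, not part of this commit): a run
queue is now a tailq of threads linked through td_runq, so walking one no
longer touches td_sched at all.

	static void
	example_scan(struct rqhead *rqh)
	{
		struct thread *td;

		TAILQ_FOREACH(td, rqh, td_runq)
			printf("td %p pri %d rqindex %d\n",
			    td, td->td_priority, td->td_rqindex);
	}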

sys/sys/sysctl.h

@@ -631,6 +631,7 @@ SYSCTL_DECL(_kern);
 SYSCTL_DECL(_kern_features);
 SYSCTL_DECL(_kern_ipc);
 SYSCTL_DECL(_kern_proc);
+SYSCTL_DECL(_kern_sched);
 SYSCTL_DECL(_sysctl);
 SYSCTL_DECL(_vm);
 SYSCTL_DECL(_vm_stats);