- Remove setrunqueue and replace it with direct calls to sched_add().
  setrunqueue() was mostly empty.  The few asserts and thread state
  setting were moved to the individual schedulers.  sched_add() was
  chosen to displace it for naming consistency reasons.
- Remove adjustrunqueue, it was 4 lines of code that was ifdef'd to be
  different on all three schedulers where it was only called in one
  place each.
- Remove the long ifdef'd out remrunqueue code.
- Remove the now redundant ts_state.  Inspect the thread state directly.
- Don't set TSF_* flags from kern_switch.c, we were only doing this to
  support a feature in one scheduler.
- Change sched_choose() to return a thread rather than a td_sched.  Also,
  rely on the schedulers to return the idlethread.  This simplifies the
  logic in choosethread().  Aside from the run queue links kern_switch.c
  mostly does not care about the contents of td_sched.

Discussed with:	julian

- Move the idle thread loop into the per scheduler area.  ULE wants to
  do something different from the other schedulers.

Suggested by:	jhb
Tested on:	x86/amd64 sched_{4BSD, ULE, CORE}.
This commit is contained in:
parent 3c93ca7d2f
commit f0393f063a
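The mechanical part of the change repeats throughout the diff below: every former setrunqueue(td, flags) call site becomes sched_add(td, flags), and callers still set the thread state (TD_SET_CAN_RUN, TD_CLR_IWAIT) before handing the thread to the scheduler.  The following is a minimal userspace sketch, not kernel code, of what the removed wrapper used to do and why the direct call is equivalent; the thread and state types here are simplified stand-ins.

/*
 * Toy model: the assert and TD_SET_RUNQ() transition that setrunqueue()
 * used to perform now live in the scheduler's sched_add() itself.
 */
#include <assert.h>
#include <stdio.h>

enum td_state { TDS_CAN_RUN, TDS_RUNQ, TDS_RUNNING };

struct thread {
	const char	*td_name;
	enum td_state	 td_state;
};

#define	SRQ_BORING	0x0000	/* No special circumstances. */

static void
sched_add(struct thread *td, int flags)
{
	/* Formerly asserted in setrunqueue(). */
	assert(td->td_state == TDS_CAN_RUN || td->td_state == TDS_RUNNING);
	td->td_state = TDS_RUNQ;	/* formerly TD_SET_RUNQ() in setrunqueue() */
	printf("enqueued %s (flags 0x%x)\n", td->td_name, flags);
}

int
main(void)
{
	struct thread td2 = { "child", TDS_CAN_RUN };

	/* Call sites do exactly what they did before, minus the wrapper. */
	sched_add(&td2, SRQ_BORING);
	return (0);
}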
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/proc.h>
 #include <sys/resource.h>
 #include <sys/resourcevar.h>
+#include <sys/sched.h>
 #include <sys/syscallsubr.h>
 #include <sys/sysproto.h>
 #include <sys/unistd.h>
@@ -480,7 +481,7 @@ linux_fork(struct thread *td, struct linux_fork_args *args)
 	/* make it run */
 	mtx_lock_spin(&sched_lock);
 	TD_SET_CAN_RUN(td2);
-	setrunqueue(td2, SRQ_BORING);
+	sched_add(td2, SRQ_BORING);
 	mtx_unlock_spin(&sched_lock);
 
 	return (0);
@@ -521,7 +522,7 @@ linux_vfork(struct thread *td, struct linux_vfork_args *args)
 	/* make it run */
 	mtx_lock_spin(&sched_lock);
 	TD_SET_CAN_RUN(td2);
-	setrunqueue(td2, SRQ_BORING);
+	sched_add(td2, SRQ_BORING);
 	mtx_unlock_spin(&sched_lock);
 
 	/* wait for the children to exit, ie. emulate vfork */
@@ -672,7 +673,7 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
 	 */
 	mtx_lock_spin(&sched_lock);
 	TD_SET_CAN_RUN(td2);
-	setrunqueue(td2, SRQ_BORING);
+	sched_add(td2, SRQ_BORING);
 	mtx_unlock_spin(&sched_lock);
 
 	td->td_retval[0] = p2->p_pid;
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sysproto.h>
 #include <sys/unistd.h>
 #include <sys/wait.h>
+#include <sys/sched.h>
 
 #include <machine/frame.h>
 #include <machine/psl.h>
@@ -326,7 +327,7 @@ linux_fork(struct thread *td, struct linux_fork_args *args)
 	 */
 	mtx_lock_spin(&sched_lock);
 	TD_SET_CAN_RUN(td2);
-	setrunqueue(td2, SRQ_BORING);
+	sched_add(td2, SRQ_BORING);
 	mtx_unlock_spin(&sched_lock);
 
 	return (0);
@@ -369,7 +370,7 @@ linux_vfork(struct thread *td, struct linux_vfork_args *args)
 	 */
 	mtx_lock_spin(&sched_lock);
 	TD_SET_CAN_RUN(td2);
-	setrunqueue(td2, SRQ_BORING);
+	sched_add(td2, SRQ_BORING);
 	mtx_unlock_spin(&sched_lock);
 
 	/* wait for the children to exit, ie. emulate vfork */
@@ -566,7 +567,7 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
 	 */
 	mtx_lock_spin(&sched_lock);
 	TD_SET_CAN_RUN(td2);
-	setrunqueue(td2, SRQ_BORING);
+	sched_add(td2, SRQ_BORING);
 	mtx_unlock_spin(&sched_lock);
 
 	td->td_retval[0] = p2->p_pid;
@@ -730,7 +730,7 @@ kick_init(const void *udata __unused)
 	td = FIRST_THREAD_IN_PROC(initproc);
 	mtx_lock_spin(&sched_lock);
 	TD_SET_CAN_RUN(td);
-	setrunqueue(td, SRQ_BORING);
+	sched_add(td, SRQ_BORING);
 	mtx_unlock_spin(&sched_lock);
 }
 SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)
@@ -704,7 +704,7 @@ fork1(td, flags, pages, procp)
 	 */
 	if ((flags & RFSTOPPED) == 0) {
 		TD_SET_CAN_RUN(td2);
-		setrunqueue(td2, SRQ_BORING);
+		sched_add(td2, SRQ_BORING);
 	}
 	mtx_unlock_spin(&sched_lock);
 
@@ -43,8 +43,6 @@ __FBSDID("$FreeBSD$");
 static void idle_setup(void *dummy);
 SYSINIT(idle_setup, SI_SUB_SCHED_IDLE, SI_ORDER_FIRST, idle_setup, NULL)
 
-static void idle_proc(void *dummy);
-
 /*
  * Set up per-cpu idle process contexts. The AP's shouldn't be running or
  * accessing their idle processes at this point, so don't bother with
@@ -62,11 +60,11 @@ idle_setup(void *dummy)
 
 #ifdef SMP
 	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
-		error = kthread_create(idle_proc, NULL, &p,
+		error = kthread_create(sched_idletd, NULL, &p,
 		    RFSTOPPED | RFHIGHPID, 0, "idle: cpu%d", pc->pc_cpuid);
 		pc->pc_idlethread = FIRST_THREAD_IN_PROC(p);
 #else
-	error = kthread_create(idle_proc, NULL, &p,
+	error = kthread_create(sched_idletd, NULL, &p,
 	    RFSTOPPED | RFHIGHPID, 0, "idle");
 	PCPU_SET(idlethread, FIRST_THREAD_IN_PROC(p));
 #endif
@@ -87,41 +85,3 @@ idle_setup(void *dummy)
 	}
 #endif
 }
-
-/*
- * The actual idle process.
- */
-static void
-idle_proc(void *dummy)
-{
-	struct proc *p;
-	struct thread *td;
-#ifdef SMP
-	cpumask_t mycpu;
-#endif
-
-	td = curthread;
-	p = td->td_proc;
-#ifdef SMP
-	mycpu = PCPU_GET(cpumask);
-	mtx_lock_spin(&sched_lock);
-	idle_cpus_mask |= mycpu;
-	mtx_unlock_spin(&sched_lock);
-#endif
-	for (;;) {
-		mtx_assert(&Giant, MA_NOTOWNED);
-
-		while (sched_runnable() == 0)
-			cpu_idle();
-
-		mtx_lock_spin(&sched_lock);
-#ifdef SMP
-		idle_cpus_mask &= ~mycpu;
-#endif
-		mi_switch(SW_VOL, NULL);
-#ifdef SMP
-		idle_cpus_mask |= mycpu;
-#endif
-		mtx_unlock_spin(&sched_lock);
-	}
-}
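With the generic idle_proc() removed above, each scheduler now supplies its own sched_idletd(), so ULE is free to implement a different idle policy.  Below is a sketch only, with the kernel primitives (sched_runnable(), cpu_idle(), mi_switch()) replaced by trivial stand-ins, showing the shape of the loop in isolation.

#include <stdio.h>

static int fake_runnable_count = 3;	/* stand-in for the run queue */

static int
sched_runnable(void)
{
	return (fake_runnable_count > 0);
}

static void
cpu_idle(void)
{
	/* The real routine halts the CPU until an interrupt arrives. */
}

static void
mi_switch(void)
{
	/* The real routine context-switches to the chosen thread. */
	printf("switching to a runnable thread\n");
	fake_runnable_count--;
}

static void
sched_idletd(void *dummy)
{
	int iterations;

	(void)dummy;
	/* Bounded here only so the sketch terminates; the kernel loops forever. */
	for (iterations = 0; iterations < 3; iterations++) {
		while (sched_runnable() == 0)
			cpu_idle();
		mi_switch();
	}
}

int
main(void)
{
	sched_idletd(NULL);
	return (0);
}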
@@ -317,7 +317,7 @@ ithread_destroy(struct intr_thread *ithread)
 	ithread->it_flags |= IT_DEAD;
 	if (TD_AWAITING_INTR(td)) {
 		TD_CLR_IWAIT(td);
-		setrunqueue(td, SRQ_INTR);
+		sched_add(td, SRQ_INTR);
 	}
 	mtx_unlock_spin(&sched_lock);
 }
@@ -552,7 +552,7 @@ intr_event_schedule_thread(struct intr_event *ie)
 		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
 		    p->p_comm);
 		TD_CLR_IWAIT(td);
-		setrunqueue(td, SRQ_INTR);
+		sched_add(td, SRQ_INTR);
 	} else {
 		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
 		    __func__, p->p_pid, p->p_comm, it->it_need, td->td_state);
@@ -710,7 +710,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
 		 */
 		if (newtd != td) {
 			mtx_lock_spin(&sched_lock);
-			setrunqueue(newtd, SRQ_BORING);
+			sched_add(newtd, SRQ_BORING);
 			mtx_unlock_spin(&sched_lock);
 		}
 	} else {
@@ -745,7 +745,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
 		}
 		PROC_UNLOCK(p);
 		mtx_lock_spin(&sched_lock);
-		setrunqueue(newtd, SRQ_BORING);
+		sched_add(newtd, SRQ_BORING);
 		mtx_unlock_spin(&sched_lock);
 	}
 }
@@ -1111,7 +1111,7 @@ thread_switchout(struct thread *td, int flags, struct thread *nextthread)
 	td->td_pflags &= ~TDP_CAN_UNBIND;
 	td2 = thread_schedule_upcall(td, ku);
 	if (flags & SW_INVOL || nextthread) {
-		setrunqueue(td2, SRQ_YIELDING);
+		sched_add(td2, SRQ_YIELDING);
 	} else {
 		/* Keep up with reality.. we have one extra thread
 		 * in the picture.. and it's 'running'.
@@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sx.h>
 #include <sys/unistd.h>
 #include <sys/wait.h>
+#include <sys/sched.h>
 
 #include <machine/stdarg.h>
 
@@ -113,7 +114,7 @@ kthread_create(void (*func)(void *), void *arg,
 	/* Delay putting it on the run queue until now. */
 	if (!(flags & RFSTOPPED)) {
 		mtx_lock_spin(&sched_lock);
-		setrunqueue(td, SRQ_BORING);
+		sched_add(td, SRQ_BORING);
 		mtx_unlock_spin(&sched_lock);
 	}
 
@@ -86,34 +86,20 @@ SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
 struct thread *
 choosethread(void)
 {
-	struct td_sched *ts;
 	struct thread *td;
 
 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
 	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
 		/* Shutting down, run idlethread on AP's */
 		td = PCPU_GET(idlethread);
-		ts = td->td_sched;
 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
-		ts->ts_flags |= TSF_DIDRUN;
 		TD_SET_RUNNING(td);
 		return (td);
 	}
 #endif
 
 retry:
-	ts = sched_choose();
-	if (ts) {
-		td = ts->ts_thread;
-		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
-		    td, td->td_priority);
-	} else {
-		/* Simulate runq_choose() having returned the idle thread */
-		td = PCPU_GET(idlethread);
-		ts = td->td_sched;
-		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
-	}
-	ts->ts_flags |= TSF_DIDRUN;
+	td = sched_choose();
 
 	/*
 	 * If we are in panic, only allow system threads,
@@ -130,69 +116,6 @@ choosethread(void)
 	return (td);
 }
 
-
-#if 0
-/*
- * currently not used.. threads remove themselves from the
- * run queue by running.
- */
-static void
-remrunqueue(struct thread *td)
-{
-	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
-	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
-	TD_SET_CAN_RUN(td);
-	/* remove from sys run queue */
-	sched_rem(td);
-	return;
-}
-#endif
-
-/*
- * Change the priority of a thread that is on the run queue.
- */
-void
-adjustrunqueue( struct thread *td, int newpri)
-{
-	struct td_sched *ts;
-
-	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
-
-	ts = td->td_sched;
-	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
-	/* We only care about the td_sched in the run queue. */
-	td->td_priority = newpri;
-#ifndef SCHED_CORE
-	if (ts->ts_rqindex != (newpri / RQ_PPQ))
-#else
-	if (ts->ts_rqindex != newpri)
-#endif
-	{
-		sched_rem(td);
-		sched_add(td, SRQ_BORING);
-	}
-}
-
-void
-setrunqueue(struct thread *td, int flags)
-{
-
-	CTR2(KTR_RUNQ, "setrunqueue: td:%p pid:%d",
-	    td, td->td_proc->p_pid);
-	CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
-	    td, td->td_proc->p_comm, td->td_priority, curthread,
-	    curthread->td_proc->p_comm);
-	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT((td->td_inhibitors == 0),
-	    ("setrunqueue: trying to run inhibited thread"));
-	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
-	    ("setrunqueue: bad thread state"));
-	TD_SET_RUNQ(td);
-	sched_add(td, flags);
-}
-
 /*
  * Kernel thread preemption implementation. Critical sections mark
  * regions of code in which preemptions are not allowed.
@@ -283,7 +206,7 @@ maybe_preempt(struct thread *td)
 	pri = td->td_priority;
 	cpri = ctd->td_priority;
 	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
-	    TD_IS_INHIBITED(ctd) || td->td_sched->ts_state != TSS_THREAD)
+	    TD_IS_INHIBITED(ctd))
 		return (0);
 #ifndef FULL_PREEMPTION
 	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
@@ -301,7 +224,6 @@ maybe_preempt(struct thread *td)
 	 * Thread is runnable but not yet put on system run queue.
 	 */
 	MPASS(TD_ON_RUNQ(td));
-	MPASS(td->td_sched->ts_state != TSS_ONRUNQ);
 	TD_SET_RUNNING(td);
 	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
 	    td->td_proc->p_pid, td->td_proc->p_comm);
@@ -579,7 +501,7 @@ runq_choose_from(struct runq *rq, int idx)
 /*
  * Remove the thread from the queue specified by its priority, and clear the
  * corresponding status bit if the queue becomes empty.
- * Caller must set ts->ts_state afterwards.
+ * Caller must set state afterwards.
  */
 void
 runq_remove(struct runq *rq, struct td_sched *ts)
@@ -642,7 +564,6 @@ sched_newthread(struct thread *td)
 	bzero(ts, sizeof(*ts));
 	td->td_sched = ts;
 	ts->ts_thread = td;
-	ts->ts_state = TSS_THREAD;
 }
 
 /*
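Because sched_choose() now returns a struct thread * and falls back to the idle thread itself, choosethread() above no longer needs a NULL check or any knowledge of td_sched.  A minimal userspace sketch of that contract, with toy stand-ins for the run queue and idle thread:

#include <stdio.h>

struct thread {
	const char *td_name;
};

static struct thread idlethread = { "idle" };
static struct thread *runq[4];	/* toy run queue */
static int runq_len;

/* Models the new contract: never returns NULL. */
static struct thread *
sched_choose(void)
{
	if (runq_len > 0)
		return (runq[--runq_len]);
	return (&idlethread);
}

static struct thread *
choosethread(void)
{
	struct thread *td;

	td = sched_choose();	/* no NULL/idle special case here anymore */
	return (td);
}

int
main(void)
{
	struct thread a = { "worker" };

	runq[runq_len++] = &a;
	printf("%s\n", choosethread()->td_name);	/* worker */
	printf("%s\n", choosethread()->td_name);	/* idle */
	return (0);
}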
@@ -241,7 +241,7 @@ create_thread(struct thread *td, mcontext_t *ctx,
 	}
 	TD_SET_CAN_RUN(newtd);
 	/* if ((flags & THR_SUSPENDED) == 0) */
-		setrunqueue(newtd, SRQ_BORING);
+		sched_add(newtd, SRQ_BORING);
 	mtx_unlock_spin(&sched_lock);
 
 	return (error);
@@ -83,10 +83,6 @@ struct td_sched {
 	struct thread	*ts_thread;	/* (*) Active associated thread. */
 	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
 	u_char		ts_rqindex;	/* (j) Run queue index. */
-	enum {
-		TSS_THREAD = 0x0,	/* slaved to thread state */
-		TSS_ONRUNQ
-	} ts_state;			/* (j) TD_STAT in scheduler status. */
 	int		ts_cpticks;	/* (j) Ticks of cpu time. */
 	struct runq	*ts_runq;	/* runq the thread is currently on */
 };
@@ -112,8 +108,6 @@ static int sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
 
 static struct callout roundrobin_callout;
 
-static struct td_sched *sched_choose(void);
-
 static void	setup_runqs(void);
 static void	roundrobin(void *arg);
 static void	schedcpu(void);
@@ -404,11 +398,10 @@ schedcpu(void)
			 * because the thread may not HAVE everything in
			 * memory? XXX I think this is out of date.
			 */
-			if (ts->ts_state == TSS_ONRUNQ) {
+			if (TD_ON_RUNQ(td)) {
				awake = 1;
				ts->ts_flags &= ~TSF_DIDRUN;
-			} else if ((ts->ts_state == TSS_THREAD) &&
-			    (TD_IS_RUNNING(td))) {
+			} else if (TD_IS_RUNNING(td)) {
				awake = 1;
				/* Do not clear TSF_DIDRUN */
			} else if (ts->ts_flags & TSF_DIDRUN) {
@@ -584,7 +577,6 @@ schedinit(void)
 	proc0.p_sched = NULL; /* XXX */
 	thread0.td_sched = &td_sched0;
 	td_sched0.ts_thread = &thread0;
-	td_sched0.ts_state = TSS_THREAD;
 }
 
 int
@@ -709,10 +701,11 @@ sched_priority(struct thread *td, u_char prio)
 	mtx_assert(&sched_lock, MA_OWNED);
 	if (td->td_priority == prio)
 		return;
-	if (TD_ON_RUNQ(td)) {
-		adjustrunqueue(td, prio);
-	} else {
-		td->td_priority = prio;
+	td->td_priority = prio;
+	if (TD_ON_RUNQ(td) &&
+	    td->td_sched->ts_rqindex != (prio / RQ_PPQ)) {
+		sched_rem(td);
+		sched_add(td, SRQ_BORING);
 	}
 }
 
@@ -878,7 +871,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	else {
 		if (TD_IS_RUNNING(td)) {
 			/* Put us back on the run queue. */
-			setrunqueue(td, (flags & SW_PREEMPT) ?
+			sched_add(td, (flags & SW_PREEMPT) ?
 			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
 			    SRQ_OURSELF|SRQ_YIELDING);
 		}
@@ -928,7 +921,7 @@ sched_wakeup(struct thread *td)
 		resetpriority(td);
 	}
 	td->td_slptime = 0;
-	setrunqueue(td, SRQ_BORING);
+	sched_add(td, SRQ_BORING);
 }
 
 #ifdef SMP
@@ -1065,15 +1058,16 @@ sched_add(struct thread *td, int flags)
 
 	ts = td->td_sched;
 	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT(ts->ts_state != TSS_ONRUNQ,
-	    ("sched_add: td_sched %p (%s) already in run queue", ts,
-	    td->td_proc->p_comm));
+	KASSERT((td->td_inhibitors == 0),
+	    ("sched_add: trying to run inhibited thread"));
+	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
+	    ("sched_add: bad thread state"));
 	KASSERT(td->td_proc->p_sflag & PS_INMEM,
 	    ("sched_add: process swapped out"));
 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
 	    td, td->td_proc->p_comm, td->td_priority, curthread,
 	    curthread->td_proc->p_comm);
-
+
+	TD_SET_RUNQ(td);
 
 	if (td->td_pinned != 0) {
 		cpu = td->td_lastcpu;
@@ -1119,21 +1113,22 @@ sched_add(struct thread *td, int flags)
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_add();
 	runq_add(ts->ts_runq, ts, flags);
-	ts->ts_state = TSS_ONRUNQ;
 }
 #else /* SMP */
 {
 	struct td_sched *ts;
 	ts = td->td_sched;
 	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT(ts->ts_state != TSS_ONRUNQ,
-	    ("sched_add: td_sched %p (%s) already in run queue", ts,
-	    td->td_proc->p_comm));
+	KASSERT((td->td_inhibitors == 0),
+	    ("sched_add: trying to run inhibited thread"));
+	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
+	    ("sched_add: bad thread state"));
 	KASSERT(td->td_proc->p_sflag & PS_INMEM,
 	    ("sched_add: process swapped out"));
 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
 	    td, td->td_proc->p_comm, td->td_priority, curthread,
 	    curthread->td_proc->p_comm);
+	TD_SET_RUNQ(td);
 	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
 	ts->ts_runq = &runq;
 
@@ -1155,7 +1150,6 @@ sched_add(struct thread *td, int flags)
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_add();
 	runq_add(ts->ts_runq, ts, flags);
-	ts->ts_state = TSS_ONRUNQ;
 	maybe_resched(td);
 }
 #endif /* SMP */
@@ -1168,7 +1162,7 @@ sched_rem(struct thread *td)
 	ts = td->td_sched;
 	KASSERT(td->td_proc->p_sflag & PS_INMEM,
 	    ("sched_rem: process swapped out"));
-	KASSERT((ts->ts_state == TSS_ONRUNQ),
+	KASSERT(TD_ON_RUNQ(td),
 	    ("sched_rem: thread not on run queue"));
 	mtx_assert(&sched_lock, MA_OWNED);
 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
@@ -1178,15 +1172,14 @@ sched_rem(struct thread *td)
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_rem();
 	runq_remove(ts->ts_runq, ts);
-
-	ts->ts_state = TSS_THREAD;
 	TD_SET_CAN_RUN(td);
 }
 
 /*
  * Select threads to run.
  * Notice that the running threads still consume a slot.
  */
-struct td_sched *
+struct thread *
 sched_choose(void)
 {
 	struct td_sched *ts;
@@ -1217,12 +1210,13 @@ sched_choose(void)
 
 	if (ts) {
 		runq_remove(rq, ts);
-		ts->ts_state = TSS_THREAD;
+		ts->ts_flags |= TSF_DIDRUN;
 
 		KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
 		    ("sched_choose: process swapped out"));
-	}
-	return (ts);
+		return (ts->ts_thread);
+	}
+	return (PCPU_GET(idlethread));
 }
 
 void
@@ -1264,8 +1258,6 @@ sched_bind(struct thread *td, int cpu)
 	if (PCPU_GET(cpuid) == cpu)
 		return;
 
-	ts->ts_state = TSS_THREAD;
-
 	mi_switch(SW_VOL, NULL);
 #endif
 }
@@ -1325,5 +1317,44 @@ void
 sched_tick(void)
 {
 }
+
+/*
+ * The actual idle process.
+ */
+void
+sched_idletd(void *dummy)
+{
+	struct proc *p;
+	struct thread *td;
+#ifdef SMP
+	cpumask_t mycpu;
+#endif
+
+	td = curthread;
+	p = td->td_proc;
+#ifdef SMP
+	mycpu = PCPU_GET(cpumask);
+	mtx_lock_spin(&sched_lock);
+	idle_cpus_mask |= mycpu;
+	mtx_unlock_spin(&sched_lock);
+#endif
+	for (;;) {
+		mtx_assert(&Giant, MA_NOTOWNED);
+
+		while (sched_runnable() == 0)
+			cpu_idle();
+
+		mtx_lock_spin(&sched_lock);
+#ifdef SMP
+		idle_cpus_mask &= ~mycpu;
+#endif
+		mi_switch(SW_VOL, NULL);
+#ifdef SMP
+		idle_cpus_mask |= mycpu;
+#endif
+		mtx_unlock_spin(&sched_lock);
+	}
+}
+
 #define KERN_SWITCH_INCLUDE 1
 #include "kern/kern_switch.c"
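The ts_state removal in this file follows from the thread state already recording run-queue membership: rather than mirroring TSS_ONRUNQ/TSS_THREAD in td_sched, the scheduler asks the thread directly with TD_ON_RUNQ()/TD_IS_RUNNING().  An illustrative stand-alone sketch of the single-source-of-truth check, with simplified state names mirroring the diff:

#include <assert.h>
#include <stdio.h>

enum td_state { TDS_CAN_RUN, TDS_RUNQ, TDS_RUNNING };

struct thread {
	enum td_state td_state;
};

#define	TD_ON_RUNQ(td)		((td)->td_state == TDS_RUNQ)
#define	TD_SET_CAN_RUN(td)	((td)->td_state = TDS_CAN_RUN)

/* Formerly: KASSERT(ts->ts_state == TSS_ONRUNQ, ...). */
static void
sched_rem(struct thread *td)
{
	assert(TD_ON_RUNQ(td) && "sched_rem: thread not on run queue");
	/* ... dequeue from the run queue ... */
	TD_SET_CAN_RUN(td);
}

int
main(void)
{
	struct thread td = { TDS_RUNQ };

	sched_rem(&td);
	printf("state after sched_rem: %d\n", td.td_state);
	return (0);
}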
@@ -189,10 +189,6 @@ struct td_sched {
 	int		ts_flags;	/* (j) TSF_* flags. */
 	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
 	u_char		ts_rqindex;	/* (j) Run queue index. */
-	enum {
-		TSS_THREAD = 0x0,	/* slaved to thread state */
-		TSS_ONRUNQ
-	} ts_state;			/* (j) thread sched specific status. */
 	int		ts_slice;	/* Time slice in ticks */
 	struct kseq	*ts_kseq;	/* Kseq the thread belongs to */
 	struct krunq	*ts_runq;	/* Assiociated runqueue */
@@ -350,7 +346,6 @@ static void kseq_runq_rem(struct kseq *, struct td_sched *);
 static void kseq_setup(struct kseq *);
 
 static int sched_is_timeshare(struct thread *td);
-static struct td_sched *sched_choose(void);
 static int sched_calc_pri(struct td_sched *ts);
 static int sched_starving(struct kseq *, unsigned, struct td_sched *);
 static void sched_pctcpu_update(struct td_sched *);
@@ -501,7 +496,7 @@ krunq_choose(struct krunq *rq)
 /*
  * Remove the KSE from the queue specified by its priority, and clear the
  * corresponding status bit if the queue becomes empty.
- * Caller must set ts->ts_state afterwards.
+ * Caller must set state afterwards.
 */
 static void
 krunq_remove(struct krunq *rq, struct td_sched *ts)
@@ -790,7 +785,6 @@ schedinit(void)
 	proc0.p_sched = NULL; /* XXX */
 	thread0.td_sched = &kse0;
 	kse0.ts_thread = &thread0;
-	kse0.ts_state = TSS_THREAD;
 	kse0.ts_slice = 100;
 }
 
@@ -842,6 +836,8 @@ sched_thread_priority(struct thread *td, u_char prio)
 	 * propagation, we may have to move ourselves to a new
 	 * queue. We still call adjustrunqueue below in case td_sched
 	 * needs to fix things up.
+	 *
+	 * XXX td_priority is never set here.
 	 */
 	if (prio < td->td_priority && ts->ts_runq != NULL &&
 	    ts->ts_runq != ts->ts_kseq->ksq_curr) {
@@ -849,7 +845,11 @@ sched_thread_priority(struct thread *td, u_char prio)
 			ts->ts_runq = ts->ts_kseq->ksq_curr;
 			krunq_add(ts->ts_runq, ts);
 		}
-		adjustrunqueue(td, prio);
+		if (ts->ts_rqindex != prio) {
+			sched_rem(td);
+			td->td_priority = prio;
+			sched_add(td, SRQ_BORING);
+		}
 	} else
 		td->td_priority = prio;
 }
@@ -990,7 +990,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 		/* We are ending our run so make our slot available again */
 		kseq_load_rem(ksq, ts);
 		if (TD_IS_RUNNING(td)) {
-			setrunqueue(td, (flags & SW_PREEMPT) ?
+			sched_add(td, (flags & SW_PREEMPT) ?
 			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
 			    SRQ_OURSELF|SRQ_YIELDING);
 		} else {
@@ -1084,7 +1084,7 @@ sched_wakeup(struct thread *td)
 			sched_user_prio(td, sched_recalc_pri(ts, now));
 		}
 	}
-	setrunqueue(td, SRQ_BORING);
+	sched_add(td, SRQ_BORING);
 }
 
 /*
@@ -1344,7 +1344,7 @@ sched_userret(struct thread *td)
 	}
 }
 
-struct td_sched *
+struct thread *
 sched_choose(void)
 {
 	struct td_sched *ts;
@@ -1371,12 +1371,11 @@ sched_choose(void)
 
 	if (ts != NULL) {
 		kseq_runq_rem(kseq, ts);
-		ts->ts_state = TSS_THREAD;
 		ts->ts_flags &= ~TSF_PREEMPTED;
 		ts->ts_timestamp = sched_timestamp();
+		return (ts->ts_thread);
 	}
-
-	return (ts);
+	return (PCPU_GET(idlethread));
 }
 
 #ifdef SMP
@@ -1481,11 +1480,16 @@ sched_add(struct thread *td, int flags)
 #endif
 
 	mtx_assert(&sched_lock, MA_OWNED);
+	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
+	    td, td->td_proc->p_comm, td->td_priority, curthread,
+	    curthread->td_proc->p_comm);
+	KASSERT((td->td_inhibitors == 0),
+	    ("sched_add: trying to run inhibited thread"));
+	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
+	    ("sched_add: bad thread state"));
+	TD_SET_RUNQ(td);
 	mytd = curthread;
 	ts = td->td_sched;
-	KASSERT(ts->ts_state != TSS_ONRUNQ,
-	    ("sched_add: td_sched %p (%s) already in run queue", ts,
-	    ts->ts_proc->p_comm));
 	KASSERT(ts->ts_proc->p_sflag & PS_INMEM,
 	    ("sched_add: process swapped out"));
 	KASSERT(ts->ts_runq == NULL,
@@ -1559,7 +1563,6 @@ sched_add(struct thread *td, int flags)
 		need_resched = TDF_NEEDRESCHED;
 	}
 
-	ts->ts_state = TSS_ONRUNQ;
 	kseq_runq_add(ksq, ts);
 	kseq_load_add(ksq, ts);
 
@@ -1602,13 +1605,13 @@ sched_rem(struct thread *td)
 
 	mtx_assert(&sched_lock, MA_OWNED);
 	ts = td->td_sched;
-	KASSERT((ts->ts_state == TSS_ONRUNQ),
+	KASSERT(TD_ON_RUNQ(td),
 	    ("sched_rem: KSE not on run queue"));
 
 	kseq = ts->ts_kseq;
 	kseq_runq_rem(kseq, ts);
 	kseq_load_rem(kseq, ts);
-	ts->ts_state = TSS_THREAD;
+	TD_SET_CAN_RUN(td);
 }
 
 fixpt_t
@@ -1705,5 +1708,44 @@ sched_sizeof_thread(void)
 {
 	return (sizeof(struct thread) + sizeof(struct td_sched));
 }
+
+/*
+ * The actual idle process.
+ */
+void
+sched_idletd(void *dummy)
+{
+	struct proc *p;
+	struct thread *td;
+#ifdef SMP
+	cpumask_t mycpu;
+#endif
+
+	td = curthread;
+	p = td->td_proc;
+#ifdef SMP
+	mycpu = PCPU_GET(cpumask);
+	mtx_lock_spin(&sched_lock);
+	idle_cpus_mask |= mycpu;
+	mtx_unlock_spin(&sched_lock);
+#endif
+	for (;;) {
+		mtx_assert(&Giant, MA_NOTOWNED);
+
+		while (sched_runnable() == 0)
+			cpu_idle();
+
+		mtx_lock_spin(&sched_lock);
+#ifdef SMP
+		idle_cpus_mask &= ~mycpu;
+#endif
+		mi_switch(SW_VOL, NULL);
+#ifdef SMP
+		idle_cpus_mask |= mycpu;
+#endif
+		mtx_unlock_spin(&sched_lock);
+	}
+}
+
 #define KERN_SWITCH_INCLUDE 1
 #include "kern/kern_switch.c"
@@ -355,7 +355,7 @@ taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
 			continue;
 		td = FIRST_THREAD_IN_PROC(tq->tq_pproc[i]);
 		sched_prio(td, pri);
-		setrunqueue(td, SRQ_BORING);
+		sched_add(td, SRQ_BORING);
 	}
 	mtx_unlock_spin(&sched_lock);
 
@@ -875,7 +875,7 @@ turnstile_unpend(struct turnstile *ts, int owner_type)
 #endif
 			TD_CLR_LOCK(td);
 			MPASS(TD_CAN_RUN(td));
-			setrunqueue(td, SRQ_BORING);
+			sched_add(td, SRQ_BORING);
 		} else {
 			td->td_flags |= TDF_TSNOBLOCK;
 			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
@@ -645,14 +645,6 @@ struct proc {
 #define	SW_INVOL	0x0002		/* Involuntary switch. */
 #define	SW_PREEMPT	0x0004		/* The invol switch is a preemption */
 
-/* Flags for setrunqueue(). Why are we setting this thread on the run queue? */
-#define	SRQ_BORING	0x0000		/* No special circumstances. */
-#define	SRQ_YIELDING	0x0001		/* We are yielding (from mi_switch). */
-#define	SRQ_OURSELF	0x0002		/* It is ourself (from mi_switch). */
-#define	SRQ_INTR	0x0004		/* It is probably urgent. */
-#define	SRQ_PREEMPTED	0x0008		/* has been preempted.. be kind */
-#define	SRQ_BORROWING	0x0010		/* Priority updated due to prio_lend */
-
 /* How values for thread_single(). */
 #define	SINGLE_NO_EXIT	0
 #define	SINGLE_EXIT	1
@@ -809,7 +801,6 @@ struct proc *pfind(pid_t);	/* Find process by id. */
 struct pgrp *pgfind(pid_t);	/* Find process group by id. */
 struct proc *zpfind(pid_t);	/* Find zombie process by id. */
 
-void	adjustrunqueue(struct thread *, int newpri);
 void	ast(struct trapframe *framep);
 struct thread *choosethread(void);
 int	cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
@@ -846,7 +837,6 @@ int	securelevel_ge(struct ucred *cr, int level);
 int	securelevel_gt(struct ucred *cr, int level);
 void	sessrele(struct session *);
 void	setrunnable(struct thread *);
-void	setrunqueue(struct thread *, int flags);
 void	setsugid(struct proc *p);
 int	sigonstack(size_t sp);
 void	sleepinit(void);
@@ -115,6 +115,8 @@ void	sched_clock(struct thread *td);
 void	sched_rem(struct thread *td);
 void	sched_tick(void);
 void	sched_relinquish(struct thread *td);
+struct thread *sched_choose(void);
+void	sched_idletd(void *);
 
 /*
  * Binding makes cpu affinity permanent while pinning is used to temporarily
@@ -145,6 +147,15 @@ sched_unpin(void)
 	curthread->td_pinned--;
 }
 
+/* sched_add arguments (formerly setrunqueue) */
+#define	SRQ_BORING	0x0000		/* No special circumstances. */
+#define	SRQ_YIELDING	0x0001		/* We are yielding (from mi_switch). */
+#define	SRQ_OURSELF	0x0002		/* It is ourself (from mi_switch). */
+#define	SRQ_INTR	0x0004		/* It is probably urgent. */
+#define	SRQ_PREEMPTED	0x0008		/* has been preempted.. be kind */
+#define	SRQ_BORROWING	0x0010		/* Priority updated due to prio_lend */
+
+
 /* temporarily here */
 void schedinit(void);
 void sched_init_concurrency(struct proc *p);
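The SRQ_* flags move from sys/proc.h to sys/sched.h because they are now arguments to sched_add() rather than setrunqueue(); callers pass the same bitmask combinations as before.  A small illustrative example (the decode helper is invented for the example only):

#include <stdio.h>

#define	SRQ_BORING	0x0000	/* No special circumstances. */
#define	SRQ_YIELDING	0x0001	/* We are yielding (from mi_switch). */
#define	SRQ_OURSELF	0x0002	/* It is ourself (from mi_switch). */
#define	SRQ_INTR	0x0004	/* It is probably urgent. */
#define	SRQ_PREEMPTED	0x0008	/* has been preempted.. be kind */

static void
describe_srq(int flags)
{
	printf("flags 0x%x:%s%s%s%s\n", flags,
	    (flags & SRQ_YIELDING) ? " yielding" : "",
	    (flags & SRQ_OURSELF) ? " ourself" : "",
	    (flags & SRQ_INTR) ? " intr" : "",
	    (flags & SRQ_PREEMPTED) ? " preempted" : "");
}

int
main(void)
{
	/* The combination sched_switch() uses when preempted. */
	describe_srq(SRQ_OURSELF | SRQ_YIELDING | SRQ_PREEMPTED);
	/* The combination used when scheduling an interrupt thread. */
	describe_srq(SRQ_INTR);
	return (0);
}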
@@ -765,9 +765,9 @@ void kick_proc0(void)
 
 
 	if (TD_AWAITING_INTR(td)) {
-		CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, 0);
+		CTR2(KTR_INTR, "%s: sched_add %d", __func__, 0);
 		TD_CLR_IWAIT(td);
-		setrunqueue(td, SRQ_INTR);
+		sched_add(td, SRQ_INTR);
 	} else {
 		proc0_rescan = 1;
 		CTR2(KTR_INTR, "%s: state %d",
@@ -181,7 +181,7 @@ pagezero_start(void __unused *arg)
 	td = FIRST_THREAD_IN_PROC(pagezero_proc);
 	sched_class(td, PRI_IDLE);
 	sched_prio(td, PRI_MAX_IDLE);
-	setrunqueue(td, SRQ_BORING);
+	sched_add(td, SRQ_BORING);
 	mtx_unlock_spin(&sched_lock);
 }
 SYSINIT(pagezero, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, pagezero_start, NULL)