Give setrunqueue() and sched_add() more of a clue as to where they
are coming from and what is expected from them.

MFC after:	2 days
Julian Elischer 2004-09-01 02:11:28 +00:00
parent b443062227
commit 2630e4c90c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=134586
16 changed files with 50 additions and 30 deletions

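A condensed view of the interface change, gathered from the sys/proc.h and
sys/sched.h hunks below (the struct thread forward declaration is only here
to keep the fragment self-contained):

/* New "why is this thread being put on a run queue?" hints (sys/proc.h). */
#define	SRQ_BORING	0x0000		/* No special circumstances */
#define	SRQ_YIELDING	0x0001		/* we are yielding (from mi_switch) */
#define	SRQ_OURSELF	0x0002		/* it is ourself (from mi_switch) */
#define	SRQ_INTR	0x0004		/* it is probably urgent */

struct thread;				/* defined in sys/proc.h */

/* The old prototypes took only the thread pointer. */
void	setrunqueue(struct thread *, int flags);
void	sched_add(struct thread *td, int flags);

Ordinary wakeups pass SRQ_BORING, the requeue done from sched_switch() for a
still-running thread passes SRQ_OURSELF|SRQ_YIELDING, and the interrupt-thread
paths in kern_intr.c pass SRQ_INTR.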
View File

@@ -180,7 +180,7 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
*/
mtx_lock_spin(&sched_lock);
TD_SET_CAN_RUN(td2);
setrunqueue(td2);
setrunqueue(td2, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = p2->p_pid;

View File

@@ -503,7 +503,7 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
*/
mtx_lock_spin(&sched_lock);
TD_SET_CAN_RUN(td2);
setrunqueue(td2);
setrunqueue(td2, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = p2->p_pid;

View File

@@ -365,7 +365,7 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
*/
mtx_lock_spin(&sched_lock);
TD_SET_CAN_RUN(td2);
setrunqueue(td2);
setrunqueue(td2, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = p2->p_pid;

View File

@@ -727,7 +727,7 @@ kick_init(const void *udata __unused)
td = FIRST_THREAD_IN_PROC(initproc);
mtx_lock_spin(&sched_lock);
TD_SET_CAN_RUN(td);
setrunqueue(td); /* XXXKSE */
setrunqueue(td, SRQ_BORING); /* XXXKSE */
mtx_unlock_spin(&sched_lock);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)

View File

@@ -710,7 +710,7 @@ fork1(td, flags, pages, procp)
*/
if ((flags & RFSTOPPED) == 0) {
TD_SET_CAN_RUN(td2);
setrunqueue(td2);
setrunqueue(td2, SRQ_BORING);
}
mtx_unlock_spin(&sched_lock);

View File

@@ -240,7 +240,7 @@ ithread_destroy(struct ithd *ithread)
mtx_lock_spin(&sched_lock);
if (TD_AWAITING_INTR(td)) {
TD_CLR_IWAIT(td);
setrunqueue(td);
setrunqueue(td, SRQ_INTR);
}
mtx_unlock_spin(&sched_lock);
mtx_unlock(&ithread->it_lock);
@@ -408,7 +408,7 @@ ithread_schedule(struct ithd *ithread)
if (TD_AWAITING_INTR(td)) {
CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
TD_CLR_IWAIT(td);
setrunqueue(td);
setrunqueue(td, SRQ_INTR);
} else {
CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",
__func__, p->p_pid, ithread->it_need, td->td_state);

View File

@@ -751,7 +751,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
*/
if (newtd != td) {
mtx_lock_spin(&sched_lock);
setrunqueue(newtd);
setrunqueue(newtd, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
}
return (0);
@@ -1113,7 +1113,7 @@ thread_switchout(struct thread *td)
td->td_upcall = NULL;
td->td_pflags &= ~TDP_CAN_UNBIND;
td2 = thread_schedule_upcall(td, ku);
setrunqueue(td2);
setrunqueue(td2, SRQ_YIELDING);
}
}

View File

@@ -114,7 +114,7 @@ kthread_create(void (*func)(void *), void *arg,
/* Delay putting it on the run queue until now. */
if (!(flags & RFSTOPPED)) {
mtx_lock_spin(&sched_lock);
setrunqueue(td);
setrunqueue(td, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
}

View File

@@ -214,7 +214,7 @@ kse_reassign(struct kse *ke)
td->td_kse = ke;
ke->ke_thread = td;
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
sched_add(td);
sched_add(td, SRQ_BORING);
return;
}
@@ -298,7 +298,7 @@ adjustrunqueue( struct thread *td, int newpri)
td->td_priority = newpri;
if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
sched_rem(td);
sched_add(td);
sched_add(td, SRQ_BORING);
}
return;
}
@@ -316,11 +316,11 @@ adjustrunqueue( struct thread *td, int newpri)
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
kg->kg_runnable--;
td->td_priority = newpri;
setrunqueue(td);
setrunqueue(td, SRQ_BORING);
}
void
setrunqueue(struct thread *td)
setrunqueue(struct thread *td, int flags)
{
struct kse *ke;
struct ksegrp *kg;
@@ -341,7 +341,7 @@ setrunqueue(struct thread *td)
* and the KSE is always already attached.
* Totally ignore the ksegrp run queue.
*/
sched_add(td);
sched_add(td, flags);
return;
}
@@ -436,7 +436,7 @@ setrunqueue(struct thread *td)
td2->td_kse = ke;
ke->ke_thread = td2;
}
sched_add(ke->ke_thread);
sched_add(ke->ke_thread, flags);
} else {
CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
td, td->td_ksegrp, td->td_proc->p_pid);

View File

@@ -182,7 +182,7 @@ thr_create(struct thread *td, struct thr_create_args *uap)
TD_SET_CAN_RUN(td0);
if ((uap->flags & THR_SUSPENDED) == 0)
setrunqueue(td0);
setrunqueue(td0, SRQ_BORING);
mtx_unlock_spin(&sched_lock);

View File

@@ -664,7 +664,7 @@ sched_switch(struct thread *td, struct thread *newtd)
TD_SET_CAN_RUN(td);
else if (TD_IS_RUNNING(td)) {
/* Put us back on the run queue (kse and all). */
setrunqueue(td);
setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
} else if (p->p_flag & P_SA) {
/*
* We will not be on the run queue. So we must be
@@ -691,11 +691,11 @@ sched_wakeup(struct thread *td)
if (kg->kg_slptime > 1)
updatepri(kg);
kg->kg_slptime = 0;
setrunqueue(td);
setrunqueue(td, SRQ_BORING);
}
void
sched_add(struct thread *td)
sched_add(struct thread *td, int flags)
{
struct kse *ke;
@@ -717,8 +717,13 @@ sched_add(struct thread *td)
*/
if (KSE_CAN_MIGRATE(ke) || ke->ke_runq == &runq_pcpu[PCPU_GET(cpuid)])
#endif
if (maybe_preempt(td))
return;
/*
* Don't try preempt if we are already switching.
* all hell might break loose.
*/
if ((flags & SRQ_YIELDING) == 0)
if (maybe_preempt(td))
return;
#ifdef SMP
if (KSE_CAN_MIGRATE(ke)) {

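The effect of SRQ_YIELDING in the sched_4bsd.c hunk above is easiest to see in
isolation. The following is a minimal user-space model, not kernel code: the
stub struct thread and the always-refusing maybe_preempt() are assumptions made
purely for illustration.

#include <stdio.h>

#define	SRQ_BORING	0x0000
#define	SRQ_YIELDING	0x0001
#define	SRQ_OURSELF	0x0002

struct thread {
	int	td_priority;		/* stand-in for the kernel structure */
};

static int
maybe_preempt(struct thread *td)
{
	printf("maybe_preempt: considered td at pri %d\n", td->td_priority);
	return (0);			/* model: never actually preempts */
}

static void
sched_add(struct thread *td, int flags)
{
	/*
	 * Mirrors the hunk above: a thread queued with SRQ_YIELDING is in
	 * the middle of mi_switch(), so attempting a preemption from here
	 * could re-enter the switch path.
	 */
	if ((flags & SRQ_YIELDING) == 0)
		if (maybe_preempt(td))
			return;
	printf("sched_add: queued (flags 0x%x)\n", flags);
}

int
main(void)
{
	struct thread td = { .td_priority = 100 };

	sched_add(&td, SRQ_BORING);			/* normal wakeup */
	sched_add(&td, SRQ_OURSELF | SRQ_YIELDING);	/* mi_switch() requeue */
	return (0);
}

Running it shows maybe_preempt() being consulted only for the SRQ_BORING case.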
View File

@@ -1183,7 +1183,7 @@ sched_switch(struct thread *td, struct thread *newtd)
* Don't allow the kse to migrate from a preemption.
*/
ke->ke_flags |= KEF_HOLD;
setrunqueue(td);
setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
} else {
if (ke->ke_runq) {
kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
@@ -1281,7 +1281,7 @@ sched_wakeup(struct thread *td)
td->td_kse, hzticks);
td->td_slptime = 0;
}
setrunqueue(td);
setrunqueue(td, SRQ_BORING);
}
/*
@@ -1581,10 +1581,19 @@ sched_choose(void)
}
void
sched_add(struct thread *td)
sched_add(struct thread *td, int flags)
{
sched_add_internal(td, 1);
/* let jeff work out how to map the flags better */
/* I'm open to suggestions */
if (flags & SRQ_YIELDING)
/*
* Preempting during switching can be bad JUJU
* especially for KSE processes
*/
sched_add_internal(td, 0);
else
sched_add_internal(td, 1);
}
static void

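For sched_ule.c the new hint is, for now, simply folded into the existing
internal entry point. A user-space sketch of that mapping follows;
sched_add_internal() is stubbed out here, and the name and meaning of its
second parameter are assumptions for illustration, not taken from the diff.

#include <stdio.h>

#define	SRQ_YIELDING	0x0001

struct thread;				/* never dereferenced in this model */

static void
sched_add_internal(struct thread *td, int preempt_ok)
{
	(void)td;
	printf("sched_add_internal: preempt_ok=%d\n", preempt_ok);
}

static void
sched_add(struct thread *td, int flags)
{
	/* A yielding thread is mid-switch, so don't ask for preemption. */
	sched_add_internal(td, (flags & SRQ_YIELDING) ? 0 : 1);
}

int
main(void)
{
	sched_add(NULL, 0);			/* SRQ_BORING */
	sched_add(NULL, SRQ_YIELDING);		/* from mi_switch() */
	return (0);
}

The real sched_add_internal() of course does the run-queue work; the point is
only that SRQ_YIELDING degrades to "do not try to preempt", as the interim
comment in the hunk says.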
View File

@@ -736,7 +736,7 @@ turnstile_unpend(struct turnstile *ts)
td->td_lockname = NULL;
TD_CLR_LOCK(td);
MPASS(TD_CAN_RUN(td));
setrunqueue(td);
setrunqueue(td, SRQ_BORING);
} else {
td->td_flags |= TDF_TSNOBLOCK;
MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));

View File

@@ -716,6 +716,12 @@ struct proc {
#define SW_VOL 0x0001 /* Voluntary switch. */
#define SW_INVOL 0x0002 /* Involuntary switch. */
/* flags for setrunqueue(). Why are we setting this thread on the run queue? */
#define SRQ_BORING 0x0000 /* No special circumstances */
#define SRQ_YIELDING 0x0001 /* we are yielding (from mi_switch) */
#define SRQ_OURSELF 0x0002 /* it is ourself (from mi_switch) */
#define SRQ_INTR 0x0004 /* it is probably urgent */
/* How values for thread_single(). */
#define SINGLE_NO_EXIT 0
#define SINGLE_EXIT 1
@@ -905,7 +911,7 @@ void proc_reparent(struct proc *child, struct proc *newparent);
int securelevel_ge(struct ucred *cr, int level);
int securelevel_gt(struct ucred *cr, int level);
void setrunnable(struct thread *);
void setrunqueue(struct thread *);
void setrunqueue(struct thread *, int flags);
void setsugid(struct proc *p);
int sigonstack(size_t sp);
void sleepinit(void);

View File

@@ -73,7 +73,7 @@ void sched_wakeup(struct thread *td);
/*
* Threads are moved on and off of run queues
*/
void sched_add(struct thread *td);
void sched_add(struct thread *td, int flags);
struct kse *sched_choose(void); /* XXX Should be thread * */
void sched_clock(struct thread *td);
void sched_rem(struct thread *td);

View File

@@ -187,7 +187,7 @@ pagezero_start(void __unused *arg)
pagezero_proc->p_flag |= P_NOLOAD;
PROC_UNLOCK(pagezero_proc);
mtx_lock_spin(&sched_lock);
setrunqueue(FIRST_THREAD_IN_PROC(pagezero_proc));
setrunqueue(FIRST_THREAD_IN_PROC(pagezero_proc), SRQ_BORING);
mtx_unlock_spin(&sched_lock);
}
SYSINIT(pagezero, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, pagezero_start, NULL)