When preempting a thread, put it back on the HEAD of its run queue.
(Only really implemented in 4bsd)

MFC after: 4 days
parent c5c3fb335f
commit c20c691bed
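The behavioral change in one picture: a preempted thread now goes back onto the head of its priority bucket, so it is the next thread chosen at that priority instead of waiting behind everything already queued there. Head insertion only reorders threads within a single bucket; a preempted thread still cannot run ahead of a higher-priority one. Below is a minimal userspace sketch of that ordering, built on the same sys/queue.h TAILQ macros the kernel run queues use; the fake_* names are invented for the sketch and are not kernel identifiers.

#include <sys/queue.h>
#include <stdio.h>

struct fake_thread {
	const char *name;
	TAILQ_ENTRY(fake_thread) link;
};
TAILQ_HEAD(fake_rqhead, fake_thread);

/* Insert at the head when the thread was preempted, else at the tail. */
static void
fake_runq_add(struct fake_rqhead *q, struct fake_thread *t, int preempted)
{
	if (preempted)
		TAILQ_INSERT_HEAD(q, t, link);
	else
		TAILQ_INSERT_TAIL(q, t, link);
}

int
main(void)
{
	struct fake_rqhead q = TAILQ_HEAD_INITIALIZER(q);
	struct fake_thread a = { "A (was already waiting)" };
	struct fake_thread b = { "B (was already waiting)" };
	struct fake_thread p = { "P (just preempted)" };
	struct fake_thread *t;

	fake_runq_add(&q, &a, 0);
	fake_runq_add(&q, &b, 0);
	fake_runq_add(&q, &p, 1);	/* head insertion: P runs next */

	TAILQ_FOREACH(t, &q, link)
		printf("%s\n", t->name);	/* prints P, A, B */
	return (0);
}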
@@ -526,9 +526,7 @@ maybe_preempt(struct thread *td)
 	}
 
 	/*
-	 * Our thread state says that we are already on a run queue, so
-	 * update our state as if we had been dequeued by choosethread().
-	 * However we must not actually be on the system run queue yet.
+	 * Thread is runnable but not yet put on system run queue.
 	 */
 	MPASS(TD_ON_RUNQ(td));
 	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
@@ -551,7 +549,7 @@ maybe_preempt(struct thread *td)
 	TD_SET_RUNNING(td);
 	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
 	    td->td_proc->p_pid, td->td_proc->p_comm);
-	mi_switch(SW_INVOL, td);
+	mi_switch(SW_INVOL|SW_PREEMPT, td);
 	return (1);
 #else
 	return (0);
@@ -651,7 +649,7 @@ runq_setbit(struct runq *rq, int pri)
  * corresponding status bit.
  */
 void
-runq_add(struct runq *rq, struct kse *ke)
+runq_add(struct runq *rq, struct kse *ke, int flags)
 {
 	struct rqhead *rqh;
 	int pri;
@@ -662,7 +660,11 @@ runq_add(struct runq *rq, struct kse *ke)
 	rqh = &rq->rq_queues[pri];
 	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
 	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
-	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
+	if (flags & SRQ_PREEMPTED) {
+		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
+	} else {
+		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
+	}
 }
 
 /*
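For context on the structure runq_add() is manipulating: struct runq (see the sys/runq.h hunk below) keeps an array of FIFO queues, rq_queues[RQ_NQS], indexed by a bucket derived from the thread's priority, plus a status bitmap that runq_setbit() maintains and runq_choose() scans to find the best non-empty bucket. The SRQ_PREEMPTED case above therefore only changes where the KSE lands inside its own bucket. A simplified, self-contained model of that layout follows; the toy_* names and NQUEUES are invented for the sketch, and the bitmap scan uses a compiler builtin where the kernel does the equivalent scan over its status words.

#include <sys/queue.h>
#include <stdint.h>
#include <stddef.h>

#define	NQUEUES	64			/* stand-in for RQ_NQS */

struct toy_kse {
	int pri;			/* bucket index, 0 = best, < NQUEUES */
	TAILQ_ENTRY(toy_kse) procq;
};
TAILQ_HEAD(toy_rqhead, toy_kse);

struct toy_runq {
	uint64_t		status;		/* bit i set => queue i non-empty */
	struct toy_rqhead	queues[NQUEUES];
};

static void
toy_runq_init(struct toy_runq *rq)
{
	int i;

	rq->status = 0;
	for (i = 0; i < NQUEUES; i++)
		TAILQ_INIT(&rq->queues[i]);
}

static void
toy_runq_add(struct toy_runq *rq, struct toy_kse *ke, int preempted)
{
	struct toy_rqhead *rqh = &rq->queues[ke->pri];

	rq->status |= (uint64_t)1 << ke->pri;		/* analogous to runq_setbit() */
	if (preempted)
		TAILQ_INSERT_HEAD(rqh, ke, procq);	/* run next at this level */
	else
		TAILQ_INSERT_TAIL(rqh, ke, procq);	/* normal FIFO behaviour */
}

/* Pick the first entry of the best (lowest-numbered) non-empty queue. */
static struct toy_kse *
toy_runq_choose(struct toy_runq *rq)
{
	if (rq->status == 0)
		return (NULL);
	return (TAILQ_FIRST(&rq->queues[__builtin_ctzll(rq->status)]));
}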
@@ -811,26 +811,6 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 		}
 	}
 
-	/*
-	 * The thread we are about to run needs to be counted as if it had been
-	 * added to the run queue and selected.
-	 * it came from:
-	 *  A preemption
-	 *  An upcall
-	 *  A followon
-	 * Do this before saving curthread so that the slot count
-	 * doesn't give an overly optimistic view when that happens.
-	 */
-	if (newtd) {
-		KASSERT((newtd->td_inhibitors == 0),
-			("trying to run inhibitted thread"));
-		SLOT_USE(newtd->td_ksegrp);
-		newtd->td_kse->ke_flags |= KEF_DIDRUN;
-		TD_SET_RUNNING(newtd);
-		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
-			sched_tdcnt++;
-	}
-
 	td->td_lastcpu = td->td_oncpu;
 	td->td_flags &= ~TDF_NEEDRESCHED;
 	td->td_pflags &= ~TDP_OWEPREEMPT;
@@ -844,21 +824,43 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	if (td == PCPU_GET(idlethread))
 		TD_SET_CAN_RUN(td);
 	else {
 		SLOT_RELEASE(td->td_ksegrp);
 		if (TD_IS_RUNNING(td)) {
 			/* Put us back on the run queue (kse and all). */
-			setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
+			setrunqueue(td, (flags & SW_PREEMPT) ?
+			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
+			    SRQ_OURSELF|SRQ_YIELDING);
 		} else if (p->p_flag & P_HADTHREADS) {
 			/*
 			 * We will not be on the run queue. So we must be
 			 * sleeping or similar. As it's available,
 			 * someone else can use the KSE if they need it.
+			 * It's NOT available if we are about to need it
 			 */
-			slot_fill(td->td_ksegrp);
+			if (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)
+				slot_fill(td->td_ksegrp);
 		}
 	}
-	if (newtd == NULL)
+	if (newtd) {
+		/*
+		 * The thread we are about to run needs to be counted
+		 * as if it had been added to the run queue and selected.
+		 * It came from:
+		 * * A preemption
+		 * * An upcall
+		 * * A followon
+		 */
+		KASSERT((newtd->td_inhibitors == 0),
+			("trying to run inhibitted thread"));
+		SLOT_USE(newtd->td_ksegrp);
+		newtd->td_kse->ke_flags |= KEF_DIDRUN;
+		TD_SET_RUNNING(newtd);
+		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
+			sched_tdcnt++;
+	} else {
 		newtd = choosethread();
+	}
+
 	if (td != newtd)
 		cpu_switch(td, newtd);
 	sched_lock.mtx_lock = (uintptr_t)td;
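Putting the 4BSD pieces together: the preemption hint is carried as a flag through every layer. maybe_preempt() calls mi_switch(SW_INVOL|SW_PREEMPT, ...), sched_switch() translates SW_PREEMPT into SRQ_PREEMPTED when it puts the old thread back with setrunqueue(), sched_add() forwards its flags to runq_add(), and runq_add() finally turns SRQ_PREEMPTED into a head insertion. A stripped-down sketch of that translation chain follows; the toy_* bodies are illustrative, but the flag values are the ones defined in the sys/proc.h hunk below.

#include <stdio.h>

/* Values as defined in sys/proc.h by this change. */
#define	SW_VOL		0x0001
#define	SW_INVOL	0x0002
#define	SW_PREEMPT	0x0004

#define	SRQ_YIELDING	0x0001
#define	SRQ_OURSELF	0x0002
#define	SRQ_PREEMPTED	0x0008

static void
toy_runq_add(int srq_flags)
{
	/* The only consumer of SRQ_PREEMPTED: head vs. tail insertion. */
	printf("%s insert\n", (srq_flags & SRQ_PREEMPTED) ? "HEAD" : "TAIL");
}

static void
toy_sched_switch(int sw_flags)
{
	/* Mirrors the sched_4bsd.c change: translate SW_PREEMPT for setrunqueue(). */
	toy_runq_add((sw_flags & SW_PREEMPT) ?
	    SRQ_OURSELF | SRQ_YIELDING | SRQ_PREEMPTED :
	    SRQ_OURSELF | SRQ_YIELDING);
}

int
main(void)
{
	toy_sched_switch(SW_INVOL);			/* ordinary involuntary switch: TAIL */
	toy_sched_switch(SW_INVOL | SW_PREEMPT);	/* preemption: HEAD */
	return (0);
}

ULE, by contrast, passes 0 at all of its runq_add() call sites in this change and never sets SRQ_PREEMPTED in its own sched_switch(), which is what the "(Only really implemented in 4bsd)" note in the log message refers to.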
@@ -1052,8 +1054,8 @@ sched_add(struct thread *td, int flags)
 	}
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_tdcnt++;
-	SLOT_USE(td->td_ksegrp);
-	runq_add(ke->ke_runq, ke);
+	SLOT_USE(td->td_ksegrp);
+	runq_add(ke->ke_runq, ke, flags);
 	ke->ke_ksegrp->kg_runq_kses++;
 	ke->ke_state = KES_ONRUNQ;
 	maybe_resched(td);
@@ -186,7 +186,6 @@ do { \
 	("slots out of whack"));*/ \
 } while (0)
 
-
 static struct kse kse0;
 static struct kg_sched kg_sched0;
 
@@ -405,7 +404,7 @@ kseq_runq_add(struct kseq *kseq, struct kse *ke)
 		ke->ke_flags |= KEF_XFERABLE;
 	}
 #endif
-	runq_add(ke->ke_runq, ke);
+	runq_add(ke->ke_runq, ke, 0);
 }
 
 static __inline void
@@ -896,7 +895,7 @@ kseq_choose(struct kseq *kseq)
 			runq_remove(ke->ke_runq, ke);
 			sched_slice(ke);
 			ke->ke_runq = kseq->ksq_next;
-			runq_add(ke->ke_runq, ke);
+			runq_add(ke->ke_runq, ke, 0);
 			continue;
 		}
 		return (ke);
@@ -1232,7 +1231,7 @@ sched_prio(struct thread *td, u_char prio)
 		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
 			runq_remove(ke->ke_runq, ke);
 			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
-			runq_add(ke->ke_runq, ke);
+			runq_add(ke->ke_runq, ke, 0);
 		}
 		/*
 		 * Hold this kse on this cpu so that sched_prio() doesn't
@@ -1285,16 +1284,25 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 			/*
 			 * We will not be on the run queue.
 			 * So we must be sleeping or similar.
+			 * Don't use the slot if we will need it
+			 * for newtd.
 			 */
-			if (td->td_proc->p_flag & P_HADTHREADS)
+			if ((td->td_proc->p_flag & P_HADTHREADS) &&
+			    (newtd == NULL ||
+			    newtd->td_ksegrp != td->td_ksegrp))
 				slot_fill(td->td_ksegrp);
 		}
 	}
 	}
 	if (newtd != NULL) {
+		/*
+		 * If we bring in a thread,
+		 * then account for it as if it had been added to the
+		 * run queue and then chosen.
+		 */
 		newtd->td_kse->ke_flags |= KEF_DIDRUN;
-		TD_SET_RUNNING(newtd);
+		SLOT_USE(newtd->td_ksegrp);
+		TD_SET_RUNNING(newtd);
 		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
 	} else
 		newtd = choosethread();
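The ULE hunk above adopts the same slot rule that sched_4bsd.c now uses: when the outgoing thread is blocking rather than staying runnable, its ksegrp's freed slot is handed to a sibling thread via slot_fill() only if the incoming thread will not need that slot itself, i.e. only if newtd is NULL or belongs to a different ksegrp. A small sketch of just that predicate, with toy_* names invented for the sketch:

#include <stdbool.h>
#include <stddef.h>

struct toy_ksegrp;			/* opaque; only compared by identity */

struct toy_thread {
	struct toy_ksegrp *ksegrp;
	int had_threads;		/* stand-in for p->p_flag & P_HADTHREADS */
};

/*
 * Should the freed slot be given away to a sibling thread?
 * Not when the thread we are switching to belongs to the same
 * ksegrp, because it is about to take that slot itself.
 */
static bool
toy_should_slot_fill(const struct toy_thread *td, const struct toy_thread *newtd)
{
	return (td->had_threads &&
	    (newtd == NULL || newtd->ksegrp != td->ksegrp));
}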
@@ -669,12 +669,14 @@ struct proc {
 /* Flags for mi_switch(). */
 #define	SW_VOL		0x0001		/* Voluntary switch. */
 #define	SW_INVOL	0x0002		/* Involuntary switch. */
+#define	SW_PREEMPT	0x0004		/* The invol switch is a preemption */
 
 /* Flags for setrunqueue().  Why are we setting this thread on the run queue? */
 #define	SRQ_BORING	0x0000		/* No special circumstances. */
 #define	SRQ_YIELDING	0x0001		/* We are yielding (from mi_switch). */
 #define	SRQ_OURSELF	0x0002		/* It is ourself (from mi_switch). */
 #define	SRQ_INTR	0x0004		/* It is probably urgent. */
+#define	SRQ_PREEMPTED	0x0008		/* has been preempted.. be kind */
 
 /* How values for thread_single(). */
 #define	SINGLE_NO_EXIT	0
@@ -62,7 +62,7 @@ struct runq {
 	struct rqhead	rq_queues[RQ_NQS];
 };
 
-void	runq_add(struct runq *, struct kse *);
+void	runq_add(struct runq *, struct kse *, int flags);
 int	runq_check(struct runq *);
 struct kse *runq_choose(struct runq *);
 void	runq_init(struct runq *);