Add some code to allow threads to nominate a sibling to run if they are going to sleep.
MFC after: 1 week
This commit is contained in:
parent
3071824ff0
commit
3389af30e8
@ -192,7 +192,7 @@ retry:
|
||||
* sched_thread_exit() (local)
|
||||
* sched_switch() (local)
|
||||
* sched_thread_exit() (local)
|
||||
* remrunqueue() (local) (commented out)
|
||||
* remrunqueue() (local)
|
||||
*/
|
||||
static void
|
||||
slot_fill(struct ksegrp *kg)
|
||||
@ -224,7 +224,7 @@ slot_fill(struct ksegrp *kg)
|
||||
}
|
||||
}
|
||||
|
||||
#if 0
|
||||
#ifdef SCHED_4BSD
|
||||
/*
|
||||
* Remove a thread from its KSEGRP's run queue.
|
||||
* This in turn may remove it from a KSE if it was already assigned
|
||||
@ -248,7 +248,7 @@ remrunqueue(struct thread *td)
|
||||
* If it is not a threaded process, take the shortcut.
|
||||
*/
|
||||
if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
|
||||
/* Bring its kse with it, leave the thread attached */
|
||||
/* remve from sys run queue and free up a slot */
|
||||
sched_rem(td);
|
||||
kg->kg_avail_opennings++;
|
||||
ke->ke_state = KES_THREAD;
|
||||
@ -259,7 +259,7 @@ remrunqueue(struct thread *td)
|
||||
kg->kg_runnable--;
|
||||
if (ke->ke_state == KES_ONRUNQ) {
|
||||
/*
|
||||
* This thread has been assigned to a KSE.
|
||||
* This thread has been assigned to the system run queue.
|
||||
* We need to dissociate it and try assign the
|
||||
* KSE to the next available thread. Then, we should
|
||||
* see if we need to move the KSE in the run queues.
|
||||
@ -271,7 +271,7 @@ remrunqueue(struct thread *td)
|
||||
KASSERT((td2 != NULL), ("last assigned has wrong value"));
|
||||
if (td2 == td)
|
||||
kg->kg_last_assigned = td3;
|
||||
slot_fill(kg);
|
||||
/* slot_fill(kg); */ /* will replace it with another */
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -337,7 +337,7 @@ mi_switch(int flags, struct thread *newtd)
|
||||
(void *)td, td->td_sched, (long)p->p_pid, p->p_comm);
|
||||
if (td->td_proc->p_flag & P_SA)
|
||||
newtd = thread_switchout(td, flags, newtd);
|
||||
sched_switch(td, newtd);
|
||||
sched_switch(td, newtd, flags);
|
||||
|
||||
CTR4(KTR_PROC, "mi_switch: new thread %p (kse %p, pid %ld, %s)",
|
||||
(void *)td, td->td_sched, (long)p->p_pid, p->p_comm);
|
||||
|
@ -247,7 +247,22 @@ static int forward_wakeup_use_htt = 0;
|
||||
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
|
||||
&forward_wakeup_use_htt, 0,
|
||||
"account for htt");
|
||||
|
||||
#endif
|
||||
static int sched_followon = 0;
|
||||
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
|
||||
&sched_followon, 0,
|
||||
"allow threads to share a quantum");
|
||||
|
||||
static int sched_pfollowons = 0;
|
||||
SYSCTL_INT(_kern_sched, OID_AUTO, pfollowons, CTLFLAG_RD,
|
||||
&sched_pfollowons, 0,
|
||||
"number of followons done to a different ksegrp");
|
||||
|
||||
static int sched_kgfollowons = 0;
|
||||
SYSCTL_INT(_kern_sched, OID_AUTO, kgfollowons, CTLFLAG_RD,
|
||||
&sched_kgfollowons, 0,
|
||||
"number of followons done in a ksegrp");
|
||||
|
||||
/*
|
||||
* Arrange to reschedule if necessary, taking the priorities and
|
||||
@ -733,10 +748,13 @@ sched_sleep(struct thread *td)
|
||||
td->td_base_pri = td->td_priority;
|
||||
}
|
||||
|
||||
static void remrunqueue(struct thread *td);
|
||||
|
||||
void
|
||||
sched_switch(struct thread *td, struct thread *newtd)
|
||||
sched_switch(struct thread *td, struct thread *newtd, int flags)
|
||||
{
|
||||
struct kse *ke;
|
||||
struct ksegrp *kg;
|
||||
struct proc *p;
|
||||
|
||||
ke = td->td_kse;
|
||||
@ -746,6 +764,33 @@ sched_switch(struct thread *td, struct thread *newtd)
|
||||
|
||||
if ((p->p_flag & P_NOLOAD) == 0)
|
||||
sched_tdcnt--;
|
||||
|
||||
/*
|
||||
* We are volunteering to switch out so we get to nominate
|
||||
* a successor for the rest of our quantum
|
||||
* First try another thread in our ksegrp, and then look for
|
||||
* other ksegrps in our process.
|
||||
*/
|
||||
if (sched_followon &&
|
||||
(p->p_flag & P_HADTHREADS) &&
|
||||
(flags & SW_VOL) &&
|
||||
newtd == NULL) {
|
||||
/* lets schedule another thread from this process */
|
||||
kg = td->td_ksegrp;
|
||||
if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
|
||||
remrunqueue(newtd);
|
||||
sched_kgfollowons++;
|
||||
} else {
|
||||
FOREACH_KSEGRP_IN_PROC(p, kg) {
|
||||
if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
|
||||
sched_pfollowons++;
|
||||
remrunqueue(newtd);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The thread we are about to run needs to be counted as if it had been
|
||||
* added to the run queue and selected.
|
||||
@ -757,6 +802,7 @@ sched_switch(struct thread *td, struct thread *newtd)
|
||||
if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
|
||||
sched_tdcnt++;
|
||||
}
|
||||
|
||||
td->td_lastcpu = td->td_oncpu;
|
||||
td->td_flags &= ~TDF_NEEDRESCHED;
|
||||
td->td_pflags &= ~TDP_OWEPREEMPT;
|
||||
|
@ -1225,7 +1225,7 @@ sched_prio(struct thread *td, u_char prio)
|
||||
}
|
||||
|
||||
void
|
||||
sched_switch(struct thread *td, struct thread *newtd)
|
||||
sched_switch(struct thread *td, struct thread *newtd, int flags)
|
||||
{
|
||||
struct kse *ke;
|
||||
|
||||
|
@ -66,7 +66,7 @@ void sched_fork_thread(struct thread *td, struct thread *child);
|
||||
fixpt_t sched_pctcpu(struct thread *td);
|
||||
void sched_prio(struct thread *td, u_char prio);
|
||||
void sched_sleep(struct thread *td);
|
||||
void sched_switch(struct thread *td, struct thread *newtd);
|
||||
void sched_switch(struct thread *td, struct thread *newtd, int flags);
|
||||
void sched_userret(struct thread *td);
|
||||
void sched_wakeup(struct thread *td);
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user