Thinking about it, I came to the conclusion that the KSE states were
incorrectly formulated. The correct states should be:
IDLE:  On the idle KSE list for that KSEGRP.
RUNQ:  Linked onto the system run queue.
THREAD: Attached to a thread and slaved to whatever state the thread is in.

This means that most places where we were adjusting KSE state can go away,
since the KSE only moves around because its thread does; it simply follows
the thread's state. The only places we need to adjust the KSE state are the
transitions to and from the idle and run queues.
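
A minimal sketch of the resulting state model, using the KES_* values that
appear in the diff below. This is an illustrative paraphrase, not the actual
declaration in the tree; note that the struct kse hunk at the end of the diff
also retains KES_UNQUEUED as an "in transit" value.

	enum {
		KES_IDLE,	/* on its ksegrp's idle KSE list */
		KES_ONRUNQ,	/* linked onto the system run queue */
		KES_THREAD	/* attached to a thread; effectively in
				   whatever state that thread is in */
	};

	/*
	 * With this model the state only changes when a KSE moves onto or
	 * off the idle list or the run queue; while KES_THREAD the KSE
	 * just follows its thread around.
	 */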

Reviewed by:	jhb@freebsd.org
Julian Elischer 2002-07-14 03:43:33 +00:00
parent 010b4b09f1
commit c3b98db091
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=99942
9 changed files with 18 additions and 162 deletions

View File

@@ -342,7 +342,7 @@ proc0_init(void *dummy __unused)
td->td_base_pri = PUSER;
td->td_kse = ke; /* XXXKSE */
ke->ke_oncpu = 0;
ke->ke_state = KES_RUNNING;
ke->ke_state = KES_THREAD;
ke->ke_thread = td;
/* proc_linkup puts it in the idle queue, that's not what we want. */
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);

View File

@@ -510,11 +510,10 @@ fork1(td, flags, procp)
/* Set up the thread as an active thread (as if runnable). */
TAILQ_REMOVE(&kg2->kg_iq, ke2, ke_kgrlist);
kg2->kg_idle_kses--;
ke2->ke_state = KES_UNQUEUED;
ke2->ke_state = KES_THREAD;
ke2->ke_thread = td2;
td2->td_kse = ke2;
td2->td_flags &= ~TDF_UNBOUND; /* For the rest of this syscall. */
KASSERT((ke2->ke_kgrlist.tqe_next != ke2), ("linked to self!"));
/* note.. XXXKSE no pcb or u-area yet */
@@ -835,7 +834,6 @@ fork_exit(callout, arg, frame)
td->td_kse->ke_oncpu = PCPU_GET(cpuid);
p->p_state = PRS_NORMAL;
td->td_state = TDS_RUNNING; /* Already done in switch() on 386. */
td->td_kse->ke_state = KES_RUNNING;
/*
* Finish setting up thread glue. We need to initialize
* the thread into a td_critnest=1 state. Some platforms

View File

@@ -62,8 +62,7 @@ idle_setup(void *dummy)
p->p_flag |= P_NOLOAD;
td = FIRST_THREAD_IN_PROC(p);
td->td_state = TDS_RUNQ;
td->td_kse->ke_state = KES_ONRUNQ;
td->td_state = TDS_UNQUEUED;
td->td_kse->ke_flags |= KEF_IDLEKSE;
#ifdef SMP
}
@@ -84,8 +83,6 @@ idle_proc(void *dummy)
td = curthread;
p = td->td_proc;
td->td_state = TDS_RUNNING;
td->td_kse->ke_state = KES_RUNNING;
for (;;) {
mtx_assert(&Giant, MA_NOTOWNED);
@@ -115,7 +112,6 @@ idle_proc(void *dummy)
mtx_lock_spin(&sched_lock);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
td->td_kse->ke_state = KES_RUNNING;
mtx_unlock_spin(&sched_lock);
}
}

View File

@@ -327,9 +327,6 @@ thread_exit(void)
/* Reassign this thread's KSE. */
if (ke != NULL) {
KASSERT((ke->ke_state == KES_RUNNING), ("zapping kse not running"));
KASSERT((ke->ke_thread == td ), ("kse ke_thread mismatch against curthread"));
KASSERT((ke->ke_thread->td_state == TDS_RUNNING), ("zapping thread not running"));
ke->ke_thread = NULL;
td->td_kse = NULL;
ke->ke_state = KES_UNQUEUED;

View File

@@ -212,10 +212,8 @@ kse_link(struct kse *ke, struct ksegrp *kg)
{
struct proc *p = kg->kg_proc;
KASSERT((ke->ke_state != KES_ONRUNQ), ("linking suspect kse on run queue"));
TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
kg->kg_kses++;
KASSERT((ke->ke_state != KES_IDLE), ("already on idle queue"));
ke->ke_state = KES_IDLE;
TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
kg->kg_idle_kses++;

View File

@@ -155,7 +155,6 @@ choosethread(void)
} else {
/* Simulate runq_choose() having returned the idle thread */
td = PCPU_GET(idlethread);
td->td_kse->ke_state = KES_RUNNING;
CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
}
td->td_state = TDS_RUNNING;
@@ -196,7 +195,6 @@ kse_reassign(struct kse *ke)
runq_add(&runq, ke);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
} else {
KASSERT((ke->ke_state != KES_IDLE), ("kse already idle"));
ke->ke_state = KES_IDLE;
ke->ke_thread = NULL;
TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
@@ -239,7 +237,7 @@ remrunqueue(struct thread *td)
if ((td->td_flags & TDF_UNBOUND) == 0) {
/* Bring its kse with it, leave the thread attached */
runq_remove(&runq, ke);
ke->ke_state = KES_UNQUEUED;
ke->ke_state = KES_THREAD;
return;
}
if (ke) {
@@ -286,8 +284,6 @@ remrunqueue(struct thread *td)
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
}
#if 1 /* use the first version */
void
setrunqueue(struct thread *td)
{
@@ -331,7 +327,7 @@ setrunqueue(struct thread *td)
*/
ke = TAILQ_FIRST(&kg->kg_iq);
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
ke->ke_state = KES_UNQUEUED;
ke->ke_state = KES_THREAD;
kg->kg_idle_kses--;
} else if (tda && (tda->td_priority > td->td_priority)) {
/*
@@ -345,8 +341,9 @@ setrunqueue(struct thread *td)
runq_remove(&runq, ke);
}
} else {
KASSERT(ke->ke_thread == td, ("KSE/thread mismatch"));
KASSERT(ke->ke_state != KES_IDLE, ("KSE unexpectedly idle"));
/*
* Temporarily disassociate so it looks like the other cases.
*/
ke->ke_thread = NULL;
td->td_kse = NULL;
}
@@ -374,7 +371,7 @@ setrunqueue(struct thread *td)
if (tda == NULL) {
/*
* No pre-existing last assigned so whoever is first
* gets the KSE we borught in.. (may be us)
* gets the KSE we brought in.. (maybe us)
*/
td2 = TAILQ_FIRST(&kg->kg_runq);
KASSERT((td2->td_kse == NULL),
@@ -404,121 +401,6 @@ setrunqueue(struct thread *td)
}
}
#else
void
setrunqueue(struct thread *td)
{
struct kse *ke;
struct ksegrp *kg;
struct thread *td2;
CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
KASSERT((td->td_state != TDS_RUNQ), ("setrunqueue: bad thread state"));
td->td_state = TDS_RUNQ;
kg = td->td_ksegrp;
kg->kg_runnable++;
if ((td->td_flags & TDF_UNBOUND) == 0) {
/*
* Common path optimisation: Only one of everything
* and the KSE is always already attached.
* Totally ignore the ksegrp run queue.
*/
runq_add(&runq, td->td_kse);
return;
}
/*
* First add the thread to the ksegrp's run queue at
* the appropriate place.
*/
TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
if (td2->td_priority > td->td_priority) {
TAILQ_INSERT_BEFORE(td2, td, td_runq);
break;
}
}
if (td2 == NULL) {
/* We ran off the end of the TAILQ or it was empty. */
TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
}
/*
* The following could be achieved by simply doing:
* td->td_kse = NULL; kse_reassign(ke);
* but I felt that I'd try do it inline here.
* All this work may not be worth it.
*/
if ((ke = td->td_kse)) { /* XXXKSE */
/*
* We have a KSE already. See whether we can keep it
* or if we need to give it to someone else.
* Either way it will need to be inserted into
* the runq. kse_reassign() will do this as will runq_add().
*/
if ((kg->kg_last_assigned) &&
(kg->kg_last_assigned->td_priority > td->td_priority)) {
/*
* We can definitly keep the KSE
* as the "last assignead thread" has
* less priority than we do.
* The "last assigned" pointer stays the same.
*/
runq_add(&runq, ke);
return;
}
/*
* Give it to the correct thread,
* which may be (often is) us, but may not be.
*/
td->td_kse = NULL;
kse_reassign(ke);
return;
}
/*
* There are two cases where KSE adjustment is needed.
* Usurpation of an already assigned KSE, and assignment
* of a previously IDLE KSE.
*/
if (kg->kg_idle_kses) {
/*
* If there are unassigned KSEs then we definitly
* will be assigned one from the idle KSE list.
* If we are the last, we should get the "last
* assigned" pointer set to us as well.
*/
ke = TAILQ_FIRST(&kg->kg_iq);
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
ke->ke_state = KES_UNQUEUED;
kg->kg_idle_kses--;
ke->ke_thread = td;
td->td_kse = ke;
runq_add(&runq, ke);
if (TAILQ_NEXT(td, td_runq) == NULL) {
kg->kg_last_assigned = td;
}
} else if (kg->kg_last_assigned &&
(kg->kg_last_assigned->td_priority > td->td_priority)) {
/*
* If there were none last-assigned, all KSEs
* are actually out running as we speak.
* If there was a last assigned, but we didn't see it,
* we must be inserting before it, so take the KSE from
* the last assigned, and back it up one entry. Then,
* assign the KSE to the new thread and adjust its priority.
*/
td2 = kg->kg_last_assigned;
ke = td2->td_kse;
kg->kg_last_assigned =
TAILQ_PREV(td2, threadqueue, td_runq);
td2->td_kse = NULL;
td->td_kse = ke;
ke->ke_thread = td;
runq_readjust(&runq, ke);
}
}
#endif
/************************************************************************
* Critical section marker functions *
************************************************************************/
@@ -634,14 +516,11 @@ runq_add(struct runq *rq, struct kse *ke)
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
KASSERT((ke->ke_thread->td_kse != NULL), ("runq_add: No KSE on thread"));
if (ke->ke_state == KES_ONRUNQ)
return;
#if defined(INVARIANTS) && defined(DIAGNOSTIC)
KASSERT((ke->ke_thread->td_kse != NULL),
("runq_add: No KSE on thread"));
KASSERT(ke->ke_state != KES_ONRUNQ,
("runq_add: kse %p (%s) already in run queue", ke,
ke->ke_proc->p_comm));
#endif
pri = ke->ke_thread->td_priority / RQ_PPQ;
ke->ke_rqindex = pri;
runq_setbit(rq, pri);
@@ -702,7 +581,7 @@ runq_choose(struct runq *rq)
runq_clrbit(rq, pri);
}
ke->ke_state = KES_RUNNING;
ke->ke_state = KES_THREAD;
KASSERT((ke->ke_thread != NULL),
("runq_choose: No thread on KSE"));
KASSERT((ke->ke_thread->td_kse != NULL),
@@ -737,7 +616,7 @@ runq_remove(struct runq *rq, struct kse *ke)
CTR0(KTR_RUNQ, "runq_remove: empty");
runq_clrbit(rq, pri);
}
ke->ke_state = KES_UNQUEUED;
ke->ke_state = KES_THREAD;
ke->ke_ksegrp->kg_runq_kses--;
}

View File

@@ -282,8 +282,9 @@ schedcpu(arg)
* the kse slptimes are not touched in wakeup
* because the thread may not HAVE a KSE
*/
if (ke->ke_state == KES_ONRUNQ ||
ke->ke_state == KES_RUNNING) {
if ((ke->ke_state == KES_ONRUNQ) ||
((ke->ke_state == KES_THREAD) &&
(ke->ke_thread->td_state == TDS_RUNNING))) {
ke->ke_slptime++;
} else {
ke->ke_slptime = 0;
@@ -442,8 +443,6 @@ msleep(ident, mtx, priority, wmesg, timo)
if (KTRPOINT(td, KTR_CSW))
ktrcsw(1, 0);
#endif
KASSERT((td->td_kse != NULL), ("msleep: NULL KSE?"));
KASSERT((td->td_kse->ke_state == KES_RUNNING), ("msleep: kse state?"));
WITNESS_SLEEP(0, &mtx->mtx_object);
KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
("sleeping without a mutex"));
@@ -470,19 +469,12 @@ msleep(ident, mtx, priority, wmesg, timo)
mtx_lock_spin(&sched_lock);
if (TAILQ_FIRST(&td->td_ksegrp->kg_runq) == NULL) {
/* Don't recurse here! */
KASSERT((td->td_kse->ke_state == KES_RUNNING), ("msleep: kse stateX?"));
td->td_flags |= TDF_INMSLEEP;
thread_schedule_upcall(td, td->td_kse);
td->td_flags &= ~TDF_INMSLEEP;
KASSERT((td->td_kse->ke_state == KES_RUNNING), ("msleep: kse stateY?"));
}
mtx_unlock_spin(&sched_lock);
}
KASSERT((td->td_kse != NULL), ("msleep: NULL KSE2?"));
KASSERT((td->td_kse->ke_state == KES_RUNNING),
("msleep: kse state2?"));
KASSERT((td->td_kse->ke_thread == td),
("msleep: kse/thread mismatch?"));
}
mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
@@ -797,7 +789,7 @@ mi_switch()
u_int sched_nest;
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
KASSERT((ke->ke_state == KES_RUNNING), ("mi_switch: kse state?"));
KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
#ifdef INVARIANTS
if (td->td_state != TDS_MTX &&
td->td_state != TDS_RUNQ &&
@@ -884,7 +876,6 @@ mi_switch()
}
cpu_switch();
td->td_kse->ke_oncpu = PCPU_GET(cpuid);
td->td_kse->ke_state = KES_RUNNING;
sched_lock.mtx_recurse = sched_nest;
sched_lock.mtx_lock = (uintptr_t)td;
CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,

View File

@@ -327,9 +327,6 @@ thread_exit(void)
/* Reassign this thread's KSE. */
if (ke != NULL) {
KASSERT((ke->ke_state == KES_RUNNING), ("zapping kse not running"));
KASSERT((ke->ke_thread == td ), ("kse ke_thread mismatch against curthread"));
KASSERT((ke->ke_thread->td_state == TDS_RUNNING), ("zapping thread not running"));
ke->ke_thread = NULL;
td->td_kse = NULL;
ke->ke_state = KES_UNQUEUED;

View File

@@ -369,7 +369,7 @@ struct kse {
KES_IDLE = 0x10,
KES_ONRUNQ,
KES_UNQUEUED, /* in transit */
KES_RUNNING
KES_THREAD /* slaved to thread state */
} ke_state; /* (j) S* process status. */
void *ke_mailbox; /* the userland mailbox address */
struct thread *ke_tdspare; /* spare thread for upcalls */