Split struct kse into struct kse_upcall and struct kse. struct kse will
soon be visible only to schedulers. This greatly simplifies much of the
KSE code.

Submitted by:	davidxu
commit 590a39e29b
parent 9ca123a9b5
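For orientation, the core of the change is that per-upcall state (the userland stack, upcall entry point and mailbox) moves out of struct kse into the new struct kse_upcall, so machine-dependent upcall setup now takes a struct kse_upcall and reads ku_* fields. A minimal illustrative sketch, condensed from the hunks below (not the full routine; error handling and the rest of the frame setup are omitted):

void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{
	/* Run the UTS on the upcall's own user stack... */
	td->td_frame->tf_esp =
	    (int)ku->ku_stack.ss_sp + ku->ku_stack.ss_size - 16;
	/* ...entering at the userland upcall function... */
	td->td_frame->tf_eip = (int)ku->ku_func;
	/* ...with the address of the upcall mailbox as its argument. */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)ku->ku_mailbox);
}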
@@ -309,7 +309,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
}

void
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{
/* XXX */
@@ -312,8 +312,6 @@ cpu_set_upcall(struct thread *td, void *pcb)
{
struct pcb *pcb2;
td->td_flags |= TDF_UPCALLING;
/* Point the pcb to the top of the stack. */
pcb2 = td->td_pcb;
@@ -370,7 +368,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
* in thread_userret() itself can be done as well.
*/
void
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{
/*
@@ -387,15 +385,15 @@ cpu_set_upcall_kse(struct thread *td, struct kse *ke)
* function.
*/
td->td_frame->tf_esp =
(int)ke->ke_stack.ss_sp + ke->ke_stack.ss_size - 16;
td->td_frame->tf_eip = (int)ke->ke_upcall;
(int)ku->ku_stack.ss_sp + ku->ku_stack.ss_size - 16;
td->td_frame->tf_eip = (int)ku->ku_func;
/*
* Pass the address of the mailbox for this kse to the uts
* function as a parameter on the stack.
*/
suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
(int)ke->ke_mailbox);
(int)ku->ku_mailbox);
}

void
@@ -170,15 +170,6 @@ dumpthread(volatile struct proc *p, volatile struct thread *td)
if (TD_AWAITING_INTR(td)) {
db_printf("[IWAIT]");
}
if (TD_LENDER(td)) {
db_printf("[LOAN]");
}
if (TD_IS_IDLE(td)) {
db_printf("[IDLE]");
}
if (TD_IS_EXITING(td)) {
db_printf("[EXIT]");
}
break;
case TDS_CAN_RUN:
db_printf("[Can run]");
@@ -312,8 +312,6 @@ cpu_set_upcall(struct thread *td, void *pcb)
{
struct pcb *pcb2;
td->td_flags |= TDF_UPCALLING;
/* Point the pcb to the top of the stack. */
pcb2 = td->td_pcb;
@@ -370,7 +368,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
* in thread_userret() itself can be done as well.
*/
void
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{
/*
@@ -387,15 +385,15 @@ cpu_set_upcall_kse(struct thread *td, struct kse *ke)
* function.
*/
td->td_frame->tf_esp =
(int)ke->ke_stack.ss_sp + ke->ke_stack.ss_size - 16;
td->td_frame->tf_eip = (int)ke->ke_upcall;
(int)ku->ku_stack.ss_sp + ku->ku_stack.ss_size - 16;
td->td_frame->tf_eip = (int)ku->ku_func;
/*
* Pass the address of the mailbox for this kse to the uts
* function as a parameter on the stack.
*/
suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
(int)ke->ke_mailbox);
(int)ku->ku_mailbox);
}

void
@@ -117,7 +117,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
}

void
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{
}
@@ -379,7 +379,6 @@ proc0_init(void *dummy __unused)
ke->ke_oncpu = 0;
ke->ke_state = KES_THREAD;
ke->ke_thread = td;
ke->ke_owner = td;
p->p_peers = 0;
p->p_leader = p;
@@ -375,7 +375,7 @@ statclock(frame)
* Charge the time as appropriate.
*/
if (p->p_flag & P_KSES)
thread_add_ticks_intr(1, 1);
thread_statclock(1);
p->p_uticks++;
if (ke->ke_ksegrp->kg_nice > NZERO)
cp_time[CP_NICE]++;
@@ -399,7 +399,7 @@ statclock(frame)
cp_time[CP_INTR]++;
} else {
if (p->p_flag & P_KSES)
thread_add_ticks_intr(0, 1);
thread_statclock(0);
td->td_sticks++;
p->p_sticks++;
if (p != PCPU_GET(idlethread)->td_proc)
@@ -210,10 +210,7 @@ kern_execve(td, fname, argv, envv, mac_p)
* so unset the associated flags and lose KSE mode.
*/
p->p_flag &= ~P_KSES;
td->td_flags &= ~TDF_UNBOUND;
td->td_mailbox = NULL;
td->td_kse->ke_mailbox = NULL;
td->td_kse->ke_flags &= ~KEF_DOUPCALL;
thread_single_end();
}
p->p_flag |= P_INEXEC;
@@ -147,7 +147,7 @@ exit1(td, rv)
}

/*
* XXXXKSE: MUST abort all other threads before proceeding past here.
* XXXKSE: MUST abort all other threads before proceeding past here.
*/
PROC_LOCK(p);
if (p->p_flag & P_KSES) {
@@ -156,17 +156,6 @@ exit1(td, rv)
* if so, act apropriatly, (exit or suspend);
*/
thread_suspend_check(0);
/*
* Here is a trick..
* We need to free up our KSE to process other threads
* so that we can safely set the UNBOUND flag
* (whether or not we have a mailbox) as we are NEVER
* going to return to the user.
* The flag will not be set yet if we are exiting
* because of a signal, pagefault, or similar
* (or even an exit(2) from the UTS).
*/
td->td_flags |= TDF_UNBOUND;

/*
* Kill off the other threads. This requires
@@ -192,7 +181,6 @@ exit1(td, rv)
* Turn off threading support.
*/
p->p_flag &= ~P_KSES;
td->td_flags &= ~TDF_UNBOUND;
thread_single_end(); /* Don't need this any more. */
}
/*
@@ -499,9 +499,7 @@ again:
/* Set up the thread as an active thread (as if runnable). */
ke2->ke_state = KES_THREAD;
ke2->ke_thread = td2;
ke2->ke_owner = td2;
td2->td_kse = ke2;
td2->td_flags &= ~TDF_UNBOUND; /* For the rest of this syscall. */

/*
* Duplicate sub-structures as needed.
sys/kern/kern_kse.c (1178 lines changed)
File diff suppressed because it is too large
@@ -1506,9 +1506,6 @@ psignal(p, sig)
if (TD_IS_SLEEPING(td) &&
(td->td_flags & TDF_SINTR))
thread_suspend_one(td);
else if (TD_IS_IDLE(td)) {
thread_suspend_one(td);
}
}
if (p->p_suspcount == p->p_numthreads) {
mtx_unlock_spin(&sched_lock);
@@ -1621,9 +1618,6 @@ tdsignal(struct thread *td, int sig, sig_t action)
cv_abort(td);
else
abortsleep(td);
} else if (TD_IS_IDLE(td)) {
TD_CLR_IDLE(td);
setrunnable(td);
}
#ifdef SMP
else {
@@ -111,7 +111,7 @@ static void runq_readjust(struct runq *rq, struct kse *ke);
* Functions that manipulate runnability from a thread perspective. *
************************************************************************/
/*
* Select the KSE that will be run next. From that find the thread, and x
* Select the KSE that will be run next. From that find the thread, and
* remove it from the KSEGRP's run queue. If there is thread clustering,
* this will be what does it.
*/
@@ -127,7 +127,7 @@ retry:
td = ke->ke_thread;
KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
kg = ke->ke_ksegrp;
if (TD_IS_UNBOUND(td)) {
if (td->td_proc->p_flag & P_KSES) {
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
if (kg->kg_last_assigned == td) {
kg->kg_last_assigned = TAILQ_PREV(td,
@@ -158,9 +158,8 @@ retry:
}

/*
* Given a KSE (now surplus or at least loanable), either assign a new
* runable thread to it (and put it in the run queue) or put it in
* the ksegrp's idle KSE list.
* Given a surplus KSE, either assign a new runable thread to it
* (and put it in the run queue) or put it in the ksegrp's idle KSE list.
* Or maybe give it back to its owner if it's been loaned.
* Assumes that the original thread is either not runnable or
* already on the run queue
@@ -170,108 +169,54 @@ kse_reassign(struct kse *ke)
{
struct ksegrp *kg;
struct thread *td;
struct thread *owner;
struct thread *original;
int loaned;
struct kse_upcall *ku;

KASSERT((ke->ke_owner), ("reassigning KSE with no owner"));
KASSERT((ke->ke_thread && TD_IS_INHIBITED(ke->ke_thread)),
("reassigning KSE with no or runnable thread"));
mtx_assert(&sched_lock, MA_OWNED);
kg = ke->ke_ksegrp;
owner = ke->ke_owner;
loaned = TD_LENDER(owner);
original = ke->ke_thread;

if (TD_CAN_UNBIND(original) && (original->td_standin)) {
KASSERT((owner == original),
("Early thread borrowing?"));
KASSERT(original == NULL || TD_IS_INHIBITED(original),
("reassigning KSE with runnable thread"));
kg = ke->ke_ksegrp;
if (original) {
/*
* The outgoing thread is "threaded" and has never
* scheduled an upcall.
* decide whether this is a short or long term event
* and thus whether or not to schedule an upcall.
* if it is a short term event, just suspend it in
* If the outgoing thread is in threaded group and has never
* scheduled an upcall, decide whether this is a short
* or long term event and thus whether or not to schedule
* an upcall.
* If it is a short term event, just suspend it in
* a way that takes its KSE with it.
* Select the events for which we want to schedule upcalls.
* For now it's just sleep.
* Other threads that still have not fired an upcall
* are held to their KSE using the temorary Binding.
* XXXKSE eventually almost any inhibition could do.
*/
if (TD_ON_SLEEPQ(original)) {
/*
* An bound thread that can still unbind itself
* has been scheduled out.
* If it is sleeping, then we need to schedule an
* upcall.
* XXXKSE eventually almost any inhibition could do.
if (TD_CAN_UNBIND(original) && (original->td_standin) &&
TD_ON_SLEEPQ(original)) {
/*
* Release ownership of upcall, and schedule an upcall
* thread, this new upcall thread becomes the owner of
* the upcall structure.
*/
ku = original->td_upcall;
ku->ku_owner = NULL;
original->td_upcall = NULL;
original->td_flags &= ~TDF_CAN_UNBIND;
original->td_flags |= TDF_UNBOUND;
thread_schedule_upcall(original, ke);
owner = ke->ke_owner;
loaned = 1;
thread_schedule_upcall(original, ku);
}
original->td_kse = NULL;
}

/*
* If the current thread was borrowing, then make things consistent
* by giving it back to the owner for the moment. The original thread
* must be unbound and have already used its chance for
* firing off an upcall. Threads that have not yet made an upcall
* can not borrow KSEs.
*/
if (loaned) {
TD_CLR_LOAN(owner);
ke->ke_thread = owner;
original->td_kse = NULL; /* give it amnesia */
/*
* Upcalling threads have lower priority than all
* in-kernel threads, However threads that have loaned out
* their KSE and are NOT upcalling have the priority that
* they have. In other words, only look for other work if
* the owner is not runnable, OR is upcalling.
*/
if (TD_CAN_RUN(owner) &&
((owner->td_flags & TDF_UPCALLING) == 0)) {
setrunnable(owner);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
ke, owner);
return;
}
}

/*
* Either the owner is not runnable, or is an upcall.
* Find the first unassigned thread
* If there is a 'last assigned' then see what's next.
* otherwise look at what is first.
*/
if ((td = kg->kg_last_assigned)) {
if ((td = kg->kg_last_assigned) != NULL)
td = TAILQ_NEXT(td, td_runq);
} else {
else
td = TAILQ_FIRST(&kg->kg_runq);
}

/*
* If we found one assign it the kse, otherwise idle the kse.
* If we found one, assign it the kse, otherwise idle the kse.
*/
if (td) {
/*
* Assign the new thread to the KSE.
* and make the KSE runnable again,
*/
if (TD_IS_BOUND(owner)) {
/*
* If there is a reason to keep the previous
* owner, do so.
*/
TD_SET_LOAN(owner);
} else {
/* otherwise, cut it free */
ke->ke_owner = td;
owner->td_kse = NULL;
}
kg->kg_last_assigned = td;
td->td_kse = ke;
ke->ke_thread = td;
@@ -280,43 +225,11 @@ kse_reassign(struct kse *ke)
return;
}

/*
* Now handle any waiting upcall.
* Since we didn't make them runnable before.
*/
if (TD_CAN_RUN(owner)) {
setrunnable(owner);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
ke, owner);
return;
}

/*
* It is possible that this is the last thread in the group
* because the KSE is being shut down or the process
* is exiting.
*/
if (TD_IS_EXITING(owner) || (ke->ke_flags & KEF_EXIT)) {
ke->ke_thread = NULL;
owner->td_kse = NULL;
kse_unlink(ke);
return;
}

/*
* At this stage all we know is that the owner
* is the same as the 'active' thread in the KSE
* and that it is
* Presently NOT loaned out.
* Put it on the loanable queue. Make it fifo
* so that long term sleepers donate their KSE's first.
*/
KASSERT((TD_IS_BOUND(owner)), ("kse_reassign: UNBOUND lender"));
ke->ke_state = KES_THREAD;
ke->ke_flags |= KEF_ONLOANQ;
TAILQ_INSERT_TAIL(&kg->kg_lq, ke, ke_kgrlist);
kg->kg_loan_kses++;
CTR1(KTR_RUNQ, "kse_reassign: ke%p on loan queue", ke);
ke->ke_state = KES_IDLE;
ke->ke_thread = NULL;
TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
kg->kg_idle_kses++;
CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
return;
}

@@ -325,7 +238,7 @@ kse_reassign(struct kse *ke)
* Remove a thread from its KSEGRP's run queue.
* This in turn may remove it from a KSE if it was already assigned
* to one, possibly causing a new thread to be assigned to the KSE
* and the KSE getting a new priority (unless it's a BOUND thread/KSE pair).
* and the KSE getting a new priority.
*/
static void
remrunqueue(struct thread *td)
@@ -335,17 +248,16 @@ remrunqueue(struct thread *td)
struct kse *ke;

mtx_assert(&sched_lock, MA_OWNED);
KASSERT ((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
kg = td->td_ksegrp;
ke = td->td_kse;
/*
* If it's a bound thread/KSE pair, take the shortcut. All non-KSE
* threads are BOUND.
*/
CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
kg->kg_runnable--;
TD_SET_CAN_RUN(td);
if (TD_IS_BOUND(td)) {
/*
* If it is not a threaded process, take the shortcut.
*/
if ((td->td_proc->p_flag & P_KSES) == 0) {
/* Bring its kse with it, leave the thread attached */
sched_rem(ke);
ke->ke_state = KES_THREAD;
@@ -363,7 +275,7 @@ remrunqueue(struct thread *td)
sched_rem(ke);
ke->ke_state = KES_THREAD;
td2 = kg->kg_last_assigned;
KASSERT((td2 != NULL), ("last assigned has wrong value "));
KASSERT((td2 != NULL), ("last assigned has wrong value"));
if (td2 == td)
kg->kg_last_assigned = td3;
kse_reassign(ke);
@@ -381,14 +293,14 @@ adjustrunqueue( struct thread *td, int newpri)
struct kse *ke;

mtx_assert(&sched_lock, MA_OWNED);
KASSERT ((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
/*
* If it's a bound thread/KSE pair, take the shortcut. All non-KSE
* threads are BOUND.
*/
KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

ke = td->td_kse;
CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
if (TD_IS_BOUND(td)) {
/*
* If it is not a threaded process, take the shortcut.
*/
if ((td->td_proc->p_flag & P_KSES) == 0) {
/* We only care about the kse in the run queue. */
td->td_priority = newpri;
if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
@@ -397,9 +309,8 @@ adjustrunqueue( struct thread *td, int newpri)
}
return;
}
/*
* An unbound thread. This is not optimised yet.
*/

/* It is a threaded process */
kg = td->td_ksegrp;
kg->kg_runnable--;
TD_SET_CAN_RUN(td);
@@ -439,48 +350,17 @@ setrunqueue(struct thread *td)
sched_add(td->td_kse);
return;
}
/*
* If the process is threaded but the thread is bound then
* there is still a little extra to do re. KSE loaning.
*/
if (TD_IS_BOUND(td)) {
KASSERT((td->td_kse != NULL),
("queueing BAD thread to run queue"));
ke = td->td_kse;
KASSERT((ke->ke_owner == ke->ke_thread),
("setrunqueue: Hey KSE loaned out"));
if (ke->ke_flags & KEF_ONLOANQ) {
ke->ke_flags &= ~KEF_ONLOANQ;
TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
kg->kg_loan_kses--;
}
sched_add(td->td_kse);
return;
}

/*
* Ok, so we are threading with this thread.
* We don't have a KSE, see if we can get one..
*/
tda = kg->kg_last_assigned;
if ((ke = td->td_kse) == NULL) {
/*
* We will need a KSE, see if there is one..
* First look for a free one, before getting desperate.
* If we can't get one, our priority is not high enough..
* that's ok..
*/
if (kg->kg_loan_kses) {
if (kg->kg_idle_kses) {
/*
* Failing that see if we can borrow one.
* There is a free one so it's ours for the asking..
*/
ke = TAILQ_FIRST(&kg->kg_lq);
TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
ke->ke_flags &= ~KEF_ONLOANQ;
ke = TAILQ_FIRST(&kg->kg_iq);
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
ke->ke_state = KES_THREAD;
TD_SET_LOAN(ke->ke_owner);
ke->ke_thread = NULL;
kg->kg_loan_kses--;
kg->kg_idle_kses--;
} else if (tda && (tda->td_priority > td->td_priority)) {
/*
* None free, but there is one we can commandeer.
@@ -495,11 +375,7 @@ setrunqueue(struct thread *td)
} else {
/*
* Temporarily disassociate so it looks like the other cases.
* If the owner wasn't lending before, then it is now..
*/
if (!TD_LENDER(ke->ke_owner)) {
TD_SET_LOAN(ke->ke_owner);
}
ke->ke_thread = NULL;
td->td_kse = NULL;
}
@@ -831,6 +707,7 @@ thread_sanity_check(struct thread *td, char *string)
if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
panc(string, "where on earth does lastassigned point?");
}
#if 0
FOREACH_THREAD_IN_GROUP(kg, td2) {
if (((td2->td_flags & TDF_UNBOUND) == 0) &&
(TD_ON_RUNQ(td2))) {
@@ -840,6 +717,7 @@ thread_sanity_check(struct thread *td, char *string)
}
}
}
#endif
#if 0
if ((unassigned + assigned) != kg->kg_runnable) {
panc(string, "wrong number in runnable");
File diff suppressed because it is too large
@@ -254,7 +254,7 @@ static struct witness_order_list_entry order_lists[] = {
#endif
{ "clk", &lock_class_mtx_spin },
{ "mutex profiling lock", &lock_class_mtx_spin },
{ "zombie_thread_lock", &lock_class_mtx_spin },
{ "kse zombie lock", &lock_class_mtx_spin },
{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
{ "MCA spin lock", &lock_class_mtx_spin },
@@ -285,7 +285,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
}

void
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{
return;
@@ -285,7 +285,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
}

void
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{
return;
@@ -141,7 +141,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
}

void
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{
}
sys/sys/proc.h (110 lines changed)
@@ -258,7 +258,7 @@ They would be given priorities calculated from the KSEG.
* This is what is put to sleep and reactivated.
* The first KSE available in the correct group will run this thread.
* If several are available, use the one on the same CPU as last time.
* When wating to be run, threads are hung off the KSEGRP in priority order.
* When waiting to be run, threads are hung off the KSEGRP in priority order.
* with N runnable and queued KSEs in the KSEGRP, the first N threads
* are linked to them. Other threads are not yet assigned.
*/
@@ -298,8 +298,10 @@ struct thread {
struct ucred *td_ucred; /* (k) Reference to credentials. */
void (*td_switchin)(void); /* (k) Switchin special func. */
struct thread *td_standin; /* (?) Use this for an upcall */
struct kse_upcall *td_upcall; /* our upcall structure. */
u_int64_t td_sticks; /* (j) Statclock hits in system mode. */
u_int td_usticks; /* (?) Statclock kernel hits, for UTS */
u_int td_uuticks; /* Statclock hits in user, for UTS */
u_int td_usticks; /* Statclock hits in kernel, for UTS */
u_int td_critnest; /* (k) Critical section nest level. */
#define td_endzero td_base_pri

@@ -334,7 +336,6 @@ struct thread {
struct td_sched *td_sched; /* Scheduler specific data */
};
/* flags kept in td_flags */
#define TDF_UNBOUND 0x000001 /* May give away the kse, uses the kg runq. */
#define TDF_INPANIC 0x000002 /* Caused a panic, let it drive crashdump. */
#define TDF_CAN_UNBIND 0x000004 /* Only temporarily bound. */
#define TDF_SINTR 0x000008 /* Sleep is interruptible. */
@@ -346,6 +347,7 @@ struct thread {
#define TDF_INMSLEEP 0x000400 /* Don't recurse in msleep(). */
#define TDF_TIMOFAIL 0x001000 /* Timeout from sleep after we were awake. */
#define TDF_INTERRUPT 0x002000 /* Thread is marked as interrupted. */
#define TDF_USTATCLOCK 0x004000 /* Stat clock hits in userland. */
#define TDF_DEADLKTREAT 0x800000 /* Lock aquisition - deadlock treatment. */

#define TDI_SUSPENDED 0x0001 /* On suspension queue. */
@@ -353,25 +355,17 @@ struct thread {
#define TDI_SWAPPED 0x0004 /* Stack not in mem.. bad juju if run. */
#define TDI_LOCK 0x0008 /* Stopped on a lock. */
#define TDI_IWAIT 0x0010 /* Awaiting interrupt. */
#define TDI_LOAN 0x0020 /* bound thread's KSE is lent */
#define TDI_IDLE 0x0040 /* kse_release() made us surplus */
#define TDI_EXITING 0x0080 /* Thread is in exit processing */

#define TD_IS_UNBOUND(td) ((td)->td_flags & TDF_UNBOUND)
#define TD_IS_BOUND(td) (!TD_IS_UNBOUND(td))
#define TD_CAN_UNBIND(td) \
(((td)->td_flags & (TDF_UNBOUND|TDF_CAN_UNBIND)) == TDF_CAN_UNBIND)

#define TD_CAN_UNBIND(td) \
(((td)->td_flags & TDF_CAN_UNBIND) == TDF_CAN_UNBIND && \
((td)->td_upcall != NULL))

#define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING)
#define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL)
#define TD_IS_SUSPENDED(td) ((td)->td_inhibitors & TDI_SUSPENDED)
#define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED)
#define TD_ON_LOCK(td) ((td)->td_inhibitors & TDI_LOCK)
#define TD_LENDER(td) ((td)->td_inhibitors & TDI_LOAN)
#define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT)
#define TD_IS_IDLE(td) ((td)->td_inhibitors & TDI_IDLE)
#define TD_IS_EXITING(td) ((td)->td_inhibitors & TDI_EXITING)
#define TD_IS_RUNNING(td) ((td)->td_state == TDS_RUNNING)
#define TD_ON_RUNQ(td) ((td)->td_state == TDS_RUNQ)
#define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN)
@@ -379,12 +373,12 @@ struct thread {

#define TD_SET_INHIB(td, inhib) do { \
(td)->td_state = TDS_INHIBITED; \
(td)->td_inhibitors |= inhib; \
(td)->td_inhibitors |= (inhib); \
} while (0)

#define TD_CLR_INHIB(td, inhib) do { \
if (((td)->td_inhibitors & inhib) && \
(((td)->td_inhibitors &= ~inhib) == 0)) \
if (((td)->td_inhibitors & (inhib)) && \
(((td)->td_inhibitors &= ~(inhib)) == 0)) \
(td)->td_state = TDS_CAN_RUN; \
} while (0)

@@ -393,8 +387,6 @@ struct thread {
#define TD_SET_LOCK(td) TD_SET_INHIB((td), TDI_LOCK)
#define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED)
#define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT)
#define TD_SET_LOAN(td) TD_SET_INHIB((td), TDI_LOAN)
#define TD_SET_IDLE(td) TD_SET_INHIB((td), TDI_IDLE)
#define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING)

#define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING)
@@ -402,8 +394,6 @@ struct thread {
#define TD_CLR_LOCK(td) TD_CLR_INHIB((td), TDI_LOCK)
#define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED)
#define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT)
#define TD_CLR_LOAN(td) TD_CLR_INHIB((td), TDI_LOAN)
#define TD_CLR_IDLE(td) TD_CLR_INHIB((td), TDI_IDLE)

#define TD_SET_RUNNING(td) do {(td)->td_state = TDS_RUNNING; } while (0)
#define TD_SET_RUNQ(td) do {(td)->td_state = TDS_RUNQ; } while (0)
@@ -414,16 +404,6 @@ struct thread {
(td)->td_wchan = NULL; \
} while (0)

/*
* Traps for young players:
* The main thread variable that controls whether a thread acts as a threaded
* or unthreaded thread is the TDF_UNBOUND flag.
* i.e. they bind themselves to whatever thread thay are first scheduled with.
* You may see BOUND threads in KSE processes but you should never see
* UNBOUND threads in non KSE processes.
*/

/*
* The schedulable entity that can be given a context to run.
* A process may have several of these. Probably one per processor
@@ -441,48 +421,46 @@ struct kse {
#define ke_startzero ke_flags
int ke_flags; /* (j) KEF_* flags. */
struct thread *ke_thread; /* Active associated thread. */
struct thread *ke_owner; /* Always points to the owner */
fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
u_int ke_uuticks; /* Statclock hits in user, for UTS */
u_int ke_usticks; /* Statclock hits in kernel, for UTS */
u_char ke_oncpu; /* (j) Which cpu we are on. */
char ke_rqindex; /* (j) Run queue index. */
enum {
KES_UNUSED = 0x0,
KES_IDLE,
KES_ONRUNQ,
KES_UNQUEUED, /* in transit */
KES_THREAD /* slaved to thread state */
} ke_state; /* (j) S* process status. */
struct kse_mailbox *ke_mailbox; /* the userland mailbox address */
stack_t ke_stack;
void *ke_upcall;
struct thread *ke_tdspare; /* spare thread for upcalls */
#define ke_endzero ke_dummy
u_char ke_dummy;
struct ke_sched *ke_sched; /* Scheduler specific data */
};

/* flags kept in ke_flags */
#define KEF_OWEUPC 0x00002 /* Owe process an addupc() call at next ast. */
#define KEF_OWEUPC 0x008000 /* Owe thread an addupc() call at next ast. */
#define KEF_IDLEKSE 0x00004 /* A 'Per CPU idle process'.. has one thread */
#define KEF_LOANED 0x00008 /* On loan from the bound thread to another */
#define KEF_USER 0x00200 /* Process is not officially in the kernel */
#define KEF_ASTPENDING 0x00400 /* KSE has a pending ast. */
#define KEF_NEEDRESCHED 0x00800 /* Process needs to yield. */
#define KEF_ONLOANQ 0x01000 /* KSE is on loan queue. */
#define KEF_DIDRUN 0x02000 /* KSE actually ran. */
#define KEF_EXIT 0x04000 /* KSE is being killed. */
#define KEF_DOUPCALL 0x08000 /* KSE should do upcall now. */

/*
* (*) A bound KSE with a bound thread in a KSE process may be lent to
* Other threads, as long as those threads do not leave the kernel.
* The other threads must be either exiting, or be unbound with a valid
* mailbox so that they can save their state there rather than going
* to user space. While this happens the real bound thread is still linked
* to the kse via the ke_bound field, and the KSE has its "KEF_LOANED
* flag set.
* The upcall management structure.
* The upcall is used when returning to userland. If a thread does not have
* an upcall on return to userland the thread exports its context and exits.
*/
struct kse_upcall {
TAILQ_ENTRY(kse_upcall) ku_link; /* List of upcalls in KSEG. */
struct ksegrp *ku_ksegrp; /* Associated KSEG. */
struct thread *ku_owner; /* owning thread */
int ku_flags; /* KUF_* flags. */
struct kse_mailbox *ku_mailbox; /* userland mailbox address. */
stack_t ku_stack; /* userland upcall stack. */
void *ku_func; /* userland upcall function. */
};

#define KUF_DOUPCALL 0x00001 /* Do upcall now, don't wait */

/*
* Kernel-scheduled entity group (KSEG). The scheduler considers each KSEG to
@@ -493,18 +471,20 @@ struct ksegrp {
struct proc *kg_proc; /* Process that contains this KSEG. */
TAILQ_ENTRY(ksegrp) kg_ksegrp; /* Queue of KSEGs in kg_proc. */
TAILQ_HEAD(, kse) kg_kseq; /* (ke_kglist) All KSEs. */
TAILQ_HEAD(, kse) kg_lq; /* (ke_kgrlist) Loan KSEs. */
TAILQ_HEAD(, kse) kg_iq; /* (ke_kgrlist) All idle KSEs. */
TAILQ_HEAD(, thread) kg_threads;/* (td_kglist) All threads. */
TAILQ_HEAD(, thread) kg_runq; /* (td_runq) waiting RUNNABLE threads */
TAILQ_HEAD(, thread) kg_slpq; /* (td_runq) NONRUNNABLE threads. */

TAILQ_HEAD(, kse_upcall) kg_upcalls; /* All upcalls in the group */
#define kg_startzero kg_estcpu
u_int kg_estcpu; /* Sum of the same field in KSEs. */
u_int kg_slptime; /* (j) How long completely blocked. */
struct thread *kg_last_assigned; /* Last thread assigned to a KSE */
int kg_runnable; /* Num runnable threads on queue. */
int kg_runq_kses; /* Num KSEs on runq. */
int kg_loan_kses; /* Num KSEs on loan queue. */
struct thread *kg_last_assigned; /* (j) Last thread assigned to a KSE */
int kg_runnable; /* (j) Num runnable threads on queue. */
int kg_runq_kses; /* (j) Num KSEs on runq. */
int kg_idle_kses; /* (j) Num KSEs on iq */
int kg_numupcalls; /* (j) Num upcalls */
int kg_upsleeps; /* (c) Num threads in kse_release() */
struct kse_thr_mailbox *kg_completed; /* (c) completed thread mboxes */
#define kg_endzero kg_pri_class

@@ -513,8 +493,8 @@ struct ksegrp {
u_char kg_user_pri; /* (j) User pri from estcpu and nice. */
char kg_nice; /* (j?/k?) Process "nice" value. */
#define kg_endcopy kg_numthreads
int kg_numthreads; /* Num threads in total */
int kg_kses; /* Num KSEs in group. */
int kg_numthreads; /* (j) Num threads in total */
int kg_kses; /* (j) Num KSEs in group. */
struct kg_sched *kg_sched; /* Scheduler specific data */
};

@@ -709,6 +689,8 @@ MALLOC_DECLARE(M_ZOMBIE);
TAILQ_FOREACH((td), &(kg)->kg_threads, td_kglist)
#define FOREACH_KSE_IN_GROUP(kg, ke) \
TAILQ_FOREACH((ke), &(kg)->kg_kseq, ke_kglist)
#define FOREACH_UPCALL_IN_GROUP(kg, ku) \
TAILQ_FOREACH((ku), &(kg)->kg_upcalls, ku_link)
#define FOREACH_THREAD_IN_PROC(p, td) \
TAILQ_FOREACH((td), &(p)->p_threads, td_plist)

@@ -923,7 +905,7 @@ struct kse *kse_alloc(void);
void kse_free(struct kse *ke);
void kse_stash(struct kse *ke);
void cpu_set_upcall(struct thread *td, void *pcb);
void cpu_set_upcall_kse(struct thread *td, struct kse *ke);
void cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku);
void cpu_thread_clean(struct thread *);
void cpu_thread_exit(struct thread *);
void cpu_thread_setup(struct thread *td);
@@ -932,7 +914,6 @@ void kse_link(struct kse *ke, struct ksegrp *kg);
void kse_unlink(struct kse *ke);
void ksegrp_link(struct ksegrp *kg, struct proc *p);
void ksegrp_unlink(struct ksegrp *kg);
void make_kse_runnable(struct kse *ke);
struct thread *signal_upcall(struct proc *p, int sig);
struct thread *thread_alloc(void);
void thread_exit(void) __dead2;
@@ -941,7 +922,7 @@ void thread_free(struct thread *td);
void thread_getcontext(struct thread *td, ucontext_t *uc);
void thread_link(struct thread *td, struct ksegrp *kg);
void thread_reap(void);
struct thread *thread_schedule_upcall(struct thread *td, struct kse *ke);
struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
int thread_setcontext(struct thread *td, ucontext_t *uc);
int thread_single(int how);
#define SINGLE_NO_EXIT 0 /* values for 'how' */
@@ -955,8 +936,13 @@ void thread_unsuspend_one(struct thread *td);
int thread_userret(struct thread *td, struct trapframe *frame);
void thread_user_enter(struct proc *p, struct thread *td);
void thread_wait(struct proc *p);
int thread_add_ticks_intr(int user, uint ticks);

int thread_statclock(int user);
struct kse_upcall *upcall_alloc(void);
void upcall_free(struct kse_upcall *ku);
void upcall_link(struct kse_upcall *ku, struct ksegrp *kg);
void upcall_unlink(struct kse_upcall *ku);
void upcall_remove(struct thread *td);
void upcall_stash(struct kse_upcall *ke);
void thread_sanity_check(struct thread *td, char *);
#endif /* _KERNEL */
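As an illustrative footnote (not part of the commit): with the declarations above, an upcall is allocated and linked onto its ksegrp with the new upcall_*() helpers, is owned by one thread via td_upcall/ku_owner, and the group's upcalls can be walked with FOREACH_UPCALL_IN_GROUP. A hedged sketch of that usage; the function name example_add_upcall is hypothetical, and locking, error handling, and filling in the mailbox/stack/func fields are omitted:

static void
example_add_upcall(struct thread *td, struct ksegrp *kg)
{
	struct kse_upcall *ku, *ku2;

	ku = upcall_alloc();		/* allocate an upcall structure */
	ku->ku_owner = td;		/* this thread owns it */
	td->td_upcall = ku;
	upcall_link(ku, kg);		/* put it on kg->kg_upcalls */

	FOREACH_UPCALL_IN_GROUP(kg, ku2)
		printf("upcall %p owned by td %p\n", ku2, ku2->ku_owner);
}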