Add code to ddb to allow backtracing an arbitrary thread.

(show thread {address})
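For example, from the ddb prompt (the address below is purely illustrative;
any in-kernel struct thread pointer may be given):

	db> show thread 0xc6a8e780

This dumps the thread's state via the new dumpthread() helper and, on i386,
follows it with a backtrace of that thread via db_stack_thread().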

Remove the IDLE kse state and replace it with a change in
the way threads share KSEs. Every KSE now has a thread, which is
considered its "owner"; however, a KSE may also be lent to other
threads in the same group to allow completion of in-kernel work.
In this case the owner remains the same and the KSE will revert to the
owner when the other work has been completed.
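A rough sketch of the reversion path, condensed from the kse_reassign()
hunk below (locking, assertions and the run-queue scan are omitted):

	/*
	 * The borrowing thread has blocked or exited; hand the KSE
	 * back to its owner if the owner can use it right now.
	 */
	TD_CLR_LOAN(owner);		/* owner is no longer lending */
	ke->ke_thread = owner;		/* KSE reverts to its owning thread */
	if (TD_CAN_RUN(owner) &&
	    ((owner->td_flags & TDF_UPCALLING) == 0))
		setrunnable(owner);	/* let the owner continue where it was */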

All creation of upcalls, etc. is now done from
kse_reassign(), which in turn is called from mi_switch() or
thread_exit(). This means that special code can be removed from
msleep() and cv_wait().

kse_release() no longer leaves a KSE with no thread; instead it
converts the existing thread into the KSE's owner and sets it up
for doing an upcall. It is simply inhibited from being scheduled until
there is some reason to do an upcall.
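Condensed, the new kse_release() path looks roughly like this (argument and
mailbox checks plus the PROC_LOCK handling are elided; see the kern_thread.c
and kern_kse.c hunks below for the real thing):

	mtx_lock_spin(&sched_lock);
	td->td_flags = TDF_UPCALLING;	/* change OURSELF to become an upcall */
	if (kg->kg_completed == NULL) {
		/* Nothing to report yet: idle here, but the KSE stays lendable. */
		TD_SET_IDLE(td);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();	/* sleep until kse_wakeup() etc. makes us runnable */
	}
	mtx_unlock_spin(&sched_lock);
	return (0);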

Remove all trace of the kse_idle queue since it is no longer needed.
"Idle" KSEs are now on the loanable queue.
Julian Elischer 2002-12-28 01:23:07 +00:00
parent 84cdcd85a0
commit 93a7aa79d6
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=108338
16 changed files with 927 additions and 699 deletions

View File

@ -107,6 +107,8 @@ static void db_nextframe(struct i386_frame **, db_addr_t *, struct proc *);
static int db_numargs(struct i386_frame *);
static void db_print_stack_entry(const char *, int, char **, int *, db_addr_t);
static void decode_syscall(int, struct proc *);
static void db_trace_one_stack(int count, boolean_t have_addr,
struct proc *p, struct i386_frame *frame, db_addr_t callpc);
static char * watchtype_str(int type);
@ -284,14 +286,12 @@ db_stack_trace_cmd(addr, have_addr, count, modif)
db_expr_t count;
char *modif;
{
int *argp;
struct i386_frame *frame;
struct proc *p;
struct pcb *pcb;
struct thread *td;
db_addr_t callpc;
pid_t pid;
boolean_t first;
if (count == -1)
count = 1024;
@ -348,6 +348,54 @@ db_stack_trace_cmd(addr, have_addr, count, modif)
callpc = (db_addr_t)db_get_value((int)&frame->f_retaddr, 4, FALSE);
frame = frame->f_frame;
}
db_trace_one_stack(count, have_addr, p, frame, callpc);
}
void
db_stack_thread(db_expr_t addr, boolean_t have_addr,
db_expr_t count, char *modif)
{
struct i386_frame *frame;
struct thread *td;
struct proc *p;
struct pcb *pcb;
db_addr_t callpc;
if (!have_addr)
return;
if (!INKERNEL(addr)) {
printf("bad thread address");
return;
}
td = (struct thread *)addr;
/* quick sanity check */
if ((p = td->td_proc) != td->td_ksegrp->kg_proc)
return;
if (TD_IS_SWAPPED(td)) {
db_printf("thread at %p swapped out\n", td);
return;
}
if (td == curthread) {
frame = (struct i386_frame *)ddb_regs.tf_ebp;
if (frame == NULL)
frame = (struct i386_frame *)(ddb_regs.tf_esp - 4);
callpc = (db_addr_t)ddb_regs.tf_eip;
} else {
pcb = td->td_pcb;
frame = (struct i386_frame *)pcb->pcb_ebp;
if (frame == NULL)
frame = (struct i386_frame *) (pcb->pcb_esp - 4);
callpc = (db_addr_t)pcb->pcb_eip;
}
db_trace_one_stack(count, have_addr, p, frame, callpc);
}
static void
db_trace_one_stack(int count, boolean_t have_addr,
struct proc *p, struct i386_frame *frame, db_addr_t callpc)
{
int *argp;
boolean_t first;
first = TRUE;
while (count--) {

View File

@ -383,9 +383,7 @@ static struct command db_show_cmds[] = {
{ "all", 0, 0, db_show_all_cmds },
{ "registers", db_show_regs, 0, 0 },
{ "breaks", db_listbreak_cmd, 0, 0 },
#if 0
{ "thread", db_show_one_thread, 0, 0 },
#endif
#if 0
{ "port", ipc_port_print, 0, 0 },
#endif

View File

@ -38,8 +38,13 @@
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/cons.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <ddb/ddb.h>
static void
dumpthread(volatile struct proc *p, volatile struct thread *td);
void
db_ps(dummy1, dummy2, dummy3, dummy4)
@ -123,58 +128,7 @@ db_ps(dummy1, dummy2, dummy3, dummy4)
if (p->p_flag & P_KSES)
db_printf("(threaded) %s\n", p->p_comm);
FOREACH_THREAD_IN_PROC(p, td) {
if (p->p_flag & P_KSES)
db_printf( " thread %p ksegrp %p ", td, td->td_ksegrp);
if (TD_ON_SLEEPQ(td)) {
if (td->td_flags & TDF_CVWAITQ)
db_printf("[CVQ ");
else
db_printf("[SLPQ ");
db_printf(" %6s %8p]", td->td_wmesg,
(void *)td->td_wchan);
}
switch (td->td_state) {
case TDS_INHIBITED:
if (TD_ON_LOCK(td)) {
db_printf("[LOCK %6s %8p]",
td->td_lockname,
(void *)td->td_blocked);
}
if (TD_IS_SLEEPING(td)) {
db_printf("[SLP]");
}
if (TD_IS_SWAPPED(td)) {
db_printf("[SWAP]");
}
if (TD_IS_SUSPENDED(td)) {
db_printf("[SUSP]");
}
if (TD_AWAITING_INTR(td)) {
db_printf("[IWAIT]");
}
if (TD_LENT(td)) {
db_printf("[LOAN]");
}
break;
case TDS_CAN_RUN:
db_printf("[Can run]");
break;
case TDS_RUNQ:
db_printf("[RUNQ]");
break;
case TDS_RUNNING:
db_printf("[CPU %d]", td->td_kse->ke_oncpu);
break;
default:
panic("unknown thread state");
}
if (p->p_flag & P_KSES) {
if (td->td_kse)
db_printf("[kse %p]", td->td_kse);
db_printf("\n");
} else
db_printf(" %s\n", p->p_comm);
dumpthread(p, td);
}
/* PROC_UNLOCK(p); */
@ -184,3 +138,90 @@ db_ps(dummy1, dummy2, dummy3, dummy4)
}
/* sx_sunlock(&allproc_lock); */
}
static void
dumpthread(volatile struct proc *p, volatile struct thread *td)
{
if (p->p_flag & P_KSES)
db_printf( " thread %p ksegrp %p ", td, td->td_ksegrp);
if (TD_ON_SLEEPQ(td)) {
if (td->td_flags & TDF_CVWAITQ)
db_printf("[CVQ ");
else
db_printf("[SLPQ ");
db_printf(" %6s %8p]", td->td_wmesg,
(void *)td->td_wchan);
}
switch (td->td_state) {
case TDS_INHIBITED:
if (TD_ON_LOCK(td)) {
db_printf("[LOCK %6s %8p]",
td->td_lockname,
(void *)td->td_blocked);
}
if (TD_IS_SLEEPING(td)) {
db_printf("[SLP]");
}
if (TD_IS_SWAPPED(td)) {
db_printf("[SWAP]");
}
if (TD_IS_SUSPENDED(td)) {
db_printf("[SUSP]");
}
if (TD_AWAITING_INTR(td)) {
db_printf("[IWAIT]");
}
if (TD_LENDER(td)) {
db_printf("[LOAN]");
}
if (TD_IS_IDLE(td)) {
db_printf("[IDLE]");
}
if (TD_IS_EXITING(td)) {
db_printf("[EXIT]");
}
break;
case TDS_CAN_RUN:
db_printf("[Can run]");
break;
case TDS_RUNQ:
db_printf("[RUNQ]");
break;
case TDS_RUNNING:
db_printf("[CPU %d]", td->td_kse->ke_oncpu);
break;
default:
panic("unknown thread state");
}
if (p->p_flag & P_KSES) {
if (td->td_kse)
db_printf("[kse %p]", td->td_kse);
db_printf("\n");
} else
db_printf(" %s\n", p->p_comm);
}
#define INKERNEL(va) (((vm_offset_t)(va)) >= USRSTACK)
void
db_show_one_thread(db_expr_t addr, boolean_t have_addr,
db_expr_t count, char *modif)
{
struct proc *p;
struct thread *td;
if (!have_addr)
td = curthread;
else if (!INKERNEL(addr)) {
printf("bad thread address");
return;
} else
td = (struct thread *)addr;
/* quick sanity check */
if ((p = td->td_proc) != td->td_ksegrp->kg_proc)
return;
printf("Proc %p ",p);
dumpthread(p, td);
#ifdef __i386__
db_stack_thread((db_expr_t)td, 1, count, modif);
#endif
}

View File

@ -104,6 +104,8 @@ void db_trap(int type, int code);
int db_value_of_name(const char *name, db_expr_t *valuep);
void db_write_bytes(vm_offset_t addr, size_t size, char *data);
/* machine-dependent */
void db_stack_thread(db_expr_t addr, boolean_t have_addr,
db_expr_t count, char *modif);
void kdb_init(void);
db_cmdfcn_t db_breakpoint_cmd;
@ -125,11 +127,11 @@ db_cmdfcn_t db_trace_until_call_cmd;
db_cmdfcn_t db_trace_until_matching_cmd;
db_cmdfcn_t db_watchpoint_cmd;
db_cmdfcn_t db_write_cmd;
db_cmdfcn_t db_show_one_thread;
#if 0
db_cmdfcn_t db_help_cmd;
db_cmdfcn_t db_show_all_threads;
db_cmdfcn_t db_show_one_thread;
db_cmdfcn_t ipc_port_print;
db_cmdfcn_t vm_page_print;
#endif

View File

@ -107,6 +107,8 @@ static void db_nextframe(struct i386_frame **, db_addr_t *, struct proc *);
static int db_numargs(struct i386_frame *);
static void db_print_stack_entry(const char *, int, char **, int *, db_addr_t);
static void decode_syscall(int, struct proc *);
static void db_trace_one_stack(int count, boolean_t have_addr,
struct proc *p, struct i386_frame *frame, db_addr_t callpc);
static char * watchtype_str(int type);
@ -284,14 +286,12 @@ db_stack_trace_cmd(addr, have_addr, count, modif)
db_expr_t count;
char *modif;
{
int *argp;
struct i386_frame *frame;
struct proc *p;
struct pcb *pcb;
struct thread *td;
db_addr_t callpc;
pid_t pid;
boolean_t first;
if (count == -1)
count = 1024;
@ -348,6 +348,54 @@ db_stack_trace_cmd(addr, have_addr, count, modif)
callpc = (db_addr_t)db_get_value((int)&frame->f_retaddr, 4, FALSE);
frame = frame->f_frame;
}
db_trace_one_stack(count, have_addr, p, frame, callpc);
}
void
db_stack_thread(db_expr_t addr, boolean_t have_addr,
db_expr_t count, char *modif)
{
struct i386_frame *frame;
struct thread *td;
struct proc *p;
struct pcb *pcb;
db_addr_t callpc;
if (!have_addr)
return;
if (!INKERNEL(addr)) {
printf("bad thread address");
return;
}
td = (struct thread *)addr;
/* quick sanity check */
if ((p = td->td_proc) != td->td_ksegrp->kg_proc)
return;
if (TD_IS_SWAPPED(td)) {
db_printf("thread at %p swapped out\n", td);
return;
}
if (td == curthread) {
frame = (struct i386_frame *)ddb_regs.tf_ebp;
if (frame == NULL)
frame = (struct i386_frame *)(ddb_regs.tf_esp - 4);
callpc = (db_addr_t)ddb_regs.tf_eip;
} else {
pcb = td->td_pcb;
frame = (struct i386_frame *)pcb->pcb_ebp;
if (frame == NULL)
frame = (struct i386_frame *) (pcb->pcb_esp - 4);
callpc = (db_addr_t)pcb->pcb_eip;
}
db_trace_one_stack(count, have_addr, p, frame, callpc);
}
static void
db_trace_one_stack(int count, boolean_t have_addr,
struct proc *p, struct i386_frame *frame, db_addr_t callpc)
{
int *argp;
boolean_t first;
first = TRUE;
while (count--) {

View File

@ -380,6 +380,7 @@ proc0_init(void *dummy __unused)
ke->ke_oncpu = 0;
ke->ke_state = KES_THREAD;
ke->ke_thread = td;
ke->ke_owner = td;
p->p_peers = 0;
p->p_leader = p;

View File

@ -116,26 +116,6 @@ cv_destroy(struct cv *cvp)
static __inline void
cv_switch(struct thread *td)
{
/*
* If we are capable of async syscalls and there isn't already
* another one ready to return, start a new thread
* and queue it as ready to run. Note that there is danger here
* because we need to make sure that we don't sleep allocating
* the thread (recursion here might be bad).
* Hence the TDF_INMSLEEP flag.
*/
if ((td->td_flags & (TDF_UNBOUND|TDF_INMSLEEP)) == TDF_UNBOUND) {
/*
* We don't need to upcall now, just queue it.
* The upcall will happen when other in-kernel work
* in this KSEGRP has completed.
* Don't recurse here!
*/
td->td_flags |= TDF_INMSLEEP;
thread_schedule_upcall(td, td->td_kse);
td->td_flags &= ~TDF_INMSLEEP;
}
TD_SET_SLEEPING(td);
td->td_proc->p_stats->p_ru.ru_nvcsw++;
mi_switch();

View File

@ -492,6 +492,7 @@ fork1(td, flags, pages, procp)
/* Set up the thread as an active thread (as if runnable). */
ke2->ke_state = KES_THREAD;
ke2->ke_thread = td2;
ke2->ke_owner = td2;
td2->td_kse = ke2;
td2->td_flags &= ~TDF_UNBOUND; /* For the rest of this syscall. */

View File

@ -197,6 +197,7 @@ kse_link(struct kse *ke, struct ksegrp *kg)
ke->ke_state = KES_UNQUEUED;
ke->ke_proc = p;
ke->ke_ksegrp = kg;
ke->ke_owner = NULL;
ke->ke_thread = NULL;
ke->ke_oncpu = NOCPU;
}
@ -208,10 +209,6 @@ kse_unlink(struct kse *ke)
mtx_assert(&sched_lock, MA_OWNED);
kg = ke->ke_ksegrp;
if (ke->ke_state == KES_IDLE) {
kg->kg_idle_kses--;
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
}
TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
if (--kg->kg_kses == 0) {
@ -231,14 +228,12 @@ ksegrp_link(struct ksegrp *kg, struct proc *p)
TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */
TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */
TAILQ_INIT(&kg->kg_iq); /* idle kses in ksegrp */
TAILQ_INIT(&kg->kg_lq); /* loan kses in ksegrp */
kg->kg_proc = p;
/* the following counters are in the -zero- section and may not need clearing */
kg->kg_numthreads = 0;
kg->kg_runnable = 0;
kg->kg_kses = 0;
kg->kg_idle_kses = 0;
kg->kg_loan_kses = 0;
kg->kg_runq_kses = 0; /* XXXKSE change name */
/* link it in now that it's consistent */
@ -351,7 +346,8 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
}
/*
* Either returns as an upcall or exits
* Either becomes an upcall or waits for an awakening event and
* THEN becomes an upcall. Only error cases return.
*/
int
kse_release(struct thread * td, struct kse_release_args * uap)
@ -369,30 +365,24 @@ kse_release(struct thread * td, struct kse_release_args * uap)
(td->td_flags & TDF_UNBOUND) ||
(td->td_kse->ke_mailbox == NULL))
return (EINVAL);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
/* Change OURSELF to become an upcall. */
td->td_flags = TDF_UPCALLING; /* BOUND */
if (kg->kg_completed == NULL) {
#if 1 /* temp until signals make new threads */
if (p->p_numthreads == 1) {
/* change OURSELF to become an upcall */
td->td_flags = TDF_UPCALLING;
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
/*
* msleep will not call thread_sched_upcall
* because thread is not UNBOUND.
*/
msleep(p->p_sigacts, NULL,
PPAUSE | PCATCH, "ksepause", 0);
return (0);
}
#endif /* end temp */
thread_exit();
/* XXXKSE also look for waiting signals etc. */
/*
* The KSE will however be lendable.
*/
mtx_lock_spin(&sched_lock);
TD_SET_IDLE(td);
PROC_UNLOCK(p);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
mtx_unlock_spin(&sched_lock);
} else {
PROC_UNLOCK(p);
}
/* change OURSELF to become an upcall */
td->td_flags = TDF_UPCALLING;
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
return (0);
}
@ -403,45 +393,62 @@ int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
struct proc *p;
struct kse *ke, *ke2;
struct kse *ke;
struct ksegrp *kg;
struct thread *td2;
p = td->td_proc;
td2 = NULL;
/* KSE-enabled processes only, please. */
if (!(p->p_flag & P_KSES))
return EINVAL;
ke = NULL;
mtx_lock_spin(&sched_lock);
PROC_LOCK(p);
if (uap->mbx) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
FOREACH_KSE_IN_GROUP(kg, ke2) {
if (ke2->ke_mailbox != uap->mbx)
FOREACH_KSE_IN_GROUP(kg, ke) {
if (ke->ke_mailbox != uap->mbx)
continue;
if (ke2->ke_state == KES_IDLE) {
ke = ke2;
goto found;
} else {
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = 0;
td->td_retval[1] = 0;
td2 = ke->ke_owner ;
KASSERT((td2 != NULL),("KSE with no owner"));
if (!TD_IS_IDLE(td2)) {
/* Return silently if no longer idle */
PROC_UNLOCK(p);
td->td_retval[0] = 0;
td->td_retval[1] = 0;
return (0);
}
break;
}
if (td2) {
break;
}
}
} else {
/*
* look for any idle KSE to resurrect.
*/
kg = td->td_ksegrp;
ke = TAILQ_FIRST(&kg->kg_iq);
mtx_lock_spin(&sched_lock);
FOREACH_KSE_IN_GROUP(kg, ke) {
td2 = ke->ke_owner;
KASSERT((td2 != NULL),("KSE with no owner2"));
if (TD_IS_IDLE(td2))
break;
}
}
if (ke == NULL) {
if (td2) {
mtx_lock_spin(&sched_lock);
PROC_UNLOCK(p);
TD_CLR_IDLE(td2);
setrunnable(td2);
mtx_unlock_spin(&sched_lock);
return (ESRCH);
}
found:
thread_schedule_upcall(td, ke);
td->td_retval[0] = 0;
td->td_retval[1] = 0;
return (0);
}
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = 0;
td->td_retval[1] = 0;
return (0);
PROC_UNLOCK(p);
return (ESRCH);
}
/*
@ -810,17 +817,14 @@ thread_export_context(struct thread *td)
addr = (void *)(&td->td_mailbox->tm_context);
#endif
error = copyin(addr, &uc, sizeof(ucontext_t));
if (error == 0) {
thread_getcontext(td, &uc);
error = copyout(&uc, addr, sizeof(ucontext_t));
if (error)
goto bad;
thread_getcontext(td, &uc);
error = copyout(&uc, addr, sizeof(ucontext_t));
if (error)
goto bad;
}
if (error) {
PROC_LOCK(p);
psignal(p, SIGSEGV);
PROC_UNLOCK(p);
return (error);
}
/* get address in latest mbox of list pointer */
#if 0
addr = (caddr_t)td->td_mailbox
@ -835,6 +839,7 @@ thread_export_context(struct thread *td)
for (;;) {
mbx = (uintptr_t)kg->kg_completed;
if (suword(addr, mbx)) {
error = EFAULT;
goto bad;
}
PROC_LOCK(p);
@ -856,7 +861,7 @@ thread_export_context(struct thread *td)
PROC_LOCK(p);
psignal(p, SIGSEGV);
PROC_UNLOCK(p);
return (EFAULT);
return (error);
}
/*
@ -930,8 +935,6 @@ thread_update_uticks(void)
caddr_t addr;
uint uticks, sticks;
KASSERT(!(td->td_flags & TDF_UNBOUND), ("thread not bound."));
if (ke->ke_mailbox == NULL)
return 0;
@ -939,8 +942,12 @@ thread_update_uticks(void)
ke->ke_uuticks = 0;
sticks = ke->ke_usticks;
ke->ke_usticks = 0;
#if 0
tmbx = (void *)fuword((caddr_t)ke->ke_mailbox
+ offsetof(struct kse_mailbox, km_curthread));
+ offsetof(struct kse_mailbox, km_curthread));
#else /* if user pointer arithmetic is ok in the kernel */
tmbx = (void *)fuword( (void *)&ke->ke_mailbox->km_curthread);
#endif
if ((tmbx == NULL) || (tmbx == (void *)-1))
return 0;
if (uticks) {
@ -1028,18 +1035,21 @@ thread_exit(void)
}
/* Reassign this thread's KSE. */
ke->ke_thread = NULL;
td->td_kse = NULL;
ke->ke_state = KES_UNQUEUED;
KASSERT((ke->ke_bound != td),
("thread_exit: entered with ke_bound set"));
/*
* decide what to do with the KSE attached to this thread.
* Decide what to do with the KSE attached to this thread.
* XXX Possibly kse_reassign should do both cases as it already
* does some of this.
*/
if (ke->ke_flags & KEF_EXIT) {
KASSERT((ke->ke_owner == td),
("thread_exit: KSE exiting with non-owner thread"));
ke->ke_thread = NULL;
td->td_kse = NULL;
kse_unlink(ke);
} else {
TD_SET_EXITING(td); /* definitely not runnable */
kse_reassign(ke);
}
PROC_UNLOCK(p);
@ -1107,19 +1117,13 @@ thread_link(struct thread *td, struct ksegrp *kg)
void
kse_purge(struct proc *p, struct thread *td)
{
struct kse *ke;
/* XXXKSE think about this..
may need to wake up threads on loan queue. */
struct ksegrp *kg;
KASSERT(p->p_numthreads == 1, ("bad thread number"));
mtx_lock_spin(&sched_lock);
while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
kg->kg_idle_kses--;
TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
kg->kg_kses--;
kse_stash(ke);
}
TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
p->p_numksegrps--;
KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
@ -1137,38 +1141,28 @@ kse_purge(struct proc *p, struct thread *td)
/*
* Create a thread and schedule it for upcall on the KSE given.
* Use our thread's standin so that we don't have to allocate one.
*/
struct thread *
thread_schedule_upcall(struct thread *td, struct kse *ke)
{
struct thread *td2;
struct ksegrp *kg;
int newkse;
mtx_assert(&sched_lock, MA_OWNED);
newkse = (ke != td->td_kse);
/*
* If the kse is already owned by another thread then we can't
* schedule an upcall because the other thread must be BOUND
* which means it is not in a position to take an upcall.
* We must be borrowing the KSE to allow us to complete some in-kernel
* work. When we complete, the Bound thread will have the chance to
* If the owner and kse are BOUND then that thread is planning to
* go to userland and upcalls are not expected. So don't make one.
* If it is not bound then make it so with the spare thread
* and then borrow back the KSE to allow us to complete some in-kernel
* work. When we complete, the Bound thread will have the chance to
* complete. This thread will sleep as planned. Hopefully there will
* eventually be an unbound thread that can be converted to an
* upcall to report the completion of this thread.
*/
if (ke->ke_bound && ((ke->ke_bound->td_flags & TDF_UNBOUND) == 0)) {
return (NULL);
}
KASSERT((ke->ke_bound == NULL), ("kse already bound"));
if (ke->ke_state == KES_IDLE) {
kg = ke->ke_ksegrp;
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
kg->kg_idle_kses--;
ke->ke_state = KES_UNQUEUED;
}
if ((td2 = td->td_standin) != NULL) {
td->td_standin = NULL;
} else {
@ -1206,8 +1200,9 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
td2->td_kse = ke;
td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
ke->ke_owner = td2;
/*
* If called from msleep(), we are working on the current
* If called from kse_reassign(), we are working on the current
* KSE so fake that we borrowed it. If called from
* kse_create(), don't, as we have a new kse too.
*/
@ -1220,10 +1215,8 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
* from msleep() this is going to be "very soon" in nearly
* all cases.
*/
ke->ke_bound = td2;
TD_SET_LOAN(td2);
} else {
ke->ke_bound = NULL;
ke->ke_thread = td2;
ke->ke_state = KES_THREAD;
setrunqueue(td2);
@ -1292,10 +1285,11 @@ thread_user_enter(struct proc *p, struct thread *td)
/*
* If we are doing a syscall in a KSE environment,
* note where our mailbox is. There is always the
* possibility that we could do this lazily (in sleep()),
* possibility that we could do this lazily (in kse_reassign()),
* but for now do it every time.
*/
ke = td->td_kse;
td->td_flags &= ~TDF_UNBOUND;
if (ke->ke_mailbox != NULL) {
#if 0
td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox
@ -1308,7 +1302,7 @@ thread_user_enter(struct proc *p, struct thread *td)
(td->td_mailbox == (void *)-1)) {
td->td_mailbox = NULL; /* single thread it.. */
mtx_lock_spin(&sched_lock);
td->td_flags &= ~TDF_UNBOUND;
td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
mtx_unlock_spin(&sched_lock);
} else {
/*
@ -1324,8 +1318,11 @@ thread_user_enter(struct proc *p, struct thread *td)
td->td_standin = thread_alloc();
}
mtx_lock_spin(&sched_lock);
td->td_flags |= TDF_UNBOUND;
td->td_flags |= TDF_CAN_UNBIND;
mtx_unlock_spin(&sched_lock);
KASSERT((ke->ke_owner == td),
("thread_user_enter: No starting owner "));
ke->ke_owner = td;
td->td_usticks = 0;
}
}
@ -1350,122 +1347,204 @@ thread_userret(struct thread *td, struct trapframe *frame)
int unbound;
struct kse *ke;
struct ksegrp *kg;
struct thread *td2;
struct thread *worktodo;
struct proc *p;
struct timespec ts;
error = 0;
KASSERT((td->td_kse && td->td_kse->ke_thread && td->td_kse->ke_owner),
("thread_userret: bad thread/kse pointers"));
KASSERT((td == curthread),
("thread_userret: bad thread argument"));
unbound = td->td_flags & TDF_UNBOUND;
kg = td->td_ksegrp;
p = td->td_proc;
error = 0;
unbound = TD_IS_UNBOUND(td);
mtx_lock_spin(&sched_lock);
if ((worktodo = kg->kg_last_assigned))
worktodo = TAILQ_NEXT(worktodo, td_runq);
else
worktodo = TAILQ_FIRST(&kg->kg_runq);
/*
* Originally bound threads never upcall but they may
* Permanently bound threads never upcall but they may
* loan out their KSE at this point.
* Upcalls imply bound.. They also may want to do some Philanthropy.
* Unbound threads on the other hand either yield to other work
* or transform into an upcall.
* (having saved their context to user space in both cases)
* Temporarily bound threads on the other hand either yield
* to other work and transform into an upcall, or proceed back to
* userland.
*/
if (unbound) {
/*
* We are an unbound thread, looking to return to
* user space.
* There are several possibilities:
* 1) we are using a borrowed KSE. save state and exit.
* kse_reassign() will recycle the kse as needed,
* 2) we are not.. save state, and then convert ourself
* to be an upcall, bound to the KSE.
* if there are others that need the kse,
* give them a chance by doing an mi_switch().
* Because we are bound, control will eventually return
* to us here.
* ***
* Save the thread's context, and link it
* into the KSEGRP's list of completed threads.
*/
if (TD_CAN_UNBIND(td)) {
td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
if (!worktodo && (kg->kg_completed == NULL)) {
/*
* This thread has not started any upcall.
* If there is no work to report other than
* ourself, then it can return direct to userland.
*/
justreturn:
mtx_unlock_spin(&sched_lock);
thread_update_uticks();
td->td_mailbox = NULL;
return (0);
}
mtx_unlock_spin(&sched_lock);
error = thread_export_context(td);
td->td_mailbox = NULL;
td->td_usticks = 0;
if (error) {
/*
* If we are not running on a borrowed KSE, then
* As we are not running on a borrowed KSE,
* failing to do the KSE operation just defaults
* back to synchronous operation, so just return from
* the syscall. If it IS borrowed, there is nothing
* we can do. We just lose that context. We
* the syscall.
*/
goto justreturn;
}
mtx_lock_spin(&sched_lock);
/*
* Turn ourself into a bound upcall.
* We will rely on kse_reassign()
* to make us run at a later time.
*/
td->td_flags |= TDF_UPCALLING;
/* there may be more work since we re-locked schedlock */
if ((worktodo = kg->kg_last_assigned))
worktodo = TAILQ_NEXT(worktodo, td_runq);
else
worktodo = TAILQ_FIRST(&kg->kg_runq);
} else if (unbound) {
/*
* We are an unbound thread, looking to
* return to user space. There must be another owner
* of this KSE.
* We are using a borrowed KSE. save state and exit.
* kse_reassign() will recycle the kse as needed,
*/
mtx_unlock_spin(&sched_lock);
error = thread_export_context(td);
td->td_usticks = 0;
if (error) {
/*
* There is nothing we can do.
* We just lose that context. We
* probably should note this somewhere and send
* the process a signal.
*/
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGSEGV);
mtx_lock_spin(&sched_lock);
if (td->td_kse->ke_bound == NULL) {
td->td_flags &= ~TDF_UNBOUND;
PROC_UNLOCK(td->td_proc);
mtx_unlock_spin(&sched_lock);
thread_update_uticks();
return (error); /* go sync */
ke = td->td_kse;
/* possibly upcall with error? */
} else {
/*
* Don't make an upcall, just exit so that the owner
* can get its KSE if it wants it.
* Our context is already safely stored for later
* use by the UTS.
*/
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
ke = td->td_kse;
}
/*
* If the owner is idling, we now have something for it
* to report, so make it runnable.
* If the owner is not an upcall, make an attempt to
* ensure that at least one of any IDLED upcalls can
* wake up.
*/
if (ke->ke_owner->td_flags & TDF_UPCALLING) {
TD_CLR_IDLE(ke->ke_owner);
} else {
FOREACH_KSE_IN_GROUP(kg, ke) {
if (TD_IS_IDLE(ke->ke_owner)) {
TD_CLR_IDLE(ke->ke_owner);
}
}
thread_exit();
}
/*
* if the KSE is owned and we are borrowing it,
* don't make an upcall, just exit so that the owner
* can get its KSE if it wants it.
* Our context is already safely stored for later
* use by the UTS.
*/
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if (td->td_kse->ke_bound) {
thread_exit();
}
PROC_UNLOCK(p);
/*
* Turn ourself into a bound upcall.
* We will rely on kse_reassign()
* to make us run at a later time.
* We should look just like a scheduled upcall
* from msleep() or cv_wait().
*/
td->td_flags &= ~TDF_UNBOUND;
td->td_flags |= TDF_UPCALLING;
/* Only get here if we have become an upcall */
} else {
mtx_lock_spin(&sched_lock);
thread_exit();
}
/*
* We ARE going back to userland with this KSE.
* Check for threads that need to borrow it.
* Optimisation: don't call mi_switch if no-one wants the KSE.
* We are permanently bound. We may be an upcall.
* If an upcall, check for threads that need to borrow the KSE.
* Any other thread that comes ready after this missed the boat.
*/
ke = td->td_kse;
if ((td2 = kg->kg_last_assigned))
td2 = TAILQ_NEXT(td2, td_runq);
else
td2 = TAILQ_FIRST(&kg->kg_runq);
if (td2) {
/*
* force a switch to more urgent 'in kernel'
* work. Control will return to this thread
* when there is no more work to do.
* kse_reassign() will do that for us.
*/
TD_SET_LOAN(td);
ke->ke_bound = td;
ke->ke_thread = NULL;
p->p_stats->p_ru.ru_nvcsw++;
mi_switch(); /* kse_reassign() will (re)find td2 */
}
mtx_unlock_spin(&sched_lock);
/*
* If not upcalling, go back to userspace.
* If we are, get the upcall set up.
*/
if (td->td_flags & TDF_UPCALLING) {
if (worktodo) {
/*
* force a switch to more urgent 'in kernel'
* work. Control will return to this thread
* when there is no more work to do.
* kse_reassign() will do that for us.
*/
TD_SET_LOAN(td); /* XXXKSE may not be needed */
p->p_stats->p_ru.ru_nvcsw++;
mi_switch(); /* kse_reassign() will (re)find worktodo */
}
td->td_flags &= ~TDF_UPCALLING;
mtx_unlock_spin(&sched_lock);
/*
* There is no more work to do and we are going to ride
* this thread/KSE up to userland as an upcall.
* Do the last parts of the setup needed for the upcall.
*/
CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
td, td->td_proc->p_pid, td->td_proc->p_comm);
/*
* Set user context to the UTS.
* Will use Giant in cpu_thread_clean() because it uses
* kmem_free(kernel_map, ...)
*/
cpu_set_upcall_kse(td, ke);
/*
* Unhook the list of completed threads.
* anything that completes after this gets to
* come in next time.
* Put the list of completed thread mailboxes on
* this KSE's mailbox.
*/
error = thread_link_mboxes(kg, ke);
if (error)
goto bad;
/*
* Set state and clear the thread mailbox pointer.
* From now on we are just a bound outgoing process.
* **Problem** userret is often called several times.
* it would be nice if this all happened only on the first
* time through. (the scan for extra work etc.)
*/
#if 0
error = suword((caddr_t)ke->ke_mailbox +
offsetof(struct kse_mailbox, km_curthread), 0);
#else /* if user pointer arithmetic is ok in the kernel */
error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
#endif
ke->ke_uuticks = ke->ke_usticks = 0;
if (error)
goto bad;
nanotime(&ts);
if (copyout(&ts,
(caddr_t)&ke->ke_mailbox->km_timeofday, sizeof(ts))) {
goto bad;
}
} else {
mtx_unlock_spin(&sched_lock);
}
/*
* Optimisation:
* Ensure that we have a spare thread available,
@ -1476,61 +1555,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
}
thread_update_uticks();
/*
* To get here, we know there is no other need for our
* KSE so we can proceed. If not upcalling, go back to
* userspace. If we are, get the upcall set up.
*/
if ((td->td_flags & TDF_UPCALLING) == 0)
return (0);
/*
* We must be an upcall to get this far.
* There is no more work to do and we are going to ride
* this thread/KSE up to userland as an upcall.
* Do the last parts of the setup needed for the upcall.
*/
CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
td, td->td_proc->p_pid, td->td_proc->p_comm);
/*
* Set user context to the UTS.
* Will use Giant in cpu_thread_clean() because it uses
* kmem_free(kernel_map, ...)
*/
cpu_set_upcall_kse(td, ke);
/*
* Put any completed mailboxes on this KSE's list.
*/
error = thread_link_mboxes(kg, ke);
if (error)
goto bad;
/*
* Set state and mailbox.
* From now on we are just a bound outgoing process.
* **Problem** userret is often called several times.
* it would be nice if this all happened only on the first time
* through. (the scan for extra work etc.)
*/
mtx_lock_spin(&sched_lock);
td->td_flags &= ~TDF_UPCALLING;
mtx_unlock_spin(&sched_lock);
#if 0
error = suword((caddr_t)ke->ke_mailbox +
offsetof(struct kse_mailbox, km_curthread), 0);
#else /* if user pointer arithmetic is ok in the kernel */
error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
#endif
ke->ke_uuticks = ke->ke_usticks = 0;
if (!error) {
nanotime(&ts);
if (copyout(&ts, (caddr_t)&ke->ke_mailbox->km_timeofday,
sizeof(ts))) {
goto bad;
}
}
td->td_mailbox = NULL;
return (0);
bad:
@ -1541,6 +1566,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGSEGV);
PROC_UNLOCK(td->td_proc);
td->td_mailbox = NULL;
return (error); /* go sync */
}
@ -1577,9 +1603,10 @@ thread_single(int force_exit)
if (p->p_singlethread)
return (1);
if (force_exit == SINGLE_EXIT)
if (force_exit == SINGLE_EXIT) {
p->p_flag |= P_SINGLE_EXIT;
else
td->td_flags &= ~TDF_UNBOUND;
} else
p->p_flag &= ~P_SINGLE_EXIT;
p->p_flag |= P_STOPPED_SINGLE;
p->p_singlethread = td;
@ -1601,11 +1628,17 @@ thread_single(int force_exit)
else
abortsleep(td2);
}
if (TD_IS_IDLE(td2)) {
TD_CLR_IDLE(td2);
}
} else {
if (TD_IS_SUSPENDED(td2))
continue;
/* maybe other inhibitted states too? */
if (TD_IS_SLEEPING(td2))
if (td2->td_inhibitors &
(TDI_SLEEPING | TDI_SWAPPED |
TDI_LOAN | TDI_IDLE |
TDI_EXITING))
thread_suspend_one(td2);
}
}
@ -1707,15 +1740,14 @@ thread_suspend_check(int return_instead)
while (mtx_owned(&Giant))
mtx_unlock(&Giant);
/*
* free extra kses and ksegrps, we needn't worry
* about if current thread is in same ksegrp as
* p_singlethread and last kse in the group
* could be killed, this is protected by kg_numthreads,
* in this case, we deduce that kg_numthreads must > 1.
* All threads should be exiting
* Unless they are the active "singlethread".
* destroy un-needed KSEs as we go..
* KSEGRPS may implode too as #kses -> 0.
*/
ke = td->td_kse;
if (ke->ke_bound == NULL &&
((kg->kg_kses != 1) || (kg->kg_numthreads == 1)))
if (ke->ke_owner == td &&
(kg->kg_kses >= kg->kg_numthreads ))
ke->ke_flags |= KEF_EXIT;
thread_exit();
}

View File

@ -1525,6 +1525,11 @@ psignal(p, sig)
if (TD_IS_SLEEPING(td) &&
(td->td_flags & TDF_SINTR))
thread_suspend_one(td);
else if (TD_IS_IDLE(td)) {
thread_suspend_one(td);
}
}
if (p->p_suspcount == p->p_numthreads) {
mtx_unlock_spin(&sched_lock);

View File

@ -110,7 +110,6 @@ static void runq_readjust(struct runq *rq, struct kse *ke);
/************************************************************************
* Functions that manipulate runnability from a thread perspective. *
************************************************************************/
/*
* Select the KSE that will be run next. From that find the thread, and x
* remove it from the KSEGRP's run queue. If there is thread clustering,
@ -128,27 +127,12 @@ choosethread(void)
td = ke->ke_thread;
KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
kg = ke->ke_ksegrp;
if (td->td_flags & TDF_UNBOUND) {
if (TD_IS_UNBOUND(td)) {
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
if (kg->kg_last_assigned == td) {
if (TAILQ_PREV(td, threadqueue, td_runq)
!= NULL)
printf("Yo MAMA!\n");
kg->kg_last_assigned = TAILQ_PREV(td,
threadqueue, td_runq);
}
/*
* If we have started running an upcall,
* Then TDF_UNBOUND WAS set because the thread was
* created without a KSE. Now that we have one,
* and it is our time to run, we make sure
* that BOUND semantics apply for the rest of
* the journey to userland, and into the UTS.
*/
#ifdef NOTYET
if (td->td_flags & TDF_UPCALLING)
tdf->td_flags &= ~TDF_UNBOUND;
#endif
}
kg->kg_runnable--;
CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
@ -160,18 +144,26 @@ choosethread(void)
CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
}
ke->ke_flags |= KEF_DIDRUN;
/*
* Only allow non system threads to run in panic
* if they are the one we are tracing. (I think.. [JRE])
*/
if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
(td->td_flags & TDF_INPANIC) == 0))
goto retry;
TD_SET_RUNNING(td);
return (td);
}
/*
* Given a KSE (now surplus or at least loanable), either assign a new
* runable thread to it
* (and put it in the run queue) or put it in the ksegrp's idle KSE list.
* Or aybe give it back to its owner if it's been loaned.
* runable thread to it (and put it in the run queue) or put it in
* the ksegrp's idle KSE list.
* Or maybe give it back to its owner if it's been loaned.
* Assumes that the original thread is either not runnable or
* already on the run queue
*/
void
kse_reassign(struct kse *ke)
@ -180,16 +172,79 @@ kse_reassign(struct kse *ke)
struct thread *td;
struct thread *owner;
struct thread *original;
int loaned;
KASSERT((ke->ke_owner), ("reassigning KSE with no owner"));
KASSERT((ke->ke_thread && TD_IS_INHIBITED(ke->ke_thread)),
("reassigning KSE with no or runnable thread"));
mtx_assert(&sched_lock, MA_OWNED);
kg = ke->ke_ksegrp;
owner = ke->ke_bound;
owner = ke->ke_owner;
loaned = TD_LENDER(owner);
original = ke->ke_thread;
KASSERT(!(owner && ((owner->td_kse != ke) ||
(owner->td_flags & TDF_UNBOUND))),
("kse_reassign: bad thread bound state"));
if (TD_CAN_UNBIND(original) && (original->td_standin)) {
KASSERT((owner == original),
("Early thread borrowing?"));
/*
* The outgoing thread is "threaded" and has never
* scheduled an upcall.
* decide whether this is a short or long term event
* and thus whether or not to schedule an upcall.
* if it is a short term event, just suspend it in
* a way that takes its KSE with it.
* Select the events for which we want to schedule upcalls.
* For now it's just sleep.
* Other threads that still have not fired an upcall
* are held to their KSE using the temporary binding.
*/
if (TD_ON_SLEEPQ(original)) {
/*
* A bound thread that can still unbind itself
* has been scheduled out.
* If it is sleeping, then we need to schedule an
* upcall.
* XXXKSE eventually almost any inhibition could do.
*/
original->td_flags &= ~TDF_CAN_UNBIND;
original->td_flags |= TDF_UNBOUND;
thread_schedule_upcall(original, ke);
owner = ke->ke_owner;
loaned = 1;
}
}
/*
* If the current thread was borrowing, then make things consistent
* by giving it back to the owner for the moment. The original thread
* must be unbound and have already used its chance for
* firing off an upcall. Threads that have not yet made an upcall
* can not borrow KSEs.
*/
if (loaned) {
KASSERT((original->td_standin == NULL),
("kse_reassign: borrower still has standin thread"));
TD_CLR_LOAN(owner);
ke->ke_thread = owner;
original->td_kse = NULL; /* give it amnesia */
/*
* Upcalling threads have lower priority than all
* in-kernel threads; however, threads that have loaned out
* their KSE and are NOT upcalling have the priority that
* they have. In other words, only look for other work if
* the owner is not runnable, OR is upcalling.
*/
if (TD_CAN_RUN(owner) &&
((owner->td_flags & TDF_UPCALLING) == 0)) {
setrunnable(owner);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
ke, owner);
return;
}
}
/*
* Either the owner is not runnable, or is an upcall.
* Find the first unassigned thread
* If there is a 'last assigned' then see what's next.
* otherwise look at what is first.
@ -204,77 +259,67 @@ kse_reassign(struct kse *ke)
* If we found one assign it the kse, otherwise idle the kse.
*/
if (td) {
/*
* If the original is bound to us we can only be lent out so
* make a loan, otherwise we just drop the
* original thread.
/*
* Assign the new thread to the KSE.
* and make the KSE runnable again,
*/
if (original) {
if (((original->td_flags & TDF_UNBOUND) == 0)) {
/*
* Put the owner on the side
*/
ke->ke_bound = original;
TD_SET_LOAN(original);
} else {
original->td_kse = NULL;
}
if (TD_IS_BOUND(owner)) {
/*
* If there is a reason to keep the previous
* owner, do so.
*/
TD_SET_LOAN(owner);
} else {
/* otherwise, cut it free */
ke->ke_owner = td;
owner->td_kse = NULL;
}
kg->kg_last_assigned = td;
td->td_kse = ke;
ke->ke_thread = td;
sched_add(ke);
/*
* if we have already borrowed this,
* just pass it to the new thread,
* otherwise, enact the loan.
*/
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
return;
}
if (owner) { /* already loaned out */
/* effectivly unloan it */
TD_CLR_LOAN(owner);
ke->ke_thread = owner;
ke->ke_bound = NULL;
if (original)
original->td_kse = NULL;
original = owner;
if (TD_CAN_RUN(owner)) {
/*
* If the owner thread is now runnable, run it..
* Let it have its KSE back.
*/
setrunqueue(owner);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
ke, owner);
return;
}
}
/*
* Presently NOT loaned out.
* If we are bound, we go on the loanable queue
* otherwise onto the free queue.
* Now handle any waiting upcall.
* Since we didn't make them runnable before.
*/
if (original) {
if (((original->td_flags & TDF_UNBOUND) == 0)) {
ke->ke_state = KES_THREAD;
ke->ke_flags |= KEF_ONLOANQ;
ke->ke_bound = NULL;
TAILQ_INSERT_HEAD(&kg->kg_lq, ke, ke_kgrlist);
kg->kg_loan_kses++;
CTR1(KTR_RUNQ, "kse_reassign: ke%p on loan queue", ke);
return;
} else {
original->td_kse = NULL;
}
if (TD_CAN_RUN(owner)) {
setrunnable(owner);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
ke, owner);
return;
}
ke->ke_state = KES_IDLE;
ke->ke_thread = NULL;
TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
kg->kg_idle_kses++;
CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke);
/*
* It is possible that this is the last thread in the group
* because the KSE is being shut down or the process
* is exiting.
*/
if (TD_IS_EXITING(owner) || (ke->ke_flags & KEF_EXIT)) {
ke->ke_thread = NULL;
owner->td_kse = NULL;
kse_unlink(ke);
return;
}
/*
* At this stage all we know is that the owner
* is the same as the 'active' thread in the KSE
* and that it is
* Presently NOT loaned out.
* Put it on the loanable queue. Make it fifo
* so that long term sleepers donate their KSE's first.
*/
KASSERT((TD_IS_BOUND(owner)), ("kse_reassign: UNBOUND lender"));
ke->ke_state = KES_THREAD;
ke->ke_flags |= KEF_ONLOANQ;
TAILQ_INSERT_TAIL(&kg->kg_lq, ke, ke_kgrlist);
kg->kg_loan_kses++;
CTR1(KTR_RUNQ, "kse_reassign: ke%p on loan queue", ke);
return;
}
#if 0
@ -302,7 +347,7 @@ remrunqueue(struct thread *td)
CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
kg->kg_runnable--;
TD_SET_CAN_RUN(td);
if ((td->td_flags & TDF_UNBOUND) == 0) {
if (TD_IS_BOUND(td)) {
/* Bring its kse with it, leave the thread attached */
sched_rem(ke);
ke->ke_state = KES_THREAD;
@ -317,12 +362,12 @@ remrunqueue(struct thread *td)
* KSE to the next available thread. Then, we should
* see if we need to move the KSE in the run queues.
*/
sched_rem(ke);
ke->ke_state = KES_THREAD;
td2 = kg->kg_last_assigned;
KASSERT((td2 != NULL), ("last assigned has wrong value "));
if (td2 == td)
kg->kg_last_assigned = td3;
td->td_kse = NULL;
ke->ke_thread = NULL;
kse_reassign(ke);
}
}
@ -345,7 +390,7 @@ adjustrunqueue( struct thread *td, int newpri)
*/
ke = td->td_kse;
CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
if ((td->td_flags & TDF_UNBOUND) == 0) {
if (TD_IS_BOUND(td)) {
/* We only care about the kse in the run queue. */
td->td_priority = newpri;
if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
@ -396,11 +441,16 @@ setrunqueue(struct thread *td)
sched_add(td->td_kse);
return;
}
if ((td->td_flags & TDF_UNBOUND) == 0) {
/*
* If the process is threaded but the thread is bound then
* there is still a little extra to do re. KSE loaning.
*/
if (TD_IS_BOUND(td)) {
KASSERT((td->td_kse != NULL),
("queueing BAD thread to run queue"));
ke = td->td_kse;
ke->ke_bound = NULL;
KASSERT((ke->ke_owner == ke->ke_thread),
("setrunqueue: Hey KSE loaned out"));
if (ke->ke_flags & KEF_ONLOANQ) {
ke->ke_flags &= ~KEF_ONLOANQ;
TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
@ -422,15 +472,7 @@ setrunqueue(struct thread *td)
* If we can't get one, our priority is not high enough..
* that's ok..
*/
if (kg->kg_idle_kses) {
/*
* There is a free one so it's ours for the asking..
*/
ke = TAILQ_FIRST(&kg->kg_iq);
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
ke->ke_state = KES_THREAD;
kg->kg_idle_kses--;
} else if (kg->kg_loan_kses) {
if (kg->kg_loan_kses) {
/*
* Failing that see if we can borrow one.
*/
@ -438,8 +480,7 @@ setrunqueue(struct thread *td)
TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
ke->ke_flags &= ~KEF_ONLOANQ;
ke->ke_state = KES_THREAD;
TD_SET_LOAN(ke->ke_thread);
ke->ke_bound = ke->ke_thread;
TD_SET_LOAN(ke->ke_owner);
ke->ke_thread = NULL;
kg->kg_loan_kses--;
} else if (tda && (tda->td_priority > td->td_priority)) {
@ -456,7 +497,11 @@ setrunqueue(struct thread *td)
} else {
/*
* Temporarily disassociate so it looks like the other cases.
* If the owner wasn't lending before, then it is now..
*/
if (!TD_LENDER(ke->ke_owner)) {
TD_SET_LOAN(ke->ke_owner);
}
ke->ke_thread = NULL;
td->td_kse = NULL;
}

View File

@ -172,23 +172,8 @@ msleep(ident, mtx, priority, wmesg, timo)
td->td_flags &= ~TDF_INTERRUPT;
return (EINTR);
}
mtx_lock_spin(&sched_lock);
if ((td->td_flags & (TDF_UNBOUND|TDF_INMSLEEP)) ==
TDF_UNBOUND) {
/*
* Arrange for an upcall to be readied.
* it will not actually happen until all
* pending in-kernel work for this KSEGRP
* has been done.
*/
/* Don't recurse here! */
td->td_flags |= TDF_INMSLEEP;
thread_schedule_upcall(td, td->td_kse);
td->td_flags &= ~TDF_INMSLEEP;
}
} else {
mtx_lock_spin(&sched_lock);
}
mtx_lock_spin(&sched_lock);
if (cold ) {
/*
* During autoconfiguration, just give interrupts

View File

@ -197,6 +197,7 @@ kse_link(struct kse *ke, struct ksegrp *kg)
ke->ke_state = KES_UNQUEUED;
ke->ke_proc = p;
ke->ke_ksegrp = kg;
ke->ke_owner = NULL;
ke->ke_thread = NULL;
ke->ke_oncpu = NOCPU;
}
@ -208,10 +209,6 @@ kse_unlink(struct kse *ke)
mtx_assert(&sched_lock, MA_OWNED);
kg = ke->ke_ksegrp;
if (ke->ke_state == KES_IDLE) {
kg->kg_idle_kses--;
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
}
TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
if (--kg->kg_kses == 0) {
@ -231,14 +228,12 @@ ksegrp_link(struct ksegrp *kg, struct proc *p)
TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */
TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */
TAILQ_INIT(&kg->kg_iq); /* idle kses in ksegrp */
TAILQ_INIT(&kg->kg_lq); /* loan kses in ksegrp */
kg->kg_proc = p;
/* the following counters are in the -zero- section and may not need clearing */
kg->kg_numthreads = 0;
kg->kg_runnable = 0;
kg->kg_kses = 0;
kg->kg_idle_kses = 0;
kg->kg_loan_kses = 0;
kg->kg_runq_kses = 0; /* XXXKSE change name */
/* link it in now that it's consistent */
@ -351,7 +346,8 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
}
/*
* Either returns as an upcall or exits
* Either becomes an upcall or waits for an awakening event and
* THEN becomes an upcall. Only error cases return.
*/
int
kse_release(struct thread * td, struct kse_release_args * uap)
@ -369,30 +365,24 @@ kse_release(struct thread * td, struct kse_release_args * uap)
(td->td_flags & TDF_UNBOUND) ||
(td->td_kse->ke_mailbox == NULL))
return (EINVAL);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
/* Change OURSELF to become an upcall. */
td->td_flags = TDF_UPCALLING; /* BOUND */
if (kg->kg_completed == NULL) {
#if 1 /* temp until signals make new threads */
if (p->p_numthreads == 1) {
/* change OURSELF to become an upcall */
td->td_flags = TDF_UPCALLING;
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
/*
* msleep will not call thread_sched_upcall
* because thread is not UNBOUND.
*/
msleep(p->p_sigacts, NULL,
PPAUSE | PCATCH, "ksepause", 0);
return (0);
}
#endif /* end temp */
thread_exit();
/* XXXKSE also look for waiting signals etc. */
/*
* The KSE will however be lendable.
*/
mtx_lock_spin(&sched_lock);
TD_SET_IDLE(td);
PROC_UNLOCK(p);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
mtx_unlock_spin(&sched_lock);
} else {
PROC_UNLOCK(p);
}
/* change OURSELF to become an upcall */
td->td_flags = TDF_UPCALLING;
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
return (0);
}
@ -403,45 +393,62 @@ int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
struct proc *p;
struct kse *ke, *ke2;
struct kse *ke;
struct ksegrp *kg;
struct thread *td2;
p = td->td_proc;
td2 = NULL;
/* KSE-enabled processes only, please. */
if (!(p->p_flag & P_KSES))
return EINVAL;
ke = NULL;
mtx_lock_spin(&sched_lock);
PROC_LOCK(p);
if (uap->mbx) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
FOREACH_KSE_IN_GROUP(kg, ke2) {
if (ke2->ke_mailbox != uap->mbx)
FOREACH_KSE_IN_GROUP(kg, ke) {
if (ke->ke_mailbox != uap->mbx)
continue;
if (ke2->ke_state == KES_IDLE) {
ke = ke2;
goto found;
} else {
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = 0;
td->td_retval[1] = 0;
td2 = ke->ke_owner ;
KASSERT((td2 != NULL),("KSE with no owner"));
if (!TD_IS_IDLE(td2)) {
/* Return silently if no longer idle */
PROC_UNLOCK(p);
td->td_retval[0] = 0;
td->td_retval[1] = 0;
return (0);
}
break;
}
if (td2) {
break;
}
}
} else {
/*
* look for any idle KSE to resurrect.
*/
kg = td->td_ksegrp;
ke = TAILQ_FIRST(&kg->kg_iq);
mtx_lock_spin(&sched_lock);
FOREACH_KSE_IN_GROUP(kg, ke) {
td2 = ke->ke_owner;
KASSERT((td2 != NULL),("KSE with no owner2"));
if (TD_IS_IDLE(td2))
break;
}
}
if (ke == NULL) {
if (td2) {
mtx_lock_spin(&sched_lock);
PROC_UNLOCK(p);
TD_CLR_IDLE(td2);
setrunnable(td2);
mtx_unlock_spin(&sched_lock);
return (ESRCH);
}
found:
thread_schedule_upcall(td, ke);
td->td_retval[0] = 0;
td->td_retval[1] = 0;
return (0);
}
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = 0;
td->td_retval[1] = 0;
return (0);
PROC_UNLOCK(p);
return (ESRCH);
}
/*
@ -810,17 +817,14 @@ thread_export_context(struct thread *td)
addr = (void *)(&td->td_mailbox->tm_context);
#endif
error = copyin(addr, &uc, sizeof(ucontext_t));
if (error == 0) {
thread_getcontext(td, &uc);
error = copyout(&uc, addr, sizeof(ucontext_t));
if (error)
goto bad;
thread_getcontext(td, &uc);
error = copyout(&uc, addr, sizeof(ucontext_t));
if (error)
goto bad;
}
if (error) {
PROC_LOCK(p);
psignal(p, SIGSEGV);
PROC_UNLOCK(p);
return (error);
}
/* get address in latest mbox of list pointer */
#if 0
addr = (caddr_t)td->td_mailbox
@ -835,6 +839,7 @@ thread_export_context(struct thread *td)
for (;;) {
mbx = (uintptr_t)kg->kg_completed;
if (suword(addr, mbx)) {
error = EFAULT;
goto bad;
}
PROC_LOCK(p);
@ -856,7 +861,7 @@ thread_export_context(struct thread *td)
PROC_LOCK(p);
psignal(p, SIGSEGV);
PROC_UNLOCK(p);
return (EFAULT);
return (error);
}
/*
@ -930,8 +935,6 @@ thread_update_uticks(void)
caddr_t addr;
uint uticks, sticks;
KASSERT(!(td->td_flags & TDF_UNBOUND), ("thread not bound."));
if (ke->ke_mailbox == NULL)
return 0;
@ -939,8 +942,12 @@ thread_update_uticks(void)
ke->ke_uuticks = 0;
sticks = ke->ke_usticks;
ke->ke_usticks = 0;
#if 0
tmbx = (void *)fuword((caddr_t)ke->ke_mailbox
+ offsetof(struct kse_mailbox, km_curthread));
+ offsetof(struct kse_mailbox, km_curthread));
#else /* if user pointer arithmetic is ok in the kernel */
tmbx = (void *)fuword( (void *)&ke->ke_mailbox->km_curthread);
#endif
if ((tmbx == NULL) || (tmbx == (void *)-1))
return 0;
if (uticks) {
@ -1028,18 +1035,21 @@ thread_exit(void)
}
/* Reassign this thread's KSE. */
ke->ke_thread = NULL;
td->td_kse = NULL;
ke->ke_state = KES_UNQUEUED;
KASSERT((ke->ke_bound != td),
("thread_exit: entered with ke_bound set"));
/*
* decide what to do with the KSE attached to this thread.
* Decide what to do with the KSE attached to this thread.
* XXX Possibly kse_reassign should do both cases as it already
* does some of this.
*/
if (ke->ke_flags & KEF_EXIT) {
KASSERT((ke->ke_owner == td),
("thread_exit: KSE exiting with non-owner thread"));
ke->ke_thread = NULL;
td->td_kse = NULL;
kse_unlink(ke);
} else {
TD_SET_EXITING(td); /* definitely not runnable */
kse_reassign(ke);
}
PROC_UNLOCK(p);
@ -1107,19 +1117,13 @@ thread_link(struct thread *td, struct ksegrp *kg)
void
kse_purge(struct proc *p, struct thread *td)
{
struct kse *ke;
/* XXXKSE think about this..
may need to wake up threads on loan queue. */
struct ksegrp *kg;
KASSERT(p->p_numthreads == 1, ("bad thread number"));
mtx_lock_spin(&sched_lock);
while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
kg->kg_idle_kses--;
TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
kg->kg_kses--;
kse_stash(ke);
}
TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
p->p_numksegrps--;
KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
@ -1137,38 +1141,28 @@ kse_purge(struct proc *p, struct thread *td)
/*
* Create a thread and schedule it for upcall on the KSE given.
* Use our thread's standin so that we don't have to allocate one.
*/
struct thread *
thread_schedule_upcall(struct thread *td, struct kse *ke)
{
struct thread *td2;
struct ksegrp *kg;
int newkse;
mtx_assert(&sched_lock, MA_OWNED);
newkse = (ke != td->td_kse);
/*
* If the kse is already owned by another thread then we can't
* schedule an upcall because the other thread must be BOUND
* which means it is not in a position to take an upcall.
* We must be borrowing the KSE to allow us to complete some in-kernel
* work. When we complete, the Bound thread will have the chance to
* If the owner and kse are BOUND then that thread is planning to
* go to userland and upcalls are not expected. So don't make one.
* If it is not bound then make it so with the spare thread
* and then borrow back the KSE to allow us to complete some in-kernel
* work. When we complete, the Bound thread will have the chance to
* complete. This thread will sleep as planned. Hopefully there will
* eventually be an unbound thread that can be converted to an
* upcall to report the completion of this thread.
*/
if (ke->ke_bound && ((ke->ke_bound->td_flags & TDF_UNBOUND) == 0)) {
return (NULL);
}
KASSERT((ke->ke_bound == NULL), ("kse already bound"));
if (ke->ke_state == KES_IDLE) {
kg = ke->ke_ksegrp;
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
kg->kg_idle_kses--;
ke->ke_state = KES_UNQUEUED;
}
if ((td2 = td->td_standin) != NULL) {
td->td_standin = NULL;
} else {
@ -1206,8 +1200,9 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
td2->td_kse = ke;
td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
ke->ke_owner = td2;
/*
* If called from msleep(), we are working on the current
* If called from kse_reassign(), we are working on the current
* KSE so fake that we borrowed it. If called from
* kse_create(), don't, as we have a new kse too.
*/
@ -1220,10 +1215,8 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
* from msleep() this is going to be "very soon" in nearly
* all cases.
*/
ke->ke_bound = td2;
TD_SET_LOAN(td2);
} else {
ke->ke_bound = NULL;
ke->ke_thread = td2;
ke->ke_state = KES_THREAD;
setrunqueue(td2);
@ -1292,10 +1285,11 @@ thread_user_enter(struct proc *p, struct thread *td)
/*
* If we are doing a syscall in a KSE environment,
* note where our mailbox is. There is always the
* possibility that we could do this lazily (in sleep()),
* possibility that we could do this lazily (in kse_reassign()),
* but for now do it every time.
*/
ke = td->td_kse;
td->td_flags &= ~TDF_UNBOUND;
if (ke->ke_mailbox != NULL) {
#if 0
td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox
@ -1308,7 +1302,7 @@ thread_user_enter(struct proc *p, struct thread *td)
(td->td_mailbox == (void *)-1)) {
td->td_mailbox = NULL; /* single thread it.. */
mtx_lock_spin(&sched_lock);
td->td_flags &= ~TDF_UNBOUND;
td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
mtx_unlock_spin(&sched_lock);
} else {
/*
@ -1324,8 +1318,11 @@ thread_user_enter(struct proc *p, struct thread *td)
td->td_standin = thread_alloc();
}
mtx_lock_spin(&sched_lock);
td->td_flags |= TDF_UNBOUND;
td->td_flags |= TDF_CAN_UNBIND;
mtx_unlock_spin(&sched_lock);
KASSERT((ke->ke_owner == td),
("thread_user_enter: No starting owner "));
ke->ke_owner = td;
td->td_usticks = 0;
}
}
@ -1350,122 +1347,204 @@ thread_userret(struct thread *td, struct trapframe *frame)
int unbound;
struct kse *ke;
struct ksegrp *kg;
struct thread *td2;
struct thread *worktodo;
struct proc *p;
struct timespec ts;
error = 0;
KASSERT((td->td_kse && td->td_kse->ke_thread && td->td_kse->ke_owner),
("thread_userret: bad thread/kse pointers"));
KASSERT((td == curthread),
("thread_userret: bad thread argument"));
unbound = td->td_flags & TDF_UNBOUND;
kg = td->td_ksegrp;
p = td->td_proc;
error = 0;
unbound = TD_IS_UNBOUND(td);
mtx_lock_spin(&sched_lock);
if ((worktodo = kg->kg_last_assigned))
worktodo = TAILQ_NEXT(worktodo, td_runq);
else
worktodo = TAILQ_FIRST(&kg->kg_runq);
/*
* Originally bound threads never upcall but they may
* Permanently bound threads never upcall but they may
* loan out their KSE at this point.
* Upcalls imply bound.. They also may want to do some Philanthropy.
* Unbound threads on the other hand either yield to other work
* or transform into an upcall.
* (having saved their context to user space in both cases)
* Temporarily bound threads on the other hand either yield
* to other work and transform into an upcall, or proceed back to
* userland.
*/
if (unbound) {
/*
* We are an unbound thread, looking to return to
* user space.
* There are several possibilities:
* 1) we are using a borrowed KSE. save state and exit.
* kse_reassign() will recycle the kse as needed,
* 2) we are not.. save state, and then convert ourself
* to be an upcall, bound to the KSE.
* if there are others that need the kse,
* give them a chance by doing an mi_switch().
* Because we are bound, control will eventually return
* to us here.
* ***
* Save the thread's context, and link it
* into the KSEGRP's list of completed threads.
*/
if (TD_CAN_UNBIND(td)) {
td->td_flags &= ~(TDF_UNBOUND|TDF_CAN_UNBIND);
if (!worktodo && (kg->kg_completed == NULL)) {
/*
* This thread has not started any upcall.
* If there is no work to report other than
* ourself, then it can return direct to userland.
*/
justreturn:
mtx_unlock_spin(&sched_lock);
thread_update_uticks();
td->td_mailbox = NULL;
return (0);
}
mtx_unlock_spin(&sched_lock);
error = thread_export_context(td);
td->td_mailbox = NULL;
td->td_usticks = 0;
if (error) {
/*
* If we are not running on a borrowed KSE, then
* As we are not running on a borrowed KSE,
* failing to do the KSE operation just defaults
* back to synchronous operation, so just return from
* the syscall. If it IS borrowed, there is nothing
* we can do. We just lose that context. We
* the syscall.
*/
goto justreturn;
}
mtx_lock_spin(&sched_lock);
/*
* Turn ourself into a bound upcall.
* We will rely on kse_reassign()
* to make us run at a later time.
*/
td->td_flags |= TDF_UPCALLING;
/* there may be more work since we re-locked schedlock */
if ((worktodo = kg->kg_last_assigned))
worktodo = TAILQ_NEXT(worktodo, td_runq);
else
worktodo = TAILQ_FIRST(&kg->kg_runq);
} else if (unbound) {
/*
* We are an unbound thread, looking to
* return to user space. There must be another owner
* of this KSE.
* We are using a borrowed KSE. save state and exit.
* kse_reassign() will recycle the kse as needed,
*/
mtx_unlock_spin(&sched_lock);
error = thread_export_context(td);
td->td_usticks = 0;
if (error) {
/*
* There is nothing we can do.
* We just lose that context. We
* probably should note this somewhere and send
* the process a signal.
*/
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGSEGV);
mtx_lock_spin(&sched_lock);
if (td->td_kse->ke_bound == NULL) {
td->td_flags &= ~TDF_UNBOUND;
PROC_UNLOCK(td->td_proc);
mtx_unlock_spin(&sched_lock);
thread_update_uticks();
return (error); /* go sync */
ke = td->td_kse;
/* possibly upcall with error? */
} else {
/*
* Don't make an upcall, just exit so that the owner
* can get its KSE if it wants it.
* Our context is already safely stored for later
* use by the UTS.
*/
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
ke = td->td_kse;
}
/*
* If the owner is idling, we now have something for it
* to report, so make it runnable.
* If the owner is not an upcall, make an attempt to
* ensure that at least one of any IDLED upcalls can
* wake up.
*/
if (ke->ke_owner->td_flags & TDF_UPCALLING) {
TD_CLR_IDLE(ke->ke_owner);
} else {
FOREACH_KSE_IN_GROUP(kg, ke) {
if (TD_IS_IDLE(ke->ke_owner)) {
TD_CLR_IDLE(ke->ke_owner);
}
}
thread_exit();
}
/*
* if the KSE is owned and we are borrowing it,
* don't make an upcall, just exit so that the owner
* can get its KSE if it wants it.
* Our context is already safely stored for later
* use by the UTS.
*/
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if (td->td_kse->ke_bound) {
thread_exit();
}
PROC_UNLOCK(p);
/*
* Turn ourself into a bound upcall.
* We will rely on kse_reassign()
* to make us run at a later time.
* We should look just like a scheduled upcall
* from msleep() or cv_wait().
*/
td->td_flags &= ~TDF_UNBOUND;
td->td_flags |= TDF_UPCALLING;
/* Only get here if we have become an upcall */
} else {
mtx_lock_spin(&sched_lock);
thread_exit();
}
/*
* We ARE going back to userland with this KSE.
* Check for threads that need to borrow it.
* Optimisation: don't call mi_switch if no-one wants the KSE.
* We are permanently bound. We may be an upcall.
* If an upcall, check for threads that need to borrow the KSE.
* Any other thread that comes ready after this missed the boat.
*/
ke = td->td_kse;
if ((td2 = kg->kg_last_assigned))
td2 = TAILQ_NEXT(td2, td_runq);
else
td2 = TAILQ_FIRST(&kg->kg_runq);
if (td2) {
/*
* force a switch to more urgent 'in kernel'
* work. Control will return to this thread
* when there is no more work to do.
* kse_reassign() will do that for us.
*/
TD_SET_LOAN(td);
ke->ke_bound = td;
ke->ke_thread = NULL;
p->p_stats->p_ru.ru_nvcsw++;
mi_switch(); /* kse_reassign() will (re)find td2 */
}
mtx_unlock_spin(&sched_lock);
/*
* If not upcalling, go back to userspace.
* If we are, get the upcall set up.
*/
if (td->td_flags & TDF_UPCALLING) {
if (worktodo) {
/*
* force a switch to more urgent 'in kernel'
* work. Control will return to this thread
* when there is no more work to do.
* kse_reassign() will do that for us.
*/
TD_SET_LOAN(td); /* XXXKSE may not be needed */
p->p_stats->p_ru.ru_nvcsw++;
mi_switch(); /* kse_reassign() will (re)find worktodo */
}
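/* Any in-kernel work has been handed off; commit to doing the upcall. */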
td->td_flags &= ~TDF_UPCALLING;
mtx_unlock_spin(&sched_lock);
/*
* There is no more work to do and we are going to ride
* this thread/KSE up to userland as an upcall.
* Do the last parts of the setup needed for the upcall.
*/
CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
td, td->td_proc->p_pid, td->td_proc->p_comm);
/*
* Set user context to the UTS.
* Will use Giant in cpu_thread_clean() because it uses
* kmem_free(kernel_map, ...)
*/
cpu_set_upcall_kse(td, ke);
/*
* Unhook the list of completed threads.
* Anything that completes after this gets to
* come in next time.
* Put the list of completed thread mailboxes on
* this KSE's mailbox.
*/
error = thread_link_mboxes(kg, ke);
if (error)
goto bad;
/*
* Set state and clear the thread mailbox pointer.
* From now on we are just a bound outgoing process.
* **Problem** userret is often called several times.
* It would be nice if this all happened only on the first
* time through (the scan for extra work, etc.).
*/
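/* Clear km_curthread in the userland KSE mailbox; suword() stores one word to user space. */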
#if 0
error = suword((caddr_t)ke->ke_mailbox +
offsetof(struct kse_mailbox, km_curthread), 0);
#else /* if user pointer arithmetic is ok in the kernel */
error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
#endif
ke->ke_uuticks = ke->ke_usticks = 0;
if (error)
goto bad;
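/* Give the UTS a current timestamp via the mailbox's km_timeofday field. */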
nanotime(&ts);
if (copyout(&ts,
(caddr_t)&ke->ke_mailbox->km_timeofday, sizeof(ts))) {
goto bad;
}
} else {
mtx_unlock_spin(&sched_lock);
}
/*
* Optimisation:
* Ensure that we have a spare thread available,
@ -1476,61 +1555,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
}
thread_update_uticks();
/*
* To get here, we know there is no other need for our
* KSE so we can proceed. If not upcalling, go back to
* userspace. If we are, get the upcall set up.
*/
if ((td->td_flags & TDF_UPCALLING) == 0)
return (0);
/*
* We must be an upcall to get this far.
* There is no more work to do and we are going to ride
* this thread/KSE up to userland as an upcall.
* Do the last parts of the setup needed for the upcall.
*/
CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
td, td->td_proc->p_pid, td->td_proc->p_comm);
/*
* Set user context to the UTS.
* Will use Giant in cpu_thread_clean() because it uses
* kmem_free(kernel_map, ...)
*/
cpu_set_upcall_kse(td, ke);
/*
* Put any completed mailboxes on this KSE's list.
*/
error = thread_link_mboxes(kg, ke);
if (error)
goto bad;
/*
* Set state and mailbox.
* From now on we are just a bound outgoing process.
* **Problem** userret is often called several times.
* It would be nice if this all happened only on the first time
* through (the scan for extra work, etc.).
*/
mtx_lock_spin(&sched_lock);
td->td_flags &= ~TDF_UPCALLING;
mtx_unlock_spin(&sched_lock);
#if 0
error = suword((caddr_t)ke->ke_mailbox +
offsetof(struct kse_mailbox, km_curthread), 0);
#else /* if user pointer arithmetic is ok in the kernel */
error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
#endif
ke->ke_uuticks = ke->ke_usticks = 0;
if (!error) {
nanotime(&ts);
if (copyout(&ts, (caddr_t)&ke->ke_mailbox->km_timeofday,
sizeof(ts))) {
goto bad;
}
}
td->td_mailbox = NULL;
return (0);
bad:
@ -1541,6 +1566,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGSEGV);
PROC_UNLOCK(td->td_proc);
td->td_mailbox = NULL;
return (error); /* go sync */
}
@ -1577,9 +1603,10 @@ thread_single(int force_exit)
if (p->p_singlethread)
return (1);
if (force_exit == SINGLE_EXIT)
if (force_exit == SINGLE_EXIT) {
p->p_flag |= P_SINGLE_EXIT;
else
td->td_flags &= ~TDF_UNBOUND;
} else
p->p_flag &= ~P_SINGLE_EXIT;
p->p_flag |= P_STOPPED_SINGLE;
p->p_singlethread = td;
@ -1601,11 +1628,17 @@ thread_single(int force_exit)
else
abortsleep(td2);
}
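/* A thread idled by kse_release() must be woken up as well. */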
if (TD_IS_IDLE(td2)) {
TD_CLR_IDLE(td2);
}
} else {
if (TD_IS_SUSPENDED(td2))
continue;
/* maybe other inhibited states too? */
if (TD_IS_SLEEPING(td2))
if (td2->td_inhibitors &
(TDI_SLEEPING | TDI_SWAPPED |
TDI_LOAN | TDI_IDLE |
TDI_EXITING))
thread_suspend_one(td2);
}
}
@ -1707,15 +1740,14 @@ thread_suspend_check(int return_instead)
while (mtx_owned(&Giant))
mtx_unlock(&Giant);
/*
* free extra KSEs and ksegrps; we needn't worry
* about whether the current thread is in the same ksegrp as
* p_singlethread and the last KSE in the group
* could be killed; this is protected by kg_numthreads,
* in which case we deduce that kg_numthreads must be > 1.
* All threads should be exiting
* unless they are the active "singlethread".
* Destroy unneeded KSEs as we go.
* KSEGRPs may implode too as #kses -> 0.
*/
ke = td->td_kse;
if (ke->ke_bound == NULL &&
((kg->kg_kses != 1) || (kg->kg_numthreads == 1)))
if (ke->ke_owner == td &&
(kg->kg_kses >= kg->kg_numthreads ))
ke->ke_flags |= KEF_EXIT;
thread_exit();
}


@ -116,7 +116,7 @@ maybe_resched(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
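/* curthread may have no KSE attached at this point, so check before setting KEF_NEEDRESCHED. */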
if (td->td_priority < curthread->td_priority)
if (td->td_priority < curthread->td_priority && curthread->td_kse)
curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}


@ -112,7 +112,6 @@ userret(td, frame, oticks)
*/
if (p->p_flag & P_KSES) {
thread_userret(td, frame);
/* printf("KSE thread returned"); */
}
/*


@ -289,11 +289,11 @@ struct thread {
LIST_HEAD(, mtx) td_contested; /* (j) Contested locks. */
struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
int td_intr_nesting_level; /* (k) Interrupt recursion. */
struct kse_thr_mailbox *td_mailbox; /* the userland mailbox address */
struct kse_thr_mailbox *td_mailbox; /* The userland mailbox address */
struct ucred *td_ucred; /* (k) Reference to credentials. */
void (*td_switchin)(void); /* (k) Switchin special func. */
struct thread *td_standin; /* (?) use this for an upcall */
u_int td_usticks; /* Statclock hits in kernel, for UTS */
struct thread *td_standin; /* (?) Use this for an upcall */
u_int td_usticks; /* (?) Statclock kernel hits, for UTS */
u_int td_critnest; /* (k) Critical section nest level. */
#define td_endzero td_base_pri
@ -309,7 +309,7 @@ struct thread {
*/
struct pcb *td_pcb; /* (k) Kernel VA of pcb and kstack. */
enum {
TDS_INACTIVE = 0x20,
TDS_INACTIVE = 0x0,
TDS_INHIBITED,
TDS_CAN_RUN,
TDS_RUNQ,
@ -330,6 +330,7 @@ struct thread {
/* flags kept in td_flags */
#define TDF_UNBOUND 0x000001 /* May give away the kse, uses the kg runq. */
#define TDF_INPANIC 0x000002 /* Caused a panic, let it drive crashdump. */
#define TDF_CAN_UNBIND 0x000004 /* Only temporarily bound. */
#define TDF_SINTR 0x000008 /* Sleep is interruptible. */
#define TDF_TIMEOUT 0x000010 /* Timing out during sleep. */
#define TDF_SELECT 0x000040 /* Selecting; wakeup/waiting danger. */
@ -347,14 +348,24 @@ struct thread {
#define TDI_LOCK 0x08 /* Stopped on a lock. */
#define TDI_IWAIT 0x10 /* Awaiting interrupt. */
#define TDI_LOAN 0x20 /* Bound thread's KSE is lent. */
#define TDI_IDLE 0x40 /* kse_release() made us surplus. */
#define TDI_EXITING 0x80 /* Thread is in exit processing. */
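/* A thread may have more than one inhibitor bit set at once. */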
#define TD_IS_UNBOUND(td) ((td)->td_flags & TDF_UNBOUND)
#define TD_IS_BOUND(td) (!TD_IS_UNBOUND(td))
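/* Temporarily bound: TDF_CAN_UNBIND set while TDF_UNBOUND is clear. */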
#define TD_CAN_UNBIND(td) \
(((td)->td_flags & (TDF_UNBOUND|TDF_CAN_UNBIND)) == TDF_CAN_UNBIND)
#define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING)
#define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL)
#define TD_IS_SUSPENDED(td) ((td)->td_inhibitors & TDI_SUSPENDED)
#define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED)
#define TD_ON_LOCK(td) ((td)->td_inhibitors & TDI_LOCK)
#define TD_LENT(td) ((td)->td_inhibitors & TDI_LOAN)
#define TD_LENDER(td) ((td)->td_inhibitors & TDI_LOAN)
#define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT)
#define TD_IS_IDLE(td) ((td)->td_inhibitors & TDI_IDLE)
#define TD_IS_EXITING(td) ((td)->td_inhibitors & TDI_EXITING)
#define TD_IS_RUNNING(td) ((td)->td_state == TDS_RUNNING)
#define TD_ON_RUNQ(td) ((td)->td_state == TDS_RUNQ)
#define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN)
@ -377,6 +388,8 @@ struct thread {
#define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED)
#define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT)
#define TD_SET_LOAN(td) TD_SET_INHIB((td), TDI_LOAN)
#define TD_SET_IDLE(td) TD_SET_INHIB((td), TDI_IDLE)
#define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING)
#define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING)
#define TD_CLR_SWAPPED(td) TD_CLR_INHIB((td), TDI_SWAPPED)
@ -384,6 +397,7 @@ struct thread {
#define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED)
#define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT)
#define TD_CLR_LOAN(td) TD_CLR_INHIB((td), TDI_LOAN)
#define TD_CLR_IDLE(td) TD_CLR_INHIB((td), TDI_IDLE)
#define TD_SET_RUNNING(td) do {(td)->td_state = TDS_RUNNING; } while (0)
#define TD_SET_RUNQ(td) do {(td)->td_state = TDS_RUNQ; } while (0)
@ -398,8 +412,7 @@ struct thread {
/*
* Traps for young players:
* The main thread variable that controls whether a thread acts as a threaded
* or unthreaded thread is the td_bound counter (0 == unbound).
* UPCALLS run with the UNBOUND flags clear, after they are first scheduled.
* or unthreaded thread is the TDF_UNBOUND flag.
* i.e. they bind themselves to whatever thread they are first scheduled with.
* You may see BOUND threads in KSE processes but you should never see
* UNBOUND threads in non-KSE processes.
@ -422,7 +435,7 @@ struct kse {
#define ke_startzero ke_flags
int ke_flags; /* (j) KEF_* flags. */
struct thread *ke_thread; /* Active associated thread. */
struct thread *ke_bound; /* Thread bound to this KSE (*) */
struct thread *ke_owner; /* Always points to the owner */
int ke_cpticks; /* (j) Ticks of cpu time. */
fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
u_int64_t ke_uu; /* (j) Previous user time in usec. */
@ -436,7 +449,7 @@ struct kse {
u_char ke_oncpu; /* (j) Which cpu we are on. */
char ke_rqindex; /* (j) Run queue index. */
enum {
KES_IDLE = 0x10,
KES_UNUSED = 0x0,
KES_ONRUNQ,
KES_UNQUEUED, /* in transit */
KES_THREAD /* slaved to thread state */
@ -480,7 +493,6 @@ struct ksegrp {
struct proc *kg_proc; /* Process that contains this KSEG. */
TAILQ_ENTRY(ksegrp) kg_ksegrp; /* Queue of KSEGs in kg_proc. */
TAILQ_HEAD(, kse) kg_kseq; /* (ke_kglist) All KSEs. */
TAILQ_HEAD(, kse) kg_iq; /* (ke_kgrlist) Idle KSEs. */
TAILQ_HEAD(, kse) kg_lq; /* (ke_kgrlist) Loan KSEs. */
TAILQ_HEAD(, thread) kg_threads;/* (td_kglist) All threads. */
TAILQ_HEAD(, thread) kg_runq; /* (td_runq) waiting RUNNABLE threads */
@ -502,7 +514,6 @@ struct ksegrp {
char kg_nice; /* (j?/k?) Process "nice" value. */
#define kg_endcopy kg_numthreads
int kg_numthreads; /* Num threads in total */
int kg_idle_kses; /* num KSEs idle */
int kg_kses; /* Num KSEs in group. */
struct kg_sched *kg_sched; /* Scheduler specific data */
};