Unbreak the KSE code. Keep track of zombie threads using per-CPU storage
during the context switch. Rearrange thread cleanups to avoid problems with
Giant. Clean threads when freed or when recycled.

Approved by: re (jhb)
parent bdaf0d3b7d
commit 696058c3c5

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=107719
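The hunks below implement a per-CPU hand-off for dying threads: thread_exit() can no longer free or recycle its own thread structure while it is still running on that thread's stack, so it parks itself in the new pc_deadthread slot and the next thread through mi_switch() (or fork_exit()) stashes it for reaping. What follows is a minimal userland C sketch of that idea only, not kernel code; struct thread, thread_stash() and the pcpu_deadthread variable here are simplified stand-ins for the real per-CPU machinery.

/*
 * Minimal model of the deadthread hand-off (an assumed simplification,
 * not FreeBSD kernel code).
 */
#include <stdio.h>
#include <stdlib.h>

struct thread {
	int tid;			/* stand-in for the real struct thread */
};

/* Stand-in for the new pc_deadthread member of struct pcpu. */
static struct thread *pcpu_deadthread;

/*
 * Stand-in for thread_stash(): the kernel queues the zombie for
 * thread_reap(); this model just releases it.
 */
static void
thread_stash(struct thread *td)
{
	printf("reaping zombie thread %d\n", td->tid);
	free(td);
}

/* What thread_exit() now does instead of freeing itself. */
static void
model_thread_exit(struct thread *td)
{
	pcpu_deadthread = td;	/* PCPU_SET(deadthread, td) in the diff */
	/* cpu_throw() would switch away here and never return */
}

/* What mi_switch()/fork_exit() do on the other side of the switch. */
static void
model_after_switch(void)
{
	struct thread *td;

	if ((td = pcpu_deadthread) != NULL) {	/* PCPU_GET(deadthread) */
		pcpu_deadthread = NULL;
		thread_stash(td);	/* safe: we run on a different stack now */
	}
}

int
main(void)
{
	struct thread *td = malloc(sizeof(*td));

	td->tid = 100042;
	model_thread_exit(td);	/* the "dying" thread parks itself */
	model_after_switch();	/* the next thread on this CPU cleans up */
	return (0);
}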
@@ -261,7 +261,7 @@ cpu_thread_exit(struct thread *td)
}

void
cpu_thread_dtor(struct thread *td)
cpu_thread_clean(struct thread *td)
{
}

@@ -282,7 +282,7 @@ cpu_thread_exit(struct thread *td)
}

void
cpu_thread_dtor(struct thread *td)
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

@@ -294,8 +294,10 @@ cpu_thread_dtor(struct thread *td)
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them? (not done here)
		 */
		mtx_lock(&Giant);
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		mtx_unlock(&Giant);
		pcb->pcb_ext = 0;
	}
}
@@ -388,6 +390,15 @@ void
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the uts
	 * function.
@@ -118,7 +118,7 @@ cpu_thread_exit(struct thread *td)
}

void
cpu_thread_dtor(struct thread *td)
cpu_thread_clean(struct thread *td)
{
}

@@ -483,7 +483,7 @@ exit1(td, rv)

	/*
	 * Finally, call machine-dependent code to release the remaining
	 * resources including address space, the kernel stack and pcb.
	 * resources including address space.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
@@ -493,6 +493,7 @@ exit1(td, rv)
	PROC_LOCK(p->p_pptr);
	sx_xunlock(&proctree_lock);
	mtx_lock_spin(&sched_lock);

	while (mtx_owned(&Giant))
		mtx_unlock(&Giant);

@@ -512,11 +513,11 @@ exit1(td, rv)

	cpu_sched_exit(td); /* XXXKSE check if this should be in thread_exit */
	/*
	 * Make sure this thread is discarded from the zombie.
	 * Make sure the scheduler takes this thread out of its tables etc.
	 * This will also release this thread's reference to the ucred.
	 * Other thread parts to release include pcb bits and such.
	 */
	thread_exit();
	panic("exit1");
}

#ifdef COMPAT_43
@@ -570,9 +571,6 @@ wait1(td, uap, compat)
	int nfound;
	struct proc *p, *q, *t;
	int status, error;
	struct thread *td2;
	struct kse *ke;
	struct ksegrp *kg;

	q = td->td_proc;
	if (uap->pid == 0) {
@@ -717,25 +715,9 @@ wait1(td, uap, compat)
			}

			/*
			 * There should only be one
			 * but do it right anyhow.
			 * do any thread-system specific cleanups
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				FOREACH_KSE_IN_GROUP(kg, ke) {
					/* Free the KSE spare thread. */
					if (ke->ke_tdspare != NULL) {
						thread_free(ke->ke_tdspare);
						ke->ke_tdspare = NULL;
					}
				}
			}
			FOREACH_THREAD_IN_PROC(p, td2) {
				if (td2->td_standin != NULL) {
					thread_free(td2->td_standin);
					td2->td_standin = NULL;
				}
			}
			thread_reap(); /* check for zombie threads */
			thread_wait(p);

			/*
			 * Give vm and machine-dependent layer a chance
@@ -834,9 +834,15 @@ fork_exit(callout, arg, frame)
	void *arg;
	struct trapframe *frame;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct thread *td;
	struct proc *p;

	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
	td = curthread;
	p = td->td_proc;
	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
	p->p_state = PRS_NORMAL;
	/*
@@ -66,13 +66,9 @@ static uma_zone_t thread_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int oiks_debug = 0; /* 0 disable, 1 printf, 2 enter debugger */
SYSCTL_INT(_kern_threads, OID_AUTO, oiks, CTLFLAG_RW,
    &oiks_debug, 0, "OIKS thread debug");

static int oiks_max_threads_per_proc = 10;
SYSCTL_INT(_kern_threads, OID_AUTO, oiks_max_per_proc, CTLFLAG_RW,
    &oiks_max_threads_per_proc, 0, "Debug limit on threads per proc");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
    &thread_debug, 0, "thread debug");

static int max_threads_per_proc = 30;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
@@ -91,11 +87,10 @@ struct mtx zombie_thread_lock;
MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
    "zombie_thread_lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);


void kse_purge(struct proc *p, struct thread *td);
/*
 * Pepare a thread for use.
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
@@ -115,7 +110,6 @@ thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	mtx_assert(&Giant, MA_OWNED);
	td = (struct thread *)mem;

#ifdef INVARIANTS
@@ -138,8 +132,6 @@ thread_dtor(void *mem, int size, void *arg)
		/* NOTREACHED */
	}
#endif

	cpu_thread_dtor(td);
}

/*
@@ -346,12 +338,11 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
		return (EDEADLK);
	}
	if ((p->p_numthreads == 1) && (p->p_numksegrps == 1)) {
		/* XXXSKE what if >1 KSE? check.... */
		p->p_flag &= ~P_KSES;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		while (mtx_owned(&Giant))
			mtx_unlock(&Giant);
		td->td_kse->ke_flags |= KEF_EXIT;
		thread_exit();
		/* NOTREACHED */
@@ -359,40 +350,50 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
	return (0);
}

/*
 * Either returns as an upcall or exits
 */
int
kse_release(struct thread *td, struct kse_release_args *uap)
kse_release(struct thread * td, struct kse_release_args * uap)
{
	struct proc *p;
	struct ksegrp *kg;

	p = td->td_proc;
	/* KSE-enabled processes only */
	if (!(p->p_flag & P_KSES))
		return (EINVAL);
	kg = td->td_ksegrp;
	/*
	 * Must be a bound thread. And kse must have a mailbox ready,
	 * if not, the kse would can not generate an upcall.
	 * if not, the kse can not generate an upcall.
	 */
	if (!(td->td_flags & TDF_UNBOUND) && (td->td_kse->ke_mailbox != NULL)) {
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
	/* prevent last thread from exiting */
	if (!(p->p_flag & P_KSES) ||
	    (td->td_flags & TDF_UNBOUND) ||
	    (td->td_kse->ke_mailbox == NULL))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (kg->kg_completed == NULL) {
#if 1 /* temp until signals make new threads */
		if (p->p_numthreads == 1) {
			/* change OURSELF to become an upcall */
			td->td_flags = TDF_UPCALLING;
			mtx_unlock_spin(&sched_lock);
			if (td->td_standin == NULL) {
				PROC_UNLOCK(p);
				td->td_standin = thread_alloc();
				PROC_LOCK(p);
			}
			msleep(p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH,
			    "pause", 0);
			mtx_lock_spin(&sched_lock);
			td->td_flags |= TDF_UNBOUND;
			thread_schedule_upcall(td, td->td_kse);
			PROC_UNLOCK(p);
			/*
			 * msleep will not call thread_sched_upcall
			 * because thread is not UNBOUND.
			 */
			msleep(p->p_sigacts, NULL,
			    PPAUSE | PCATCH, "ksepause", 0);
			return (0);
		}
#endif /* end temp */
		thread_exit();
		/* NOTREACHED */
	}
	return (EINVAL);
	/* change OURSELF to become an upcall */
	td->td_flags = TDF_UPCALLING;
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (0);
}

/* struct kse_wakeup_args {
@@ -409,8 +410,6 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_KSES))
		return EINVAL;
	if (td->td_standin == NULL)
		td->td_standin = thread_alloc();
	ke = NULL;
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
@@ -507,7 +506,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
	 * which is safe.
	 */
	if ((td->td_flags & TDF_UNBOUND) || td->td_kse->ke_mailbox) {
		if (oiks_debug == 0) {
		if (thread_debug == 0) { /* if debugging, allow more */
#ifdef SMP
			if (kg->kg_kses > mp_ncpus)
#endif
@@ -779,6 +778,8 @@ kse_free(struct kse *td)
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

@@ -966,10 +967,9 @@ thread_update_uticks(void)
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our KSE's ke_tdspare slot, freeing the
 * thread that might be there currently. Because we know that only this
 * processor will run our KSE, we needn't worry about someone else grabbing
 * our context before we do a cpu_throw.
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
@@ -992,10 +992,6 @@ thread_exit(void)
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (ke->ke_tdspare != NULL) {
		thread_stash(ke->ke_tdspare);
		ke->ke_tdspare = NULL;
	}
	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
@@ -1039,87 +1035,47 @@ thread_exit(void)
		    ("thread_exit: entered with ke_bound set"));

		/*
		 * The reason for all this hoopla is
		 * an attempt to stop our thread stack from being freed
		 * until AFTER we have stopped running on it.
		 * Since we are under schedlock, almost any method where
		 * it is eventually freed by someone else is probably ok.
		 * (Especially if they do it under schedlock). We could
		 * almost free it here if we could be certain that
		 * the uma code wouldn't pull it apart immediatly,
		 * but unfortunatly we can not guarantee that.
		 *
		 * For threads that are exiting and NOT killing their
		 * KSEs we can just stash it in the KSE, however
		 * in the case where the KSE is also being deallocated,
		 * we need to store it somewhere else. It turns out that
		 * we will never free the last KSE, so there is always one
		 * other KSE available. We might as well just choose one
		 * and stash it there. Being under schedlock should make that
		 * safe.
		 *
		 * In borrower threads, we can stash it in the lender
		 * Where it won't be needed until this thread is long gone.
		 * Borrower threads can't kill their KSE anyhow, so even
		 * the KSE would be a safe place for them. It is not
		 * necessary to have a KSE (or KSEGRP) at all beyond this
		 * point, while we are under the protection of schedlock.
		 *
		 * Either give the KSE to another thread to use (or make
		 * it idle), or free it entirely, possibly along with its
		 * ksegrp if it's the last one.
		 * decide what to do with the KSE attached to this thread.
		 */
		if (ke->ke_flags & KEF_EXIT) {
			kse_unlink(ke);
			/*
			 * Designate another KSE to hold our thread.
			 * Safe as long as we abide by whatever lock
			 * we control it with.. The other KSE will not
			 * be able to run it until we release the schelock,
			 * but we need to be careful about it deciding to
			 * write to the stack before then. Luckily
			 * I believe that while another thread's
			 * standin thread can be used in this way, the
			 * spare thread for the KSE cannot be used without
			 * holding schedlock at least once.
			 */
			ke = FIRST_KSE_IN_PROC(p);
		} else {
			kse_reassign(ke);
		}
#if 0
		if (ke->ke_bound) {
			/*
			 * WE are a borrower..
			 * stash our thread with the owner.
			 */
			if (ke->ke_bound->td_standin) {
				thread_stash(ke->ke_bound->td_standin);
			}
			ke->ke_bound->td_standin = td;
		} else {
#endif
		if (ke->ke_tdspare != NULL) {
			thread_stash(ke->ke_tdspare);
			ke->ke_tdspare = NULL;
		}
		ke->ke_tdspare = td;
#if 0
		}
#endif
		PROC_UNLOCK(p);
		td->td_state = TDS_INACTIVE;
		td->td_proc = NULL;
		td->td_ksegrp = NULL;
		td->td_last_kse = NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}

	cpu_throw();
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant held, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	KASSERT((p->p_numthreads == 1), ("Muliple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Muliple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap(); /* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
@@ -1145,11 +1101,6 @@ thread_link(struct thread *td, struct ksegrp *kg)
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
	if (oiks_debug && (p->p_numthreads > oiks_max_threads_per_proc)) {
		printf("OIKS %d\n", p->p_numthreads);
		if (oiks_debug > 1)
			Debugger("OIKS");
	}
	td->td_kse = NULL;
}

@@ -1167,8 +1118,6 @@ kse_purge(struct proc *p, struct thread *td)
			kg->kg_idle_kses--;
			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
			kg->kg_kses--;
			if (ke->ke_tdspare)
				thread_stash(ke->ke_tdspare);
			kse_stash(ke);
		}
		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
@@ -1512,6 +1461,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
			TD_SET_LOAN(td);
			ke->ke_bound = td;
			ke->ke_thread = NULL;
			p->p_stats->p_ru.ru_nvcsw++;
			mi_switch(); /* kse_reassign() will (re)find td2 */
		}
		mtx_unlock_spin(&sched_lock);
@@ -1522,12 +1472,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
	 * for when we re-enter the kernel.
	 */
	if (td->td_standin == NULL) {
		if (ke->ke_tdspare) {
			td->td_standin = ke->ke_tdspare;
			ke->ke_tdspare = NULL;
		} else {
			td->td_standin = thread_alloc();
		}
		td->td_standin = thread_alloc();
	}

	thread_update_uticks();
@@ -1550,6 +1495,8 @@ thread_userret(struct thread *td, struct trapframe *frame)

	/*
	 * Set user context to the UTS.
	 * Will use Giant in cpu_thread_clean() because it uses
	 * kmem_free(kernel_map, ...)
	 */
	cpu_set_upcall_kse(td, ke);

@@ -1619,6 +1566,7 @@ thread_single(int force_exit)

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

@@ -1677,6 +1625,7 @@ thread_single(int force_exit)
		thread_suspend_one(td);
		mtx_unlock(&Giant);
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&Giant);
@@ -540,6 +540,14 @@ mi_switch(void)
	 */
	if (td->td_switchin)
		td->td_switchin();

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
@@ -354,7 +354,7 @@ cpu_thread_exit(struct thread *td)
}

void
cpu_thread_dtor(struct thread *td)
cpu_thread_clean(struct thread *td)
{
}

@@ -354,7 +354,7 @@ cpu_thread_exit(struct thread *td)
}

void
cpu_thread_dtor(struct thread *td)
cpu_thread_clean(struct thread *td)
{
}

@@ -114,7 +114,7 @@ cpu_thread_exit(struct thread *td)
}

void
cpu_thread_dtor(struct thread *td)
cpu_thread_clean(struct thread *td)
{
}

@@ -58,6 +58,7 @@ struct pcpu {
	struct thread *pc_curthread;	/* Current thread */
	struct thread *pc_idlethread;	/* Idle thread */
	struct thread *pc_fpcurthread;	/* Fp state owner */
	struct thread *pc_deadthread;	/* Zombie thread or NULL */
	struct pcb *pc_curpcb;		/* Current pcb */
	struct bintime pc_switchtime;
	int pc_switchticks;
@@ -905,7 +905,7 @@ void kse_free(struct kse *ke);
void kse_stash(struct kse *ke);
void cpu_set_upcall(struct thread *td, void *pcb);
void cpu_set_upcall_kse(struct thread *td, struct kse *ke);
void cpu_thread_dtor(struct thread *);
void cpu_thread_clean(struct thread *);
void cpu_thread_exit(struct thread *);
void cpu_thread_setup(struct thread *td);
void kse_reassign(struct kse *ke);
@@ -935,6 +935,7 @@ void thread_unsuspend(struct proc *p);
void thread_unsuspend_one(struct thread *td);
int thread_userret(struct thread *td, struct trapframe *frame);
void thread_user_enter(struct proc *p, struct thread *td);
void thread_wait(struct proc *p);
int thread_add_ticks_intr(int user, uint ticks);

void thread_sanity_check(struct thread *td, char *);