Solve a complex exit race introduced with thread_lock:

- Add a count of exiting threads, p_exitthreads, to struct proc.
- Increment p_exitthreads when we set the deadthread in thread_exit().
- When we thread_stash() a deadthread, use an atomic to drop the count.
- Spin until the p_exitthreads count reaches 0 in thread_wait().
- Lock the last exiting thread momentarily to be certain that it has
  exited cpu_throw().
- Restructure thread_wait().  It does not need a loop as there will only
  ever be one thread (see the sketch below).
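
Illustration (not part of the commit): the handshake above can be mimicked in
userland with C11 atomics and pthreads.  The names below (exiting_thread,
td_resources, the 4096-byte buffer) are invented for this sketch; only the
p_exitthreads counter and the increment/decrement/spin pattern mirror the
kernel change.  For simplicity the main thread does the increment up front,
whereas in the kernel the exiting thread increments its own count in
thread_exit().

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int p_exitthreads;	/* exiting threads not yet stashed */
static char *td_resources;		/* stand-in for the thread's stack, etc. */

static void *
exiting_thread(void *arg)
{
	(void)arg;
	/*
	 * The thread is still running here, just as a kernel thread is
	 * still on the CPU until cpu_throw() completes.
	 */

	/* thread_stash() analogue: the very last thing the thread does. */
	atomic_fetch_sub_explicit(&p_exitthreads, 1, memory_order_release);
	return (NULL);
}

int
main(void)
{
	pthread_t td;

	td_resources = malloc(4096);

	/* thread_exit() analogue: count the thread before it departs. */
	atomic_fetch_add(&p_exitthreads, 1);
	pthread_create(&td, NULL, exiting_thread, NULL);

	/* thread_wait() analogue: spin until the count drains to zero. */
	while (atomic_load_explicit(&p_exitthreads, memory_order_acquire) != 0)
		sched_yield();

	/* Only now is it safe to reclaim what the thread was using. */
	free(td_resources);
	pthread_join(td, NULL);
	printf("resources reclaimed without racing the exiting thread\n");
	return (0);
}

The momentary thread_lock()/thread_unlock() of the last thread in the new
thread_wait() has no userland analogue here; as the log and the in-diff
comments say, it makes the waiter spin until that thread has exited
cpu_throw(), while the p_exitthreads loop waits for any remaining threads to
do the same.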

Tested by:	moose@opera.com
Reported by:	kris, moose@opera.com
jeff	2007-06-12 07:24:46 +00:00
commit 49712c9a60, parent aa6c27d817
2 changed files with 34 additions and 15 deletions

--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c

@@ -73,6 +73,8 @@ TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
 struct mtx zombie_lock;
 MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);
 
+static void thread_zombie(struct thread *);
+
 #ifdef KSE
 static int
 sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
@@ -248,17 +250,27 @@ threadinit(void)
 }
 
 /*
- * Stash an embarasingly extra thread into the zombie thread queue.
+ * Place an unused thread on the zombie list.
  * Use the slpq as that must be unused by now.
  */
 void
-thread_stash(struct thread *td)
+thread_zombie(struct thread *td)
 {
 	mtx_lock_spin(&zombie_lock);
 	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
 	mtx_unlock_spin(&zombie_lock);
 }
 
+/*
+ * Release a thread that has exited after cpu_throw().
+ */
+void
+thread_stash(struct thread *td)
+{
+	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
+	thread_zombie(td);
+}
+
 /*
  * Reap zombie kse resource.
  */
@@ -371,7 +383,7 @@ thread_exit(void)
 		 * Note that we don't need to free the cred here as it
 		 * is done in thread_reap().
 		 */
-		thread_stash(td->td_standin);
+		thread_zombie(td->td_standin);
 		td->td_standin = NULL;
 	}
 #endif
@@ -440,6 +452,7 @@ thread_exit(void)
 			 */
 			upcall_remove(td);
 #endif
+			atomic_add_int(&td->td_proc->p_exitthreads, 1);
 			PCPU_SET(deadthread, td);
 		} else {
 			/*
@@ -481,20 +494,25 @@ thread_wait(struct proc *p)
 
 	mtx_assert(&Giant, MA_NOTOWNED);
 	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
-	FOREACH_THREAD_IN_PROC(p, td) {
+	td = FIRST_THREAD_IN_PROC(p);
 #ifdef KSE
-		if (td->td_standin != NULL) {
-			if (td->td_standin->td_ucred != NULL) {
-				crfree(td->td_standin->td_ucred);
-				td->td_standin->td_ucred = NULL;
-			}
-			thread_free(td->td_standin);
-			td->td_standin = NULL;
+	if (td->td_standin != NULL) {
+		if (td->td_standin->td_ucred != NULL) {
+			crfree(td->td_standin->td_ucred);
+			td->td_standin->td_ucred = NULL;
 		}
-#endif
-		cpu_thread_clean(td);
-		crfree(td->td_ucred);
+		thread_free(td->td_standin);
+		td->td_standin = NULL;
 	}
+#endif
+	/* Lock the last thread so we spin until it exits cpu_throw(). */
+	thread_lock(td);
+	thread_unlock(td);
+	/* Wait for any remaining threads to exit cpu_throw(). */
+	while (p->p_exitthreads)
+		sched_relinquish(curthread);
+	cpu_thread_clean(td);
+	crfree(td->td_ucred);
 	thread_reap();	/* check for zombie threads etc. */
 }
 
@@ -548,7 +566,7 @@ thread_unthread(struct thread *td)
 	td->td_mailbox = NULL;
 	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
 	if (td->td_standin != NULL) {
-		thread_stash(td->td_standin);
+		thread_zombie(td->td_standin);
 		td->td_standin = NULL;
 	}
 	sched_set_concurrency(p, 1);

--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h

@@ -524,6 +524,7 @@ struct proc {
 	struct rusage_ext p_crux;	/* (c) Internal child resource usage. */
 	int		p_profthreads;	/* (c) Num threads in addupc_task. */
 	int		p_maxthrwaits;	/* (c) Max threads num waiters */
+	volatile int	p_exitthreads;	/* (j) Number of threads exiting */
 	int		p_traceflag;	/* (o) Kernel trace points. */
 	struct vnode	*p_tracevp;	/* (c + o) Trace to vnode. */
 	struct ucred	*p_tracecred;	/* (o) Credentials to trace with. */