Revert rev 184216 and 184199, due to the way the thread_lock works, it may cause a lockup.

Noticed by: peter, jhb
David Xu 2008-11-05 03:01:23 +00:00
parent cef3930d7c
commit 7b4a950a7d
6 changed files with 57 additions and 17 deletions
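
The lockup the message refers to stems from the ordering of the proc lock, the per-process spinlock (PROC_SLOCK), and the per-thread lock. The hunks below restore the older discipline in which tdsignal() and its helpers take the proc lock first, then the spinlock, then thread_lock(), and release them in reverse order. What follows is a minimal userland model of that ordering, not kernel code: pthread mutexes stand in for all three lock classes, and every model_* name is invented for illustration.

/*
 * Userland model of the lock ordering restored by this revert:
 * proc lock -> per-process spinlock -> thread lock, released in
 * reverse.  pthread mutexes stand in for all three lock classes.
 */
#include <pthread.h>
#include <stdio.h>

struct model_proc {
        pthread_mutex_t lock;   /* stands in for PROC_LOCK */
        pthread_mutex_t slock;  /* stands in for PROC_SLOCK */
        int suspcount;          /* modeled p_suspcount, guarded by slock */
};

struct model_thread {
        pthread_mutex_t lock;   /* stands in for thread_lock() */
        int suspended;          /* modeled TD_IS_SUSPENDED state */
};

/* Deliver a "signal", honoring proc lock -> spinlock -> thread lock. */
static void
model_tdsignal(struct model_proc *p, struct model_thread *td)
{
        pthread_mutex_lock(&p->lock);   /* PROC_LOCK(p) */
        pthread_mutex_lock(&p->slock);  /* PROC_SLOCK(p) */
        pthread_mutex_lock(&td->lock);  /* thread_lock(td) */

        if (td->suspended) {            /* roughly thread_unsuspend_one() */
                td->suspended = 0;
                p->suspcount--;
        }

        pthread_mutex_unlock(&td->lock);
        pthread_mutex_unlock(&p->slock);
        pthread_mutex_unlock(&p->lock);
}

int
main(void)
{
        struct model_proc p = { PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_MUTEX_INITIALIZER, 1 };
        struct model_thread td = { PTHREAD_MUTEX_INITIALIZER, 1 };

        model_tdsignal(&p, &td);
        printf("suspcount=%d suspended=%d\n", p.suspcount, td.suspended);
        return (0);
}

Compile with cc -pthread; the only point of the model is the acquire/release order, which, per the commit message, the reverted revisions had changed in a way that interacted badly with thread_lock().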


@ -2115,15 +2115,19 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
* Otherwise, process goes back to sleep state.
*/
p->p_flag &= ~P_STOPPED_SIG;
PROC_SLOCK(p);
if (p->p_numthreads == p->p_suspcount) {
PROC_SUNLOCK(p);
p->p_flag |= P_CONTINUED;
p->p_xstat = SIGCONT;
PROC_LOCK(p->p_pptr);
childproc_continued(p);
PROC_UNLOCK(p->p_pptr);
PROC_SLOCK(p);
}
if (action == SIG_DFL) {
thread_unsuspend(p);
PROC_SUNLOCK(p);
sigqueue_delete(sigqueue, sig);
goto out;
}
@ -2132,12 +2136,14 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
* The process wants to catch it so it needs
* to run at least one thread, but which one?
*/
PROC_SUNLOCK(p);
goto runfast;
}
/*
* The signal is not ignored or caught.
*/
thread_unsuspend(p);
PROC_SUNLOCK(p);
goto out;
}
@ -2161,10 +2167,12 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
* It may run a bit until it hits a thread_suspend_check().
*/
wakeup_swapper = 0;
PROC_SLOCK(p);
thread_lock(td);
if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
wakeup_swapper = sleepq_abort(td, intrval);
thread_unlock(td);
PROC_SUNLOCK(p);
if (wakeup_swapper)
kick_proc0();
goto out;
@ -2185,6 +2193,7 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
goto out;
p->p_flag |= P_STOPPED_SIG;
p->p_xstat = sig;
PROC_SLOCK(p);
sig_suspend_threads(td, p, 1);
if (p->p_numthreads == p->p_suspcount) {
/*
@ -2195,8 +2204,10 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
* should never be equal to p_suspcount.
*/
thread_stopped(p);
PROC_SUNLOCK(p);
sigqueue_delete_proc(p, p->p_xstat);
}
} else
PROC_SUNLOCK(p);
goto out;
}
} else {
@ -2211,8 +2222,12 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
*/
runfast:
tdsigwakeup(td, sig, action, intrval);
PROC_SLOCK(p);
thread_unsuspend(p);
PROC_SUNLOCK(p);
out:
/* If we jump here, proc slock should not be owned. */
PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
return (ret);
}
@ -2232,6 +2247,7 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
PROC_LOCK_ASSERT(p, MA_OWNED);
prop = sigprop(sig);
PROC_SLOCK(p);
thread_lock(td);
/*
* Bring the priority of a thread up if we want it to get
@ -2255,6 +2271,7 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
*/
if ((prop & SA_CONT) && action == SIG_DFL) {
thread_unlock(td);
PROC_SUNLOCK(p);
sigqueue_delete(&p->p_sigqueue, sig);
/*
* It may be on either list in this state.
@ -2283,6 +2300,7 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
#endif
}
out:
PROC_SUNLOCK(p);
thread_unlock(td);
if (wakeup_swapper)
kick_proc0();
@ -2294,6 +2312,7 @@ sig_suspend_threads(struct thread *td, struct proc *p, int sending)
struct thread *td2;
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
FOREACH_THREAD_IN_PROC(p, td2) {
thread_lock(td2);
@ -2325,9 +2344,11 @@ ptracestop(struct thread *td, int sig)
td->td_dbgflags |= TDB_XSIG;
td->td_xsig = sig;
PROC_SLOCK(p);
while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
if (p->p_flag & P_SINGLE_EXIT) {
td->td_dbgflags &= ~TDB_XSIG;
PROC_SUNLOCK(p);
return (sig);
}
/*
@ -2349,6 +2370,7 @@ ptracestop(struct thread *td, int sig)
goto stopme;
}
}
PROC_SUNLOCK(p);
return (td->td_xsig);
}
@ -2489,8 +2511,10 @@ issignal(td)
&p->p_mtx.lock_object, "Catching SIGSTOP");
p->p_flag |= P_STOPPED_SIG;
p->p_xstat = sig;
PROC_SLOCK(p);
sig_suspend_threads(td, p, 0);
thread_suspend_switch(td);
PROC_SUNLOCK(p);
mtx_lock(&ps->ps_mtx);
break;
} else if (prop & SA_IGNORE) {
@ -2532,15 +2556,18 @@ thread_stopped(struct proc *p)
int n;
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
n = p->p_suspcount;
if (p == curproc)
n++;
if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
PROC_SUNLOCK(p);
p->p_flag &= ~P_WAITED;
PROC_LOCK(p->p_pptr);
childproc_stopped(p, (p->p_flag & P_TRACED) ?
CLD_TRAPPED : CLD_STOPPED);
PROC_UNLOCK(p->p_pptr);
PROC_SLOCK(p);
}
}
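
The thread_stopped() hunk just above shows the same constraint from the other side: the spinlock is owned on entry (PROC_SLOCK_ASSERT), but it must be dropped around the blocking PROC_LOCK(p->p_pptr)/childproc_stopped() calls and retaken before returning. Below is a minimal userland sketch of that drop-and-reacquire pattern, again with pthread mutexes as stand-ins and hypothetical model_* names.

/*
 * Userland model of the pattern in thread_stopped() above: enter with
 * the spin-class lock held, release it around the blocking parent
 * notification, and reacquire it before returning.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t proc_slock = PTHREAD_MUTEX_INITIALIZER; /* PROC_SLOCK */
static pthread_mutex_t pptr_lock = PTHREAD_MUTEX_INITIALIZER;  /* parent's PROC_LOCK */

static void
model_thread_stopped(int suspcount, int numthreads)
{
        /* Caller holds proc_slock, matching PROC_SLOCK_ASSERT(p, MA_OWNED). */
        if (suspcount == numthreads) {
                pthread_mutex_unlock(&proc_slock);        /* PROC_SUNLOCK(p) */
                pthread_mutex_lock(&pptr_lock);           /* PROC_LOCK(p->p_pptr) */
                printf("notify parent: child stopped\n"); /* childproc_stopped() */
                pthread_mutex_unlock(&pptr_lock);         /* PROC_UNLOCK(p->p_pptr) */
                pthread_mutex_lock(&proc_slock);          /* PROC_SLOCK(p) */
        }
        /* Returns with proc_slock held again, as the caller expects. */
}

int
main(void)
{
        pthread_mutex_lock(&proc_slock);
        model_thread_stopped(4, 4);
        pthread_mutex_unlock(&proc_slock);
        return (0);
}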


@ -286,6 +286,7 @@ thr_exit(struct thread *td, struct thr_exit_args *uap)
PROC_LOCK(p);
sigqueue_flush(&td->td_sigqueue);
PROC_SLOCK(p);
/*
* Shutting down last thread in the proc. This will actually
@ -293,10 +294,10 @@ thr_exit(struct thread *td, struct thr_exit_args *uap)
*/
if (p->p_numthreads != 1) {
thread_stopped(p);
PROC_SLOCK(p);
thread_exit();
/* NOTREACHED */
}
PROC_SUNLOCK(p);
PROC_UNLOCK(p);
return (0);
}


@ -543,6 +543,7 @@ thread_single(int mode)
p->p_flag &= ~P_SINGLE_BOUNDARY;
}
p->p_flag |= P_STOPPED_SINGLE;
PROC_SLOCK(p);
p->p_singlethread = td;
if (mode == SINGLE_EXIT)
remaining = p->p_numthreads;
@ -641,6 +642,7 @@ thread_single(int mode)
p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
thread_unthread(td);
}
PROC_SUNLOCK(p);
return (0);
}
@ -714,16 +716,15 @@ thread_suspend_check(int return_instead)
if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
sigqueue_flush(&td->td_sigqueue);
PROC_SLOCK(p);
thread_stopped(p);
/*
* If the process is waiting for us to exit,
* this thread should just suicide.
* Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
*/
if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
PROC_SLOCK(p);
if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
thread_exit();
}
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
if (p->p_numthreads == p->p_suspcount + 1) {
thread_lock(p->p_singlethread);
@ -734,8 +735,8 @@ thread_suspend_check(int return_instead)
kick_proc0();
}
}
thread_lock(td);
PROC_UNLOCK(p);
thread_lock(td);
/*
* When a thread suspends, it just
* gets taken off all queues.
@ -745,6 +746,7 @@ thread_suspend_check(int return_instead)
p->p_boundary_count++;
td->td_flags |= TDF_BOUNDARY;
}
PROC_SUNLOCK(p);
mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
if (return_instead == 0)
td->td_flags &= ~TDF_BOUNDARY;
@ -764,22 +766,25 @@ thread_suspend_switch(struct thread *td)
p = td->td_proc;
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
/*
* We implement thread_suspend_one in stages here to avoid
* dropping the proc lock while the thread lock is owned.
*/
thread_stopped(p);
p->p_suspcount++;
thread_lock(td);
PROC_UNLOCK(p);
thread_lock(td);
td->td_flags &= ~TDF_NEEDSUSPCHK;
TD_SET_SUSPENDED(td);
sched_sleep(td, 0);
PROC_SUNLOCK(p);
DROP_GIANT();
mi_switch(SW_VOL | SWT_SUSPEND, NULL);
thread_unlock(td);
PICKUP_GIANT();
PROC_LOCK(p);
PROC_SLOCK(p);
}
void
@ -787,6 +792,7 @@ thread_suspend_one(struct thread *td)
{
struct proc *p = td->td_proc;
PROC_SLOCK_ASSERT(p, MA_OWNED);
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
p->p_suspcount++;
@ -800,6 +806,7 @@ thread_unsuspend_one(struct thread *td)
{
struct proc *p = td->td_proc;
PROC_SLOCK_ASSERT(p, MA_OWNED);
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
TD_CLR_SUSPENDED(td);
@ -817,6 +824,7 @@ thread_unsuspend(struct proc *p)
int wakeup_swapper;
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
wakeup_swapper = 0;
if (!P_SHOULDSTOP(p)) {
FOREACH_THREAD_IN_PROC(p, td) {
@ -855,6 +863,7 @@ thread_single_end(void)
p = td->td_proc;
PROC_LOCK_ASSERT(p, MA_OWNED);
p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
PROC_SLOCK(p);
p->p_singlethread = NULL;
wakeup_swapper = 0;
/*
@ -872,6 +881,7 @@ thread_single_end(void)
thread_unlock(td);
}
}
PROC_SUNLOCK(p);
if (wakeup_swapper)
kick_proc0();
}
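
The comment in thread_suspend_switch() above ("We implement thread_suspend_one in stages here ...") describes the other direction of the same discipline: the blocking proc lock is released before thread_lock() is taken, so a blocking lock is never dropped while a spin-class lock is held. Here is a small userland sketch of that staging, with pthread mutexes as stand-ins and hypothetical model_* names.

/*
 * Userland model of the staging in thread_suspend_switch() above: bump
 * the spinlock-protected counter, drop the blocking proc lock, and only
 * then take the thread lock to mark the thread suspended.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t proc_lock = PTHREAD_MUTEX_INITIALIZER;  /* PROC_LOCK */
static pthread_mutex_t proc_slock = PTHREAD_MUTEX_INITIALIZER; /* PROC_SLOCK */
static pthread_mutex_t td_lock = PTHREAD_MUTEX_INITIALIZER;    /* thread_lock */

static int suspcount;  /* modeled p_suspcount, guarded by proc_slock */
static int suspended;  /* modeled TD_IS_SUSPENDED state, guarded by td_lock */

static void
model_suspend_switch(void)
{
        pthread_mutex_lock(&proc_lock);
        pthread_mutex_lock(&proc_slock);

        suspcount++;                            /* p->p_suspcount++ */
        pthread_mutex_unlock(&proc_lock);       /* PROC_UNLOCK(p) first ... */
        pthread_mutex_lock(&td_lock);           /* ... then thread_lock(td) */
        suspended = 1;                          /* TD_SET_SUSPENDED(td) */
        pthread_mutex_unlock(&proc_slock);      /* PROC_SUNLOCK(p) */

        printf("would mi_switch() away here\n");

        pthread_mutex_unlock(&td_lock);
        /* The kernel retakes PROC_LOCK(p) and PROC_SLOCK(p) on wakeup. */
}

int
main(void)
{
        model_suspend_switch();
        return (0);
}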


@ -395,7 +395,6 @@ sleepq_catch_signals(void *wchan, int pri)
sleepq_switch(wchan, pri);
return (0);
}
thread_unlock(td);
mtx_unlock_spin(&sc->sc_lock);
CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
@ -415,15 +414,16 @@ sleepq_catch_signals(void *wchan, int pri)
ret = ERESTART;
mtx_unlock(&ps->ps_mtx);
}
/*
* Lock the per-process spinlock prior to dropping the PROC_LOCK
* to avoid a signal delivery race. PROC_LOCK, PROC_SLOCK, and
* thread_lock() are currently held in tdsignal().
*/
PROC_SLOCK(p);
mtx_lock_spin(&sc->sc_lock);
thread_lock(td);
PROC_UNLOCK(p);
if (ret == 0) {
sleepq_switch(wchan, pri);
return (0);
}
thread_lock(td);
PROC_SUNLOCK(p);
/*
* There were pending signals and this thread is still
* on the sleep queue, remove it from the sleep queue.
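
The comment in this hunk spells out why the spinlock is taken before the proc lock is dropped: the hand-over leaves no window in which neither lock is held, so a concurrent tdsignal() cannot slip a signal in unnoticed. A minimal userland sketch of that hand-over follows, with pthread mutexes as stand-ins and hypothetical model_* names.

/*
 * Userland model of the hand-over in sleepq_catch_signals() above: take
 * the per-process spinlock before releasing the proc lock, so pending
 * signal state cannot change while neither lock is held.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t proc_lock = PTHREAD_MUTEX_INITIALIZER;  /* PROC_LOCK */
static pthread_mutex_t proc_slock = PTHREAD_MUTEX_INITIALIZER; /* PROC_SLOCK */
static bool sig_pending;        /* modeled pending-signal state */

static void
model_catch_signals(void)
{
        pthread_mutex_lock(&proc_lock);
        bool ret = sig_pending;                 /* check for pending signals */

        /* Hand over: acquire the spinlock before dropping the proc lock. */
        pthread_mutex_lock(&proc_slock);
        pthread_mutex_unlock(&proc_lock);

        if (ret)
                printf("signal pending, abort the sleep\n");
        else
                printf("nothing pending, go to sleep\n");
        pthread_mutex_unlock(&proc_slock);
}

int
main(void)
{
        model_catch_signals();
        return (0);
}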


@ -795,8 +795,10 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
* you should use PT_SUSPEND to suspend it before
* continuing process.
*/
PROC_SLOCK(p);
p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
thread_unsuspend(p);
PROC_SUNLOCK(p);
} else {
if (data)
psignal(p, data);


@ -500,8 +500,8 @@ struct proc {
u_char p_pfsflags; /* (c) Procfs flags. */
struct nlminfo *p_nlminfo; /* (?) Only used by/for lockd. */
struct kaioinfo *p_aioinfo; /* (c) ASYNC I/O info. */
struct thread *p_singlethread;/* (c) If single threading this is it */
int p_suspcount; /* (c) Num threads in suspended mode. */
struct thread *p_singlethread;/* (c + j) If single threading this is it */
int p_suspcount; /* (j) Num threads in suspended mode. */
struct thread *p_xthread; /* (c) Trap thread */
int p_boundary_count;/* (c) Num threads at user boundary */
int p_pendingcnt; /* how many signals are pending */
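
The proc.h hunk above re-annotates p_suspcount as (j) and p_singlethread as (c + j); in proc.h's locking legend, (c) marks fields protected by the proc mutex and (j) fields protected by the per-process spinlock. A small userland sketch of enforcing such an annotation at run time, with a bool flag and assert() standing in for PROC_SLOCK_ASSERT() and all model_* names hypothetical:

/*
 * Userland model of enforcing the (j) annotation above: any code that
 * touches a spinlock-protected field first asserts the lock is held,
 * as PROC_SLOCK_ASSERT(p, MA_OWNED) does in the kernel.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct model_proc {
        pthread_mutex_t slock;  /* (j) per-process spinlock stand-in */
        bool slock_held;        /* bookkeeping for the assert below */
        int suspcount;          /* (j) number of suspended threads */
};

/* Touch a (j) field only with the spinlock held, as the annotation requires. */
static void
model_suspend_one(struct model_proc *p)
{
        assert(p->slock_held);  /* PROC_SLOCK_ASSERT(p, MA_OWNED) */
        p->suspcount++;
}

int
main(void)
{
        struct model_proc p = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

        pthread_mutex_lock(&p.slock);
        p.slock_held = true;
        model_suspend_one(&p);
        p.slock_held = false;
        pthread_mutex_unlock(&p.slock);
        return (0);
}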