Currently, when a signal is delivered to the process and there is a thread
not blocking the signal, the signal is placed on that thread's sigqueue. If
the selected thread is in the kernel executing the thr_exit() or sigprocmask()
syscalls, the signal might not be delivered to usermode for an arbitrary
amount of time, and for an exiting thread it is lost.
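To illustrate the race in userspace terms (a hypothetical sketch, not part of
this change): if the only thread not blocking a process-directed signal is one
that is about to exit, the signal could previously be queued on that thread and
then discarded by thr_exit(). Hitting the window is timing-dependent, so the
program below may or may not demonstrate it on any given run.

#include <pthread.h>
#include <signal.h>
#include <unistd.h>

static void
handler(int sig)
{
	(void)sig;
}

static void *
short_lived(void *arg)
{
	sigset_t set;

	(void)arg;
	/* Unblock SIGUSR1 so this thread is the only delivery candidate. */
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	pthread_sigmask(SIG_UNBLOCK, &set, NULL);
	return (NULL);			/* exits through thr_exit() */
}

int
main(void)
{
	pthread_t td;
	sigset_t set;

	signal(SIGUSR1, handler);
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &set, NULL);	/* main blocks SIGUSR1 */
	pthread_create(&td, NULL, short_lived, NULL);
	kill(getpid(), SIGUSR1);	/* may land on the exiting thread */
	pthread_join(td, NULL);
	sigemptyset(&set);
	sigsuspend(&set);	/* before the fix this could block forever */
	return (0);
}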

Put process-directed signals on the process queue unconditionally, and
select the thread to deliver the signal to only when a thread returns to
usermode, since only then can the thread handle the delivery reliably. For
an exiting thread, or a thread that has just blocked some signals, check
whether a newly blocked signal is queued for the process and try to find a
thread to wake up for delivery, in reschedule_signals(). For an exiting
thread, assume that all signals are blocked.
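Conceptually the flow looks like the sketch below (simplified, not the
committed code; locking is omitted and the two wrapper names are made up for
illustration, while sigqueue_add(), sigtd(), signotify(), cursig() and
postsig() are the existing kernel helpers):

/*
 * Simplified sketch: the sender only queues the signal on the process and
 * pokes one candidate thread; the first thread that returns to usermode
 * without blocking the signal actually consumes it.
 */
static void
send_process_signal(struct proc *p, int sig, ksiginfo_t *ksi)
{
	sigqueue_add(&p->p_sigqueue, sig, ksi);	/* queue on the process */
	signotify(sigtd(p, sig, 0));		/* hint a candidate thread */
}

static void
usermode_return_sketch(struct thread *td)
{
	int sig;

	/* cursig() now scans both td_sigqueue and p_sigqueue. */
	while ((sig = cursig(td, SIG_STOP_ALLOWED)) != 0)
		postsig(sig);
}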

Change cursig() and postsig() to look into both the thread and the process
signal queues. When there is a signal that a thread returning to usermode
could consume, the TDF_NEEDSIGCHK flag is not necessarily set now, so do an
unlocked read of p_siglist and p_pendingcnt to check for queued signals.
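The unlocked reads follow the usual cheap-check pattern: the racy read only
decides whether to take the locks, and the authoritative cursig() test runs
under them, so a false positive costs a lock round-trip and a false negative
is caught on a later return to usermode. Roughly, as a sketch in the shape of
the ast() change below (with the unlock path spelled out, not standalone code):

	if (flags & TDF_NEEDSIGCHK || p->p_pendingcnt > 0 ||
	    !SIGISEMPTY(p->p_siglist)) {
		PROC_LOCK(p);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td, SIG_STOP_ALLOWED)) != 0)
			postsig(sig);	/* consume from either queue */
		mtx_unlock(&p->p_sigacts->ps_mtx);
		PROC_UNLOCK(p);
	}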

Note that a thread that has a signal unblocked might now get a spurious
wakeup and EINTR from an interruptible system call, due to the possibility
of being selected by reschedule_signals() while another thread returned to
usermode earlier and removed the signal from the process queue. This should
not cause compliance issues, since the thread has not blocked the signal and
thus should be ready to receive it anyway.
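In practice the spurious EINTR is harmless for code that already follows the
standard retry idiom; a hypothetical userspace sketch:

#include <errno.h>
#include <unistd.h>

/*
 * Hypothetical helper: a thread that just unblocked a signal may see read()
 * fail with EINTR even though another thread already consumed the signal;
 * the usual retry loop absorbs the spurious wakeup.
 */
static ssize_t
read_retry(int fd, void *buf, size_t len)
{
	ssize_t n;

	do {
		n = read(fd, buf, len);
	} while (n == -1 && errno == EINTR);
	return (n);
}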

Reported by:	Justin Teller <justin.teller gmail com>
Reviewed by:	davidxu, jilles
MFC after:	1 month
Konstantin Belousov 2009-10-11 16:49:30 +00:00
parent a8211b27b9
commit 6b286ee8b5
4 changed files with 118 additions and 34 deletions

View File

@ -220,6 +220,8 @@ static int sigproptbl[NSIG] = {
SA_KILL|SA_PROC, /* SIGUSR2 */
};
static void reschedule_signals(struct proc *p, sigset_t block);
static void
sigqueue_start(void)
{
@ -581,20 +583,11 @@ void
signotify(struct thread *td)
{
struct proc *p;
sigset_t set;
p = td->td_proc;
PROC_LOCK_ASSERT(p, MA_OWNED);
/*
* If our mask changed we may have to move signal that were
* previously masked by all threads to our sigqueue.
*/
set = p->p_sigqueue.sq_signals;
SIGSETNAND(set, td->td_sigmask);
if (! SIGISEMPTY(set))
sigqueue_move_set(&p->p_sigqueue, &td->td_sigqueue, &set);
if (SIGPENDING(td)) {
thread_lock(td);
td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
@ -976,24 +969,28 @@ execsigs(struct proc *p)
* Manipulate signal mask.
*/
int
kern_sigprocmask(td, how, set, oset, old)
struct thread *td;
int how;
sigset_t *set, *oset;
int old;
kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
int old)
{
sigset_t new_block, oset1;
struct proc *p;
int error;
PROC_LOCK(td->td_proc);
p = td->td_proc;
PROC_LOCK(p);
if (oset != NULL)
*oset = td->td_sigmask;
error = 0;
SIGEMPTYSET(new_block);
if (set != NULL) {
switch (how) {
case SIG_BLOCK:
SIG_CANTMASK(*set);
oset1 = td->td_sigmask;
SIGSETOR(td->td_sigmask, *set);
new_block = td->td_sigmask;
SIGSETNAND(new_block, oset1);
break;
case SIG_UNBLOCK:
SIGSETNAND(td->td_sigmask, *set);
@ -1001,10 +998,13 @@ kern_sigprocmask(td, how, set, oset, old)
break;
case SIG_SETMASK:
SIG_CANTMASK(*set);
oset1 = td->td_sigmask;
if (old)
SIGSETLO(td->td_sigmask, *set);
else
td->td_sigmask = *set;
new_block = td->td_sigmask;
SIGSETNAND(new_block, oset1);
signotify(td);
break;
default:
@ -1012,7 +1012,20 @@ kern_sigprocmask(td, how, set, oset, old)
break;
}
}
PROC_UNLOCK(td->td_proc);
/*
* The new_block set contains signals that were not previously
* blocked, but are blocked now.
*
* In case we block any signal that was not previously blocked
* for td, and process has the signal pending, try to schedule
* signal delivery to some thread that does not block the signal,
* possibly waking it up.
*/
if (p->p_numthreads != 1)
reschedule_signals(p, new_block);
PROC_UNLOCK(p);
return (error);
}
@ -1985,17 +1998,9 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
KNOTE_LOCKED(&p->p_klist, NOTE_SIGNAL | sig);
prop = sigprop(sig);
/*
* If the signal is blocked and not destined for this thread, then
* assign it to the process so that we can find it later in the first
* thread that unblocks it. Otherwise, assign it to this thread now.
*/
if (td == NULL) {
td = sigtd(p, sig, prop);
if (SIGISMEMBER(td->td_sigmask, sig))
sigqueue = &p->p_sigqueue;
else
sigqueue = &td->td_sigqueue;
sigqueue = &p->p_sigqueue;
} else {
KASSERT(td->td_proc == p, ("invalid thread"));
sigqueue = &td->td_sigqueue;
@ -2392,6 +2397,62 @@ ptracestop(struct thread *td, int sig)
return (td->td_xsig);
}
static void
reschedule_signals(struct proc *p, sigset_t block)
{
struct sigacts *ps;
struct thread *td;
int i;
PROC_LOCK_ASSERT(p, MA_OWNED);
ps = p->p_sigacts;
for (i = 1; !SIGISEMPTY(block); i++) {
if (!SIGISMEMBER(block, i))
continue;
SIGDELSET(block, i);
if (!SIGISMEMBER(p->p_siglist, i))
continue;
td = sigtd(p, i, 0);
signotify(td);
mtx_lock(&ps->ps_mtx);
if (p->p_flag & P_TRACED || SIGISMEMBER(ps->ps_sigcatch, i))
tdsigwakeup(td, i, SIG_CATCH,
(SIGISMEMBER(ps->ps_sigintr, i) ? EINTR :
ERESTART));
mtx_unlock(&ps->ps_mtx);
}
}
void
tdsigcleanup(struct thread *td)
{
struct proc *p;
sigset_t unblocked;
p = td->td_proc;
PROC_LOCK_ASSERT(p, MA_OWNED);
sigqueue_flush(&td->td_sigqueue);
if (p->p_numthreads == 1)
return;
/*
* Since we cannot handle signals, notify signal post code
* about this by filling the sigmask.
*
* Also, if needed, wake up thread(s) that do not block the
* same signals as the exiting thread, since the thread might
* have been selected for delivery and woken up.
*/
SIGFILLSET(unblocked);
SIGSETNAND(unblocked, td->td_sigmask);
SIGFILLSET(td->td_sigmask);
reschedule_signals(p, unblocked);
}
/*
* If the current process has received a signal (should be caught or cause
* termination, should interrupt current syscall), return the signal number.
@ -2409,8 +2470,9 @@ issignal(struct thread *td, int stop_allowed)
{
struct proc *p;
struct sigacts *ps;
struct sigqueue *queue;
sigset_t sigpending;
int sig, prop, newsig;
int sig, prop, newsig, signo;
p = td->td_proc;
ps = p->p_sigacts;
@ -2420,6 +2482,7 @@ issignal(struct thread *td, int stop_allowed)
int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
sigpending = td->td_sigqueue.sq_signals;
SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
SIGSETNAND(sigpending, td->td_sigmask);
if (p->p_flag & P_PPWAIT)
@ -2440,6 +2503,7 @@ issignal(struct thread *td, int stop_allowed)
*/
if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
sigqueue_delete(&td->td_sigqueue, sig);
sigqueue_delete(&p->p_sigqueue, sig);
continue;
}
if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
@ -2452,12 +2516,18 @@ issignal(struct thread *td, int stop_allowed)
if (sig != newsig) {
ksiginfo_t ksi;
queue = &td->td_sigqueue;
/*
* clear old signal.
* XXX shrug off debugger, it causes siginfo to
* be thrown away.
*/
sigqueue_get(&td->td_sigqueue, sig, &ksi);
if (sigqueue_get(queue, sig, &ksi) == 0) {
queue = &p->p_sigqueue;
signo = sigqueue_get(queue, sig, &ksi);
KASSERT(signo == sig, ("signo != sig"));
}
/*
* If parent wants us to take the signal,
@ -2472,7 +2542,7 @@ issignal(struct thread *td, int stop_allowed)
* Put the new signal into td_sigqueue. If the
* signal is being masked, look for other signals.
*/
SIGADDSET(td->td_sigqueue.sq_signals, sig);
SIGADDSET(queue->sq_signals, sig);
if (SIGISMEMBER(td->td_sigmask, sig))
continue;
signotify(td);
@ -2567,6 +2637,7 @@ issignal(struct thread *td, int stop_allowed)
return (sig);
}
sigqueue_delete(&td->td_sigqueue, sig); /* take the signal! */
sigqueue_delete(&p->p_sigqueue, sig);
}
/* NOTREACHED */
}
@ -2613,7 +2684,9 @@ postsig(sig)
ps = p->p_sigacts;
mtx_assert(&ps->ps_mtx, MA_OWNED);
ksiginfo_init(&ksi);
sigqueue_get(&td->td_sigqueue, sig, &ksi);
if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
return;
ksi.ksi_signo = sig;
if (ksi.ksi_code == SI_TIMER)
itimer_accept(p, ksi.ksi_timerid, &ksi);

View File

@ -282,7 +282,7 @@ thr_exit(struct thread *td, struct thr_exit_args *uap)
}
PROC_LOCK(p);
sigqueue_flush(&td->td_sigqueue);
tdsigcleanup(td);
PROC_SLOCK(p);
/*

View File

@ -90,6 +90,7 @@ userret(struct thread *td, struct trapframe *frame)
CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
td->td_name);
#if 0
#ifdef DIAGNOSTIC
/* Check that we called signotify() enough. */
PROC_LOCK(p);
@ -100,6 +101,7 @@ userret(struct thread *td, struct trapframe *frame)
thread_unlock(td);
PROC_UNLOCK(p);
#endif
#endif
#ifdef KTRACE
KTRUSERRET(td);
#endif
@ -218,7 +220,14 @@ ast(struct trapframe *framep)
ktrcsw(0, 1);
#endif
}
if (flags & TDF_NEEDSIGCHK) {
/*
* Check for signals. Unlocked reads of p_pendingcnt or
* p_siglist might cause process-directed signal to be handled
* later.
*/
if (flags & TDF_NEEDSIGCHK || p->p_pendingcnt > 0 ||
!SIGISEMPTY(p->p_siglist)) {
PROC_LOCK(p);
mtx_lock(&p->p_sigacts->ps_mtx);
while ((sig = cursig(td, SIG_STOP_ALLOWED)) != 0)

View File

@ -252,9 +252,10 @@ typedef struct sigqueue {
/* Return nonzero if process p has an unmasked pending signal. */
#define SIGPENDING(td) \
(!SIGISEMPTY((td)->td_siglist) && \
!sigsetmasked(&(td)->td_siglist, &(td)->td_sigmask))
((!SIGISEMPTY((td)->td_siglist) && \
!sigsetmasked(&(td)->td_siglist, &(td)->td_sigmask)) || \
(!SIGISEMPTY((td)->td_proc->p_siglist) && \
!sigsetmasked(&(td)->td_proc->p_siglist, &(td)->td_sigmask)))
/*
* Return the value of the pseudo-expression ((*set & ~*mask) != 0). This
* is an optimized version of SIGISEMPTY() on a temporary variable
@ -336,6 +337,7 @@ void sigexit(struct thread *td, int signum) __dead2;
int sig_ffs(sigset_t *set);
void siginit(struct proc *p);
void signotify(struct thread *td);
void tdsigcleanup(struct thread *td);
int tdsignal(struct proc *p, struct thread *td, int sig,
ksiginfo_t *ksi);
void trapsignal(struct thread *td, ksiginfo_t *);