1996-01-22 00:23:58 +00:00
|
|
|
/*
|
1998-04-29 09:59:34 +00:00
|
|
|
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
|
1996-01-22 00:23:58 +00:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by John Birrell.
|
|
|
|
* 4. Neither the name of the author nor the names of any co-contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
1999-08-05 12:15:30 +00:00
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
1996-01-22 00:23:58 +00:00
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
1999-08-28 00:22:10 +00:00
|
|
|
* $FreeBSD$
|
1996-01-22 00:23:58 +00:00
|
|
|
*/
|
1999-09-29 15:18:46 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/signalvar.h>
|
1996-01-22 00:23:58 +00:00
|
|
|
#include <signal.h>
|
2003-04-18 05:04:16 +00:00
|
|
|
#include <errno.h>
|
1996-08-20 08:22:01 +00:00
|
|
|
#include <fcntl.h>
|
|
|
|
#include <unistd.h>
|
2003-04-18 05:04:16 +00:00
|
|
|
#include <string.h>
|
1996-01-22 00:23:58 +00:00
|
|
|
#include <pthread.h>
|
2002-09-16 08:45:36 +00:00
|
|
|
#include "thr_private.h"
|
2003-04-18 05:04:16 +00:00
|
|
|
#include "pthread_md.h"
|
1996-01-22 00:23:58 +00:00
|
|
|
|
Change signal handling to conform to POSIX specified semantics.
Before this change, a signal was delivered to each thread that
didn't have the signal masked. Signals also improperly woke up
threads waiting on I/O. With this change, signals are now
handled in the following way:
o If a thread is waiting in a sigwait for the signal,
then the thread is woken up.
o If no threads are sigwait'ing on the signal and a
thread is in a sigsuspend waiting for the signal,
then the thread is woken up.
o In the case that no threads are waiting or suspended
on the signal, then the signal is delivered to the
first thread we find that has the signal unmasked.
o If no threads are waiting or suspended on the signal,
and no threads have the signal unmasked, then the signal
is added to the process wide pending signal set. The
signal will be delivered to the first thread that unmasks
the signal.
If there is an installed signal handler, it is only invoked
if the chosen thread was not in a sigwait.
In the case that multiple threads are waiting or suspended
on a signal, or multiple threads have the signal unmasked,
we wake up/deliver the signal to the first thread we find.
The above rules still apply.
Reported by: Scott Hess <scott@avantgo.com>
Reviewed by: jb, jasone
1999-12-04 22:55:59 +00:00
|
|
|
/* Prototypes: */
|
2003-04-18 05:04:16 +00:00
|
|
|
static void build_siginfo(siginfo_t *info, int signo);
|
2003-05-16 19:58:30 +00:00
|
|
|
/* static void thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info); */
|
2003-04-18 05:04:16 +00:00
|
|
|
static void thr_sig_check_state(struct pthread *pthread, int sig);
|
|
|
|
static struct pthread *thr_sig_find(struct kse *curkse, int sig,
|
|
|
|
siginfo_t *info);
|
|
|
|
static void handle_special_signals(struct kse *curkse, int sig);
|
|
|
|
static void thr_sigframe_add(struct pthread *thread, int sig,
|
|
|
|
siginfo_t *info);
|
|
|
|
static void thr_sigframe_restore(struct pthread *thread,
|
|
|
|
struct pthread_sigframe *psf);
|
|
|
|
static void thr_sigframe_save(struct pthread *thread,
|
|
|
|
struct pthread_sigframe *psf);
|
|
|
|
static void thr_sig_invoke_handler(struct pthread *, int sig,
|
|
|
|
siginfo_t *info, ucontext_t *ucp);
|
2000-10-13 22:12:32 +00:00
|
|
|
|
2003-02-17 10:05:18 +00:00
|
|
|
/*
 * Debug tracing: define DEBUG_SIGNAL to route DBG_MSG() output through
 * stdout_debug(); otherwise DBG_MSG() compiles away to nothing.
 */
/* #define DEBUG_SIGNAL */

#ifdef DEBUG_SIGNAL
#define	DBG_MSG		stdout_debug
#else
#define	DBG_MSG(x...)
#endif
|
Change signal handling to conform to POSIX specified semantics.
Before this change, a signal was delivered to each thread that
didn't have the signal masked. Signals also improperly woke up
threads waiting on I/O. With this change, signals are now
handled in the following way:
o If a thread is waiting in a sigwait for the signal,
then the thread is woken up.
o If no threads are sigwait'ing on the signal and a
thread is in a sigsuspend waiting for the signal,
then the thread is woken up.
o In the case that no threads are waiting or suspended
on the signal, then the signal is delivered to the
first thread we find that has the signal unmasked.
o If no threads are waiting or suspended on the signal,
and no threads have the signal unmasked, then the signal
is added to the process wide pending signal set. The
signal will be delivered to the first thread that unmasks
the signal.
If there is an installed signal handler, it is only invoked
if the chosen thread was not in a sigwait.
In the case that multiple threads are waiting or suspended
on a signal, or multiple threads have the signal unmasked,
we wake up/deliver the signal to the first thread we find.
The above rules still apply.
Reported by: Scott Hess <scott@avantgo.com>
Reviewed by: jb, jasone
1999-12-04 22:55:59 +00:00
|
|
|
|
2003-02-17 10:05:18 +00:00
|
|
|
/*
|
2003-04-18 05:04:16 +00:00
|
|
|
* Signal setup and delivery.
|
|
|
|
*
|
|
|
|
* 1) Delivering signals to threads in the same KSE.
|
|
|
|
* These signals are sent by upcall events and are set in the
|
|
|
|
* km_sigscaught field of the KSE mailbox. Since these signals
|
|
|
|
* are received while operating on the KSE stack, they can be
|
|
|
|
* delivered either by using signalcontext() to add a stack frame
|
|
|
|
* to the target thread's stack, or by adding them in the thread's
|
|
|
|
* pending set and having the thread run them down after it
|
|
|
|
* 2) Delivering signals to threads in other KSEs/KSEGs.
|
|
|
|
* 3) Delivering signals to threads in critical regions.
|
|
|
|
* 4) Delivering signals to threads after they change their signal masks.
|
|
|
|
*
|
|
|
|
* Methods of delivering signals.
|
|
|
|
*
|
|
|
|
* 1) Add a signal frame to the thread's saved context.
|
|
|
|
* 2) Add the signal to the thread structure, mark the thread as
|
|
|
|
* having signals to handle, and let the thread run them down
|
|
|
|
* after it resumes from the KSE scheduler.
|
|
|
|
*
|
|
|
|
* Problem with 1). You can't do this to a running thread or a
|
|
|
|
* thread in a critical region.
|
|
|
|
*
|
|
|
|
* Problem with 2). You can't do this to a thread that doesn't
|
|
|
|
* yield in some way (explicitly enters the scheduler). A thread
|
|
|
|
* blocked in the kernel or a CPU hungry thread will not see the
|
|
|
|
* signal without entering the scheduler.
|
|
|
|
*
|
|
|
|
* The solution is to use both 1) and 2) to deliver signals:
|
|
|
|
*
|
|
|
|
* o Thread in critical region - use 2). When the thread
|
|
|
|
* leaves the critical region it will check to see if it
|
|
|
|
* has pending signals and run them down.
|
|
|
|
*
|
|
|
|
* o Thread enters scheduler explicitly - use 2). The thread
|
|
|
|
* can check for pending signals after it returns from the
|
|
|
|
* the scheduler.
|
|
|
|
*
|
|
|
|
* o Thread is running and not current thread - use 2). When the
|
|
|
|
* thread hits a condition specified by one of the other bullets,
|
|
|
|
* the signal will be delivered.
|
|
|
|
*
|
|
|
|
* o Thread is running and is current thread (e.g., the thread
|
|
|
|
* has just changed its signal mask and now sees that it has
|
|
|
|
* pending signals) - just run down the pending signals.
|
|
|
|
*
|
|
|
|
* o Thread is swapped out due to quantum expiration - use 1)
|
|
|
|
*
|
|
|
|
* o Thread is blocked in kernel - kse_thr_wakeup() and then
|
|
|
|
* use 1)
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Rules for selecting threads for signals received:
|
|
|
|
*
|
|
|
|
 * 1) If the signal is a synchronous signal, it is delivered to
|
|
|
|
* the generating (current thread). If the thread has the
|
|
|
|
 * signal masked, it is added to the thread's pending signal
|
|
|
|
* set until the thread unmasks it.
|
|
|
|
*
|
|
|
|
* 2) A thread in sigwait() where the signal is in the thread's
|
|
|
|
* waitset.
|
|
|
|
*
|
|
|
|
* 3) A thread in sigsuspend() where the signal is not in the
|
|
|
|
* thread's suspended signal mask.
|
|
|
|
*
|
|
|
|
* 4) Any thread (first found/easiest to deliver) that has the
|
|
|
|
* signal unmasked.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This signal handler only delivers asynchronous signals.
|
|
|
|
* This must be called with upcalls disabled and without
|
|
|
|
* holding any locks.
|
2003-02-17 10:05:18 +00:00
|
|
|
*/
|
1998-04-29 09:59:34 +00:00
|
|
|
void
|
2003-04-18 05:04:16 +00:00
|
|
|
_thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
|
|
|
|
{
|
|
|
|
struct pthread *thread;
|
|
|
|
|
|
|
|
DBG_MSG(">>> _thr_sig_dispatch(%d)\n", sig);
|
|
|
|
|
|
|
|
/* Some signals need special handling: */
|
|
|
|
handle_special_signals(curkse, sig);
|
2003-05-24 02:29:25 +00:00
|
|
|
stderr_debug("dispatch sig:%d\n", sig);
|
|
|
|
while ((thread = thr_sig_find(curkse, sig, info)) != NULL) {
|
2003-04-18 05:04:16 +00:00
|
|
|
/*
|
|
|
|
* Setup the target thread to receive the signal:
|
|
|
|
*/
|
|
|
|
DBG_MSG("Got signal %d, selecting thread %p\n", sig, thread);
|
|
|
|
KSE_SCHED_LOCK(curkse, thread->kseg);
|
2003-05-24 02:29:25 +00:00
|
|
|
if ((thread->state == PS_DEAD) ||
|
|
|
|
(thread->state == PS_DEADLOCK) ||
|
|
|
|
THR_IS_EXITING(thread) || THR_IS_SUSPENDED(thread)) {
|
|
|
|
KSE_SCHED_UNLOCK(curkse, thread->kseg);
|
|
|
|
_thr_ref_delete(NULL, thread);
|
|
|
|
} else {
|
|
|
|
_thr_sig_add(thread, sig, info);
|
|
|
|
KSE_SCHED_UNLOCK(curkse, thread->kseg);
|
|
|
|
_thr_ref_delete(NULL, thread);
|
|
|
|
break;
|
|
|
|
}
|
2003-04-18 05:04:16 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
_thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
|
|
|
|
{
|
|
|
|
void (*sigfunc)(int, siginfo_t *, void *);
|
|
|
|
struct kse *curkse;
|
|
|
|
|
|
|
|
curkse = _get_curkse();
|
|
|
|
if ((curkse == NULL) || ((curkse->k_flags & KF_STARTED) == 0)) {
|
|
|
|
/* Upcalls are not yet started; just call the handler. */
|
|
|
|
sigfunc = _thread_sigact[sig - 1].sa_sigaction;
|
|
|
|
ucp->uc_sigmask = _thr_proc_sigmask;
|
|
|
|
if (((__sighandler_t *)sigfunc != SIG_DFL) &&
|
|
|
|
((__sighandler_t *)sigfunc != SIG_IGN)) {
|
|
|
|
if (((_thread_sigact[sig - 1].sa_flags & SA_SIGINFO)
|
|
|
|
!= 0) || (info == NULL))
|
|
|
|
(*(sigfunc))(sig, info, ucp);
|
|
|
|
else
|
|
|
|
(*(sigfunc))(sig, (siginfo_t *)info->si_code,
|
|
|
|
ucp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
/* Nothing. */
|
|
|
|
DBG_MSG("Got signal %d\n", sig);
|
|
|
|
sigaddset(&curkse->k_mbx.km_sigscaught, sig);
|
|
|
|
ucp->uc_sigmask = _thr_proc_sigmask;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info,
|
|
|
|
ucontext_t *ucp)
|
1998-04-29 09:59:34 +00:00
|
|
|
{
|
2003-04-18 05:04:16 +00:00
|
|
|
void (*sigfunc)(int, siginfo_t *, void *);
|
|
|
|
sigset_t saved_mask;
|
|
|
|
int saved_seqno;
|
2000-01-19 07:04:50 +00:00
|
|
|
|
2003-04-18 05:04:16 +00:00
|
|
|
/* Invoke the signal handler without going through the scheduler:
|
|
|
|
*/
|
|
|
|
DBG_MSG("Got signal %d, calling handler for current thread %p\n",
|
|
|
|
sig, curthread);
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
  select() to poll().  In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
|
2003-04-18 05:04:16 +00:00
|
|
|
/*
|
|
|
|
* Setup the threads signal mask.
|
|
|
|
*
|
|
|
|
* The mask is changed in the thread's active signal mask
|
|
|
|
* (in the context) and not in the base signal mask because
|
|
|
|
* a thread is allowed to change its signal mask within a
|
|
|
|
* signal handler. If it does, the signal mask restored
|
|
|
|
* after the handler should be the same as that set by the
|
|
|
|
* thread during the handler, not the original mask from
|
|
|
|
* before calling the handler. The thread could also
|
|
|
|
* modify the signal mask in the context and expect this
|
|
|
|
* mask to be used.
|
|
|
|
*/
|
|
|
|
THR_SCHED_LOCK(curthread, curthread);
|
|
|
|
saved_mask = curthread->tmbx.tm_context.uc_sigmask;
|
|
|
|
saved_seqno = curthread->sigmask_seqno;
|
|
|
|
SIGSETOR(curthread->tmbx.tm_context.uc_sigmask,
|
|
|
|
_thread_sigact[sig - 1].sa_mask);
|
|
|
|
sigaddset(&curthread->tmbx.tm_context.uc_sigmask, sig);
|
|
|
|
THR_SCHED_UNLOCK(curthread, curthread);
|
2000-10-13 22:12:32 +00:00
|
|
|
|
2002-02-09 19:58:41 +00:00
|
|
|
/*
|
2003-04-18 05:04:16 +00:00
|
|
|
* Check that a custom handler is installed and if
|
|
|
|
* the signal is not blocked:
|
2002-02-09 19:58:41 +00:00
|
|
|
*/
|
2003-04-18 05:04:16 +00:00
|
|
|
sigfunc = _thread_sigact[sig - 1].sa_sigaction;
|
|
|
|
ucp->uc_sigmask = _thr_proc_sigmask;
|
|
|
|
if (((__sighandler_t *)sigfunc != SIG_DFL) &&
|
|
|
|
((__sighandler_t *)sigfunc != SIG_IGN)) {
|
|
|
|
if (((_thread_sigact[sig - 1].sa_flags & SA_SIGINFO) != 0) ||
|
|
|
|
(info == NULL))
|
|
|
|
(*(sigfunc))(sig, info, ucp);
|
|
|
|
else
|
|
|
|
(*(sigfunc))(sig, (siginfo_t *)info->si_code, ucp);
|
2002-02-09 19:58:41 +00:00
|
|
|
}
|
2003-04-18 05:04:16 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Restore the thread's signal mask.
|
|
|
|
*/
|
|
|
|
if (saved_seqno == curthread->sigmask_seqno)
|
|
|
|
curthread->tmbx.tm_context.uc_sigmask = saved_mask;
|
|
|
|
else
|
|
|
|
curthread->tmbx.tm_context.uc_sigmask = curthread->sigmask;
|
2000-10-13 22:12:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-04-18 05:04:16 +00:00
|
|
|
* Find a thread that can handle the signal. This must be called
|
|
|
|
* with upcalls disabled.
|
2000-10-13 22:12:32 +00:00
|
|
|
*/
|
2003-04-18 05:04:16 +00:00
|
|
|
struct pthread *
|
|
|
|
thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
{
|
2003-04-18 05:04:16 +00:00
|
|
|
int handler_installed;
|
2003-04-28 21:35:06 +00:00
|
|
|
struct pthread *pthread;
|
2002-02-09 19:58:41 +00:00
|
|
|
struct pthread *suspended_thread, *signaled_thread;
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
      select() to poll(). In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
DBG_MSG("Looking for thread to handle signal %d\n", sig);
|
2003-04-18 05:04:16 +00:00
|
|
|
|
|
|
|
handler_installed = (_thread_sigact[sig - 1].sa_handler != SIG_IGN) &&
|
|
|
|
(_thread_sigact[sig - 1].sa_handler != SIG_DFL);
|
|
|
|
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
/* Check if the signal requires a dump of thread information: */
|
2000-11-09 05:08:26 +00:00
|
|
|
if (sig == SIGINFO) {
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
/* Dump thread information to file: */
|
|
|
|
_thread_dump_info();
|
2000-11-09 05:08:26 +00:00
|
|
|
}
|
2003-02-17 10:05:18 +00:00
|
|
|
/*
|
|
|
|
* Enter a loop to look for threads that have the signal
|
|
|
|
* unmasked. POSIX specifies that a thread in a sigwait
|
|
|
|
* will get the signal over any other threads. Second
|
|
|
|
* preference will be threads in in a sigsuspend. Third
|
|
|
|
* preference will be the current thread. If none of the
|
|
|
|
* above, then the signal is delivered to the first thread
|
|
|
|
* that is found. Note that if a custom handler is not
|
|
|
|
* installed, the signal only affects threads in sigwait.
|
|
|
|
*/
|
|
|
|
suspended_thread = NULL;
|
2003-04-18 05:04:16 +00:00
|
|
|
signaled_thread = NULL;
|
2003-02-17 10:05:18 +00:00
|
|
|
|
2003-04-18 05:04:16 +00:00
|
|
|
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
|
2003-04-28 21:35:06 +00:00
|
|
|
TAILQ_FOREACH(pthread, &_thread_list, tle) {
|
2003-04-29 21:03:33 +00:00
|
|
|
/* Take the scheduling lock. */
|
|
|
|
KSE_SCHED_LOCK(curkse, pthread->kseg);
|
2003-02-17 10:05:18 +00:00
|
|
|
if ((pthread->state == PS_SIGWAIT) &&
|
|
|
|
sigismember(pthread->data.sigwait, sig)) {
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
/*
|
2003-04-18 05:04:16 +00:00
|
|
|
* Return the signal number and make the
|
|
|
|
* thread runnable.
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
*/
|
2003-02-17 10:05:18 +00:00
|
|
|
pthread->signo = sig;
|
2003-04-18 05:04:16 +00:00
|
|
|
_thr_setrunnable_unlocked(pthread);
|
|
|
|
|
|
|
|
KSE_SCHED_UNLOCK(curkse, pthread->kseg);
|
1998-09-30 06:27:31 +00:00
|
|
|
|
2003-02-17 10:05:18 +00:00
|
|
|
/*
|
|
|
|
 * POSIX doesn't specify which thread
|
|
|
|
* will get the signal if there are multiple
|
|
|
|
* waiters, so we give it to the first thread
|
|
|
|
* we find.
|
|
|
|
*
|
|
|
|
* Do not attempt to deliver this signal
|
|
|
|
* to other threads and do not add the signal
|
|
|
|
* to the process pending set.
|
|
|
|
*/
|
2003-04-18 05:04:16 +00:00
|
|
|
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
|
|
|
|
DBG_MSG("Waking thread %p in sigwait with signal %d\n",
|
|
|
|
pthread, sig);
|
2003-02-17 10:05:18 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
2003-04-18 05:04:16 +00:00
|
|
|
else if ((pthread->state == PS_DEAD) ||
|
|
|
|
(pthread->state == PS_DEADLOCK) ||
|
2003-05-04 16:17:01 +00:00
|
|
|
THR_IS_EXITING(pthread) || THR_IS_SUSPENDED(pthread))
|
2003-04-18 05:04:16 +00:00
|
|
|
; /* Skip this thread. */
|
|
|
|
else if ((handler_installed != 0) &&
|
2003-05-04 16:17:01 +00:00
|
|
|
!sigismember(&pthread->tmbx.tm_context.uc_sigmask, sig)) {
|
2003-02-17 10:05:18 +00:00
|
|
|
if (pthread->state == PS_SIGSUSPEND) {
|
2003-05-24 02:29:25 +00:00
|
|
|
if (suspended_thread == NULL) {
|
2003-02-17 10:05:18 +00:00
|
|
|
suspended_thread = pthread;
|
2003-05-24 02:29:25 +00:00
|
|
|
suspended_thread->refcount++;
|
|
|
|
}
|
|
|
|
} else if (signaled_thread == NULL) {
|
2003-02-17 10:05:18 +00:00
|
|
|
signaled_thread = pthread;
|
2003-05-24 02:29:25 +00:00
|
|
|
signaled_thread->refcount++;
|
|
|
|
}
|
1998-09-30 06:27:31 +00:00
|
|
|
}
|
2003-04-29 21:03:33 +00:00
|
|
|
KSE_SCHED_UNLOCK(curkse, pthread->kseg);
|
2003-02-17 10:05:18 +00:00
|
|
|
}
|
2003-04-18 05:04:16 +00:00
|
|
|
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
|
1998-09-30 06:27:31 +00:00
|
|
|
|
2003-02-17 10:05:18 +00:00
|
|
|
/*
|
2003-04-18 05:04:16 +00:00
|
|
|
* Only perform wakeups and signal delivery if there is a
|
|
|
|
* custom handler installed:
|
2003-02-17 10:05:18 +00:00
|
|
|
*/
|
2003-04-18 05:04:16 +00:00
|
|
|
if (handler_installed == 0) {
|
1999-12-17 00:56:36 +00:00
|
|
|
/*
|
2003-04-18 05:04:16 +00:00
|
|
|
* There is no handler installed; nothing to do here.
|
1999-12-17 00:56:36 +00:00
|
|
|
*/
|
2003-04-18 05:04:16 +00:00
|
|
|
} else if (suspended_thread == NULL &&
|
|
|
|
signaled_thread == NULL) {
|
2003-02-17 10:05:18 +00:00
|
|
|
/*
|
|
|
|
* Add it to the set of signals pending
|
|
|
|
* on the process:
|
|
|
|
*/
|
2003-04-18 05:04:16 +00:00
|
|
|
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
|
|
|
|
if (!sigismember(&_thr_proc_sigpending, sig)) {
|
|
|
|
sigaddset(&_thr_proc_sigpending, sig);
|
|
|
|
if (info == NULL)
|
|
|
|
build_siginfo(&_thr_proc_siginfo[sig], sig);
|
|
|
|
else
|
|
|
|
memcpy(&_thr_proc_siginfo[sig], info,
|
|
|
|
sizeof(*info));
|
|
|
|
}
|
|
|
|
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
|
|
|
|
} else {
|
2003-02-17 10:05:18 +00:00
|
|
|
/*
|
|
|
|
* We only deliver the signal to one thread;
|
|
|
|
* give preference to the suspended thread:
|
|
|
|
*/
|
2003-05-24 02:29:25 +00:00
|
|
|
if (suspended_thread != NULL) {
|
2003-02-17 10:05:18 +00:00
|
|
|
pthread = suspended_thread;
|
2003-05-24 02:29:25 +00:00
|
|
|
_thr_ref_delete(NULL, signaled_thread);
|
|
|
|
} else
|
2003-02-17 10:05:18 +00:00
|
|
|
pthread = signaled_thread;
|
|
|
|
return (pthread);
|
|
|
|
}
|
1999-12-17 00:56:36 +00:00
|
|
|
return (NULL);
|
1998-04-29 09:59:34 +00:00
|
|
|
}
|
|
|
|
|
2003-04-18 05:04:16 +00:00
|
|
|
static void
|
|
|
|
build_siginfo(siginfo_t *info, int signo)
|
|
|
|
{
|
|
|
|
bzero(info, sizeof(*info));
|
|
|
|
info->si_signo = signo;
|
|
|
|
info->si_pid = _thr_pid;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * This is called by a thread when it has pending signals to deliver.
 * It should only be called from the context of the thread.
 *
 * curthread - the currently running thread (the caller itself)
 * ucp       - machine context passed through to each signal handler
 * psf       - optional saved signal frame; when non-NULL, the signals
 *             recorded in it are merged into the set to deliver and
 *             the frame's saved state is restored after the handlers
 *             have run
 */
void
_thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
    struct pthread_sigframe *psf)
{
	struct pthread_sigframe psf_save;
	sigset_t sigset;
	int i;

	/*
	 * Snapshot and clear the thread's pending set (and the frame's,
	 * if supplied) under the scheduling lock, so each pending signal
	 * is delivered exactly once.
	 */
	THR_SCHED_LOCK(curthread, curthread);
	memcpy(&sigset, &curthread->sigpend, sizeof(sigset));
	sigemptyset(&curthread->sigpend);
	if (psf != NULL) {
		memcpy(&psf_save, psf, sizeof(*psf));
		SIGSETOR(sigset, psf_save.psf_sigset);
		sigemptyset(&psf->psf_sigset);
	}
	THR_SCHED_UNLOCK(curthread, curthread);

	/* Check the threads previous state: */
	if ((psf != NULL) && (psf_save.psf_state != PS_RUNNING)) {
		/*
		 * Do a little cleanup handling for those threads in
		 * queues before calling the signal handler.  Signals
		 * for these threads are temporarily blocked until
		 * after cleanup handling.
		 */
		switch (psf_save.psf_state) {
		case PS_COND_WAIT:
			/* Back the thread out of the condition queue. */
			_cond_wait_backout(curthread);
			psf_save.psf_state = PS_RUNNING;
			break;

		case PS_MUTEX_WAIT:
			/* Back the thread out of the mutex queue. */
			_mutex_lock_backout(curthread);
			psf_save.psf_state = PS_RUNNING;
			break;

		default:
			break;
		}
	}
	/*
	 * Lower the priority before calling the handler in case
	 * it never returns (longjmps back):
	 */
	curthread->active_priority &= ~THR_SIGNAL_PRIORITY;

	/* Invoke the handler for each snapshotted signal, lowest first. */
	for (i = 1; i < NSIG; i++) {
		if (sigismember(&sigset, i) != 0) {
			/* Call the handler: */
			thr_sig_invoke_handler(curthread, i,
			    &curthread->siginfo[i], ucp);
		}
	}

	THR_SCHED_LOCK(curthread, curthread);
	if (psf != NULL)
		thr_sigframe_restore(curthread, &psf_save);
	/* Restore the signal mask. */
	curthread->tmbx.tm_context.uc_sigmask = curthread->sigmask;
	THR_SCHED_UNLOCK(curthread, curthread);
	/* Re-check: more signals may have become deliverable meanwhile. */
	_thr_sig_check_pending(curthread);
}
|
|
|
|
|
|
|
|
/*
 * This checks pending signals for the current thread.  It should be
 * called whenever a thread changes its signal mask.  Note that this
 * is called from a thread (using its stack).
 *
 * XXX - We might want to just check to see if there are pending
 *       signals for the thread here, but enter the UTS scheduler
 *       to actually install the signal handler(s).
 */
void
_thr_sig_check_pending(struct pthread *curthread)
{
	sigset_t sigset;
	sigset_t pending_process;
	sigset_t pending_thread;
	kse_critical_t crit;
	int i;

	curthread->check_pending = 0;

	/*
	 * Check if there are pending signals for the running
	 * thread or process that aren't blocked:
	 */
	crit = _kse_critical_enter();
	KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
	sigset = _thr_proc_sigpending;
	KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
	_kse_critical_leave(crit);

	THR_SCHED_LOCK(curthread, curthread);
	SIGSETOR(sigset, curthread->sigpend);
	SIGSETNAND(sigset, curthread->tmbx.tm_context.uc_sigmask);
	if (SIGNOTEMPTY(sigset)) {
		ucontext_t uc;
		volatile int once;

		curthread->check_pending = 0;
		THR_SCHED_UNLOCK(curthread, curthread);

		/*
		 * Split the pending signals into those that were
		 * pending on the process and those that were pending
		 * on the thread.
		 */
		sigfillset(&pending_process);
		sigfillset(&pending_thread);
		for (i = 1; i < NSIG; i++) {
			if (sigismember(&sigset, i) != 0) {
				if (sigismember(&curthread->sigpend, i) != 0) {
					/* Thread-pending: synthesize info. */
					build_siginfo(&curthread->siginfo[i], i);
					sigdelset(&pending_thread, i);
				} else {
					/* Process-pending: copy saved info. */
					memcpy(&curthread->siginfo[i],
					    &_thr_proc_siginfo[i],
					    sizeof(siginfo_t));
					sigdelset(&pending_process, i);
				}
			}
		}
		/*
		 * Remove any process pending signals that were scheduled
		 * to be delivered from process' pending set.
		 */
		crit = _kse_critical_enter();
		KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
		SIGSETAND(_thr_proc_sigpending, pending_process);
		KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
		_kse_critical_leave(crit);

		/*
		 * Remove any thread pending signals that were scheduled
		 * to be delivered from thread's pending set.
		 */
		THR_SCHED_LOCK(curthread, curthread);
		SIGSETAND(curthread->sigpend, pending_thread);
		THR_SCHED_UNLOCK(curthread, curthread);

		/*
		 * NOTE(review): the volatile once/THR_GETCONTEXT pair
		 * appears designed so a handler that resumes via this
		 * context lands here with once == 1 and skips
		 * re-delivery — confirm against THR_GETCONTEXT's
		 * definition.
		 */
		once = 0;
		THR_GETCONTEXT(&uc);
		if (once == 0) {
			once = 1;
			for (i = 1; i < NSIG; i++) {
				if (sigismember(&sigset, i) != 0) {
					/* Call the handler: */
					thr_sig_invoke_handler(curthread, i,
					    &curthread->siginfo[i], &uc);
				}
			}
		}
	}
	else
		THR_SCHED_UNLOCK(curthread, curthread);
}
|
|
|
|
|
2003-02-17 10:05:18 +00:00
|
|
|
/*
|
2003-04-18 05:04:16 +00:00
|
|
|
* This must be called with upcalls disabled.
|
2003-02-17 10:05:18 +00:00
|
|
|
*/
|
2000-01-19 07:04:50 +00:00
|
|
|
static void
|
2003-04-18 05:04:16 +00:00
|
|
|
handle_special_signals(struct kse *curkse, int sig)
|
2000-01-19 07:04:50 +00:00
|
|
|
{
|
2000-10-13 22:12:32 +00:00
|
|
|
switch (sig) {
|
|
|
|
/*
|
|
|
|
* POSIX says that pending SIGCONT signals are
|
|
|
|
* discarded when one of these signals occurs.
|
|
|
|
*/
|
|
|
|
case SIGTSTP:
|
|
|
|
case SIGTTIN:
|
|
|
|
case SIGTTOU:
|
2003-04-18 05:04:16 +00:00
|
|
|
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
|
|
|
|
sigdelset(&_thr_proc_sigpending, SIGCONT);
|
|
|
|
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
|
2000-10-13 22:12:32 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
2000-01-19 07:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
 * Perform thread specific actions in response to a signal.
 * This function is only called if there is a handler installed
 * for the signal, and if the target thread has the signal
 * unmasked.
 *
 * This must be called with the thread's scheduling lock held.
 */
void
_thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
{
	/*
	 * NOTE(review): 'restart' captures SA_RESTART but is never read
	 * afterwards — presumably intended for restart handling that was
	 * never wired up (see the XXX on kse_thr_interrupt below).
	 */
	int	restart;
	int	suppress_handler = 0;

	if (pthread->curframe == NULL) {
		/*
		 * This thread is active (no saved signal frame).  Just add
		 * the signal to the thread's pending set; it will be
		 * noticed via check_pending when the thread next runs.
		 */
		sigaddset(&pthread->sigpend, sig);
		pthread->check_pending = 1;
		/* Record siginfo, synthesizing it if the caller had none: */
		if (info == NULL)
			build_siginfo(&pthread->siginfo[sig], sig);
		else if (info != &pthread->siginfo[sig])
			memcpy(&pthread->siginfo[sig], info,
			    sizeof(*info));
		/* Kick a thread blocked in the kernel so it sees the signal: */
		if ((pthread->blocked != 0) && !THR_IN_CRITICAL(pthread))
			kse_thr_interrupt(&pthread->tmbx /* XXX - restart?!?! */);
	}
	else {
		restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;

		/* Make sure this signal isn't still in the pending set: */
		sigdelset(&pthread->sigpend, sig);

		/*
		 * Process according to thread state:
		 */
		switch (pthread->state) {
		/*
		 * States which do not change when a signal is trapped:
		 */
		case PS_DEAD:
		case PS_DEADLOCK:
		case PS_LOCKWAIT:
		case PS_SUSPENDED:
		case PS_STATE_MAX:
			/*
			 * You can't call a signal handler for threads in these
			 * states.
			 */
			suppress_handler = 1;
			break;

		/*
		 * States which do not need any cleanup handling when signals
		 * occur:
		 */
		case PS_RUNNING:
			/*
			 * Remove the thread from the queue before changing its
			 * priority:
			 */
			if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0)
				THR_RUNQ_REMOVE(pthread);
			break;

		/*
		 * States which cannot be interrupted but still require the
		 * signal handler to run:
		 */
		case PS_COND_WAIT:
		case PS_MUTEX_WAIT:
			/*
			 * Remove the thread from the wait queue.  It will
			 * be added back to the wait queue once all signal
			 * handlers have been invoked.
			 */
			KSE_WAITQ_REMOVE(pthread->kse, pthread);
			break;

		case PS_SLEEP_WAIT:
			/*
			 * Unmasked signals always cause sleep to terminate
			 * early regardless of SA_RESTART:
			 */
			pthread->interrupted = 1;
			KSE_WAITQ_REMOVE(pthread->kse, pthread);
			THR_SET_STATE(pthread, PS_RUNNING);
			break;

		case PS_JOIN:
		case PS_SIGSUSPEND:
			/* Interrupt the wait and let the handler run: */
			KSE_WAITQ_REMOVE(pthread->kse, pthread);
			THR_SET_STATE(pthread, PS_RUNNING);
			break;

		case PS_SIGWAIT:
			/*
			 * The signal handler is not called for threads in
			 * SIGWAIT.
			 */
			suppress_handler = 1;
			/* Wake up the thread if the signal is blocked. */
			if (sigismember(pthread->data.sigwait, sig)) {
				/* Return the signal number: */
				pthread->signo = sig;

				/* Make the thread runnable: */
				_thr_setrunnable_unlocked(pthread);
			} else
				/* Increment the pending signal count. */
				sigaddset(&pthread->sigpend, sig);
			break;
		}

		if (suppress_handler == 0) {
			/*
			 * Setup a signal frame and save the current threads
			 * state:
			 */
			thr_sigframe_add(pthread, sig, info);

			if (pthread->state != PS_RUNNING)
				THR_SET_STATE(pthread, PS_RUNNING);

			/*
			 * The thread should be removed from all scheduling
			 * queues at this point.  Raise the priority and
			 * place the thread in the run queue.  It is also
			 * possible for a signal to be sent to a suspended
			 * thread, mostly via pthread_kill().  If a thread
			 * is suspended, don't insert it into the priority
			 * queue; just set its state to suspended and it
			 * will run the signal handler when it is resumed.
			 */
			pthread->active_priority |= THR_SIGNAL_PRIORITY;
			if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0)
				THR_RUNQ_INSERT_TAIL(pthread);
		}
	}
}
|
1996-01-22 00:23:58 +00:00
|
|
|
|
2000-10-25 11:46:07 +00:00
|
|
|
static void
|
2003-04-18 05:04:16 +00:00
|
|
|
thr_sig_check_state(struct pthread *pthread, int sig)
|
2000-10-25 11:46:07 +00:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Process according to thread state:
|
|
|
|
*/
|
|
|
|
switch (pthread->state) {
|
|
|
|
/*
|
|
|
|
* States which do not change when a signal is trapped:
|
|
|
|
*/
|
|
|
|
case PS_RUNNING:
|
2003-04-18 05:04:16 +00:00
|
|
|
case PS_LOCKWAIT:
|
|
|
|
case PS_MUTEX_WAIT:
|
2000-10-25 11:46:07 +00:00
|
|
|
case PS_COND_WAIT:
|
|
|
|
case PS_JOIN:
|
2003-04-18 05:04:16 +00:00
|
|
|
case PS_SUSPENDED:
|
|
|
|
case PS_DEAD:
|
|
|
|
case PS_DEADLOCK:
|
|
|
|
case PS_STATE_MAX:
|
2000-10-25 11:46:07 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case PS_SIGWAIT:
|
|
|
|
/* Wake up the thread if the signal is blocked. */
|
|
|
|
if (sigismember(pthread->data.sigwait, sig)) {
|
|
|
|
/* Return the signal number: */
|
|
|
|
pthread->signo = sig;
|
2003-04-18 05:04:16 +00:00
|
|
|
|
|
|
|
/* Change the state of the thread to run: */
|
|
|
|
_thr_setrunnable_unlocked(pthread);
|
2000-10-25 11:46:07 +00:00
|
|
|
} else
|
|
|
|
/* Increment the pending signal count. */
|
|
|
|
sigaddset(&pthread->sigpend, sig);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PS_SIGSUSPEND:
|
|
|
|
case PS_SLEEP_WAIT:
|
|
|
|
/*
|
|
|
|
* Remove the thread from the wait queue and make it
|
|
|
|
* runnable:
|
|
|
|
*/
|
2003-04-18 05:04:16 +00:00
|
|
|
_thr_setrunnable_unlocked(pthread);
|
2000-10-25 11:46:07 +00:00
|
|
|
|
|
|
|
/* Flag the operation as interrupted: */
|
|
|
|
pthread->interrupted = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* Send a signal to a specific thread (ala pthread_kill):
|
|
|
|
*/
|
1999-12-17 00:56:36 +00:00
|
|
|
void
|
2003-04-18 05:04:16 +00:00
|
|
|
_thr_sig_send(struct pthread *pthread, int sig)
|
1999-12-17 00:56:36 +00:00
|
|
|
{
|
2003-04-18 05:04:16 +00:00
|
|
|
struct pthread *curthread = _get_curthread();
|
|
|
|
|
|
|
|
/* Lock the scheduling queue of the target thread. */
|
|
|
|
THR_SCHED_LOCK(curthread, pthread);
|
2001-01-24 13:03:38 +00:00
|
|
|
|
2000-10-25 11:46:07 +00:00
|
|
|
/* Check for signals whose actions are SIG_DFL: */
|
|
|
|
if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) {
|
|
|
|
/*
|
|
|
|
* Check to see if a temporary signal handler is
|
|
|
|
* installed for sigwaiters:
|
|
|
|
*/
|
2003-04-18 05:04:16 +00:00
|
|
|
if (_thread_dfl_count[sig] == 0) {
|
2000-10-25 11:46:07 +00:00
|
|
|
/*
|
|
|
|
* Deliver the signal to the process if a handler
|
|
|
|
* is not installed:
|
|
|
|
*/
|
2003-04-18 05:04:16 +00:00
|
|
|
THR_SCHED_UNLOCK(curthread, pthread);
|
2000-10-25 11:46:07 +00:00
|
|
|
kill(getpid(), sig);
|
2003-04-18 05:04:16 +00:00
|
|
|
THR_SCHED_LOCK(curthread, pthread);
|
|
|
|
}
|
2000-10-25 11:46:07 +00:00
|
|
|
/*
|
|
|
|
* Assuming we're still running after the above kill(),
|
|
|
|
* make any necessary state changes to the thread:
|
|
|
|
*/
|
2003-04-18 05:04:16 +00:00
|
|
|
thr_sig_check_state(pthread, sig);
|
|
|
|
THR_SCHED_UNLOCK(curthread, pthread);
|
2000-10-25 11:46:07 +00:00
|
|
|
}
|
1999-12-17 00:56:36 +00:00
|
|
|
/*
|
|
|
|
* Check that the signal is not being ignored:
|
|
|
|
*/
|
2000-10-25 11:46:07 +00:00
|
|
|
else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
|
1999-12-17 00:56:36 +00:00
|
|
|
if (pthread->state == PS_SIGWAIT &&
|
|
|
|
sigismember(pthread->data.sigwait, sig)) {
|
|
|
|
/* Return the signal number: */
|
|
|
|
pthread->signo = sig;
|
2003-04-18 05:04:16 +00:00
|
|
|
|
|
|
|
/* Change the state of the thread to run: */
|
|
|
|
_thr_setrunnable_unlocked(pthread);
|
|
|
|
THR_SCHED_UNLOCK(curthread, pthread);
|
|
|
|
} else if (sigismember(&pthread->tmbx.tm_context.uc_sigmask, sig)) {
|
2000-10-13 22:12:32 +00:00
|
|
|
/* Add the signal to the pending set: */
|
|
|
|
sigaddset(&pthread->sigpend, sig);
|
2003-04-18 05:04:16 +00:00
|
|
|
THR_SCHED_UNLOCK(curthread, pthread);
|
|
|
|
} else if (pthread == curthread) {
|
|
|
|
ucontext_t uc;
|
|
|
|
siginfo_t info;
|
|
|
|
volatile int once;
|
|
|
|
|
|
|
|
THR_SCHED_UNLOCK(curthread, pthread);
|
|
|
|
build_siginfo(&info, sig);
|
|
|
|
once = 0;
|
|
|
|
THR_GETCONTEXT(&uc);
|
|
|
|
if (once == 0) {
|
|
|
|
once = 1;
|
|
|
|
/*
|
|
|
|
* Call the signal handler for the current
|
|
|
|
* thread:
|
|
|
|
*/
|
|
|
|
thr_sig_invoke_handler(curthread, sig,
|
|
|
|
&info, &uc);
|
|
|
|
}
|
|
|
|
} else {
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
2000-10-25 11:46:07 +00:00
|
|
|
* Perform any state changes due to signal
|
|
|
|
* arrival:
|
2000-10-13 22:12:32 +00:00
|
|
|
*/
|
2003-05-16 19:58:30 +00:00
|
|
|
_thr_sig_add(pthread, sig, NULL);
|
2003-04-18 05:04:16 +00:00
|
|
|
THR_SCHED_UNLOCK(curthread, pthread);
|
1999-12-17 00:56:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-04-18 05:04:16 +00:00
|
|
|
static void
|
|
|
|
thr_sigframe_add(struct pthread *thread, int sig, siginfo_t *info)
|
1998-04-29 09:59:34 +00:00
|
|
|
{
|
2003-04-18 05:04:16 +00:00
|
|
|
if (thread->curframe == NULL)
|
|
|
|
PANIC("Thread doesn't have signal frame ");
|
2001-06-29 17:09:07 +00:00
|
|
|
|
2003-05-16 19:58:30 +00:00
|
|
|
if (thread->have_signals == 0) {
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
2003-04-18 05:04:16 +00:00
|
|
|
* Multiple signals can be added to the same signal
|
|
|
|
* frame. Only save the thread's state the first time.
|
2000-10-13 22:12:32 +00:00
|
|
|
*/
|
2003-04-18 05:04:16 +00:00
|
|
|
thr_sigframe_save(thread, thread->curframe);
|
2003-05-16 19:58:30 +00:00
|
|
|
thread->have_signals = 1;
|
2003-04-18 05:04:16 +00:00
|
|
|
thread->flags &= THR_FLAGS_PRIVATE;
|
2000-10-13 22:12:32 +00:00
|
|
|
}
|
2003-04-18 05:04:16 +00:00
|
|
|
sigaddset(&thread->curframe->psf_sigset, sig);
|
2003-05-16 19:58:30 +00:00
|
|
|
if (info == NULL)
|
2003-04-18 05:04:16 +00:00
|
|
|
build_siginfo(&thread->siginfo[sig], sig);
|
2003-05-16 19:58:30 +00:00
|
|
|
else if (info != &thread->siginfo[sig])
|
|
|
|
memcpy(&thread->siginfo[sig], info, sizeof(*info));
|
2000-11-20 13:12:44 +00:00
|
|
|
|
2003-02-17 10:05:18 +00:00
|
|
|
/* Setup the new signal mask. */
|
2003-04-18 05:04:16 +00:00
|
|
|
SIGSETOR(thread->tmbx.tm_context.uc_sigmask,
|
2003-02-17 10:05:18 +00:00
|
|
|
_thread_sigact[sig - 1].sa_mask);
|
2003-04-18 05:04:16 +00:00
|
|
|
sigaddset(&thread->tmbx.tm_context.uc_sigmask, sig);
|
2000-10-13 22:12:32 +00:00
|
|
|
}
|
2000-11-09 05:08:26 +00:00
|
|
|
|
2003-04-18 05:04:16 +00:00
|
|
|
void
|
|
|
|
thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf)
|
2000-10-13 22:12:32 +00:00
|
|
|
{
|
2003-04-18 05:04:16 +00:00
|
|
|
thread->flags = psf->psf_flags;
|
|
|
|
thread->interrupted = psf->psf_interrupted;
|
|
|
|
thread->signo = psf->psf_signo;
|
|
|
|
thread->state = psf->psf_state;
|
|
|
|
thread->data = psf->psf_wait_data;
|
|
|
|
thread->wakeup_time = psf->psf_wakeup_time;
|
|
|
|
if (thread->sigmask_seqno == psf->psf_seqno)
|
|
|
|
thread->tmbx.tm_context.uc_sigmask = psf->psf_sigmask;
|
|
|
|
else
|
|
|
|
thread->tmbx.tm_context.uc_sigmask = thread->sigmask;
|
2000-10-13 22:12:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2003-04-18 05:04:16 +00:00
|
|
|
thr_sigframe_save(struct pthread *thread, struct pthread_sigframe *psf)
|
2000-10-13 22:12:32 +00:00
|
|
|
{
|
2003-04-18 05:04:16 +00:00
|
|
|
/* This has to initialize all members of the sigframe. */
|
|
|
|
psf->psf_flags = thread->flags & THR_FLAGS_PRIVATE;
|
|
|
|
psf->psf_interrupted = thread->interrupted;
|
|
|
|
psf->psf_signo = thread->signo;
|
|
|
|
psf->psf_state = thread->state;
|
|
|
|
psf->psf_wait_data = thread->data;
|
|
|
|
psf->psf_wakeup_time = thread->wakeup_time;
|
|
|
|
psf->psf_sigmask = thread->tmbx.tm_context.uc_sigmask;
|
|
|
|
psf->psf_seqno = thread->sigmask_seqno;
|
|
|
|
sigemptyset(&psf->psf_sigset);
|
2003-02-17 10:05:18 +00:00
|
|
|
}
|