1996-01-22 00:23:58 +00:00
|
|
|
/*
|
1998-04-29 09:59:34 +00:00
|
|
|
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
|
1996-01-22 00:23:58 +00:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by John Birrell.
|
|
|
|
* 4. Neither the name of the author nor the names of any co-contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
1999-08-05 12:15:30 +00:00
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
1996-01-22 00:23:58 +00:00
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
1999-08-28 00:22:10 +00:00
|
|
|
* $FreeBSD$
|
1996-01-22 00:23:58 +00:00
|
|
|
*/
|
1999-09-29 15:18:46 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/signalvar.h>
|
1996-01-22 00:23:58 +00:00
|
|
|
#include <signal.h>
|
1996-08-20 08:22:01 +00:00
|
|
|
#include <fcntl.h>
|
|
|
|
#include <unistd.h>
|
2000-01-19 07:04:50 +00:00
|
|
|
#include <setjmp.h>
|
1996-01-22 00:23:58 +00:00
|
|
|
#include <errno.h>
|
|
|
|
#ifdef _THREAD_SAFE
|
|
|
|
#include <pthread.h>
|
|
|
|
#include "pthread_private.h"
|
|
|
|
|
Change signal handling to conform to POSIX specified semantics.
Before this change, a signal was delivered to each thread that
didn't have the signal masked. Signals also improperly woke up
threads waiting on I/O. With this change, signals are now
handled in the following way:
o If a thread is waiting in a sigwait for the signal,
then the thread is woken up.
o If no threads are sigwait'ing on the signal and a
thread is in a sigsuspend waiting for the signal,
then the thread is woken up.
o In the case that no threads are waiting or suspended
on the signal, then the signal is delivered to the
first thread we find that has the signal unmasked.
o If no threads are waiting or suspended on the signal,
and no threads have the signal unmasked, then the signal
is added to the process wide pending signal set. The
signal will be delivered to the first thread that unmasks
the signal.
If there is an installed signal handler, it is only invoked
if the chosen thread was not in a sigwait.
In the case that multiple threads are waiting or suspended
on a signal, or multiple threads have the signal unmasked,
we wake up/deliver the signal to the first thread we find.
The above rules still apply.
Reported by: Scott Hess <scott@avantgo.com>
Reviewed by: jb, jasone
1999-12-04 22:55:59 +00:00
|
|
|
/* Prototypes: */

/*
 * NOTE(review): the definitions of these helpers are not visible in this
 * chunk; the one-line summaries below are inferred from the names and the
 * surrounding signal-dispatch code — confirm against the definitions.
 */

/* Deliver signal sig to pthread; has_args presumably selects whether the
 * siginfo/ucontext arguments are preserved for the handler — TODO confirm. */
static void thread_sig_add(pthread_t pthread, int sig, int has_args);

/* Choose the thread that should receive sig (sigwait'ers first, per the
 * POSIX semantics described in the commit log) — TODO confirm. */
static pthread_t thread_sig_find(int sig);

/* Process-level handling for signals with special semantics (e.g. job
 * control) — TODO confirm. */
static void thread_sig_handle_special(int sig);

/* Save the interrupted machine context ucp into pthread's state. */
static void thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp);

/* Push a signal frame for sig onto thread's stack of pending frames. */
static void thread_sigframe_add(pthread_t thread, int sig);

/* Leave (unwind to) signal frame number frame for thread. */
static void thread_sigframe_leave(pthread_t thread, int frame);

/* Restore thread state previously saved in psf. */
static void thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf);

/* Save thread's current state into psf before handler dispatch. */
static void thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf);
|
|
|
|
|
|
|
|
/* #define DEBUG_SIGNAL */

/*
 * Debug tracing: when DEBUG_SIGNAL is defined, DBG_MSG() forwards its
 * printf-style arguments to stdout_debug; otherwise it expands to nothing
 * so the trace calls compile away entirely.
 *
 * Note: "x..." is the GCC named-variadic-macro extension (pre-C99 style);
 * this file relies on being built with GCC.
 */
#ifdef DEBUG_SIGNAL
#define DBG_MSG		stdout_debug
#else
#define DBG_MSG(x...)
#endif
|
Change signal handling to conform to POSIX specified semantics.
Before this change, a signal was delivered to each thread that
didn't have the signal masked. Signals also improperly woke up
threads waiting on I/O. With this change, signals are now
handled in the following way:
o If a thread is waiting in a sigwait for the signal,
then the thread is woken up.
o If no threads are sigwait'ing on the signal and a
thread is in a sigsuspend waiting for the signal,
then the thread is woken up.
o In the case that no threads are waiting or suspended
on the signal, then the signal is delivered to the
first thread we find that has the signal unmasked.
o If no threads are waiting or suspended on the signal,
and no threads have the signal unmasked, then the signal
is added to the process wide pending signal set. The
signal will be delivered to the first thread that unmasks
the signal.
If there is an installed signal handler, it is only invoked
if the chosen thread was not in a sigwait.
In the case that multiple threads are waiting or suspended
on a signal, or multiple threads have the signal unmasked,
we wake up/deliver the signal to the first thread we find.
The above rules still apply.
Reported by: Scott Hess <scott@avantgo.com>
Reviewed by: jb, jasone
1999-12-04 22:55:59 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
 * In invariants builds, track whether we are currently executing inside
 * the signal handler via the _sig_in_handler flag (used elsewhere for
 * consistency checks).  In normal builds these expand to nothing.
 */
#if defined(_PTHREADS_INVARIANTS)
#define SIG_SET_ACTIVE()	_sig_in_handler = 1
#define SIG_SET_INACTIVE()	_sig_in_handler = 0
#else
#define SIG_SET_ACTIVE()
#define SIG_SET_INACTIVE()
#endif
|
1998-04-29 09:59:34 +00:00
|
|
|
|
|
|
|
void
|
2000-10-13 22:12:32 +00:00
|
|
|
_thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
|
1998-04-29 09:59:34 +00:00
|
|
|
{
|
2000-10-13 22:12:32 +00:00
|
|
|
pthread_t pthread;
|
|
|
|
int current_frame;
|
|
|
|
char c;
|
1998-04-29 09:59:34 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
if (ucp == NULL)
|
|
|
|
PANIC("Thread signal handler received null context");
|
|
|
|
DBG_MSG("Got signal %d, current thread %p\n", sig, _thread_run);
|
1998-04-29 09:59:34 +00:00
|
|
|
|
|
|
|
/* Check if an interval timer signal: */
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
if (sig == _SCHED_SIGNAL) {
|
2000-10-13 22:12:32 +00:00
|
|
|
/* Update the scheduling clock: */
|
|
|
|
gettimeofday((struct timeval *)&_sched_tod, NULL);
|
|
|
|
_sched_ticks++;
|
|
|
|
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
if (_thread_kern_in_sched != 0) {
|
1996-01-22 00:23:58 +00:00
|
|
|
/*
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
* The scheduler is already running; ignore this
|
|
|
|
* signal.
|
1996-01-22 00:23:58 +00:00
|
|
|
*/
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
}
|
1999-03-23 05:07:56 +00:00
|
|
|
/*
|
|
|
|
* Check if the scheduler interrupt has come when
|
|
|
|
* the currently running thread has deferred thread
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
* signals.
|
1999-03-23 05:07:56 +00:00
|
|
|
*/
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
    thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
else if (_thread_run->sig_defer_count > 0)
|
|
|
|
_thread_run->yield_on_sig_undefer = 1;
|
|
|
|
else {
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* Save the context of the currently running thread:
|
|
|
|
*/
|
|
|
|
thread_sig_savecontext(_thread_run, ucp);
|
|
|
|
|
1998-04-29 09:59:34 +00:00
|
|
|
/*
|
|
|
|
* Schedule the next thread. This function is not
|
|
|
|
* expected to return because it will do a longjmp
|
|
|
|
* instead.
|
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
_thread_kern_sched(ucp);
|
1996-08-20 08:22:01 +00:00
|
|
|
|
1998-04-29 09:59:34 +00:00
|
|
|
/*
|
|
|
|
* This point should not be reached, so abort the
|
|
|
|
* process:
|
|
|
|
*/
|
|
|
|
PANIC("Returned to signal function from scheduler");
|
|
|
|
}
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
    select() to poll(). In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
    thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Check if the kernel has been interrupted while the scheduler
|
|
|
|
* is accessing the scheduling queues or if there is a currently
|
|
|
|
* running thread that has deferred signals.
|
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
else if ((_thread_kern_in_sched != 0) ||
|
|
|
|
(_thread_run->sig_defer_count > 0)) {
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
    select() to poll(). In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
    thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
/* Cast the signal number to a character variable: */
|
|
|
|
c = sig;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Write the signal number to the kernel pipe so that it will
|
|
|
|
* be ready to read when this signal handler returns.
|
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
if (_queue_signals != 0) {
|
|
|
|
_thread_sys_write(_thread_kern_pipe[1], &c, 1);
|
|
|
|
DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig);
|
|
|
|
}
|
|
|
|
if (_thread_sigq[sig - 1].blocked == 0) {
|
|
|
|
DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig);
|
|
|
|
/*
|
|
|
|
* Do not block this signal; it will be blocked
|
|
|
|
* when the pending signals are run down.
|
|
|
|
*/
|
|
|
|
/* _thread_sigq[sig - 1].blocked = 1; */
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
    select() to poll(). In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
    thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* Queue the signal, saving siginfo and sigcontext
|
|
|
|
* (ucontext).
|
|
|
|
*
|
|
|
|
* XXX - Do we need to copy siginfo and ucp?
|
|
|
|
*/
|
|
|
|
_thread_sigq[sig - 1].signo = sig;
|
|
|
|
if (info != NULL)
|
|
|
|
memcpy(&_thread_sigq[sig - 1].siginfo, info,
|
|
|
|
sizeof(*info));
|
|
|
|
memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp));
|
|
|
|
|
|
|
|
/* Indicate that there are queued signals: */
|
|
|
|
_thread_sigq[sig - 1].pending = 1;
|
|
|
|
_sigq_check_reqd = 1;
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
    select() to poll(). In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
    thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
}
|
2000-10-13 22:12:32 +00:00
|
|
|
/* These signals need special handling: */
|
|
|
|
else if (sig == SIGCHLD || sig == SIGTSTP ||
|
|
|
|
sig == SIGTTIN || sig == SIGTTOU) {
|
|
|
|
_thread_sigq[sig - 1].pending = 1;
|
|
|
|
_thread_sigq[sig - 1].signo = sig;
|
|
|
|
_sigq_check_reqd = 1;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
DBG_MSG("Got signal %d, ignored.\n", sig);
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* The signal handlers should have been installed so that they
|
|
|
|
* cannot be interrupted by other signals.
|
|
|
|
*/
|
|
|
|
else if (_thread_sigq[sig - 1].blocked == 0) {
|
|
|
|
/* The signal is not blocked; handle the signal: */
|
|
|
|
current_frame = _thread_run->sigframe_count;
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
    select() to poll(). In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
    thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* Ignore subsequent occurrences of this signal
|
|
|
|
* until the current signal is handled:
|
|
|
|
*/
|
|
|
|
_thread_sigq[sig - 1].blocked = 1;
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
    select() to poll(). In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
    thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/* This signal will be handled; clear the pending flag: */
|
|
|
|
_thread_sigq[sig - 1].pending = 0;
|
2000-03-15 13:59:27 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* Save siginfo and sigcontext (ucontext).
|
|
|
|
*
|
|
|
|
* XXX - Do we need to copy siginfo and ucp?
|
|
|
|
*/
|
|
|
|
_thread_sigq[sig - 1].signo = sig;
|
2000-03-15 13:59:27 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
if (info != NULL)
|
|
|
|
memcpy(&_thread_sigq[sig - 1].siginfo, info,
|
|
|
|
sizeof(*info));
|
|
|
|
memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp));
|
|
|
|
SIG_SET_ACTIVE();
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
    select() to poll(). In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
    thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/* Handle special signals: */
|
|
|
|
thread_sig_handle_special(sig);
|
|
|
|
|
|
|
|
if ((pthread = thread_sig_find(sig)) != NULL) {
|
|
|
|
DBG_MSG("Got signal %d, adding frame to thread %p\n",
|
|
|
|
sig, pthread);
|
|
|
|
/*
|
|
|
|
* A thread was found that can handle the signal.
|
|
|
|
* Save the context of the currently running thread
|
|
|
|
* so that we can switch to another thread without
|
|
|
|
* losing track of where the current thread left off.
|
|
|
|
* This also applies if the current thread is the
|
|
|
|
* thread to be signaled.
|
|
|
|
*/
|
|
|
|
thread_sig_savecontext(_thread_run, ucp);
|
|
|
|
|
|
|
|
/* Setup the target thread to receive the signal: */
|
|
|
|
thread_sig_add(pthread, sig, /*has_args*/ 1);
|
|
|
|
|
|
|
|
/* Take a peek at the next ready to run thread: */
|
|
|
|
pthread = PTHREAD_PRIOQ_FIRST();
|
|
|
|
DBG_MSG("Finished adding frame, head of prio list %p\n",
|
|
|
|
pthread);
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
    select() to poll().  In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
    thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
}
|
2000-10-13 22:12:32 +00:00
|
|
|
else
|
|
|
|
DBG_MSG("No thread to handle signal %d\n", sig);
|
|
|
|
SIG_SET_INACTIVE();
|
2000-01-19 07:04:50 +00:00
|
|
|
|
|
|
|
/*
|
2000-10-13 22:12:32 +00:00
|
|
|
* Switch to a different context if the currently running
|
|
|
|
* thread takes a signal, or if another thread takes a
|
|
|
|
* signal and the currently running thread is not in a
|
|
|
|
* signal handler.
|
2000-01-19 07:04:50 +00:00
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
if ((_thread_run->sigframe_count > current_frame) ||
|
|
|
|
((pthread != NULL) &&
|
|
|
|
(pthread->active_priority > _thread_run->active_priority))) {
|
|
|
|
/* Enter the kernel scheduler: */
|
|
|
|
DBG_MSG("Entering scheduler from signal handler\n");
|
|
|
|
_thread_kern_sched(ucp);
|
2000-01-19 07:04:50 +00:00
|
|
|
}
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
    select() to poll().  In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
    thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
}
|
2000-10-13 22:12:32 +00:00
|
|
|
else {
|
|
|
|
SIG_SET_ACTIVE();
|
|
|
|
thread_sig_handle_special(sig);
|
|
|
|
SIG_SET_INACTIVE();
|
|
|
|
}
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
    select() to poll().  In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
    thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
}
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
static void
|
|
|
|
thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp)
|
|
|
|
{
|
|
|
|
struct pthread_signal_frame *psf;
|
|
|
|
|
|
|
|
psf = _thread_run->curframe;
|
|
|
|
|
|
|
|
memcpy(&psf->ctx.uc, ucp, sizeof(*ucp));
|
|
|
|
|
|
|
|
/* XXX - Save FP registers too? */
|
|
|
|
FP_SAVE_UC(&psf->ctx.uc);
|
|
|
|
|
|
|
|
/* Mark the context saved as a ucontext: */
|
|
|
|
psf->ctxtype = CTX_UC;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find a thread that can handle the signal.
|
|
|
|
*/
|
1999-12-17 00:56:36 +00:00
|
|
|
pthread_t
|
2000-10-13 22:12:32 +00:00
|
|
|
thread_sig_find(int sig)
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
    select() to poll().  In addition, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
    thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
{
|
2000-10-13 22:12:32 +00:00
|
|
|
int handler_installed;
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
pthread_t pthread, pthread_next;
|
Change signal handling to conform to POSIX specified semantics.
Before this change, a signal was delivered to each thread that
didn't have the signal masked. Signals also improperly woke up
threads waiting on I/O. With this change, signals are now
handled in the following way:
o If a thread is waiting in a sigwait for the signal,
then the thread is woken up.
o If no threads are sigwait'ing on the signal and a
thread is in a sigsuspend waiting for the signal,
then the thread is woken up.
o In the case that no threads are waiting or suspended
on the signal, then the signal is delivered to the
first thread we find that has the signal unmasked.
o If no threads are waiting or suspended on the signal,
and no threads have the signal unmasked, then the signal
is added to the process wide pending signal set. The
signal will be delivered to the first thread that unmasks
the signal.
If there is an installed signal handler, it is only invoked
if the chosen thread was not in a sigwait.
In the case that multiple threads are waiting or suspended
on a signal, or multiple threads have the signal unmasked,
we wake up/deliver the signal to the first thread we find.
The above rules still apply.
Reported by: Scott Hess <scott@avantgo.com>
Reviewed by: jb, jasone
1999-12-04 22:55:59 +00:00
|
|
|
pthread_t suspended_thread, signaled_thread;
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
DBG_MSG("Looking for thread to handle signal %d\n", sig);
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
/* Check if the signal requires a dump of thread information: */
|
|
|
|
if (sig == SIGINFO)
|
|
|
|
/* Dump thread information to file: */
|
|
|
|
_thread_dump_info();
|
|
|
|
|
|
|
|
/* Check if an interval timer signal: */
|
|
|
|
else if (sig == _SCHED_SIGNAL) {
|
|
|
|
/*
|
|
|
|
* This shouldn't ever occur (should this panic?).
|
|
|
|
*/
|
1998-04-29 09:59:34 +00:00
|
|
|
} else {
|
1998-09-30 06:27:31 +00:00
|
|
|
/*
|
1999-12-28 18:08:09 +00:00
|
|
|
* Enter a loop to look for threads that have the signal
|
|
|
|
* unmasked. POSIX specifies that a thread in a sigwait
|
|
|
|
* will get the signal over any other threads. Second
|
2000-10-13 22:12:32 +00:00
|
|
|
* preference will be threads in in a sigsuspend. Third
|
|
|
|
* preference will be the current thread. If none of the
|
|
|
|
* above, then the signal is delivered to the first thread
|
|
|
|
* that is found. Note that if a custom handler is not
|
|
|
|
* installed, the signal only affects threads in sigwait.
|
1998-09-30 06:27:31 +00:00
|
|
|
*/
|
Change signal handling to conform to POSIX specified semantics.
Before this change, a signal was delivered to each thread that
didn't have the signal masked. Signals also improperly woke up
threads waiting on I/O. With this change, signals are now
handled in the following way:
o If a thread is waiting in a sigwait for the signal,
then the thread is woken up.
o If no threads are sigwait'ing on the signal and a
thread is in a sigsuspend waiting for the signal,
then the thread is woken up.
o In the case that no threads are waiting or suspended
on the signal, then the signal is delivered to the
first thread we find that has the signal unmasked.
o If no threads are waiting or suspended on the signal,
and no threads have the signal unmasked, then the signal
is added to the process wide pending signal set. The
signal will be delivered to the first thread that unmasks
the signal.
If there is an installed signal handler, it is only invoked
if the chosen thread was not in a sigwait.
In the case that multiple threads are waiting or suspended
on a signal, or multiple threads have the signal unmasked,
we wake up/deliver the signal to the first thread we find.
The above rules still apply.
Reported by: Scott Hess <scott@avantgo.com>
Reviewed by: jb, jasone
1999-12-04 22:55:59 +00:00
|
|
|
suspended_thread = NULL;
|
2000-10-13 22:12:32 +00:00
|
|
|
if ((_thread_run != &_thread_kern_thread) &&
|
|
|
|
!sigismember(&_thread_run->sigmask, sig))
|
|
|
|
signaled_thread = _thread_run;
|
|
|
|
else
|
|
|
|
signaled_thread = NULL;
|
1999-12-28 18:08:09 +00:00
|
|
|
if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) ||
|
|
|
|
(_thread_sigact[sig - 1].sa_handler == SIG_DFL))
|
|
|
|
handler_installed = 0;
|
|
|
|
else
|
|
|
|
handler_installed = 1;
|
|
|
|
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
for (pthread = TAILQ_FIRST(&_waitingq);
|
|
|
|
pthread != NULL; pthread = pthread_next) {
|
|
|
|
/*
|
|
|
|
* Grab the next thread before possibly destroying
|
|
|
|
* the link entry.
|
|
|
|
*/
|
|
|
|
pthread_next = TAILQ_NEXT(pthread, pqe);
|
|
|
|
|
1998-09-30 06:27:31 +00:00
|
|
|
if ((pthread->state == PS_SIGWAIT) &&
|
|
|
|
sigismember(pthread->data.sigwait, sig)) {
|
|
|
|
/* Change the state of the thread to run: */
|
In the words of the author:
o The polling mechanism for I/O readiness was changed from
select() to poll(). In additon, a wrapped version of poll()
is now provided.
o The wrapped select routine now converts each fd_set to a
poll array so that the thread scheduler doesn't have to
perform a bitwise search for selected fds each time file
descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq)
for threads that need work. Threads waiting for I/O readiness
and spinblocks are added to the work queue in addition to the
waiting queue. This reduces the time spent forming/searching
the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of
thread wakeup time. This allows the thread scheduler to
find the nearest wakeup time by looking at the first thread
in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An
application should not rely on the threads library for providing
this locking; if necessary, the application should use mutexes
to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup
instead of hardcoding the clock resolution to 10 msec (tested
with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These
include the queues of all threads, dead threads, and threads
waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable
after a fork. Also prevented reallocation of the ready queue
after a fork.
o Prevented the wrapped close routine from closing the thread
kernel pipes.
o Initialized file descriptor table for stdio entries at thread
init.
o Provided additional flags to indicate to what queues threads
belong.
o Moved TAILQ initialization for statically allocated mutex and
condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the
dispatching of signals from thread activation broke sigsuspend
when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it
is first created and placed in the list of threads so that it
will not be accidentally scheduled before becoming a member
of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel
pipe if the scheduling queues are protected. When scheduling
queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling
signal and that the scheduling signal handler blocks all
other signals. This ensures that the signal handler is only
interruptible for and by non-scheduling signals. An atomic
lock is used to decide which instance of the signal handler
will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are
no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when
adding, removing, and searching the scheduling queues. These
checks add very little overhead and are enabled when compiled
with _PTHREADS_INVARIANTS defined. Suggested and implemented
by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in
pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that
threads list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state
and _thread_kern_sched_state_unlock to prevent a scheduling
signal from calling the scheduler again. (Tor Egge)
o Don't use TAILQ_FOREACH macro while searching the waiting
queue for threads in a sigwait state, because a change of
state destroys the TAILQ link. It is actually safe to do
so, though, because once a sigwaiting thread is found, the
loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit
the signal deferral flag of the currently running thread.
(Tor Egge)
Submitted by: Daniel Eischen <eischen@vigrid.com> and
Tor Egge <Tor.Egge@fast.no>
1999-06-20 08:28:48 +00:00
|
|
|
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* A signal handler is not invoked for threads
|
|
|
|
* in sigwait. Clear the blocked and pending
|
|
|
|
* flags.
|
|
|
|
*/
|
|
|
|
_thread_sigq[sig - 1].blocked = 0;
|
|
|
|
_thread_sigq[sig - 1].pending = 0;
|
1998-09-30 06:27:31 +00:00
|
|
|
|
|
|
|
/* Return the signal number: */
|
|
|
|
pthread->signo = sig;
|
|
|
|
|
|
|
|
/*
|
Change signal handling to conform to POSIX specified semantics.
Before this change, a signal was delivered to each thread that
didn't have the signal masked. Signals also improperly woke up
threads waiting on I/O. With this change, signals are now
handled in the following way:
o If a thread is waiting in a sigwait for the signal,
then the thread is woken up.
o If no threads are sigwait'ing on the signal and a
thread is in a sigsuspend waiting for the signal,
then the thread is woken up.
o In the case that no threads are waiting or suspended
on the signal, then the signal is delivered to the
first thread we find that has the signal unmasked.
o If no threads are waiting or suspended on the signal,
and no threads have the signal unmasked, then the signal
is added to the process wide pending signal set. The
signal will be delivered to the first thread that unmasks
the signal.
If there is an installed signal handler, it is only invoked
if the chosen thread was not in a sigwait.
In the case that multiple threads are waiting or suspended
on a signal, or multiple threads have the signal unmasked,
we wake up/deliver the signal to the first thread we find.
The above rules still apply.
Reported by: Scott Hess <scott@avantgo.com>
Reviewed by: jb, jasone
1999-12-04 22:55:59 +00:00
|
|
|
* POSIX doesn't doesn't specify which thread
|
|
|
|
* will get the signal if there are multiple
|
|
|
|
* waiters, so we give it to the first thread
|
|
|
|
* we find.
|
|
|
|
*
|
1998-09-30 06:27:31 +00:00
|
|
|
* Do not attempt to deliver this signal
|
2000-10-13 22:12:32 +00:00
|
|
|
* to other threads and do not add the signal
|
|
|
|
* to the process pending set.
|
1998-09-30 06:27:31 +00:00
|
|
|
*/
|
1999-12-17 00:56:36 +00:00
|
|
|
return (NULL);
|
1998-09-30 06:27:31 +00:00
|
|
|
}
|
1999-12-28 18:08:09 +00:00
|
|
|
else if ((handler_installed != 0) &&
|
|
|
|
!sigismember(&pthread->sigmask, sig)) {
|
Change signal handling to conform to POSIX specified semantics.
Before this change, a signal was delivered to each thread that
didn't have the signal masked. Signals also improperly woke up
threads waiting on I/O. With this change, signals are now
handled in the following way:
o If a thread is waiting in a sigwait for the signal,
then the thread is woken up.
o If no threads are sigwait'ing on the signal and a
thread is in a sigsuspend waiting for the signal,
then the thread is woken up.
o In the case that no threads are waiting or suspended
on the signal, then the signal is delivered to the
first thread we find that has the signal unmasked.
o If no threads are waiting or suspended on the signal,
and no threads have the signal unmasked, then the signal
is added to the process wide pending signal set. The
signal will be delivered to the first thread that unmasks
the signal.
If there is an installed signal handler, it is only invoked
if the chosen thread was not in a sigwait.
In the case that multiple threads are waiting or suspended
on a signal, or multiple threads have the signal unmasked,
we wake up/deliver the signal to the first thread we find.
The above rules still apply.
Reported by: Scott Hess <scott@avantgo.com>
Reviewed by: jb, jasone
1999-12-04 22:55:59 +00:00
|
|
|
if (pthread->state == PS_SIGSUSPEND) {
|
|
|
|
if (suspended_thread == NULL)
|
|
|
|
suspended_thread = pthread;
|
|
|
|
} else if (signaled_thread == NULL)
|
|
|
|
signaled_thread = pthread;
|
|
|
|
}
|
1998-09-30 06:27:31 +00:00
|
|
|
}
|
|
|
|
|
1999-12-17 00:56:36 +00:00
|
|
|
/*
|
1999-12-28 18:08:09 +00:00
|
|
|
* Only perform wakeups and signal delivery if there is a
|
|
|
|
* custom handler installed:
|
1999-12-17 00:56:36 +00:00
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
if (handler_installed == 0) {
|
|
|
|
/*
|
|
|
|
* There is no handler installed. Unblock the
|
|
|
|
* signal so that if a handler _is_ installed, any
|
|
|
|
* subsequent signals can be handled.
|
|
|
|
*/
|
|
|
|
_thread_sigq[sig - 1].blocked = 0;
|
|
|
|
} else {
|
1999-12-17 00:56:36 +00:00
|
|
|
/*
|
1999-12-28 18:08:09 +00:00
|
|
|
* If we didn't find a thread in the waiting queue,
|
|
|
|
* check the all threads queue:
|
1999-12-17 00:56:36 +00:00
|
|
|
*/
|
1999-12-28 18:08:09 +00:00
|
|
|
if (suspended_thread == NULL &&
|
|
|
|
signaled_thread == NULL) {
|
|
|
|
/*
|
|
|
|
* Enter a loop to look for other threads
|
|
|
|
* capable of receiving the signal:
|
|
|
|
*/
|
|
|
|
TAILQ_FOREACH(pthread, &_thread_list, tle) {
|
|
|
|
if (!sigismember(&pthread->sigmask,
|
|
|
|
sig)) {
|
|
|
|
signaled_thread = pthread;
|
|
|
|
break;
|
|
|
|
}
|
1999-12-17 00:56:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Change signal handling to conform to POSIX specified semantics.
Before this change, a signal was delivered to each thread that
didn't have the signal masked. Signals also improperly woke up
threads waiting on I/O. With this change, signals are now
handled in the following way:
o If a thread is waiting in a sigwait for the signal,
then the thread is woken up.
o If no threads are sigwait'ing on the signal and a
thread is in a sigsuspend waiting for the signal,
then the thread is woken up.
o In the case that no threads are waiting or suspended
on the signal, then the signal is delivered to the
first thread we find that has the signal unmasked.
o If no threads are waiting or suspended on the signal,
and no threads have the signal unmasked, then the signal
is added to the process wide pending signal set. The
signal will be delivered to the first thread that unmasks
the signal.
If there is an installed signal handler, it is only invoked
if the chosen thread was not in a sigwait.
In the case that multiple threads are waiting or suspended
on a signal, or multiple threads have the signal unmasked,
we wake up/deliver the signal to the first thread we find.
The above rules still apply.
Reported by: Scott Hess <scott@avantgo.com>
Reviewed by: jb, jasone
1999-12-04 22:55:59 +00:00
|
|
|
if (suspended_thread == NULL &&
|
|
|
|
signaled_thread == NULL)
|
|
|
|
/*
|
|
|
|
* Add it to the set of signals pending
|
|
|
|
* on the process:
|
|
|
|
*/
|
|
|
|
sigaddset(&_process_sigpending, sig);
|
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* We only deliver the signal to one thread;
|
|
|
|
* give preference to the suspended thread:
|
|
|
|
*/
|
|
|
|
if (suspended_thread != NULL)
|
|
|
|
pthread = suspended_thread;
|
|
|
|
else
|
|
|
|
pthread = signaled_thread;
|
1999-12-17 00:56:36 +00:00
|
|
|
return (pthread);
|
1999-03-23 05:07:56 +00:00
|
|
|
}
|
Change signal handling to conform to POSIX specified semantics.
Before this change, a signal was delivered to each thread that
didn't have the signal masked. Signals also improperly woke up
threads waiting on I/O. With this change, signals are now
handled in the following way:
o If a thread is waiting in a sigwait for the signal,
then the thread is woken up.
o If no threads are sigwait'ing on the signal and a
thread is in a sigsuspend waiting for the signal,
then the thread is woken up.
o In the case that no threads are waiting or suspended
on the signal, then the signal is delivered to the
first thread we find that has the signal unmasked.
o If no threads are waiting or suspended on the signal,
and no threads have the signal unmasked, then the signal
is added to the process wide pending signal set. The
signal will be delivered to the first thread that unmasks
the signal.
If there is an installed signal handler, it is only invoked
if the chosen thread was not in a sigwait.
In the case that multiple threads are waiting or suspended
on a signal, or multiple threads have the signal unmasked,
we wake up/deliver the signal to the first thread we find.
The above rules still apply.
Reported by: Scott Hess <scott@avantgo.com>
Reviewed by: jb, jasone
1999-12-04 22:55:59 +00:00
|
|
|
}
|
1998-04-29 09:59:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Returns nothing. */
|
1999-12-17 00:56:36 +00:00
|
|
|
return (NULL);
|
1998-04-29 09:59:34 +00:00
|
|
|
}
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
 * Deliver any unblocked pending signals to the given thread.
 *
 * The union of the thread's private pending set and the process-wide
 * pending set is computed, the thread's signal mask removed from it,
 * and a signal frame is added (via thread_sig_add) for each signal
 * that remains.  Thread-private pending signals are added without
 * saved handler arguments; process-wide ones are added with arguments
 * and removed from the process pending set.
 */
void
_thread_sig_check_pending(pthread_t pthread)
{
	sigset_t sigset;
	int	i;

	/*
	 * Check if there are pending signals for the running
	 * thread or process that aren't blocked:
	 */
	sigset = pthread->sigpend;
	SIGSETOR(sigset, _process_sigpending);
	SIGSETNAND(sigset, pthread->sigmask);
	if (SIGNOTEMPTY(sigset)) {
		/* Signal numbers start at 1; slot 0 is unused. */
		for (i = 1; i < NSIG; i++) {
			if (sigismember(&sigset, i) != 0) {
				if (sigismember(&pthread->sigpend, i) != 0)
					/* Thread-private pending signal. */
					thread_sig_add(pthread, i,
					    /*has_args*/ 0);
				else {
					/* Process-wide pending signal. */
					thread_sig_add(pthread, i,
					    /*has_args*/ 1);
					sigdelset(&_process_sigpending, i);
				}
			}
		}
	}
}
|
|
|
|
|
|
|
|
/*
 * This can only be called from the kernel scheduler.  It assumes that
 * all thread contexts are saved and that a signal frame can safely be
 * added to any user thread.
 */
void
_thread_sig_handle_pending(void)
{
	pthread_t pthread;
	int	i, sig;

	PTHREAD_ASSERT(_thread_kern_in_sched != 0,
	    "_thread_sig_handle_pending called from outside kernel schedule");
	/*
	 * Check the array of pending signals:
	 */
	for (i = 0; i < NSIG; i++) {
		if (_thread_sigq[i].pending != 0) {
			/* This signal is no longer pending. */
			_thread_sigq[i].pending = 0;

			sig = _thread_sigq[i].signo;

			/* Some signals need special handling: */
			thread_sig_handle_special(sig);

			if (_thread_sigq[i].blocked == 0) {
				/*
				 * Block future signals until this one
				 * is handled:
				 */
				_thread_sigq[i].blocked = 1;

				/*
				 * Select a target thread per the POSIX
				 * delivery rules (sigwaiters first); if
				 * none qualifies the signal stays pending.
				 */
				if ((pthread = thread_sig_find(sig)) != NULL) {
					/*
					 * Setup the target thread to receive
					 * the signal:
					 */
					thread_sig_add(pthread, sig,
					    /*has_args*/ 1);
				}
			}
		}
	}
}
|
|
|
|
|
|
|
|
/*
 * Perform process-wide special-case processing for a signal before it
 * is delivered to any particular thread.
 *
 * SIGCHLD: restore non-blocking mode on all managed file descriptors
 * and wake every thread blocked in wait() (PS_WAIT_WAIT).
 * SIGTSTP/SIGTTIN/SIGTTOU: discard pending SIGCONT signals, as
 * required by POSIX.  All other signals require no special handling.
 */
static void
thread_sig_handle_special(int sig)
{
	pthread_t pthread, pthread_next;
	int	i;

	switch (sig) {
	case SIGCHLD:
		/*
		 * Go through the file list and set all files
		 * to non-blocking again in case the child
		 * set some of them to block. Sigh.
		 */
		for (i = 0; i < _thread_dtablesize; i++) {
			/* Check if this file is used: */
			if (_thread_fd_table[i] != NULL) {
				/*
				 * Set the file descriptor to non-blocking:
				 */
				_thread_sys_fcntl(i, F_SETFL,
				    _thread_fd_table[i]->flags | O_NONBLOCK);
			}
		}
		/*
		 * Enter a loop to wake up all threads waiting
		 * for a process to complete:
		 */
		for (pthread = TAILQ_FIRST(&_waitingq);
		    pthread != NULL; pthread = pthread_next) {
			/*
			 * Grab the next thread before possibly
			 * destroying the link entry:
			 */
			pthread_next = TAILQ_NEXT(pthread, pqe);

			/*
			 * If this thread is waiting for a child
			 * process to complete, wake it up:
			 */
			if (pthread->state == PS_WAIT_WAIT) {
				/* Make the thread runnable: */
				PTHREAD_NEW_STATE(pthread,PS_RUNNING);

				/* Return the signal number: */
				pthread->signo = sig;
			}
		}
		break;

	/*
	 * POSIX says that pending SIGCONT signals are
	 * discarded when one of these signals occurs.
	 */
	case SIGTSTP:
	case SIGTTIN:
	case SIGTTOU:
		/*
		 * Enter a loop to discard pending SIGCONT
		 * signals:
		 */
		TAILQ_FOREACH(pthread, &_thread_list, tle) {
			sigdelset(&pthread->sigpend, SIGCONT);
		}
		break;

	default:
		/* No special handling required for this signal. */
		break;
	}
}
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
 * Perform thread specific actions in response to a signal.
 * This function is only called if there is a handler installed
 * for the signal, and if the target thread has the signal
 * unmasked.
 *
 * The thread is removed from whatever wait/work/priority queues its
 * current state implies, a new signal frame is pushed onto its stack
 * (unless its state suppresses handler invocation), and the thread is
 * made runnable at raised priority.  "has_args" records whether
 * siginfo/ucontext arguments were captured for this signal.
 */
static void
thread_sig_add(pthread_t pthread, int sig, int has_args)
{
	int	restart, frame;
	int	block_signals = 0;
	int	suppress_handler = 0;

	/* SA_RESTART on the action controls interruption semantics below. */
	restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;

	/*
	 * Process according to thread state:
	 */
	switch (pthread->state) {
	/*
	 * States which do not change when a signal is trapped:
	 */
	case PS_DEAD:
	case PS_DEADLOCK:
	case PS_STATE_MAX:
	case PS_SIGTHREAD:
		/*
		 * You can't call a signal handler for threads in these
		 * states.
		 */
		suppress_handler = 1;
		break;

	/*
	 * States which do not need any cleanup handling when signals
	 * occur:
	 */
	case PS_RUNNING:
		/*
		 * Remove the thread from the queue before changing its
		 * priority:
		 */
		if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0)
			PTHREAD_PRIOQ_REMOVE(pthread);
		break;

	case PS_SUSPENDED:
		break;

	case PS_SPINBLOCK:
		/* Remove the thread from the workq and waitq: */
		PTHREAD_WORKQ_REMOVE(pthread);
		PTHREAD_WAITQ_REMOVE(pthread);
		/* Make the thread runnable: */
		PTHREAD_SET_STATE(pthread, PS_RUNNING);
		break;

	case PS_SIGWAIT:
		/* The signal handler is not called for threads in SIGWAIT. */
		suppress_handler = 1;
		/* Wake up the thread if the signal is blocked. */
		if (sigismember(pthread->data.sigwait, sig)) {
			/* Change the state of the thread to run: */
			PTHREAD_NEW_STATE(pthread, PS_RUNNING);

			/* Return the signal number: */
			pthread->signo = sig;
		} else
			/* Increment the pending signal count. */
			sigaddset(&pthread->sigpend, sig);
		break;

	/*
	 * The wait state is a special case due to the handling of
	 * SIGCHLD signals.
	 */
	case PS_WAIT_WAIT:
		if (sig == SIGCHLD) {
			/* Change the state of the thread to run: */
			PTHREAD_WAITQ_REMOVE(pthread);
			PTHREAD_SET_STATE(pthread, PS_RUNNING);

			/* Return the signal number: */
			pthread->signo = sig;
		}
		else {
			/*
			 * Mark the thread as interrupted only if the
			 * restart flag is not set on the signal action:
			 */
			if (restart == 0)
				pthread->interrupted = 1;
			PTHREAD_WAITQ_REMOVE(pthread);
			PTHREAD_SET_STATE(pthread, PS_RUNNING);
		}
		break;

	/*
	 * States which cannot be interrupted but still require the
	 * signal handler to run:
	 */
	case PS_COND_WAIT:
	case PS_JOIN:
	case PS_MUTEX_WAIT:
		/*
		 * Remove the thread from the wait queue.  It will
		 * be added back to the wait queue once all signal
		 * handlers have been invoked.
		 */
		PTHREAD_WAITQ_REMOVE(pthread);
		break;

	/*
	 * States which are interruptible but may need to be removed
	 * from queues before any signal handler is called.
	 *
	 * XXX - We may not need to handle this condition, but will
	 *       mark it as a potential problem.
	 */
	case PS_FDLR_WAIT:
	case PS_FDLW_WAIT:
	case PS_FILE_WAIT:
		if (restart == 0)
			pthread->interrupted = 1;
		/*
		 * Remove the thread from the wait queue.  Our
		 * signal handler hook will remove this thread
		 * from the fd or file queue before invoking
		 * the actual handler.
		 */
		PTHREAD_WAITQ_REMOVE(pthread);
		/*
		 * To ensure the thread is removed from the fd and file
		 * queues before any other signal interrupts it, set the
		 * signal mask to block all signals.  As soon as the thread
		 * is removed from the queue the signal mask will be
		 * restored.
		 */
		block_signals = 1;
		break;

	/*
	 * States which are interruptible:
	 */
	case PS_FDR_WAIT:
	case PS_FDW_WAIT:
		if (restart == 0) {
			/*
			 * Flag the operation as interrupted and
			 * set the state to running:
			 */
			pthread->interrupted = 1;
			PTHREAD_SET_STATE(pthread, PS_RUNNING);
		}
		PTHREAD_WORKQ_REMOVE(pthread);
		PTHREAD_WAITQ_REMOVE(pthread);
		break;

	case PS_POLL_WAIT:
	case PS_SELECT_WAIT:
	case PS_SLEEP_WAIT:
		/*
		 * Unmasked signals always cause poll, select, and sleep
		 * to terminate early, regardless of SA_RESTART:
		 */
		pthread->interrupted = 1;
		/* Remove threads in poll and select from the workq: */
		if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0)
			PTHREAD_WORKQ_REMOVE(pthread);
		PTHREAD_WAITQ_REMOVE(pthread);
		PTHREAD_SET_STATE(pthread, PS_RUNNING);
		break;

	case PS_SIGSUSPEND:
		PTHREAD_WAITQ_REMOVE(pthread);
		PTHREAD_SET_STATE(pthread, PS_RUNNING);
		break;
	}

	if (suppress_handler == 0) {
		/*
		 * Save the current state of the thread and add a
		 * new signal frame.
		 */
		frame = pthread->sigframe_count;
		thread_sigframe_save(pthread, pthread->curframe);
		thread_sigframe_add(pthread, sig);
		pthread->sigframes[frame + 1]->sig_has_args = has_args;
		/* Apply the handler's sa_mask for the handler's duration: */
		SIGSETOR(pthread->sigmask, _thread_sigact[sig - 1].sa_mask);
		if (block_signals != 0) {
			/* Save the signal mask and block all signals: */
			pthread->sigframes[frame + 1]->saved_state.psd_sigmask =
			    pthread->sigmask;
			sigfillset(&pthread->sigmask);
		}

		/* Make sure the thread is runnable: */
		if (pthread->state != PS_RUNNING)
			PTHREAD_SET_STATE(pthread, PS_RUNNING);
		/*
		 * The thread should be removed from all scheduling
		 * queues at this point.  Raise the priority and place
		 * the thread in the run queue.
		 */
		pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY;
		if (pthread != _thread_run)
			PTHREAD_PRIOQ_INSERT_TAIL(pthread);
	}
}
|
1996-01-22 00:23:58 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
 * Send a signal to a specific thread (ala pthread_kill):
 *
 * Ignored signals are dropped.  A thread sigwait'ing on the signal is
 * woken directly.  For the currently-running thread the signal is made
 * pending and the kernel scheduler is invoked to install a signal
 * frame (if unmasked).  For any other thread, a signal frame is added
 * via thread_sig_add() when the signal is unmasked, otherwise the
 * signal is left pending on the thread.  If the handler is SIG_DFL
 * the signal is also forwarded to the process with kill().
 */
void
_thread_sig_send(pthread_t pthread, int sig)
{
	/*
	 * Check that the signal is not being ignored:
	 */
	if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
		if (pthread->state == PS_SIGWAIT &&
		    sigismember(pthread->data.sigwait, sig)) {
			/* Change the state of the thread to run: */
			PTHREAD_NEW_STATE(pthread, PS_RUNNING);

			/* Return the signal number: */
			pthread->signo = sig;
		} else if (pthread == _thread_run) {
			/* Add the signal to the pending set: */
			sigaddset(&pthread->sigpend, sig);
			/*
			 * Deliver the signal to the process if a
			 * handler is not installed:
			 */
			if (_thread_sigact[sig - 1].sa_handler == SIG_DFL)
				kill(getpid(), sig);
			if (!sigismember(&pthread->sigmask, sig)) {
				/*
				 * Call the kernel scheduler which will safely
				 * install a signal frame for this thread:
				 */
				_thread_kern_sched_sig();
			}
		} else {
			if (pthread->state != PS_SIGWAIT &&
			    !sigismember(&pthread->sigmask, sig)) {
				/* Protect the scheduling queues: */
				_thread_kern_sig_defer();
				/*
				 * Perform any state changes due to signal
				 * arrival:
				 */
				thread_sig_add(pthread, sig, /* has args */ 0);
				/* Unprotect the scheduling queues: */
				_thread_kern_sig_undefer();
			}
			else
				/* Increment the pending signal count. */
				sigaddset(&pthread->sigpend,sig);

			/*
			 * Deliver the signal to the process if a
			 * handler is not installed:
			 */
			if (_thread_sigact[sig - 1].sa_handler == SIG_DFL)
				kill(getpid(), sig);
		}
	}
}
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
 * User thread signal handler wrapper.
 *
 *   thread - current running thread
 *
 * Runs on the thread's newest signal frame.  It backs the thread out
 * of fd/file lock queues if needed, dispatches the user's handler with
 * the proper argument forms, and then either returns control to the
 * previous frame via the kernel scheduler or, on an abnormal exit
 * (longjmp/setcontext/pthread_exit from the handler), unwinds signal
 * frames down to the requested destination frame.
 */
void
_thread_sig_wrapper(void)
{
	void (*sigfunc)(int, siginfo_t *, void *);
	struct pthread_signal_frame *psf;
	pthread_t thread;
	int	dead = 0;
	int	i, sig, has_args;
	int	frame, dst_frame;

	thread = _thread_run;

	/* Get the current frame and state: */
	frame = thread->sigframe_count;
	PTHREAD_ASSERT(frame > 0, "Invalid signal frame in signal handler");
	psf = thread->curframe;

	/* Check the threads previous state: */
	if (psf->saved_state.psd_state != PS_RUNNING) {
		/*
		 * Do a little cleanup handling for those threads in
		 * queues before calling the signal handler.  Signals
		 * for these threads are temporarily blocked until
		 * after cleanup handling.
		 */
		switch (psf->saved_state.psd_state) {
		case PS_FDLR_WAIT:
		case PS_FDLW_WAIT:
			_fd_lock_backout(thread);
			psf->saved_state.psd_state = PS_RUNNING;
			/* Reenable signals: */
			thread->sigmask = psf->saved_state.psd_sigmask;
			break;

		case PS_FILE_WAIT:
			_flockfile_backout(thread);
			psf->saved_state.psd_state = PS_RUNNING;
			/* Reenable signals: */
			thread->sigmask = psf->saved_state.psd_sigmask;
			break;

		default:
			break;
		}
	}

	/*
	 * Unless the thread exits or longjmps out of the signal handler,
	 * return to the previous frame:
	 */
	dst_frame = frame - 1;

	/*
	 * Check that a custom handler is installed and if the signal
	 * is not blocked:
	 */
	sigfunc = _thread_sigact[psf->signo - 1].sa_sigaction;
	if (((__sighandler_t *)sigfunc != SIG_DFL) &&
	    ((__sighandler_t *)sigfunc != SIG_IGN)) {
		/*
		 * The signal jump buffer is allocated off the stack.
		 * If the signal handler tries to [_][sig]longjmp() or
		 * setcontext(), our wrapped versions of these routines
		 * will copy the user supplied jump buffer or context
		 * to the destination signal frame, set the destination
		 * signal frame in psf->dst_frame, and _longjmp() back
		 * to here.
		 */
		jmp_buf	jb;

		/*
		 * Set up the context for abnormal returns out of signal
		 * handlers.
		 */
		psf->sig_jb = &jb;
		if (_setjmp(jb) == 0) {
			DBG_MSG("_thread_sig_wrapper: Entering frame %d, "
			    "stack 0x%lx\n", frame, GET_STACK_JB(jb));
			/*
			 * Invalidate the destination frame before calling
			 * the signal handler.
			 */
			psf->dst_frame = -1;

			/*
			 * Dispatch the signal via the custom signal
			 * handler:
			 */
			if (psf->sig_has_args == 0)
				(*(sigfunc))(psf->signo, NULL, NULL);
			else if ((_thread_sigact[psf->signo - 1].sa_flags &
			    SA_SIGINFO) != 0)
				(*(sigfunc))(psf->signo,
				    &_thread_sigq[psf->signo - 1].siginfo,
				    &_thread_sigq[psf->signo - 1].uc);
			else
				/*
				 * Legacy (non-SA_SIGINFO) handlers get the
				 * si_code value in the siginfo slot.
				 */
				(*(sigfunc))(psf->signo,
				    (siginfo_t *)_thread_sigq[psf->signo - 1].siginfo.si_code,
				    &_thread_sigq[psf->signo - 1].uc);
		}
		else {
			/*
			 * The return from _setjmp() should only be non-zero
			 * when the signal handler wants to xxxlongjmp() or
			 * setcontext() to a different context, or if the
			 * thread has exited (via pthread_exit).
			 */
			/*
			 * Grab a copy of the destination frame before it
			 * gets clobbered after unwinding.
			 */
			dst_frame = psf->dst_frame;
			DBG_MSG("Abnormal exit from handler for signal %d, "
			    "frame %d\n", psf->signo, frame);

			/* Has the thread exited? */
			if ((dead = thread->flags & PTHREAD_EXITING) != 0)
				/* When exiting, unwind to frame 0. */
				dst_frame = 0;
			else if ((dst_frame < 0) || (dst_frame > frame))
				PANIC("Attempt to unwind to invalid "
				    "signal frame");

			/* Unwind to the target frame: */
			for (i = frame; i > dst_frame; i--) {
				DBG_MSG("Leaving frame %d, signal %d\n", i,
				    thread->sigframes[i]->signo);
				/* Leave the current signal frame: */
				thread_sigframe_leave(thread, i);

				/*
				 * Save whatever is needed out of the state
				 * data; as soon as the frame count is
				 * decremented, another signal can arrive
				 * and corrupt this view of the state data.
				 */
				sig = thread->sigframes[i]->signo;
				has_args = thread->sigframes[i]->sig_has_args;

				/*
				 * We're done with this signal frame:
				 */
				thread->curframe = thread->sigframes[i - 1];
				thread->sigframe_count = i - 1;

				/*
				 * Only unblock the signal if it was a
				 * process signal as opposed to a signal
				 * generated by pthread_kill().
				 */
				if (has_args != 0)
					_thread_sigq[sig - 1].blocked = 0;
			}
		}
	}

	/*
	 * Call the kernel scheduler to schedule the next
	 * thread.
	 */
	if (dead == 0) {
		/* Restore the threads state: */
		thread_sigframe_restore(thread, thread->sigframes[dst_frame]);
		_thread_kern_sched_frame(dst_frame);
	}
	else {
		PTHREAD_ASSERT(dst_frame == 0,
		    "Invalid signal frame for dead thread");

		/* Perform any necessary cleanup before exiting. */
		thread_sigframe_leave(thread, 0);

		/* This should never return: */
		_thread_exit_finish();
		PANIC("Return from _thread_exit_finish in signal wrapper");
	}
}
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
 * Push a new signal frame for "sig" onto the top of the thread's
 * stack.  The frame structure itself is carved out of the thread's
 * stack just below the current frame's stack pointer, and its context
 * is initialized so that resuming it enters _thread_sig_wrapper.
 */
static void
thread_sigframe_add(pthread_t thread, int sig)
{
	unsigned long	stackp = 0;

	/* Get the top of the threads stack: */
	switch (thread->curframe->ctxtype) {
	case CTX_JB:
	case CTX_JB_NOSIG:
		stackp = GET_STACK_JB(thread->curframe->ctx.jb);
		break;
	case CTX_SJB:
		stackp = GET_STACK_SJB(thread->curframe->ctx.sigjb);
		break;
	case CTX_UC:
		stackp = GET_STACK_UC(&thread->curframe->ctx.uc);
		break;
	default:
		PANIC("Invalid thread context type");
		break;
	}

	/*
	 * Leave a little space on the stack and round down to the
	 * nearest aligned word:
	 */
	stackp -= sizeof(double);
	stackp &= ~0x3UL;

	/* Allocate room on top of the stack for a new signal frame: */
	stackp -= sizeof(struct pthread_signal_frame);

	/* Set up the new frame: */
	thread->sigframe_count++;
	thread->sigframes[thread->sigframe_count] =
	    (struct pthread_signal_frame *) stackp;
	thread->curframe = thread->sigframes[thread->sigframe_count];
	thread->curframe->stackp = stackp;
	thread->curframe->ctxtype = CTX_JB_NOSIG;
	thread->curframe->longjmp_val = 1;
	thread->curframe->signo = sig;

	/*
	 * Set up the context:
	 */
	_setjmp(thread->curframe->ctx.jb);
	SET_STACK_JB(thread->curframe->ctx.jb, stackp);
	SET_RETURN_ADDR_JB(thread->curframe->ctx.jb, _thread_sig_wrapper);
}
|
1999-12-17 00:56:36 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* Locate the signal frame from the specified stack pointer.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
_thread_sigframe_find(pthread_t pthread, void *stackp)
|
|
|
|
{
|
|
|
|
int frame;
|
1999-12-17 00:56:36 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* Find the destination of the target frame based on the
|
|
|
|
* given stack pointer.
|
|
|
|
*/
|
|
|
|
for (frame = pthread->sigframe_count; frame >= 0; frame--) {
|
|
|
|
if (stackp < (void *)pthread->sigframes[frame]->stackp)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return (frame);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Perform cleanup for the saved state in the given signal frame,
 * backing the thread out of whatever synchronization or wait queues
 * that state implies (condvar, fd lock, file lock, join, mutex, or
 * the generic wait/work queues).  Called when unwinding signal
 * frames.
 */
void
thread_sigframe_leave(pthread_t thread, int frame)
{
	struct pthread_state_data *psd;

	psd = &thread->sigframes[frame]->saved_state;

	/*
	 * Perform any necessary cleanup for this signal frame:
	 */
	switch (psd->psd_state) {
	/* States with no queue membership to undo: */
	case PS_DEAD:
	case PS_DEADLOCK:
	case PS_RUNNING:
	case PS_SIGTHREAD:
	case PS_STATE_MAX:
	case PS_SUSPENDED:
		break;

	/*
	 * Threads in the following states need to be removed
	 * from queues.
	 */
	case PS_COND_WAIT:
		_cond_wait_backout(thread);
		if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
			PTHREAD_WAITQ_REMOVE(thread);
		break;

	case PS_FDLR_WAIT:
	case PS_FDLW_WAIT:
		_fd_lock_backout(thread);
		if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
			PTHREAD_WAITQ_REMOVE(thread);
		break;

	case PS_FILE_WAIT:
		_flockfile_backout(thread);
		if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
			PTHREAD_WAITQ_REMOVE(thread);
		break;

	case PS_JOIN:
		_join_backout(thread);
		if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
			PTHREAD_WAITQ_REMOVE(thread);
		break;

	case PS_MUTEX_WAIT:
		_mutex_lock_backout(thread);
		if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0)
			PTHREAD_WAITQ_REMOVE(thread);
		break;

	case PS_FDR_WAIT:
	case PS_FDW_WAIT:
	case PS_POLL_WAIT:
	case PS_SELECT_WAIT:
	case PS_SIGSUSPEND:
	case PS_SIGWAIT:
	case PS_SLEEP_WAIT:
	case PS_SPINBLOCK:
	case PS_WAIT_WAIT:
		if ((psd->psd_flags & PTHREAD_FLAGS_IN_WAITQ) != 0) {
			PTHREAD_WAITQ_REMOVE(thread);
			if ((psd->psd_flags & PTHREAD_FLAGS_IN_WORKQ) != 0)
				PTHREAD_WORKQ_REMOVE(thread);
		}
		break;
	}
}
|
2000-10-13 22:12:32 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf)
|
|
|
|
{
|
|
|
|
thread->interrupted = psf->saved_state.psd_interrupted;
|
|
|
|
thread->sigmask = psf->saved_state.psd_sigmask;
|
|
|
|
thread->state = psf->saved_state.psd_state;
|
|
|
|
thread->flags = psf->saved_state.psd_flags;
|
|
|
|
thread->wakeup_time = psf->saved_state.psd_wakeup_time;
|
|
|
|
thread->data = psf->saved_state.psd_wait_data;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf)
|
|
|
|
{
|
|
|
|
psf->saved_state.psd_interrupted = thread->interrupted;
|
|
|
|
psf->saved_state.psd_sigmask = thread->sigmask;
|
|
|
|
psf->saved_state.psd_state = thread->state;
|
|
|
|
psf->saved_state.psd_flags = thread->flags;
|
|
|
|
thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE |
|
|
|
|
PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ |
|
|
|
|
PTHREAD_FLAGS_IN_JOINQ;
|
|
|
|
psf->saved_state.psd_wakeup_time = thread->wakeup_time;
|
|
|
|
psf->saved_state.psd_wait_data = thread->data;
|
|
|
|
}
|
|
|
|
|
1996-01-22 00:23:58 +00:00
|
|
|
#endif
|