/*
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/signalvar.h>
#include <signal.h>
#include <fcntl.h>
#include <unistd.h>
#include <setjmp.h>
#include <errno.h>
#include <pthread.h>
#include "pthread_private.h"

/* Prototypes: */
static void	thread_sig_add(pthread_t pthread, int sig, int has_args);
static void	thread_sig_check_state(pthread_t pthread, int sig);
static pthread_t thread_sig_find(int sig);
static void	thread_sig_handle_special(int sig);
static void	thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp);
static void	thread_sigframe_add(pthread_t thread, int sig, int has_args);
static void	thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf);

/* #define DEBUG_SIGNAL */
#ifdef DEBUG_SIGNAL
#define DBG_MSG		stdout_debug
#else
#define DBG_MSG(x...)
#endif
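
/*
 * When invariant checking is enabled, these macros record whether the
 * library is currently executing inside the signal handler; otherwise
 * they compile away to nothing.
 */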
#if defined(_PTHREADS_INVARIANTS)
#define SIG_SET_ACTIVE()	_sig_in_handler = 1
#define SIG_SET_INACTIVE()	_sig_in_handler = 0
#else
#define SIG_SET_ACTIVE()
#define SIG_SET_INACTIVE()
#endif

void
_thread_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
{
	struct pthread	*curthread = _get_curthread();
	pthread_t	pthread, pthread_h;
	void		*stackp;
	int		in_sched = 0;
	char		c;

	if (ucp == NULL)
		PANIC("Thread signal handler received null context");
	DBG_MSG("Got signal %d, current thread %p\n", sig, curthread);
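
	/*
	 * Determine if the signal interrupted the thread scheduler, either
	 * because the scheduler has flagged itself as running or because
	 * the interrupted stack pointer lies within the scheduler's stack:
	 */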
	if (_thread_kern_in_sched != 0)
		in_sched = 1;
	else {
		stackp = (void *)GET_STACK_UC(ucp);
		if ((stackp >= _thread_kern_sched_stack) &&
		    (stackp <= _thread_kern_sched_stack + SCHED_STACK_SIZE))
			in_sched = 1;
	}

	/* Check if an interval timer signal: */
	if (sig == _SCHED_SIGNAL) {
		/* Update the scheduling clock: */
		gettimeofday((struct timeval *)&_sched_tod, NULL);
		_sched_ticks++;

		if (in_sched != 0) {
			/*
			 * The scheduler is already running; ignore this
			 * signal.
			 */
		}
		/*
		 * Check if the scheduler interrupt has come when
		 * the currently running thread has deferred thread
		 * signals.
		 */
		else if (curthread->sig_defer_count > 0)
			curthread->yield_on_sig_undefer = 1;
		else {
			/*
			 * Save the context of the currently running thread:
			 */
			thread_sig_savecontext(curthread, ucp);

			/*
			 * Schedule the next thread. This function is not
			 * expected to return because it will do a longjmp
			 * instead.
			 */
			_thread_kern_sched(ucp);

			/*
			 * This point should not be reached, so abort the
			 * process:
			 */
			PANIC("Returned to signal function from scheduler");
		}
	}
	/*
	 * Check if the kernel has been interrupted while the scheduler
	 * is accessing the scheduling queues or if there is a currently
	 * running thread that has deferred signals.
	 */
	else if ((in_sched != 0) || (curthread->sig_defer_count > 0)) {
		/* Cast the signal number to a character variable: */
		c = sig;

		/*
		 * Write the signal number to the kernel pipe so that it will
		 * be ready to read when this signal handler returns.
		 */
		if (_queue_signals != 0) {
			__sys_write(_thread_kern_pipe[1], &c, 1);
			DBG_MSG("Got signal %d, queueing to kernel pipe\n", sig);
		}
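		/* Queue the signal for later dispatch unless it is already being handled: */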
		if (_thread_sigq[sig - 1].blocked == 0) {
			DBG_MSG("Got signal %d, adding to _thread_sigq\n", sig);
			/*
			 * Do not block this signal; it will be blocked
			 * when the pending signals are run down.
			 */
			/* _thread_sigq[sig - 1].blocked = 1; */

			/*
			 * Queue the signal, saving siginfo and sigcontext
			 * (ucontext).
			 *
			 * XXX - Do we need to copy siginfo and ucp?
			 */
			_thread_sigq[sig - 1].signo = sig;
			if (info != NULL)
				memcpy(&_thread_sigq[sig - 1].siginfo, info,
				    sizeof(*info));
			memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp));

			/* Indicate that there are queued signals: */
			_thread_sigq[sig - 1].pending = 1;
			_sigq_check_reqd = 1;
		}
		/* These signals need special handling: */
		else if (sig == SIGCHLD || sig == SIGTSTP ||
		    sig == SIGTTIN || sig == SIGTTOU) {
			_thread_sigq[sig - 1].pending = 1;
			_thread_sigq[sig - 1].signo = sig;
			_sigq_check_reqd = 1;
		}
		else
			DBG_MSG("Got signal %d, ignored.\n", sig);
	}
	/*
	 * The signal handlers should have been installed so that they
	 * cannot be interrupted by other signals.
	 */
	else if (_thread_sigq[sig - 1].blocked == 0) {
		/*
		 * The signal is not blocked; handle the signal.
		 *
		 * Ignore subsequent occurrences of this signal
		 * until the current signal is handled:
		 */
		_thread_sigq[sig - 1].blocked = 1;

		/* This signal will be handled; clear the pending flag: */
		_thread_sigq[sig - 1].pending = 0;

		/*
		 * Save siginfo and sigcontext (ucontext).
		 *
		 * XXX - Do we need to copy siginfo and ucp?
		 */
		_thread_sigq[sig - 1].signo = sig;

		if (info != NULL)
			memcpy(&_thread_sigq[sig - 1].siginfo, info,
			    sizeof(*info));
		memcpy(&_thread_sigq[sig - 1].uc, ucp, sizeof(*ucp));
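
		/* Note that the handler is now active (a no-op unless invariant checking is enabled): */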
		SIG_SET_ACTIVE();
		/* Handle special signals: */
		thread_sig_handle_special(sig);

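		/*
		 * pthread_h is only set below, after a signal frame has
		 * been added to a thread; it is used later to decide
		 * whether to enter the scheduler.
		 */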
		pthread_h = NULL;
		if ((pthread = thread_sig_find(sig)) != NULL) {
			DBG_MSG("Got signal %d, adding frame to thread %p\n",
			    sig, pthread);
			/*
			 * A thread was found that can handle the signal.
			 * Save the context of the currently running thread
			 * so that we can switch to another thread without
			 * losing track of where the current thread left off.
			 * This also applies if the current thread is the
			 * thread to be signaled.
			 */
			thread_sig_savecontext(curthread, ucp);

			/* Setup the target thread to receive the signal: */
			thread_sig_add(pthread, sig, /*has_args*/ 1);

			/* Take a peek at the next ready to run thread: */
			pthread_h = PTHREAD_PRIOQ_FIRST();
			DBG_MSG("Finished adding frame, head of prio list %p\n",
			    pthread_h);
		}
		else
			DBG_MSG("No thread to handle signal %d\n", sig);
		SIG_SET_INACTIVE();

		/*
		 * Switch to a different context if the currently running
		 * thread takes a signal, or if another thread takes a
		 * signal and the currently running thread is not in a
		 * signal handler.
		 */
		if ((pthread == curthread) || ((pthread_h != NULL) &&
		    (pthread_h->active_priority > curthread->active_priority))) {
			/* Enter the kernel scheduler: */
			_thread_kern_sched(ucp);
		}
	}
	else {
		SIG_SET_ACTIVE();
		thread_sig_handle_special(sig);
		SIG_SET_INACTIVE();
	}
}

static void
thread_sig_savecontext(pthread_t pthread, ucontext_t *ucp)
{
	memcpy(&pthread->ctx.uc, ucp, sizeof(*ucp));

	/* XXX - Save FP registers too? */
	FP_SAVE_UC(&pthread->ctx.uc);

	/* Mark the context saved as a ucontext: */
	pthread->ctxtype = CTX_UC;
}

/*
 * Find a thread that can handle the signal.
 */
pthread_t
thread_sig_find(int sig)
{
	struct pthread	*curthread = _get_curthread();
	int		handler_installed;
	pthread_t	pthread, pthread_next;
	pthread_t	suspended_thread, signaled_thread;

	DBG_MSG("Looking for thread to handle signal %d\n", sig);
	/* Check if the signal requires a dump of thread information: */
	if (sig == SIGINFO) {
		/* Dump thread information to file: */
		_thread_dump_info();

		/* Unblock this signal to allow further dumps: */
		_thread_sigq[sig - 1].blocked = 0;
	}
	/* Check if an interval timer signal: */
	else if (sig == _SCHED_SIGNAL) {
		/*
		 * This shouldn't ever occur (should this panic?).
		 */
	} else {
		/*
		 * Enter a loop to look for threads that have the signal
		 * unmasked.  POSIX specifies that a thread in a sigwait
		 * will get the signal over any other threads.  Second
		 * preference will be threads in a sigsuspend.  Third
		 * preference will be the current thread.  If none of the
		 * above, then the signal is delivered to the first thread
		 * that is found.  Note that if a custom handler is not
		 * installed, the signal only affects threads in sigwait.
		 */
		suspended_thread = NULL;
		if ((curthread != &_thread_kern_thread) &&
		    !sigismember(&curthread->sigmask, sig))
			signaled_thread = curthread;
		else
			signaled_thread = NULL;
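		/*
		 * Note whether a custom handler is installed for this
		 * signal; if not, only sigwaiting threads are affected.
		 */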
		if ((_thread_sigact[sig - 1].sa_handler == SIG_IGN) ||
		    (_thread_sigact[sig - 1].sa_handler == SIG_DFL))
			handler_installed = 0;
		else
			handler_installed = 1;
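
		/* Walk the waiting queue looking for a candidate thread: */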
		for (pthread = TAILQ_FIRST(&_waitingq);
		    pthread != NULL; pthread = pthread_next) {
			/*
			 * Grab the next thread before possibly destroying
			 * the link entry.
			 */
			pthread_next = TAILQ_NEXT(pthread, pqe);

			if ((pthread->state == PS_SIGWAIT) &&
			    sigismember(pthread->data.sigwait, sig)) {
				/* Change the state of the thread to run: */
				PTHREAD_NEW_STATE(pthread,PS_RUNNING);
				/*
				 * A signal handler is not invoked for threads
				 * in sigwait.  Clear the blocked and pending
				 * flags.
				 */
				_thread_sigq[sig - 1].blocked = 0;
				_thread_sigq[sig - 1].pending = 0;

				/* Return the signal number: */
				pthread->signo = sig;

				/*
1999-12-04 22:55:59 +00:00
|
|
|
* POSIX doesn't specify which thread
|
|
|
|
* will get the signal if there are multiple
|
|
|
|
* waiters, so we give it to the first thread
|
|
|
|
* we find.
|
|
|
|
*
|
1998-09-30 06:27:31 +00:00
|
|
|
* Do not attempt to deliver this signal
|
2000-10-13 22:12:32 +00:00
|
|
|
* to other threads and do not add the signal
|
|
|
|
* to the process pending set.
|
1998-09-30 06:27:31 +00:00
|
|
|
*/
|
1999-12-17 00:56:36 +00:00
|
|
|
return (NULL);
|
1998-09-30 06:27:31 +00:00
|
|
|
}
|
1999-12-28 18:08:09 +00:00
|
|
|
else if ((handler_installed != 0) &&
|
|
|
|
!sigismember(&pthread->sigmask, sig)) {
|
1999-12-04 22:55:59 +00:00
|
|
|
if (pthread->state == PS_SIGSUSPEND) {
|
|
|
|
if (suspended_thread == NULL)
|
|
|
|
suspended_thread = pthread;
|
|
|
|
} else if (signaled_thread == NULL)
|
|
|
|
signaled_thread = pthread;
|
|
|
|
}
|
1998-09-30 06:27:31 +00:00
|
|
|
}
|
|
|
|
|
1999-12-17 00:56:36 +00:00
|
|
|
/*
|
1999-12-28 18:08:09 +00:00
|
|
|
* Only perform wakeups and signal delivery if there is a
|
|
|
|
* custom handler installed:
|
1999-12-17 00:56:36 +00:00
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
if (handler_installed == 0) {
|
|
|
|
/*
|
|
|
|
* There is no handler installed. Unblock the
|
|
|
|
* signal so that if a handler _is_ installed, any
|
|
|
|
* subsequent signals can be handled.
|
|
|
|
*/
|
|
|
|
_thread_sigq[sig - 1].blocked = 0;
|
|
|
|
} else {
|
1999-12-17 00:56:36 +00:00
|
|
|
/*
|
1999-12-28 18:08:09 +00:00
|
|
|
* If we didn't find a thread in the waiting queue,
|
|
|
|
* check the all threads queue:
|
1999-12-17 00:56:36 +00:00
|
|
|
*/
|
1999-12-28 18:08:09 +00:00
|
|
|
if (suspended_thread == NULL &&
|
|
|
|
signaled_thread == NULL) {
|
|
|
|
/*
|
|
|
|
* Enter a loop to look for other threads
|
2000-11-09 05:08:26 +00:00
|
|
|
* capable of receiving the signal:
|
1999-12-28 18:08:09 +00:00
|
|
|
*/
|
|
|
|
TAILQ_FOREACH(pthread, &_thread_list, tle) {
|
|
|
|
if (!sigismember(&pthread->sigmask,
|
|
|
|
sig)) {
|
|
|
|
signaled_thread = pthread;
|
|
|
|
break;
|
|
|
|
}
|
1999-12-17 00:56:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1999-12-04 22:55:59 +00:00
|
|
|
if (suspended_thread == NULL &&
|
|
|
|
signaled_thread == NULL)
|
|
|
|
/*
|
|
|
|
* Add it to the set of signals pending
|
|
|
|
* on the process:
|
|
|
|
*/
|
|
|
|
sigaddset(&_process_sigpending, sig);
|
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* We only deliver the signal to one thread;
|
|
|
|
* give preference to the suspended thread:
|
|
|
|
*/
|
|
|
|
if (suspended_thread != NULL)
|
|
|
|
pthread = suspended_thread;
|
|
|
|
else
|
|
|
|
pthread = signaled_thread;
|
1999-12-17 00:56:36 +00:00
|
|
|
return (pthread);
|
1999-03-23 05:07:56 +00:00
|
|
|
}
|
1999-12-04 22:55:59 +00:00
|
|
|
}
|
1998-04-29 09:59:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* No thread was selected; return NULL: */
|
1999-12-17 00:56:36 +00:00
|
|
|
return (NULL);
|
1998-04-29 09:59:34 +00:00
|
|
|
}
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
void
|
|
|
|
_thread_sig_check_pending(pthread_t pthread)
|
2000-01-19 07:04:50 +00:00
|
|
|
{
|
2000-10-13 22:12:32 +00:00
|
|
|
sigset_t sigset;
|
|
|
|
int i;
|
|
|
|
|
2000-01-19 07:04:50 +00:00
|
|
|
/*
|
2000-10-13 22:12:32 +00:00
|
|
|
* Check if there are pending signals for the running
|
|
|
|
* thread or process that aren't blocked:
|
2000-01-19 07:04:50 +00:00
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
sigset = pthread->sigpend;
|
|
|
|
SIGSETOR(sigset, _process_sigpending);
|
|
|
|
SIGSETNAND(sigset, pthread->sigmask);
|
|
|
|
if (SIGNOTEMPTY(sigset)) {
|
|
|
|
for (i = 1; i < NSIG; i++) {
|
|
|
|
if (sigismember(&sigset, i) != 0) {
|
|
|
|
if (sigismember(&pthread->sigpend, i) != 0)
|
|
|
|
thread_sig_add(pthread, i,
|
|
|
|
/*has_args*/ 0);
|
|
|
|
else {
|
|
|
|
thread_sig_add(pthread, i,
|
|
|
|
/*has_args*/ 1);
|
|
|
|
sigdelset(&_process_sigpending, i);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2000-01-19 07:04:50 +00:00
|
|
|
}
|
2000-10-13 22:12:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This can only be called from the kernel scheduler. It assumes that
|
|
|
|
* all thread contexts are saved and that a signal frame can safely be
|
|
|
|
* added to any user thread.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
_thread_sig_handle_pending(void)
|
|
|
|
{
|
|
|
|
pthread_t pthread;
|
|
|
|
int i, sig;
|
|
|
|
|
|
|
|
PTHREAD_ASSERT(_thread_kern_in_sched != 0,
|
|
|
|
"_thread_sig_handle_pending called from outside kernel schedule");
|
2000-01-19 07:04:50 +00:00
|
|
|
/*
|
2000-10-13 22:12:32 +00:00
|
|
|
* Check the array of pending signals:
|
2000-01-19 07:04:50 +00:00
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
for (i = 0; i < NSIG; i++) {
|
|
|
|
if (_thread_sigq[i].pending != 0) {
|
|
|
|
/* This signal is no longer pending. */
|
|
|
|
_thread_sigq[i].pending = 0;
|
|
|
|
|
|
|
|
sig = _thread_sigq[i].signo;
|
|
|
|
|
|
|
|
/* Some signals need special handling: */
|
|
|
|
thread_sig_handle_special(sig);
|
|
|
|
|
|
|
|
if (_thread_sigq[i].blocked == 0) {
|
|
|
|
/*
|
|
|
|
* Block future signals until this one
|
|
|
|
* is handled:
|
|
|
|
*/
|
|
|
|
_thread_sigq[i].blocked = 1;
|
|
|
|
|
|
|
|
if ((pthread = thread_sig_find(sig)) != NULL) {
|
|
|
|
/*
|
|
|
|
* Setup the target thread to receive
|
|
|
|
* the signal:
|
|
|
|
*/
|
|
|
|
thread_sig_add(pthread, sig,
|
|
|
|
/*has_args*/ 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2000-01-19 07:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2000-10-13 22:12:32 +00:00
|
|
|
thread_sig_handle_special(int sig)
|
2000-01-19 07:04:50 +00:00
|
|
|
{
|
2000-10-13 22:12:32 +00:00
|
|
|
pthread_t pthread, pthread_next;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
switch (sig) {
|
|
|
|
case SIGCHLD:
|
2000-01-19 07:04:50 +00:00
|
|
|
/*
|
2000-10-13 22:12:32 +00:00
|
|
|
* Go through the file list and set all files
|
|
|
|
* to non-blocking again in case the child
|
|
|
|
* set some of them to block. Sigh.
|
2000-01-19 07:04:50 +00:00
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
for (i = 0; i < _thread_dtablesize; i++) {
|
|
|
|
/* Check if this file is used: */
|
|
|
|
if (_thread_fd_table[i] != NULL) {
|
|
|
|
/*
|
|
|
|
* Set the file descriptor to non-blocking:
|
|
|
|
*/
|
2001-01-24 13:03:38 +00:00
|
|
|
__sys_fcntl(i, F_SETFL,
|
2000-10-13 22:12:32 +00:00
|
|
|
_thread_fd_table[i]->flags | O_NONBLOCK);
|
|
|
|
}
|
|
|
|
}
|
2000-01-19 07:04:50 +00:00
|
|
|
/*
|
2000-10-13 22:12:32 +00:00
|
|
|
* Enter a loop to wake up all threads waiting
|
|
|
|
* for a process to complete:
|
2000-01-19 07:04:50 +00:00
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
for (pthread = TAILQ_FIRST(&_waitingq);
|
|
|
|
pthread != NULL; pthread = pthread_next) {
|
|
|
|
/*
|
|
|
|
* Grab the next thread before possibly
|
|
|
|
* destroying the link entry:
|
|
|
|
*/
|
|
|
|
pthread_next = TAILQ_NEXT(pthread, pqe);
|
|
|
|
|
2000-01-19 07:04:50 +00:00
|
|
|
/*
|
2000-10-13 22:12:32 +00:00
|
|
|
* If this thread is waiting for a child
|
|
|
|
* process to complete, wake it up:
|
2000-01-19 07:04:50 +00:00
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
if (pthread->state == PS_WAIT_WAIT) {
|
|
|
|
/* Make the thread runnable: */
|
|
|
|
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
|
|
|
|
|
|
|
|
/* Return the signal number: */
|
|
|
|
pthread->signo = sig;
|
2000-01-19 07:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
2000-10-13 22:12:32 +00:00
|
|
|
break;
|
2000-01-19 07:04:50 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* POSIX says that pending SIGCONT signals are
|
|
|
|
* discarded when one of these signals occurs.
|
|
|
|
*/
|
|
|
|
case SIGTSTP:
|
|
|
|
case SIGTTIN:
|
|
|
|
case SIGTTOU:
|
|
|
|
/*
|
|
|
|
* Enter a loop to discard pending SIGCONT
|
|
|
|
* signals:
|
|
|
|
*/
|
|
|
|
TAILQ_FOREACH(pthread, &_thread_list, tle) {
|
|
|
|
sigdelset(&pthread->sigpend, SIGCONT);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
2000-01-19 07:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* Perform thread specific actions in response to a signal.
|
|
|
|
* This function is only called if there is a handler installed
|
|
|
|
* for the signal, and if the target thread has the signal
|
|
|
|
* unmasked.
|
|
|
|
*/
|
1999-12-04 22:55:59 +00:00
|
|
|
static void
|
2000-10-13 22:12:32 +00:00
|
|
|
thread_sig_add(pthread_t pthread, int sig, int has_args)
|
1998-04-29 09:59:34 +00:00
|
|
|
{
|
2000-11-09 05:08:26 +00:00
|
|
|
int restart;
|
2000-10-13 22:12:32 +00:00
|
|
|
int suppress_handler = 0;
|
2001-03-09 16:05:43 +00:00
|
|
|
int thread_is_active = 0;
|
2000-10-13 22:12:32 +00:00
|
|
|
|
|
|
|
restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;
|
|
|
|
|
2001-05-04 20:37:07 +00:00
|
|
|
/* Make sure this signal isn't still in the pending set: */
|
|
|
|
sigdelset(&pthread->sigpend, sig);
|
|
|
|
|
1998-06-10 22:25:18 +00:00
|
|
|
/*
|
|
|
|
* Process according to thread state:
|
|
|
|
*/
|
|
|
|
switch (pthread->state) {
|
|
|
|
/*
|
|
|
|
* States which do not change when a signal is trapped:
|
|
|
|
*/
|
|
|
|
case PS_DEAD:
|
1999-12-04 22:55:59 +00:00
|
|
|
case PS_DEADLOCK:
|
1998-06-10 22:25:18 +00:00
|
|
|
case PS_STATE_MAX:
|
|
|
|
case PS_SIGTHREAD:
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* You can't call a signal handler for threads in these
|
|
|
|
* states.
|
|
|
|
*/
|
|
|
|
suppress_handler = 1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* States which do not need any cleanup handling when signals
|
|
|
|
* occur:
|
|
|
|
*/
|
|
|
|
case PS_RUNNING:
|
|
|
|
/*
|
|
|
|
* Remove the thread from the queue before changing its
|
|
|
|
* priority:
|
|
|
|
*/
|
|
|
|
if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0)
|
|
|
|
PTHREAD_PRIOQ_REMOVE(pthread);
|
2001-03-09 16:05:43 +00:00
|
|
|
else
|
|
|
|
/*
|
|
|
|
* This thread is running; avoid placing it in
|
|
|
|
* the run queue:
|
|
|
|
*/
|
|
|
|
thread_is_active = 1;
|
2000-10-13 22:12:32 +00:00
|
|
|
break;
|
|
|
|
|
1998-06-10 22:25:18 +00:00
|
|
|
case PS_SUSPENDED:
|
2000-10-13 22:12:32 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case PS_SPINBLOCK:
|
|
|
|
/* Remove the thread from the workq and waitq: */
|
|
|
|
PTHREAD_WORKQ_REMOVE(pthread);
|
|
|
|
PTHREAD_WAITQ_REMOVE(pthread);
|
|
|
|
/* Make the thread runnable: */
|
|
|
|
PTHREAD_SET_STATE(pthread, PS_RUNNING);
|
1998-06-10 22:25:18 +00:00
|
|
|
break;
|
1998-04-29 09:59:34 +00:00
|
|
|
|
1999-12-17 00:56:36 +00:00
|
|
|
case PS_SIGWAIT:
|
2000-10-13 22:12:32 +00:00
|
|
|
/* The signal handler is not called for threads in SIGWAIT. */
|
|
|
|
suppress_handler = 1;
|
1999-12-17 00:56:36 +00:00
|
|
|
/* Wake up the thread if it is waiting for this signal: */
|
|
|
|
if (sigismember(pthread->data.sigwait, sig)) {
|
|
|
|
/* Change the state of the thread to run: */
|
2000-10-13 22:12:32 +00:00
|
|
|
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
|
1999-12-17 00:56:36 +00:00
|
|
|
|
|
|
|
/* Return the signal number: */
|
|
|
|
pthread->signo = sig;
|
|
|
|
} else
|
|
|
|
/* Add the signal to the thread's pending set: */
|
2000-10-13 22:12:32 +00:00
|
|
|
sigaddset(&pthread->sigpend, sig);
|
1999-12-17 00:56:36 +00:00
|
|
|
break;
|
|
|
|
|
1998-06-17 03:53:16 +00:00
|
|
|
/*
|
|
|
|
* The wait state is a special case due to the handling of
|
|
|
|
* SIGCHLD signals.
|
|
|
|
*/
|
|
|
|
case PS_WAIT_WAIT:
|
2000-10-13 22:12:32 +00:00
|
|
|
if (sig == SIGCHLD) {
|
|
|
|
/* Change the state of the thread to run: */
|
|
|
|
PTHREAD_WAITQ_REMOVE(pthread);
|
|
|
|
PTHREAD_SET_STATE(pthread, PS_RUNNING);
|
1998-06-17 03:53:16 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/* Return the signal number: */
|
|
|
|
pthread->signo = sig;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* Mark the thread as interrupted only if the
|
|
|
|
* restart flag is not set on the signal action:
|
|
|
|
*/
|
|
|
|
if (restart == 0)
|
|
|
|
pthread->interrupted = 1;
|
|
|
|
PTHREAD_WAITQ_REMOVE(pthread);
|
|
|
|
PTHREAD_SET_STATE(pthread, PS_RUNNING);
|
|
|
|
}
|
|
|
|
break;
|
1998-06-17 03:53:16 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* States which cannot be interrupted but still require the
|
|
|
|
* signal handler to run:
|
|
|
|
*/
|
2000-11-09 05:08:26 +00:00
|
|
|
case PS_COND_WAIT:
|
2001-05-20 23:08:33 +00:00
|
|
|
case PS_JOIN:
|
2000-10-13 22:12:32 +00:00
|
|
|
case PS_MUTEX_WAIT:
|
|
|
|
/*
|
|
|
|
* Remove the thread from the wait queue. It will
|
|
|
|
* be added back to the wait queue once all signal
|
|
|
|
* handlers have been invoked.
|
|
|
|
*/
|
|
|
|
PTHREAD_WAITQ_REMOVE(pthread);
|
1998-06-17 03:53:16 +00:00
|
|
|
break;
|
|
|
|
|
1998-06-10 22:25:18 +00:00
|
|
|
/*
|
2000-10-13 22:12:32 +00:00
|
|
|
* States which are interruptible but may need to be removed
|
|
|
|
* from queues before any signal handler is called.
|
|
|
|
*
|
|
|
|
* XXX - We may not need to handle this condition, but will
|
|
|
|
* mark it as a potential problem.
|
1998-06-10 22:25:18 +00:00
|
|
|
*/
|
1999-12-28 18:08:09 +00:00
|
|
|
case PS_FDLR_WAIT:
|
|
|
|
case PS_FDLW_WAIT:
|
2000-10-13 22:12:32 +00:00
|
|
|
case PS_FILE_WAIT:
|
|
|
|
if (restart == 0)
|
|
|
|
pthread->interrupted = 1;
|
|
|
|
/*
|
|
|
|
* Remove the thread from the wait queue. Our
|
|
|
|
* signal handler hook will remove this thread
|
|
|
|
* from the fd or file queue before invoking
|
|
|
|
* the actual handler.
|
|
|
|
*/
|
|
|
|
PTHREAD_WAITQ_REMOVE(pthread);
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* States which are interruptible:
|
|
|
|
*/
|
1998-06-10 22:25:18 +00:00
|
|
|
case PS_FDR_WAIT:
|
|
|
|
case PS_FDW_WAIT:
|
2000-10-13 22:12:32 +00:00
|
|
|
if (restart == 0) {
|
|
|
|
/*
|
|
|
|
* Flag the operation as interrupted and
|
|
|
|
* set the state to running:
|
|
|
|
*/
|
1998-06-17 22:29:12 +00:00
|
|
|
pthread->interrupted = 1;
|
2000-10-13 22:12:32 +00:00
|
|
|
PTHREAD_SET_STATE(pthread, PS_RUNNING);
|
1998-06-17 22:29:12 +00:00
|
|
|
}
|
2000-10-13 22:12:32 +00:00
|
|
|
PTHREAD_WORKQ_REMOVE(pthread);
|
|
|
|
PTHREAD_WAITQ_REMOVE(pthread);
|
1998-06-10 22:25:18 +00:00
|
|
|
break;
|
1998-09-30 06:27:31 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
case PS_POLL_WAIT:
|
|
|
|
case PS_SELECT_WAIT:
|
|
|
|
case PS_SLEEP_WAIT:
|
1998-09-30 06:27:31 +00:00
|
|
|
/*
|
2000-10-13 22:12:32 +00:00
|
|
|
* Unmasked signals always cause poll, select, and sleep
|
|
|
|
* to terminate early, regardless of SA_RESTART:
|
1998-09-30 06:27:31 +00:00
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
pthread->interrupted = 1;
|
|
|
|
/* Remove threads in poll and select from the workq: */
|
|
|
|
if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0)
|
|
|
|
PTHREAD_WORKQ_REMOVE(pthread);
|
|
|
|
PTHREAD_WAITQ_REMOVE(pthread);
|
|
|
|
PTHREAD_SET_STATE(pthread, PS_RUNNING);
|
|
|
|
break;
|
1998-09-30 06:27:31 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
case PS_SIGSUSPEND:
|
|
|
|
PTHREAD_WAITQ_REMOVE(pthread);
|
|
|
|
PTHREAD_SET_STATE(pthread, PS_RUNNING);
|
1998-09-30 06:27:31 +00:00
|
|
|
break;
|
1998-04-29 09:59:34 +00:00
|
|
|
}
|
2000-10-13 22:12:32 +00:00
|
|
|
|
|
|
|
if (suppress_handler == 0) {
|
2000-11-09 05:08:26 +00:00
|
|
|
/* Set up a signal frame and save the current thread's state: */
|
|
|
|
thread_sigframe_add(pthread, sig, has_args);
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
2000-11-09 05:08:26 +00:00
|
|
|
* Signals are deferred until just before the thread's
|
|
|
|
* signal handler is invoked:
|
2000-10-13 22:12:32 +00:00
|
|
|
*/
|
2000-11-09 05:08:26 +00:00
|
|
|
pthread->sig_defer_count = 1;
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/* Make sure the thread is runnable: */
|
|
|
|
if (pthread->state != PS_RUNNING)
|
|
|
|
PTHREAD_SET_STATE(pthread, PS_RUNNING);
|
|
|
|
/*
|
|
|
|
* The thread should be removed from all scheduling
|
|
|
|
* queues at this point. Raise the priority and place
|
|
|
|
* the thread in the run queue.
|
|
|
|
*/
|
|
|
|
pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY;
|
2001-03-09 16:05:43 +00:00
|
|
|
if (thread_is_active == 0)
|
2000-10-13 22:12:32 +00:00
|
|
|
PTHREAD_PRIOQ_INSERT_TAIL(pthread);
|
|
|
|
}
|
1998-04-29 09:59:34 +00:00
|
|
|
}
|
1996-01-22 00:23:58 +00:00
|
|
|
|
2000-10-25 11:46:07 +00:00
|
|
|
static void
|
|
|
|
thread_sig_check_state(pthread_t pthread, int sig)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Process according to thread state:
|
|
|
|
*/
|
|
|
|
switch (pthread->state) {
|
|
|
|
/*
|
|
|
|
* States which do not change when a signal is trapped:
|
|
|
|
*/
|
|
|
|
case PS_DEAD:
|
|
|
|
case PS_DEADLOCK:
|
|
|
|
case PS_STATE_MAX:
|
|
|
|
case PS_SIGTHREAD:
|
|
|
|
case PS_RUNNING:
|
|
|
|
case PS_SUSPENDED:
|
|
|
|
case PS_SPINBLOCK:
|
|
|
|
case PS_COND_WAIT:
|
|
|
|
case PS_JOIN:
|
|
|
|
case PS_MUTEX_WAIT:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PS_SIGWAIT:
|
|
|
|
/* Wake up the thread if it is waiting for this signal: */
|
|
|
|
if (sigismember(pthread->data.sigwait, sig)) {
|
|
|
|
/* Change the state of the thread to run: */
|
|
|
|
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
|
|
|
|
|
|
|
|
/* Return the signal number: */
|
|
|
|
pthread->signo = sig;
|
|
|
|
} else
|
|
|
|
/* Add the signal to the thread's pending set: */
|
|
|
|
sigaddset(&pthread->sigpend, sig);
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The wait state is a special case due to the handling of
|
|
|
|
* SIGCHLD signals.
|
|
|
|
*/
|
|
|
|
case PS_WAIT_WAIT:
|
|
|
|
if (sig == SIGCHLD) {
|
|
|
|
/*
|
|
|
|
* Remove the thread from the wait queue and
|
|
|
|
* make it runnable:
|
|
|
|
*/
|
|
|
|
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
|
|
|
|
|
|
|
|
/* Return the signal number: */
|
|
|
|
pthread->signo = sig;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PS_FDLR_WAIT:
|
|
|
|
case PS_FDLW_WAIT:
|
|
|
|
case PS_SIGSUSPEND:
|
|
|
|
case PS_SLEEP_WAIT:
|
|
|
|
/*
|
|
|
|
* Remove the thread from the wait queue and make it
|
|
|
|
* runnable:
|
|
|
|
*/
|
|
|
|
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
|
|
|
|
|
|
|
|
/* Flag the operation as interrupted: */
|
|
|
|
pthread->interrupted = 1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* These states are additionally in the work queue:
|
|
|
|
*/
|
|
|
|
case PS_FDR_WAIT:
|
|
|
|
case PS_FDW_WAIT:
|
|
|
|
case PS_FILE_WAIT:
|
|
|
|
case PS_POLL_WAIT:
|
|
|
|
case PS_SELECT_WAIT:
|
|
|
|
/*
|
|
|
|
* Remove the thread from the wait and work queues, and
|
|
|
|
* make it runnable:
|
|
|
|
*/
|
|
|
|
PTHREAD_WORKQ_REMOVE(pthread);
|
|
|
|
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
|
|
|
|
|
|
|
|
/* Flag the operation as interrupted: */
|
|
|
|
pthread->interrupted = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* Send a signal to a specific thread (ala pthread_kill):
|
|
|
|
*/
|
1999-12-17 00:56:36 +00:00
|
|
|
void
|
|
|
|
_thread_sig_send(pthread_t pthread, int sig)
|
|
|
|
{
|
2001-01-24 13:03:38 +00:00
|
|
|
struct pthread *curthread = _get_curthread();
|
|
|
|
|
2000-10-25 11:46:07 +00:00
|
|
|
/* Check for signals whose actions are SIG_DFL: */
|
|
|
|
if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) {
|
|
|
|
/*
|
|
|
|
* Check to see if a temporary signal handler is
|
|
|
|
* installed for sigwaiters:
|
|
|
|
*/
|
|
|
|
if (_thread_dfl_count[sig] == 0)
|
|
|
|
/*
|
|
|
|
* Deliver the signal to the process if a handler
|
|
|
|
* is not installed:
|
|
|
|
*/
|
|
|
|
kill(getpid(), sig);
|
|
|
|
/*
|
|
|
|
* Assuming we're still running after the above kill(),
|
|
|
|
* make any necessary state changes to the thread:
|
|
|
|
*/
|
|
|
|
thread_sig_check_state(pthread, sig);
|
|
|
|
}
|
1999-12-17 00:56:36 +00:00
|
|
|
/*
|
|
|
|
* Check that the signal is not being ignored:
|
|
|
|
*/
|
2000-10-25 11:46:07 +00:00
|
|
|
else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
|
1999-12-17 00:56:36 +00:00
|
|
|
if (pthread->state == PS_SIGWAIT &&
|
|
|
|
sigismember(pthread->data.sigwait, sig)) {
|
|
|
|
/* Change the state of the thread to run: */
|
2000-10-13 22:12:32 +00:00
|
|
|
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
|
2000-10-25 11:46:07 +00:00
|
|
|
|
1999-12-17 00:56:36 +00:00
|
|
|
/* Return the signal number: */
|
|
|
|
pthread->signo = sig;
|
2001-01-24 13:03:38 +00:00
|
|
|
} else if (pthread == curthread) {
|
2000-10-13 22:12:32 +00:00
|
|
|
/* Add the signal to the pending set: */
|
|
|
|
sigaddset(&pthread->sigpend, sig);
|
|
|
|
if (!sigismember(&pthread->sigmask, sig)) {
|
|
|
|
/*
|
|
|
|
* Call the kernel scheduler which will safely
|
|
|
|
* install a signal frame for this thread:
|
|
|
|
*/
|
|
|
|
_thread_kern_sched_sig();
|
|
|
|
}
|
2000-10-25 11:46:07 +00:00
|
|
|
} else if (!sigismember(&pthread->sigmask, sig)) {
|
|
|
|
/* Protect the scheduling queues: */
|
|
|
|
_thread_kern_sig_defer();
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
2000-10-25 11:46:07 +00:00
|
|
|
* Perform any state changes due to signal
|
|
|
|
* arrival:
|
2000-10-13 22:12:32 +00:00
|
|
|
*/
|
2000-10-25 11:46:07 +00:00
|
|
|
thread_sig_add(pthread, sig, /* has args */ 0);
|
|
|
|
/* Unprotect the scheduling queues: */
|
|
|
|
_thread_kern_sig_undefer();
|
|
|
|
} else {
|
|
|
|
/* Add the signal to the thread's pending set: */
|
|
|
|
sigaddset(&pthread->sigpend, sig);
|
1999-12-17 00:56:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
|
|
|
* User thread signal handler wrapper.
|
|
|
|
*
|
|
|
|
* thread - current running thread
|
|
|
|
*/
|
1998-04-29 09:59:34 +00:00
|
|
|
void
|
2000-10-13 22:12:32 +00:00
|
|
|
_thread_sig_wrapper(void)
|
1998-04-29 09:59:34 +00:00
|
|
|
{
|
2000-10-13 22:12:32 +00:00
|
|
|
void (*sigfunc)(int, siginfo_t *, void *);
|
|
|
|
struct pthread_signal_frame *psf;
|
2001-01-24 13:03:38 +00:00
|
|
|
struct pthread *thread = _get_curthread();
|
2000-10-13 22:12:32 +00:00
|
|
|
|
|
|
|
/* Get the current frame and state: */
|
|
|
|
psf = thread->curframe;
|
2000-11-09 05:08:26 +00:00
|
|
|
thread->curframe = NULL;
|
|
|
|
PTHREAD_ASSERT(psf != NULL, "Invalid signal frame in signal handler");
|
2000-10-13 22:12:32 +00:00
|
|
|
|
2001-06-29 17:09:07 +00:00
|
|
|
/*
|
|
|
|
* We're coming from the kernel scheduler; clear the in
|
|
|
|
* scheduler flag:
|
|
|
|
*/
|
|
|
|
_thread_kern_in_sched = 0;
|
|
|
|
|
2000-11-09 05:08:26 +00:00
|
|
|
/* Check the thread's previous state: */
|
2000-10-13 22:12:32 +00:00
|
|
|
if (psf->saved_state.psd_state != PS_RUNNING) {
|
|
|
|
/*
|
|
|
|
* Do a little cleanup handling for those threads in
|
|
|
|
* queues before calling the signal handler. Signals
|
|
|
|
* for these threads are temporarily blocked until
|
|
|
|
* after cleanup handling.
|
|
|
|
*/
|
|
|
|
switch (psf->saved_state.psd_state) {
|
|
|
|
case PS_FDLR_WAIT:
|
|
|
|
case PS_FDLW_WAIT:
|
|
|
|
_fd_lock_backout(thread);
|
|
|
|
psf->saved_state.psd_state = PS_RUNNING;
|
|
|
|
break;
|
|
|
|
|
2000-11-09 05:08:26 +00:00
|
|
|
case PS_COND_WAIT:
|
|
|
|
_cond_wait_backout(thread);
|
|
|
|
psf->saved_state.psd_state = PS_RUNNING;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PS_MUTEX_WAIT:
|
|
|
|
_mutex_lock_backout(thread);
|
|
|
|
psf->saved_state.psd_state = PS_RUNNING;
|
2000-10-13 22:12:32 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
1996-01-22 00:23:58 +00:00
|
|
|
|
2000-11-09 05:08:26 +00:00
|
|
|
/* Unblock the signal in case we don't return from the handler: */
|
|
|
|
_thread_sigq[psf->signo - 1].blocked = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lower the priority before calling the handler in case
|
|
|
|
* it never returns (longjmps back):
|
|
|
|
*/
|
|
|
|
thread->active_priority &= ~PTHREAD_SIGNAL_PRIORITY;
|
|
|
|
|
1998-04-29 09:59:34 +00:00
|
|
|
/*
|
2000-11-09 05:08:26 +00:00
|
|
|
* Reenable interruptions without checking for the need to
|
|
|
|
* context switch:
|
1998-04-29 09:59:34 +00:00
|
|
|
*/
|
2000-11-09 05:08:26 +00:00
|
|
|
thread->sig_defer_count = 0;
|
2000-10-13 22:12:32 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check that a custom handler is installed and if the signal
|
|
|
|
* is not blocked:
|
|
|
|
*/
|
|
|
|
sigfunc = _thread_sigact[psf->signo - 1].sa_sigaction;
|
|
|
|
if (((__sighandler_t *)sigfunc != SIG_DFL) &&
|
|
|
|
((__sighandler_t *)sigfunc != SIG_IGN)) {
|
2000-11-09 05:08:26 +00:00
|
|
|
DBG_MSG("_thread_sig_wrapper: Calling signal handler for "
|
|
|
|
"thread 0x%p\n", thread);
|
1999-12-17 00:56:36 +00:00
|
|
|
/*
|
2000-11-09 05:08:26 +00:00
|
|
|
* Dispatch the signal via the custom signal
|
|
|
|
* handler:
|
2000-10-13 22:12:32 +00:00
|
|
|
*/
|
2000-11-09 05:08:26 +00:00
|
|
|
if (psf->sig_has_args == 0)
|
|
|
|
(*(sigfunc))(psf->signo, NULL, NULL);
|
|
|
|
else if ((_thread_sigact[psf->signo - 1].sa_flags &
|
|
|
|
SA_SIGINFO) != 0)
|
|
|
|
(*(sigfunc))(psf->signo, &psf->siginfo, &psf->uc);
|
|
|
|
else
|
|
|
|
(*(sigfunc))(psf->signo,
|
|
|
|
(siginfo_t *)psf->siginfo.si_code, &psf->uc);
|
1999-12-17 00:56:36 +00:00
|
|
|
}
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
2000-11-09 05:08:26 +00:00
|
|
|
* Call the kernel scheduler to safely restore the frame and
|
|
|
|
* schedule the next thread:
|
2000-10-13 22:12:32 +00:00
|
|
|
*/
|
2000-11-09 05:08:26 +00:00
|
|
|
_thread_kern_sched_frame(psf);
|
1999-12-17 00:56:36 +00:00
|
|
|
}
|
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
static void
|
2000-11-09 05:08:26 +00:00
|
|
|
thread_sigframe_add(pthread_t thread, int sig, int has_args)
|
1999-12-17 00:56:36 +00:00
|
|
|
{
|
2000-11-09 05:08:26 +00:00
|
|
|
struct pthread_signal_frame *psf = NULL;
|
2000-10-13 22:12:32 +00:00
|
|
|
unsigned long stackp = 0;
|
|
|
|
|
|
|
|
/* Get the top of the thread's stack: */
|
2000-11-09 05:08:26 +00:00
|
|
|
switch (thread->ctxtype) {
|
2000-10-13 22:12:32 +00:00
|
|
|
case CTX_JB:
|
|
|
|
case CTX_JB_NOSIG:
|
2000-11-09 05:08:26 +00:00
|
|
|
stackp = GET_STACK_JB(thread->ctx.jb);
|
2000-10-13 22:12:32 +00:00
|
|
|
break;
|
|
|
|
case CTX_SJB:
|
2000-11-09 05:08:26 +00:00
|
|
|
stackp = GET_STACK_SJB(thread->ctx.sigjb);
|
2000-10-13 22:12:32 +00:00
|
|
|
break;
|
|
|
|
case CTX_UC:
|
2000-11-09 05:08:26 +00:00
|
|
|
stackp = GET_STACK_UC(&thread->ctx.uc);
|
2000-10-13 22:12:32 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
PANIC("Invalid thread context type");
|
|
|
|
break;
|
|
|
|
}
|
1999-12-17 00:56:36 +00:00
|
|
|
|
|
|
|
/*
|
2000-10-13 22:12:32 +00:00
|
|
|
* Leave a little space on the stack and round down to the
|
|
|
|
* nearest aligned word:
|
1999-12-17 00:56:36 +00:00
|
|
|
*/
|
2000-10-13 22:12:32 +00:00
|
|
|
stackp -= sizeof(double);
|
|
|
|
stackp &= ~0x3UL;
|
|
|
|
|
|
|
|
/* Allocate room on top of the stack for a new signal frame: */
|
|
|
|
stackp -= sizeof(struct pthread_signal_frame);
|
|
|
|
|
2000-11-09 05:08:26 +00:00
|
|
|
psf = (struct pthread_signal_frame *) stackp;
|
1999-12-17 00:56:36 +00:00
|
|
|
|
2000-11-09 05:08:26 +00:00
|
|
|
/* Save the current context in the signal frame: */
|
|
|
|
thread_sigframe_save(thread, psf);
|
1999-12-17 00:56:36 +00:00
|
|
|
|
2000-11-09 05:08:26 +00:00
|
|
|
/* Set handler specific information: */
|
|
|
|
psf->sig_has_args = has_args;
|
|
|
|
psf->signo = sig;
|
|
|
|
if (has_args) {
|
|
|
|
/* Copy the signal handler arguments to the signal frame: */
|
|
|
|
memcpy(&psf->uc, &_thread_sigq[psf->signo - 1].uc,
|
|
|
|
sizeof(psf->uc));
|
|
|
|
memcpy(&psf->siginfo, &_thread_sigq[psf->signo - 1].siginfo,
|
|
|
|
sizeof(psf->siginfo));
|
|
|
|
}
|
1999-12-17 00:56:36 +00:00
|
|
|
|
2000-11-20 13:12:44 +00:00
|
|
|
/* Setup the signal mask: */
|
|
|
|
SIGSETOR(thread->sigmask, _thread_sigact[sig - 1].sa_mask);
|
|
|
|
sigaddset(&thread->sigmask, sig);
|
|
|
|
|
2000-11-09 05:08:26 +00:00
|
|
|
/* Set up the new frame: */
|
|
|
|
thread->curframe = psf;
|
|
|
|
thread->ctxtype = CTX_JB_NOSIG;
|
|
|
|
thread->longjmp_val = 1;
|
|
|
|
thread->flags &= PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE |
|
|
|
|
PTHREAD_FLAGS_IN_SYNCQ;
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
2000-11-09 05:08:26 +00:00
|
|
|
* Set up the context:
|
2000-10-13 22:12:32 +00:00
|
|
|
*/
|
2000-11-14 20:00:19 +00:00
|
|
|
stackp -= sizeof(double);
|
2000-11-09 05:08:26 +00:00
|
|
|
_setjmp(thread->ctx.jb);
|
|
|
|
SET_STACK_JB(thread->ctx.jb, stackp);
|
|
|
|
SET_RETURN_ADDR_JB(thread->ctx.jb, _thread_sig_wrapper);
|
2000-10-13 22:12:32 +00:00
|
|
|
}
|
2000-11-09 05:08:26 +00:00
|
|
|
|
2000-10-13 22:12:32 +00:00
|
|
|
void
|
2000-11-09 05:08:26 +00:00
|
|
|
_thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf)
|
2000-10-13 22:12:32 +00:00
|
|
|
{
|
2000-11-09 05:08:26 +00:00
|
|
|
thread->ctxtype = psf->ctxtype;
|
|
|
|
memcpy(&thread->ctx.uc, &psf->ctx.uc, sizeof(thread->ctx.uc));
|
2000-10-13 22:12:32 +00:00
|
|
|
/*
|
2000-11-09 05:08:26 +00:00
|
|
|
* Only restore the signal mask if it hasn't been changed
|
|
|
|
* by the application during invocation of the signal handler:
|
2000-10-13 22:12:32 +00:00
|
|
|
*/
|
2000-11-09 05:08:26 +00:00
|
|
|
if (thread->sigmask_seqno == psf->saved_state.psd_sigmask_seqno)
|
|
|
|
thread->sigmask = psf->saved_state.psd_sigmask;
|
|
|
|
thread->curframe = psf->saved_state.psd_curframe;
|
2000-10-13 22:12:32 +00:00
|
|
|
thread->wakeup_time = psf->saved_state.psd_wakeup_time;
|
|
|
|
thread->data = psf->saved_state.psd_wait_data;
|
2000-11-09 05:08:26 +00:00
|
|
|
thread->state = psf->saved_state.psd_state;
|
|
|
|
thread->flags = psf->saved_state.psd_flags;
|
|
|
|
thread->interrupted = psf->saved_state.psd_interrupted;
|
|
|
|
thread->longjmp_val = psf->saved_state.psd_longjmp_val;
|
|
|
|
thread->signo = psf->saved_state.psd_signo;
|
|
|
|
thread->sig_defer_count = psf->saved_state.psd_sig_defer_count;
|
2000-10-13 22:12:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
thread_sigframe_save(pthread_t thread, struct pthread_signal_frame *psf)
|
|
|
|
{
|
2000-11-09 05:08:26 +00:00
|
|
|
psf->ctxtype = thread->ctxtype;
|
|
|
|
memcpy(&psf->ctx.uc, &thread->ctx.uc, sizeof(thread->ctx.uc));
|
2000-10-13 22:12:32 +00:00
|
|
|
psf->saved_state.psd_sigmask = thread->sigmask;
|
2000-11-09 05:08:26 +00:00
|
|
|
psf->saved_state.psd_curframe = thread->curframe;
|
2000-10-13 22:12:32 +00:00
|
|
|
psf->saved_state.psd_wakeup_time = thread->wakeup_time;
|
|
|
|
psf->saved_state.psd_wait_data = thread->data;
|
2000-11-09 05:08:26 +00:00
|
|
|
psf->saved_state.psd_state = thread->state;
|
|
|
|
psf->saved_state.psd_flags = thread->flags &
|
|
|
|
(PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE);
|
|
|
|
psf->saved_state.psd_interrupted = thread->interrupted;
|
|
|
|
psf->saved_state.psd_longjmp_val = thread->longjmp_val;
|
|
|
|
psf->saved_state.psd_sigmask_seqno = thread->sigmask_seqno;
|
|
|
|
psf->saved_state.psd_signo = thread->signo;
|
|
|
|
psf->saved_state.psd_sig_defer_count = thread->sig_defer_count;
|
2000-10-13 22:12:32 +00:00
|
|
|
}
|
|
|
|
|