o Eliminate upcall for PTHREAD_SCOPE_SYSTEM threads; such a thread is now
  a system-bound thread, and no upcall is generated when it blocks.

o Add the ability for libkse to run in pure 1:1 threading mode;
  defining SYSTEM_SCOPE_ONLY in the Makefile turns this option on.

o Eliminate the code that installed dummy signal handlers for sigwait()
  calls.

o Add a hash table for looking up threads.

Reviewed by: deischen
davidxu 2003-07-17 23:02:30 +00:00
parent 97d2d9dfed
commit 8cbb5ce673
32 changed files with 1206 additions and 792 deletions

View File

@ -22,6 +22,10 @@ CFLAGS+=-fno-builtin
CFLAGS+=-D_LOCK_DEBUG
#CFLAGS+= -g
# Uncomment this if you want to build a 1:1 threading mode library
# however, it then no longer strictly conforms to POSIX
# CFLAGS+=-DSYSTEM_SCOPE_ONLY
LDFLAGS= -Wl,--version-script=${.CURDIR}/pthread.map
# enable extra internal consistency checks

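As the _pthread_create() hunk further below shows, defining SYSTEM_SCOPE_ONLY
simply ORs PTHREAD_SCOPE_SYSTEM into every new thread's attribute flags. A
minimal sketch of the portable equivalent, using only the standard pthread
API; this program is illustrative and not part of the commit:

#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
	printf("running as a bound (system scope) thread\n");
	return (NULL);
}

int
main(void)
{
	pthread_attr_t attr;
	pthread_t td;

	pthread_attr_init(&attr);
	/* SYSTEM_SCOPE_ONLY forces this flag for every thread. */
	pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
	pthread_create(&td, &attr, worker, NULL);
	pthread_join(td, NULL);
	return (0);
}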
View File

@ -63,6 +63,8 @@ sigsuspender (void *arg)
/* Allow these signals to wake us up during a sigsuspend. */
sigfillset (&suspender_mask); /* Default action */
sigdelset (&suspender_mask, SIGKILL); /* Cannot catch */
sigdelset (&suspender_mask, SIGSTOP); /* Cannot catch */
sigdelset (&suspender_mask, SIGINT); /* terminate */
sigdelset (&suspender_mask, SIGHUP); /* terminate */
sigdelset (&suspender_mask, SIGQUIT); /* create core image */

View File

@ -107,9 +107,11 @@ _pthread_cancel(pthread_t pthread)
/* Ignore - only here to silence -Wall: */
break;
}
if ((pthread->blocked != 0) &&
((pthread->cancelflags & THR_AT_CANCEL_POINT) != 0))
kse_thr_interrupt(&pthread->tmbx, -1);
if ((pthread->cancelflags & THR_AT_CANCEL_POINT) &&
(pthread->blocked != 0 ||
pthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
kse_thr_interrupt(&pthread->tmbx,
KSE_INTR_INTERRUPT, 0);
}
/*

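Seen from the application, the widened test above means pthread_cancel() now
also interrupts a bound (system scope) thread blocked in the kernel at a
cancellation point, via kse_thr_interrupt(). A minimal illustration, assuming
only the standard API (not part of the commit):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
victim(void *arg)
{
	sleep(60);		/* cancellation point; blocks in the kernel */
	return (NULL);
}

int
main(void)
{
	pthread_t td;
	void *res;

	pthread_create(&td, NULL, victim, NULL);
	sleep(1);		/* let the victim block */
	pthread_cancel(td);	/* interrupts the blocked thread */
	pthread_join(td, &res);
	printf("canceled: %s\n", res == PTHREAD_CANCELED ? "yes" : "no");
	return (0);
}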
View File

@ -89,7 +89,7 @@ _thr_setconcurrency(int new_level)
/* Race condition, but so what. */
kse_count = _kse_initial->k_kseg->kg_ksecount;
for (i = kse_count; i < new_level; i++) {
newkse = _kse_alloc(curthread);
newkse = _kse_alloc(curthread, 0);
if (newkse == NULL) {
DBG_MSG("Can't alloc new KSE.\n");
ret = EAGAIN;

View File

@ -57,6 +57,7 @@ int _thread_PS_DEAD_value = PS_DEAD;
static void free_thread(struct pthread *curthread, struct pthread *thread);
static int create_stack(struct pthread_attr *pattr);
static void free_stack(struct pthread_attr *pattr);
static void thread_start(struct pthread *curthread,
void *(*start_routine) (void *), void *arg);
@ -91,7 +92,6 @@ int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
void *(*start_routine) (void *), void *arg)
{
struct kse *curkse;
struct pthread *curthread, *new_thread;
struct kse *kse = NULL;
struct kse_group *kseg = NULL;
@ -132,14 +132,16 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->attr = _pthread_attr_default;
else
new_thread->attr = *(*attr);
#ifdef SYSTEM_SCOPE_ONLY
new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
#endif
if (create_stack(&new_thread->attr) != 0) {
/* Insufficient memory to create a stack: */
ret = EAGAIN;
_thr_free(curthread, new_thread);
}
else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
(((kse = _kse_alloc(curthread)) == NULL)
(((kse = _kse_alloc(curthread, 1)) == NULL)
|| ((kseg = _kseg_alloc(curthread)) == NULL))) {
/* Insufficient memory to create a new KSE/KSEG: */
ret = EAGAIN;
@ -147,15 +149,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
kse->k_mbx.km_flags |= KMF_DONE;
_kse_free(curthread, kse);
}
if ((new_thread->attr.flags & THR_STACK_USER) == 0) {
crit = _kse_critical_enter();
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/* Stack routines don't use malloc/free. */
_thr_stack_free(&new_thread->attr);
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
free_stack(&new_thread->attr);
_thr_free(curthread, new_thread);
}
else {
@ -178,9 +172,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
PTHREAD_CANCEL_DEFERRED;
/* Initialize the thread for signals: */
new_thread->sigmask = curthread->sigmask;
/* No thread is wanting to join to this one: */
new_thread->joiner = NULL;
@ -193,6 +184,8 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
crit = _kse_critical_enter();
THR_GETCONTEXT(&new_thread->tmbx.tm_context);
/* Initialize the thread for signals: */
new_thread->sigmask = curthread->sigmask;
_kse_critical_leave(crit);
new_thread->tmbx.tm_udata = new_thread;
new_thread->tmbx.tm_context.uc_sigmask =
@ -278,9 +271,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
}
else {
kse->k_curthread = NULL;
#ifdef NOT_YET
kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD;
#endif
new_thread->kse = kse;
new_thread->kseg = kse->k_kseg;
kse->k_mbx.km_udata = kse;
@ -308,6 +299,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
static void
free_thread(struct pthread *curthread, struct pthread *thread)
{
free_stack(&thread->attr);
if ((thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
/* Free the KSE and KSEG. */
_kseg_free(thread->kseg);
@ -332,6 +324,22 @@ create_stack(struct pthread_attr *pattr)
return (ret);
}
static void
free_stack(struct pthread_attr *pattr)
{
struct kse *curkse;
kse_critical_t crit;
if ((pattr->flags & THR_STACK_USER) == 0) {
crit = _kse_critical_enter();
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/* Stack routines don't use malloc/free. */
_thr_stack_free(pattr);
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
}
static void
thread_start(struct pthread *curthread, void *(*start_routine) (void *),

View File

@ -56,19 +56,17 @@ _thr_ref_add(struct pthread *curthread, struct pthread *thread,
crit = _kse_critical_enter();
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
TAILQ_FOREACH(pthread, &_thread_list, tle) {
if (pthread == thread) {
if ((include_dead == 0) &&
((pthread->state == PS_DEAD) ||
((pthread->state == PS_DEADLOCK) ||
((pthread->flags & THR_FLAGS_EXITING) != 0))))
pthread = NULL;
else {
thread->refcount++;
if (curthread != NULL)
curthread->critical_count++;
}
break;
pthread = _thr_hash_find(thread);
if (pthread) {
if ((include_dead == 0) &&
((pthread->state == PS_DEAD) ||
((pthread->state == PS_DEADLOCK) ||
((pthread->flags & THR_FLAGS_EXITING) != 0))))
pthread = NULL;
else {
pthread->refcount++;
if (curthread != NULL)
curthread->critical_count++;
}
}
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);

View File

@ -259,12 +259,19 @@ _libpthread_init(struct pthread *curthread)
_kse_init();
/* Initialize the initial kse and kseg. */
_kse_initial = _kse_alloc(NULL);
#ifdef SYSTEM_SCOPE_ONLY
_kse_initial = _kse_alloc(NULL, 1);
#else
_kse_initial = _kse_alloc(NULL, 0);
#endif
if (_kse_initial == NULL)
PANIC("Can't allocate initial kse.");
_kse_initial->k_kseg = _kseg_alloc(NULL);
if (_kse_initial->k_kseg == NULL)
PANIC("Can't allocate initial kseg.");
#ifdef SYSTEM_SCOPE_ONLY
_kse_initial->k_kseg->kg_flags |= KGF_SINGLE_THREAD;
#endif
_kse_initial->k_schedq = &_kse_initial->k_kseg->kg_schedq;
TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_kseq, _kse_initial, k_kgqe);
@ -326,7 +333,9 @@ init_main_thread(struct pthread *thread)
/* Setup the thread attributes. */
thread->attr = _pthread_attr_default;
#ifdef SYSTEM_SCOPE_ONLY
thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
#endif
/*
* Set up the thread stack.
*
@ -463,9 +472,6 @@ init_private(void)
TAILQ_INIT(&_thread_list);
TAILQ_INIT(&_thread_gc_list);
/* Initialize the SIG_DFL dummy handler count. */
bzero(_thread_dfl_count, sizeof(_thread_dfl_count));
/*
* Initialize the lock for temporary installation of signal
* handlers (to support sigwait() semantics) and for the

View File

@ -120,6 +120,10 @@ static int active_kse_count = 0;
static int active_kseg_count = 0;
static u_int64_t next_uniqueid = 1;
LIST_HEAD(thread_hash_head, pthread);
#define THREAD_HASH_QUEUES 127
static struct thread_hash_head thr_hashtable[THREAD_HASH_QUEUES];
#define THREAD_HASH(thrd) ((unsigned long)thrd % THREAD_HASH_QUEUES)
#ifdef DEBUG_THREAD_KERN
static void dump_queues(struct kse *curkse);
@ -127,13 +131,11 @@ static void dump_queues(struct kse *curkse);
static void kse_check_completed(struct kse *kse);
static void kse_check_waitq(struct kse *kse);
static void kse_fini(struct kse *curkse);
static void kse_reinit(struct kse *kse);
static void kse_reinit(struct kse *kse, int sys_scope);
static void kse_sched_multi(struct kse *curkse);
#ifdef NOT_YET
static void kse_sched_single(struct kse *curkse);
#endif
static void kse_switchout_thread(struct kse *kse, struct pthread *thread);
static void kse_wait(struct kse *kse, struct pthread *td_wait);
static void kse_wait(struct kse *kse, struct pthread *td_wait, int sigseq);
static void kse_free_unlocked(struct kse *kse);
static void kseg_free_unlocked(struct kse_group *kseg);
static void kseg_init(struct kse_group *kseg);
@ -385,16 +387,30 @@ _kse_setthreaded(int threaded)
*/
_thr_signal_init();
_kse_initial->k_flags |= KF_STARTED;
#ifdef SYSTEM_SCOPE_ONLY
/*
* For a bound thread, the kernel reads the mailbox pointer only
* once, so set it here before calling kse_create().
*/
KSE_SET_MBOX(_kse_initial, _thr_initial);
_kse_initial->k_mbx.km_flags |= KMF_BOUND;
#endif
if (kse_create(&_kse_initial->k_mbx, 0) != 0) {
_kse_initial->k_flags &= ~KF_STARTED;
__isthreaded = 0;
/* may abort() */
PANIC("kse_create() failed\n");
return (-1);
}
#ifndef SYSTEM_SCOPE_ONLY
/* Set current thread to initial thread */
KSE_SET_MBOX(_kse_initial, _thr_initial);
_thr_start_sig_daemon();
_thr_setmaxconcurrency();
#endif
}
return (0);
}
@ -592,7 +608,9 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
* instead of the next thread in the run queue, but
* we don't bother checking for that.
*/
if ((curthread->state == PS_DEAD) ||
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
kse_sched_single(curkse);
else if ((curthread->state == PS_DEAD) ||
(((td = KSE_RUNQ_FIRST(curkse)) == NULL) &&
(curthread->state != PS_RUNNING)) ||
((td != NULL) && (td->lock_switch == 0))) {
@ -693,30 +711,34 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
* KSE, but we use a separate scheduler so that it can be fine-tuned
* to be more efficient (and perhaps not need a separate stack for
* the KSE, allowing it to use the thread's stack).
*
* XXX - This probably needs some work.
*/
#ifdef NOT_YET
static void
kse_sched_single(struct kse *curkse)
{
struct pthread *curthread = curkse->k_curthread;
struct pthread *td_wait;
struct timespec ts;
int level;
sigset_t sigmask;
int i, sigseqno, level, first = 0;
if (curthread->active == 0) {
if (curthread->state != PS_RUNNING) {
/* Check to see if the thread has timed out. */
KSE_GET_TOD(curkse, &ts);
if (thr_timedout(curthread, &ts) != 0) {
curthread->timeout = 1;
curthread->state = PS_RUNNING;
}
}
}
if ((curkse->k_flags & KF_INITIALIZED) == 0) {
/* Setup this KSEs specific data. */
_ksd_setprivate(&curkse->k_ksd);
_set_curkse(curkse);
curkse->k_flags |= KF_INITIALIZED;
first = 1;
curthread->active = 1;
/* Setup kernel signal masks for new thread. */
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
/*
* Enter a critical region. This is meaningless for a bound
* thread; it is done only so that other code, which expects
* the mailbox to be cleared, keeps working.
*/
_kse_critical_enter();
}
/* This thread no longer needs to yield the CPU: */
curthread->critical_yield = 0;
curthread->need_switchout = 0;
@ -726,7 +748,8 @@ kse_sched_single(struct kse *curkse)
* There is no scheduling queue for single threaded KSEs,
* but we need a lock for protection regardless.
*/
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
if (curthread->lock_switch == 0)
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
/*
* This has to do the job of kse_switchout_thread(), only
@ -735,33 +758,46 @@ kse_sched_single(struct kse *curkse)
switch (curthread->state) {
case PS_DEAD:
curthread->check_pending = 0;
/* Unlock the scheduling queue and exit the KSE and thread. */
thr_cleaup(curkse, curthread);
thr_cleanup(curkse, curthread);
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
PANIC("bound thread shouldn't get here\n");
break;
case PS_COND_WAIT:
case PS_SIGWAIT:
PANIC("bound thread does not have SIGWAIT state\n");
case PS_SLEEP_WAIT:
/* Only insert threads that can timeout: */
if (curthread->wakeup_time.tv_sec != -1) {
/* Insert into the waiting queue: */
KSE_WAITQ_INSERT(curkse, curthread);
}
PANIC("bound thread does not have SLEEP_WAIT state\n");
case PS_SIGSUSPEND:
PANIC("bound thread does not have SIGSUSPEND state\n");
case PS_COND_WAIT:
break;
case PS_LOCKWAIT:
/*
* This state doesn't timeout.
*/
curthread->wakeup_time.tv_sec = -1;
curthread->wakeup_time.tv_nsec = -1;
level = curthread->locklevel - 1;
if (!_LCK_GRANTED(&curthread->lockusers[level]))
KSE_WAITQ_INSERT(curkse, curthread);
else
if (_LCK_GRANTED(&curthread->lockusers[level]))
THR_SET_STATE(curthread, PS_RUNNING);
break;
case PS_RUNNING:
if ((curthread->flags & THR_FLAGS_SUSPENDED) != 0) {
THR_SET_STATE(curthread, PS_SUSPENDED);
}
curthread->wakeup_time.tv_sec = -1;
curthread->wakeup_time.tv_nsec = -1;
break;
case PS_JOIN:
case PS_MUTEX_WAIT:
case PS_RUNNING:
case PS_SIGSUSPEND:
case PS_SIGWAIT:
case PS_SUSPENDED:
case PS_DEADLOCK:
default:
@ -769,41 +805,66 @@ kse_sched_single(struct kse *curkse)
* These states don't timeout and don't need
* to be in the waiting queue.
*/
curthread->wakeup_time.tv_sec = -1;
curthread->wakeup_time.tv_nsec = -1;
break;
}
while (curthread->state != PS_RUNNING) {
curthread->active = 0;
td_wait = KSE_WAITQ_FIRST(curkse);
sigseqno = curkse->k_sigseqno;
if (curthread->check_pending != 0) {
/*
* Install pending signals into the frame; this may
* cause a mutex or condvar backout.
*/
curthread->check_pending = 0;
SIGFILLSET(sigmask);
kse_wait(curkse, td_wait);
if (td_wait != NULL) {
KSE_GET_TOD(curkse, &ts);
if (thr_timedout(curthread, &ts)) {
/* Indicate the thread timedout: */
td_wait->timeout = 1;
/* Make the thread runnable. */
THR_SET_STATE(td_wait, PS_RUNNING);
KSE_WAITQ_REMOVE(curkse, td_wait);
/*
* Lock out kernel signal code when we are processing
* signals, and get a fresh copy of the signal mask.
*/
__sys_sigprocmask(SIG_SETMASK, &sigmask,
&curthread->sigmask);
for (i = 1; i <= _SIG_MAXSIG; i++) {
if (SIGISMEMBER(curthread->sigmask, i))
continue;
if (SIGISMEMBER(curthread->sigpend, i))
_thr_sig_add(curthread, i,
&curthread->siginfo[i-1]);
}
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask,
NULL);
/* The above code might make the thread runnable */
if (curthread->state == PS_RUNNING)
break;
}
THR_DEACTIVATE_LAST_LOCK(curthread);
kse_wait(curkse, curthread, sigseqno);
THR_ACTIVATE_LAST_LOCK(curthread);
KSE_GET_TOD(curkse, &ts);
if (thr_timedout(curthread, &ts)) {
/* Indicate the thread timedout: */
curthread->timeout = 1;
/* Make the thread runnable. */
THR_SET_STATE(curthread, PS_RUNNING);
}
}
/* Remove the frame reference. */
curthread->curframe = NULL;
/* Unlock the scheduling queue. */
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
if (curthread->lock_switch == 0) {
/* Unlock the scheduling queue. */
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
}
/*
* Continue the thread at its current frame:
*/
DBG_MSG("Continuing bound thread %p\n", curthread);
_thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread);
PANIC("Thread has returned from _thread_switch");
if (first) {
_kse_critical_leave(&curthread->tmbx);
pthread_exit(curthread->start_routine(curthread->arg));
}
}
#endif
#ifdef DEBUG_THREAD_KERN
static void
@ -929,7 +990,7 @@ kse_sched_multi(struct kse *curkse)
* no more threads.
*/
td_wait = KSE_WAITQ_FIRST(curkse);
kse_wait(curkse, td_wait);
kse_wait(curkse, td_wait, 0);
kse_check_completed(curkse);
kse_check_waitq(curkse);
}
@ -1003,8 +1064,8 @@ kse_sched_multi(struct kse *curkse)
signalcontext(&curthread->tmbx.tm_context, 0,
(__sighandler_t *)thr_resume_wrapper);
#else
if ((curframe == NULL) && (curthread->check_pending != 0) &&
!THR_IN_CRITICAL(curthread)) {
if ((curframe == NULL) && (curthread->state == PS_RUNNING) &&
(curthread->check_pending != 0) && !THR_IN_CRITICAL(curthread)) {
curthread->check_pending = 0;
signalcontext(&curthread->tmbx.tm_context, 0,
(__sighandler_t *)thr_resume_wrapper);
@ -1129,7 +1190,11 @@ thr_cleanup(struct kse *curkse, struct pthread *thread)
THR_GCLIST_ADD(thread);
/* Use thread_list_lock */
active_threads--;
#ifdef SYSTEM_SCOPE_ONLY
if (active_threads == 0) {
#else
if (active_threads == 1) {
#endif
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
exit(0);
}
@ -1139,7 +1204,10 @@ thr_cleanup(struct kse *curkse, struct pthread *thread)
* A system scope thread forms a single-thread group; when
* the thread exits, its KSE and KSE group should be
* recycled as well.
* The KSE upcall stack belongs to the thread; clear it here.
*/
curkse->k_stack.ss_sp = 0;
curkse->k_stack.ss_size = 0;
kse_exit();
PANIC("kse_exit() failed for system scope thread");
}
@ -1239,30 +1307,30 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
* the mailbox is set for the current thread.
*/
if ((newthread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
#ifdef NOT_YET
/* We use the thread's stack as the KSE's stack. */
new_thread->kse->k_mbx.km_stack.ss_sp =
new_thread->attr.stackaddr_attr;
new_thread->kse->k_mbx.km_stack.ss_size =
new_thread->attr.stacksize_attr;
#endif
newthread->kse->k_mbx.km_stack.ss_sp =
newthread->attr.stackaddr_attr;
newthread->kse->k_mbx.km_stack.ss_size =
newthread->attr.stacksize_attr;
/*
* No need to lock the scheduling queue since the
* KSE/KSEG pair have not yet been started.
*/
KSEG_THRQ_ADD(newthread->kseg, newthread);
if (newthread->state == PS_RUNNING)
THR_RUNQ_INSERT_TAIL(newthread);
newthread->kse->k_curthread = NULL;
newthread->kse->k_mbx.km_flags = 0;
newthread->kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
/* this thread never gives up kse */
newthread->active = 1;
newthread->kse->k_curthread = newthread;
newthread->kse->k_mbx.km_flags = KMF_BOUND;
newthread->kse->k_mbx.km_func = (kse_func_t *)kse_sched_single;
newthread->kse->k_mbx.km_quantum = 0;
KSE_SET_MBOX(newthread->kse, newthread);
/*
* This thread needs a new KSE and KSEG.
*/
newthread->kse->k_flags &= ~KF_INITIALIZED;
newthread->kse->k_flags |= KF_STARTED;
/* Fire up! */
ret = kse_create(&newthread->kse->k_mbx, 1);
if (ret != 0)
ret = errno;
@ -1492,7 +1560,7 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
!SIGISMEMBER(thread->sigmask, i)) {
restart = _thread_sigact[1 - 1].sa_flags & SA_RESTART;
kse_thr_interrupt(&thread->tmbx,
restart ? -2 : -1);
restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0);
break;
}
}
@ -1617,7 +1685,7 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
* This must be called with the scheduling lock held.
*/
static void
kse_wait(struct kse *kse, struct pthread *td_wait)
kse_wait(struct kse *kse, struct pthread *td_wait, int sigseqno)
{
struct timespec ts, ts_sleep;
int saved_flags;
@ -1640,10 +1708,15 @@ kse_wait(struct kse *kse, struct pthread *td_wait)
KSE_SET_IDLE(kse);
kse->k_kseg->kg_idle_kses++;
KSE_SCHED_UNLOCK(kse, kse->k_kseg);
saved_flags = kse->k_mbx.km_flags;
kse->k_mbx.km_flags |= KMF_NOUPCALL;
kse_release(&ts_sleep);
kse->k_mbx.km_flags = saved_flags;
if ((kse->k_kseg->kg_flags & KGF_SINGLE_THREAD) &&
(kse->k_sigseqno != sigseqno))
; /* don't sleep */
else {
saved_flags = kse->k_mbx.km_flags;
kse->k_mbx.km_flags |= KMF_NOUPCALL;
kse_release(&ts_sleep);
kse->k_mbx.km_flags = saved_flags;
}
KSE_SCHED_LOCK(kse, kse->k_kseg);
if (KSE_IS_IDLE(kse)) {
KSE_CLEAR_IDLE(kse);
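The k_sigseqno comparison above is a lost-wakeup guard: kse_wait() snapshots
the sequence number before deciding to sleep and skips kse_release() if the
signal side bumped the counter in the meantime. The same pattern, reduced to
a standalone sketch with pthread primitives (names here are illustrative,
not libkse internals):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int seqno;			/* bumped once per event */

static void
waiter(int snapshot)
{
	pthread_mutex_lock(&lock);
	if (seqno == snapshot)		/* nothing arrived; safe to sleep */
		pthread_cond_wait(&cv, &lock);
	/* else: skip the sleep, an event already arrived */
	pthread_mutex_unlock(&lock);
}

static void
signaler(void)
{
	pthread_mutex_lock(&lock);
	seqno++;			/* record the event... */
	pthread_cond_signal(&cv);	/* ...then wake any sleeper */
	pthread_mutex_unlock(&lock);
}

int
main(void)
{
	int snap = seqno;

	signaler();	/* event arrives before the waiter sleeps */
	waiter(snap);	/* sees seqno != snap; returns without blocking */
	printf("no lost wakeup\n");
	return (0);
}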
@ -1965,7 +2038,7 @@ _kseg_free(struct kse_group *kseg)
* In this case, we don't need to (and can't) take any locks.
*/
struct kse *
_kse_alloc(struct pthread *curthread)
_kse_alloc(struct pthread *curthread, int sys_scope)
{
struct kse *kse = NULL;
kse_critical_t crit;
@ -1991,7 +2064,7 @@ _kse_alloc(struct pthread *curthread)
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
if (kse != NULL)
kse_reinit(kse);
kse_reinit(kse, sys_scope);
}
if ((kse == NULL) &&
((kse = (struct kse *)malloc(sizeof(*kse))) != NULL)) {
@ -2009,16 +2082,16 @@ _kse_alloc(struct pthread *curthread)
/*
* Create the KSE context.
*
* XXX - For now this is done here in the allocation.
* In the future, we may want to have it done
* outside the allocation so that scope system
* threads (one thread per KSE) are not required
* to have a stack for an unneeded kse upcall.
* Scope system threads (one thread per KSE) are not required
* to have a stack for an unneeded kse upcall.
*/
kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
kse->k_mbx.km_stack.ss_sp = (char *)malloc(KSE_STACKSIZE);
kse->k_mbx.km_stack.ss_size = KSE_STACKSIZE;
if (!sys_scope) {
kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
kse->k_stack.ss_sp = (char *) malloc(KSE_STACKSIZE);
kse->k_stack.ss_size = KSE_STACKSIZE;
} else {
kse->k_mbx.km_func = (kse_func_t *)kse_sched_single;
}
kse->k_mbx.km_udata = (void *)kse;
kse->k_mbx.km_quantum = 20000;
/*
@ -2026,9 +2099,8 @@ _kse_alloc(struct pthread *curthread)
* doesn't get used; a KSE running a scope system
* thread will use that thread's stack.
*/
kse->k_stack.ss_sp = kse->k_mbx.km_stack.ss_sp;
kse->k_stack.ss_size = kse->k_mbx.km_stack.ss_size;
if (kse->k_mbx.km_stack.ss_sp == NULL) {
kse->k_mbx.km_stack = kse->k_stack;
if (!sys_scope && kse->k_stack.ss_sp == NULL) {
for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
_lockuser_destroy(&kse->k_lockusers[i]);
}
@ -2049,7 +2121,8 @@ _kse_alloc(struct pthread *curthread)
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
}
free(kse->k_mbx.km_stack.ss_sp);
if (kse->k_stack.ss_sp)
free(kse->k_stack.ss_sp);
for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
_lockuser_destroy(&kse->k_lockusers[i]);
}
@ -2068,15 +2141,27 @@ _kse_alloc(struct pthread *curthread)
}
static void
kse_reinit(struct kse *kse)
kse_reinit(struct kse *kse, int sys_scope)
{
/*
* XXX - For now every kse has its stack.
* In the future, we may want to have it done
* outside the allocation so that scope system
* threads (one thread per KSE) are not required
* to have a stack for an unneeded kse upcall.
*/
if (!sys_scope) {
kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
if (kse->k_stack.ss_sp == NULL) {
/* XXX check allocation failure */
kse->k_stack.ss_sp = (char *) malloc(KSE_STACKSIZE);
kse->k_stack.ss_size = KSE_STACKSIZE;
}
kse->k_mbx.km_quantum = 20000;
} else {
kse->k_mbx.km_func = (kse_func_t *)kse_sched_single;
if (kse->k_stack.ss_sp)
free(kse->k_stack.ss_sp);
kse->k_stack.ss_sp = NULL;
kse->k_stack.ss_size = 0;
kse->k_mbx.km_quantum = 0;
}
kse->k_mbx.km_stack = kse->k_stack;
kse->k_mbx.km_udata = (void *)kse;
kse->k_mbx.km_curthread = NULL;
kse->k_mbx.km_flags = 0;
kse->k_curthread = 0;
kse->k_kseg = 0;
@ -2092,6 +2177,7 @@ kse_reinit(struct kse *kse)
kse->k_cpu = 0;
kse->k_done = 0;
kse->k_switch = 0;
kse->k_sigseqno = 0;
}
void
@ -2226,7 +2312,6 @@ thr_link(struct pthread *thread)
THR_LIST_ADD(thread);
active_threads++;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
@ -2241,11 +2326,39 @@ thr_unlink(struct pthread *thread)
crit = _kse_critical_enter();
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
THR_LIST_REMOVE(thread);
active_threads--;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
void
_thr_hash_add(struct pthread *thread)
{
struct thread_hash_head *head;
head = &thr_hashtable[THREAD_HASH(thread)];
LIST_INSERT_HEAD(head, thread, hle);
}
void
_thr_hash_remove(struct pthread *thread)
{
LIST_REMOVE(thread, hle);
}
struct pthread *
_thr_hash_find(struct pthread *thread)
{
struct pthread *td;
struct thread_hash_head *head;
head = &thr_hashtable[THREAD_HASH(thread)];
LIST_FOREACH(td, head, hle) {
if (td == thread)
return (thread);
}
return (NULL);
}

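The lookup path added above is an intrusive hash keyed on the thread pointer
itself: 127 buckets (a prime, so pointer alignment does not cluster the
chains) of <sys/queue.h> LIST chains, with _thr_hash_find() validating a
caller-supplied pointer by searching only its own bucket instead of walking
the whole thread list. A standalone sketch of the same pattern (names here
are illustrative):

#include <sys/queue.h>
#include <stdio.h>

#define NBUCKETS	127	/* prime, as in the hunk above */
#define HASH(p)		((unsigned long)(p) % NBUCKETS)

struct node {
	LIST_ENTRY(node) link;	/* intrusive hash-chain entry */
	int value;
};
LIST_HEAD(bucket, node);
static struct bucket table[NBUCKETS];

static void
node_add(struct node *n)
{
	LIST_INSERT_HEAD(&table[HASH(n)], n, link);
}

/*
 * Returns n only if n was previously added, else NULL -- the same
 * "validate a caller-supplied pointer" trick _thr_hash_find() uses.
 */
static struct node *
node_find(struct node *n)
{
	struct node *it;

	LIST_FOREACH(it, &table[HASH(n)], link)
		if (it == n)
			return (it);
	return (NULL);
}

int
main(void)
{
	struct node a = { .value = 42 };

	node_add(&a);
	printf("found: %d\n", node_find(&a) != NULL ? a.value : -1);
	return (0);
}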
View File

@ -55,8 +55,9 @@ _nanosleep(const struct timespec *time_to_sleep,
errno = EINVAL;
ret = -1;
} else {
if (!_kse_isthreaded())
return __sys_nanosleep(time_to_sleep, time_remaining);
if (!_kse_isthreaded() ||
(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
return (__sys_nanosleep(time_to_sleep, time_remaining));
KSE_GET_TOD(curthread->kse, &ts);

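This _nanosleep() change is one instance of a pattern repeated below for
sigpending(), sigsuspend(), sigtimedwait(), pthread_sigmask() and
sched_yield(): a bound thread has nothing for the userland scheduler to do,
so the wrapper forwards straight to the system call. A condensed standalone
model of the dispatch (all names are illustrative stand-ins for the libkse
internals):

#include <stdio.h>

#define SCOPE_SYSTEM	0x1

struct thr { int flags; };
static int threaded = 1;

static int
sys_nanosleep_stub(void)
{
	printf("direct syscall\n");	/* kernel blocks the bound thread */
	return (0);
}

static int
mn_nanosleep_stub(void)
{
	printf("M:N scheduler path\n");	/* userland scheduler parks it */
	return (0);
}

static int
nanosleep_dispatch(struct thr *td)
{
	if (!threaded || (td->flags & SCOPE_SYSTEM))
		return (sys_nanosleep_stub());
	return (mn_nanosleep_stub());
}

int
main(void)
{
	struct thr bound = { SCOPE_SYSTEM }, green = { 0 };

	nanosleep_dispatch(&bound);
	nanosleep_dispatch(&green);
	return (0);
}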
View File

@ -202,6 +202,7 @@ struct kse {
int k_cpu; /* CPU ID when bound */
int k_done; /* this KSE is done */
int k_switch; /* thread switch in UTS */
int k_sigseqno; /* signal buffered count */
};
/*
@ -615,6 +616,9 @@ struct pthread {
/* Queue entry for GC lists: */
TAILQ_ENTRY(pthread) gcle;
/* Hash queue entry */
LIST_ENTRY(pthread) hle;
/*
* Lock for accesses to this thread structure.
*/
@ -662,7 +666,7 @@ struct pthread {
sigset_t oldsigmask;
sigset_t sigmask;
sigset_t sigpend;
int check_pending;
volatile int check_pending;
int refcount;
/* Thread state: */
@ -894,12 +898,14 @@ do { \
#define THR_LIST_ADD(thrd) do { \
if (((thrd)->flags & THR_FLAGS_IN_TDLIST) == 0) { \
TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \
_thr_hash_add(thrd); \
(thrd)->flags |= THR_FLAGS_IN_TDLIST; \
} \
} while (0)
#define THR_LIST_REMOVE(thrd) do { \
if (((thrd)->flags & THR_FLAGS_IN_TDLIST) != 0) { \
TAILQ_REMOVE(&_thread_list, thrd, tle); \
_thr_hash_remove(thrd); \
(thrd)->flags &= ~THR_FLAGS_IN_TDLIST; \
} \
} while (0)
@ -1000,13 +1006,6 @@ SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC);
/* Array of signal actions for this process: */
SCLASS struct sigaction _thread_sigact[_SIG_MAXSIG];
/*
* Array of counts of dummy handlers for SIG_DFL signals. This is used to
* assure that there is always a dummy signal handler installed while there
* is a thread sigwait()ing on the corresponding signal.
*/
SCLASS int _thread_dfl_count[_SIG_MAXSIG];
/*
* Lock for above count of dummy handlers and for the process signal
* mask and pending signal sets.
@ -1047,7 +1046,7 @@ void _cond_wait_backout(struct pthread *);
struct pthread *_get_curthread(void);
struct kse *_get_curkse(void);
void _set_curkse(struct kse *);
struct kse *_kse_alloc(struct pthread *);
struct kse *_kse_alloc(struct pthread *, int sys_scope);
kse_critical_t _kse_critical_enter(void);
void _kse_critical_leave(kse_critical_t);
int _kse_in_critical(void);
@ -1131,11 +1130,16 @@ void _thr_enter_cancellation_point(struct pthread *);
void _thr_leave_cancellation_point(struct pthread *);
int _thr_setconcurrency(int new_level);
int _thr_setmaxconcurrency(void);
void _thr_critical_enter(struct pthread *);
void _thr_critical_leave(struct pthread *);
int _thr_start_sig_daemon(void);
int _thr_getprocsig(int sig, siginfo_t *siginfo);
int _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo);
void _thr_signal_init(void);
void _thr_signal_deinit(void);
void _thr_hash_add(struct pthread *);
void _thr_hash_remove(struct pthread *);
struct pthread *_thr_hash_find(struct pthread *);
/*
* Aliases for _pthread functions. Should be called instead of

View File

@ -45,16 +45,55 @@
/* Prototypes: */
static void build_siginfo(siginfo_t *info, int signo);
static void thr_sig_check_state(struct pthread *pthread, int sig);
#ifndef SYSTEM_SCOPE_ONLY
static struct pthread *thr_sig_find(struct kse *curkse, int sig,
siginfo_t *info);
static void handle_special_signals(struct kse *curkse, int sig);
#endif
static void thr_sigframe_add(struct pthread *thread);
static void thr_sigframe_restore(struct pthread *thread,
struct pthread_sigframe *psf);
static void thr_sigframe_save(struct pthread *thread,
struct pthread_sigframe *psf);
#define SA_KILL 0x01 /* terminates process by default */
#define SA_STOP 0x02
#define SA_CONT 0x04
static int sigproptbl[NSIG] = {
SA_KILL, /* SIGHUP */
SA_KILL, /* SIGINT */
SA_KILL, /* SIGQUIT */
SA_KILL, /* SIGILL */
SA_KILL, /* SIGTRAP */
SA_KILL, /* SIGABRT */
SA_KILL, /* SIGEMT */
SA_KILL, /* SIGFPE */
SA_KILL, /* SIGKILL */
SA_KILL, /* SIGBUS */
SA_KILL, /* SIGSEGV */
SA_KILL, /* SIGSYS */
SA_KILL, /* SIGPIPE */
SA_KILL, /* SIGALRM */
SA_KILL, /* SIGTERM */
0, /* SIGURG */
SA_STOP, /* SIGSTOP */
SA_STOP, /* SIGTSTP */
SA_CONT, /* SIGCONT */
0, /* SIGCHLD */
SA_STOP, /* SIGTTIN */
SA_STOP, /* SIGTTOU */
0, /* SIGIO */
SA_KILL, /* SIGXCPU */
SA_KILL, /* SIGXFSZ */
SA_KILL, /* SIGVTALRM */
SA_KILL, /* SIGPROF */
0, /* SIGWINCH */
0, /* SIGINFO */
SA_KILL, /* SIGUSR1 */
SA_KILL /* SIGUSR2 */
};
/* #define DEBUG_SIGNAL */
#ifdef DEBUG_SIGNAL
#define DBG_MSG stdout_debug
@ -133,6 +172,8 @@ static void thr_sigframe_save(struct pthread *thread,
* signal unmasked.
*/
#ifndef SYSTEM_SCOPE_ONLY
static void *
sig_daemon(void *arg /* Unused */)
{
@ -143,13 +184,20 @@ sig_daemon(void *arg /* Unused */)
struct kse *curkse;
struct pthread *curthread = _get_curthread();
DBG_MSG("signal daemon started\n");
DBG_MSG("signal daemon started(%p)\n", curthread);
curthread->name = strdup("signal thread");
crit = _kse_critical_enter();
curkse = _get_curkse();
/*
* The daemon thread is a bound thread and must be created
* with all signals masked.
*/
#if 0
SIGFILLSET(set);
__sys_sigprocmask(SIG_SETMASK, &set, NULL);
#endif
__sys_sigpending(&set);
ts.tv_sec = 0;
ts.tv_nsec = 0;
@ -173,13 +221,14 @@ sig_daemon(void *arg /* Unused */)
return (0);
}
/* Utility function to create signal daemon thread */
int
_thr_start_sig_daemon(void)
{
pthread_attr_t attr;
sigset_t sigset, oldset;
SIGFILLSET(sigset);
pthread_sigmask(SIG_SETMASK, &sigset, &oldset);
pthread_attr_init(&attr);
@ -206,6 +255,13 @@ _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
/* Some signals need special handling: */
handle_special_signals(curkse, sig);
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO) {
/* Dump thread information to file: */
_thread_dump_info();
}
while ((thread = thr_sig_find(curkse, sig, info)) != NULL) {
/*
* Setup the target thread to receive the signal:
@ -233,11 +289,27 @@ _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
DBG_MSG("<<< _thr_sig_dispatch\n");
}
#endif /* ! SYSTEM_SCOPE_ONLY */
static __inline int
sigprop(int sig)
{
if (sig > 0 && sig < NSIG)
return (sigproptbl[_SIG_IDX(sig)]);
return (0);
}
void
_thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
{
__siginfohandler_t *sigfunc;
struct pthread *curthread;
struct kse *curkse;
struct sigaction act;
int sa_flags, err_save, intr_save, timeout_save;
DBG_MSG(">>> _thr_sig_handler(%d)\n", sig);
curkse = _get_curkse();
if ((curkse == NULL) || ((curkse->k_flags & KF_STARTED) == 0)) {
@ -253,12 +325,86 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
(*(sigfunc))(sig,
(siginfo_t*)(intptr_t)info->si_code, ucp);
}
return;
}
else {
/* Nothing. */
DBG_MSG("Got signal %d\n", sig);
/* XXX Bound thread will fall into this... */
curthread = _get_curthread();
if (curthread == NULL)
PANIC("No current thread.\n");
if (!(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
PANIC("Thread is not system scope.\n");
if (curthread->flags & THR_FLAGS_EXITING)
return;
curkse = _get_curkse();
/*
* If the thread is in a critical region or in the middle of
* a state transition, latch the signal into the buffer.
*/
if (_kse_in_critical() || THR_IN_CRITICAL(curthread) ||
(curthread->state != PS_RUNNING && curthread->curframe == NULL)) {
DBG_MSG(">>> _thr_sig_handler(%d) in critical\n", sig);
curthread->siginfo[sig-1] = *info;
curthread->check_pending = 1;
curkse->k_sigseqno++;
SIGADDSET(curthread->sigpend, sig);
/*
* If the KSE is about to idle itself but we have a signal
* ready, prevent it from sleeping; the kernel latches the
* wakeup request, so kse_release() returns immediately.
*/
if (KSE_IS_IDLE(curkse))
kse_wakeup(&curkse->k_mbx);
return;
}
/* It is now safe to invoke signal handler */
err_save = curthread->error;
timeout_save = curthread->timeout;
intr_save = curthread->interrupted;
/* Get a fresh copy of signal mask from kernel, for thread dump only */
__sys_sigprocmask(SIG_SETMASK, NULL, &curthread->sigmask);
_kse_critical_enter();
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
sigfunc = _thread_sigact[sig - 1].sa_sigaction;
sa_flags = _thread_sigact[sig - 1].sa_flags & SA_SIGINFO;
if (sa_flags & SA_RESETHAND) {
act.sa_handler = SIG_DFL;
act.sa_flags = SA_RESTART;
SIGEMPTYSET(act.sa_mask);
__sys_sigaction(sig, &act, NULL);
__sys_sigaction(sig, NULL, &_thread_sigact[sig - 1]);
}
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
_kse_critical_leave(&curthread->tmbx);
/* Now invoke real handler */
if (((__sighandler_t *)sigfunc != SIG_DFL) &&
((__sighandler_t *)sigfunc != SIG_IGN) &&
(sigfunc != (__siginfohandler_t *)_thr_sig_handler)) {
if ((sa_flags & SA_SIGINFO) != 0 || info == NULL)
(*(sigfunc))(sig, info, ucp);
else
(*(sigfunc))(sig, (siginfo_t*)(intptr_t)info->si_code,
ucp);
} else {
if ((__sighandler_t *)sigfunc == SIG_DFL) {
if (sigprop(sig) & SA_KILL)
kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig);
#ifdef NOTYET
else if (sigprop(sig) & SA_STOP)
kse_thr_interrupt(NULL, KSE_INTR_JOBSTOP, sig);
#endif
}
}
curthread->error = err_save;
curthread->timeout = timeout_save;
curthread->interrupted = intr_save;
_kse_critical_enter();
curthread->sigmask = ucp->uc_sigmask;
_kse_critical_leave(&curthread->tmbx);
DBG_MSG("<<< _thr_sig_handler(%d)\n", sig);
}
/* Must be called with signal lock and schedule lock held in order */
@ -292,19 +438,22 @@ thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info,
if (!(sa_flags & (SA_NODEFER | SA_RESETHAND)))
SIGADDSET(curthread->sigmask, sig);
if ((sig != SIGILL) && (sa_flags & SA_RESETHAND)) {
if (_thread_dfl_count[sig - 1] == 0) {
act.sa_handler = SIG_DFL;
act.sa_flags = SA_RESTART;
SIGEMPTYSET(act.sa_mask);
__sys_sigaction(sig, &act, NULL);
__sys_sigaction(sig, NULL, &_thread_sigact[sig - 1]);
}
act.sa_handler = SIG_DFL;
act.sa_flags = SA_RESTART;
SIGEMPTYSET(act.sa_mask);
__sys_sigaction(sig, &act, NULL);
__sys_sigaction(sig, NULL, &_thread_sigact[sig - 1]);
}
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
_kse_critical_leave(&curthread->tmbx);
/*
* We are processing buffered signals; synchronize the working
* signal mask into the kernel.
*/
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
ucp->uc_sigmask = sigmask;
if (((__sighandler_t *)sigfunc != SIG_DFL) &&
((__sighandler_t *)sigfunc != SIG_IGN)) {
if ((sa_flags & SA_SIGINFO) != 0 || info == NULL)
@ -313,24 +462,29 @@ thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info,
(*(sigfunc))(sig, (siginfo_t*)(intptr_t)info->si_code,
ucp);
} else {
/* XXX
* TODO: exit process if signal would kill it.
*/
#ifdef NOTYET
if ((__sighandler_t *)sigfunc == SIG_DFL) {
if (sigprop(sig) & SA_KILL)
kse_sigexit(sig);
kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig);
#ifdef NOTYET
else if (sigprop(sig) & SA_STOP)
kse_thr_interrupt(NULL, KSE_INTR_JOBSTOP, sig);
#endif
}
}
_kse_critical_enter();
/* Don't trust after critical leave/enter */
curkse = _get_curkse();
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
/*
* Restore the thread's signal mask.
*/
curthread->sigmask = ucp->uc_sigmask;
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
__sys_sigprocmask(SIG_SETMASK, &ucp->uc_sigmask, NULL);
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
DBG_MSG("Got signal %d, handler returned %p\n", sig, curthread);
}
@ -365,13 +519,13 @@ _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo)
SIGADDSET(sigset, sig);
ts.tv_sec = 0;
ts.tv_nsec = 0;
if (__sys_sigtimedwait(&sigset, siginfo, &ts) > 0) {
SIGDELSET(_thr_proc_sigpending, sig);
SIGDELSET(_thr_proc_sigpending, sig);
if (__sys_sigtimedwait(&sigset, siginfo, &ts) > 0)
return (sig);
}
return (0);
}
#ifndef SYSTEM_SCOPE_ONLY
/*
* Find a thread that can handle the signal. This must be called
* with upcalls disabled.
@ -381,15 +535,11 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
{
struct pthread *pthread;
struct pthread *suspended_thread, *signaled_thread;
__siginfohandler_t *sigfunc;
siginfo_t si;
DBG_MSG("Looking for thread to handle signal %d\n", sig);
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO) {
/* Dump thread information to file: */
_thread_dump_info();
}
/*
* Enter a loop to look for threads that have the signal
* unmasked. POSIX specifies that a thread in a sigwait
@ -407,12 +557,9 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
TAILQ_FOREACH(pthread, &_thread_list, tle) {
if (pthread == _thr_sig_daemon)
continue;
#ifdef NOTYET
/* Signal delivery to a bound thread is done by the kernel */
if (pthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
continue;
#endif
/* Take the scheduling lock. */
KSE_SCHED_LOCK(curkse, pthread->kseg);
if ((pthread->state == PS_DEAD) ||
@ -451,8 +598,16 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
return (NULL);
} else if (!SIGISMEMBER(pthread->sigmask, sig) ||
(!SIGISMEMBER(pthread->oldsigmask, sig) &&
pthread->state == PS_SIGWAIT)) {
(!SIGISMEMBER(pthread->oldsigmask, sig) &&
pthread->state == PS_SIGWAIT)) {
sigfunc = _thread_sigact[sig - 1].sa_sigaction;
if ((__sighandler_t *)sigfunc == SIG_DFL) {
if (sigprop(sig) & SA_KILL) {
kse_thr_interrupt(NULL,
KSE_INTR_SIGEXIT, sig);
/* Never reach */
}
}
if (pthread->state == PS_SIGSUSPEND) {
if (suspended_thread == NULL) {
suspended_thread = pthread;
@ -478,6 +633,7 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
}
return (pthread);
}
#endif /* ! SYSTEM_SCOPE_ONLY */
static void
build_siginfo(siginfo_t *info, int signo)
@ -501,8 +657,9 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
int i;
kse_critical_t crit;
struct kse *curkse;
sigset_t sigmask;
DBG_MSG(">>> thr_sig_rundown %p\n", curthread);
DBG_MSG(">>> thr_sig_rundown (%p)\n", curthread);
/* Check the threads previous state: */
if ((psf != NULL) && (psf->psf_valid != 0)) {
/*
@ -544,6 +701,15 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
curthread->active_priority &= ~THR_SIGNAL_PRIORITY;
while (1) {
/*
* For a bound thread, we mask all signals and get a fresh
* copy of the signal mask from the kernel.
*/
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
SIGFILLSET(sigmask);
__sys_sigprocmask(SIG_SETMASK, &sigmask,
&curthread->sigmask);
}
for (i = 1; i <= _SIG_MAXSIG; i++) {
if (SIGISMEMBER(curthread->sigmask, i))
continue;
@ -552,7 +718,8 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
siginfo = curthread->siginfo[i-1];
break;
}
if (SIGISMEMBER(_thr_proc_sigpending, i)) {
if (!(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
&& SIGISMEMBER(_thr_proc_sigpending, i)) {
if (_thr_getprocsig_unlocked(i, &siginfo))
break;
}
@ -568,12 +735,14 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
curkse = _get_curkse();
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
_kse_critical_leave(&curthread->tmbx);
curthread->interrupted = interrupted;
curthread->timeout = timeout;
DBG_MSG("<<< thr_sig_rundown %p\n", curthread);
DBG_MSG("<<< thr_sig_rundown (%p)\n", curthread);
}
/*
@ -603,6 +772,7 @@ _thr_sig_check_pending(struct pthread *curthread)
}
}
#ifndef SYSTEM_SCOPE_ONLY
/*
* This must be called with upcalls disabled.
*/
@ -631,6 +801,7 @@ handle_special_signals(struct kse *curkse, int sig)
break;
}
}
#endif /* ! SYSTEM_SCOPE_ONLY */
/*
* Perform thread specific actions in response to a signal.
@ -650,7 +821,7 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
struct kse *curkse;
siginfo_t siginfo;
DBG_MSG(">>> _thr_sig_add\n");
DBG_MSG(">>> _thr_sig_add %p (%d)\n", pthread, sig);
curkse = _get_curkse();
restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;
@ -660,13 +831,11 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
pthread->state == PS_STATE_MAX)
return; /* return false */
#ifdef NOTYET
if ((pthread->attrs.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
if (!fromproc)
kse_thr_interrupt(&pthread->tmbx, 0, sig);
if ((pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
(curthread != pthread)) {
PANIC("Please use _thr_send_sig for bound thread");
return;
}
#endif
if (pthread->curframe == NULL ||
(pthread->state != PS_SIGWAIT &&
@ -687,9 +856,11 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
}
if (!SIGISMEMBER(pthread->sigmask, sig)) {
pthread->check_pending = 1;
if (pthread->blocked != 0 && !THR_IN_CRITICAL(pthread))
if (!(pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
(pthread->blocked != 0) &&
!THR_IN_CRITICAL(pthread))
kse_thr_interrupt(&pthread->tmbx,
restart ? -2 : -1);
restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0);
}
}
else {
@ -801,64 +972,6 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
pthread->check_pending = 1;
}
}
DBG_MSG("<<< _thr_sig_add\n");
}
static void
thr_sig_check_state(struct pthread *pthread, int sig)
{
/*
* Process according to thread state:
*/
switch (pthread->state) {
/*
* States which do not change when a signal is trapped:
*/
case PS_RUNNING:
case PS_LOCKWAIT:
case PS_MUTEX_WAIT:
case PS_COND_WAIT:
case PS_JOIN:
case PS_SUSPENDED:
case PS_DEAD:
case PS_DEADLOCK:
case PS_STATE_MAX:
break;
case PS_SIGWAIT:
build_siginfo(&pthread->siginfo[sig-1], sig);
/* Wake up the thread if the signal is blocked. */
if (!SIGISMEMBER(pthread->sigmask, sig)) {
/* Return the signal number: */
*(pthread->data.sigwaitinfo) = pthread->siginfo[sig-1];
pthread->sigmask = pthread->oldsigmask;
/* Change the state of the thread to run: */
_thr_setrunnable_unlocked(pthread);
} else {
/* Increment the pending signal count. */
SIGADDSET(pthread->sigpend, sig);
if (!SIGISMEMBER(pthread->oldsigmask, sig)) {
pthread->check_pending = 1;
pthread->interrupted = 1;
pthread->sigmask = pthread->oldsigmask;
_thr_setrunnable_unlocked(pthread);
}
}
break;
case PS_SIGSUSPEND:
case PS_SLEEP_WAIT:
/*
* Remove the thread from the wait queue and make it
* runnable:
*/
_thr_setrunnable_unlocked(pthread);
/* Flag the operation as interrupted: */
pthread->interrupted = 1;
break;
}
}
/*
@ -869,41 +982,14 @@ _thr_sig_send(struct pthread *pthread, int sig)
{
struct pthread *curthread = _get_curthread();
#ifdef NOTYET
if ((pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0) {
kse_thr_interrupt(&pthread->tmbx, sig);
if (pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
kse_thr_interrupt(&pthread->tmbx, KSE_INTR_SENDSIG, sig);
return;
}
#endif
/* Lock the scheduling queue of the target thread. */
THR_SCHED_LOCK(curthread, pthread);
/* Check for signals whose actions are SIG_DFL: */
if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) {
/*
* Check to see if a temporary signal handler is
* installed for sigwaiters:
*/
if (_thread_dfl_count[sig - 1] == 0) {
/*
* Deliver the signal to the process if a handler
* is not installed:
*/
THR_SCHED_UNLOCK(curthread, pthread);
kill(getpid(), sig);
THR_SCHED_LOCK(curthread, pthread);
}
/*
* Assuming we're still running after the above kill(),
* make any necessary state changes to the thread:
*/
thr_sig_check_state(pthread, sig);
THR_SCHED_UNLOCK(curthread, pthread);
}
/*
* Check that the signal is not being ignored:
*/
else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
_thr_sig_add(pthread, sig, NULL);
THR_SCHED_UNLOCK(curthread, pthread);
/* XXX
@ -965,6 +1051,7 @@ _thr_signal_init(void)
{
sigset_t sigset;
struct sigaction act;
__siginfohandler_t *sigfunc;
int i;
SIGFILLSET(sigset);
@ -984,6 +1071,15 @@ _thr_signal_init(void)
*/
PANIC("Cannot read signal handler info");
}
/* Install the wrapper if a handler was set */
sigfunc = _thread_sigact[i - 1].sa_sigaction;
if (((__sighandler_t *)sigfunc) != SIG_DFL &&
((__sighandler_t *)sigfunc) != SIG_IGN) {
act = _thread_sigact[i - 1];
act.sa_flags |= SA_SIGINFO;
act.sa_sigaction = (__siginfohandler_t *)_thr_sig_handler;
__sys_sigaction(i, &act, NULL);
}
}
/*
* Install the signal handler for SIGINFO. It isn't
@ -1000,6 +1096,9 @@ _thr_signal_init(void)
*/
PANIC("Cannot initialize signal handler");
}
#ifdef SYSTEM_SCOPE_ONLY
__sys_sigprocmask(SIG_SETMASK, &_thr_initial->sigmask, NULL);
#endif
}
void

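_thr_signal_init() above now interposes on every handler the application had
installed before the library started: the original sigaction stays in
_thread_sigact[], and the kernel is pointed at _thr_sig_handler instead. A
standalone sketch of that interposition idiom (illustrative, not the library
code):

#include <signal.h>
#include <stdio.h>

static struct sigaction saved_act[NSIG];	/* saved user dispositions */

static void
wrapper(int sig, siginfo_t *info, void *ucp)
{
	/* ...library bookkeeping would happen here... */
	if (saved_act[sig].sa_flags & SA_SIGINFO)
		saved_act[sig].sa_sigaction(sig, info, ucp);
	else if (saved_act[sig].sa_handler != SIG_DFL &&
	    saved_act[sig].sa_handler != SIG_IGN)
		saved_act[sig].sa_handler(sig);
}

static void
interpose(int sig)
{
	struct sigaction act;

	sigaction(sig, NULL, &saved_act[sig]);	/* remember user handler */
	act = saved_act[sig];
	act.sa_flags |= SA_SIGINFO;
	act.sa_sigaction = wrapper;		/* install the wrapper */
	sigaction(sig, &act, NULL);
}

static void
user_handler(int sig)
{
	printf("user handler: %d\n", sig);
}

int
main(void)
{
	signal(SIGUSR1, user_handler);
	interpose(SIGUSR1);
	raise(SIGUSR1);		/* runs wrapper, which forwards */
	return (0);
}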
View File

@ -52,6 +52,15 @@ _pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
if (! _kse_isthreaded())
_kse_setthreaded(1);
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
ret = __sys_sigprocmask(how, set, oset);
if (ret != 0)
ret = errno;
/* Get a copy for thread dump */
__sys_sigprocmask(SIG_SETMASK, NULL, &curthread->sigmask);
return (ret);
}
if (set)
newset = *set;

View File

@ -55,8 +55,9 @@ _sigpending(sigset_t *set)
ret = EINVAL;
}
else {
if (!_kse_isthreaded())
return __sys_sigpending(set);
if (!_kse_isthreaded() ||
(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
return (__sys_sigpending(set));
crit = _kse_critical_enter();
KSE_SCHED_LOCK(curthread->kse, curthread->kseg);

View File

@ -35,6 +35,7 @@
#include <errno.h>
#include <pthread.h>
#include <string.h>
#include <sys/signalvar.h>
#include "thr_private.h"
__weak_reference(__sigsuspend, sigsuspend);
@ -46,12 +47,14 @@ _sigsuspend(const sigset_t *set)
sigset_t oldmask, newmask;
int ret = -1;
if (!_kse_isthreaded())
return __sys_sigsuspend(set);
if (!_kse_isthreaded() ||
(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
return (__sys_sigsuspend(set));
/* Check if a new signal set was provided by the caller: */
if (set != NULL) {
newmask = *set;
SIG_CANTMASK(newmask);
THR_LOCK_SWITCH(curthread);

View File

@ -50,26 +50,18 @@ lib_sigtimedwait(const sigset_t *set, siginfo_t *info,
struct pthread *curthread = _get_curthread();
int ret = 0;
int i;
sigset_t tempset, waitset;
struct sigaction act;
sigset_t waitset;
kse_critical_t crit;
siginfo_t siginfo;
if (!_kse_isthreaded()) {
if (!_kse_isthreaded() ||
(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)) {
if (info == NULL)
info = &siginfo;
return __sys_sigtimedwait((sigset_t *)set, info,
(struct timespec *)timeout);
return (__sys_sigtimedwait((sigset_t *)set, info,
(struct timespec *)timeout));
}
/*
* Specify the thread kernel signal handler.
*/
act.sa_handler = (void (*) ()) _thr_sig_handler;
act.sa_flags = SA_RESTART | SA_SIGINFO;
/* Ensure the signal handler cannot be interrupted by other signals: */
SIGFILLSET(act.sa_mask);
/*
* Initialize the set of signals that will be waited on:
*/
@ -79,103 +71,60 @@ lib_sigtimedwait(const sigset_t *set, siginfo_t *info,
SIGDELSET(waitset, SIGKILL);
SIGDELSET(waitset, SIGSTOP);
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
/*
* Enter a loop to find the signals that are SIG_DFL. For
* these signals we must install a dummy signal handler in
* order for the kernel to pass them in to us. POSIX says
* that the _application_ must explicitly install a dummy
* handler for signals that are SIG_IGN in order to sigwait
* on them. Note that SIG_IGN signals are left in the
* mask because a subsequent sigaction could enable an
* POSIX says that the _application_ must explicitly install
* a dummy handler for signals that are SIG_IGN in order
* to sigwait on them. Note that SIG_IGN signals are left in
* the mask because a subsequent sigaction could enable an
* ignored signal.
*/
SIGEMPTYSET(tempset);
for (i = 1; i <= _SIG_MAXSIG; i++) {
if (SIGISMEMBER(waitset, i) &&
(_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
_thread_dfl_count[i - 1]++;
SIGADDSET(tempset, i);
if (_thread_dfl_count[i - 1] == 1) {
if (__sys_sigaction(i, &act, NULL) != 0)
/* ret = -1 */;
}
}
}
if (ret == 0) {
/* Done accessing _thread_dfl_count for now. */
KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
KSE_SCHED_LOCK(curthread->kse, curthread->kseg);
for (i = 1; i <= _SIG_MAXSIG; ++i) {
if (SIGISMEMBER(waitset, i) &&
SIGISMEMBER(curthread->sigpend, i)) {
SIGDELSET(curthread->sigpend, i);
siginfo = curthread->siginfo[i - 1];
KSE_SCHED_UNLOCK(curthread->kse,
curthread->kseg);
KSE_LOCK_ACQUIRE(curthread->kse,
&_thread_signal_lock);
ret = i;
goto OUT;
}
crit = _kse_critical_enter();
KSE_SCHED_LOCK(curthread->kse, curthread->kseg);
for (i = 1; i <= _SIG_MAXSIG; ++i) {
if (SIGISMEMBER(waitset, i) &&
SIGISMEMBER(curthread->sigpend, i)) {
SIGDELSET(curthread->sigpend, i);
siginfo = curthread->siginfo[i - 1];
KSE_SCHED_UNLOCK(curthread->kse,
curthread->kseg);
_kse_critical_leave(crit);
ret = i;
goto OUT;
}
curthread->timeout = 0;
curthread->interrupted = 0;
_thr_set_timeout(timeout);
/* Wait for a signal: */
curthread->oldsigmask = curthread->sigmask;
siginfo.si_signo = 0;
curthread->data.sigwaitinfo = &siginfo;
SIGFILLSET(curthread->sigmask);
SIGSETNAND(curthread->sigmask, waitset);
THR_SET_STATE(curthread, PS_SIGWAIT);
_thr_sched_switch_unlocked(curthread);
/*
* Return the signal number to the caller:
*/
if (siginfo.si_signo > 0) {
ret = siginfo.si_signo;
} else {
if (curthread->interrupted)
errno = EINTR;
else if (curthread->timeout)
errno = EAGAIN;
ret = -1;
}
curthread->timeout = 0;
curthread->interrupted = 0;
/*
* Probably unnecessary, but since it's in a union struct
* we don't know how it could be used in the future.
*/
crit = _kse_critical_enter();
curthread->data.sigwaitinfo = NULL;
/*
* Relock the array of SIG_DFL wait counts.
*/
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
}
curthread->timeout = 0;
curthread->interrupted = 0;
_thr_set_timeout(timeout);
/* Wait for a signal: */
curthread->oldsigmask = curthread->sigmask;
siginfo.si_signo = 0;
curthread->data.sigwaitinfo = &siginfo;
SIGFILLSET(curthread->sigmask);
SIGSETNAND(curthread->sigmask, waitset);
THR_SET_STATE(curthread, PS_SIGWAIT);
_thr_sched_switch_unlocked(curthread);
/*
* Return the signal number to the caller:
*/
if (siginfo.si_signo > 0) {
ret = siginfo.si_signo;
} else {
if (curthread->interrupted)
errno = EINTR;
else if (curthread->timeout)
errno = EAGAIN;
ret = -1;
}
curthread->timeout = 0;
curthread->interrupted = 0;
/*
* Probably unnecessary, but since it's in a union struct
* we don't know how it could be used in the future.
*/
curthread->data.sigwaitinfo = NULL;
OUT:
/* Restore the sigactions: */
act.sa_handler = SIG_DFL;
for (i = 1; i <= _SIG_MAXSIG; i++) {
if (SIGISMEMBER(tempset, i)) {
_thread_dfl_count[i - 1]--;
if ((_thread_sigact[i - 1].sa_handler == SIG_DFL) &&
(_thread_dfl_count[i - 1] == 0)) {
if (__sys_sigaction(i, &act, NULL) != 0)
/* ret = -1 */ ;
}
}
}
/* Done accessing _thread_dfl_count. */
KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
_kse_critical_leave(crit);
if (ret > 0 && info != NULL)
*info = siginfo;

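With the library's dummy-handler machinery removed, the POSIX rule quoted in
the comment above falls entirely to the application: to sigwait() on a signal
whose disposition is SIG_IGN, the application must install its own (possibly
dummy) handler first. A minimal standalone example:

#include <signal.h>
#include <stdio.h>

static void
dummy(int sig)
{
	(void)sig;	/* application-installed dummy handler */
}

int
main(void)
{
	sigset_t set;
	int sig;

	signal(SIGUSR1, dummy);	/* ensure the signal is not discarded */
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);		/* the signal is now pending */
	sigwait(&set, &sig);	/* consume it synchronously */
	printf("got signal %d\n", sig);
	return (0);
}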
View File

@ -42,6 +42,9 @@ _sched_yield(void)
{
struct pthread *curthread = _get_curthread();
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
return (__sys_sched_yield());
/* Reset the accumulated time slice value for the current thread: */
curthread->slice_usec = -1;
@ -57,6 +60,11 @@ _pthread_yield(void)
{
struct pthread *curthread = _get_curthread();
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
__sys_sched_yield();
return;
}
/* Reset the accumulated time slice value for the current thread: */
curthread->slice_usec = -1;

View File

@ -22,6 +22,10 @@ CFLAGS+=-fno-builtin
CFLAGS+=-D_LOCK_DEBUG
#CFLAGS+= -g
# Uncomment this if you want to build a 1:1 threading mode library
# however it is no longer strictly conformed to POSIX
# CFLAGS+=-DSYSTEM_SCOPE_ONLY
LDFLAGS= -Wl,--version-script=${.CURDIR}/pthread.map
# enable extra internal consistancy checks

View File

@ -63,6 +63,8 @@ sigsuspender (void *arg)
/* Allow these signals to wake us up during a sigsuspend. */
sigfillset (&suspender_mask); /* Default action */
sigdelset (&suspender_mask, SIGKILL); /* Cannot catch */
sigdelset (&suspender_mask, SIGSTOP); /* Cannot catch */
sigdelset (&suspender_mask, SIGINT); /* terminate */
sigdelset (&suspender_mask, SIGHUP); /* terminate */
sigdelset (&suspender_mask, SIGQUIT); /* create core image */

View File

@ -107,9 +107,11 @@ _pthread_cancel(pthread_t pthread)
/* Ignore - only here to silence -Wall: */
break;
}
if ((pthread->blocked != 0) &&
((pthread->cancelflags & THR_AT_CANCEL_POINT) != 0))
kse_thr_interrupt(&pthread->tmbx, -1);
if ((pthread->cancelflags & THR_AT_CANCEL_POINT) &&
(pthread->blocked != 0 ||
pthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
kse_thr_interrupt(&pthread->tmbx,
KSE_INTR_INTERRUPT, 0);
}
/*

View File

@ -89,7 +89,7 @@ _thr_setconcurrency(int new_level)
/* Race condition, but so what. */
kse_count = _kse_initial->k_kseg->kg_ksecount;
for (i = kse_count; i < new_level; i++) {
newkse = _kse_alloc(curthread);
newkse = _kse_alloc(curthread, 0);
if (newkse == NULL) {
DBG_MSG("Can't alloc new KSE.\n");
ret = EAGAIN;

View File

@ -57,6 +57,7 @@ int _thread_PS_DEAD_value = PS_DEAD;
static void free_thread(struct pthread *curthread, struct pthread *thread);
static int create_stack(struct pthread_attr *pattr);
static void free_stack(struct pthread_attr *pattr);
static void thread_start(struct pthread *curthread,
void *(*start_routine) (void *), void *arg);
@ -91,7 +92,6 @@ int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
void *(*start_routine) (void *), void *arg)
{
struct kse *curkse;
struct pthread *curthread, *new_thread;
struct kse *kse = NULL;
struct kse_group *kseg = NULL;
@ -132,14 +132,16 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->attr = _pthread_attr_default;
else
new_thread->attr = *(*attr);
#ifdef SYSTEM_SCOPE_ONLY
new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
#endif
if (create_stack(&new_thread->attr) != 0) {
/* Insufficient memory to create a stack: */
ret = EAGAIN;
_thr_free(curthread, new_thread);
}
else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
(((kse = _kse_alloc(curthread)) == NULL)
(((kse = _kse_alloc(curthread, 1)) == NULL)
|| ((kseg = _kseg_alloc(curthread)) == NULL))) {
/* Insufficient memory to create a new KSE/KSEG: */
ret = EAGAIN;
@ -147,15 +149,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
kse->k_mbx.km_flags |= KMF_DONE;
_kse_free(curthread, kse);
}
if ((new_thread->attr.flags & THR_STACK_USER) == 0) {
crit = _kse_critical_enter();
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/* Stack routines don't use malloc/free. */
_thr_stack_free(&new_thread->attr);
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
free_stack(&new_thread->attr);
_thr_free(curthread, new_thread);
}
else {
@ -178,9 +172,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
PTHREAD_CANCEL_DEFERRED;
/* Initialize the thread for signals: */
new_thread->sigmask = curthread->sigmask;
/* No thread is wanting to join to this one: */
new_thread->joiner = NULL;
@ -193,6 +184,8 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
crit = _kse_critical_enter();
THR_GETCONTEXT(&new_thread->tmbx.tm_context);
/* Initialize the thread for signals: */
new_thread->sigmask = curthread->sigmask;
_kse_critical_leave(crit);
new_thread->tmbx.tm_udata = new_thread;
new_thread->tmbx.tm_context.uc_sigmask =
@ -278,9 +271,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
}
else {
kse->k_curthread = NULL;
#ifdef NOT_YET
kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD;
#endif
new_thread->kse = kse;
new_thread->kseg = kse->k_kseg;
kse->k_mbx.km_udata = kse;
@ -308,6 +299,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
static void
free_thread(struct pthread *curthread, struct pthread *thread)
{
free_stack(&thread->attr);
if ((thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
/* Free the KSE and KSEG. */
_kseg_free(thread->kseg);
@ -332,6 +324,22 @@ create_stack(struct pthread_attr *pattr)
return (ret);
}
static void
free_stack(struct pthread_attr *pattr)
{
struct kse *curkse;
kse_critical_t crit;
if ((pattr->flags & THR_STACK_USER) == 0) {
crit = _kse_critical_enter();
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/* Stack routines don't use malloc/free. */
_thr_stack_free(pattr);
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
}
static void
thread_start(struct pthread *curthread, void *(*start_routine) (void *),

View File

@ -56,19 +56,17 @@ _thr_ref_add(struct pthread *curthread, struct pthread *thread,
crit = _kse_critical_enter();
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
TAILQ_FOREACH(pthread, &_thread_list, tle) {
if (pthread == thread) {
if ((include_dead == 0) &&
((pthread->state == PS_DEAD) ||
((pthread->state == PS_DEADLOCK) ||
((pthread->flags & THR_FLAGS_EXITING) != 0))))
pthread = NULL;
else {
thread->refcount++;
if (curthread != NULL)
curthread->critical_count++;
}
break;
pthread = _thr_hash_find(thread);
if (pthread) {
if ((include_dead == 0) &&
((pthread->state == PS_DEAD) ||
((pthread->state == PS_DEADLOCK) ||
((pthread->flags & THR_FLAGS_EXITING) != 0))))
pthread = NULL;
else {
pthread->refcount++;
if (curthread != NULL)
curthread->critical_count++;
}
}
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);

View File

@ -259,12 +259,19 @@ _libpthread_init(struct pthread *curthread)
_kse_init();
/* Initialize the initial kse and kseg. */
_kse_initial = _kse_alloc(NULL);
#ifdef SYSTEM_SCOPE_ONLY
_kse_initial = _kse_alloc(NULL, 1);
#else
_kse_initial = _kse_alloc(NULL, 0);
#endif
if (_kse_initial == NULL)
PANIC("Can't allocate initial kse.");
_kse_initial->k_kseg = _kseg_alloc(NULL);
if (_kse_initial->k_kseg == NULL)
PANIC("Can't allocate initial kseg.");
#ifdef SYSTEM_SCOPE_ONLY
_kse_initial->k_kseg->kg_flags |= KGF_SINGLE_THREAD;
#endif
_kse_initial->k_schedq = &_kse_initial->k_kseg->kg_schedq;
TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_kseq, _kse_initial, k_kgqe);
@ -326,7 +333,9 @@ init_main_thread(struct pthread *thread)
/* Setup the thread attributes. */
thread->attr = _pthread_attr_default;
#ifdef SYSTEM_SCOPE_ONLY
thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
#endif
/*
* Set up the thread stack.
*
@ -463,9 +472,6 @@ init_private(void)
TAILQ_INIT(&_thread_list);
TAILQ_INIT(&_thread_gc_list);
/* Initialize the SIG_DFL dummy handler count. */
bzero(_thread_dfl_count, sizeof(_thread_dfl_count));
/*
* Initialize the lock for temporary installation of signal
* handlers (to support sigwait() semantics) and for the

View File

@ -120,6 +120,10 @@ static int active_kse_count = 0;
static int active_kseg_count = 0;
static u_int64_t next_uniqueid = 1;
LIST_HEAD(thread_hash_head, pthread);
#define THREAD_HASH_QUEUES 127
static struct thread_hash_head thr_hashtable[THREAD_HASH_QUEUES];
#define THREAD_HASH(thrd) ((unsigned long)thrd % THREAD_HASH_QUEUES)
#ifdef DEBUG_THREAD_KERN
static void dump_queues(struct kse *curkse);
@ -127,13 +131,11 @@ static void dump_queues(struct kse *curkse);
static void kse_check_completed(struct kse *kse);
static void kse_check_waitq(struct kse *kse);
static void kse_fini(struct kse *curkse);
static void kse_reinit(struct kse *kse);
static void kse_reinit(struct kse *kse, int sys_scope);
static void kse_sched_multi(struct kse *curkse);
#ifdef NOT_YET
static void kse_sched_single(struct kse *curkse);
#endif
static void kse_switchout_thread(struct kse *kse, struct pthread *thread);
static void kse_wait(struct kse *kse, struct pthread *td_wait);
static void kse_wait(struct kse *kse, struct pthread *td_wait, int sigseq);
static void kse_free_unlocked(struct kse *kse);
static void kseg_free_unlocked(struct kse_group *kseg);
static void kseg_init(struct kse_group *kseg);
@ -385,16 +387,30 @@ _kse_setthreaded(int threaded)
*/
_thr_signal_init();
_kse_initial->k_flags |= KF_STARTED;
#ifdef SYSTEM_SCOPE_ONLY
/*
* For a bound thread, the kernel reads the mailbox pointer only
* once, so we set it here before calling kse_create.
*/
KSE_SET_MBOX(_kse_initial, _thr_initial);
_kse_initial->k_mbx.km_flags |= KMF_BOUND;
#endif
if (kse_create(&_kse_initial->k_mbx, 0) != 0) {
_kse_initial->k_flags &= ~KF_STARTED;
__isthreaded = 0;
/* may abort() */
PANIC("kse_create() failed\n");
return (-1);
}
#ifndef SYSTEM_SCOPE_ONLY
/* Set current thread to initial thread */
KSE_SET_MBOX(_kse_initial, _thr_initial);
_thr_start_sig_daemon();
_thr_setmaxconcurrency();
#endif
}
return (0);
}
@ -592,7 +608,9 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
* instead of the next thread in the run queue, but
* we don't bother checking for that.
*/
if ((curthread->state == PS_DEAD) ||
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
kse_sched_single(curkse);
else if ((curthread->state == PS_DEAD) ||
(((td = KSE_RUNQ_FIRST(curkse)) == NULL) &&
(curthread->state != PS_RUNNING)) ||
((td != NULL) && (td->lock_switch == 0))) {
@ -693,30 +711,34 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
* KSE, but we use a separate scheduler so that it can be fine-tuned
* to be more efficient (and perhaps not need a separate stack for
* the KSE, allowing it to use the thread's stack).
*
* XXX - This probably needs some work.
*/
#ifdef NOT_YET
static void
kse_sched_single(struct kse *curkse)
{
struct pthread *curthread = curkse->k_curthread;
struct pthread *td_wait;
struct timespec ts;
int level;
sigset_t sigmask;
int i, sigseqno, level, first = 0;
if (curthread->active == 0) {
if (curthread->state != PS_RUNNING) {
/* Check to see if the thread has timed out. */
KSE_GET_TOD(curkse, &ts);
if (thr_timedout(curthread, &ts) != 0) {
curthread->timeout = 1;
curthread->state = PS_RUNNING;
}
}
}
if ((curkse->k_flags & KF_INITIALIZED) == 0) {
/* Setup this KSEs specific data. */
_ksd_setprivate(&curkse->k_ksd);
_set_curkse(curkse);
curkse->k_flags |= KF_INITIALIZED;
first = 1;
curthread->active = 1;
/* Setup kernel signal masks for new thread. */
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
/*
* Enter a critical region. This is meaningless for a bound
* thread, but other code expects the mailbox to be cleared,
* so we do it here to keep that code working.
*/
_kse_critical_enter();
}
/* This thread no longer needs to yield the CPU: */
curthread->critical_yield = 0;
curthread->need_switchout = 0;
@ -726,7 +748,8 @@ kse_sched_single(struct kse *curkse)
* There is no scheduling queue for single threaded KSEs,
* but we need a lock for protection regardless.
*/
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
if (curthread->lock_switch == 0)
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
/*
* This has to do the job of kse_switchout_thread(), only
@ -735,33 +758,46 @@ kse_sched_single(struct kse *curkse)
switch (curthread->state) {
case PS_DEAD:
curthread->check_pending = 0;
/* Unlock the scheduling queue and exit the KSE and thread. */
thr_cleaup(curkse, curthread);
thr_cleanup(curkse, curthread);
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
PANIC("bound thread shouldn't get here\n");
break;
case PS_COND_WAIT:
case PS_SIGWAIT:
PANIC("bound thread does not have SIGWAIT state\n");
case PS_SLEEP_WAIT:
/* Only insert threads that can timeout: */
if (curthread->wakeup_time.tv_sec != -1) {
/* Insert into the waiting queue: */
KSE_WAITQ_INSERT(curkse, curthread);
}
PANIC("bound thread does not have SLEEP_WAIT state\n");
case PS_SIGSUSPEND:
PANIC("bound thread does not have SIGSUSPEND state\n");
case PS_COND_WAIT:
break;
case PS_LOCKWAIT:
/*
* This state doesn't timeout.
*/
curthread->wakeup_time.tv_sec = -1;
curthread->wakeup_time.tv_nsec = -1;
level = curthread->locklevel - 1;
if (!_LCK_GRANTED(&curthread->lockusers[level]))
KSE_WAITQ_INSERT(curkse, curthread);
else
if (_LCK_GRANTED(&curthread->lockusers[level]))
THR_SET_STATE(curthread, PS_RUNNING);
break;
case PS_RUNNING:
if ((curthread->flags & THR_FLAGS_SUSPENDED) != 0) {
THR_SET_STATE(curthread, PS_SUSPENDED);
}
curthread->wakeup_time.tv_sec = -1;
curthread->wakeup_time.tv_nsec = -1;
break;
case PS_JOIN:
case PS_MUTEX_WAIT:
case PS_RUNNING:
case PS_SIGSUSPEND:
case PS_SIGWAIT:
case PS_SUSPENDED:
case PS_DEADLOCK:
default:
@ -769,41 +805,66 @@ kse_sched_single(struct kse *curkse)
* These states don't timeout and don't need
* to be in the waiting queue.
*/
curthread->wakeup_time.tv_sec = -1;
curthread->wakeup_time.tv_nsec = -1;
break;
}
while (curthread->state != PS_RUNNING) {
curthread->active = 0;
td_wait = KSE_WAITQ_FIRST(curkse);
sigseqno = curkse->k_sigseqno;
if (curthread->check_pending != 0) {
/*
* Installing pending signals into the frame may cause a
* mutex or condvar backout.
*/
curthread->check_pending = 0;
SIGFILLSET(sigmask);
kse_wait(curkse, td_wait);
if (td_wait != NULL) {
KSE_GET_TOD(curkse, &ts);
if (thr_timedout(curthread, &ts)) {
/* Indicate the thread timedout: */
td_wait->timeout = 1;
/* Make the thread runnable. */
THR_SET_STATE(td_wait, PS_RUNNING);
KSE_WAITQ_REMOVE(curkse, td_wait);
/*
* Lock out kernel signal code when we are processing
* signals, and get a fresh copy of signal mask.
*/
__sys_sigprocmask(SIG_SETMASK, &sigmask,
&curthread->sigmask);
for (i = 1; i <= _SIG_MAXSIG; i++) {
if (SIGISMEMBER(curthread->sigmask, i))
continue;
if (SIGISMEMBER(curthread->sigpend, i))
_thr_sig_add(curthread, i,
&curthread->siginfo[i-1]);
}
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask,
NULL);
/* The above code might make thread runnable */
if (curthread->state == PS_RUNNING)
break;
}
THR_DEACTIVATE_LAST_LOCK(curthread);
kse_wait(curkse, curthread, sigseqno);
THR_ACTIVATE_LAST_LOCK(curthread);
KSE_GET_TOD(curkse, &ts);
if (thr_timedout(curthread, &ts)) {
/* Indicate the thread timedout: */
curthread->timeout = 1;
/* Make the thread runnable. */
THR_SET_STATE(curthread, PS_RUNNING);
}
}
/* Remove the frame reference. */
curthread->curframe = NULL;
/* Unlock the scheduling queue. */
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
if (curthread->lock_switch == 0) {
/* Unlock the scheduling queue. */
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
}
/*
* Continue the thread at its current frame:
*/
DBG_MSG("Continuing bound thread %p\n", curthread);
_thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread);
PANIC("Thread has returned from _thread_switch");
if (first) {
_kse_critical_leave(&curthread->tmbx);
pthread_exit(curthread->start_routine(curthread->arg));
}
}
#endif
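On its first activation a bound thread enters kse_sched_single() from the kernel, finishes one-time setup (signal mask, critical region), and then calls straight into the user's start routine, exiting through pthread_exit() rather than returning. A rough user-level analogue of that first-switch trampoline, sketched with ucontext(3); none of these names are libkse's:

	#include <stdio.h>
	#include <stdlib.h>
	#include <ucontext.h>

	/*
	 * The "scheduler" context performs setup once, then enters the
	 * thread body; the body never returns to the setup code (libkse
	 * would call pthread_exit() with the body's result instead).
	 */
	static ucontext_t main_ctx, thr_ctx;

	static void
	body(void)
	{
		printf("thread body runs after one-time setup\n");
	}

	int
	main(void)
	{
		char *stack = malloc(65536);

		getcontext(&thr_ctx);
		thr_ctx.uc_stack.ss_sp = stack;
		thr_ctx.uc_stack.ss_size = 65536;
		thr_ctx.uc_link = &main_ctx;	/* return here when body ends */
		makecontext(&thr_ctx, body, 0);
		swapcontext(&main_ctx, &thr_ctx);	/* the "first" switch in */
		free(stack);
		return (0);
	}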
#ifdef DEBUG_THREAD_KERN
static void
@ -929,7 +990,7 @@ kse_sched_multi(struct kse *curkse)
* no more threads.
*/
td_wait = KSE_WAITQ_FIRST(curkse);
kse_wait(curkse, td_wait);
kse_wait(curkse, td_wait, 0);
kse_check_completed(curkse);
kse_check_waitq(curkse);
}
@ -1003,8 +1064,8 @@ kse_sched_multi(struct kse *curkse)
signalcontext(&curthread->tmbx.tm_context, 0,
(__sighandler_t *)thr_resume_wrapper);
#else
if ((curframe == NULL) && (curthread->check_pending != 0) &&
!THR_IN_CRITICAL(curthread)) {
if ((curframe == NULL) && (curthread->state == PS_RUNNING) &&
(curthread->check_pending != 0) && !THR_IN_CRITICAL(curthread)) {
curthread->check_pending = 0;
signalcontext(&curthread->tmbx.tm_context, 0,
(__sighandler_t *)thr_resume_wrapper);
@ -1129,7 +1190,11 @@ thr_cleanup(struct kse *curkse, struct pthread *thread)
THR_GCLIST_ADD(thread);
/* Use thread_list_lock */
active_threads--;
#ifdef SYSTEM_SCOPE_ONLY
if (active_threads == 0) {
#else
if (active_threads == 1) {
#endif
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
exit(0);
}
@ -1139,7 +1204,10 @@ thr_cleanup(struct kse *curkse, struct pthread *thread)
* A system scope thread has its own thread group; when the
* thread exits, its KSE and KSE group should be recycled
* as well. The KSE upcall stack belongs to the thread, so
* clear it here.
*/
curkse->k_stack.ss_sp = 0;
curkse->k_stack.ss_size = 0;
kse_exit();
PANIC("kse_exit() failed for system scope thread");
}
@ -1239,30 +1307,30 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
* the mailbox is set for the current thread.
*/
if ((newthread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
#ifdef NOT_YET
/* We use the thread's stack as the KSE's stack. */
new_thread->kse->k_mbx.km_stack.ss_sp =
new_thread->attr.stackaddr_attr;
new_thread->kse->k_mbx.km_stack.ss_size =
new_thread->attr.stacksize_attr;
#endif
newthread->kse->k_mbx.km_stack.ss_sp =
newthread->attr.stackaddr_attr;
newthread->kse->k_mbx.km_stack.ss_size =
newthread->attr.stacksize_attr;
/*
* No need to lock the scheduling queue since the
* KSE/KSEG pair have not yet been started.
*/
KSEG_THRQ_ADD(newthread->kseg, newthread);
if (newthread->state == PS_RUNNING)
THR_RUNQ_INSERT_TAIL(newthread);
newthread->kse->k_curthread = NULL;
newthread->kse->k_mbx.km_flags = 0;
newthread->kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
/* this thread never gives up kse */
newthread->active = 1;
newthread->kse->k_curthread = newthread;
newthread->kse->k_mbx.km_flags = KMF_BOUND;
newthread->kse->k_mbx.km_func = (kse_func_t *)kse_sched_single;
newthread->kse->k_mbx.km_quantum = 0;
KSE_SET_MBOX(newthread->kse, newthread);
/*
* This thread needs a new KSE and KSEG.
*/
newthread->kse->k_flags &= ~KF_INITIALIZED;
newthread->kse->k_flags |= KF_STARTED;
/* Fire up! */
ret = kse_create(&newthread->kse->k_mbx, 1);
if (ret != 0)
ret = errno;
@ -1492,7 +1560,7 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
!SIGISMEMBER(thread->sigmask, i)) {
restart = _thread_sigact[1 - 1].sa_flags & SA_RESTART;
kse_thr_interrupt(&thread->tmbx,
restart ? -2 : -1);
restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0);
break;
}
}
@ -1617,7 +1685,7 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
* This must be called with the scheduling lock held.
*/
static void
kse_wait(struct kse *kse, struct pthread *td_wait)
kse_wait(struct kse *kse, struct pthread *td_wait, int sigseqno)
{
struct timespec ts, ts_sleep;
int saved_flags;
@ -1640,10 +1708,15 @@ kse_wait(struct kse *kse, struct pthread *td_wait)
KSE_SET_IDLE(kse);
kse->k_kseg->kg_idle_kses++;
KSE_SCHED_UNLOCK(kse, kse->k_kseg);
saved_flags = kse->k_mbx.km_flags;
kse->k_mbx.km_flags |= KMF_NOUPCALL;
kse_release(&ts_sleep);
kse->k_mbx.km_flags = saved_flags;
if ((kse->k_kseg->kg_flags & KGF_SINGLE_THREAD) &&
(kse->k_sigseqno != sigseqno))
; /* don't sleep */
else {
saved_flags = kse->k_mbx.km_flags;
kse->k_mbx.km_flags |= KMF_NOUPCALL;
kse_release(&ts_sleep);
kse->k_mbx.km_flags = saved_flags;
}
KSE_SCHED_LOCK(kse, kse->k_kseg);
if (KSE_IS_IDLE(kse)) {
KSE_CLEAR_IDLE(kse);
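The new sigseqno argument closes a lost-wakeup window for bound threads: _thr_sig_handler() bumps k_sigseqno when it latches a signal, and a single-threaded KSE group skips the sleep entirely if the counter moved after the caller took its snapshot (the kernel also latches kse_wakeup(), so a late wakeup still cuts kse_release() short). A self-contained sketch of a sequence-gated sleep, assuming ordinary POSIX signals in place of the KSE machinery:

	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>

	static volatile sig_atomic_t sigseq;	/* analogue of k_sigseqno */

	static void
	handler(int sig)
	{
		sigseq++;		/* a signal arrived: bump the sequence */
	}

	static void
	gated_sleep(sig_atomic_t snapshot)
	{
		/* Mirrors the (kse->k_sigseqno != sigseqno) test above. */
		if (sigseq != snapshot) {
			printf("signal pending, skipping sleep\n");
			return;
		}
		sleep(1);		/* stand-in for kse_release() */
	}

	int
	main(void)
	{
		sig_atomic_t snap;

		signal(SIGUSR1, handler);
		snap = sigseq;
		raise(SIGUSR1);		/* arrives before we would sleep */
		gated_sleep(snap);
		return (0);
	}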
@ -1965,7 +2038,7 @@ _kseg_free(struct kse_group *kseg)
* In this case, we don't need to (and can't) take any locks.
*/
struct kse *
_kse_alloc(struct pthread *curthread)
_kse_alloc(struct pthread *curthread, int sys_scope)
{
struct kse *kse = NULL;
kse_critical_t crit;
@ -1991,7 +2064,7 @@ _kse_alloc(struct pthread *curthread)
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
if (kse != NULL)
kse_reinit(kse);
kse_reinit(kse, sys_scope);
}
if ((kse == NULL) &&
((kse = (struct kse *)malloc(sizeof(*kse))) != NULL)) {
@ -2009,16 +2082,16 @@ _kse_alloc(struct pthread *curthread)
/*
* Create the KSE context.
*
* XXX - For now this is done here in the allocation.
* In the future, we may want to have it done
* outside the allocation so that scope system
* threads (one thread per KSE) are not required
* to have a stack for an unneeded kse upcall.
* Scope system threads (one thread per KSE) are not required
* to have a stack for an unneeded kse upcall.
*/
kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
kse->k_mbx.km_stack.ss_sp = (char *)malloc(KSE_STACKSIZE);
kse->k_mbx.km_stack.ss_size = KSE_STACKSIZE;
if (!sys_scope) {
kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
kse->k_stack.ss_sp = (char *) malloc(KSE_STACKSIZE);
kse->k_stack.ss_size = KSE_STACKSIZE;
} else {
kse->k_mbx.km_func = (kse_func_t *)kse_sched_single;
}
kse->k_mbx.km_udata = (void *)kse;
kse->k_mbx.km_quantum = 20000;
/*
@ -2026,9 +2099,8 @@ _kse_alloc(struct pthread *curthread)
* doesn't get used; a KSE running a scope system
* thread will use that thread's stack.
*/
kse->k_stack.ss_sp = kse->k_mbx.km_stack.ss_sp;
kse->k_stack.ss_size = kse->k_mbx.km_stack.ss_size;
if (kse->k_mbx.km_stack.ss_sp == NULL) {
kse->k_mbx.km_stack = kse->k_stack;
if (!sys_scope && kse->k_stack.ss_sp == NULL) {
for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
_lockuser_destroy(&kse->k_lockusers[i]);
}
@ -2049,7 +2121,8 @@ _kse_alloc(struct pthread *curthread)
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
}
free(kse->k_mbx.km_stack.ss_sp);
if (kse->k_stack.ss_sp)
free(kse->k_stack.ss_sp);
for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
_lockuser_destroy(&kse->k_lockusers[i]);
}
@ -2068,15 +2141,27 @@ _kse_alloc(struct pthread *curthread)
}
static void
kse_reinit(struct kse *kse)
kse_reinit(struct kse *kse, int sys_scope)
{
/*
* XXX - For now every kse has its stack.
* In the future, we may want to have it done
* outside the allocation so that scope system
* threads (one thread per KSE) are not required
* to have a stack for an unneeded kse upcall.
*/
if (!sys_scope) {
kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
if (kse->k_stack.ss_sp == NULL) {
/* XXX check allocation failure */
kse->k_stack.ss_sp = (char *) malloc(KSE_STACKSIZE);
kse->k_stack.ss_size = KSE_STACKSIZE;
}
kse->k_mbx.km_quantum = 20000;
} else {
kse->k_mbx.km_func = (kse_func_t *)kse_sched_single;
if (kse->k_stack.ss_sp)
free(kse->k_stack.ss_sp);
kse->k_stack.ss_sp = NULL;
kse->k_stack.ss_size = 0;
kse->k_mbx.km_quantum = 0;
}
kse->k_mbx.km_stack = kse->k_stack;
kse->k_mbx.km_udata = (void *)kse;
kse->k_mbx.km_curthread = NULL;
kse->k_mbx.km_flags = 0;
kse->k_curthread = 0;
kse->k_kseg = 0;
@ -2092,6 +2177,7 @@ kse_reinit(struct kse *kse)
kse->k_cpu = 0;
kse->k_done = 0;
kse->k_switch = 0;
kse->k_sigseqno = 0;
}
void
@ -2226,7 +2312,6 @@ thr_link(struct pthread *thread)
THR_LIST_ADD(thread);
active_threads++;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
@ -2241,11 +2326,39 @@ thr_unlink(struct pthread *thread)
crit = _kse_critical_enter();
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
THR_LIST_REMOVE(thread);
active_threads--;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
void
_thr_hash_add(struct pthread *thread)
{
struct thread_hash_head *head;
head = &thr_hashtable[THREAD_HASH(thread)];
LIST_INSERT_HEAD(head, thread, hle);
}
void
_thr_hash_remove(struct pthread *thread)
{
LIST_REMOVE(thread, hle);
}
struct pthread *
_thr_hash_find(struct pthread *thread)
{
struct pthread *td;
struct thread_hash_head *head;
head = &thr_hashtable[THREAD_HASH(thread)];
LIST_FOREACH(td, head, hle) {
if (td == thread)
return (thread);
}
return (NULL);
}
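These helpers replace the linear walk of _thread_list shown in the _thr_ref_add() hunk earlier: a pthread_t coming from the application is an untrusted pointer, so it is validated by an identity lookup in a chained hash table keyed on the pointer value modulo a prime bucket count. A self-contained sketch of the same pattern built on <sys/queue.h>, with illustrative names:

	#include <sys/queue.h>
	#include <stdio.h>

	struct obj {
		LIST_ENTRY(obj) hle;		/* hash chain linkage */
	};

	LIST_HEAD(obj_hash_head, obj);

	#define	NBUCKETS	127		/* prime, as THREAD_HASH_QUEUES above */
	#define	OBJ_HASH(p)	((unsigned long)(p) % NBUCKETS)

	static struct obj_hash_head hashtable[NBUCKETS];

	static void
	obj_hash_add(struct obj *o)
	{
		LIST_INSERT_HEAD(&hashtable[OBJ_HASH(o)], o, hle);
	}

	static struct obj *
	obj_hash_find(struct obj *o)
	{
		struct obj *p;

		/* Identity lookup: succeeds only if this exact pointer was added. */
		LIST_FOREACH(p, &hashtable[OBJ_HASH(o)], hle)
			if (p == o)
				return (p);
		return (NULL);
	}

	int
	main(void)
	{
		struct obj a;

		obj_hash_add(&a);
		printf("valid handle: %s\n", obj_hash_find(&a) ? "yes" : "no");
		return (0);
	}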

View File

@ -55,8 +55,9 @@ _nanosleep(const struct timespec *time_to_sleep,
errno = EINVAL;
ret = -1;
} else {
if (!_kse_isthreaded())
return __sys_nanosleep(time_to_sleep, time_remaining);
if (!_kse_isthreaded() ||
(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
return (__sys_nanosleep(time_to_sleep, time_remaining));
KSE_GET_TOD(curthread->kse, &ts);
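The same bypass recurs throughout this commit (sigpending, sigsuspend, sigtimedwait, and sched_yield below get identical treatment): a system scope thread is backed by its own kernel-scheduled KSE, so a blocking call can go straight to the kernel instead of parking in the userland scheduler. A minimal sketch of the dispatch shape, with stand-ins for the libkse predicates:

	#include <stdio.h>
	#include <time.h>

	/* Stand-ins for _kse_isthreaded() and the PTHREAD_SCOPE_SYSTEM test. */
	static int threaded = 1;
	static int scope_system = 1;

	static int
	wrapped_nanosleep(const struct timespec *rqt, struct timespec *rmt)
	{
		if (!threaded || scope_system)
			/* Bound thread: issue the raw syscall directly. */
			return (nanosleep(rqt, rmt));
		/* ... M:N path: park the thread in the userland scheduler ... */
		return (0);
	}

	int
	main(void)
	{
		struct timespec ts = { 0, 1000000 };	/* 1 ms */

		wrapped_nanosleep(&ts, NULL);
		printf("slept via the direct path\n");
		return (0);
	}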

View File

@ -202,6 +202,7 @@ struct kse {
int k_cpu; /* CPU ID when bound */
int k_done; /* this KSE is done */
int k_switch; /* thread switch in UTS */
int k_sigseqno; /* signal buffered count */
};
/*
@ -615,6 +616,9 @@ struct pthread {
/* Queue entry for GC lists: */
TAILQ_ENTRY(pthread) gcle;
/* Hash queue entry */
LIST_ENTRY(pthread) hle;
/*
* Lock for accesses to this thread structure.
*/
@ -662,7 +666,7 @@ struct pthread {
sigset_t oldsigmask;
sigset_t sigmask;
sigset_t sigpend;
int check_pending;
volatile int check_pending;
int refcount;
/* Thread state: */
@ -894,12 +898,14 @@ do { \
#define THR_LIST_ADD(thrd) do { \
if (((thrd)->flags & THR_FLAGS_IN_TDLIST) == 0) { \
TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \
_thr_hash_add(thrd); \
(thrd)->flags |= THR_FLAGS_IN_TDLIST; \
} \
} while (0)
#define THR_LIST_REMOVE(thrd) do { \
if (((thrd)->flags & THR_FLAGS_IN_TDLIST) != 0) { \
TAILQ_REMOVE(&_thread_list, thrd, tle); \
_thr_hash_remove(thrd); \
(thrd)->flags &= ~THR_FLAGS_IN_TDLIST; \
} \
} while (0)
@ -1000,13 +1006,6 @@ SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC);
/* Array of signal actions for this process: */
SCLASS struct sigaction _thread_sigact[_SIG_MAXSIG];
/*
* Array of counts of dummy handlers for SIG_DFL signals. This is used to
* assure that there is always a dummy signal handler installed while there
* is a thread sigwait()ing on the corresponding signal.
*/
SCLASS int _thread_dfl_count[_SIG_MAXSIG];
/*
* Lock for above count of dummy handlers and for the process signal
* mask and pending signal sets.
@ -1047,7 +1046,7 @@ void _cond_wait_backout(struct pthread *);
struct pthread *_get_curthread(void);
struct kse *_get_curkse(void);
void _set_curkse(struct kse *);
struct kse *_kse_alloc(struct pthread *);
struct kse *_kse_alloc(struct pthread *, int sys_scope);
kse_critical_t _kse_critical_enter(void);
void _kse_critical_leave(kse_critical_t);
int _kse_in_critical(void);
@ -1131,11 +1130,16 @@ void _thr_enter_cancellation_point(struct pthread *);
void _thr_leave_cancellation_point(struct pthread *);
int _thr_setconcurrency(int new_level);
int _thr_setmaxconcurrency(void);
void _thr_critical_enter(struct pthread *);
void _thr_critical_leave(struct pthread *);
int _thr_start_sig_daemon(void);
int _thr_getprocsig(int sig, siginfo_t *siginfo);
int _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo);
void _thr_signal_init(void);
void _thr_signal_deinit(void);
void _thr_hash_add(struct pthread *);
void _thr_hash_remove(struct pthread *);
struct pthread *_thr_hash_find(struct pthread *);
/*
* Aliases for _pthread functions. Should be called instead of

View File

@ -45,16 +45,55 @@
/* Prototypes: */
static void build_siginfo(siginfo_t *info, int signo);
static void thr_sig_check_state(struct pthread *pthread, int sig);
#ifndef SYSTEM_SCOPE_ONLY
static struct pthread *thr_sig_find(struct kse *curkse, int sig,
siginfo_t *info);
static void handle_special_signals(struct kse *curkse, int sig);
#endif
static void thr_sigframe_add(struct pthread *thread);
static void thr_sigframe_restore(struct pthread *thread,
struct pthread_sigframe *psf);
static void thr_sigframe_save(struct pthread *thread,
struct pthread_sigframe *psf);
#define SA_KILL 0x01 /* terminates process by default */
#define SA_STOP 0x02
#define SA_CONT 0x04
static int sigproptbl[NSIG] = {
SA_KILL, /* SIGHUP */
SA_KILL, /* SIGINT */
SA_KILL, /* SIGQUIT */
SA_KILL, /* SIGILL */
SA_KILL, /* SIGTRAP */
SA_KILL, /* SIGABRT */
SA_KILL, /* SIGEMT */
SA_KILL, /* SIGFPE */
SA_KILL, /* SIGKILL */
SA_KILL, /* SIGBUS */
SA_KILL, /* SIGSEGV */
SA_KILL, /* SIGSYS */
SA_KILL, /* SIGPIPE */
SA_KILL, /* SIGALRM */
SA_KILL, /* SIGTERM */
0, /* SIGURG */
SA_STOP, /* SIGSTOP */
SA_STOP, /* SIGTSTP */
SA_CONT, /* SIGCONT */
0, /* SIGCHLD */
SA_STOP, /* SIGTTIN */
SA_STOP, /* SIGTTOU */
0, /* SIGIO */
SA_KILL, /* SIGXCPU */
SA_KILL, /* SIGXFSZ */
SA_KILL, /* SIGVTALRM */
SA_KILL, /* SIGPROF */
0, /* SIGWINCH */
0, /* SIGINFO */
SA_KILL, /* SIGUSR1 */
SA_KILL /* SIGUSR2 */
};
/* #define DEBUG_SIGNAL */
#ifdef DEBUG_SIGNAL
#define DBG_MSG stdout_debug
@ -133,6 +172,8 @@ static void thr_sigframe_save(struct pthread *thread,
* signal unmasked.
*/
#ifndef SYSTEM_SCOPE_ONLY
static void *
sig_daemon(void *arg /* Unused */)
{
@ -143,13 +184,20 @@ sig_daemon(void *arg /* Unused */)
struct kse *curkse;
struct pthread *curthread = _get_curthread();
DBG_MSG("signal daemon started\n");
DBG_MSG("signal daemon started(%p)\n", curthread);
curthread->name = strdup("signal thread");
crit = _kse_critical_enter();
curkse = _get_curkse();
/*
* The daemon thread is a bound thread, and it must be created
* with all signals masked.
*/
#if 0
SIGFILLSET(set);
__sys_sigprocmask(SIG_SETMASK, &set, NULL);
#endif
__sys_sigpending(&set);
ts.tv_sec = 0;
ts.tv_nsec = 0;
@ -173,13 +221,14 @@ sig_daemon(void *arg /* Unused */)
return (0);
}
/* Utility function to create signal daemon thread */
int
_thr_start_sig_daemon(void)
{
pthread_attr_t attr;
sigset_t sigset, oldset;
SIGFILLSET(sigset);
pthread_sigmask(SIG_SETMASK, &sigset, &oldset);
pthread_attr_init(&attr);
@ -206,6 +255,13 @@ _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
/* Some signals need special handling: */
handle_special_signals(curkse, sig);
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO) {
/* Dump thread information to file: */
_thread_dump_info();
}
while ((thread = thr_sig_find(curkse, sig, info)) != NULL) {
/*
* Setup the target thread to receive the signal:
@ -233,11 +289,27 @@ _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
DBG_MSG("<<< _thr_sig_dispatch\n");
}
#endif /* ! SYSTEM_SCOPE_ONLY */
static __inline int
sigprop(int sig)
{
if (sig > 0 && sig < NSIG)
return (sigproptbl[_SIG_IDX(sig)]);
return (0);
}
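sigprop() gives the library the same default-action knowledge the kernel has, so when a handler is SIG_DFL it can distinguish process-killing signals, which are forwarded via KSE_INTR_SIGEXIT, from stop/continue and discardable ones. A small standalone sketch of consulting such a table, assuming a trimmed local copy:

	#include <signal.h>
	#include <stdio.h>

	#define	SA_KILL	0x01		/* terminates process by default */
	#define	SA_STOP	0x02

	/* Trimmed, illustrative copy covering signals 1..3 (HUP, INT, QUIT). */
	static int proptbl[] = { SA_KILL, SA_KILL, SA_KILL };

	static int
	prop(int sig)
	{
		/* 1-based signal number to 0-based table index, like _SIG_IDX(). */
		if (sig > 0 && sig <= (int)(sizeof(proptbl) / sizeof(proptbl[0])))
			return (proptbl[sig - 1]);
		return (0);
	}

	int
	main(void)
	{
		printf("SIGINT default %s the process\n",
		    (prop(SIGINT) & SA_KILL) ? "kills" : "spares");
		return (0);
	}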
void
_thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
{
__siginfohandler_t *sigfunc;
struct pthread *curthread;
struct kse *curkse;
struct sigaction act;
int sa_flags, err_save, intr_save, timeout_save;
DBG_MSG(">>> _thr_sig_handler(%d)\n", sig);
curkse = _get_curkse();
if ((curkse == NULL) || ((curkse->k_flags & KF_STARTED) == 0)) {
@ -253,12 +325,86 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
(*(sigfunc))(sig,
(siginfo_t*)(intptr_t)info->si_code, ucp);
}
return;
}
else {
/* Nothing. */
DBG_MSG("Got signal %d\n", sig);
/* XXX Bound thread will fall into this... */
curthread = _get_curthread();
if (curthread == NULL)
PANIC("No current thread.\n");
if (!(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
PANIC("Thread is not system scope.\n");
if (curthread->flags & THR_FLAGS_EXITING)
return;
curkse = _get_curkse();
/*
* If the thread is in a critical region or in the middle of a
* state transition, latch the signal into the buffer.
*/
if (_kse_in_critical() || THR_IN_CRITICAL(curthread) ||
(curthread->state != PS_RUNNING && curthread->curframe == NULL)) {
DBG_MSG(">>> _thr_sig_handler(%d) in critical\n", sig);
curthread->siginfo[sig-1] = *info;
curthread->check_pending = 1;
curkse->k_sigseqno++;
SIGADDSET(curthread->sigpend, sig);
/*
* If the kse is on its way to idling itself but we have a
* signal ready, we should prevent it from sleeping; the
* kernel will latch the wakeup request, so kse_release
* will return immediately.
*/
if (KSE_IS_IDLE(curkse))
kse_wakeup(&curkse->k_mbx);
return;
}
/* It is now safe to invoke signal handler */
err_save = curthread->error;
timeout_save = curthread->timeout;
intr_save = curthread->interrupted;
/* Get a fresh copy of signal mask from kernel, for thread dump only */
__sys_sigprocmask(SIG_SETMASK, NULL, &curthread->sigmask);
_kse_critical_enter();
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
sigfunc = _thread_sigact[sig - 1].sa_sigaction;
sa_flags = _thread_sigact[sig - 1].sa_flags;
if (sa_flags & SA_RESETHAND) {
act.sa_handler = SIG_DFL;
act.sa_flags = SA_RESTART;
SIGEMPTYSET(act.sa_mask);
__sys_sigaction(sig, &act, NULL);
__sys_sigaction(sig, NULL, &_thread_sigact[sig - 1]);
}
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
_kse_critical_leave(&curthread->tmbx);
/* Now invoke real handler */
if (((__sighandler_t *)sigfunc != SIG_DFL) &&
((__sighandler_t *)sigfunc != SIG_IGN) &&
(sigfunc != (__siginfohandler_t *)_thr_sig_handler)) {
if ((sa_flags & SA_SIGINFO) != 0 || info == NULL)
(*(sigfunc))(sig, info, ucp);
else
(*(sigfunc))(sig, (siginfo_t*)(intptr_t)info->si_code,
ucp);
} else {
if ((__sighandler_t *)sigfunc == SIG_DFL) {
if (sigprop(sig) & SA_KILL)
kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig);
#ifdef NOTYET
else if (sigprop(sig) & SA_STOP)
kse_thr_interrupt(NULL, KSE_INTR_JOBSTOP, sig);
#endif
}
}
curthread->error = err_save;
curthread->timeout = timeout_save;
curthread->interrupted = intr_save;
_kse_critical_enter();
curthread->sigmask = ucp->uc_sigmask;
_kse_critical_leave(&curthread->tmbx);
DBG_MSG("<<< _thr_sig_handler(%d)\n", sig);
}
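The handler above boils down to: if the signal interrupted a critical region, latch it (record the siginfo, set check_pending, bump k_sigseqno) and return; otherwise run the user handler inline. The same deferral idea in miniature, stripped of the KSE machinery:

	#include <signal.h>
	#include <stdio.h>

	static volatile sig_atomic_t in_critical;
	static volatile sig_atomic_t check_pending;
	static volatile sig_atomic_t pending_sig;

	static void
	handler(int sig)
	{
		if (in_critical) {
			pending_sig = sig;	/* latch, like sigpend/siginfo[] */
			check_pending = 1;
		} else {
			/* safe point: run the real work inline */
		}
	}

	int
	main(void)
	{
		signal(SIGUSR1, handler);
		in_critical = 1;
		raise(SIGUSR1);			/* deferred, not handled */
		in_critical = 0;
		if (check_pending)
			printf("deferred signal %d\n", (int)pending_sig);
		return (0);
	}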
/* Must be called with signal lock and schedule lock held in order */
@ -292,19 +438,22 @@ thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info,
if (!(sa_flags & (SA_NODEFER | SA_RESETHAND)))
SIGADDSET(curthread->sigmask, sig);
if ((sig != SIGILL) && (sa_flags & SA_RESETHAND)) {
if (_thread_dfl_count[sig - 1] == 0) {
act.sa_handler = SIG_DFL;
act.sa_flags = SA_RESTART;
SIGEMPTYSET(act.sa_mask);
__sys_sigaction(sig, &act, NULL);
__sys_sigaction(sig, NULL, &_thread_sigact[sig - 1]);
}
act.sa_handler = SIG_DFL;
act.sa_flags = SA_RESTART;
SIGEMPTYSET(act.sa_mask);
__sys_sigaction(sig, &act, NULL);
__sys_sigaction(sig, NULL, &_thread_sigact[sig - 1]);
}
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
_kse_critical_leave(&curthread->tmbx);
/*
* We are processing buffered signals; synchronize the working
* signal mask into the kernel.
*/
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
ucp->uc_sigmask = sigmask;
if (((__sighandler_t *)sigfunc != SIG_DFL) &&
((__sighandler_t *)sigfunc != SIG_IGN)) {
if ((sa_flags & SA_SIGINFO) != 0 || info == NULL)
@ -313,24 +462,29 @@ thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info,
(*(sigfunc))(sig, (siginfo_t*)(intptr_t)info->si_code,
ucp);
} else {
/* XXX
* TODO: exit process if signal would kill it.
*/
#ifdef NOTYET
if ((__sighandler_t *)sigfunc == SIG_DFL) {
if (sigprop(sig) & SA_KILL)
kse_sigexit(sig);
kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig);
#ifdef NOTYET
else if (sigprop(sig) & SA_STOP)
kse_thr_interrupt(NULL, KSE_INTR_JOBSTOP, sig);
#endif
}
}
_kse_critical_enter();
/* Don't trust after critical leave/enter */
curkse = _get_curkse();
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
/*
* Restore the thread's signal mask.
*/
curthread->sigmask = ucp->uc_sigmask;
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
__sys_sigprocmask(SIG_SETMASK, &ucp->uc_sigmask, NULL);
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
DBG_MSG("Got signal %d, handler returned %p\n", sig, curthread);
}
@ -365,13 +519,13 @@ _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo)
SIGADDSET(sigset, sig);
ts.tv_sec = 0;
ts.tv_nsec = 0;
if (__sys_sigtimedwait(&sigset, siginfo, &ts) > 0) {
SIGDELSET(_thr_proc_sigpending, sig);
SIGDELSET(_thr_proc_sigpending, sig);
if (__sys_sigtimedwait(&sigset, siginfo, &ts) > 0)
return (sig);
}
return (0);
}
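_thr_getprocsig_unlocked() collects a process-pending signal without blocking by handing __sys_sigtimedwait() a zero timeout; the reordering above clears the bookkeeping bit before issuing the syscall. The underlying poll primitive, in standalone form:

	#include <signal.h>
	#include <stdio.h>
	#include <time.h>

	int
	main(void)
	{
		sigset_t set;
		siginfo_t si;
		struct timespec ts = { 0, 0 };	/* zero timeout: poll only */

		sigemptyset(&set);
		sigaddset(&set, SIGUSR1);
		sigprocmask(SIG_BLOCK, &set, NULL);	/* keep it pending */
		raise(SIGUSR1);
		if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
			printf("collected pending SIGUSR1 without blocking\n");
		return (0);
	}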
#ifndef SYSTEM_SCOPE_ONLY
/*
* Find a thread that can handle the signal. This must be called
* with upcalls disabled.
@ -381,15 +535,11 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
{
struct pthread *pthread;
struct pthread *suspended_thread, *signaled_thread;
__siginfohandler_t *sigfunc;
siginfo_t si;
DBG_MSG("Looking for thread to handle signal %d\n", sig);
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO) {
/* Dump thread information to file: */
_thread_dump_info();
}
/*
* Enter a loop to look for threads that have the signal
* unmasked. POSIX specifies that a thread in a sigwait
@ -407,12 +557,9 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
TAILQ_FOREACH(pthread, &_thread_list, tle) {
if (pthread == _thr_sig_daemon)
continue;
#ifdef NOTYET
/* Signal delivery to a bound thread is done by the kernel */
if (pthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
continue;
#endif
/* Take the scheduling lock. */
KSE_SCHED_LOCK(curkse, pthread->kseg);
if ((pthread->state == PS_DEAD) ||
@ -451,8 +598,16 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
return (NULL);
} else if (!SIGISMEMBER(pthread->sigmask, sig) ||
(!SIGISMEMBER(pthread->oldsigmask, sig) &&
pthread->state == PS_SIGWAIT)) {
(!SIGISMEMBER(pthread->oldsigmask, sig) &&
pthread->state == PS_SIGWAIT)) {
sigfunc = _thread_sigact[sig - 1].sa_sigaction;
if ((__sighandler_t *)sigfunc == SIG_DFL) {
if (sigprop(sig) & SA_KILL) {
kse_thr_interrupt(NULL,
KSE_INTR_SIGEXIT, sig);
/* Never reach */
}
}
if (pthread->state == PS_SIGSUSPEND) {
if (suspended_thread == NULL) {
suspended_thread = pthread;
@ -478,6 +633,7 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
}
return (pthread);
}
#endif /* ! SYSTEM_SCOPE_ONLY */
static void
build_siginfo(siginfo_t *info, int signo)
@ -501,8 +657,9 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
int i;
kse_critical_t crit;
struct kse *curkse;
sigset_t sigmask;
DBG_MSG(">>> thr_sig_rundown %p\n", curthread);
DBG_MSG(">>> thr_sig_rundown (%p)\n", curthread);
/* Check the threads previous state: */
if ((psf != NULL) && (psf->psf_valid != 0)) {
/*
@ -544,6 +701,15 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
curthread->active_priority &= ~THR_SIGNAL_PRIORITY;
while (1) {
/*
* For a bound thread, we mask all signals and get a fresh
* copy of the signal mask from the kernel.
*/
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
SIGFILLSET(sigmask);
__sys_sigprocmask(SIG_SETMASK, &sigmask,
&curthread->sigmask);
}
for (i = 1; i <= _SIG_MAXSIG; i++) {
if (SIGISMEMBER(curthread->sigmask, i))
continue;
@ -552,7 +718,8 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
siginfo = curthread->siginfo[i-1];
break;
}
if (SIGISMEMBER(_thr_proc_sigpending, i)) {
if (!(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
&& SIGISMEMBER(_thr_proc_sigpending, i)) {
if (_thr_getprocsig_unlocked(i, &siginfo))
break;
}
@ -568,12 +735,14 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
curkse = _get_curkse();
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
_kse_critical_leave(&curthread->tmbx);
curthread->interrupted = interrupted;
curthread->timeout = timeout;
DBG_MSG("<<< thr_sig_rundown %p\n", curthread);
DBG_MSG("<<< thr_sig_rundown (%p)\n", curthread);
}
/*
@ -603,6 +772,7 @@ _thr_sig_check_pending(struct pthread *curthread)
}
}
#ifndef SYSTEM_SCOPE_ONLY
/*
* This must be called with upcalls disabled.
*/
@ -631,6 +801,7 @@ handle_special_signals(struct kse *curkse, int sig)
break;
}
}
#endif /* ! SYSTEM_SCOPE_ONLY */
/*
* Perform thread specific actions in response to a signal.
@ -650,7 +821,7 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
struct kse *curkse;
siginfo_t siginfo;
DBG_MSG(">>> _thr_sig_add\n");
DBG_MSG(">>> _thr_sig_add %p (%d)\n", pthread, sig);
curkse = _get_curkse();
restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;
@ -660,13 +831,11 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
pthread->state == PS_STATE_MAX)
return; /* return false */
#ifdef NOTYET
if ((pthread->attrs.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
if (!fromproc)
kse_thr_interrupt(&pthread->tmbx, 0, sig);
if ((pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
(curthread != pthread)) {
PANIC("Please use _thr_send_sig for bound thread");
return;
}
#endif
if (pthread->curframe == NULL ||
(pthread->state != PS_SIGWAIT &&
@ -687,9 +856,11 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
}
if (!SIGISMEMBER(pthread->sigmask, sig)) {
pthread->check_pending = 1;
if (pthread->blocked != 0 && !THR_IN_CRITICAL(pthread))
if (!(pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
(pthread->blocked != 0) &&
!THR_IN_CRITICAL(pthread))
kse_thr_interrupt(&pthread->tmbx,
restart ? -2 : -1);
restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0);
}
}
else {
@ -801,64 +972,6 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
pthread->check_pending = 1;
}
}
DBG_MSG("<<< _thr_sig_add\n");
}
static void
thr_sig_check_state(struct pthread *pthread, int sig)
{
/*
* Process according to thread state:
*/
switch (pthread->state) {
/*
* States which do not change when a signal is trapped:
*/
case PS_RUNNING:
case PS_LOCKWAIT:
case PS_MUTEX_WAIT:
case PS_COND_WAIT:
case PS_JOIN:
case PS_SUSPENDED:
case PS_DEAD:
case PS_DEADLOCK:
case PS_STATE_MAX:
break;
case PS_SIGWAIT:
build_siginfo(&pthread->siginfo[sig-1], sig);
/* Wake up the thread if the signal is blocked. */
if (!SIGISMEMBER(pthread->sigmask, sig)) {
/* Return the signal number: */
*(pthread->data.sigwaitinfo) = pthread->siginfo[sig-1];
pthread->sigmask = pthread->oldsigmask;
/* Change the state of the thread to run: */
_thr_setrunnable_unlocked(pthread);
} else {
/* Increment the pending signal count. */
SIGADDSET(pthread->sigpend, sig);
if (!SIGISMEMBER(pthread->oldsigmask, sig)) {
pthread->check_pending = 1;
pthread->interrupted = 1;
pthread->sigmask = pthread->oldsigmask;
_thr_setrunnable_unlocked(pthread);
}
}
break;
case PS_SIGSUSPEND:
case PS_SLEEP_WAIT:
/*
* Remove the thread from the wait queue and make it
* runnable:
*/
_thr_setrunnable_unlocked(pthread);
/* Flag the operation as interrupted: */
pthread->interrupted = 1;
break;
}
}
/*
@ -869,41 +982,14 @@ _thr_sig_send(struct pthread *pthread, int sig)
{
struct pthread *curthread = _get_curthread();
#ifdef NOTYET
if ((pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0) {
kse_thr_interrupt(&pthread->tmbx, sig);
if (pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
kse_thr_interrupt(&pthread->tmbx, KSE_INTR_SENDSIG, sig);
return;
}
#endif
/* Lock the scheduling queue of the target thread. */
THR_SCHED_LOCK(curthread, pthread);
/* Check for signals whose actions are SIG_DFL: */
if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) {
/*
* Check to see if a temporary signal handler is
* installed for sigwaiters:
*/
if (_thread_dfl_count[sig - 1] == 0) {
/*
* Deliver the signal to the process if a handler
* is not installed:
*/
THR_SCHED_UNLOCK(curthread, pthread);
kill(getpid(), sig);
THR_SCHED_LOCK(curthread, pthread);
}
/*
* Assuming we're still running after the above kill(),
* make any necessary state changes to the thread:
*/
thr_sig_check_state(pthread, sig);
THR_SCHED_UNLOCK(curthread, pthread);
}
/*
* Check that the signal is not being ignored:
*/
else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
_thr_sig_add(pthread, sig, NULL);
THR_SCHED_UNLOCK(curthread, pthread);
/* XXX
@ -965,6 +1051,7 @@ _thr_signal_init(void)
{
sigset_t sigset;
struct sigaction act;
__siginfohandler_t *sigfunc;
int i;
SIGFILLSET(sigset);
@ -984,6 +1071,15 @@ _thr_signal_init(void)
*/
PANIC("Cannot read signal handler info");
}
/* Install wrapper if a handler was set */
sigfunc = _thread_sigact[i - 1].sa_sigaction;
if (((__sighandler_t *)sigfunc) != SIG_DFL &&
((__sighandler_t *)sigfunc) != SIG_IGN) {
act = _thread_sigact[i - 1];
act.sa_flags |= SA_SIGINFO;
act.sa_sigaction = (__siginfohandler_t *)_thr_sig_handler;
__sys_sigaction(i, &act, NULL);
}
}
/*
* Install the signal handler for SIGINFO. It isn't
@ -1000,6 +1096,9 @@ _thr_signal_init(void)
*/
PANIC("Cannot initialize signal handler");
}
#ifdef SYSTEM_SCOPE_ONLY
__sys_sigprocmask(SIG_SETMASK, &_thr_initial->sigmask, NULL);
#endif
}
void

View File

@ -52,6 +52,15 @@ _pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
if (! _kse_isthreaded())
_kse_setthreaded(1);
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
ret = __sys_sigprocmask(how, set, oset);
if (ret != 0)
ret = errno;
/* Get a copy for thread dump */
__sys_sigprocmask(SIG_SETMASK, NULL, &curthread->sigmask);
return (ret);
}
if (set)
newset = *set;

View File

@ -55,8 +55,9 @@ _sigpending(sigset_t *set)
ret = EINVAL;
}
else {
if (!_kse_isthreaded())
return __sys_sigpending(set);
if (!_kse_isthreaded() ||
(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
return (__sys_sigpending(set));
crit = _kse_critical_enter();
KSE_SCHED_LOCK(curthread->kse, curthread->kseg);

View File

@ -35,6 +35,7 @@
#include <errno.h>
#include <pthread.h>
#include <string.h>
#include <sys/signalvar.h>
#include "thr_private.h"
__weak_reference(__sigsuspend, sigsuspend);
@ -46,12 +47,14 @@ _sigsuspend(const sigset_t *set)
sigset_t oldmask, newmask;
int ret = -1;
if (!_kse_isthreaded())
return __sys_sigsuspend(set);
if (!_kse_isthreaded() ||
(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
return (__sys_sigsuspend(set));
/* Check if a new signal set was provided by the caller: */
if (set != NULL) {
newmask = *set;
SIG_CANTMASK(newmask);
THR_LOCK_SWITCH(curthread);

View File

@ -50,26 +50,18 @@ lib_sigtimedwait(const sigset_t *set, siginfo_t *info,
struct pthread *curthread = _get_curthread();
int ret = 0;
int i;
sigset_t tempset, waitset;
struct sigaction act;
sigset_t waitset;
kse_critical_t crit;
siginfo_t siginfo;
if (!_kse_isthreaded()) {
if (!_kse_isthreaded() ||
(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)) {
if (info == NULL)
info = &siginfo;
return __sys_sigtimedwait((sigset_t *)set, info,
(struct timespec *)timeout);
return (__sys_sigtimedwait((sigset_t *)set, info,
(struct timespec *)timeout));
}
/*
* Specify the thread kernel signal handler.
*/
act.sa_handler = (void (*) ()) _thr_sig_handler;
act.sa_flags = SA_RESTART | SA_SIGINFO;
/* Ensure the signal handler cannot be interrupted by other signals: */
SIGFILLSET(act.sa_mask);
/*
* Initialize the set of signals that will be waited on:
*/
@ -79,103 +71,60 @@ lib_sigtimedwait(const sigset_t *set, siginfo_t *info,
SIGDELSET(waitset, SIGKILL);
SIGDELSET(waitset, SIGSTOP);
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
/*
* Enter a loop to find the signals that are SIG_DFL. For
* these signals we must install a dummy signal handler in
* order for the kernel to pass them in to us. POSIX says
* that the _application_ must explicitly install a dummy
* handler for signals that are SIG_IGN in order to sigwait
* on them. Note that SIG_IGN signals are left in the
* mask because a subsequent sigaction could enable an
* POSIX says that the _application_ must explicitly install
* a dummy handler for signals that are SIG_IGN in order
* to sigwait on them. Note that SIG_IGN signals are left in
* the mask because a subsequent sigaction could enable an
* ignored signal.
*/
SIGEMPTYSET(tempset);
for (i = 1; i <= _SIG_MAXSIG; i++) {
if (SIGISMEMBER(waitset, i) &&
(_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
_thread_dfl_count[i - 1]++;
SIGADDSET(tempset, i);
if (_thread_dfl_count[i - 1] == 1) {
if (__sys_sigaction(i, &act, NULL) != 0)
/* ret = -1 */;
}
}
}
if (ret == 0) {
/* Done accessing _thread_dfl_count for now. */
KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
KSE_SCHED_LOCK(curthread->kse, curthread->kseg);
for (i = 1; i <= _SIG_MAXSIG; ++i) {
if (SIGISMEMBER(waitset, i) &&
SIGISMEMBER(curthread->sigpend, i)) {
SIGDELSET(curthread->sigpend, i);
siginfo = curthread->siginfo[i - 1];
KSE_SCHED_UNLOCK(curthread->kse,
curthread->kseg);
KSE_LOCK_ACQUIRE(curthread->kse,
&_thread_signal_lock);
ret = i;
goto OUT;
}
crit = _kse_critical_enter();
KSE_SCHED_LOCK(curthread->kse, curthread->kseg);
for (i = 1; i <= _SIG_MAXSIG; ++i) {
if (SIGISMEMBER(waitset, i) &&
SIGISMEMBER(curthread->sigpend, i)) {
SIGDELSET(curthread->sigpend, i);
siginfo = curthread->siginfo[i - 1];
KSE_SCHED_UNLOCK(curthread->kse,
curthread->kseg);
_kse_critical_leave(crit);
ret = i;
goto OUT;
}
curthread->timeout = 0;
curthread->interrupted = 0;
_thr_set_timeout(timeout);
/* Wait for a signal: */
curthread->oldsigmask = curthread->sigmask;
siginfo.si_signo = 0;
curthread->data.sigwaitinfo = &siginfo;
SIGFILLSET(curthread->sigmask);
SIGSETNAND(curthread->sigmask, waitset);
THR_SET_STATE(curthread, PS_SIGWAIT);
_thr_sched_switch_unlocked(curthread);
/*
* Return the signal number to the caller:
*/
if (siginfo.si_signo > 0) {
ret = siginfo.si_signo;
} else {
if (curthread->interrupted)
errno = EINTR;
else if (curthread->timeout)
errno = EAGAIN;
ret = -1;
}
curthread->timeout = 0;
curthread->interrupted = 0;
/*
* Probably unnecessary, but since it's in a union struct
* we don't know how it could be used in the future.
*/
crit = _kse_critical_enter();
curthread->data.sigwaitinfo = NULL;
/*
* Relock the array of SIG_DFL wait counts.
*/
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
}
curthread->timeout = 0;
curthread->interrupted = 0;
_thr_set_timeout(timeout);
/* Wait for a signal: */
curthread->oldsigmask = curthread->sigmask;
siginfo.si_signo = 0;
curthread->data.sigwaitinfo = &siginfo;
SIGFILLSET(curthread->sigmask);
SIGSETNAND(curthread->sigmask, waitset);
THR_SET_STATE(curthread, PS_SIGWAIT);
_thr_sched_switch_unlocked(curthread);
/*
* Return the signal number to the caller:
*/
if (siginfo.si_signo > 0) {
ret = siginfo.si_signo;
} else {
if (curthread->interrupted)
errno = EINTR;
else if (curthread->timeout)
errno = EAGAIN;
ret = -1;
}
curthread->timeout = 0;
curthread->interrupted = 0;
/*
* Probably unnecessary, but since it's in a union struct
* we don't know how it could be used in the future.
*/
curthread->data.sigwaitinfo = NULL;
OUT:
/* Restore the sigactions: */
act.sa_handler = SIG_DFL;
for (i = 1; i <= _SIG_MAXSIG; i++) {
if (SIGISMEMBER(tempset, i)) {
_thread_dfl_count[i - 1]--;
if ((_thread_sigact[i - 1].sa_handler == SIG_DFL) &&
(_thread_dfl_count[i - 1] == 0)) {
if (__sys_sigaction(i, &act, NULL) != 0)
/* ret = -1 */ ;
}
}
}
/* Done accessing _thread_dfl_count. */
KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
_kse_critical_leave(crit);
if (ret > 0 && info != NULL)
*info = siginfo;

View File

@ -42,6 +42,9 @@ _sched_yield(void)
{
struct pthread *curthread = _get_curthread();
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
return (__sys_sched_yield());
/* Reset the accumulated time slice value for the current thread: */
curthread->slice_usec = -1;
@ -57,6 +60,11 @@ _pthread_yield(void)
{
struct pthread *curthread = _get_curthread();
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
__sys_sched_yield();
return;
}
/* Reset the accumulated time slice value for the current thread: */
curthread->slice_usec = -1;