Move idle kse wakeup to outside of regions where locks are held.

This eliminates ping-ponging of locks, where the idle KSE wakes
up only to find the lock it needs is being held.  This gives
little or no gain to M:N mode but greatly speeds up 1:1 mode.

Reviewed & Tested by:	davidxu
This commit is contained in:
deischen 2003-07-23 02:11:07 +00:00
parent a831749015
commit 9f8651cad6
16 changed files with 260 additions and 146 deletions

View File

@ -20,6 +20,7 @@ _pthread_cancel(pthread_t pthread)
{
struct pthread *curthread = _get_curthread();
struct pthread *joinee = NULL;
struct kse_mailbox *kmbx = NULL;
int ret;
if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0)) == 0) {
@ -65,7 +66,7 @@ _pthread_cancel(pthread_t pthread)
/* Interrupt and resume: */
pthread->interrupted = 1;
pthread->cancelflags |= THR_CANCELLING;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
break;
case PS_JOIN:
@ -73,7 +74,7 @@ _pthread_cancel(pthread_t pthread)
joinee = pthread->join_status.thread;
pthread->join_status.thread = NULL;
pthread->cancelflags |= THR_CANCELLING;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
if ((joinee != NULL) &&
(pthread->kseg == joinee->kseg)) {
/* Remove the joiner from the joinee. */
@ -97,7 +98,7 @@ _pthread_cancel(pthread_t pthread)
*/
pthread->interrupted = 1;
pthread->cancelflags |= THR_CANCEL_NEEDED;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
pthread->continuation = finish_cancellation;
break;
@ -120,6 +121,8 @@ _pthread_cancel(pthread_t pthread)
*/
THR_SCHED_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);
if (kmbx != NULL)
kse_wakeup(kmbx);
if ((joinee != NULL) &&
(_thr_ref_add(curthread, joinee, /* include dead */1) == 0)) {

View File

@ -584,6 +584,7 @@ _pthread_cond_signal(pthread_cond_t * cond)
{
struct pthread *curthread = _get_curthread();
struct pthread *pthread;
struct kse_mailbox *kmbx;
int rval = 0;
THR_ASSERT(curthread->locklevel == 0,
@ -619,8 +620,10 @@ _pthread_cond_signal(pthread_cond_t * cond)
(pthread->active_priority >
curthread->active_priority))
curthread->critical_yield = 1;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
THR_SCHED_UNLOCK(curthread, pthread);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
/* Check for no more waiters: */
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
@ -649,6 +652,7 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
{
struct pthread *curthread = _get_curthread();
struct pthread *pthread;
struct kse_mailbox *kmbx;
int rval = 0;
THR_ASSERT(curthread->locklevel == 0,
@ -682,8 +686,10 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
(pthread->active_priority >
curthread->active_priority))
curthread->critical_yield = 1;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
THR_SCHED_UNLOCK(curthread, pthread);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
/* There are no more waiting threads: */

View File

@ -43,6 +43,7 @@ int
_pthread_detach(pthread_t pthread)
{
struct pthread *curthread = _get_curthread();
struct kse_mailbox *kmbx = NULL;
struct pthread *joiner;
int rval = 0;
@ -83,12 +84,11 @@ _pthread_detach(pthread_t pthread)
joiner->join_status.ret = NULL;
joiner->join_status.thread = NULL;
_thr_setrunnable_unlocked(joiner);
kmbx = _thr_setrunnable_unlocked(joiner);
}
joiner = NULL;
}
THR_SCHED_UNLOCK(curthread, pthread);
/* See if there is a thread waiting in pthread_join(): */
if ((joiner != NULL) &&
(_thr_ref_add(curthread, joiner, 0) == 0)) {
@ -102,12 +102,14 @@ _pthread_detach(pthread_t pthread)
joiner->join_status.ret = NULL;
joiner->join_status.thread = NULL;
_thr_setrunnable_unlocked(joiner);
kmbx = _thr_setrunnable_unlocked(joiner);
}
THR_SCHED_UNLOCK(curthread, joiner);
_thr_ref_delete(curthread, joiner);
}
_thr_ref_delete(curthread, pthread);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
/* Return the completion status: */

View File

@ -142,7 +142,7 @@ static void kseg_init(struct kse_group *kseg);
static void kseg_reinit(struct kse_group *kseg);
static void kse_waitq_insert(struct pthread *thread);
static void kse_wakeup_multi(struct kse *curkse);
static void kse_wakeup_one(struct pthread *thread);
static struct kse_mailbox *kse_wakeup_one(struct pthread *thread);
static void thr_cleanup(struct kse *kse, struct pthread *curthread);
static void thr_link(struct pthread *thread);
static void thr_resume_wrapper(int sig, siginfo_t *, ucontext_t *);
@ -341,7 +341,7 @@ _kse_single_thread(struct pthread *curthread)
#else
if (__isthreaded)
_thr_signal_deinit();
_ksd_readandclear_tmbx();
_ksd_set_tmbx(NULL);
__isthreaded = 0;
active_threads = 0;
#endif
@ -505,10 +505,9 @@ _thr_lock_wait(struct lock *lock, struct lockuser *lu)
struct pthread *curthread = (struct pthread *)lu->lu_private;
do {
THR_SCHED_LOCK(curthread, curthread);
THR_LOCK_SWITCH(curthread);
THR_SET_STATE(curthread, PS_LOCKWAIT);
THR_SCHED_UNLOCK(curthread, curthread);
_thr_sched_switch(curthread);
_thr_sched_switch_unlocked(curthread);
} while (!_LCK_GRANTED(lu));
}
@ -517,14 +516,17 @@ _thr_lock_wakeup(struct lock *lock, struct lockuser *lu)
{
struct pthread *thread;
struct pthread *curthread;
struct kse_mailbox *kmbx;
curthread = _get_curthread();
thread = (struct pthread *)_LCK_GET_PRIVATE(lu);
THR_SCHED_LOCK(curthread, thread);
_lock_grant(lock, lu);
_thr_setrunnable_unlocked(thread);
kmbx = _thr_setrunnable_unlocked(thread);
THR_SCHED_UNLOCK(curthread, thread);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
kse_critical_t
@ -532,7 +534,8 @@ _kse_critical_enter(void)
{
kse_critical_t crit;
crit = _ksd_readandclear_tmbx();
crit = _ksd_get_tmbx();
_ksd_set_tmbx(NULL);
return (crit);
}
@ -841,8 +844,8 @@ kse_sched_single(struct kse *curkse)
if (SIGISMEMBER(curthread->sigmask, i))
continue;
if (SIGISMEMBER(curthread->sigpend, i))
_thr_sig_add(curthread, i,
&curthread->siginfo[i-1]);
(void)_thr_sig_add(curthread, i,
&curthread->siginfo[i-1]);
}
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask,
NULL);
@ -1142,6 +1145,7 @@ static void
thr_cleanup(struct kse *curkse, struct pthread *thread)
{
struct pthread *joiner;
struct kse_mailbox *kmbx = NULL;
int sys_scope;
if ((joiner = thread->joiner) != NULL) {
@ -1150,7 +1154,7 @@ thr_cleanup(struct kse *curkse, struct pthread *thread)
if (joiner->join_status.thread == thread) {
joiner->join_status.thread = NULL;
joiner->join_status.ret = thread->ret;
_thr_setrunnable_unlocked(joiner);
(void)_thr_setrunnable_unlocked(joiner);
}
} else {
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
@ -1160,10 +1164,12 @@ thr_cleanup(struct kse *curkse, struct pthread *thread)
if (joiner->join_status.thread == thread) {
joiner->join_status.thread = NULL;
joiner->join_status.ret = thread->ret;
_thr_setrunnable_unlocked(joiner);
kmbx = _thr_setrunnable_unlocked(joiner);
}
KSE_SCHED_UNLOCK(curkse, joiner->kseg);
_thr_ref_delete(thread, joiner);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
}
@ -1436,7 +1442,8 @@ kse_check_completed(struct kse *kse)
if (SIGISMEMBER(thread->sigmask, sig))
SIGADDSET(thread->sigpend, sig);
else
_thr_sig_add(thread, sig, &thread->tmbx.tm_syncsig);
(void)_thr_sig_add(thread, sig,
&thread->tmbx.tm_syncsig);
thread->tmbx.tm_syncsig.si_signo = 0;
}
completed = completed->tm_next;
@ -1653,10 +1660,11 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
if (SIGISMEMBER(thread->sigmask, i))
continue;
if (SIGISMEMBER(thread->sigpend, i))
_thr_sig_add(thread, i, &thread->siginfo[i-1]);
(void)_thr_sig_add(thread, i,
&thread->siginfo[i-1]);
else if (SIGISMEMBER(_thr_proc_sigpending, i) &&
_thr_getprocsig_unlocked(i, &siginfo)) {
_thr_sig_add(thread, i, &siginfo);
(void)_thr_sig_add(thread, i, &siginfo);
}
}
KSE_LOCK_RELEASE(kse, &_thread_signal_lock);
@ -1823,23 +1831,31 @@ void
_thr_setrunnable(struct pthread *curthread, struct pthread *thread)
{
kse_critical_t crit;
struct kse_mailbox *kmbx;
crit = _kse_critical_enter();
KSE_SCHED_LOCK(curthread->kse, thread->kseg);
_thr_setrunnable_unlocked(thread);
kmbx = _thr_setrunnable_unlocked(thread);
KSE_SCHED_UNLOCK(curthread->kse, thread->kseg);
_kse_critical_leave(crit);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
void
struct kse_mailbox *
_thr_setrunnable_unlocked(struct pthread *thread)
{
struct kse_mailbox *kmbx = NULL;
if ((thread->kseg->kg_flags & KGF_SINGLE_THREAD) != 0) {
/* No silly queues for these threads. */
if ((thread->flags & THR_FLAGS_SUSPENDED) != 0)
THR_SET_STATE(thread, PS_SUSPENDED);
else
else {
THR_SET_STATE(thread, PS_RUNNING);
kmbx = kse_wakeup_one(thread);
}
} else if (thread->state != PS_RUNNING) {
if ((thread->flags & THR_FLAGS_IN_WAITQ) != 0)
KSE_WAITQ_REMOVE(thread->kse, thread);
@ -1850,25 +1866,31 @@ _thr_setrunnable_unlocked(struct pthread *thread)
if ((thread->blocked == 0) && (thread->active == 0) &&
(thread->flags & THR_FLAGS_IN_RUNQ) == 0)
THR_RUNQ_INSERT_TAIL(thread);
/*
* XXX - Threads are not yet assigned to specific
* KSEs; they are assigned to the KSEG. So
* the fact that a thread's KSE is waiting
* doesn't necessarily mean that it will be
* the KSE that runs the thread after the
* lock is granted. But we don't know if the
* other KSEs within the same KSEG are also
* in a waiting state or not so we err on the
* side of caution and wakeup the thread's
* last known KSE. We ensure that the
* thread's KSE doesn't change while its
* scheduling lock is held so it is safe to
* reference it (the KSE). If the KSE wakes
* up and doesn't find any more work it will
* again go back to waiting so no harm is
* done.
*/
kmbx = kse_wakeup_one(thread);
}
}
/*
* XXX - Threads are not yet assigned to specific KSEs; they are
* assigned to the KSEG. So the fact that a thread's KSE is
* waiting doesn't necessarily mean that it will be the KSE
* that runs the thread after the lock is granted. But we
* don't know if the other KSEs within the same KSEG are
* also in a waiting state or not so we err on the side of
* caution and wakeup the thread's last known KSE. We
* ensure that the thread's KSE doesn't change while its
* scheduling lock is held so it is safe to reference it
* (the KSE). If the KSE wakes up and doesn't find any more
* work it will again go back to waiting so no harm is done.
*/
kse_wakeup_one(thread);
return (kmbx);
}
static void
static struct kse_mailbox *
kse_wakeup_one(struct pthread *thread)
{
struct kse *ke;
@ -1876,17 +1898,17 @@ kse_wakeup_one(struct pthread *thread)
if (KSE_IS_IDLE(thread->kse)) {
KSE_CLEAR_IDLE(thread->kse);
thread->kseg->kg_idle_kses--;
KSE_WAKEUP(thread->kse);
return (&thread->kse->k_mbx);
} else {
TAILQ_FOREACH(ke, &thread->kseg->kg_kseq, k_kgqe) {
if (KSE_IS_IDLE(ke)) {
KSE_CLEAR_IDLE(ke);
ke->k_kseg->kg_idle_kses--;
KSE_WAKEUP(ke);
return;
return (&ke->k_mbx);
}
}
}
return (NULL);
}
static void

View File

@ -67,7 +67,8 @@
/*
* Prototypes
*/
static void mutex_handoff(struct pthread *, struct pthread_mutex *);
static struct kse_mailbox *mutex_handoff(struct pthread *,
struct pthread_mutex *);
static inline int mutex_self_trylock(struct pthread *, pthread_mutex_t);
static inline int mutex_self_lock(struct pthread *, pthread_mutex_t);
static int mutex_unlock_common(pthread_mutex_t *, int);
@ -860,6 +861,7 @@ static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
struct pthread *curthread = _get_curthread();
struct kse_mailbox *kmbx = NULL;
int ret = 0;
if (m == NULL || *m == NULL)
@ -904,7 +906,7 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
* Hand off the mutex to the next waiting
* thread:
*/
mutex_handoff(curthread, *m);
kmbx = mutex_handoff(curthread, *m);
}
break;
@ -961,7 +963,7 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
* Hand off the mutex to the next waiting
* thread:
*/
mutex_handoff(curthread, *m);
kmbx = mutex_handoff(curthread, *m);
}
break;
@ -1017,7 +1019,7 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
* Hand off the mutex to the next waiting
* thread:
*/
mutex_handoff(curthread, *m);
kmbx = mutex_handoff(curthread, *m);
}
break;
@ -1034,6 +1036,8 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
/* Return the completion status: */
@ -1460,9 +1464,10 @@ _mutex_lock_backout(struct pthread *curthread)
* is necessary to lock the thread's scheduling queue while also
* holding the mutex lock.
*/
static void
static struct kse_mailbox *
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
struct kse_mailbox *kmbx = NULL;
struct pthread *pthread;
/* Keep dequeueing until we find a valid thread: */
@ -1564,7 +1569,7 @@ mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
}
/* Make the thread runnable and unlock the scheduling queue: */
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
/* Add a preemption point. */
if ((curthread->kseg == pthread->kseg) &&
@ -1583,6 +1588,7 @@ mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
/* This mutex has no priority: */
mutex->m_prio = 0;
return (kmbx);
}
/*

View File

@ -1106,8 +1106,8 @@ void _thr_ref_delete(struct pthread *, struct pthread *);
int _thr_schedule_add(struct pthread *, struct pthread *);
void _thr_schedule_remove(struct pthread *, struct pthread *);
void _thr_setrunnable(struct pthread *curthread, struct pthread *thread);
void _thr_setrunnable_unlocked(struct pthread *thread);
void _thr_sig_add(struct pthread *, int, siginfo_t *);
struct kse_mailbox *_thr_setrunnable_unlocked(struct pthread *thread);
struct kse_mailbox *_thr_sig_add(struct pthread *, int, siginfo_t *);
void _thr_sig_dispatch(struct kse *, int, siginfo_t *);
int _thr_stack_alloc(struct pthread_attr *);
void _thr_stack_free(struct pthread_attr *);

View File

@ -35,7 +35,7 @@
#include <pthread.h>
#include "thr_private.h"
static void resume_common(struct pthread *);
static struct kse_mailbox *resume_common(struct pthread *);
__weak_reference(_pthread_resume_np, pthread_resume_np);
__weak_reference(_pthread_resume_all_np, pthread_resume_all_np);
@ -46,15 +46,18 @@ int
_pthread_resume_np(pthread_t thread)
{
struct pthread *curthread = _get_curthread();
struct kse_mailbox *kmbx;
int ret;
/* Add a reference to the thread: */
if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0)) == 0) {
/* Lock the threads scheduling queue: */
THR_SCHED_LOCK(curthread, thread);
resume_common(thread);
kmbx = resume_common(thread);
THR_SCHED_UNLOCK(curthread, thread);
_thr_ref_delete(curthread, thread);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
return (ret);
}
@ -64,6 +67,7 @@ _pthread_resume_all_np(void)
{
struct pthread *curthread = _get_curthread();
struct pthread *thread;
struct kse_mailbox *kmbx;
kse_critical_t crit;
/* Take the thread list lock: */
@ -73,8 +77,10 @@ _pthread_resume_all_np(void)
TAILQ_FOREACH(thread, &_thread_list, tle) {
if (thread != curthread) {
THR_SCHED_LOCK(curthread, thread);
resume_common(thread);
kmbx = resume_common(thread);
THR_SCHED_UNLOCK(curthread, thread);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
}
@ -83,7 +89,7 @@ _pthread_resume_all_np(void)
_kse_critical_leave(crit);
}
static void
static struct kse_mailbox *
resume_common(struct pthread *thread)
{
/* Clear the suspend flag: */
@ -95,5 +101,7 @@ resume_common(struct pthread *thread)
* state to running and insert it into the run queue.
*/
if (thread->state == PS_SUSPENDED)
_thr_setrunnable_unlocked(thread);
return (_thr_setrunnable_unlocked(thread));
else
return (NULL);
}

View File

@ -249,6 +249,7 @@ _thr_start_sig_daemon(void)
void
_thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
{
struct kse_mailbox *kmbx;
struct pthread *thread;
DBG_MSG(">>> _thr_sig_dispatch(%d)\n", sig);
@ -280,9 +281,11 @@ _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
KSE_SCHED_UNLOCK(curkse, thread->kseg);
_thr_ref_delete(NULL, thread);
} else {
_thr_sig_add(thread, sig, info);
kmbx = _thr_sig_add(thread, sig, info);
KSE_SCHED_UNLOCK(curkse, thread->kseg);
_thr_ref_delete(NULL, thread);
if (kmbx != NULL)
kse_wakeup(kmbx);
break;
}
}
@ -533,6 +536,7 @@ _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo)
struct pthread *
thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
{
struct kse_mailbox *kmbx = NULL;
struct pthread *pthread;
struct pthread *suspended_thread, *signaled_thread;
__siginfohandler_t *sigfunc;
@ -582,7 +586,7 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
/* where to put siginfo ? */
*(pthread->data.sigwaitinfo) = si;
pthread->sigmask = pthread->oldsigmask;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
}
KSE_SCHED_UNLOCK(curkse, pthread->kseg);
/*
@ -596,6 +600,8 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
* to the process pending set.
*/
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
if (kmbx != NULL)
kse_wakeup(kmbx);
return (NULL);
} else if (!SIGISMEMBER(pthread->sigmask, sig) ||
(!SIGISMEMBER(pthread->oldsigmask, sig) &&
@ -811,15 +817,16 @@ handle_special_signals(struct kse *curkse, int sig)
*
* This must be called with the thread's scheduling lock held.
*/
void
struct kse_mailbox *
_thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
{
siginfo_t siginfo;
struct kse *curkse;
struct kse_mailbox *kmbx = NULL;
struct pthread *curthread = _get_curthread();
int restart;
int suppress_handler = 0;
int fromproc = 0;
struct pthread *curthread = _get_curthread();
struct kse *curkse;
siginfo_t siginfo;
DBG_MSG(">>> _thr_sig_add %p (%d)\n", pthread, sig);
@ -829,12 +836,12 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK ||
pthread->state == PS_STATE_MAX)
return; /* return false */
return (NULL); /* return false */
if ((pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
(curthread != pthread)) {
PANIC("Please use _thr_send_sig for bound thread");
return;
return (NULL);
}
if (pthread->curframe == NULL ||
@ -851,7 +858,7 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
sizeof(*info));
} else {
if (!_thr_getprocsig(sig, &pthread->siginfo[sig-1]))
return;
return (NULL);
SIGADDSET(pthread->sigpend, sig);
}
if (!SIGISMEMBER(pthread->sigmask, sig)) {
@ -867,7 +874,7 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
/* if process signal not exists, just return */
if (fromproc) {
if (!_thr_getprocsig(sig, &siginfo))
return;
return (NULL);
info = &siginfo;
}
/*
@ -877,7 +884,7 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
case PS_DEAD:
case PS_DEADLOCK:
case PS_STATE_MAX:
return; /* XXX return false */
return (NULL); /* XXX return false */
case PS_LOCKWAIT:
case PS_SUSPENDED:
/*
@ -937,7 +944,7 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
*(pthread->data.sigwaitinfo) = pthread->siginfo[sig-1];
pthread->sigmask = pthread->oldsigmask;
/* Make the thread runnable: */
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
} else {
/* Increment the pending signal count. */
SIGADDSET(pthread->sigpend, sig);
@ -945,11 +952,10 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
pthread->check_pending = 1;
pthread->interrupted = 1;
pthread->sigmask = pthread->oldsigmask;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
}
}
return;
return (kmbx);
}
SIGADDSET(pthread->sigpend, sig);
@ -967,11 +973,12 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
if (pthread->flags & THR_FLAGS_IN_RUNQ)
THR_RUNQ_REMOVE(pthread);
pthread->active_priority |= THR_SIGNAL_PRIORITY;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
} else {
pthread->check_pending = 1;
}
}
return (kmbx);
}
/*
@ -981,6 +988,7 @@ void
_thr_sig_send(struct pthread *pthread, int sig)
{
struct pthread *curthread = _get_curthread();
struct kse_mailbox *kmbx;
if (pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
kse_thr_interrupt(&pthread->tmbx, KSE_INTR_SENDSIG, sig);
@ -990,8 +998,10 @@ _thr_sig_send(struct pthread *pthread, int sig)
/* Lock the scheduling queue of the target thread. */
THR_SCHED_LOCK(curthread, pthread);
if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
_thr_sig_add(pthread, sig, NULL);
kmbx = _thr_sig_add(pthread, sig, NULL);
THR_SCHED_UNLOCK(curthread, pthread);
if (kmbx != NULL)
kse_wakeup(kmbx);
/* XXX
* If thread sent signal to itself, check signals now.
* It is not really needed, _kse_critical_leave should

View File

@ -20,6 +20,7 @@ _pthread_cancel(pthread_t pthread)
{
struct pthread *curthread = _get_curthread();
struct pthread *joinee = NULL;
struct kse_mailbox *kmbx = NULL;
int ret;
if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0)) == 0) {
@ -65,7 +66,7 @@ _pthread_cancel(pthread_t pthread)
/* Interrupt and resume: */
pthread->interrupted = 1;
pthread->cancelflags |= THR_CANCELLING;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
break;
case PS_JOIN:
@ -73,7 +74,7 @@ _pthread_cancel(pthread_t pthread)
joinee = pthread->join_status.thread;
pthread->join_status.thread = NULL;
pthread->cancelflags |= THR_CANCELLING;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
if ((joinee != NULL) &&
(pthread->kseg == joinee->kseg)) {
/* Remove the joiner from the joinee. */
@ -97,7 +98,7 @@ _pthread_cancel(pthread_t pthread)
*/
pthread->interrupted = 1;
pthread->cancelflags |= THR_CANCEL_NEEDED;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
pthread->continuation = finish_cancellation;
break;
@ -120,6 +121,8 @@ _pthread_cancel(pthread_t pthread)
*/
THR_SCHED_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);
if (kmbx != NULL)
kse_wakeup(kmbx);
if ((joinee != NULL) &&
(_thr_ref_add(curthread, joinee, /* include dead */1) == 0)) {

View File

@ -584,6 +584,7 @@ _pthread_cond_signal(pthread_cond_t * cond)
{
struct pthread *curthread = _get_curthread();
struct pthread *pthread;
struct kse_mailbox *kmbx;
int rval = 0;
THR_ASSERT(curthread->locklevel == 0,
@ -619,8 +620,10 @@ _pthread_cond_signal(pthread_cond_t * cond)
(pthread->active_priority >
curthread->active_priority))
curthread->critical_yield = 1;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
THR_SCHED_UNLOCK(curthread, pthread);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
/* Check for no more waiters: */
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
@ -649,6 +652,7 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
{
struct pthread *curthread = _get_curthread();
struct pthread *pthread;
struct kse_mailbox *kmbx;
int rval = 0;
THR_ASSERT(curthread->locklevel == 0,
@ -682,8 +686,10 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
(pthread->active_priority >
curthread->active_priority))
curthread->critical_yield = 1;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
THR_SCHED_UNLOCK(curthread, pthread);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
/* There are no more waiting threads: */

View File

@ -43,6 +43,7 @@ int
_pthread_detach(pthread_t pthread)
{
struct pthread *curthread = _get_curthread();
struct kse_mailbox *kmbx = NULL;
struct pthread *joiner;
int rval = 0;
@ -83,12 +84,11 @@ _pthread_detach(pthread_t pthread)
joiner->join_status.ret = NULL;
joiner->join_status.thread = NULL;
_thr_setrunnable_unlocked(joiner);
kmbx = _thr_setrunnable_unlocked(joiner);
}
joiner = NULL;
}
THR_SCHED_UNLOCK(curthread, pthread);
/* See if there is a thread waiting in pthread_join(): */
if ((joiner != NULL) &&
(_thr_ref_add(curthread, joiner, 0) == 0)) {
@ -102,12 +102,14 @@ _pthread_detach(pthread_t pthread)
joiner->join_status.ret = NULL;
joiner->join_status.thread = NULL;
_thr_setrunnable_unlocked(joiner);
kmbx = _thr_setrunnable_unlocked(joiner);
}
THR_SCHED_UNLOCK(curthread, joiner);
_thr_ref_delete(curthread, joiner);
}
_thr_ref_delete(curthread, pthread);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
/* Return the completion status: */

View File

@ -142,7 +142,7 @@ static void kseg_init(struct kse_group *kseg);
static void kseg_reinit(struct kse_group *kseg);
static void kse_waitq_insert(struct pthread *thread);
static void kse_wakeup_multi(struct kse *curkse);
static void kse_wakeup_one(struct pthread *thread);
static struct kse_mailbox *kse_wakeup_one(struct pthread *thread);
static void thr_cleanup(struct kse *kse, struct pthread *curthread);
static void thr_link(struct pthread *thread);
static void thr_resume_wrapper(int sig, siginfo_t *, ucontext_t *);
@ -341,7 +341,7 @@ _kse_single_thread(struct pthread *curthread)
#else
if (__isthreaded)
_thr_signal_deinit();
_ksd_readandclear_tmbx();
_ksd_set_tmbx(NULL);
__isthreaded = 0;
active_threads = 0;
#endif
@ -505,10 +505,9 @@ _thr_lock_wait(struct lock *lock, struct lockuser *lu)
struct pthread *curthread = (struct pthread *)lu->lu_private;
do {
THR_SCHED_LOCK(curthread, curthread);
THR_LOCK_SWITCH(curthread);
THR_SET_STATE(curthread, PS_LOCKWAIT);
THR_SCHED_UNLOCK(curthread, curthread);
_thr_sched_switch(curthread);
_thr_sched_switch_unlocked(curthread);
} while (!_LCK_GRANTED(lu));
}
@ -517,14 +516,17 @@ _thr_lock_wakeup(struct lock *lock, struct lockuser *lu)
{
struct pthread *thread;
struct pthread *curthread;
struct kse_mailbox *kmbx;
curthread = _get_curthread();
thread = (struct pthread *)_LCK_GET_PRIVATE(lu);
THR_SCHED_LOCK(curthread, thread);
_lock_grant(lock, lu);
_thr_setrunnable_unlocked(thread);
kmbx = _thr_setrunnable_unlocked(thread);
THR_SCHED_UNLOCK(curthread, thread);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
kse_critical_t
@ -532,7 +534,8 @@ _kse_critical_enter(void)
{
kse_critical_t crit;
crit = _ksd_readandclear_tmbx();
crit = _ksd_get_tmbx();
_ksd_set_tmbx(NULL);
return (crit);
}
@ -841,8 +844,8 @@ kse_sched_single(struct kse *curkse)
if (SIGISMEMBER(curthread->sigmask, i))
continue;
if (SIGISMEMBER(curthread->sigpend, i))
_thr_sig_add(curthread, i,
&curthread->siginfo[i-1]);
(void)_thr_sig_add(curthread, i,
&curthread->siginfo[i-1]);
}
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask,
NULL);
@ -1142,6 +1145,7 @@ static void
thr_cleanup(struct kse *curkse, struct pthread *thread)
{
struct pthread *joiner;
struct kse_mailbox *kmbx = NULL;
int sys_scope;
if ((joiner = thread->joiner) != NULL) {
@ -1150,7 +1154,7 @@ thr_cleanup(struct kse *curkse, struct pthread *thread)
if (joiner->join_status.thread == thread) {
joiner->join_status.thread = NULL;
joiner->join_status.ret = thread->ret;
_thr_setrunnable_unlocked(joiner);
(void)_thr_setrunnable_unlocked(joiner);
}
} else {
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
@ -1160,10 +1164,12 @@ thr_cleanup(struct kse *curkse, struct pthread *thread)
if (joiner->join_status.thread == thread) {
joiner->join_status.thread = NULL;
joiner->join_status.ret = thread->ret;
_thr_setrunnable_unlocked(joiner);
kmbx = _thr_setrunnable_unlocked(joiner);
}
KSE_SCHED_UNLOCK(curkse, joiner->kseg);
_thr_ref_delete(thread, joiner);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
}
@ -1436,7 +1442,8 @@ kse_check_completed(struct kse *kse)
if (SIGISMEMBER(thread->sigmask, sig))
SIGADDSET(thread->sigpend, sig);
else
_thr_sig_add(thread, sig, &thread->tmbx.tm_syncsig);
(void)_thr_sig_add(thread, sig,
&thread->tmbx.tm_syncsig);
thread->tmbx.tm_syncsig.si_signo = 0;
}
completed = completed->tm_next;
@ -1653,10 +1660,11 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
if (SIGISMEMBER(thread->sigmask, i))
continue;
if (SIGISMEMBER(thread->sigpend, i))
_thr_sig_add(thread, i, &thread->siginfo[i-1]);
(void)_thr_sig_add(thread, i,
&thread->siginfo[i-1]);
else if (SIGISMEMBER(_thr_proc_sigpending, i) &&
_thr_getprocsig_unlocked(i, &siginfo)) {
_thr_sig_add(thread, i, &siginfo);
(void)_thr_sig_add(thread, i, &siginfo);
}
}
KSE_LOCK_RELEASE(kse, &_thread_signal_lock);
@ -1823,23 +1831,31 @@ void
_thr_setrunnable(struct pthread *curthread, struct pthread *thread)
{
kse_critical_t crit;
struct kse_mailbox *kmbx;
crit = _kse_critical_enter();
KSE_SCHED_LOCK(curthread->kse, thread->kseg);
_thr_setrunnable_unlocked(thread);
kmbx = _thr_setrunnable_unlocked(thread);
KSE_SCHED_UNLOCK(curthread->kse, thread->kseg);
_kse_critical_leave(crit);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
void
struct kse_mailbox *
_thr_setrunnable_unlocked(struct pthread *thread)
{
struct kse_mailbox *kmbx = NULL;
if ((thread->kseg->kg_flags & KGF_SINGLE_THREAD) != 0) {
/* No silly queues for these threads. */
if ((thread->flags & THR_FLAGS_SUSPENDED) != 0)
THR_SET_STATE(thread, PS_SUSPENDED);
else
else {
THR_SET_STATE(thread, PS_RUNNING);
kmbx = kse_wakeup_one(thread);
}
} else if (thread->state != PS_RUNNING) {
if ((thread->flags & THR_FLAGS_IN_WAITQ) != 0)
KSE_WAITQ_REMOVE(thread->kse, thread);
@ -1850,25 +1866,31 @@ _thr_setrunnable_unlocked(struct pthread *thread)
if ((thread->blocked == 0) && (thread->active == 0) &&
(thread->flags & THR_FLAGS_IN_RUNQ) == 0)
THR_RUNQ_INSERT_TAIL(thread);
/*
* XXX - Threads are not yet assigned to specific
* KSEs; they are assigned to the KSEG. So
* the fact that a thread's KSE is waiting
* doesn't necessarily mean that it will be
* the KSE that runs the thread after the
* lock is granted. But we don't know if the
* other KSEs within the same KSEG are also
* in a waiting state or not so we err on the
* side of caution and wakeup the thread's
* last known KSE. We ensure that the
* thread's KSE doesn't change while its
* scheduling lock is held so it is safe to
* reference it (the KSE). If the KSE wakes
* up and doesn't find any more work it will
* again go back to waiting so no harm is
* done.
*/
kmbx = kse_wakeup_one(thread);
}
}
/*
* XXX - Threads are not yet assigned to specific KSEs; they are
* assigned to the KSEG. So the fact that a thread's KSE is
* waiting doesn't necessarily mean that it will be the KSE
* that runs the thread after the lock is granted. But we
* don't know if the other KSEs within the same KSEG are
* also in a waiting state or not so we err on the side of
* caution and wakeup the thread's last known KSE. We
* ensure that the thread's KSE doesn't change while its
* scheduling lock is held so it is safe to reference it
* (the KSE). If the KSE wakes up and doesn't find any more
* work it will again go back to waiting so no harm is done.
*/
kse_wakeup_one(thread);
return (kmbx);
}
static void
static struct kse_mailbox *
kse_wakeup_one(struct pthread *thread)
{
struct kse *ke;
@ -1876,17 +1898,17 @@ kse_wakeup_one(struct pthread *thread)
if (KSE_IS_IDLE(thread->kse)) {
KSE_CLEAR_IDLE(thread->kse);
thread->kseg->kg_idle_kses--;
KSE_WAKEUP(thread->kse);
return (&thread->kse->k_mbx);
} else {
TAILQ_FOREACH(ke, &thread->kseg->kg_kseq, k_kgqe) {
if (KSE_IS_IDLE(ke)) {
KSE_CLEAR_IDLE(ke);
ke->k_kseg->kg_idle_kses--;
KSE_WAKEUP(ke);
return;
return (&ke->k_mbx);
}
}
}
return (NULL);
}
static void

View File

@ -67,7 +67,8 @@
/*
* Prototypes
*/
static void mutex_handoff(struct pthread *, struct pthread_mutex *);
static struct kse_mailbox *mutex_handoff(struct pthread *,
struct pthread_mutex *);
static inline int mutex_self_trylock(struct pthread *, pthread_mutex_t);
static inline int mutex_self_lock(struct pthread *, pthread_mutex_t);
static int mutex_unlock_common(pthread_mutex_t *, int);
@ -860,6 +861,7 @@ static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
struct pthread *curthread = _get_curthread();
struct kse_mailbox *kmbx = NULL;
int ret = 0;
if (m == NULL || *m == NULL)
@ -904,7 +906,7 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
* Hand off the mutex to the next waiting
* thread:
*/
mutex_handoff(curthread, *m);
kmbx = mutex_handoff(curthread, *m);
}
break;
@ -961,7 +963,7 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
* Hand off the mutex to the next waiting
* thread:
*/
mutex_handoff(curthread, *m);
kmbx = mutex_handoff(curthread, *m);
}
break;
@ -1017,7 +1019,7 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
* Hand off the mutex to the next waiting
* thread:
*/
mutex_handoff(curthread, *m);
kmbx = mutex_handoff(curthread, *m);
}
break;
@ -1034,6 +1036,8 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
/* Return the completion status: */
@ -1460,9 +1464,10 @@ _mutex_lock_backout(struct pthread *curthread)
* is necessary to lock the thread's scheduling queue while also
* holding the mutex lock.
*/
static void
static struct kse_mailbox *
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
struct kse_mailbox *kmbx = NULL;
struct pthread *pthread;
/* Keep dequeueing until we find a valid thread: */
@ -1564,7 +1569,7 @@ mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
}
/* Make the thread runnable and unlock the scheduling queue: */
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
/* Add a preemption point. */
if ((curthread->kseg == pthread->kseg) &&
@ -1583,6 +1588,7 @@ mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
/* This mutex has no priority: */
mutex->m_prio = 0;
return (kmbx);
}
/*

View File

@ -1106,8 +1106,8 @@ void _thr_ref_delete(struct pthread *, struct pthread *);
int _thr_schedule_add(struct pthread *, struct pthread *);
void _thr_schedule_remove(struct pthread *, struct pthread *);
void _thr_setrunnable(struct pthread *curthread, struct pthread *thread);
void _thr_setrunnable_unlocked(struct pthread *thread);
void _thr_sig_add(struct pthread *, int, siginfo_t *);
struct kse_mailbox *_thr_setrunnable_unlocked(struct pthread *thread);
struct kse_mailbox *_thr_sig_add(struct pthread *, int, siginfo_t *);
void _thr_sig_dispatch(struct kse *, int, siginfo_t *);
int _thr_stack_alloc(struct pthread_attr *);
void _thr_stack_free(struct pthread_attr *);

View File

@ -35,7 +35,7 @@
#include <pthread.h>
#include "thr_private.h"
static void resume_common(struct pthread *);
static struct kse_mailbox *resume_common(struct pthread *);
__weak_reference(_pthread_resume_np, pthread_resume_np);
__weak_reference(_pthread_resume_all_np, pthread_resume_all_np);
@ -46,15 +46,18 @@ int
_pthread_resume_np(pthread_t thread)
{
struct pthread *curthread = _get_curthread();
struct kse_mailbox *kmbx;
int ret;
/* Add a reference to the thread: */
if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0)) == 0) {
/* Lock the threads scheduling queue: */
THR_SCHED_LOCK(curthread, thread);
resume_common(thread);
kmbx = resume_common(thread);
THR_SCHED_UNLOCK(curthread, thread);
_thr_ref_delete(curthread, thread);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
return (ret);
}
@ -64,6 +67,7 @@ _pthread_resume_all_np(void)
{
struct pthread *curthread = _get_curthread();
struct pthread *thread;
struct kse_mailbox *kmbx;
kse_critical_t crit;
/* Take the thread list lock: */
@ -73,8 +77,10 @@ _pthread_resume_all_np(void)
TAILQ_FOREACH(thread, &_thread_list, tle) {
if (thread != curthread) {
THR_SCHED_LOCK(curthread, thread);
resume_common(thread);
kmbx = resume_common(thread);
THR_SCHED_UNLOCK(curthread, thread);
if (kmbx != NULL)
kse_wakeup(kmbx);
}
}
@ -83,7 +89,7 @@ _pthread_resume_all_np(void)
_kse_critical_leave(crit);
}
static void
static struct kse_mailbox *
resume_common(struct pthread *thread)
{
/* Clear the suspend flag: */
@ -95,5 +101,7 @@ resume_common(struct pthread *thread)
* state to running and insert it into the run queue.
*/
if (thread->state == PS_SUSPENDED)
_thr_setrunnable_unlocked(thread);
return (_thr_setrunnable_unlocked(thread));
else
return (NULL);
}

View File

@ -249,6 +249,7 @@ _thr_start_sig_daemon(void)
void
_thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
{
struct kse_mailbox *kmbx;
struct pthread *thread;
DBG_MSG(">>> _thr_sig_dispatch(%d)\n", sig);
@ -280,9 +281,11 @@ _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
KSE_SCHED_UNLOCK(curkse, thread->kseg);
_thr_ref_delete(NULL, thread);
} else {
_thr_sig_add(thread, sig, info);
kmbx = _thr_sig_add(thread, sig, info);
KSE_SCHED_UNLOCK(curkse, thread->kseg);
_thr_ref_delete(NULL, thread);
if (kmbx != NULL)
kse_wakeup(kmbx);
break;
}
}
@ -533,6 +536,7 @@ _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo)
struct pthread *
thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
{
struct kse_mailbox *kmbx = NULL;
struct pthread *pthread;
struct pthread *suspended_thread, *signaled_thread;
__siginfohandler_t *sigfunc;
@ -582,7 +586,7 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
/* where to put siginfo ? */
*(pthread->data.sigwaitinfo) = si;
pthread->sigmask = pthread->oldsigmask;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
}
KSE_SCHED_UNLOCK(curkse, pthread->kseg);
/*
@ -596,6 +600,8 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
* to the process pending set.
*/
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
if (kmbx != NULL)
kse_wakeup(kmbx);
return (NULL);
} else if (!SIGISMEMBER(pthread->sigmask, sig) ||
(!SIGISMEMBER(pthread->oldsigmask, sig) &&
@ -811,15 +817,16 @@ handle_special_signals(struct kse *curkse, int sig)
*
* This must be called with the thread's scheduling lock held.
*/
void
struct kse_mailbox *
_thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
{
siginfo_t siginfo;
struct kse *curkse;
struct kse_mailbox *kmbx = NULL;
struct pthread *curthread = _get_curthread();
int restart;
int suppress_handler = 0;
int fromproc = 0;
struct pthread *curthread = _get_curthread();
struct kse *curkse;
siginfo_t siginfo;
DBG_MSG(">>> _thr_sig_add %p (%d)\n", pthread, sig);
@ -829,12 +836,12 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK ||
pthread->state == PS_STATE_MAX)
return; /* return false */
return (NULL); /* return false */
if ((pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
(curthread != pthread)) {
PANIC("Please use _thr_send_sig for bound thread");
return;
return (NULL);
}
if (pthread->curframe == NULL ||
@ -851,7 +858,7 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
sizeof(*info));
} else {
if (!_thr_getprocsig(sig, &pthread->siginfo[sig-1]))
return;
return (NULL);
SIGADDSET(pthread->sigpend, sig);
}
if (!SIGISMEMBER(pthread->sigmask, sig)) {
@ -867,7 +874,7 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
/* if process signal not exists, just return */
if (fromproc) {
if (!_thr_getprocsig(sig, &siginfo))
return;
return (NULL);
info = &siginfo;
}
/*
@ -877,7 +884,7 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
case PS_DEAD:
case PS_DEADLOCK:
case PS_STATE_MAX:
return; /* XXX return false */
return (NULL); /* XXX return false */
case PS_LOCKWAIT:
case PS_SUSPENDED:
/*
@ -937,7 +944,7 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
*(pthread->data.sigwaitinfo) = pthread->siginfo[sig-1];
pthread->sigmask = pthread->oldsigmask;
/* Make the thread runnable: */
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
} else {
/* Increment the pending signal count. */
SIGADDSET(pthread->sigpend, sig);
@ -945,11 +952,10 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
pthread->check_pending = 1;
pthread->interrupted = 1;
pthread->sigmask = pthread->oldsigmask;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
}
}
return;
return (kmbx);
}
SIGADDSET(pthread->sigpend, sig);
@ -967,11 +973,12 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
if (pthread->flags & THR_FLAGS_IN_RUNQ)
THR_RUNQ_REMOVE(pthread);
pthread->active_priority |= THR_SIGNAL_PRIORITY;
_thr_setrunnable_unlocked(pthread);
kmbx = _thr_setrunnable_unlocked(pthread);
} else {
pthread->check_pending = 1;
}
}
return (kmbx);
}
/*
@ -981,6 +988,7 @@ void
_thr_sig_send(struct pthread *pthread, int sig)
{
struct pthread *curthread = _get_curthread();
struct kse_mailbox *kmbx;
if (pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
kse_thr_interrupt(&pthread->tmbx, KSE_INTR_SENDSIG, sig);
@ -990,8 +998,10 @@ _thr_sig_send(struct pthread *pthread, int sig)
/* Lock the scheduling queue of the target thread. */
THR_SCHED_LOCK(curthread, pthread);
if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
_thr_sig_add(pthread, sig, NULL);
kmbx = _thr_sig_add(pthread, sig, NULL);
THR_SCHED_UNLOCK(curthread, pthread);
if (kmbx != NULL)
kse_wakeup(kmbx);
/* XXX
* If thread sent signal to itself, check signals now.
* It is not really needed, _kse_critical_leave should