Revamp libpthread so that it has a chance of working in an SMP
environment. This includes support for multiple KSEs and KSEGs.
The ability to create more than one KSE via pthread_setconcurrency()
is in the works, as is support for PTHREAD_SCOPE_SYSTEM threads;
those should come shortly.

There are still some known issues that davidxu and I are working on,
but committing what we have will make it easier for us.

This library now passes all of the ACE tests that libc_r passes,
with the exception of one. It also seems to work OK with KDE,
including konqueror, kwrite, etc. I haven't been able to get mozilla
to run due to lack of a java plugin, so I'd be interested to see how
it works with that.

Reviewed by: davidxu
commit a0240e2cb0
parent b025fc9a31
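As a hedged illustration of the features the message describes (this is not
code from the commit): once pthread_setconcurrency() and PTHREAD_SCOPE_SYSTEM
support land, an application would reach them through the standard POSIX calls
sketched below. The concurrency level of 4 and the error handling are
illustrative assumptions, not values taken from this change.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *
worker(void *arg)
{
	/* Trivial start routine; the argument is just an id for printing. */
	printf("worker %ld running\n", (long)arg);
	return (NULL);
}

int
main(void)
{
	pthread_attr_t attr;
	pthread_t tid;

	/* Hint that up to 4 KSEs may back the process-scope threads. */
	if (pthread_setconcurrency(4) != 0)
		fprintf(stderr, "pthread_setconcurrency not supported yet\n");

	pthread_attr_init(&attr);
	/* Ask for a system-scope thread (its own KSE/KSEG pair). */
	if (pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) != 0)
		fprintf(stderr, "PTHREAD_SCOPE_SYSTEM not supported yet\n");

	if (pthread_create(&tid, &attr, worker, (void *)1L) != 0)
		exit(1);
	pthread_join(tid, NULL);
	pthread_attr_destroy(&attr);
	return (0);
}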
@@ -9,16 +9,18 @@
# system call stubs.
LIB=kse
SHLIB_MAJOR= 1
CFLAGS+=-DPTHREAD_KERNEL -D_THREAD_SAFE
CFLAGS+=-DPTHREAD_KERNEL
CFLAGS+=-I${.CURDIR}/../libc/include -I${.CURDIR}/thread \
-I${.CURDIR}/../../include
CFLAGS+=-I${.CURDIR}/arch/${MACHINE_ARCH}/include
CFLAGS+=-I${.CURDIR}/sys

# Uncomment this if you want libpthread to contain debug information for
# thread locking.
CFLAGS+=-D_LOCK_DEBUG
CFLAGS+=-D_LOCK_DEBUG -g

# enable extra internal consistancy checks
CFLAGS+=-D_PTHREADS_INVARIANTS
CFLAGS+=-D_PTHREADS_INVARIANTS -Wall

AINC= -I${.CURDIR}/../libc/${MACHINE_ARCH} -I${.CURDIR}/thread
PRECIOUSLIB= yes

@@ -5,6 +5,7 @@

SRCS+= \
thr_aio_suspend.c \
thr_autoinit.c \
thr_attr_destroy.c \
thr_attr_init.c \
thr_attr_get_np.c \
@@ -27,7 +28,6 @@ SRCS+= \
thr_attr_setstack.c \
thr_attr_setstackaddr.c \
thr_attr_setstacksize.c \
thr_autoinit.c \
thr_cancel.c \
thr_clean.c \
thr_close.c \
@@ -43,7 +43,6 @@ SRCS+= \
thr_find_thread.c \
thr_fork.c \
thr_fsync.c \
thr_gc.c \
thr_getprio.c \
thr_getschedparam.c \
thr_info.c \
@@ -82,6 +81,8 @@ SRCS+= \
thr_sig.c \
thr_sigaction.c \
thr_sigmask.c \
thr_sigpending.c \
thr_sigprocmask.c \
thr_sigsuspend.c \
thr_sigwait.c \
thr_single_np.c \

@@ -39,12 +39,13 @@ int
_aio_suspend(const struct aiocb * const iocbs[], int niocb, const struct
timespec *timeout)
{
int ret;
struct pthread *curthread = _get_curthread();
int ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __sys_aio_suspend(iocbs, niocb, timeout);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
return (ret);
}

@@ -36,22 +36,18 @@ __weak_reference(_pthread_attr_get_np, pthread_attr_get_np);
int
_pthread_attr_get_np(pthread_t pid, pthread_attr_t *dst)
{
struct pthread *curthread;
int ret;

if (pid == NULL || dst == NULL || *dst == NULL)
return (EINVAL);

if ((ret = _find_thread(pid)) != 0)
curthread = _get_curthread();
if ((ret = _thr_ref_add(curthread, pid, /*include dead*/0)) != 0)
return (ret);

memcpy(*dst, &pid->attr, sizeof(struct pthread_attr));

/*
* Special case, if stack address was not provided by caller
* of pthread_create(), then return address allocated internally
*/
if ((*dst)->stackaddr_attr == NULL)
(*dst)->stackaddr_attr = pid->stack;
_thr_ref_delete(curthread, pid);

return (0);
}

@@ -51,7 +51,8 @@ _pthread_attr_init(pthread_attr_t *attr)
ret = ENOMEM;
else {
/* Initialise the attribute object with the defaults: */
memcpy(pattr, &pthread_attr_default, sizeof(struct pthread_attr));
memcpy(pattr, &_pthread_attr_default,
sizeof(struct pthread_attr));

/* Return a pointer to the attribute object: */
*attr = pattr;

@@ -45,7 +45,7 @@ _pthread_attr_setcreatesuspend_np(pthread_attr_t *attr)
errno = EINVAL;
ret = -1;
} else {
(*attr)->suspend = PTHREAD_CREATE_SUSPENDED;
(*attr)->suspend = THR_CREATE_SUSPENDED;
ret = 0;
}
return(ret);

@@ -47,11 +47,11 @@ _pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
else {
/*
* Round guardsize up to the nearest multiple of
* _pthread_page_size.
* _thr_page_size.
*/
if (guardsize % _pthread_page_size != 0)
guardsize = ((guardsize / _pthread_page_size) + 1) *
_pthread_page_size;
if (guardsize % _thr_page_size != 0)
guardsize = ((guardsize / _thr_page_size) + 1) *
_thr_page_size;

/* Save the stack size. */
(*attr)->guardsize_attr = guardsize;

@@ -46,8 +46,8 @@ _pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *para
ret = EINVAL;
else if (param == NULL) {
ret = ENOTSUP;
} else if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
(param->sched_priority > PTHREAD_MAX_PRIORITY)) {
} else if ((param->sched_priority < THR_MIN_PRIORITY) ||
(param->sched_priority > THR_MAX_PRIORITY)) {
/* Return an unsupported value error. */
ret = ENOTSUP;
} else

@@ -45,12 +45,11 @@ _pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
if ((attr == NULL) || (*attr == NULL)) {
/* Return an invalid argument: */
ret = EINVAL;
} else if ((contentionscope != PTHREAD_SCOPE_PROCESS) ||
(contentionscope == PTHREAD_SCOPE_SYSTEM)) {
/* We don't support PTHREAD_SCOPE_SYSTEM. */
ret = ENOTSUP;
} else if ((contentionscope != PTHREAD_SCOPE_PROCESS) &&
(contentionscope != PTHREAD_SCOPE_SYSTEM)) {
ret = EINVAL;
} else
(*attr)->flags |= contentionscope;

return(ret);
return (ret);
}

@@ -38,13 +38,16 @@
* threads package at program start-up time.
*/

#include <pthread.h>
#include "thr_private.h"

void _thread_init_hack(void) __attribute__ ((constructor));

void
_thread_init_hack(void)
{

_thread_init();
_libpthread_init(NULL);
}

/*

@@ -6,32 +6,32 @@
#include <pthread.h>
#include "thr_private.h"

static void finish_cancellation(void *arg);

__weak_reference(_pthread_cancel, pthread_cancel);
__weak_reference(_pthread_setcancelstate, pthread_setcancelstate);
__weak_reference(_pthread_setcanceltype, pthread_setcanceltype);
__weak_reference(_pthread_testcancel, pthread_testcancel);

static int checkcancel(struct pthread *curthread);
static void testcancel(struct pthread *curthread);
static void finish_cancellation(void *arg);

int
_pthread_cancel(pthread_t pthread)
{
struct pthread *curthread = _get_curthread();
int ret;

if ((ret = _find_thread(pthread)) != 0) {
/* NOTHING */
} else if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK
|| (pthread->flags & PTHREAD_EXITING) != 0) {
ret = 0;
} else {
/* Protect the scheduling queues: */
_thread_kern_sig_defer();
if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0)) == 0) {
/*
* Take the scheduling lock while we change the cancel flags.
*/
THR_SCHED_LOCK(curthread, pthread);

if (((pthread->cancelflags & PTHREAD_CANCEL_DISABLE) != 0) ||
(((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0) &&
((pthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0)))
(((pthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0)))
/* Just mark it for cancellation: */
pthread->cancelflags |= PTHREAD_CANCELLING;
pthread->cancelflags |= THR_CANCELLING;
else {
/*
* Check if we need to kick it back into the
@@ -40,23 +40,27 @@ _pthread_cancel(pthread_t pthread)
switch (pthread->state) {
case PS_RUNNING:
/* No need to resume: */
pthread->cancelflags |= PTHREAD_CANCELLING;
pthread->cancelflags |= THR_CANCELLING;
break;

case PS_LOCKWAIT:
/*
* These can't be removed from the queue.
* Just mark it as cancelling and tell it
* to yield once it leaves the critical
* region.
*/
pthread->cancelflags |= THR_CANCELLING;
pthread->critical_yield = 1;
break;

case PS_SPINBLOCK:
/* Remove these threads from the work queue: */
if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
!= 0)
PTHREAD_WORKQ_REMOVE(pthread);
/* Fall through: */
case PS_SLEEP_WAIT:
case PS_WAIT_WAIT:
case PS_SIGSUSPEND:
case PS_SIGWAIT:
/* Interrupt and resume: */
pthread->interrupted = 1;
pthread->cancelflags |= PTHREAD_CANCELLING;
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
pthread->cancelflags |= THR_CANCELLING;
_thr_setrunnable_unlocked(pthread);
break;

case PS_JOIN:
@@ -68,8 +72,8 @@ _pthread_cancel(pthread_t pthread)
= NULL;
pthread->join_status.thread = NULL;
}
pthread->cancelflags |= PTHREAD_CANCELLING;
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
pthread->cancelflags |= THR_CANCELLING;
_thr_setrunnable_unlocked(pthread);
break;

case PS_SUSPENDED:
@@ -86,8 +90,8 @@ _pthread_cancel(pthread_t pthread)
* cancellation completion routine.
*/
pthread->interrupted = 1;
pthread->cancelflags |= PTHREAD_CANCEL_NEEDED;
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
pthread->cancelflags |= THR_CANCEL_NEEDED;
_thr_setrunnable_unlocked(pthread);
pthread->continuation = finish_cancellation;
break;

@@ -97,12 +101,17 @@ _pthread_cancel(pthread_t pthread)
/* Ignore - only here to silence -Wall: */
break;
}
if ((pthread->blocked != 0) &&
((pthread->cancelflags & THR_AT_CANCEL_POINT) != 0))
kse_thr_interrupt(&pthread->tmbx);
}

/* Unprotect the scheduling queues: */
_thread_kern_sig_undefer();

ret = 0;
/*
* Release the thread's scheduling lock and remove the
* reference:
*/
THR_SCHED_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);
}
return (ret);
}
@@ -113,6 +122,10 @@ _pthread_setcancelstate(int state, int *oldstate)
struct pthread *curthread = _get_curthread();
int ostate;
int ret;
int need_exit = 0;

/* Take the scheduling lock while fiddling with the thread's state: */
THR_SCHED_LOCK(curthread, curthread);

ostate = curthread->cancelflags & PTHREAD_CANCEL_DISABLE;

@@ -122,7 +135,7 @@ _pthread_setcancelstate(int state, int *oldstate)
*oldstate = ostate;
curthread->cancelflags &= ~PTHREAD_CANCEL_DISABLE;
if ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)
pthread_testcancel();
need_exit = checkcancel(curthread);
ret = 0;
break;
case PTHREAD_CANCEL_DISABLE:
@@ -135,6 +148,12 @@ _pthread_setcancelstate(int state, int *oldstate)
ret = EINVAL;
}

THR_SCHED_UNLOCK(curthread, curthread);
if (need_exit != 0) {
_thr_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
PANIC("cancel");
}
return (ret);
}

@@ -144,6 +163,10 @@ _pthread_setcanceltype(int type, int *oldtype)
struct pthread *curthread = _get_curthread();
int otype;
int ret;
int need_exit = 0;

/* Take the scheduling lock while fiddling with the state: */
THR_SCHED_LOCK(curthread, curthread);

otype = curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS;
switch (type) {
@@ -151,7 +174,7 @@ _pthread_setcanceltype(int type, int *oldtype)
if (oldtype != NULL)
*oldtype = otype;
curthread->cancelflags |= PTHREAD_CANCEL_ASYNCHRONOUS;
pthread_testcancel();
need_exit = checkcancel(curthread);
ret = 0;
break;
case PTHREAD_CANCEL_DEFERRED:
@@ -164,47 +187,72 @@ _pthread_setcanceltype(int type, int *oldtype)
ret = EINVAL;
}

THR_SCHED_UNLOCK(curthread, curthread);
if (need_exit != 0) {
_thr_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
PANIC("cancel");
}
return (ret);
}

static int
checkcancel(struct pthread *curthread)
{
if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) &&
((curthread->cancelflags & THR_CANCELLING) != 0)) {
/*
* It is possible for this thread to be swapped out
* while performing cancellation; do not allow it
* to be cancelled again.
*/
curthread->cancelflags &= ~THR_CANCELLING;
return (1);
}
else
return (0);
}

static void
testcancel(struct pthread *curthread)
{
/* Take the scheduling lock while fiddling with the state: */
THR_SCHED_LOCK(curthread, curthread);

if (checkcancel(curthread) != 0) {
/* Unlock before exiting: */
THR_SCHED_UNLOCK(curthread, curthread);

_thr_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
PANIC("cancel");
}

THR_SCHED_UNLOCK(curthread, curthread);
}

void
_pthread_testcancel(void)
{
struct pthread *curthread = _get_curthread();

if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) &&
((curthread->cancelflags & PTHREAD_CANCELLING) != 0) &&
((curthread->flags & PTHREAD_EXITING) == 0)) {
/*
* It is possible for this thread to be swapped out
* while performing cancellation; do not allow it
* to be cancelled again.
*/
curthread->cancelflags &= ~PTHREAD_CANCELLING;
_thread_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
PANIC("cancel");
}
testcancel(curthread);
}

void
_thread_enter_cancellation_point(void)
_thr_enter_cancellation_point(struct pthread *thread)
{
struct pthread *curthread = _get_curthread();

/* Look for a cancellation before we block: */
pthread_testcancel();
curthread->cancelflags |= PTHREAD_AT_CANCEL_POINT;
testcancel(thread);
thread->cancelflags |= THR_AT_CANCEL_POINT;
}

void
_thread_leave_cancellation_point(void)
_thr_leave_cancellation_point(struct pthread *thread)
{
struct pthread *curthread = _get_curthread();

curthread->cancelflags &= ~PTHREAD_AT_CANCEL_POINT;
thread->cancelflags &= ~THR_AT_CANCEL_POINT;
/* Look for a cancellation after we unblock: */
pthread_testcancel();
testcancel(thread);
}

static void
@@ -215,9 +263,9 @@ finish_cancellation(void *arg)
curthread->continuation = NULL;
curthread->interrupted = 0;

if ((curthread->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) {
curthread->cancelflags &= ~PTHREAD_CANCEL_NEEDED;
_thread_exit_cleanup();
if ((curthread->cancelflags & THR_CANCEL_NEEDED) != 0) {
curthread->cancelflags &= ~THR_CANCEL_NEEDED;
_thr_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
}
}

@@ -46,7 +46,8 @@ _pthread_cleanup_push(void (*routine) (void *), void *routine_arg)
struct pthread *curthread = _get_curthread();
struct pthread_cleanup *new;

if ((new = (struct pthread_cleanup *) malloc(sizeof(struct pthread_cleanup))) != NULL) {
if ((new = (struct pthread_cleanup *)
malloc(sizeof(struct pthread_cleanup))) != NULL) {
new->routine = routine;
new->routine_arg = routine_arg;
new->next = curthread->cleanup;
@@ -69,4 +70,3 @@ _pthread_cleanup_pop(int execute)
free(old);
}
}

@@ -44,11 +44,12 @@ __weak_reference(__close, close);
int
__close(int fd)
{
struct pthread *curthread = _get_curthread();
int ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __sys_close(fd);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
return (ret);
}

@@ -37,12 +37,17 @@
#include <pthread.h>
#include "thr_private.h"

#define THR_IN_CONDQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define THR_IN_CONDQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define THR_CONDQ_SET(thr) (thr)->sflags |= THR_FLAGS_IN_SYNCQ
#define THR_CONDQ_CLEAR(thr) (thr)->sflags &= ~THR_FLAGS_IN_SYNCQ

/*
* Prototypes
*/
static inline pthread_t cond_queue_deq(pthread_cond_t);
static inline void cond_queue_remove(pthread_cond_t, pthread_t);
static inline void cond_queue_enq(pthread_cond_t, pthread_t);
static inline struct pthread *cond_queue_deq(pthread_cond_t);
static inline void cond_queue_remove(pthread_cond_t, pthread_t);
static inline void cond_queue_enq(pthread_cond_t, pthread_t);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
@@ -52,35 +57,12 @@ __weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);

/* Reinitialize a condition variable to defaults. */
int
_cond_reinit(pthread_cond_t *cond)
{
int ret = 0;

if (cond == NULL)
ret = EINVAL;
else if (*cond == NULL)
ret = pthread_cond_init(cond, NULL);
else {
/*
* Initialize the condition variable structure:
*/
TAILQ_INIT(&(*cond)->c_queue);
(*cond)->c_flags = COND_FLAGS_INITED;
(*cond)->c_type = COND_TYPE_FAST;
(*cond)->c_mutex = NULL;
(*cond)->c_seqno = 0;
memset(&(*cond)->lock, 0, sizeof((*cond)->lock));
}
return (ret);
}

int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
enum pthread_cond_type type;
pthread_cond_t pcond;
int flags;
int rval = 0;

if (cond == NULL)
@@ -93,9 +75,11 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
if (cond_attr != NULL && *cond_attr != NULL) {
/* Default to a fast condition variable: */
type = (*cond_attr)->c_type;
flags = (*cond_attr)->c_flags;
} else {
/* Default to a fast condition variable: */
type = COND_TYPE_FAST;
flags = 0;
}

/* Process according to condition variable type: */
@@ -117,6 +101,10 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
if ((pcond = (pthread_cond_t)
malloc(sizeof(struct pthread_cond))) == NULL) {
rval = ENOMEM;
} else if (_lock_init(&pcond->c_lock, LCK_ADAPTIVE,
_kse_lock_wait, _kse_lock_wakeup) != 0) {
free(pcond);
rval = ENOMEM;
} else {
/*
* Initialise the condition variable
@@ -127,7 +115,6 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
pcond->c_type = type;
pcond->c_mutex = NULL;
pcond->c_seqno = 0;
memset(&pcond->lock,0,sizeof(pcond->lock));
*cond = pcond;
}
}
@@ -139,25 +126,32 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
int
_pthread_cond_destroy(pthread_cond_t *cond)
{
int rval = 0;
struct pthread_cond *cv;
struct pthread *curthread = _get_curthread();
int rval = 0;

if (cond == NULL || *cond == NULL)
rval = EINVAL;
else {
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);

/*
* Free the memory allocated for the condition
* variable structure:
*/
free(*cond);
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

/*
* NULL the caller's pointer now that the condition
* variable has been destroyed:
*/
cv = *cond;
*cond = NULL;

/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &cv->c_lock);

/*
* Free the memory allocated for the condition
* variable structure:
*/
free(cv);

}
/* Return the completion status: */
return (rval);
@@ -170,20 +164,25 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
int rval = 0;
int done = 0;
int interrupted = 0;
int unlock_mutex = 1;
int seqno;

_thread_enter_cancellation_point();

if (cond == NULL)
_thr_enter_cancellation_point(curthread);

if (cond == NULL) {
_thr_leave_cancellation_point(curthread);
return (EINVAL);
}

/*
* If the condition variable is statically initialized,
* perform the dynamic initialization:
*/
if (*cond == NULL &&
(rval = pthread_cond_init(cond, NULL)) != 0)
(rval = pthread_cond_init(cond, NULL)) != 0) {
_thr_leave_cancellation_point(curthread);
return (rval);
}

/*
* Enter a loop waiting for a condition signal or broadcast
@@ -196,7 +195,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
*/
do {
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

/*
* If the condvar was statically allocated, properly
@@ -214,7 +213,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
((*cond)->c_mutex != *mutex))) {
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

/* Return invalid argument error: */
rval = EINVAL;
@@ -237,7 +236,8 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
curthread->wakeup_time.tv_sec = -1;

/* Unlock the mutex: */
if ((rval = _mutex_cv_unlock(mutex)) != 0) {
if ((unlock_mutex != 0) &&
((rval = _mutex_cv_unlock(mutex)) != 0)) {
/*
* Cannot unlock the mutex, so remove
* the running thread from the condition
@@ -246,45 +246,60 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
cond_queue_remove(*cond, curthread);

/* Check for no more waiters: */
if (TAILQ_FIRST(&(*cond)->c_queue) ==
NULL)
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;

/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
} else {
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
}
else {
/*
* Schedule the next thread and unlock
* the condition variable structure:
* Don't unlock the mutex the next
* time through the loop (if the
* thread has to be requeued after
* handling a signal).
*/
_thread_kern_sched_state_unlock(PS_COND_WAIT,
&(*cond)->lock, __FILE__, __LINE__);
unlock_mutex = 0;

/*
* This thread is active and is in a
* critical region (holding the cv
* lock); we should be able to safely
* set the state.
*/
THR_SET_STATE(curthread, PS_COND_WAIT);

/* Remember the CV: */
curthread->data.cond = *cond;

/* Unlock the CV structure: */
THR_LOCK_RELEASE(curthread,
&(*cond)->c_lock);

/* Schedule the next thread: */
_thr_sched_switch(curthread);

curthread->data.cond = NULL;

/*
* XXX - This really isn't a good check
* since there can be more than one
* thread waiting on the CV. Signals
* sent to threads waiting on mutexes
* or CVs should really be deferred
* until the threads are no longer
* waiting, but POSIX says that signals
* should be sent "as soon as possible".
*/
done = (seqno != (*cond)->c_seqno);

interrupted = curthread->interrupted;

/*
* Check if the wait was interrupted
* (canceled) or needs to be resumed
* after handling a signal.
*/
if (interrupted != 0) {
/*
* Lock the mutex and ignore any
* errors. Note that even
* though this thread may have
* been canceled, POSIX requires
* that the mutex be reaquired
* prior to cancellation.
*/
(void)_mutex_cv_lock(mutex);
} else {
if (THR_IN_SYNCQ(curthread)) {
/*
* Lock the condition variable
* while removing the thread.
*/
_SPINLOCK(&(*cond)->lock);
THR_LOCK_ACQUIRE(curthread,
&(*cond)->c_lock);

cond_queue_remove(*cond,
curthread);
@@ -293,11 +308,24 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;

_SPINUNLOCK(&(*cond)->lock);

/* Lock the mutex: */
rval = _mutex_cv_lock(mutex);
THR_LOCK_RELEASE(curthread,
&(*cond)->c_lock);
}

/*
* Save the interrupted flag; locking
* the mutex may destroy it.
*/
interrupted = curthread->interrupted;

/*
* Note that even though this thread may
* have been canceled, POSIX requires
* that the mutex be reaquired prior to
* cancellation.
*/
if (done != 0)
rval = _mutex_cv_lock(mutex);
}
}
break;
@@ -305,7 +333,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
/* Trap invalid condition variable types: */
default:
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

/* Return an invalid argument error: */
rval = EINVAL;
@@ -316,12 +344,24 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
curthread->continuation((void *) curthread);
} while ((done == 0) && (rval == 0));

_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

/* Return the completion status: */
return (rval);
}

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
struct pthread *curthread = _get_curthread();
int ret;

_thr_enter_cancellation_point(curthread);
ret = _pthread_cond_wait(cond, mutex);
_thr_leave_cancellation_point(curthread);
return (ret);
}

int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
const struct timespec * abstime)
@@ -330,19 +370,24 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
int rval = 0;
int done = 0;
int interrupted = 0;
int unlock_mutex = 1;
int seqno;

_thread_enter_cancellation_point();

_thr_enter_cancellation_point(curthread);

if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
abstime->tv_nsec >= 1000000000)
abstime->tv_nsec >= 1000000000) {
_thr_leave_cancellation_point(curthread);
return (EINVAL);
}
/*
* If the condition variable is statically initialized, perform dynamic
* initialization.
*/
if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0)
if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) {
_thr_leave_cancellation_point(curthread);
return (rval);
}

/*
* Enter a loop waiting for a condition signal or broadcast
@@ -355,7 +400,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
*/
do {
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

/*
* If the condvar was statically allocated, properly
@@ -376,11 +421,10 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
rval = EINVAL;

/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
} else {
/* Set the wakeup time: */
curthread->wakeup_time.tv_sec =
abstime->tv_sec;
curthread->wakeup_time.tv_sec = abstime->tv_sec;
curthread->wakeup_time.tv_nsec =
abstime->tv_nsec;

@@ -399,10 +443,11 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
seqno = (*cond)->c_seqno;

/* Unlock the mutex: */
if ((rval = _mutex_cv_unlock(mutex)) != 0) {
if ((unlock_mutex != 0) &&
((rval = _mutex_cv_unlock(mutex)) != 0)) {
/*
* Cannot unlock the mutex, so remove
* the running thread from the condition
* Cannot unlock the mutex; remove the
* running thread from the condition
* variable queue:
*/
cond_queue_remove(*cond, curthread);
@@ -412,40 +457,55 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
(*cond)->c_mutex = NULL;

/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
} else {
/*
* Schedule the next thread and unlock
* the condition variable structure:
* Don't unlock the mutex the next
* time through the loop (if the
* thread has to be requeued after
* handling a signal).
*/
_thread_kern_sched_state_unlock(PS_COND_WAIT,
&(*cond)->lock, __FILE__, __LINE__);

done = (seqno != (*cond)->c_seqno);

interrupted = curthread->interrupted;
unlock_mutex = 0;

/*
* Check if the wait was interrupted
* (canceled) or needs to be resumed
* after handling a signal.
* This thread is active and is in a
* critical region (holding the cv
* lock); we should be able to safely
* set the state.
*/
if (interrupted != 0) {
/*
* Lock the mutex and ignore any
* errors. Note that even
* though this thread may have
* been canceled, POSIX requires
* that the mutex be reaquired
* prior to cancellation.
*/
(void)_mutex_cv_lock(mutex);
} else {
THR_SET_STATE(curthread, PS_COND_WAIT);

/* Remember the CV: */
curthread->data.cond = *cond;

/* Unlock the CV structure: */
THR_LOCK_RELEASE(curthread,
&(*cond)->c_lock);

/* Schedule the next thread: */
_thr_sched_switch(curthread);

curthread->data.cond = NULL;

/*
* XXX - This really isn't a good check
* since there can be more than one
* thread waiting on the CV. Signals
* sent to threads waiting on mutexes
* or CVs should really be deferred
* until the threads are no longer
* waiting, but POSIX says that signals
* should be sent "as soon as possible".
*/
done = (seqno != (*cond)->c_seqno);

if (THR_IN_CONDQ(curthread)) {
/*
* Lock the condition variable
* while removing the thread.
*/
_SPINLOCK(&(*cond)->lock);
THR_LOCK_ACQUIRE(curthread,
&(*cond)->c_lock);

cond_queue_remove(*cond,
curthread);
@@ -454,21 +514,22 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;

_SPINUNLOCK(&(*cond)->lock);

/* Lock the mutex: */
rval = _mutex_cv_lock(mutex);

/*
* Return ETIMEDOUT if the wait
* timed out and there wasn't an
* error locking the mutex:
*/
if ((curthread->timeout != 0)
&& rval == 0)
rval = ETIMEDOUT;

THR_LOCK_RELEASE(curthread,
&(*cond)->c_lock);
}

/*
* Save the interrupted flag; locking
* the mutex may destroy it.
*/
interrupted = curthread->interrupted;
if (curthread->timeout != 0) {
/* The wait timedout. */
rval = ETIMEDOUT;
(void)_mutex_cv_lock(mutex);
} else if ((interrupted == 0) ||
(done != 0))
rval = _mutex_cv_lock(mutex);
}
}
break;
@@ -476,7 +537,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
/* Trap invalid condition variable types: */
default:
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

/* Return an invalid argument error: */
rval = EINVAL;
@@ -484,20 +545,35 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
}

if ((interrupted != 0) && (curthread->continuation != NULL))
curthread->continuation((void *) curthread);
curthread->continuation((void *)curthread);
} while ((done == 0) && (rval == 0));

_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

/* Return the completion status: */
return (rval);
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
const struct timespec *abstime)
{
struct pthread *curthread = _get_curthread();
int ret;

_thr_enter_cancellation_point(curthread);
ret = _pthread_cond_timedwait(cond, mutex, abstime);
_thr_leave_cancellation_point(curthread);
return (ret);
}

int
_pthread_cond_signal(pthread_cond_t * cond)
{
int rval = 0;
pthread_t pthread;
struct pthread *curthread = _get_curthread();
struct pthread *pthread;
int rval = 0;

if (cond == NULL)
rval = EINVAL;
@@ -506,14 +582,8 @@ _pthread_cond_signal(pthread_cond_t * cond)
* initialization.
*/
else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
/*
* Defer signals to protect the scheduling queues
* from access by the signal handler:
*/
_thread_kern_sig_defer();

/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

/* Process according to condition variable type: */
switch ((*cond)->c_type) {
@@ -522,13 +592,19 @@ _pthread_cond_signal(pthread_cond_t * cond)
/* Increment the sequence number: */
(*cond)->c_seqno++;

if ((pthread = cond_queue_deq(*cond)) != NULL) {
/*
* Wake up the signaled thread:
*/
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
/*
* Wakeups have to be done with the CV lock held;
* otherwise there is a race condition where the
* thread can timeout, run on another KSE, and enter
* another blocking state (including blocking on a CV).
*/
if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
!= NULL) {
THR_SCHED_LOCK(curthread, pthread);
cond_queue_remove(*cond, pthread);
_thr_setrunnable_unlocked(pthread);
THR_SCHED_UNLOCK(curthread, pthread);
}

/* Check for no more waiters: */
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;
@@ -542,13 +618,7 @@ _pthread_cond_signal(pthread_cond_t * cond)
}

/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);

/*
* Undefer and handle pending signals, yielding if
* necessary:
*/
_thread_kern_sig_undefer();
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
}

/* Return the completion status: */
@@ -558,8 +628,9 @@ _pthread_cond_signal(pthread_cond_t * cond)
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
int rval = 0;
pthread_t pthread;
struct pthread *curthread = _get_curthread();
struct pthread *pthread;
int rval = 0;

if (cond == NULL)
rval = EINVAL;
@@ -568,14 +639,8 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
* initialization.
*/
else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
/*
* Defer signals to protect the scheduling queues
* from access by the signal handler:
*/
_thread_kern_sig_defer();

/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

/* Process according to condition variable type: */
switch ((*cond)->c_type) {
@@ -588,11 +653,12 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
* Enter a loop to bring all threads off the
* condition queue:
*/
while ((pthread = cond_queue_deq(*cond)) != NULL) {
/*
* Wake up the signaled thread:
*/
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
!= NULL) {
THR_SCHED_LOCK(curthread, pthread);
cond_queue_remove(*cond, pthread);
_thr_setrunnable_unlocked(pthread);
THR_SCHED_UNLOCK(curthread, pthread);
}

/* There are no more waiting threads: */
@@ -607,13 +673,7 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
}

/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);

/*
* Undefer and handle pending signals, yielding if
* necessary:
*/
_thread_kern_sig_undefer();
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
}

/* Return the completion status: */
@@ -621,26 +681,20 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
}

void
_cond_wait_backout(pthread_t pthread)
_cond_wait_backout(struct pthread *curthread)
{
pthread_cond_t cond;

cond = pthread->data.cond;
cond = curthread->data.cond;
if (cond != NULL) {
/*
* Defer signals to protect the scheduling queues
* from access by the signal handler:
*/
_thread_kern_sig_defer();

/* Lock the condition variable structure: */
_SPINLOCK(&cond->lock);
THR_LOCK_ACQUIRE(curthread, &cond->c_lock);

/* Process according to condition variable type: */
switch (cond->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
cond_queue_remove(cond, pthread);
cond_queue_remove(cond, curthread);

/* Check for no more waiters: */
if (TAILQ_FIRST(&cond->c_queue) == NULL)
@@ -652,13 +706,7 @@ _cond_wait_backout(pthread_t pthread)
}

/* Unlock the condition variable structure: */
_SPINUNLOCK(&cond->lock);

/*
* Undefer and handle pending signals, yielding if
* necessary:
*/
_thread_kern_sig_undefer();
THR_LOCK_RELEASE(curthread, &cond->c_lock);
}
}

@@ -666,14 +714,14 @@ _cond_wait_backout(pthread_t pthread)
* Dequeue a waiting thread from the head of a condition queue in
* descending priority order.
*/
static inline pthread_t
static inline struct pthread *
cond_queue_deq(pthread_cond_t cond)
{
pthread_t pthread;
struct pthread *pthread;

while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
THR_CONDQ_SET(pthread);
if ((pthread->timeout == 0) && (pthread->interrupted == 0))
/*
* Only exit the loop when we find a thread
@@ -684,7 +732,7 @@ cond_queue_deq(pthread_cond_t cond)
break;
}

return(pthread);
return (pthread);
}

/*
@@ -692,7 +740,7 @@ cond_queue_deq(pthread_cond_t cond)
* order.
*/
static inline void
cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
cond_queue_remove(pthread_cond_t cond, struct pthread *pthread)
{
/*
* Because pthread_cond_timedwait() can timeout as well
@@ -700,9 +748,9 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* guard against removing the thread from the queue if
* it isn't in the queue.
*/
if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) {
if (THR_IN_CONDQ(pthread)) {
TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
THR_CONDQ_CLEAR(pthread);
}
}

@@ -711,11 +759,12 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* order.
*/
static inline void
cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
cond_queue_enq(pthread_cond_t cond, struct pthread *pthread)
{
pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head);
struct pthread *tid = TAILQ_LAST(&cond->c_queue, cond_head);

PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
THR_ASSERT(!THR_IN_SYNCQ(pthread),
"cond_queue_enq: thread already queued!");

/*
* For the common case of all threads having equal priority,
@@ -730,6 +779,6 @@ cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
tid = TAILQ_NEXT(tid, sqe);
TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;
THR_CONDQ_SET(pthread);
pthread->data.cond = cond;
}

@@ -46,13 +46,13 @@ _pthread_condattr_init(pthread_condattr_t *attr)
pthread_condattr_t pattr;

if ((pattr = (pthread_condattr_t)
malloc(sizeof(struct pthread_cond_attr))) == NULL) {
malloc(sizeof(struct pthread_cond_attr))) == NULL) {
ret = ENOMEM;
} else {
memcpy(pattr, &pthread_condattr_default,
sizeof(struct pthread_cond_attr));
memcpy(pattr, &_pthread_condattr_default,
sizeof(struct pthread_cond_attr));
*attr = pattr;
ret = 0;
}
return(ret);
return (ret);
}

@@ -38,11 +38,12 @@ __weak_reference(___creat, creat);
int
___creat(const char *path, mode_t mode)
{
int ret;
struct pthread *curthread = _get_curthread();
int ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __creat(path, mode);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
}

@@ -50,102 +50,150 @@ int _thread_next_offset = OFF(tle.tqe_next);
int _thread_uniqueid_offset = OFF(uniqueid);
int _thread_state_offset = OFF(state);
int _thread_name_offset = OFF(name);
int _thread_ctx_offset = OFF(mailbox.tm_context);
int _thread_ctx_offset = OFF(tmbx.tm_context);
#undef OFF

int _thread_PS_RUNNING_value = PS_RUNNING;
int _thread_PS_DEAD_value = PS_DEAD;

static int create_stack(struct pthread_attr *pattr);
static void thread_start(struct pthread *curthread,
void *(*start_routine) (void *), void *arg);

__weak_reference(_pthread_create, pthread_create);

/*
* Some notes on new thread creation and first time initializion
* to enable multi-threading.
*
* There are basically two things that need to be done.
*
* 1) The internal library variables must be initialized.
* 2) Upcalls need to be enabled to allow multiple threads
* to be run.
*
* The first may be done as a result of other pthread functions
* being called. When _thr_initial is null, _libpthread_init is
* called to initialize the internal variables; this also creates
* or sets the initial thread. It'd be nice to automatically
* have _libpthread_init called on program execution so we don't
* have to have checks throughout the library.
*
* The second part is only triggered by the creation of the first
* thread (other than the initial/main thread). If the thread
* being created is a scope system thread, then a new KSE/KSEG
* pair needs to be allocated. Also, if upcalls haven't been
* enabled on the initial thread's KSE, they must be now that
* there is more than one thread; this could be delayed until
* the initial KSEG has more than one thread.
*/
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
void *(*start_routine) (void *), void *arg)
{
struct pthread *curthread = _get_curthread();
struct itimerval itimer;
int f_gc = 0;
int ret = 0;
pthread_t gc_thread;
pthread_t new_thread;
pthread_attr_t pattr;
void *stack;
struct kse *curkse;
struct pthread *curthread, *new_thread;
struct kse *kse = NULL;
struct kse_group *kseg = NULL;
kse_critical_t crit;
int i;
int ret = 0;

/*
* Locking functions in libc are required when there are
* threads other than the initial thread.
*/
__isthreaded = 1;
if (_thr_initial == NULL)
_libpthread_init(NULL);

crit = _kse_critical_enter();
curthread = _get_curthread();
curkse = curthread->kse;

/* Allocate memory for the thread structure: */
if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
if ((new_thread = _thr_alloc(curkse)) == NULL) {
/* Insufficient memory to create a thread: */
ret = EAGAIN;
} else {
/* Initialize the thread structure: */
memset(new_thread, 0, sizeof(struct pthread));

/* Check if default thread attributes are required: */
if (attr == NULL || *attr == NULL) {
if (attr == NULL || *attr == NULL)
/* Use the default thread attributes: */
pattr = &pthread_attr_default;
} else {
pattr = *attr;
new_thread->attr = _pthread_attr_default;
else
new_thread->attr = *(*attr);

if (create_stack(&new_thread->attr) != 0) {
/* Insufficient memory to create a stack: */
ret = EAGAIN;
_thr_free(curkse, new_thread);
}
/* Check if a stack was specified in the thread attributes: */
if ((stack = pattr->stackaddr_attr) != NULL) {
}
/* Allocate a stack: */
else {
stack = _thread_stack_alloc(pattr->stacksize_attr,
pattr->guardsize_attr);
if (stack == NULL) {
ret = EAGAIN;
free(new_thread);
else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
(((kse = _kse_alloc(curkse)) == NULL)
|| ((kseg = _kseg_alloc(curkse)) == NULL))) {
/* Insufficient memory to create a new KSE/KSEG: */
ret = EAGAIN;
if (kse != NULL)
_kse_free(curkse, kse);
if ((new_thread->attr.flags & THR_STACK_USER) == 0) {
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
_thr_stack_free(&new_thread->attr);
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
}
_thr_free(curkse, new_thread);
}

/* Check for errors: */
if (ret != 0) {
} else {
/* Initialise the thread structure: */
memset(new_thread, 0, sizeof(struct pthread));
new_thread->slice_usec = -1;
new_thread->stack = stack;
new_thread->start_routine = start_routine;
new_thread->arg = arg;

new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
PTHREAD_CANCEL_DEFERRED;

else {
if (kseg != NULL) {
/* Add the KSE to the KSEG's list of KSEs. */
TAILQ_INSERT_HEAD(&kseg->kg_kseq, kse, k_qe);
kse->k_kseg = kseg;
kse->k_schedq = &kseg->kg_schedq;
}
/*
* Write a magic value to the thread structure
* to help identify valid ones:
*/
new_thread->magic = PTHREAD_MAGIC;
new_thread->magic = THR_MAGIC;

/* Initialise the machine context: */
getcontext(&new_thread->mailbox.tm_context);
new_thread->mailbox.tm_context.uc_stack.ss_sp =
new_thread->stack;
new_thread->mailbox.tm_context.uc_stack.ss_size =
pattr->stacksize_attr;
makecontext(&new_thread->mailbox.tm_context,
_thread_start, 1);
new_thread->mailbox.tm_udata = (void *)new_thread;
new_thread->slice_usec = -1;
new_thread->start_routine = start_routine;
new_thread->arg = arg;
new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
PTHREAD_CANCEL_DEFERRED;

/* Copy the thread attributes: */
memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));
/* Initialize the thread for signals: */
new_thread->sigmask = curthread->sigmask;

/* No thread is wanting to join to this one: */
new_thread->joiner = NULL;

/* Initialize the signal frame: */
new_thread->curframe = NULL;

/* Initialize the machine context: */
THR_GETCONTEXT(&new_thread->tmbx.tm_context);
new_thread->tmbx.tm_udata = new_thread;
new_thread->tmbx.tm_context.uc_sigmask =
new_thread->sigmask;
new_thread->tmbx.tm_context.uc_stack.ss_size =
new_thread->attr.stacksize_attr;
new_thread->tmbx.tm_context.uc_stack.ss_sp =
new_thread->attr.stackaddr_attr;

makecontext(&new_thread->tmbx.tm_context,
(void (*)(void))thread_start, 4, new_thread,
start_routine, arg);

/*
* Check if this thread is to inherit the scheduling
* attributes from its parent:
*/
if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
if ((new_thread->attr.flags & PTHREAD_INHERIT_SCHED) != 0) {
/* Copy the scheduling attributes: */
new_thread->base_priority =
curthread->base_priority &
~PTHREAD_SIGNAL_PRIORITY;
~THR_SIGNAL_PRIORITY;
new_thread->attr.prio =
curthread->base_priority &
~PTHREAD_SIGNAL_PRIORITY;
~THR_SIGNAL_PRIORITY;
new_thread->attr.sched_policy =
curthread->attr.sched_policy;
} else {
@@ -160,23 +208,49 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->active_priority = new_thread->base_priority;
new_thread->inherited_priority = 0;

/* Initialize joiner to NULL (no joiner): */
new_thread->joiner = NULL;

/* Initialize the mutex queue: */
TAILQ_INIT(&new_thread->mutexq);

/* Initialize thread locking. */
if (_lock_init(&new_thread->lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize thread lock");
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_init(&new_thread->lockusers[i],
(void *)new_thread);
_LCK_SET_PRIVATE2(&new_thread->lockusers[i],
(void *)new_thread);
}

/* Initialise hooks in the thread structure: */
new_thread->specific = NULL;
new_thread->cleanup = NULL;
new_thread->flags = 0;
new_thread->continuation = NULL;

if (new_thread->attr.suspend == THR_CREATE_SUSPENDED)
new_thread->state = PS_SUSPENDED;
else
new_thread->state = PS_RUNNING;

/*
* Defer signals to protect the scheduling queues
* from access by the signal handler:
* System scope threads have their own kse and
* kseg. Process scope threads are all hung
* off the main process kseg.
*/
_thread_kern_sig_defer();
if ((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0) {
new_thread->kseg = _kse_initial->k_kseg;
new_thread->kse = _kse_initial;
}
else {
kse->k_curthread = NULL;
kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD;
new_thread->kse = kse;
new_thread->kseg = kse->k_kseg;
kse->k_mbx.km_udata = kse;
kse->k_mbx.km_curthread = NULL;
}
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);

/*
* Initialise the unique id which GDB uses to
@@ -184,57 +258,53 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
new_thread->uniqueid = next_uniqueid++;

/*
* Check if the garbage collector thread
* needs to be started.
*/
f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);

/* Add the thread to the linked list of all threads: */
TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);
THR_LIST_ADD(new_thread);

if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
new_thread->state = PS_SUSPENDED;
} else {
new_thread->state = PS_RUNNING;
PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
}
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);

/*
* Undefer and handle pending signals, yielding
* if necessary.
* Schedule the new thread starting a new KSEG/KSE
* pair if necessary.
*/
_thread_kern_sig_undefer();
_thr_schedule_add(curthread, new_thread);

/* Return a pointer to the thread structure: */
(*thread) = new_thread;

/* Schedule the new user thread: */
_thread_kern_sched();

/*
* Start a garbage collector thread
* if necessary.
*/
if (f_gc && pthread_create(&gc_thread,NULL,
_thread_gc,NULL) != 0)
PANIC("Can't create gc thread");

}
}
_kse_critical_leave(crit);

if ((ret == 0) && (_kse_isthreaded() == 0))
_kse_setthreaded(1);

/* Return the status: */
return (ret);
}

void
_thread_start(void)
static int
create_stack(struct pthread_attr *pattr)
{
struct pthread *curthread = _get_curthread();
int ret;

/* Check if a stack was specified in the thread attributes: */
if ((pattr->stackaddr_attr) != NULL) {
pattr->guardsize_attr = 0;
pattr->flags = THR_STACK_USER;
ret = 0;
}
else
ret = _thr_stack_alloc(pattr);
return (ret);
}

static void
thread_start(struct pthread *curthread, void *(*start_routine) (void *),
void *arg)
{
/* Run the current thread's start routine with argument: */
pthread_exit(curthread->start_routine(curthread->arg));
pthread_exit(start_routine(arg));

/* This point should never be reached. */
PANIC("Thread has resumed after exit");

@ -31,6 +31,8 @@
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
#include <sys/types.h>
|
||||
#include <machine/atomic.h>
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#include "thr_private.h"
|
||||
@ -40,50 +42,60 @@ __weak_reference(_pthread_detach, pthread_detach);
|
||||
int
|
||||
_pthread_detach(pthread_t pthread)
|
||||
{
|
||||
int rval = 0;
|
||||
struct pthread *curthread, *joiner;
|
||||
int rval = 0;
|
||||
|
||||
/* Check for invalid calling parameters: */
|
||||
if (pthread == NULL || pthread->magic != PTHREAD_MAGIC)
|
||||
if (pthread == NULL || pthread->magic != THR_MAGIC)
|
||||
/* Return an invalid argument error: */
|
||||
rval = EINVAL;
|
||||
|
||||
/* Check if the thread has not been detached: */
|
||||
else if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) {
|
||||
/* Check if the thread is already detached: */
|
||||
else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0)
|
||||
/* Return an error: */
|
||||
rval = EINVAL;
|
||||
else {
|
||||
/* Lock the detached thread: */
|
||||
curthread = _get_curthread();
|
||||
THR_SCHED_LOCK(curthread, pthread);
|
||||
|
||||
/* Flag the thread as detached: */
|
||||
pthread->attr.flags |= PTHREAD_DETACHED;
|
||||
|
||||
/*
|
||||
* Defer signals to protect the scheduling queues from
|
||||
* access by the signal handler:
|
||||
*/
|
||||
_thread_kern_sig_defer();
|
||||
/* Retrieve any joining thread and remove it: */
|
||||
joiner = pthread->joiner;
|
||||
pthread->joiner = NULL;
|
||||
|
||||
/* Check if there is a joiner: */
|
||||
if (pthread->joiner != NULL) {
|
||||
struct pthread *joiner = pthread->joiner;
|
||||
|
||||
/* Make the thread runnable: */
|
||||
PTHREAD_NEW_STATE(joiner, PS_RUNNING);
|
||||
|
||||
/* Set the return value for the woken thread: */
|
||||
joiner->join_status.error = ESRCH;
|
||||
joiner->join_status.ret = NULL;
|
||||
joiner->join_status.thread = NULL;
|
||||
|
||||
/*
|
||||
* Disconnect the joiner from the thread being detached:
|
||||
*/
|
||||
pthread->joiner = NULL;
|
||||
/* We are already in a critical region. */
|
||||
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
|
||||
if ((pthread->flags & THR_FLAGS_GC_SAFE) != 0) {
|
||||
THR_LIST_REMOVE(pthread);
|
||||
THR_GCLIST_ADD(pthread);
|
||||
atomic_store_rel_int(&_gc_check, 1);
|
||||
if (KSE_WAITING(_kse_initial))
|
||||
KSE_WAKEUP(_kse_initial);
|
||||
}
|
||||
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
|
||||
|
||||
/*
|
||||
* Undefer and handle pending signals, yielding if a
|
||||
* scheduling signal occurred while in the critical region.
|
||||
*/
|
||||
_thread_kern_sig_undefer();
|
||||
} else
|
||||
/* Return an error: */
|
||||
rval = EINVAL;
|
||||
THR_SCHED_UNLOCK(curthread, pthread);
|
||||
|
||||
/* See if there is a thread waiting in pthread_join(): */
|
||||
if (joiner != NULL) {
|
||||
/* Lock the joiner before fiddling with it. */
|
||||
THR_SCHED_LOCK(curthread, joiner);
|
||||
if (joiner->join_status.thread == pthread) {
|
||||
/*
|
||||
* Set the return value for the woken thread:
|
||||
*/
|
||||
joiner->join_status.error = ESRCH;
|
||||
joiner->join_status.ret = NULL;
|
||||
joiner->join_status.thread = NULL;
|
||||
|
||||
_thr_setrunnable_unlocked(joiner);
|
||||
}
|
||||
THR_SCHED_UNLOCK(curthread, joiner);
|
||||
}
|
||||
}
|
||||
|
||||
/* Return the completion status: */
|
||||
return (rval);
|
||||
|
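The rewritten detach path hands a pending joiner ESRCH instead of leaving it blocked. A runnable sketch of that behaviour as this library implements it (not part of the commit; POSIX leaves detach-under-join undefined, and the sleeps only order events for illustration):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg)
{
	sleep(2);	/* stay alive while main detaches us */
	return (NULL);
}

static void *joiner(void *arg)
{
	int ret = pthread_join(*(pthread_t *)arg, NULL);

	/* This library wakes a pending joiner with ESRCH when the
	 * target is detached out from under it. */
	printf("pthread_join returned %d (ESRCH is %d)\n", ret, ESRCH);
	return (NULL);
}

int main(void)
{
	pthread_t w, j;

	pthread_create(&w, NULL, worker, NULL);
	pthread_create(&j, NULL, joiner, &w);
	sleep(1);		/* let the joiner block in pthread_join() */
	pthread_detach(w);	/* wakes the joiner with ESRCH */
	pthread_join(j, NULL);
	return (0);
}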
@ -40,31 +40,24 @@
#include <pthread.h>
#include "thr_private.h"

#define FLAGS_IN_SCHEDQ \
(PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ)
void _pthread_exit(void *status);

__weak_reference(_pthread_exit, pthread_exit);

void
_thread_exit(char *fname, int lineno, char *string)
_thr_exit(char *fname, int lineno, char *msg)
{
char s[256];
char s[256];

/* Prepare an error message string: */
snprintf(s, sizeof(s),
"Fatal error '%s' at line %d in file %s (errno = %d)\n",
string, lineno, fname, errno);
msg, lineno, fname, errno);

/* Write the string to the standard error file descriptor: */
__sys_write(2, s, strlen(s));

/* Force this process to exit: */
/* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */
#if defined(_PTHREADS_INVARIANTS)
abort();
#else
__sys_exit(1);
#endif
}

/*
@ -73,7 +66,7 @@ _thread_exit(char *fname, int lineno, char *string)
* abnormal thread termination can be found.
*/
void
_thread_exit_cleanup(void)
_thr_exit_cleanup(void)
{
struct pthread *curthread = _get_curthread();

@ -96,22 +89,25 @@ _thread_exit_cleanup(void)
void
_pthread_exit(void *status)
{
struct pthread *curthread = _get_curthread();
pthread_t pthread;
struct pthread *curthread = _get_curthread();

/* Check if this thread is already in the process of exiting: */
if ((curthread->flags & PTHREAD_EXITING) != 0) {
if ((curthread->flags & THR_FLAGS_EXITING) != 0) {
char msg[128];
snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread);
snprintf(msg, sizeof(msg), "Thread %p has called "
"pthread_exit() from a destructor. POSIX 1003.1 "
"1996 s16.2.5.2 does not allow this!", curthread);
PANIC(msg);
}

/* Flag this thread as exiting: */
curthread->flags |= PTHREAD_EXITING;
/*
* Flag this thread as exiting. Threads should now be prevented
* from joining to this thread.
*/
curthread->flags |= THR_FLAGS_EXITING;

/* Save the return value: */
curthread->ret = status;

while (curthread->cleanup != NULL) {
pthread_cleanup_pop(1);
}
@ -124,58 +120,11 @@ _pthread_exit(void *status)
_thread_cleanupspecific();
}

/*
* Lock the garbage collector mutex to ensure that the garbage
* collector is not using the dead thread list.
*/
if (pthread_mutex_lock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");

/* Add this thread to the list of dead threads. */
TAILQ_INSERT_HEAD(&_dead_list, curthread, dle);

/*
* Signal the garbage collector thread that there is something
* to clean up.
*/
if (pthread_cond_signal(&_gc_cond) != 0)
PANIC("Cannot signal gc cond");

/*
* Avoid a race condition where a scheduling signal can occur
* causing the garbage collector thread to run. If this happens,
* the current thread can be cleaned out from under us.
*/
_thread_kern_sig_defer();

/* Unlock the garbage collector mutex: */
if (pthread_mutex_unlock(&_gc_mutex) != 0)
PANIC("Cannot unlock gc mutex");

/* Check if there is a thread joining this one: */
if (curthread->joiner != NULL) {
pthread = curthread->joiner;
curthread->joiner = NULL;

/* Make the joining thread runnable: */
PTHREAD_NEW_STATE(pthread, PS_RUNNING);

/* Set the return value for the joining thread: */
pthread->join_status.ret = curthread->ret;
pthread->join_status.error = 0;
pthread->join_status.thread = NULL;

/* Make this thread collectable by the garbage collector. */
PTHREAD_ASSERT(((curthread->attr.flags & PTHREAD_DETACHED) ==
0), "Cannot join a detached thread");
curthread->attr.flags |= PTHREAD_DETACHED;
}

/* Remove this thread from the thread list: */
TAILQ_REMOVE(&_thread_list, curthread, tle);

/* This thread will never be re-scheduled. */
_thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__);
THR_SCHED_LOCK(curthread, curthread);
THR_SET_STATE(curthread, PS_DEAD);
THR_SCHED_UNLOCK(curthread, curthread);
_thr_sched_switch(curthread);

/* This point should not be reached. */
PANIC("Dead thread has resumed");

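Note the while (curthread->cleanup != NULL) loop: pthread_exit() pops and runs every registered cleanup handler before the thread-specific-data destructors. A small runnable example of that contract (not from this commit):

#include <pthread.h>
#include <stdio.h>

static void note(void *arg)
{
	printf("cleanup handler ran: %s\n", (const char *)arg);
}

static void *worker(void *arg)
{
	pthread_cleanup_push(note, "still registered at exit");
	/* pthread_exit() pops every registered handler, as in the
	 * while (curthread->cleanup != NULL) loop above. */
	pthread_exit(NULL);
	pthread_cleanup_pop(0);	/* not reached; keeps push/pop paired */
	return (NULL);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	pthread_join(tid, NULL);
	return (0);
}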
@ -32,8 +32,9 @@
* $FreeBSD$
*/
#include <stdarg.h>
#include <unistd.h>
#include "namespace.h"
#include <fcntl.h>
#include "un-namespace.h"
#include <pthread.h>
#include "thr_private.h"

@ -42,28 +43,29 @@ __weak_reference(__fcntl, fcntl);
int
__fcntl(int fd, int cmd,...)
{
struct pthread *curthread = _get_curthread();
int ret;
va_list ap;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);

va_start(ap, cmd);
switch (cmd) {
case F_DUPFD:
case F_SETFD:
case F_SETFL:
ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
break;
case F_GETFD:
case F_GETFL:
ret = __sys_fcntl(fd, cmd);
break;
default:
ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
case F_DUPFD:
case F_SETFD:
case F_SETFL:
ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
break;
case F_GETFD:
case F_GETFL:
ret = __sys_fcntl(fd, cmd);
break;
default:
ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
}
va_end(ap);

_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
return (ret);
}

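Every syscall wrapper in this diff follows the same enter/leave bracket around the real syscall. What the bracket buys is visible from user code: a thread blocked in a wrapped syscall can be cancelled at that point. A runnable illustration (not part of the commit):

#include <pthread.h>
#include <unistd.h>

/* read() is a cancellation point: a blocked reader can be cancelled,
 * which is exactly what the enter/leave calls in the wrappers above
 * arrange. */
static void *reader(void *arg)
{
	char buf[1];

	(void)read(*(int *)arg, buf, 1);	/* blocks until cancelled */
	return (NULL);
}

int main(void)
{
	pthread_t tid;
	int fds[2];

	if (pipe(fds) != 0)
		return (1);
	pthread_create(&tid, NULL, reader, &fds[0]);
	sleep(1);		/* let the reader block in read() */
	pthread_cancel(tid);	/* wakes it at the cancellation point */
	pthread_join(tid, NULL);
	return (0);
}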
@ -1,4 +1,5 @@
/*
* Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
* Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
@ -35,32 +36,65 @@
#include <pthread.h>
#include "thr_private.h"

/* Find a thread in the linked list of active threads: */
/*
* Find a thread in the linked list of active threads and add a reference
* to it. Threads with positive reference counts will not be deallocated
* until all references are released.
*/
int
_find_thread(pthread_t pthread)
_thr_ref_add(struct pthread *curthread, struct pthread *thread,
int include_dead)
{
pthread_t pthread1;
kse_critical_t crit;
struct pthread *pthread;

/* Check if the caller has specified an invalid thread: */
if (pthread == NULL || pthread->magic != PTHREAD_MAGIC)
if (thread == NULL)
/* Invalid thread: */
return(EINVAL);
return (EINVAL);

/*
* Defer signals to protect the thread list from access
* by the signal handler:
*/
_thread_kern_sig_defer();

/* Search for the specified thread: */
TAILQ_FOREACH(pthread1, &_thread_list, tle) {
if (pthread == pthread1)
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
TAILQ_FOREACH(pthread, &_thread_list, tle) {
if (pthread == thread) {
if ((include_dead == 0) &&
((pthread->state == PS_DEAD) ||
((pthread->state == PS_DEADLOCK) ||
((pthread->flags & THR_FLAGS_EXITING) != 0))))
pthread = NULL;
else {
thread->refcount++;
curthread->critical_count++;
}
break;
}
}

/* Undefer and handle pending signals, yielding if necessary: */
_thread_kern_sig_undefer();
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);

/* Return zero if the thread exists: */
return ((pthread1 != NULL) ? 0:ESRCH);
return ((pthread != NULL) ? 0 : ESRCH);
}

void
_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
{
kse_critical_t crit;

if (thread != NULL) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
thread->refcount--;
curthread->critical_count--;
if (((thread->flags & THR_FLAGS_GC_SAFE) != 0) &&
(thread->refcount == 0) &&
((thread->attr.flags & PTHREAD_DETACHED) != 0)) {
THR_LIST_REMOVE(thread);
THR_GCLIST_ADD(thread);
_gc_check = 1;
if (KSE_WAITING(_kse_initial))
KSE_WAKEUP(_kse_initial);
}
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);
}
}

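Callers are expected to bracket any use of another thread's structure with this add/delete pair, exactly as the pthread_getschedparam() hunk further down does. A schematic consumer, using only names from this diff (poke_thread itself is hypothetical and not runnable standalone):

/* Hypothetical consumer of the reference-count API above. */
int
poke_thread(struct pthread *curthread, struct pthread *target)
{
	int ret;

	/* Pin the target so the GC cannot reclaim it. */
	if ((ret = _thr_ref_add(curthread, target, /*include dead*/0)) != 0)
		return (ret);	/* ESRCH: not an active thread */
	THR_SCHED_LOCK(curthread, target);
	/* ... inspect or modify the pinned thread here ... */
	THR_SCHED_UNLOCK(curthread, target);
	_thr_ref_delete(curthread, target);	/* release the pin */
	return (0);
}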
@ -31,7 +31,6 @@
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
@ -40,141 +39,21 @@
#include <pthread.h>
#include "thr_private.h"

static void free_thread_resources(struct pthread *thread);

__weak_reference(_fork, fork);

pid_t
_fork(void)
{
struct pthread *curthread = _get_curthread();
int i, flags, use_deadlist = 0;
pid_t ret;
pthread_t pthread;
pthread_t pthread_save;
struct pthread *curthread;
pid_t ret;

/*
* Defer signals to protect the scheduling queues from access
* by the signal handler:
*/
_thread_kern_sig_defer();
curthread = _get_curthread();

/* Fork a new process: */
if ((ret = __sys_fork()) != 0) {
/* Parent process or error. Nothing to do here. */
} else {
/* Reinitialize the GC mutex: */
if (_mutex_reinit(&_gc_mutex) != 0) {
/* Abort this application: */
PANIC("Cannot initialize GC mutex for forked process");
}
/* Reinitialize the GC condition variable: */
else if (_cond_reinit(&_gc_cond) != 0) {
/* Abort this application: */
PANIC("Cannot initialize GC condvar for forked process");
}
/* Initialize the ready queue: */
else if (_pq_init(&_readyq) != 0) {
/* Abort this application: */
PANIC("Cannot initialize priority ready queue.");
} else {
/*
* Enter a loop to remove all threads other than
* the running thread from the thread list:
*/
if ((pthread = TAILQ_FIRST(&_thread_list)) == NULL) {
pthread = TAILQ_FIRST(&_dead_list);
use_deadlist = 1;
}
while (pthread != NULL) {
/* Save the thread to be freed: */
pthread_save = pthread;

/*
* Advance to the next thread before
* destroying the current thread:
*/
if (use_deadlist != 0)
pthread = TAILQ_NEXT(pthread, dle);
else
pthread = TAILQ_NEXT(pthread, tle);

/* Make sure this isn't the running thread: */
if (pthread_save != curthread) {
/*
* Remove this thread from the
* appropriate list:
*/
if (use_deadlist != 0)
TAILQ_REMOVE(&_thread_list,
pthread_save, dle);
else
TAILQ_REMOVE(&_thread_list,
pthread_save, tle);

free_thread_resources(pthread_save);
}

/*
* Switch to the deadlist when the active
* thread list has been consumed. This can't
* be at the top of the loop because it is
* used to determine to which list the thread
* belongs (when it is removed from the list).
*/
if (pthread == NULL) {
pthread = TAILQ_FIRST(&_dead_list);
use_deadlist = 1;
}
}

/* Treat the current thread as the initial thread: */
_thread_initial = curthread;

/* Re-init the dead thread list: */
TAILQ_INIT(&_dead_list);

/* Re-init the waiting and work queues. */
TAILQ_INIT(&_waitingq);
TAILQ_INIT(&_workq);

/* Re-init the threads mutex queue: */
TAILQ_INIT(&curthread->mutexq);

/* No spinlocks yet: */
_spinblock_count = 0;

/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;
}
}

/*
* Undefer and handle pending signals, yielding if necessary:
*/
_thread_kern_sig_undefer();
if ((ret = __sys_fork()) == 0)
/* Child process */
_kse_single_thread(curthread);

/* Return the process ID: */
return (ret);
}

static void
free_thread_resources(struct pthread *thread)
{

/* Check to see if the threads library allocated the stack. */
if ((thread->attr.stackaddr_attr == NULL) && (thread->stack != NULL)) {
/*
* Since this is being called from fork, we are currently single
* threaded so there is no need to protect the call to
* _thread_stack_free() with _gc_mutex.
*/
_thread_stack_free(thread->stack, thread->attr.stacksize_attr,
thread->attr.guardsize_attr);
}

if (thread->specific != NULL)
free(thread->specific);

free(thread);
}

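_fork() now reduces to the raw syscall plus _kse_single_thread() in the child; the old per-thread cleanup loop is gone. Coordinating application state across fork() remains the caller's job via pthread_atfork(), e.g. (runnable sketch, not from this commit):

#include <sys/types.h>
#include <sys/wait.h>
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static void prepare(void) { pthread_mutex_lock(&m); }
static void parent(void)  { pthread_mutex_unlock(&m); }
static void child(void)   { pthread_mutex_unlock(&m); }

int main(void)
{
	pid_t pid;

	/* Keep the mutex consistent across fork(); the threads library
	 * itself only guarantees the child is single-threaded. */
	pthread_atfork(prepare, parent, child);
	if ((pid = fork()) == 0) {
		pthread_mutex_lock(&m);	/* safe in the child */
		pthread_mutex_unlock(&m);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	return (0);
}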
@ -40,11 +40,12 @@ __weak_reference(__fsync, fsync);
int
__fsync(int fd)
{
struct pthread *curthread = _get_curthread();
int ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __sys_fsync(fd);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
return (ret);
}

@ -41,19 +41,33 @@ int
_pthread_getschedparam(pthread_t pthread, int *policy,
struct sched_param *param)
{
struct pthread *curthread = _get_curthread();
int ret;

if ((param == NULL) || (policy == NULL))
/* Return an invalid argument error: */
ret = EINVAL;

/* Find the thread in the list of active threads: */
else if ((ret = _find_thread(pthread)) == 0) {
/* Return the threads base priority and scheduling policy: */
else if (pthread == curthread) {
/*
* Avoid searching the thread list when it is the current
* thread.
*/
THR_SCHED_LOCK(curthread, curthread);
param->sched_priority =
PTHREAD_BASE_PRIORITY(pthread->base_priority);
THR_BASE_PRIORITY(pthread->base_priority);
*policy = pthread->attr.sched_policy;
THR_SCHED_UNLOCK(curthread, curthread);
ret = 0;
}

return(ret);
/* Find the thread in the list of active threads. */
else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
== 0) {
THR_SCHED_LOCK(curthread, pthread);
param->sched_priority =
THR_BASE_PRIORITY(pthread->base_priority);
*policy = pthread->attr.sched_policy;
THR_SCHED_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);
}
return (ret);
}

@ -56,11 +56,12 @@ struct s_thread_info {
/* Static variables: */
static const struct s_thread_info thread_info[] = {
{PS_RUNNING , "Running"},
{PS_LOCKWAIT , "Waiting on an internal lock"},
{PS_MUTEX_WAIT , "Waiting on a mutex"},
{PS_COND_WAIT , "Waiting on a condition variable"},
{PS_SLEEP_WAIT , "Sleeping"},
{PS_WAIT_WAIT , "Waiting process"},
{PS_SPINBLOCK , "Waiting for a spinlock"},
{PS_SIGSUSPEND , "Suspended, waiting for a signal"},
{PS_SIGWAIT , "Waiting for a signal"},
{PS_JOIN , "Waiting to join"},
{PS_SUSPENDED , "Suspended"},
{PS_DEAD , "Dead"},
@ -71,12 +72,9 @@ static const struct s_thread_info thread_info[] = {
void
_thread_dump_info(void)
{
char s[512];
int fd;
int i;
pthread_t pthread;
char tmpfile[128];
pq_list_t *pq_list;
char s[512], tmpfile[128];
pthread_t pthread;
int fd, i;

for (i = 0; i < 100000; i++) {
snprintf(tmpfile, sizeof(tmpfile), "/tmp/uthread.dump.%u.%i",
@ -102,64 +100,34 @@ _thread_dump_info(void)
/* all 100000 possibilities are in use :( */
return;
} else {
/* Output a header for active threads: */
strcpy(s, "\n\n=============\nACTIVE THREADS\n\n");
/* Dump the active threads. */
strcpy(s, "\n\n========\nACTIVE THREADS\n\n");
__sys_write(fd, s, strlen(s));

/* Enter a loop to report each thread in the global list: */
TAILQ_FOREACH(pthread, &_thread_list, tle) {
dump_thread(fd, pthread, /*long_verson*/ 1);
if (pthread->state != PS_DEAD)
dump_thread(fd, pthread, /*long_verson*/ 1);
}

/* Output a header for ready threads: */
strcpy(s, "\n\n=============\nREADY THREADS\n\n");
/*
* Dump the ready threads.
* XXX - We can't easily do this because the run queues
* are per-KSEG.
*/
strcpy(s, "\n\n========\nREADY THREADS - unimplemented\n\n");
__sys_write(fd, s, strlen(s));

/* Enter a loop to report each thread in the ready queue: */
TAILQ_FOREACH (pq_list, &_readyq.pq_queue, pl_link) {
TAILQ_FOREACH(pthread, &pq_list->pl_head, pqe) {
dump_thread(fd, pthread, /*long_version*/ 0);
}
}

/* Output a header for waiting threads: */
strcpy(s, "\n\n=============\nWAITING THREADS\n\n");
/*
* Dump the waiting threads.
* XXX - We can't easily do this because the wait queues
* are per-KSEG.
*/
strcpy(s, "\n\n========\nWAITING THREADS - unimplemented\n\n");
__sys_write(fd, s, strlen(s));

/* Enter a loop to report each thread in the waiting queue: */
TAILQ_FOREACH (pthread, &_waitingq, pqe) {
dump_thread(fd, pthread, /*long_version*/ 0);
}

/* Output a header for threads in the work queue: */
strcpy(s, "\n\n=============\nTHREADS IN WORKQ\n\n");
__sys_write(fd, s, strlen(s));

/* Enter a loop to report each thread in the waiting queue: */
TAILQ_FOREACH (pthread, &_workq, qe) {
dump_thread(fd, pthread, /*long_version*/ 0);
}

/* Check if there are no dead threads: */
if (TAILQ_FIRST(&_dead_list) == NULL) {
/* Output a record: */
strcpy(s, "\n\nTHERE ARE NO DEAD THREADS\n");
__sys_write(fd, s, strlen(s));
} else {
/* Output a header for dead threads: */
strcpy(s, "\n\nDEAD THREADS\n\n");
__sys_write(fd, s, strlen(s));

/*
* Enter a loop to report each thread in the global
* dead thread list:
*/
TAILQ_FOREACH(pthread, &_dead_list, dle) {
dump_thread(fd, pthread, /*long_version*/ 0);
}
}

/* Close the dump file: */
/* Close the dump file. */
__sys_close(fd);
}
}
@ -167,9 +135,9 @@ _thread_dump_info(void)
static void
dump_thread(int fd, pthread_t pthread, int long_version)
{
struct pthread *curthread = _get_curthread();
char s[512];
int i;
struct pthread *curthread = _get_curthread();
char s[512];
int i;

/* Find the state: */
for (i = 0; i < NELEMENTS(thread_info) - 1; i++)
@ -178,10 +146,11 @@ dump_thread(int fd, pthread_t pthread, int long_version)

/* Output a record for the thread: */
snprintf(s, sizeof(s),
"--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
"--------------------\n"
"Thread %p (%s) prio %3d, blocked %s, state %s [%s:%d]\n",
pthread, (pthread->name == NULL) ? "" : pthread->name,
pthread->active_priority, thread_info[i].name, pthread->fname,
pthread->lineno);
pthread->active_priority, (pthread->blocked != 0) ? "yes" : "no",
thread_info[i].name, pthread->fname, pthread->lineno);
__sys_write(fd, s, strlen(s));

if (long_version != 0) {
@ -192,13 +161,24 @@ dump_thread(int fd, pthread_t pthread, int long_version)
__sys_write(fd, s, strlen(s));
}
/* Check if this is the initial thread: */
if (pthread == _thread_initial) {
if (pthread == _thr_initial) {
/* Output a record for the initial thread: */
strcpy(s, "This is the initial thread\n");
__sys_write(fd, s, strlen(s));
}
/* Process according to thread state: */
switch (pthread->state) {
case PS_SIGWAIT:
snprintf(s, sizeof(s), "sigmask (hi)");
__sys_write(fd, s, strlen(s));
for (i = _SIG_WORDS - 1; i >= 0; i--) {
snprintf(s, sizeof(s), "%08x\n",
pthread->sigmask.__bits[i]);
__sys_write(fd, s, strlen(s));
}
snprintf(s, sizeof(s), "(lo)\n");
__sys_write(fd, s, strlen(s));
break;
/*
* Trap other states that are not explicitly
* coded to dump information:
@ -212,10 +192,10 @@ dump_thread(int fd, pthread_t pthread, int long_version)

/* Set the thread name for debug: */
void
_pthread_set_name_np(pthread_t thread, const char *name)
_pthread_set_name_np(pthread_t thread, char *name)
{
/* Check if the caller has specified a valid thread: */
if (thread != NULL && thread->magic == PTHREAD_MAGIC) {
if (thread != NULL && thread->magic == THR_MAGIC) {
if (thread->name != NULL) {
/* Free space for previous name. */
free(thread->name);

@ -1,4 +1,5 @@
/*
* Copyright (c) 2003 Daniel M. Eischen <deischen@FreeBSD.org>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@ -49,7 +50,6 @@
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/ttycom.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <dirent.h>
@ -57,6 +57,7 @@
#include <fcntl.h>
#include <paths.h>
#include <pthread.h>
#include <pthread_np.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
@ -64,11 +65,20 @@
#include <unistd.h>
#include "un-namespace.h"

#include "libc_private.h"
#include "thr_private.h"
#include "ksd.h"

int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
int __pthread_mutex_lock(pthread_mutex_t *);
int __pthread_mutex_trylock(pthread_mutex_t *);

static void init_private(void);
static void init_main_thread(struct pthread *thread);

/*
* All weak references used within libc should be in this table.
* This will is so that static libraries will work.
* This is so that static libraries will work.
*/
static void *references[] = {
&_accept,
@ -145,40 +155,64 @@ static void *libgcc_references[] = {
&_pthread_mutex_unlock
};

int _pthread_guard_default;
int _pthread_page_size;
#define DUAL_ENTRY(entry) \
(pthread_func_t)entry, (pthread_func_t)entry

static pthread_func_t jmp_table[][2] = {
{DUAL_ENTRY(_pthread_cond_broadcast)}, /* PJT_COND_BROADCAST */
{DUAL_ENTRY(_pthread_cond_destroy)}, /* PJT_COND_DESTROY */
{DUAL_ENTRY(_pthread_cond_init)}, /* PJT_COND_INIT */
{DUAL_ENTRY(_pthread_cond_signal)}, /* PJT_COND_SIGNAL */
{(pthread_func_t)__pthread_cond_wait,
(pthread_func_t)_pthread_cond_wait}, /* PJT_COND_WAIT */
{DUAL_ENTRY(_pthread_getspecific)}, /* PJT_GETSPECIFIC */
{DUAL_ENTRY(_pthread_key_create)}, /* PJT_KEY_CREATE */
{DUAL_ENTRY(_pthread_key_delete)}, /* PJT_KEY_DELETE*/
{DUAL_ENTRY(_pthread_main_np)}, /* PJT_MAIN_NP */
{DUAL_ENTRY(_pthread_mutex_destroy)}, /* PJT_MUTEX_DESTROY */
{DUAL_ENTRY(_pthread_mutex_init)}, /* PJT_MUTEX_INIT */
{(pthread_func_t)__pthread_mutex_lock,
(pthread_func_t)_pthread_mutex_lock}, /* PJT_MUTEX_LOCK */
{(pthread_func_t)__pthread_mutex_trylock,
(pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */
{DUAL_ENTRY(_pthread_mutex_unlock)}, /* PJT_MUTEX_UNLOCK */
{DUAL_ENTRY(_pthread_mutexattr_destroy)}, /* PJT_MUTEXATTR_DESTROY */
{DUAL_ENTRY(_pthread_mutexattr_init)}, /* PJT_MUTEXATTR_INIT */
{DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */
{DUAL_ENTRY(_pthread_once)}, /* PJT_ONCE */
{DUAL_ENTRY(_pthread_rwlock_destroy)}, /* PJT_RWLOCK_DESTROY */
{DUAL_ENTRY(_pthread_rwlock_init)}, /* PJT_RWLOCK_INIT */
{DUAL_ENTRY(_pthread_rwlock_rdlock)}, /* PJT_RWLOCK_RDLOCK */
{DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */
{DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */
{DUAL_ENTRY(_pthread_rwlock_unlock)}, /* PJT_RWLOCK_UNLOCK */
{DUAL_ENTRY(_pthread_rwlock_wrlock)}, /* PJT_RWLOCK_WRLOCK */
{DUAL_ENTRY(_pthread_self)}, /* PJT_SELF */
{DUAL_ENTRY(_pthread_setspecific)}, /* PJT_SETSPECIFIC */
{DUAL_ENTRY(_pthread_sigmask)} /* PJT_SIGMASK */
};
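Each jmp_table row pairs a cancellation-point entry with a plain one; DUAL_ENTRY fills both slots when the two are the same function. A self-contained sketch of the dispatch idea (the types and names here only model the libc interface, they are not copied from it):

#include <stdio.h>

typedef void (*jump_fn)(void);	/* stands in for pthread_func_t */

static void op_cancellable(void) { printf("cancellation-point variant\n"); }
static void op_plain(void)       { printf("plain variant\n"); }

#define DUAL(entry)	(jump_fn)entry, (jump_fn)entry

/* Column 0: used when cancellation must be honored;
 * column 1: used from non-cancellable (internal) contexts. */
static jump_fn table[][2] = {
	{ op_cancellable, op_plain },	/* an op with two variants */
	{ DUAL(op_plain) },		/* same function in both slots */
};

int main(void)
{
	table[0][0]();	/* external caller: cancellable entry */
	table[0][1]();	/* internal caller: plain entry */
	table[1][0]();
	return (0);
}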

static int init_once = 0;

/*
* Threaded process initialization
* Threaded process initialization.
*
* This is only called under two conditions:
*
* 1) Some thread routines have detected that the library hasn't yet
* been initialized (_thr_initial == NULL && curthread == NULL), or
*
* 2) An explicit call to reinitialize after a fork (indicated
* by curthread != NULL)
*/
void
_thread_init(void)
_libpthread_init(struct pthread *curthread)
{
int fd;
int flags;
int i;
size_t len;
int mib[2];
int sched_stack_size; /* Size of scheduler stack. */

struct clockinfo clockinfo;
struct sigaction act;
int fd;

/* Check if this function has already been called: */
if (_thread_initial)
/* Only initialise the threaded application once. */
return;

_pthread_page_size = getpagesize();
_pthread_guard_default = getpagesize();
sched_stack_size = getpagesize();

pthread_attr_default.guardsize_attr = _pthread_guard_default;


/* Check if this function has already been called: */
if (_thread_initial)
/* Only initialise the threaded application once. */
if ((_thr_initial != NULL) && (curthread == NULL))
/* Only initialize the threaded application once. */
return;

/*
@ -188,11 +222,19 @@ _thread_init(void)
if ((references[0] == NULL) || (libgcc_references[0] == NULL))
PANIC("Failed loading mandatory references in _thread_init");

/*
* Check the size of the jump table to make sure it is preset
* with the correct number of entries.
*/
if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2))
PANIC("Thread jump table not properly initialized");
memcpy(__thr_jtable, jmp_table, sizeof(jmp_table));

/*
* Check for the special case of this process running as
* or in place of init as pid = 1:
*/
if (getpid() == 1) {
if ((_thr_pid = getpid()) == 1) {
/*
* Setup a new session for this process which is
* assumed to be running as root.
@ -207,200 +249,271 @@ _thread_init(void)
PANIC("Can't set login to root");
if (__sys_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1)
PANIC("Can't set controlling terminal");
if (__sys_dup2(fd, 0) == -1 ||
__sys_dup2(fd, 1) == -1 ||
__sys_dup2(fd, 2) == -1)
PANIC("Can't dup2");
}

/* Allocate and initialize the ready queue: */
if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) !=
0) {
/* Abort this application: */
PANIC("Cannot allocate priority ready queue.");
}
/* Allocate memory for the thread structure of the initial thread: */
else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
/* Initialize pthread private data. */
init_private();
_kse_init();

/* Initialize the initial kse and kseg. */
_kse_initial = _kse_alloc(NULL);
if (_kse_initial == NULL)
PANIC("Can't allocate initial kse.");
_kse_initial->k_kseg = _kseg_alloc(NULL);
if (_kse_initial->k_kseg == NULL)
PANIC("Can't allocate initial kseg.");
_kse_initial->k_schedq = &_kse_initial->k_kseg->kg_schedq;

/* Set the initial thread. */
if (curthread == NULL) {
/* Create and initialize the initial thread. */
curthread = _thr_alloc(NULL);
if (curthread == NULL)
PANIC("Can't allocate initial thread");
_thr_initial = curthread;
init_main_thread(curthread);
} else {
/*
* Insufficient memory to initialise this application, so
* abort:
* The initial thread is the current thread. It is
* assumed that the current thread is already initialized
* because it is left over from a fork().
*/
PANIC("Cannot allocate memory for initial thread");
_thr_initial = curthread;
}
/* Allocate memory for the scheduler stack: */
else if ((_thread_kern_sched_stack = malloc(sched_stack_size)) == NULL)
PANIC("Failed to allocate stack for scheduler");
/* Allocate memory for the idle stack: */
else if ((_idle_thr_stack = malloc(sched_stack_size)) == NULL)
PANIC("Failed to allocate stack for scheduler");
else {
/* Zero the global kernel thread structure: */
memset(&_thread_kern_thread, 0, sizeof(struct pthread));
_thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE;
memset(_thread_initial, 0, sizeof(struct pthread));
_kse_initial->k_kseg->kg_threadcount = 1;
_thr_initial->kse = _kse_initial;
_thr_initial->kseg = _kse_initial->k_kseg;
_thr_initial->active = 1;

/* Initialize the waiting and work queues: */
TAILQ_INIT(&_waitingq);
TAILQ_INIT(&_workq);
/*
* Add the thread to the thread list and to the KSEG's thread
* queue.
*/
THR_LIST_ADD(_thr_initial);
TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_threadq, _thr_initial, kle);

/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;
/* Setup the KSE/thread specific data for the current KSE/thread. */
if (_ksd_setprivate(&_thr_initial->kse->k_ksd) != 0)
PANIC("Can't set initial KSE specific data");
_set_curkse(_thr_initial->kse);
_thr_initial->kse->k_curthread = _thr_initial;
_thr_initial->kse->k_flags |= KF_INITIALIZED;
_kse_initial->k_curthread = _thr_initial;
}

/* Give this thread default attributes: */
memcpy((void *) &_thread_initial->attr, &pthread_attr_default,
sizeof(struct pthread_attr));
/*
* This function and pthread_create() do a lot of the same things.
* It'd be nice to consolidate the common stuff in one place.
*/
static void
init_main_thread(struct pthread *thread)
{
int i;

/* Zero the initial thread structure. */
memset(thread, 0, sizeof(struct pthread));

/* Setup the thread attributes. */
thread->attr = _pthread_attr_default;

/*
* Set up the thread stack.
*
* Create a red zone below the main stack. All other stacks
* are constrained to a maximum size by the parameters
* passed to mmap(), but this stack is only limited by
* resource limits, so this stack needs an explicitly mapped
* red zone to protect the thread stack that is just beyond.
*/
if (mmap((void *)_usrstack - THR_STACK_INITIAL -
_thr_guard_default, _thr_guard_default, 0, MAP_ANON,
-1, 0) == MAP_FAILED)
PANIC("Cannot allocate red zone for initial thread");

/*
* Mark the stack as an application supplied stack so that it
* isn't deallocated.
*
* XXX - I'm not sure it would hurt anything to deallocate
* the main thread stack because deallocation doesn't
* actually free() it; it just puts it in the free
* stack queue for later reuse.
*/
thread->attr.stackaddr_attr = (void *)_usrstack - THR_STACK_INITIAL;
thread->attr.stacksize_attr = THR_STACK_INITIAL;
thread->attr.guardsize_attr = _thr_guard_default;
thread->attr.flags |= THR_STACK_USER;

/*
* Write a magic value to the thread structure
* to help identify valid ones:
*/
thread->magic = THR_MAGIC;

thread->slice_usec = -1;
thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
thread->name = strdup("initial thread");

/* Initialize the thread for signals: */
sigemptyset(&thread->sigmask);

/*
* Set up the thread mailbox. The threads saved context
* is also in the mailbox.
*/
thread->tmbx.tm_udata = thread;
thread->tmbx.tm_context.uc_sigmask = thread->sigmask;
thread->tmbx.tm_context.uc_stack.ss_size = thread->attr.stacksize_attr;
thread->tmbx.tm_context.uc_stack.ss_sp = thread->attr.stackaddr_attr;

/* Default the priority of the initial thread: */
thread->base_priority = THR_DEFAULT_PRIORITY;
thread->active_priority = THR_DEFAULT_PRIORITY;
thread->inherited_priority = 0;

/* Initialize the mutex queue: */
TAILQ_INIT(&thread->mutexq);

/* Initialize thread locking. */
if (_lock_init(&thread->lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize initial thread lock");
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_init(&thread->lockusers[i], (void *)thread);
_LCK_SET_PRIVATE2(&thread->lockusers[i], (void *)thread);
}

/* Initialize hooks in the thread structure: */
thread->specific = NULL;
thread->cleanup = NULL;
thread->flags = 0;
thread->continuation = NULL;

thread->state = PS_RUNNING;
thread->uniqueid = 0;
}

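The red-zone comment above is the key to init_main_thread(): the main stack is the one stack whose size mmap() never bounded, so a guard must be mapped by hand. The same trick in isolation (runnable sketch, sizes illustrative; the diff's prot argument 0 is PROT_NONE by another name):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t pgsz = (size_t)getpagesize();
	size_t stksz = 16 * pgsz;

	/* Reserve the stack plus one extra page... */
	char *base = mmap(NULL, stksz + pgsz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (base == MAP_FAILED)
		return (1);
	/* ...and make the lowest page inaccessible, so a stack
	 * overflow faults instead of silently corrupting memory. */
	if (mprotect(base, pgsz, PROT_NONE) != 0)
		return (1);
	printf("usable stack: %p..%p\n", (void *)(base + pgsz),
	    (void *)(base + pgsz + stksz));
	munmap(base, stksz + pgsz);
	return (0);
}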
static void
init_private(void)
{
struct clockinfo clockinfo;
struct sigaction act;
size_t len;
int mib[2];
int i;

/*
* Avoid reinitializing some things if they don't need to be,
* e.g. after a fork().
*/
if (init_once == 0) {
/* Find the stack top */
mib[0] = CTL_KERN;
mib[1] = KERN_USRSTACK;
len = sizeof (_usrstack);
if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1)
_usrstack = (void *)USRSTACK;
PANIC("Cannot get kern.usrstack from sysctl");

/*
* Create a red zone below the main stack. All other stacks are
* constrained to a maximum size by the paramters passed to
* mmap(), but this stack is only limited by resource limits, so
* this stack needs an explicitly mapped red zone to protect the
* thread stack that is just beyond.
* Create a red zone below the main stack. All other
* stacks are constrained to a maximum size by the
* parameters passed to mmap(), but this stack is only
* limited by resource limits, so this stack needs an
* explicitly mapped red zone to protect the thread stack
* that is just beyond.
*/
if (mmap(_usrstack - PTHREAD_STACK_INITIAL -
_pthread_guard_default, _pthread_guard_default, 0,
MAP_ANON, -1, 0) == MAP_FAILED)
if (mmap((void *)_usrstack - THR_STACK_INITIAL -
_thr_guard_default, _thr_guard_default,
0, MAP_ANON, -1, 0) == MAP_FAILED)
PANIC("Cannot allocate red zone for initial thread");

/* Set the main thread stack pointer. */
_thread_initial->stack = _usrstack - PTHREAD_STACK_INITIAL;

/* Set the stack attributes. */
_thread_initial->attr.stackaddr_attr = _thread_initial->stack;
_thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL;

/* Setup the context for the scheduler. */
_thread_kern_kse_mailbox.km_stack.ss_sp =
_thread_kern_sched_stack;
_thread_kern_kse_mailbox.km_stack.ss_size = sched_stack_size;
_thread_kern_kse_mailbox.km_func =
(void *)_thread_kern_scheduler;

/* Initialize the idle context. */
bzero(&_idle_thr_mailbox, sizeof(struct kse_thr_mailbox));
getcontext(&_idle_thr_mailbox.tm_context);
_idle_thr_mailbox.tm_context.uc_stack.ss_sp = _idle_thr_stack;
_idle_thr_mailbox.tm_context.uc_stack.ss_size =
sched_stack_size;
makecontext(&_idle_thr_mailbox.tm_context, _thread_kern_idle,
1);

/*
* Write a magic value to the thread structure
* to help identify valid ones:
*/
_thread_initial->magic = PTHREAD_MAGIC;

/* Set the initial cancel state */
_thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE |
PTHREAD_CANCEL_DEFERRED;

/* Setup the context for initial thread. */
getcontext(&_thread_initial->mailbox.tm_context);
_thread_initial->mailbox.tm_context.uc_stack.ss_sp =
_thread_initial->stack;
_thread_initial->mailbox.tm_context.uc_stack.ss_size =
PTHREAD_STACK_INITIAL;
_thread_initial->mailbox.tm_udata = (void *)_thread_initial;

/* Default the priority of the initial thread: */
_thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY;
_thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY;
_thread_initial->inherited_priority = 0;

/* Initialise the state of the initial thread: */
_thread_initial->state = PS_RUNNING;

/* Set the name of the thread: */
_thread_initial->name = strdup("_thread_initial");

/* Initialize joiner to NULL (no joiner): */
_thread_initial->joiner = NULL;

/* Initialize the owned mutex queue and count: */
TAILQ_INIT(&(_thread_initial->mutexq));
_thread_initial->priority_mutex_count = 0;

/* Initialize the global scheduling time: */
_sched_ticks = 0;
gettimeofday((struct timeval *) &_sched_tod, NULL);

/* Initialize last active: */
_thread_initial->last_active = (long) _sched_ticks;

/* Initialise the rest of the fields: */
_thread_initial->sig_defer_count = 0;
_thread_initial->specific = NULL;
_thread_initial->cleanup = NULL;
_thread_initial->flags = 0;
_thread_initial->error = 0;
TAILQ_INIT(&_thread_list);
TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle);
_set_curthread(_thread_initial);

/* Clear the pending signals for the process. */
sigemptyset(&_thread_sigpending);

/* Enter a loop to get the existing signal status: */
for (i = 1; i < NSIG; i++) {
/* Check for signals which cannot be trapped. */
if (i == SIGKILL || i == SIGSTOP)
continue;

/* Get the signal handler details. */
if (__sys_sigaction(i, NULL,
&_thread_sigact[i - 1]) != 0)
PANIC("Cannot read signal handler info");
}

/* Register SIGCHLD (needed for wait(2)). */
sigfillset(&act.sa_mask);
act.sa_handler = (void (*) ()) _thread_sig_handler;
act.sa_flags = SA_SIGINFO | SA_RESTART;
if (__sys_sigaction(SIGCHLD, &act, NULL) != 0)
PANIC("Can't initialize signal handler");

/* Get the process signal mask. */
__sys_sigprocmask(SIG_SETMASK, NULL, &_thread_sigmask);

/* Get the kernel clockrate: */
mib[0] = CTL_KERN;
mib[1] = KERN_CLOCKRATE;
len = sizeof (struct clockinfo);
if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
_clock_res_usec = clockinfo.tick > CLOCK_RES_USEC_MIN ?
clockinfo.tick : CLOCK_RES_USEC_MIN;
_clock_res_usec = clockinfo.tick;
else
_clock_res_usec = CLOCK_RES_USEC;

/* Start KSE. */
_thread_kern_kse_mailbox.km_curthread =
&_thread_initial->mailbox;
if (kse_create(&_thread_kern_kse_mailbox, 0) != 0)
PANIC("kse_new failed");
_thr_page_size = getpagesize();
_thr_guard_default = _thr_page_size;

init_once = 1; /* Don't do this again. */
} else {
/*
* Destroy the locks before creating them. We don't
* know what state they are in so it is better to just
* recreate them.
*/
_lock_destroy(&_thread_signal_lock);
_lock_destroy(&_mutex_static_lock);
_lock_destroy(&_rwlock_static_lock);
_lock_destroy(&_keytable_lock);
}

/* Initialise the garbage collector mutex and condition variable. */
if (_pthread_mutex_init(&_gc_mutex,NULL) != 0 ||
pthread_cond_init(&_gc_cond,NULL) != 0)
PANIC("Failed to initialise garbage collector mutex or condvar");
}

/*
* Special start up code for NetBSD/Alpha
*/
#if defined(__NetBSD__) && defined(__alpha__)
int
main(int argc, char *argv[], char *env);
/* Initialize everything else. */
TAILQ_INIT(&_thread_list);
TAILQ_INIT(&_thread_gc_list);

int
_thread_main(int argc, char *argv[], char *env)
{
_thread_init();
return (main(argc, argv, env));
/* Enter a loop to get the existing signal status: */
for (i = 1; i < NSIG; i++) {
/* Check for signals which cannot be trapped: */
if (i == SIGKILL || i == SIGSTOP) {
}

/* Get the signal handler details: */
else if (__sys_sigaction(i, NULL,
&_thread_sigact[i - 1]) != 0) {
/*
* Abort this process if signal
* initialisation fails:
*/
PANIC("Cannot read signal handler info");
}

/* Initialize the SIG_DFL dummy handler count. */
_thread_dfl_count[i] = 0;
}

/*
* Install the signal handler for SIGINFO. It isn't
* really needed, but it is nice to have for debugging
* purposes.
*/
if (__sys_sigaction(SIGINFO, &act, NULL) != 0) {
/*
* Abort this process if signal initialisation fails:
*/
PANIC("Cannot initialize signal handler");
}
_thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO | SA_RESTART;

/*
* Initialize the lock for temporary installation of signal
* handlers (to support sigwait() semantics) and for the
* process signal mask and pending signal sets.
*/
if (_lock_init(&_thread_signal_lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize _thread_signal_lock");
if (_lock_init(&_mutex_static_lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize mutex static init lock");
if (_lock_init(&_rwlock_static_lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize rwlock static init lock");
if (_lock_init(&_keytable_lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize thread specific keytable lock");

/* Clear pending signals and get the process signal mask. */
sigemptyset(&_thr_proc_sigpending);
__sys_sigprocmask(SIG_SETMASK, NULL, &_thr_proc_sigmask);

/*
* _thread_list_lock and _kse_count are initialized
* by _kse_init()
*/
}
#endif

@ -41,121 +41,91 @@ int
_pthread_join(pthread_t pthread, void **thread_return)
{
struct pthread *curthread = _get_curthread();
int ret = 0;
pthread_t thread;
int ret = 0;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);

/* Check if the caller has specified an invalid thread: */
if (pthread == NULL || pthread->magic != PTHREAD_MAGIC) {
if (pthread == NULL || pthread->magic != THR_MAGIC) {
/* Invalid thread: */
_thread_leave_cancellation_point();
return(EINVAL);
_thr_leave_cancellation_point(curthread);
return (EINVAL);
}

/* Check if the caller has specified itself: */
if (pthread == curthread) {
/* Avoid a deadlock condition: */
_thread_leave_cancellation_point();
return(EDEADLK);
_thr_leave_cancellation_point(curthread);
return (EDEADLK);
}

/*
* Lock the garbage collector mutex to ensure that the garbage
* collector is not using the dead thread list.
* Find the thread in the list of active threads or in the
* list of dead threads:
*/
if (pthread_mutex_lock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");

/*
* Defer signals to protect the thread list from access
* by the signal handler:
*/
_thread_kern_sig_defer();

/*
* Unlock the garbage collector mutex, now that the garbage collector
* can't be run:
*/
if (pthread_mutex_unlock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");

/*
* Search for the specified thread in the list of active threads. This
* is done manually here rather than calling _find_thread() because
* the searches in _thread_list and _dead_list (as well as setting up
* join/detach state) have to be done atomically.
*/
TAILQ_FOREACH(thread, &_thread_list, tle) {
if (thread == pthread)
break;
}
if (thread == NULL) {
/*
* Search for the specified thread in the list of dead threads:
*/
TAILQ_FOREACH(thread, &_dead_list, dle) {
if (thread == pthread)
break;
}
}

/* Check if the thread was not found or has been detached: */
if (thread == NULL ||
((pthread->attr.flags & PTHREAD_DETACHED) != 0)) {
/* Undefer and handle pending signals, yielding if necessary: */
_thread_kern_sig_undefer();

if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/1)) != 0) {
/* Return an error: */
ret = ESRCH;

} else if (pthread->joiner != NULL) {
/* Undefer and handle pending signals, yielding if necessary: */
_thread_kern_sig_undefer();

/* Multiple joiners are not supported. */
ret = ENOTSUP;

/* Check if the thread is not dead: */
} else if (pthread->state != PS_DEAD) {
/* Set the running thread to be the joiner: */
pthread->joiner = curthread;

/* Keep track of which thread we're joining to: */
curthread->join_status.thread = pthread;

while (curthread->join_status.thread == pthread) {
/* Schedule the next thread: */
_thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__);
}

/*
* The thread return value and error are set by the thread we're
* joining to when it exits or detaches:
*/
ret = curthread->join_status.error;
if ((ret == 0) && (thread_return != NULL))
*thread_return = curthread->join_status.ret;
} else {
/*
* The thread exited (is dead) without being detached, and no
* thread has joined it.
*/

/* Check if the return value is required: */
if (thread_return != NULL) {
/* Return the thread's return value: */
*thread_return = pthread->ret;
}

/* Make the thread collectable by the garbage collector. */
pthread->attr.flags |= PTHREAD_DETACHED;

/* Undefer and handle pending signals, yielding if necessary: */
_thread_kern_sig_undefer();
_thr_leave_cancellation_point(curthread);
return (ESRCH);
}

_thread_leave_cancellation_point();
/* Check if this thread has been detached: */
if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
/* Remove the reference and return an error: */
_thr_ref_delete(curthread, pthread);
ret = ESRCH;
} else {
/* Lock the target thread while checking its state. */
THR_SCHED_LOCK(curthread, pthread);
if ((pthread->state == PS_DEAD) ||
((pthread->flags & THR_FLAGS_EXITING) != 0)) {
if (thread_return != NULL)
/* Return the thread's return value: */
*thread_return = pthread->ret;

/* Unlock the thread and remove the reference. */
THR_SCHED_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);
}
else if (pthread->joiner != NULL) {
/* Unlock the thread and remove the reference. */
THR_SCHED_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);

/* Multiple joiners are not supported. */
ret = ENOTSUP;
}
else {
/* Set the running thread to be the joiner: */
pthread->joiner = curthread;

/* Keep track of which thread we're joining to: */
curthread->join_status.thread = pthread;

/* Unlock the thread and remove the reference. */
THR_SCHED_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);

THR_SCHED_LOCK(curthread, curthread);
if (curthread->join_status.thread == pthread)
THR_SET_STATE(curthread, PS_JOIN);
THR_SCHED_UNLOCK(curthread, curthread);

while (curthread->join_status.thread == pthread) {
/* Schedule the next thread: */
_thr_sched_switch(curthread);
}

/*
* The thread return value and error are set by the
* thread we're joining to when it exits or detaches:
*/
ret = curthread->join_status.error;
if ((ret == 0) && (thread_return != NULL))
*thread_return = curthread->join_status.ret;
}
}
_thr_leave_cancellation_point(curthread);

/* Return the completion status: */
return (ret);

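At the API level, the join_status plumbing above is just the usual contract: the value passed to pthread_exit() or returned from the start routine comes back through pthread_join(). For reference (runnable, not from this commit):

#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
	return ((void *)(long)(*(int *)arg + 1));
}

int main(void)
{
	pthread_t tid;
	void *res;
	int n = 41;

	pthread_create(&tid, NULL, worker, &n);
	/* join_status.ret in the diff is what comes back here. */
	pthread_join(tid, &res);
	printf("worker returned %ld\n", (long)res);
	return (0);
}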
File diff suppressed because it is too large
@ -41,8 +41,26 @@ __weak_reference(_pthread_kill, pthread_kill);
|
||||
int
|
||||
_pthread_kill(pthread_t pthread, int sig)
|
||||
{
|
||||
struct pthread *curthread = _get_curthread();
|
||||
int ret;
|
||||
|
||||
/* Check for invalid signal numbers: */
|
||||
if (sig < 0 || sig >= NSIG)
|
||||
/* Invalid signal: */
|
||||
ret = EINVAL;
|
||||
/*
|
||||
* All signals are unsupported.
|
||||
* Ensure the thread is in the list of active threads, and the
|
||||
* signal is valid (signal 0 specifies error checking only) and
|
||||
* not being ignored:
|
||||
*/
|
||||
return (EINVAL);
|
||||
else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
|
||||
== 0) {
|
||||
if ((sig > 0) &&
|
||||
(_thread_sigact[sig - 1].sa_handler != SIG_IGN))
|
||||
_thr_sig_send(pthread, sig);
|
||||
_thr_ref_delete(curthread, pthread);
|
||||
}
|
||||
|
||||
/* Return the completion status: */
|
||||
return (ret);
|
||||
}
|
||||
|
@ -40,8 +40,8 @@ int
|
||||
_pthread_main_np()
|
||||
{
|
||||
|
||||
if (!_thread_initial)
|
||||
if (!_thr_initial)
|
||||
return (-1);
|
||||
else
|
||||
return (pthread_equal(pthread_self(), _thread_initial) ? 1 : 0);
|
||||
return (pthread_equal(pthread_self(), _thr_initial) ? 1 : 0);
|
||||
}
|
||||
|
@ -46,13 +46,13 @@ _pthread_mutexattr_init(pthread_mutexattr_t *attr)
	pthread_mutexattr_t pattr;

	if ((pattr = (pthread_mutexattr_t)
	    malloc(sizeof(struct pthread_mutex_attr))) == NULL) {
	    malloc(sizeof(struct pthread_mutex_attr))) == NULL) {
		ret = ENOMEM;
	} else {
		memcpy(pattr, &pthread_mutexattr_default,
		    sizeof(struct pthread_mutex_attr));
		memcpy(pattr, &_pthread_mutexattr_default,
		    sizeof(struct pthread_mutex_attr));
		*attr = pattr;
		ret = 0;
	}
	return(ret);
	return (ret);
}
@ -16,6 +16,7 @@ __weak_reference(__msync, msync);
int
__msync(void *addr, size_t len, int flags)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
@ -24,9 +25,9 @@ __msync(void *addr, size_t len, int flags)
	 * write. The only real use of this wrapper is to guarantee
	 * a cancellation point, as per the standard. sigh.
	 */
	_thread_enter_cancellation_point();
	_thr_enter_cancellation_point(curthread);
	ret = __sys_msync(addr, len, flags);
	_thread_leave_cancellation_point();
	_thr_leave_cancellation_point(curthread);

	return ret;
}
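All of the syscall wrappers touched by this commit follow the same shape as the msync wrapper above: bracket the real system call with enter/leave cancellation-point calls on the current thread. A minimal standalone model of the pattern, using the portable pthread API rather than the library's internal _thr_* entry points (names here are illustrative only):

#include <pthread.h>
#include <unistd.h>

/* Model of the wrapper pattern: make a blocking call a cancellation
 * point.  pthread_testcancel() stands in for the library's internal
 * enter/leave bookkeeping. */
ssize_t
my_read(int fd, void *buf, size_t n)
{
	ssize_t ret;

	pthread_testcancel();		/* cancellation point on entry */
	ret = read(fd, buf, n);
	pthread_testcancel();		/* and again before returning */
	return (ret);
}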
File diff suppressed because it is too large
@ -98,16 +98,14 @@ _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
		ret = EINVAL;
	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
		ret = EINVAL;
	else {
		/* Lock the mutex: */
		if ((ret = pthread_mutex_lock(mutex)) == 0) {
			/* Return the old ceiling and set the new ceiling: */
			*old_ceiling = (*mutex)->m_prio;
			(*mutex)->m_prio = prioceiling;
	/* Lock the mutex: */
	else if ((ret = pthread_mutex_lock(mutex)) == 0) {
		/* Return the old ceiling and set the new ceiling: */
		*old_ceiling = (*mutex)->m_prio;
		(*mutex)->m_prio = prioceiling;

			/* Unlock the mutex: */
			ret = pthread_mutex_unlock(mutex);
		}
		/* Unlock the mutex: */
		ret = pthread_mutex_unlock(mutex);
	}
	return(ret);
}
@ -63,7 +63,7 @@ _pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol)
		ret = EINVAL;
	else {
		(*mattr)->m_protocol = protocol;
		(*mattr)->m_ceiling = PTHREAD_MAX_PRIORITY;
		(*mattr)->m_ceiling = THR_MAX_PRIORITY;
	}
	return(ret);
}
@ -39,57 +39,42 @@
__weak_reference(__nanosleep, nanosleep);

int
_nanosleep(const struct timespec * time_to_sleep,
    struct timespec * time_remaining)
_nanosleep(const struct timespec *time_to_sleep,
    struct timespec *time_remaining)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;
	struct timespec current_time;
	struct timespec current_time1;
	struct timespec ts, ts1;
	struct timespec remaining_time;
	struct timeval tv;

	/* Check if the time to sleep is legal: */
	if (time_to_sleep == NULL || time_to_sleep->tv_sec < 0 ||
	    time_to_sleep->tv_nsec < 0 || time_to_sleep->tv_nsec >= 1000000000) {
	if ((time_to_sleep == NULL) || (time_to_sleep->tv_sec < 0) ||
	    (time_to_sleep->tv_nsec < 0) ||
	    (time_to_sleep->tv_nsec >= 1000000000)) {
		/* Return an EINVAL error: */
		errno = EINVAL;
		ret = -1;
	} else {
		/*
		 * As long as we're going to get the time of day, we
		 * might as well store it in the global time of day:
		 */
		gettimeofday((struct timeval *) &_sched_tod, NULL);
		GET_CURRENT_TOD(tv);
		TIMEVAL_TO_TIMESPEC(&tv, &current_time);
		KSE_GET_TOD(curthread->kse, &ts);

		/* Calculate the time for the current thread to wake up: */
		curthread->wakeup_time.tv_sec = current_time.tv_sec + time_to_sleep->tv_sec;
		curthread->wakeup_time.tv_nsec = current_time.tv_nsec + time_to_sleep->tv_nsec;
		TIMESPEC_ADD(&curthread->wakeup_time, &ts, time_to_sleep);

		/* Check if the nanosecond field has overflowed: */
		if (curthread->wakeup_time.tv_nsec >= 1000000000) {
			/* Wrap the nanosecond field: */
			curthread->wakeup_time.tv_sec += 1;
			curthread->wakeup_time.tv_nsec -= 1000000000;
		}
		THR_SCHED_LOCK(curthread, curthread);
		curthread->interrupted = 0;

		/* Reschedule the current thread to sleep: */
		_thread_kern_sched_state(PS_SLEEP_WAIT, __FILE__, __LINE__);
		THR_SET_STATE(curthread, PS_SLEEP_WAIT);
		THR_SCHED_UNLOCK(curthread, curthread);

		/*
		 * As long as we're going to get the time of day, we
		 * might as well store it in the global time of day:
		 */
		gettimeofday((struct timeval *) &_sched_tod, NULL);
		GET_CURRENT_TOD(tv);
		TIMEVAL_TO_TIMESPEC(&tv, &current_time1);
		/* Reschedule the current thread to sleep: */
		_thr_sched_switch(curthread);

		/* Calculate the remaining time to sleep: */
		remaining_time.tv_sec = time_to_sleep->tv_sec + current_time.tv_sec - current_time1.tv_sec;
		remaining_time.tv_nsec = time_to_sleep->tv_nsec + current_time.tv_nsec - current_time1.tv_nsec;
		KSE_GET_TOD(curthread->kse, &ts1);
		remaining_time.tv_sec = time_to_sleep->tv_sec
		    + ts.tv_sec - ts1.tv_sec;
		remaining_time.tv_nsec = time_to_sleep->tv_nsec
		    + ts.tv_nsec - ts1.tv_nsec;

		/* Check if the nanosecond field has underflowed: */
		if (remaining_time.tv_nsec < 0) {
@ -97,9 +82,8 @@ _nanosleep(const struct timespec * time_to_sleep,
			remaining_time.tv_sec -= 1;
			remaining_time.tv_nsec += 1000000000;
		}

		/* Check if the nanosecond field has overflowed: */
		if (remaining_time.tv_nsec >= 1000000000) {
		else if (remaining_time.tv_nsec >= 1000000000) {
			/* Handle the overflow: */
			remaining_time.tv_sec += 1;
			remaining_time.tv_nsec -= 1000000000;
@ -130,14 +114,15 @@ _nanosleep(const struct timespec * time_to_sleep,
}

int
__nanosleep(const struct timespec * time_to_sleep, struct timespec *
    time_remaining)
__nanosleep(const struct timespec *time_to_sleep,
    struct timespec *time_remaining)
{
	int ret;
	struct pthread *curthread = _get_curthread();
	int ret;

	_thread_enter_cancellation_point();
	_thr_enter_cancellation_point(curthread);
	ret = _nanosleep(time_to_sleep, time_remaining);
	_thread_leave_cancellation_point();
	_thr_leave_cancellation_point(curthread);

	return ret;
	return (ret);
}
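The TIMESPEC_ADD change above replaces open-coded second/nanosecond arithmetic with a macro, and the remaining-time computation still has to handle carry and borrow on the nanosecond field by hand. A self-contained sketch of that arithmetic (my own helpers, assuming the usual struct timespec invariant 0 <= tv_nsec < 1000000000):

#include <time.h>

static void
timespec_add(struct timespec *d, const struct timespec *a,
    const struct timespec *b)
{
	d->tv_sec = a->tv_sec + b->tv_sec;
	d->tv_nsec = a->tv_nsec + b->tv_nsec;
	if (d->tv_nsec >= 1000000000) {		/* carry into seconds */
		d->tv_sec += 1;
		d->tv_nsec -= 1000000000;
	}
}

static void
timespec_sub(struct timespec *d, const struct timespec *a,
    const struct timespec *b)
{
	d->tv_sec = a->tv_sec - b->tv_sec;
	d->tv_nsec = a->tv_nsec - b->tv_nsec;
	if (d->tv_nsec < 0) {			/* borrow from seconds */
		d->tv_sec -= 1;
		d->tv_nsec += 1000000000;
	}
}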
@ -31,23 +31,25 @@
 *
 * $FreeBSD$
 */
#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

__weak_reference(_pthread_once, pthread_once);

int
_pthread_once(pthread_once_t * once_control, void (*init_routine) (void))
_pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
{
	if (once_control->state == PTHREAD_NEEDS_INIT) {
		if (_thread_initial == NULL)
			_thread_init();
		pthread_mutex_lock(&(once_control->mutex));
		if (_thr_initial == NULL)
			_libpthread_init(NULL);
		_pthread_mutex_lock(&(once_control->mutex));
		if (once_control->state == PTHREAD_NEEDS_INIT) {
			init_routine();
			once_control->state = PTHREAD_DONE_INIT;
		}
		pthread_mutex_unlock(&(once_control->mutex));
		_pthread_mutex_unlock(&(once_control->mutex));
	}
	return (0);
}
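The rewritten _pthread_once keeps the check-lock-recheck shape: the unlocked test is a fast path, and the state is tested again under the mutex so only one caller ever runs init_routine. A standalone model of just that logic, using only the portable pthread API (not the library internals):

#include <pthread.h>

static pthread_mutex_t once_mutex = PTHREAD_MUTEX_INITIALIZER;
static int once_state = 0;		/* 0 = needs init, 1 = done */

static void
my_once(void (*init_routine)(void))
{
	if (once_state == 0) {			/* cheap unlocked test */
		pthread_mutex_lock(&once_mutex);
		if (once_state == 0) {		/* recheck under the lock */
			init_routine();
			once_state = 1;
		}
		pthread_mutex_unlock(&once_mutex);
	}
}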
@ -45,11 +45,12 @@ __weak_reference(__open, open);
int
__open(const char *path, int flags,...)
{
	struct pthread *curthread = _get_curthread();
	int ret;
	int mode = 0;
	va_list ap;

	_thread_enter_cancellation_point();
	_thr_enter_cancellation_point(curthread);

	/* Check if the file is being created: */
	if (flags & O_CREAT) {
@ -60,7 +61,7 @@ __open(const char *path, int flags,...)
	}

	ret = __sys_open(path, flags, mode);
	_thread_leave_cancellation_point();
	_thr_leave_cancellation_point(curthread);

	return ret;
}
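The part of __open elided between the two hunks above pulls the optional mode argument off the va_list, which is only passed when O_CREAT is set. A sketch of that argument handling as a standalone wrapper (assumed behavior, not the library's exact code):

#include <fcntl.h>
#include <stdarg.h>
#include <unistd.h>

int
my_open(const char *path, int flags, ...)
{
	int mode = 0;
	va_list ap;

	if (flags & O_CREAT) {		/* mode is only passed with O_CREAT */
		va_start(ap, flags);
		mode = va_arg(ap, int);
		va_end(ap);
	}
	return (open(path, flags, mode));
}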
@ -38,11 +38,12 @@ __weak_reference(_pause, pause);
int
_pause(void)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	_thread_enter_cancellation_point();
	_thr_enter_cancellation_point(curthread);
	ret = __pause();
	_thread_leave_cancellation_point();
	_thr_leave_cancellation_point(curthread);

	return ret;
}
@ -46,11 +46,12 @@ __weak_reference(__poll, poll);
int
__poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	_thread_enter_cancellation_point();
	_thr_enter_cancellation_point(curthread);
	ret = __sys_poll(fds, nfds, timeout);
	_thread_leave_cancellation_point();
	_thr_leave_cancellation_point(curthread);

	return ret;
}
@ -29,13 +29,9 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/fcntl.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>

#include "thr_private.h"

@ -109,7 +105,7 @@ static void
pchar(int fd, char c)
{

	write(fd, &c, 1);
	__sys_write(fd, &c, 1);
}

/*
@ -119,6 +115,6 @@ static void
pstr(int fd, const char *s)
{

	write(fd, s, strlen(s));
	__sys_write(fd, s, strlen(s));
}

@ -42,47 +42,40 @@ static void pq_insert_prio_list(pq_queue_t *pq, int prio);

#if defined(_PTHREADS_INVARIANTS)

static int _pq_active = 0;
#define PQ_IN_SCHEDQ	(THR_FLAGS_IN_RUNQ | THR_FLAGS_IN_WAITQ)

#define _PQ_IN_SCHEDQ	(PTHREAD_FLAGS_IN_PRIOQ | PTHREAD_FLAGS_IN_WAITQ | PTHREAD_FLAGS_IN_WORKQ)

#define _PQ_SET_ACTIVE()	_pq_active = 1
#define _PQ_CLEAR_ACTIVE()	_pq_active = 0
#define _PQ_ASSERT_ACTIVE(msg)	do { \
	if (_pq_active == 0) \
#define PQ_SET_ACTIVE(pq)	(pq)->pq_flags |= PQF_ACTIVE
#define PQ_CLEAR_ACTIVE(pq)	(pq)->pq_flags &= ~PQF_ACTIVE
#define PQ_ASSERT_ACTIVE(pq, msg)	do { \
	if (((pq)->pq_flags & PQF_ACTIVE) == 0) \
		PANIC(msg); \
} while (0)
#define _PQ_ASSERT_INACTIVE(msg)	do { \
	if (_pq_active != 0) \
#define PQ_ASSERT_INACTIVE(pq, msg)	do { \
	if (((pq)->pq_flags & PQF_ACTIVE) != 0) \
		PANIC(msg); \
} while (0)
#define _PQ_ASSERT_IN_WAITQ(thrd, msg)	do { \
	if (((thrd)->flags & PTHREAD_FLAGS_IN_WAITQ) == 0) \
#define PQ_ASSERT_IN_WAITQ(thrd, msg)	do { \
	if (((thrd)->flags & THR_FLAGS_IN_WAITQ) == 0) \
		PANIC(msg); \
} while (0)
#define _PQ_ASSERT_IN_PRIOQ(thrd, msg)	do { \
	if (((thrd)->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0) \
#define PQ_ASSERT_IN_RUNQ(thrd, msg)	do { \
	if (((thrd)->flags & THR_FLAGS_IN_RUNQ) == 0) \
		PANIC(msg); \
} while (0)
#define _PQ_ASSERT_NOT_QUEUED(thrd, msg)	do { \
	if (((thrd)->flags & _PQ_IN_SCHEDQ) != 0) \
#define PQ_ASSERT_NOT_QUEUED(thrd, msg)	do { \
	if (((thrd)->flags & PQ_IN_SCHEDQ) != 0) \
		PANIC(msg); \
} while (0)
#define _PQ_ASSERT_PROTECTED(msg) \
	PTHREAD_ASSERT((_thread_kern_kse_mailbox.km_curthread == NULL) || \
	    ((_get_curthread())->sig_defer_count > 0), \
	    msg);

#else

#define _PQ_SET_ACTIVE()
#define _PQ_CLEAR_ACTIVE()
#define _PQ_ASSERT_ACTIVE(msg)
#define _PQ_ASSERT_INACTIVE(msg)
#define _PQ_ASSERT_IN_WAITQ(thrd, msg)
#define _PQ_ASSERT_IN_PRIOQ(thrd, msg)
#define _PQ_ASSERT_NOT_QUEUED(thrd, msg)
#define _PQ_ASSERT_PROTECTED(msg)
#define PQ_SET_ACTIVE(pq)
#define PQ_CLEAR_ACTIVE(pq)
#define PQ_ASSERT_ACTIVE(pq, msg)
#define PQ_ASSERT_INACTIVE(pq, msg)
#define PQ_ASSERT_IN_WAITQ(thrd, msg)
#define PQ_ASSERT_IN_RUNQ(thrd, msg)
#define PQ_ASSERT_NOT_QUEUED(thrd, msg)

#endif

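The change above is the SMP-relevant one in this file: the "queue is being modified" marker moves from a single file-scope _pq_active variable into a PQF_ACTIVE bit carried in each queue, so every priority queue can be asserted independently. A reduced model of the new macros (the PQF_ACTIVE value and struct layout here are assumptions for illustration):

#include <stdio.h>
#include <stdlib.h>

#define PQF_ACTIVE	0x0001		/* assumed flag value */

struct pq_queue {
	int	pq_flags;
	/* ... per-priority lists elided ... */
};

#define PANIC(msg)	do { fprintf(stderr, "%s\n", msg); abort(); } while (0)

#define PQ_SET_ACTIVE(pq)	((pq)->pq_flags |= PQF_ACTIVE)
#define PQ_CLEAR_ACTIVE(pq)	((pq)->pq_flags &= ~PQF_ACTIVE)
/* Catch re-entry into queue operations on this particular queue. */
#define PQ_ASSERT_INACTIVE(pq, msg) do {			\
	if (((pq)->pq_flags & PQF_ACTIVE) != 0)			\
		PANIC(msg);					\
} while (0)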
@ -123,10 +116,9 @@ _pq_init(pq_queue_t *pq)
			pq->pq_lists[i].pl_prio = i;
			pq->pq_lists[i].pl_queued = 0;
		}

		/* Initialize the priority queue: */
		TAILQ_INIT(&pq->pq_queue);
		_PQ_CLEAR_ACTIVE();
		pq->pq_flags = 0;
	}
	return (ret);
}
@ -139,10 +131,9 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread)
	/*
	 * Make some assertions when debugging is enabled:
	 */
	_PQ_ASSERT_INACTIVE("_pq_remove: pq_active");
	_PQ_SET_ACTIVE();
	_PQ_ASSERT_IN_PRIOQ(pthread, "_pq_remove: Not in priority queue");
	_PQ_ASSERT_PROTECTED("_pq_remove: prioq not protected!");
	PQ_ASSERT_INACTIVE(pq, "_pq_remove: pq_active");
	PQ_SET_ACTIVE(pq);
	PQ_ASSERT_IN_RUNQ(pthread, "_pq_remove: Not in priority queue");

	/*
	 * Remove this thread from priority list. Note that if
@ -155,9 +146,9 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread)
		TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe);

	/* This thread is no longer in the priority queue. */
	pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ;
	pthread->flags &= ~THR_FLAGS_IN_RUNQ;

	_PQ_CLEAR_ACTIVE();
	PQ_CLEAR_ACTIVE(pq);
}


@ -167,34 +158,23 @@ _pq_insert_head(pq_queue_t *pq, pthread_t pthread)
	int prio;

	/*
	 * Don't insert suspended threads into the priority queue.
	 * The caller is responsible for setting the threads state.
	 * Make some assertions when debugging is enabled:
	 */
	if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) {
		/* Make sure the threads state is suspended. */
		if (pthread->state != PS_SUSPENDED)
			PTHREAD_SET_STATE(pthread, PS_SUSPENDED);
	} else {
		/*
		 * Make some assertions when debugging is enabled:
		 */
		_PQ_ASSERT_INACTIVE("_pq_insert_head: pq_active");
		_PQ_SET_ACTIVE();
		_PQ_ASSERT_NOT_QUEUED(pthread,
		    "_pq_insert_head: Already in priority queue");
		_PQ_ASSERT_PROTECTED("_pq_insert_head: prioq not protected!");
	PQ_ASSERT_INACTIVE(pq, "_pq_insert_head: pq_active");
	PQ_SET_ACTIVE(pq);
	PQ_ASSERT_NOT_QUEUED(pthread,
	    "_pq_insert_head: Already in priority queue");

		prio = pthread->active_priority;
		TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
		if (pq->pq_lists[prio].pl_queued == 0)
			/* Insert the list into the priority queue: */
			pq_insert_prio_list(pq, prio);
	prio = pthread->active_priority;
	TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
	if (pq->pq_lists[prio].pl_queued == 0)
		/* Insert the list into the priority queue: */
		pq_insert_prio_list(pq, prio);

		/* Mark this thread as being in the priority queue. */
		pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ;
	/* Mark this thread as being in the priority queue. */
	pthread->flags |= THR_FLAGS_IN_RUNQ;

		_PQ_CLEAR_ACTIVE();
	}
	PQ_CLEAR_ACTIVE(pq);
}


@ -204,34 +184,23 @@ _pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
	int prio;

	/*
	 * Don't insert suspended threads into the priority queue.
	 * The caller is responsible for setting the threads state.
	 * Make some assertions when debugging is enabled:
	 */
	if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) {
		/* Make sure the threads state is suspended. */
		if (pthread->state != PS_SUSPENDED)
			PTHREAD_SET_STATE(pthread, PS_SUSPENDED);
	} else {
		/*
		 * Make some assertions when debugging is enabled:
		 */
		_PQ_ASSERT_INACTIVE("_pq_insert_tail: pq_active");
		_PQ_SET_ACTIVE();
		_PQ_ASSERT_NOT_QUEUED(pthread,
		    "_pq_insert_tail: Already in priority queue");
		_PQ_ASSERT_PROTECTED("_pq_insert_tail: prioq not protected!");
	PQ_ASSERT_INACTIVE(pq, "_pq_insert_tail: pq_active");
	PQ_SET_ACTIVE(pq);
	PQ_ASSERT_NOT_QUEUED(pthread,
	    "_pq_insert_tail: Already in priority queue");

		prio = pthread->active_priority;
		TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
		if (pq->pq_lists[prio].pl_queued == 0)
			/* Insert the list into the priority queue: */
			pq_insert_prio_list(pq, prio);
	prio = pthread->active_priority;
	TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
	if (pq->pq_lists[prio].pl_queued == 0)
		/* Insert the list into the priority queue: */
		pq_insert_prio_list(pq, prio);

		/* Mark this thread as being in the priority queue. */
		pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ;
	/* Mark this thread as being in the priority queue. */
	pthread->flags |= THR_FLAGS_IN_RUNQ;

		_PQ_CLEAR_ACTIVE();
	}
	PQ_CLEAR_ACTIVE(pq);
}


@ -244,9 +213,8 @@ _pq_first(pq_queue_t *pq)
	/*
	 * Make some assertions when debugging is enabled:
	 */
	_PQ_ASSERT_INACTIVE("_pq_first: pq_active");
	_PQ_SET_ACTIVE();
	_PQ_ASSERT_PROTECTED("_pq_first: prioq not protected!");
	PQ_ASSERT_INACTIVE(pq, "_pq_first: pq_active");
	PQ_SET_ACTIVE(pq);

	while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
	    (pthread == NULL)) {
@ -259,21 +227,10 @@ _pq_first(pq_queue_t *pq)

			/* Mark the list as not being in the queue: */
			pql->pl_queued = 0;
		} else if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) {
			/*
			 * This thread is suspended; remove it from the
			 * list and ensure its state is suspended.
			 */
			TAILQ_REMOVE(&pql->pl_head, pthread, pqe);
			PTHREAD_SET_STATE(pthread, PS_SUSPENDED);

			/* This thread is no longer in the priority queue. */
			pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ;
			pthread = NULL;
		}
	}

	_PQ_CLEAR_ACTIVE();
	PQ_CLEAR_ACTIVE(pq);
	return (pthread);
}

@ -286,8 +243,7 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
	/*
	 * Make some assertions when debugging is enabled:
	 */
	_PQ_ASSERT_ACTIVE("pq_insert_prio_list: pq_active");
	_PQ_ASSERT_PROTECTED("_pq_insert_prio_list: prioq not protected!");
	PQ_ASSERT_ACTIVE(pq, "pq_insert_prio_list: pq_active");

	/*
	 * The priority queue is in descending priority order. Start at
@ -307,64 +263,3 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
	/* Mark this list as being in the queue: */
	pq->pq_lists[prio].pl_queued = 1;
}

void
_waitq_insert(pthread_t pthread)
{
	pthread_t tid;

	/*
	 * Make some assertions when debugging is enabled:
	 */
	_PQ_ASSERT_INACTIVE("_waitq_insert: pq_active");
	_PQ_SET_ACTIVE();
	_PQ_ASSERT_NOT_QUEUED(pthread, "_waitq_insert: Already in queue");

	if (pthread->wakeup_time.tv_sec == -1)
		TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe);
	else {
		tid = TAILQ_FIRST(&_waitingq);
		while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) &&
		    ((tid->wakeup_time.tv_sec < pthread->wakeup_time.tv_sec) ||
		    ((tid->wakeup_time.tv_sec == pthread->wakeup_time.tv_sec) &&
		    (tid->wakeup_time.tv_nsec <= pthread->wakeup_time.tv_nsec))))
			tid = TAILQ_NEXT(tid, pqe);
		if (tid == NULL)
			TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe);
		else
			TAILQ_INSERT_BEFORE(tid, pthread, pqe);
	}
	pthread->flags |= PTHREAD_FLAGS_IN_WAITQ;

	_PQ_CLEAR_ACTIVE();
}

void
_waitq_remove(pthread_t pthread)
{
	/*
	 * Make some assertions when debugging is enabled:
	 */
	_PQ_ASSERT_INACTIVE("_waitq_remove: pq_active");
	_PQ_SET_ACTIVE();
	_PQ_ASSERT_IN_WAITQ(pthread, "_waitq_remove: Not in queue");

	TAILQ_REMOVE(&_waitingq, pthread, pqe);
	pthread->flags &= ~PTHREAD_FLAGS_IN_WAITQ;

	_PQ_CLEAR_ACTIVE();
}

void
_waitq_setactive(void)
{
	_PQ_ASSERT_INACTIVE("_waitq_setactive: pq_active");
	_PQ_SET_ACTIVE();
}

void
_waitq_clearactive(void)
{
	_PQ_ASSERT_ACTIVE("_waitq_clearactive: ! pq_active");
	_PQ_CLEAR_ACTIVE();
}
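The removed _waitq_insert kept the wait queue sorted by wakeup time, with tv_sec == -1 meaning "no timeout" and sorting last. The same walk-and-insert-before logic, as a self-contained sketch over sys/queue.h (my own types, not the library's):

#include <sys/queue.h>

struct waiter {
	long	sec;		/* wakeup time; -1 = wait forever */
	long	nsec;
	TAILQ_ENTRY(waiter) link;
};
TAILQ_HEAD(waitq, waiter);

/* Insert w so the queue stays ordered by wakeup time; untimed
 * waiters (sec == -1) collect at the tail. */
static void
waitq_insert(struct waitq *wq, struct waiter *w)
{
	struct waiter *t;

	if (w->sec == -1) {
		TAILQ_INSERT_TAIL(wq, w, link);
		return;
	}
	TAILQ_FOREACH(t, wq, link) {
		if (t->sec == -1 || t->sec > w->sec ||
		    (t->sec == w->sec && t->nsec > w->nsec)) {
			TAILQ_INSERT_BEFORE(t, w, link);
			return;
		}
	}
	TAILQ_INSERT_TAIL(wq, w, link);
}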
File diff suppressed because it is too large
@ -44,11 +44,12 @@ int
pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds,
    const struct timespec *timo, const sigset_t *mask)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	_thread_enter_cancellation_point();
	_thr_enter_cancellation_point(curthread);
	ret = __pselect(count, rfds, wfds, efds, timo, mask);
	_thread_leave_cancellation_point();
	_thr_leave_cancellation_point(curthread);

	return (ret);
}
@ -45,11 +45,12 @@ __weak_reference(__read, read);
ssize_t
__read(int fd, void *buf, size_t nbytes)
{
	struct pthread *curthread = _get_curthread();
	ssize_t ret;

	_thread_enter_cancellation_point();
	_thr_enter_cancellation_point(curthread);
	ret = __sys_read(fd, buf, nbytes);
	_thread_leave_cancellation_point();
	_thr_leave_cancellation_point(curthread);

	return ret;
}
@ -45,11 +45,12 @@ __weak_reference(__readv, readv);
ssize_t
__readv(int fd, const struct iovec *iov, int iovcnt)
{
	struct pthread *curthread = _get_curthread();
	ssize_t ret;

	_thread_enter_cancellation_point();
	_thr_enter_cancellation_point(curthread);
	ret = __sys_readv(fd, iov, iovcnt);
	_thread_leave_cancellation_point();
	_thr_leave_cancellation_point(curthread);

	return ret;
}
@ -35,33 +35,32 @@
#include <pthread.h>
#include "thr_private.h"

static void	resume_common(struct pthread *);
static void	resume_common(struct pthread *);

__weak_reference(_pthread_resume_np, pthread_resume_np);
__weak_reference(_pthread_resume_all_np, pthread_resume_all_np);


/* Resume a thread: */
int
_pthread_resume_np(pthread_t thread)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/* Find the thread in the list of active threads: */
	if ((ret = _find_thread(thread)) == 0) {
		/*
		 * Defer signals to protect the scheduling queues
		 * from access by the signal handler:
		 */
		_thread_kern_sig_defer();
	/* Add a reference to the thread: */
	if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0)) == 0) {
		/* Is it currently suspended? */
		if ((thread->flags & THR_FLAGS_SUSPENDED) != 0) {
			/* Lock the threads scheduling queue: */
			THR_SCHED_LOCK(curthread, thread);

		if ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)
			resume_common(thread);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
			/* Unlock the threads scheduling queue: */
			THR_SCHED_UNLOCK(curthread, thread);
		}
		_thr_ref_delete(curthread, thread);
	}
	return (ret);
}
@ -69,43 +68,42 @@ _pthread_resume_np(pthread_t thread)
void
_pthread_resume_all_np(void)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *thread;
	struct pthread *curthread = _get_curthread();
	struct pthread *thread;
	kse_critical_t crit;

	/*
	 * Defer signals to protect the scheduling queues from access
	 * by the signal handler:
	 */
	_thread_kern_sig_defer();
	/* Take the thread list lock: */
	crit = _kse_critical_enter();
	KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);

	TAILQ_FOREACH(thread, &_thread_list, tle) {
		if ((thread != curthread) &&
		    ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0))
		    ((thread->flags & THR_FLAGS_SUSPENDED) != 0) &&
		    (thread->state != PS_DEAD) &&
		    (thread->state != PS_DEADLOCK) &&
		    ((thread->flags & THR_FLAGS_EXITING) == 0)) {
			THR_SCHED_LOCK(curthread, thread);
			resume_common(thread);
			THR_SCHED_UNLOCK(curthread, thread);
		}
	}

	/*
	 * Undefer and handle pending signals, yielding if necessary:
	 */
	_thread_kern_sig_undefer();
	/* Release the thread list lock: */
	KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
	_kse_critical_leave(crit);
}

static void
resume_common(struct pthread *thread)
{
	/* Clear the suspend flag: */
	thread->flags &= ~PTHREAD_FLAGS_SUSPENDED;
	thread->flags &= ~THR_FLAGS_SUSPENDED;

	/*
	 * If the thread's state is suspended, that means it is
	 * now runnable but not in any scheduling queue. Set the
	 * state to running and insert it into the run queue.
	 */
	if (thread->state == PS_SUSPENDED) {
		PTHREAD_SET_STATE(thread, PS_RUNNING);
		if (thread->priority_mutex_count > 0)
			PTHREAD_PRIOQ_INSERT_HEAD(thread);
		else
			PTHREAD_PRIOQ_INSERT_TAIL(thread);
	}
	if (thread->state == PS_SUSPENDED)
		_thr_setrunnable_unlocked(thread);
}
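The new resume_common is deliberately thin: it clears THR_FLAGS_SUSPENDED and, only if the thread had actually parked in PS_SUSPENDED, hands it to _thr_setrunnable_unlocked instead of poking the run queue itself. A compact model of that state transition (types, flag value, and callback invented for illustration):

enum state { PS_RUNNING, PS_SUSPENDED };

struct thr {
	int		flags;
	enum state	state;
};
#define THR_FLAGS_SUSPENDED	0x0010	/* value assumed for the sketch */

/* Called with the thread's scheduling lock held. */
static void
resume_common_model(struct thr *t, void (*setrunnable)(struct thr *))
{
	t->flags &= ~THR_FLAGS_SUSPENDED;	/* allow it to run again */
	if (t->state == PS_SUSPENDED)		/* parked: requeue it */
		setrunnable(t);
}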
@ -30,7 +30,9 @@
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

/* maximum number of times a read lock may be obtained */
@ -44,25 +46,28 @@ __weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);

static int init_static (pthread_rwlock_t *rwlock);

static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
/*
 * Prototypes
 */
static int init_static(pthread_rwlock_t *rwlock);


static int
init_static (pthread_rwlock_t *rwlock)
init_static(pthread_rwlock_t *rwlock)
{
	struct pthread *thread = _get_curthread();
	int ret;

	_SPINLOCK(&static_init_lock);
	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == NULL)
		ret = pthread_rwlock_init(rwlock, NULL);
		ret = _pthread_rwlock_init(rwlock, NULL);
	else
		ret = 0;

	_SPINUNLOCK(&static_init_lock);

	return(ret);
	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
	return (ret);
}

int
@ -77,9 +82,9 @@ _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)

		prwlock = *rwlock;

		pthread_mutex_destroy(&prwlock->lock);
		pthread_cond_destroy(&prwlock->read_signal);
		pthread_cond_destroy(&prwlock->write_signal);
		_pthread_mutex_destroy(&prwlock->lock);
		_pthread_cond_destroy(&prwlock->read_signal);
		_pthread_cond_destroy(&prwlock->write_signal);
		free(prwlock);

		*rwlock = NULL;
@ -87,7 +92,7 @@ _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
		ret = 0;
	}

	return(ret);
	return (ret);
}

int
@ -100,25 +105,25 @@ _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr
	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));

	if (prwlock == NULL)
		return(ENOMEM);
		return (ENOMEM);

	/* initialize the lock */
	if ((ret = pthread_mutex_init(&prwlock->lock, NULL)) != 0)
	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
		free(prwlock);
	else {
		/* initialize the read condition signal */
		ret = pthread_cond_init(&prwlock->read_signal, NULL);
		ret = _pthread_cond_init(&prwlock->read_signal, NULL);

		if (ret != 0) {
			pthread_mutex_destroy(&prwlock->lock);
			_pthread_mutex_destroy(&prwlock->lock);
			free(prwlock);
		} else {
			/* initialize the write condition signal */
			ret = pthread_cond_init(&prwlock->write_signal, NULL);
			ret = _pthread_cond_init(&prwlock->write_signal, NULL);

			if (ret != 0) {
				pthread_cond_destroy(&prwlock->read_signal);
				pthread_mutex_destroy(&prwlock->lock);
				_pthread_cond_destroy(&prwlock->read_signal);
				_pthread_mutex_destroy(&prwlock->lock);
				free(prwlock);
			} else {
				/* success */
@ -130,7 +135,7 @@ _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr
			}
		}
	}

	return(ret);
	return (ret);
}

int
@ -140,30 +145,30 @@ _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
	int ret;

	if (rwlock == NULL)
		return(EINVAL);
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return(ret);
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	/* give writers priority over readers */
	while (prwlock->blocked_writers || prwlock->state < 0) {
		ret = pthread_cond_wait(&prwlock->read_signal, &prwlock->lock);
		ret = _pthread_cond_wait(&prwlock->read_signal, &prwlock->lock);

		if (ret != 0) {
			/* can't do a whole lot if this fails */
			pthread_mutex_unlock(&prwlock->lock);
			return(ret);
			_pthread_mutex_unlock(&prwlock->lock);
			return (ret);
		}
	}

@ -179,9 +184,9 @@ _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
	 * lock. Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	pthread_mutex_unlock(&prwlock->lock);
	_pthread_mutex_unlock(&prwlock->lock);

	return(ret);
	return (ret);
}

int
@ -191,21 +196,21 @@ _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
	int ret;

	if (rwlock == NULL)
		return(EINVAL);
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return(ret);
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	/* give writers priority over readers */
	if (prwlock->blocked_writers || prwlock->state < 0)
@ -216,9 +221,9 @@ _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
		++prwlock->state; /* indicate we are locked for reading */

	/* see the comment on this in pthread_rwlock_rdlock */
	pthread_mutex_unlock(&prwlock->lock);
	_pthread_mutex_unlock(&prwlock->lock);

	return(ret);
	return (ret);
}

int
@ -228,21 +233,21 @@ _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
	int ret;

	if (rwlock == NULL)
		return(EINVAL);
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return(ret);
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	if (prwlock->state != 0)
		ret = EBUSY;
@ -251,9 +256,9 @@ _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
		prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	pthread_mutex_unlock(&prwlock->lock);
	_pthread_mutex_unlock(&prwlock->lock);

	return(ret);
	return (ret);
}

int
@ -263,34 +268,34 @@ _pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
	int ret;

	if (rwlock == NULL)
		return(EINVAL);
		return (EINVAL);

	prwlock = *rwlock;

	if (prwlock == NULL)
		return(EINVAL);
		return (EINVAL);

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	if (prwlock->state > 0) {
		if (--prwlock->state == 0 && prwlock->blocked_writers)
			ret = pthread_cond_signal(&prwlock->write_signal);
			ret = _pthread_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		prwlock->state = 0;

		if (prwlock->blocked_writers)
			ret = pthread_cond_signal(&prwlock->write_signal);
			ret = _pthread_cond_signal(&prwlock->write_signal);
		else
			ret = pthread_cond_broadcast(&prwlock->read_signal);
			ret = _pthread_cond_broadcast(&prwlock->read_signal);
	} else
		ret = EINVAL;

	/* see the comment on this in pthread_rwlock_rdlock */
	pthread_mutex_unlock(&prwlock->lock);
	_pthread_mutex_unlock(&prwlock->lock);

	return(ret);
	return (ret);
}

int
@ -300,31 +305,31 @@ _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
	int ret;

	if (rwlock == NULL)
		return(EINVAL);
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return(ret);
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	while (prwlock->state != 0) {
		++prwlock->blocked_writers;

		ret = pthread_cond_wait(&prwlock->write_signal, &prwlock->lock);
		ret = _pthread_cond_wait(&prwlock->write_signal, &prwlock->lock);

		if (ret != 0) {
			--prwlock->blocked_writers;
			pthread_mutex_unlock(&prwlock->lock);
			return(ret);
			_pthread_mutex_unlock(&prwlock->lock);
			return (ret);
		}

		--prwlock->blocked_writers;
@ -334,8 +339,7 @@ _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
	prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	pthread_mutex_unlock(&prwlock->lock);
	_pthread_mutex_unlock(&prwlock->lock);

	return(ret);
	return (ret);
}

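Throughout this rwlock code, state > 0 counts active readers, state == -1 marks a writer, and blocked_writers gives writers priority: readers wait whenever a writer holds or wants the lock. A condensed standalone model of the rdlock/unlock halves over a mutex and two condition variables (my own struct, mirroring the logic shown above):

#include <pthread.h>

struct rwlock {
	pthread_mutex_t	lock;
	pthread_cond_t	read_signal, write_signal;
	int		state;		/* >0 readers, -1 writer, 0 free */
	int		blocked_writers;
};

static void
rw_rdlock(struct rwlock *rw)
{
	pthread_mutex_lock(&rw->lock);
	/* Give writers priority over readers. */
	while (rw->blocked_writers || rw->state < 0)
		pthread_cond_wait(&rw->read_signal, &rw->lock);
	rw->state++;			/* one more active reader */
	pthread_mutex_unlock(&rw->lock);
}

static void
rw_unlock(struct rwlock *rw)
{
	pthread_mutex_lock(&rw->lock);
	if (rw->state > 0 && --rw->state == 0 && rw->blocked_writers)
		pthread_cond_signal(&rw->write_signal);
	else if (rw->state < 0) {
		rw->state = 0;
		if (rw->blocked_writers)
			pthread_cond_signal(&rw->write_signal);
		else
			pthread_cond_broadcast(&rw->read_signal);
	}
	pthread_mutex_unlock(&rw->lock);
}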
|
@ -49,11 +49,12 @@ int
__select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
    struct timeval *timeout)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	_thread_enter_cancellation_point();
	_thr_enter_cancellation_point(curthread);
	ret = __sys_select(numfds, readfds, writefds, exceptfds, timeout);
	_thread_leave_cancellation_point();
	_thr_leave_cancellation_point(curthread);

	return ret;
}
@ -39,6 +39,9 @@ __weak_reference(_pthread_self, pthread_self);
pthread_t
_pthread_self(void)
{
	if (_thr_initial == NULL)
		_libpthread_init(NULL);

	/* Return the running thread pointer: */
	return (_get_curthread());
}
@ -32,7 +32,9 @@
#include <stdlib.h>
#include <errno.h>
#include <semaphore.h>
#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

#define _SEM_CHECK_VALIDITY(sem)	\
@ -88,15 +90,15 @@ _sem_init(sem_t *sem, int pshared, unsigned int value)
	/*
	 * Initialize the semaphore.
	 */
	if (pthread_mutex_init(&(*sem)->lock, NULL) != 0) {
	if (_pthread_mutex_init(&(*sem)->lock, NULL) != 0) {
		free(*sem);
		errno = ENOSPC;
		retval = -1;
		goto RETURN;
	}

	if (pthread_cond_init(&(*sem)->gtzero, NULL) != 0) {
		pthread_mutex_destroy(&(*sem)->lock);
	if (_pthread_cond_init(&(*sem)->gtzero, NULL) != 0) {
		_pthread_mutex_destroy(&(*sem)->lock);
		free(*sem);
		errno = ENOSPC;
		retval = -1;
@ -109,7 +111,7 @@ _sem_init(sem_t *sem, int pshared, unsigned int value)

	retval = 0;
  RETURN:
	return retval;
	return (retval);
}

int
@ -120,71 +122,72 @@ _sem_destroy(sem_t *sem)
	_SEM_CHECK_VALIDITY(sem);

	/* Make sure there are no waiters. */
	pthread_mutex_lock(&(*sem)->lock);
	_pthread_mutex_lock(&(*sem)->lock);
	if ((*sem)->nwaiters > 0) {
		pthread_mutex_unlock(&(*sem)->lock);
		_pthread_mutex_unlock(&(*sem)->lock);
		errno = EBUSY;
		retval = -1;
		goto RETURN;
	}
	pthread_mutex_unlock(&(*sem)->lock);
	_pthread_mutex_unlock(&(*sem)->lock);

	pthread_mutex_destroy(&(*sem)->lock);
	pthread_cond_destroy(&(*sem)->gtzero);
	_pthread_mutex_destroy(&(*sem)->lock);
	_pthread_cond_destroy(&(*sem)->gtzero);
	(*sem)->magic = 0;

	free(*sem);

	retval = 0;
  RETURN:
	return retval;
	return (retval);
}

sem_t *
_sem_open(const char *name, int oflag, ...)
{
	errno = ENOSYS;
	return SEM_FAILED;
	return (SEM_FAILED);
}

int
_sem_close(sem_t *sem)
{
	errno = ENOSYS;
	return -1;
	return (-1);
}

int
_sem_unlink(const char *name)
{
	errno = ENOSYS;
	return -1;
	return (-1);
}

int
_sem_wait(sem_t *sem)
{
	int retval;
	struct pthread *curthread = _get_curthread();
	int retval;

	_thread_enter_cancellation_point();
	_thr_enter_cancellation_point(curthread);

	_SEM_CHECK_VALIDITY(sem);

	pthread_mutex_lock(&(*sem)->lock);
	_pthread_mutex_lock(&(*sem)->lock);

	while ((*sem)->count == 0) {
		(*sem)->nwaiters++;
		pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock);
		_pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock);
		(*sem)->nwaiters--;
	}
	(*sem)->count--;

	pthread_mutex_unlock(&(*sem)->lock);
	_pthread_mutex_unlock(&(*sem)->lock);

	retval = 0;
  RETURN:
	_thread_leave_cancellation_point();
	return retval;
	_thr_leave_cancellation_point(curthread);
	return (retval);
}

int
@ -194,7 +197,7 @@ _sem_trywait(sem_t *sem)

	_SEM_CHECK_VALIDITY(sem);

	pthread_mutex_lock(&(*sem)->lock);
	_pthread_mutex_lock(&(*sem)->lock);

	if ((*sem)->count > 0) {
		(*sem)->count--;
@ -204,37 +207,38 @@ _sem_trywait(sem_t *sem)
		retval = -1;
	}

	pthread_mutex_unlock(&(*sem)->lock);
	_pthread_mutex_unlock(&(*sem)->lock);

  RETURN:
	return retval;
	return (retval);
}

int
_sem_post(sem_t *sem)
{
	int retval;
	kse_critical_t crit;
	int retval;

	_SEM_CHECK_VALIDITY(sem);

	/*
	 * sem_post() is required to be safe to call from within signal
	 * handlers. Thus, we must defer signals.
	 * handlers. Thus, we must enter a critical region.
	 */
	_thread_kern_sig_defer();
	crit = _kse_critical_enter();

	pthread_mutex_lock(&(*sem)->lock);
	_pthread_mutex_lock(&(*sem)->lock);

	(*sem)->count++;
	if ((*sem)->nwaiters > 0)
		pthread_cond_signal(&(*sem)->gtzero);
		_pthread_cond_signal(&(*sem)->gtzero);

	pthread_mutex_unlock(&(*sem)->lock);
	_pthread_mutex_unlock(&(*sem)->lock);

	_thread_kern_sig_undefer();
	_kse_critical_leave(crit);
	retval = 0;
  RETURN:
	return retval;
	return (retval);
}

int
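The semaphore here is just a counter guarded by a mutex, with a condition variable signalled when the count goes above zero; sem_post additionally enters a critical region first because POSIX requires it to be async-signal-safe. A minimal userland model of the wait/post pair, without the signal-safety machinery (my own struct, mirroring the diff's logic):

#include <pthread.h>

struct sem {
	pthread_mutex_t	lock;
	pthread_cond_t	gtzero;		/* signalled when count > 0 */
	unsigned int	count;
	unsigned int	nwaiters;
};

static void
sem_wait_model(struct sem *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->count == 0) {
		s->nwaiters++;
		pthread_cond_wait(&s->gtzero, &s->lock);
		s->nwaiters--;
	}
	s->count--;
	pthread_mutex_unlock(&s->lock);
}

static void
sem_post_model(struct sem *s)
{
	pthread_mutex_lock(&s->lock);
	s->count++;
	if (s->nwaiters > 0)		/* wake one sleeper, if any */
		pthread_cond_signal(&s->gtzero);
	pthread_mutex_unlock(&s->lock);
}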
@ -244,11 +248,11 @@ _sem_getvalue(sem_t *sem, int *sval)

	_SEM_CHECK_VALIDITY(sem);

	pthread_mutex_lock(&(*sem)->lock);
	_pthread_mutex_lock(&(*sem)->lock);
	*sval = (int)(*sem)->count;
	pthread_mutex_unlock(&(*sem)->lock);
	_pthread_mutex_unlock(&(*sem)->lock);

	retval = 0;
  RETURN:
	return retval;
	return (retval);
}
@ -47,7 +47,7 @@ void
_thread_seterrno(pthread_t thread, int error)
{
	/* Check for the initial thread: */
	if (thread == _thread_initial)
	if (thread == _thr_initial)
		/* The initial thread always uses the global error variable: */
		errno = error;
	else
@ -42,40 +42,55 @@ int
_pthread_setschedparam(pthread_t pthread, int policy,
    const struct sched_param *param)
{
	int old_prio, in_readyq = 0, ret = 0;
	struct pthread *curthread = _get_curthread();
	int	in_syncq;
	int	in_readyq = 0;
	int	old_prio;
	int	ret = 0;

	if ((param == NULL) || (policy < SCHED_FIFO) || (policy > SCHED_RR)) {
		/* Return an invalid argument error: */
		ret = EINVAL;
	} else if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
	    (param->sched_priority > PTHREAD_MAX_PRIORITY)) {
	} else if ((param->sched_priority < THR_MIN_PRIORITY) ||
	    (param->sched_priority > THR_MAX_PRIORITY)) {
		/* Return an unsupported value error. */
		ret = ENOTSUP;

	/* Find the thread in the list of active threads: */
	} else if ((ret = _find_thread(pthread)) == 0) {
	} else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
	    == 0) {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 * Lock the threads scheduling queue while we change
		 * its priority:
		 */
		_thread_kern_sig_defer();
		THR_SCHED_LOCK(curthread, pthread);
		in_syncq = pthread->flags & THR_FLAGS_IN_SYNCQ;

		if (param->sched_priority !=
		    PTHREAD_BASE_PRIORITY(pthread->base_priority)) {
		/* Set the scheduling policy: */
		pthread->attr.sched_policy = policy;

		if (param->sched_priority ==
		    THR_BASE_PRIORITY(pthread->base_priority))
			/*
			 * There is nothing to do; unlock the threads
			 * scheduling queue.
			 */
			THR_SCHED_UNLOCK(curthread, pthread);
		else {
			/*
			 * Remove the thread from its current priority
			 * queue before any adjustments are made to its
			 * active priority:
			 */
			old_prio = pthread->active_priority;
			if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) {
			if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0) {
				in_readyq = 1;
				PTHREAD_PRIOQ_REMOVE(pthread);
				THR_RUNQ_REMOVE(pthread);
			}

			/* Set the thread base priority: */
			pthread->base_priority &=
			    (PTHREAD_SIGNAL_PRIORITY | PTHREAD_RT_PRIORITY);
			    (THR_SIGNAL_PRIORITY | THR_RT_PRIORITY);
			pthread->base_priority = param->sched_priority;

			/* Recalculate the active priority: */
@ -92,28 +107,23 @@ _pthread_setschedparam(pthread_t pthread, int policy,
				 * its priority if it owns any priority
				 * protection or inheritance mutexes.
				 */
				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
				THR_RUNQ_INSERT_HEAD(pthread);
				}
				else
				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
				THR_RUNQ_INSERT_TAIL(pthread);
			}

			/* Unlock the threads scheduling queue: */
			THR_SCHED_UNLOCK(curthread, pthread);

			/*
			 * Check for any mutex priority adjustments. This
			 * includes checking for a priority mutex on which
			 * this thread is waiting.
			 */
			_mutex_notify_priochange(pthread);
			_mutex_notify_priochange(curthread, pthread, in_syncq);
		}

		/* Set the scheduling policy: */
		pthread->attr.sched_policy = policy;

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
		_thr_ref_delete(curthread, pthread);
	}
	return(ret);
	return (ret);
}
File diff suppressed because it is too large
@ -50,8 +50,8 @@ _sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
		errno = EINVAL;
		ret = -1;
	} else {
		if (_thread_initial == NULL)
			_thread_init();
		if (_thr_initial == NULL)
			_libpthread_init(NULL);

		/*
		 * Check if the existing signal action structure contents are
@ -76,14 +76,9 @@ _sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
		 * Check if the kernel needs to be advised of a change
		 * in signal action:
		 */
		if (act != NULL && sig != SIGCHLD) {
			/*
			 * Ensure the signal handler cannot be interrupted
			 * by other signals. Always request the POSIX signal
			 * handler arguments.
			 */
			sigfillset(&gact.sa_mask);
			gact.sa_flags = SA_SIGINFO | SA_ONSTACK;
		if (act != NULL && sig != SIGINFO) {
			gact.sa_mask = act->sa_mask;
			gact.sa_flags = SA_SIGINFO | act->sa_flags;

			/*
			 * Check if the signal handler is being set to
@ -98,10 +93,10 @@ _sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
				 * Specify the thread kernel signal
				 * handler:
				 */
				gact.sa_handler = (void (*) ()) _thread_sig_handler;
				gact.sa_handler = (void (*) ())_thr_sig_handler;

			/* Change the signal action in the kernel: */
			if (__sys_sigaction(sig,&gact,NULL) != 0)
			if (__sys_sigaction(sig, &gact, NULL) != 0)
				ret = -1;
		}
	}
@ -36,6 +36,7 @@
#include <sys/signalvar.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <pthread.h>
#include "thr_private.h"

@ -44,32 +45,59 @@ __weak_reference(_pthread_sigmask, pthread_sigmask);
int
_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
	int i;
	struct pthread *curthread = _get_curthread();
	int ret;

	ret = 0;
	if (oset != NULL)
		bcopy(&curthread->mailbox.tm_context.uc_sigmask, oset,
		    sizeof(sigset_t));
	if (set == NULL)
		return (0);
	switch (how) {
	case SIG_BLOCK:
		for (i = 0; i < _SIG_WORDS; i++)
			curthread->mailbox.tm_context.uc_sigmask.__bits[i] |=
			    set->__bits[i];
		break;
	case SIG_UNBLOCK:
		for (i = 0; i < _SIG_WORDS; i++)
			curthread->mailbox.tm_context.uc_sigmask.__bits[i] &=
			    ~set->__bits[i];
		break;
	case SIG_SETMASK:
		bcopy(set, &curthread->mailbox.tm_context.uc_sigmask,
		    sizeof(sigset_t));
		break;
	default:
		errno = EINVAL;
		return (-1);
		/* Return the current mask: */
		*oset = curthread->tmbx.tm_context.uc_sigmask;

	/* Check if a new signal set was provided by the caller: */
	if (set != NULL) {
		THR_SCHED_LOCK(curthread, curthread);

		/* Process according to what to do: */
		switch (how) {
		/* Block signals: */
		case SIG_BLOCK:
			/* Add signals to the existing mask: */
			SIGSETOR(curthread->tmbx.tm_context.uc_sigmask, *set);
			break;

		/* Unblock signals: */
		case SIG_UNBLOCK:
			/* Clear signals from the existing mask: */
			SIGSETNAND(curthread->tmbx.tm_context.uc_sigmask, *set);
			break;

		/* Set the signal process mask: */
		case SIG_SETMASK:
			/* Set the new mask: */
			curthread->tmbx.tm_context.uc_sigmask = *set;
			break;

		/* Trap invalid actions: */
		default:
			/* Return an invalid argument: */
			errno = EINVAL;
			ret = -1;
			break;
		}

		if (ret == 0) {
			curthread->sigmask =
			    curthread->tmbx.tm_context.uc_sigmask;
			curthread->sigmask_seqno++;
		}

		THR_SCHED_UNLOCK(curthread, curthread);

		/*
		 * Run down any pending signals:
		 */
		if (ret == 0)
			_thr_sig_check_pending(curthread);
	}
	return (0);
	return (ret);
}
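The three SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK cases above reduce to set union, set clear, and assignment on the ucontext's signal mask. A standalone equivalent using the portable sigset calls instead of the kernel's SIGSETOR/SIGSETNAND macros (helper name is mine):

#include <signal.h>
#include <errno.h>

static int
mask_apply(sigset_t *mask, int how, const sigset_t *set)
{
	int i;

	switch (how) {
	case SIG_BLOCK:			/* union: add the new signals */
		for (i = 1; i < NSIG; i++)
			if (sigismember(set, i))
				sigaddset(mask, i);
		break;
	case SIG_UNBLOCK:		/* clear the named signals */
		for (i = 1; i < NSIG; i++)
			if (sigismember(set, i))
				sigdelset(mask, i);
		break;
	case SIG_SETMASK:		/* wholesale replacement */
		*mask = *set;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}
	return (0);
}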
@ -45,6 +45,7 @@ int
_sigpending(sigset_t *set)
{
	struct pthread *curthread = _get_curthread();
	kse_critical_t crit;
	int ret = 0;

	/* Check for a null signal set pointer: */
@ -54,7 +55,11 @@ _sigpending(sigset_t *set)
	}
	else {
		*set = curthread->sigpend;
		SIGSETOR(*set, _thread_sigpending);
		crit = _kse_critical_enter();
		KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
		SIGSETOR(*set, _thr_proc_sigpending);
		KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
		_kse_critical_leave(crit);
	}
	/* Return the completion status: */
	return (ret);
@ -32,22 +32,58 @@
 * $FreeBSD$
 */
#include <signal.h>
#include <sys/param.h>
#include <sys/signalvar.h>
#include <errno.h>
#include <pthread.h>
#include <string.h>
#include "thr_private.h"

__weak_reference(__sigsuspend, sigsuspend);

int
_sigsuspend(const sigset_t *set)
{
	struct pthread *curthread = _get_curthread();
	int ret = -1;

	/* Check if a new signal set was provided by the caller: */
	if (set != NULL) {
		THR_SCHED_LOCK(curthread, curthread);

		/* Change the caller's mask: */
		memcpy(&curthread->tmbx.tm_context.uc_sigmask,
		    set, sizeof(sigset_t));

		THR_SET_STATE(curthread, PS_SIGSUSPEND);

		THR_SCHED_UNLOCK(curthread, curthread);

		/* Wait for a signal: */
		_thr_sched_switch(curthread);

		/* Always return an interrupted error: */
		errno = EINTR;

		/* Restore the signal mask: */
		memcpy(&curthread->tmbx.tm_context.uc_sigmask,
		    &curthread->sigmask, sizeof(sigset_t));
	} else {
		/* Return an invalid argument error: */
		errno = EINVAL;
	}

	/* Return the completion status: */
	return (ret);
}

int
__sigsuspend(const sigset_t * set)
{
	int ret;
	struct pthread *curthread = _get_curthread();
	int ret;

	_thread_enter_cancellation_point();
	ret = __sys_sigsuspend(set);
	_thread_leave_cancellation_point();
	_thr_enter_cancellation_point(curthread);
	ret = _sigsuspend(set);
	_thr_leave_cancellation_point(curthread);

	return ret;
	return (ret);
}
@ -42,11 +42,135 @@
__weak_reference(_sigwait, sigwait);

int
_sigwait(const sigset_t * __restrict set, int * __restrict sig)
_sigwait(const sigset_t *set, int *sig)
{
	struct pthread *curthread = _get_curthread();
	int	ret = 0;
	int	i;
	sigset_t tempset, waitset;
	struct sigaction act;

	_thr_enter_cancellation_point(curthread);

	/*
	 * All signals are invalid for waiting.
	 * Specify the thread kernel signal handler.
	 */
	return (EINVAL);
	act.sa_handler = (void (*) ()) _thr_sig_handler;
	act.sa_flags = SA_RESTART | SA_SIGINFO;
	/* Ensure the signal handler cannot be interrupted by other signals: */
	sigfillset(&act.sa_mask);

	/*
	 * Initialize the set of signals that will be waited on:
	 */
	waitset = *set;

	/* These signals can't be waited on. */
	sigdelset(&waitset, SIGKILL);
	sigdelset(&waitset, SIGSTOP);

	/*
	 * Check to see if a pending signal is in the wait mask.
	 * This has to be atomic. */
	tempset = curthread->sigpend;
	SIGSETOR(tempset, _thr_proc_sigpending);
	SIGSETAND(tempset, waitset);
	if (SIGNOTEMPTY(tempset)) {
		/* Enter a loop to find a pending signal: */
		for (i = 1; i < NSIG; i++) {
			if (sigismember (&tempset, i))
				break;
		}

		/* Clear the pending signal: */
		if (sigismember(&curthread->sigpend,i))
			sigdelset(&curthread->sigpend,i);
		else
			sigdelset(&_thr_proc_sigpending,i);

		/* Return the signal number to the caller: */
		*sig = i;

		_thr_leave_cancellation_point(curthread);
		return (0);
	}

	/*
	 * Lock the array of SIG_DFL wait counts.
	 */
	THR_LOCK_ACQUIRE(curthread, &_thread_signal_lock);

	/*
	 * Enter a loop to find the signals that are SIG_DFL. For
	 * these signals we must install a dummy signal handler in
	 * order for the kernel to pass them in to us. POSIX says
	 * that the _application_ must explicitly install a dummy
	 * handler for signals that are SIG_IGN in order to sigwait
	 * on them. Note that SIG_IGN signals are left in the
	 * mask because a subsequent sigaction could enable an
	 * ignored signal.
	 */
	sigemptyset(&tempset);
	for (i = 1; i < NSIG; i++) {
		if (sigismember(&waitset, i) &&
		    (_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
			_thread_dfl_count[i]++;
			sigaddset(&tempset, i);
			if (_thread_dfl_count[i] == 1) {
				if (__sys_sigaction(i, &act, NULL) != 0)
					ret = -1;
			}
		}
	}
	/* Done accessing _thread_dfl_count for now. */
	THR_LOCK_RELEASE(curthread, &_thread_signal_lock);

	if (ret == 0) {
		/*
		 * Save the wait signal mask. The wait signal
		 * mask is independent of the threads signal mask
		 * and requires separate storage.
		 */
		curthread->data.sigwait = &waitset;

		/* Wait for a signal: */
		THR_SCHED_LOCK(curthread, curthread);
		THR_SET_STATE(curthread, PS_SIGWAIT);
		THR_SCHED_UNLOCK(curthread, curthread);
		_thr_sched_switch(curthread);

		/* Return the signal number to the caller: */
		*sig = curthread->signo;

		/*
		 * Probably unnecessary, but since it's in a union struct
		 * we don't know how it could be used in the future.
		 */
		curthread->data.sigwait = NULL;
	}

	/*
	 * Relock the array of SIG_DFL wait counts.
	 */
	THR_LOCK_ACQUIRE(curthread, &_thread_signal_lock);

	/* Restore the sigactions: */
	act.sa_handler = SIG_DFL;
	for (i = 1; i < NSIG; i++) {
		if (sigismember(&tempset, i)) {
			_thread_dfl_count[i]--;
			if ((_thread_sigact[i - 1].sa_handler == SIG_DFL) &&
			    (_thread_dfl_count[i] == 0)) {
				if (__sys_sigaction(i, &act, NULL) != 0)
					ret = -1;
			}
		}
	}
	/* Done accessing _thread_dfl_count. */
	THR_LOCK_RELEASE(curthread, &_thread_signal_lock);

	_thr_leave_cancellation_point(curthread);

	/* Return the completion status: */
	return (ret);
}
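The fast path of the new _sigwait scans the intersection of the pending sets with the wait mask and consumes the lowest pending signal. The scan itself, isolated as a standalone helper (name and factoring are mine; standard sigset calls only):

#include <signal.h>

/* Return the lowest signal number pending in both sets, or 0 if the
 * intersection is empty. */
static int
lowest_pending(const sigset_t *pending, const sigset_t *waitset)
{
	int i;

	for (i = 1; i < NSIG; i++)
		if (sigismember(pending, i) && sigismember(waitset, i))
			return (i);
	return (0);
}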
@ -38,11 +38,12 @@ __weak_reference(_sleep, sleep);
unsigned int
_sleep(unsigned int seconds)
{
	struct pthread *curthread = _get_curthread();
	unsigned int ret;

	_thread_enter_cancellation_point();
	_thr_enter_cancellation_point(curthread);
	ret = __sleep(seconds);
	_thread_leave_cancellation_point();
	_thr_leave_cancellation_point(curthread);

	return ret;
	return (ret);
}
@ -39,7 +39,6 @@
#include "thr_private.h"

struct pthread_key {
spinlock_t lock;
volatile int allocated;
volatile int count;
int seqno;
@ -47,7 +46,7 @@ struct pthread_key {
};

/* Static variables: */
static struct pthread_key key_table[PTHREAD_KEYS_MAX];
static struct pthread_key key_table[PTHREAD_KEYS_MAX];

__weak_reference(_pthread_key_create, pthread_key_create);
__weak_reference(_pthread_key_delete, pthread_key_delete);
@ -56,44 +55,47 @@ __weak_reference(_pthread_setspecific, pthread_setspecific);


int
_pthread_key_create(pthread_key_t * key, void (*destructor) (void *))
_pthread_key_create(pthread_key_t *key, void (*destructor) (void *))
{
struct pthread *curthread = _get_curthread();

/* Lock the key table: */
THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
for ((*key) = 0; (*key) < PTHREAD_KEYS_MAX; (*key)++) {
/* Lock the key table entry: */
_SPINLOCK(&key_table[*key].lock);

if (key_table[(*key)].allocated == 0) {
key_table[(*key)].allocated = 1;
key_table[(*key)].destructor = destructor;
key_table[(*key)].seqno++;

/* Unlock the key table entry: */
_SPINUNLOCK(&key_table[*key].lock);
/* Unlock the key table: */
THR_LOCK_RELEASE(curthread, &_keytable_lock);
return (0);
}

/* Unlock the key table entry: */
_SPINUNLOCK(&key_table[*key].lock);
}
/* Unlock the key table: */
THR_LOCK_RELEASE(curthread, &_keytable_lock);
return (EAGAIN);
}

int
_pthread_key_delete(pthread_key_t key)
{
struct pthread *curthread = _get_curthread();
int ret = 0;

if (key < PTHREAD_KEYS_MAX) {
/* Lock the key table entry: */
_SPINLOCK(&key_table[key].lock);
/* Lock the key table: */
THR_LOCK_ACQUIRE(curthread, &_keytable_lock);

if (key_table[key].allocated)
key_table[key].allocated = 0;
else
ret = EINVAL;

/* Unlock the key table entry: */
_SPINUNLOCK(&key_table[key].lock);
/* Unlock the key table: */
THR_LOCK_RELEASE(curthread, &_keytable_lock);
} else
ret = EINVAL;
return (ret);
@ -105,44 +107,41 @@ _thread_cleanupspecific(void)
struct pthread *curthread = _get_curthread();
void *data = NULL;
int key;
int itr;
void (*destructor)( void *);

for (itr = 0; itr < PTHREAD_DESTRUCTOR_ITERATIONS; itr++) {
for (key = 0; key < PTHREAD_KEYS_MAX; key++) {
if (curthread->specific_data_count > 0) {
/* Lock the key table entry: */
_SPINLOCK(&key_table[key].lock);
destructor = NULL;
if (curthread->specific != NULL) {
/* Lock the key table: */
THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
for (key = 0; (key < PTHREAD_KEYS_MAX) &&
(curthread->specific_data_count > 0); key++) {
destructor = NULL;

if (key_table[key].allocated &&
(curthread->specific[key].data != NULL)) {
if (curthread->specific[key].seqno ==
key_table[key].seqno) {
data = (void *) curthread->specific[key].data;
destructor = key_table[key].destructor;
}
curthread->specific[key].data = NULL;
curthread->specific_data_count--;
if (key_table[key].allocated &&
(curthread->specific[key].data != NULL)) {
if (curthread->specific[key].seqno ==
key_table[key].seqno) {
data = (void *)curthread->specific[key].data;
destructor = key_table[key].destructor;
}
curthread->specific[key].data = NULL;
curthread->specific_data_count--;
}

/* Unlock the key table entry: */
_SPINUNLOCK(&key_table[key].lock);

/*
* If there is a destructor, call it
* with the key table entry unlocked:
*/
if (destructor != NULL) {
/*
* If there is a destructor, call it
* with the key table entry unlocked:
* Don't hold the lock while calling the
* destructor:
*/
if (destructor)
destructor(data);
} else {
free(curthread->specific);
curthread->specific = NULL;
return;
THR_LOCK_RELEASE(curthread, &_keytable_lock);
destructor(data);
THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
}
}
}
if (curthread->specific != NULL) {
THR_LOCK_RELEASE(curthread, &_keytable_lock);
free(curthread->specific);
curthread->specific = NULL;
}
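A short application-side sketch of the destructor behavior implemented above (standard pthread TSD API only, not part of this commit). The destructor registered with pthread_key_create() runs at thread exit for each non-NULL value, which is why _thread_cleanupspecific() drops the key table lock around the call:

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t buf_key;
static pthread_once_t buf_once = PTHREAD_ONCE_INIT;

static void
buf_free(void *p)
{
	free(p);		/* invoked once per exiting thread */
}

static void
buf_init(void)
{
	(void)pthread_key_create(&buf_key, buf_free);
}

/* Return a per-thread scratch buffer, created on first use. */
void *
thread_buf(void)
{
	void *p;

	(void)pthread_once(&buf_once, buf_init);
	if ((p = pthread_getspecific(buf_key)) == NULL) {
		p = malloc(256);
		(void)pthread_setspecific(buf_key, p);
	}
	return (p);
}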
@ -41,9 +41,14 @@
#include <unistd.h>

#include <libc_private.h>

#include "spinlock.h"
#include "thr_private.h"

/*
* These are for compatibility only. Spinlocks of this type
* are deprecated.
*/

void
_spinunlock(spinlock_t *lck)
{
@ -60,20 +65,14 @@ _spinunlock(spinlock_t *lck)
void
_spinlock(spinlock_t *lck)
{
struct pthread *curthread = _get_curthread();

/*
* Try to grab the lock and loop if another thread grabs
* it before we do.
*/
while(_atomic_lock(&lck->access_lock)) {
/* Block the thread until the lock. */
curthread->data.spinlock = lck;
_thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__);
while (lck->access_lock)
;
}

/* The running thread now owns the lock: */
lck->lock_owner = (long) curthread;
}

/*
@ -89,30 +88,12 @@ _spinlock(spinlock_t *lck)
void
_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
struct pthread *curthread = _get_curthread();
int cnt = 0;

/*
* Try to grab the lock and loop if another thread grabs
* it before we do.
*/
while(_atomic_lock(&lck->access_lock)) {
cnt++;
if (cnt > 100) {
char str[256];
snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", getprogname(), curthread, lck, fname, lineno, lck->fname, lck->lineno);
__sys_write(2,str,strlen(str));
__sleep(1);
cnt = 0;
}

/* Block the thread until the lock. */
curthread->data.spinlock = lck;
_thread_kern_sched_state(PS_SPINBLOCK, fname, lineno);
while (lck->access_lock)
;
}

/* The running thread now owns the lock: */
lck->lock_owner = (long) curthread;
lck->fname = fname;
lck->lineno = lineno;
}
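The loops above depend on _atomic_lock(), an architecture-specific test-and-set primitive. As a rough modern equivalent (a portable C11 sketch, not the library's implementation), the same acquire/release protocol looks like:

#include <stdatomic.h>
#include <sched.h>

static atomic_flag slock = ATOMIC_FLAG_INIT;

static void
spin_acquire(void)
{
	/* Spin until the previously observed value was "unlocked". */
	while (atomic_flag_test_and_set_explicit(&slock, memory_order_acquire))
		sched_yield();	/* yield rather than blocking in a scheduler */
}

static void
spin_release(void)
{
	atomic_flag_clear_explicit(&slock, memory_order_release);
}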
@ -28,9 +28,7 @@
*/
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/user.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"
@ -44,31 +42,32 @@ struct stack {
};

/*
* Default sized (stack and guard) spare stack queue. Stacks are cached to
* avoid additional complexity managing mmap()ed stack regions. Spare stacks
* are used in LIFO order to increase cache locality.
* Default sized (stack and guard) spare stack queue. Stacks are cached
* to avoid additional complexity managing mmap()ed stack regions. Spare
* stacks are used in LIFO order to increase cache locality.
*/
static LIST_HEAD(, stack) _dstackq = LIST_HEAD_INITIALIZER(_dstackq);
static LIST_HEAD(, stack) dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
* Miscellaneous sized (non-default stack and/or guard) spare stack queue.
* Stacks are cached to avoid additional complexity managing mmap()ed stack
* regions. This list is unordered, since ordering on both stack size and guard
* size would be more trouble than it's worth. Stacks are allocated from this
* cache on a first size match basis.
* Stacks are cached to avoid additional complexity managing mmap()ed
* stack regions. This list is unordered, since ordering on both stack
* size and guard size would be more trouble than it's worth. Stacks are
* allocated from this cache on a first size match basis.
*/
static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
static LIST_HEAD(, stack) mstackq = LIST_HEAD_INITIALIZER(mstackq);

/**
* Base address of the last stack allocated (including its red zone, if there is
* one). Stacks are allocated contiguously, starting beyond the top of the main
* stack. When a new stack is created, a red zone is typically created
* (actually, the red zone is simply left unmapped) above the top of the stack,
* such that the stack will not be able to grow all the way to the bottom of the
* next stack. This isn't fool-proof. It is possible for a stack to grow by a
* large amount, such that it grows into the next stack, and as long as the
* memory within the red zone is never accessed, nothing will prevent one thread
* stack from trouncing all over the next.
* Base address of the last stack allocated (including its red zone, if
* there is one). Stacks are allocated contiguously, starting beyond the
* top of the main stack. When a new stack is created, a red zone is
* typically created (actually, the red zone is simply left unmapped) above
* the top of the stack, such that the stack will not be able to grow all
* the way to the bottom of the next stack. This isn't fool-proof. It is
* possible for a stack to grow by a large amount, such that it grows into
* the next stack, and as long as the memory within the red zone is never
* accessed, nothing will prevent one thread stack from trouncing all over
* the next.
*
* low memory
* . . . . . . . . . . . . . . . . . .
@ -112,50 +111,51 @@ static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
* high memory
*
*/
static void * last_stack;
static void *last_stack = NULL;

void *
_thread_stack_alloc(size_t stacksize, size_t guardsize)
int
_thr_stack_alloc(struct pthread_attr *attr)
{
void *stack = NULL;
struct stack *spare_stack;
size_t stack_size;
struct stack *spare_stack;
struct kse *curkse;
kse_critical_t crit;
size_t stacksize;
size_t guardsize;

stacksize = attr->stacksize_attr;
guardsize = attr->guardsize_attr;

/*
* Round up stack size to nearest multiple of _pthread_page_size,
* so that mmap() will work. If the stack size is not an even
* multiple, we end up initializing things such that there is unused
* space above the beginning of the stack, so the stack sits snugly
* against its guard.
* Round up stack size to nearest multiple of _thr_page_size so
* that mmap() will work. If the stack size is not an even
* multiple, we end up initializing things such that there is
* unused space above the beginning of the stack, so the stack
* sits snugly against its guard.
*/
if (stacksize % _pthread_page_size != 0)
stack_size = ((stacksize / _pthread_page_size) + 1) *
_pthread_page_size;
else
stack_size = stacksize;
if ((stacksize % _thr_page_size) != 0)
stacksize = ((stacksize / _thr_page_size) + 1) *
_thr_page_size;
attr->stackaddr_attr = NULL;
attr->flags &= ~THR_STACK_USER;

/*
* Use the garbage collector lock for synchronization of the
* spare stack lists and allocations from usrstack.
*/
crit = _kse_critical_enter();
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/*
* If the stack and guard sizes are default, try to allocate a stack
* from the default-size stack cache:
*/
if (stack_size == PTHREAD_STACK_DEFAULT &&
guardsize == _pthread_guard_default) {
/*
* Use the garbage collector mutex for synchronization of the
* spare stack list.
*/
if (pthread_mutex_lock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");

if ((spare_stack = LIST_FIRST(&_dstackq)) != NULL) {
/* Use the spare stack. */
if ((stacksize == THR_STACK_DEFAULT) &&
(guardsize == _thr_guard_default)) {
if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
/* Use the spare stack. */
LIST_REMOVE(spare_stack, qe);
stack = spare_stack->stackaddr;
attr->stackaddr_attr = spare_stack->stackaddr;
}

/* Unlock the garbage collector mutex. */
if (pthread_mutex_unlock(&_gc_mutex) != 0)
PANIC("Cannot unlock gc mutex");
}
/*
* The user specified a non-default stack and/or guard size, so try to
@ -163,78 +163,75 @@ _thread_stack_alloc(size_t stacksize, size_t guardsize)
* rounded up stack size (stack_size) in the search:
*/
else {
/*
* Use the garbage collector mutex for synchronization of the
* spare stack list.
*/
if (pthread_mutex_lock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");

LIST_FOREACH(spare_stack, &_mstackq, qe) {
if (spare_stack->stacksize == stack_size &&
LIST_FOREACH(spare_stack, &mstackq, qe) {
if (spare_stack->stacksize == stacksize &&
spare_stack->guardsize == guardsize) {
LIST_REMOVE(spare_stack, qe);
stack = spare_stack->stackaddr;
attr->stackaddr_attr = spare_stack->stackaddr;
break;
}
}

/* Unlock the garbage collector mutex. */
if (pthread_mutex_unlock(&_gc_mutex) != 0)
PANIC("Cannot unlock gc mutex");
}

/* Check if a stack was not allocated from a stack cache: */
if (stack == NULL) {

if (attr->stackaddr_attr != NULL) {
/* A cached stack was found. Release the lock. */
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
else {
/* Allocate a stack from usrstack. */
if (last_stack == NULL)
last_stack = _usrstack - PTHREAD_STACK_INITIAL -
_pthread_guard_default;
last_stack = _usrstack - THR_STACK_INITIAL -
_thr_guard_default;

/* Allocate a new stack. */
stack = last_stack - stack_size;
attr->stackaddr_attr = last_stack - stacksize;

/*
* Even if stack allocation fails, we don't want to try to use
* this location again, so unconditionally decrement
* Even if stack allocation fails, we don't want to try to
* use this location again, so unconditionally decrement
* last_stack. Under normal operating conditions, the most
* likely reason for an mmap() error is a stack overflow of the
* adjacent thread stack.
* likely reason for an mmap() error is a stack overflow of
* the adjacent thread stack.
*/
last_stack -= (stack_size + guardsize);
last_stack -= (stacksize + guardsize);

/* Stack: */
if (mmap(stack, stack_size, PROT_READ | PROT_WRITE, MAP_STACK,
-1, 0) == MAP_FAILED)
stack = NULL;
/* Release the lock before mmap'ing it. */
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);

/* Map the stack, but not the guard page: */
if (mmap(attr->stackaddr_attr, stacksize,
PROT_READ | PROT_WRITE, MAP_STACK, -1, 0) == MAP_FAILED)
attr->stackaddr_attr = NULL;
}

return (stack);
if (attr->stackaddr_attr != NULL)
return (0);
else
return (-1);
}

/* This function must be called with _gc_mutex held. */
/* This function must be called with _thread_list_lock held. */
void
_thread_stack_free(void *stack, size_t stacksize, size_t guardsize)
_thr_stack_free(struct pthread_attr *attr)
{
struct stack *spare_stack;
struct stack *spare_stack;

spare_stack = (stack + stacksize - sizeof(struct stack));
/* Round stacksize up to nearest multiple of _pthread_page_size. */
if (stacksize % _pthread_page_size != 0) {
spare_stack->stacksize =
((stacksize / _pthread_page_size) + 1) *
_pthread_page_size;
} else
spare_stack->stacksize = stacksize;
spare_stack->guardsize = guardsize;
spare_stack->stackaddr = stack;
if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
&& (attr->stackaddr_attr != NULL)) {
spare_stack = (attr->stackaddr_attr + attr->stacksize_attr
- sizeof(struct stack));
spare_stack->stacksize = attr->stacksize_attr;
spare_stack->guardsize = attr->guardsize_attr;
spare_stack->stackaddr = attr->stackaddr_attr;

if (spare_stack->stacksize == PTHREAD_STACK_DEFAULT &&
spare_stack->guardsize == _pthread_guard_default) {
/* Default stack/guard size. */
LIST_INSERT_HEAD(&_dstackq, spare_stack, qe);
} else {
/* Non-default stack/guard size. */
LIST_INSERT_HEAD(&_mstackq, spare_stack, qe);
if (spare_stack->stacksize == THR_STACK_DEFAULT &&
spare_stack->guardsize == _thr_guard_default) {
/* Default stack/guard size. */
LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
} else {
/* Non-default stack/guard size. */
LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
}
attr->stackaddr_attr = NULL;
}
}
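The page-rounding idiom used by both versions above generalizes; a small standalone sketch (any positive page size):

#include <stddef.h>

/* Round size up to the next multiple of pagesize. */
static size_t
round_to_page(size_t size, size_t pagesize)
{
	if (size % pagesize != 0)
		size = ((size / pagesize) + 1) * pagesize;
	return (size);
}

With a 4096-byte page, round_to_page(70000, 4096) yields 73728 (18 pages), so the mapped region ends exactly at a page boundary and the stack sits flush against its guard.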
@ -35,7 +35,7 @@
#include <pthread.h>
#include "thr_private.h"

static void suspend_common(struct pthread *thread);
static void suspend_common(struct pthread *thread);

__weak_reference(_pthread_suspend_np, pthread_suspend_np);
__weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np);
@ -44,27 +44,26 @@ __weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np);
int
_pthread_suspend_np(pthread_t thread)
{
struct pthread *curthread = _get_curthread();
int ret;

/* Suspending the current thread doesn't make sense. */
if (thread == _get_curthread())
ret = EDEADLK;

/* Find the thread in the list of active threads: */
else if ((ret = _find_thread(thread)) == 0) {
/*
* Defer signals to protect the scheduling queues from
* access by the signal handler:
*/
_thread_kern_sig_defer();
/* Add a reference to the thread: */
else if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0))
== 0) {
/* Lock the thread's scheduling queue: */
THR_SCHED_LOCK(curthread, thread);

suspend_common(thread);

/*
* Undefer and handle pending signals, yielding if
* necessary:
*/
_thread_kern_sig_undefer();
/* Unlock the thread's scheduling queue: */
THR_SCHED_UNLOCK(curthread, thread);

/* Don't forget to remove the reference: */
_thr_ref_delete(curthread, thread);
}
return (ret);
}
@ -74,31 +73,34 @@ _pthread_suspend_all_np(void)
{
struct pthread *curthread = _get_curthread();
struct pthread *thread;
kse_critical_t crit;

/*
* Defer signals to protect the scheduling queues from
* access by the signal handler:
*/
_thread_kern_sig_defer();
/* Take the thread list lock: */
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);

TAILQ_FOREACH(thread, &_thread_list, tle) {
if (thread != curthread)
if ((thread != curthread) &&
(thread->state != PS_DEAD) &&
(thread->state != PS_DEADLOCK) &&
((thread->flags & THR_FLAGS_EXITING) == 0)) {
THR_SCHED_LOCK(curthread, thread);
suspend_common(thread);
THR_SCHED_UNLOCK(curthread, thread);
}
}

/*
* Undefer and handle pending signals, yielding if
* necessary:
*/
_thread_kern_sig_undefer();
/* Release the thread list lock: */
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);
}

void
suspend_common(struct pthread *thread)
{
thread->flags |= PTHREAD_FLAGS_SUSPENDED;
if (thread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
PTHREAD_PRIOQ_REMOVE(thread);
PTHREAD_SET_STATE(thread, PS_SUSPENDED);
thread->flags |= THR_FLAGS_SUSPENDED;
if (thread->flags & THR_FLAGS_IN_RUNQ) {
THR_RUNQ_REMOVE(thread);
THR_SET_STATE(thread, PS_SUSPENDED);
}
}
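A caller-side sketch for the non-portable suspend interfaces above (not part of this commit; it assumes the companion pthread_resume_np(), which is not shown in this hunk):

#include <pthread.h>
#include <pthread_np.h>

/* Briefly freeze a worker thread while examining shared state. */
void
pause_worker(pthread_t worker)
{
	if (pthread_suspend_np(worker) == 0) {
		/* ... inspect state; the worker cannot run here ... */
		pthread_resume_np(worker);
	}
}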
@ -43,29 +43,11 @@ __weak_reference(_pthread_switch_delete_np, pthread_switch_delete_np);
int
_pthread_switch_add_np(pthread_switch_routine_t routine)
{
int ret = 0;

if (routine == NULL)
/* Return an invalid argument error: */
ret = EINVAL;
else
/* Shouldn't need a lock to protect this assignment. */
_sched_switch_hook = routine;

return(ret);
return (ENOTSUP);
}

int
_pthread_switch_delete_np(pthread_switch_routine_t routine)
{
int ret = 0;

if (routine != _sched_switch_hook)
/* Return an invalid argument error: */
ret = EINVAL;
else
/* Shouldn't need a lock to protect this assignment. */
_sched_switch_hook = NULL;

return(ret);
return (ENOTSUP);
}
@ -38,11 +38,12 @@ __weak_reference(_system, system);
int
_system(const char *string)
{
struct pthread *curthread = _get_curthread();
int ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __system(string);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
}
@ -38,11 +38,12 @@ __weak_reference(_tcdrain, tcdrain);
int
_tcdrain(int fd)
{
struct pthread *curthread = _get_curthread();
int ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __tcdrain(fd);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
return (ret);
}
@ -37,11 +37,12 @@ __weak_reference(_wait, wait);
pid_t
_wait(int *istat)
{
struct pthread *curthread = _get_curthread();
pid_t ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __wait(istat);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
}
@ -43,11 +43,12 @@ __weak_reference(__wait4, wait4);
pid_t
__wait4(pid_t pid, int *istat, int options, struct rusage *rusage)
{
struct pthread *curthread = _get_curthread();
pid_t ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = _wait4(pid, istat, options, rusage);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
}
@ -39,11 +39,12 @@ __weak_reference(_waitpid, waitpid);
pid_t
_waitpid(pid_t wpid, int *status, int options)
{
struct pthread *curthread = _get_curthread();
pid_t ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __waitpid(wpid, status, options);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
}
@ -45,11 +45,12 @@ __weak_reference(__write, write);
ssize_t
__write(int fd, const void *buf, size_t nbytes)
{
struct pthread *curthread = _get_curthread();
ssize_t ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __sys_write(fd, buf, nbytes);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
}
@ -47,11 +47,12 @@ __weak_reference(__writev, writev);
ssize_t
__writev(int fd, const struct iovec *iov, int iovcnt)
{
struct pthread *curthread = _get_curthread();
ssize_t ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __sys_writev(fd, iov, iovcnt);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
}
@ -46,7 +46,7 @@ _sched_yield(void)
curthread->slice_usec = -1;

/* Schedule the next thread: */
_thread_kern_sched();
_thr_sched_switch(curthread);

/* Always return no error. */
return(0);
@ -62,5 +62,5 @@ _pthread_yield(void)
curthread->slice_usec = -1;

/* Schedule the next thread: */
_thread_kern_sched();
_thr_sched_switch(curthread);
}
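All of the syscall wrappers above (sleep, system, tcdrain, wait, wait4, waitpid, write, writev) follow one pattern: mark the thread as being at a cancellation point, issue the underlying call, then clear the mark, testing for a pending cancel on both edges. A generic sketch of the shape, with hypothetical names (__example_op and __sys_example_op are placeholders, not real stubs):

ssize_t
__example_op(int fd)
{
	struct pthread *curthread = _get_curthread();
	ssize_t ret;

	_thr_enter_cancellation_point(curthread);
	ret = __sys_example_op(fd);	/* may block; cancellable here */
	_thr_leave_cancellation_point(curthread);
	return (ret);
}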
@ -9,16 +9,18 @@
# system call stubs.
LIB=kse
SHLIB_MAJOR= 1
CFLAGS+=-DPTHREAD_KERNEL -D_THREAD_SAFE
CFLAGS+=-DPTHREAD_KERNEL
CFLAGS+=-I${.CURDIR}/../libc/include -I${.CURDIR}/thread \
-I${.CURDIR}/../../include
CFLAGS+=-I${.CURDIR}/arch/${MACHINE_ARCH}/include
CFLAGS+=-I${.CURDIR}/sys

# Uncomment this if you want libpthread to contain debug information for
# thread locking.
CFLAGS+=-D_LOCK_DEBUG
CFLAGS+=-D_LOCK_DEBUG -g

# enable extra internal consistency checks
CFLAGS+=-D_PTHREADS_INVARIANTS
CFLAGS+=-D_PTHREADS_INVARIANTS -Wall

AINC= -I${.CURDIR}/../libc/${MACHINE_ARCH} -I${.CURDIR}/thread
PRECIOUSLIB= yes
@ -5,6 +5,7 @@

SRCS+= \
thr_aio_suspend.c \
thr_autoinit.c \
thr_attr_destroy.c \
thr_attr_init.c \
thr_attr_get_np.c \
@ -27,7 +28,6 @@ SRCS+= \
thr_attr_setstack.c \
thr_attr_setstackaddr.c \
thr_attr_setstacksize.c \
thr_autoinit.c \
thr_cancel.c \
thr_clean.c \
thr_close.c \
@ -43,7 +43,6 @@ SRCS+= \
thr_find_thread.c \
thr_fork.c \
thr_fsync.c \
thr_gc.c \
thr_getprio.c \
thr_getschedparam.c \
thr_info.c \
@ -82,6 +81,8 @@ SRCS+= \
thr_sig.c \
thr_sigaction.c \
thr_sigmask.c \
thr_sigpending.c \
thr_sigprocmask.c \
thr_sigsuspend.c \
thr_sigwait.c \
thr_single_np.c \
@ -1,47 +0,0 @@
/*-
* Copyright (c) 2001 Thomas Moestl
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/

#include <sys/types.h>
#include <sys/acl.h>
#include <pthread.h>
#include "thr_private.h"

__weak_reference(___acl_aclcheck_fd, __acl_aclcheck_fd);

int
___acl_aclcheck_fd(int fd, acl_type_t tp, acl_t acl)
{
int error;

if ((error = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
error = __sys___acl_aclcheck_fd(fd, tp, (struct acl *)acl);
_FD_UNLOCK(fd, FD_READ);
}
return (error);
}
@ -39,12 +39,13 @@ int
_aio_suspend(const struct aiocb * const iocbs[], int niocb, const struct
timespec *timeout)
{
int ret;
struct pthread *curthread = _get_curthread();
int ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __sys_aio_suspend(iocbs, niocb, timeout);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
return (ret);
}

@ -36,22 +36,18 @@ __weak_reference(_pthread_attr_get_np, pthread_attr_get_np);
int
_pthread_attr_get_np(pthread_t pid, pthread_attr_t *dst)
{
struct pthread *curthread;
int ret;

if (pid == NULL || dst == NULL || *dst == NULL)
return (EINVAL);

if ((ret = _find_thread(pid)) != 0)
curthread = _get_curthread();
if ((ret = _thr_ref_add(curthread, pid, /*include dead*/0)) != 0)
return (ret);

memcpy(*dst, &pid->attr, sizeof(struct pthread_attr));

/*
* Special case, if stack address was not provided by caller
* of pthread_create(), then return address allocated internally
*/
if ((*dst)->stackaddr_attr == NULL)
(*dst)->stackaddr_attr = pid->stack;
_thr_ref_delete(curthread, pid);

return (0);
}
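A caller-side sketch for the function above (standard API only; note that *dst must already be initialized, matching the EINVAL check):

#include <pthread.h>
#include <pthread_np.h>

/* Query another thread's stack size; returns 0 if it can't be determined. */
size_t
thread_stack_size(pthread_t t)
{
	pthread_attr_t attr;
	size_t size = 0;

	if (pthread_attr_init(&attr) != 0)
		return (0);
	if (pthread_attr_get_np(t, &attr) == 0)
		(void)pthread_attr_getstacksize(&attr, &size);
	(void)pthread_attr_destroy(&attr);
	return (size);
}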
@ -51,7 +51,8 @@ _pthread_attr_init(pthread_attr_t *attr)
ret = ENOMEM;
else {
/* Initialise the attribute object with the defaults: */
memcpy(pattr, &pthread_attr_default, sizeof(struct pthread_attr));
memcpy(pattr, &_pthread_attr_default,
sizeof(struct pthread_attr));

/* Return a pointer to the attribute object: */
*attr = pattr;
@ -45,7 +45,7 @@ _pthread_attr_setcreatesuspend_np(pthread_attr_t *attr)
errno = EINVAL;
ret = -1;
} else {
(*attr)->suspend = PTHREAD_CREATE_SUSPENDED;
(*attr)->suspend = THR_CREATE_SUSPENDED;
ret = 0;
}
return(ret);
@ -47,11 +47,11 @@ _pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
else {
/*
* Round guardsize up to the nearest multiple of
* _pthread_page_size.
* _thr_page_size.
*/
if (guardsize % _pthread_page_size != 0)
guardsize = ((guardsize / _pthread_page_size) + 1) *
_pthread_page_size;
if (guardsize % _thr_page_size != 0)
guardsize = ((guardsize / _thr_page_size) + 1) *
_thr_page_size;

/* Save the guard size. */
(*attr)->guardsize_attr = guardsize;
@ -46,8 +46,8 @@ _pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *para
ret = EINVAL;
else if (param == NULL) {
ret = ENOTSUP;
} else if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
(param->sched_priority > PTHREAD_MAX_PRIORITY)) {
} else if ((param->sched_priority < THR_MIN_PRIORITY) ||
(param->sched_priority > THR_MAX_PRIORITY)) {
/* Return an unsupported value error. */
ret = ENOTSUP;
} else
@ -45,12 +45,11 @@ _pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
if ((attr == NULL) || (*attr == NULL)) {
/* Return an invalid argument: */
ret = EINVAL;
} else if ((contentionscope != PTHREAD_SCOPE_PROCESS) ||
(contentionscope == PTHREAD_SCOPE_SYSTEM)) {
/* We don't support PTHREAD_SCOPE_SYSTEM. */
ret = ENOTSUP;
} else if ((contentionscope != PTHREAD_SCOPE_PROCESS) &&
(contentionscope != PTHREAD_SCOPE_SYSTEM)) {
ret = EINVAL;
} else
(*attr)->flags |= contentionscope;

return(ret);
return (ret);
}
@ -38,13 +38,16 @@
* threads package at program start-up time.
*/

#include <pthread.h>
#include "thr_private.h"

void _thread_init_hack(void) __attribute__ ((constructor));

void
_thread_init_hack(void)
{

_thread_init();
_libpthread_init(NULL);
}

/*
@ -6,32 +6,32 @@
#include <pthread.h>
#include "thr_private.h"

static void finish_cancellation(void *arg);

__weak_reference(_pthread_cancel, pthread_cancel);
__weak_reference(_pthread_setcancelstate, pthread_setcancelstate);
__weak_reference(_pthread_setcanceltype, pthread_setcanceltype);
__weak_reference(_pthread_testcancel, pthread_testcancel);

static int checkcancel(struct pthread *curthread);
static void testcancel(struct pthread *curthread);
static void finish_cancellation(void *arg);

int
_pthread_cancel(pthread_t pthread)
{
struct pthread *curthread = _get_curthread();
int ret;

if ((ret = _find_thread(pthread)) != 0) {
/* NOTHING */
} else if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK
|| (pthread->flags & PTHREAD_EXITING) != 0) {
ret = 0;
} else {
/* Protect the scheduling queues: */
_thread_kern_sig_defer();
if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0)) == 0) {
/*
* Take the scheduling lock while we change the cancel flags.
*/
THR_SCHED_LOCK(curthread, pthread);

if (((pthread->cancelflags & PTHREAD_CANCEL_DISABLE) != 0) ||
(((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0) &&
((pthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0)))
(((pthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0)))
/* Just mark it for cancellation: */
pthread->cancelflags |= PTHREAD_CANCELLING;
pthread->cancelflags |= THR_CANCELLING;
else {
/*
* Check if we need to kick it back into the
@ -40,23 +40,27 @@ _pthread_cancel(pthread_t pthread)
switch (pthread->state) {
case PS_RUNNING:
/* No need to resume: */
pthread->cancelflags |= PTHREAD_CANCELLING;
pthread->cancelflags |= THR_CANCELLING;
break;

case PS_LOCKWAIT:
/*
* These can't be removed from the queue.
* Just mark it as cancelling and tell it
* to yield once it leaves the critical
* region.
*/
pthread->cancelflags |= THR_CANCELLING;
pthread->critical_yield = 1;
break;

case PS_SPINBLOCK:
/* Remove these threads from the work queue: */
if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
!= 0)
PTHREAD_WORKQ_REMOVE(pthread);
/* Fall through: */
case PS_SLEEP_WAIT:
case PS_WAIT_WAIT:
case PS_SIGSUSPEND:
case PS_SIGWAIT:
/* Interrupt and resume: */
pthread->interrupted = 1;
pthread->cancelflags |= PTHREAD_CANCELLING;
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
pthread->cancelflags |= THR_CANCELLING;
_thr_setrunnable_unlocked(pthread);
break;

case PS_JOIN:
@ -68,8 +72,8 @@ _pthread_cancel(pthread_t pthread)
= NULL;
pthread->join_status.thread = NULL;
}
pthread->cancelflags |= PTHREAD_CANCELLING;
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
pthread->cancelflags |= THR_CANCELLING;
_thr_setrunnable_unlocked(pthread);
break;

case PS_SUSPENDED:
@ -86,8 +90,8 @@ _pthread_cancel(pthread_t pthread)
* cancellation completion routine.
*/
pthread->interrupted = 1;
pthread->cancelflags |= PTHREAD_CANCEL_NEEDED;
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
pthread->cancelflags |= THR_CANCEL_NEEDED;
_thr_setrunnable_unlocked(pthread);
pthread->continuation = finish_cancellation;
break;

@ -97,12 +101,17 @@ _pthread_cancel(pthread_t pthread)
/* Ignore - only here to silence -Wall: */
break;
}
if ((pthread->blocked != 0) &&
((pthread->cancelflags & THR_AT_CANCEL_POINT) != 0))
kse_thr_interrupt(&pthread->tmbx);
}

/* Unprotect the scheduling queues: */
_thread_kern_sig_undefer();

ret = 0;
/*
* Release the thread's scheduling lock and remove the
* reference:
*/
THR_SCHED_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);
}
return (ret);
}
@ -113,6 +122,10 @@ _pthread_setcancelstate(int state, int *oldstate)
struct pthread *curthread = _get_curthread();
int ostate;
int ret;
int need_exit = 0;

/* Take the scheduling lock while fiddling with the thread's state: */
THR_SCHED_LOCK(curthread, curthread);

ostate = curthread->cancelflags & PTHREAD_CANCEL_DISABLE;

@ -122,7 +135,7 @@ _pthread_setcancelstate(int state, int *oldstate)
*oldstate = ostate;
curthread->cancelflags &= ~PTHREAD_CANCEL_DISABLE;
if ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)
pthread_testcancel();
need_exit = checkcancel(curthread);
ret = 0;
break;
case PTHREAD_CANCEL_DISABLE:
@ -135,6 +148,12 @@ _pthread_setcancelstate(int state, int *oldstate)
ret = EINVAL;
}

THR_SCHED_UNLOCK(curthread, curthread);
if (need_exit != 0) {
_thr_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
PANIC("cancel");
}
return (ret);
}

@ -144,6 +163,10 @@ _pthread_setcanceltype(int type, int *oldtype)
struct pthread *curthread = _get_curthread();
int otype;
int ret;
int need_exit = 0;

/* Take the scheduling lock while fiddling with the state: */
THR_SCHED_LOCK(curthread, curthread);

otype = curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS;
switch (type) {
@ -151,7 +174,7 @@ _pthread_setcanceltype(int type, int *oldtype)
if (oldtype != NULL)
*oldtype = otype;
curthread->cancelflags |= PTHREAD_CANCEL_ASYNCHRONOUS;
pthread_testcancel();
need_exit = checkcancel(curthread);
ret = 0;
break;
case PTHREAD_CANCEL_DEFERRED:
@ -164,47 +187,72 @@ _pthread_setcanceltype(int type, int *oldtype)
ret = EINVAL;
}

THR_SCHED_UNLOCK(curthread, curthread);
if (need_exit != 0) {
_thr_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
PANIC("cancel");
}
return (ret);
}

static int
checkcancel(struct pthread *curthread)
{
if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) &&
((curthread->cancelflags & THR_CANCELLING) != 0)) {
/*
* It is possible for this thread to be swapped out
* while performing cancellation; do not allow it
* to be cancelled again.
*/
curthread->cancelflags &= ~THR_CANCELLING;
return (1);
}
else
return (0);
}

static void
testcancel(struct pthread *curthread)
{
/* Take the scheduling lock while fiddling with the state: */
THR_SCHED_LOCK(curthread, curthread);

if (checkcancel(curthread) != 0) {
/* Unlock before exiting: */
THR_SCHED_UNLOCK(curthread, curthread);

_thr_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
PANIC("cancel");
}

THR_SCHED_UNLOCK(curthread, curthread);
}

void
_pthread_testcancel(void)
{
struct pthread *curthread = _get_curthread();

if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) &&
((curthread->cancelflags & PTHREAD_CANCELLING) != 0) &&
((curthread->flags & PTHREAD_EXITING) == 0)) {
/*
* It is possible for this thread to be swapped out
* while performing cancellation; do not allow it
* to be cancelled again.
*/
curthread->cancelflags &= ~PTHREAD_CANCELLING;
_thread_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
PANIC("cancel");
}
testcancel(curthread);
}

void
_thread_enter_cancellation_point(void)
_thr_enter_cancellation_point(struct pthread *thread)
{
struct pthread *curthread = _get_curthread();

/* Look for a cancellation before we block: */
pthread_testcancel();
curthread->cancelflags |= PTHREAD_AT_CANCEL_POINT;
testcancel(thread);
thread->cancelflags |= THR_AT_CANCEL_POINT;
}

void
_thread_leave_cancellation_point(void)
_thr_leave_cancellation_point(struct pthread *thread)
{
struct pthread *curthread = _get_curthread();

curthread->cancelflags &= ~PTHREAD_AT_CANCEL_POINT;
thread->cancelflags &= ~THR_AT_CANCEL_POINT;
/* Look for a cancellation after we unblock: */
pthread_testcancel();
testcancel(thread);
}

static void
@ -215,9 +263,9 @@ finish_cancellation(void *arg)
curthread->continuation = NULL;
curthread->interrupted = 0;

if ((curthread->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) {
curthread->cancelflags &= ~PTHREAD_CANCEL_NEEDED;
_thread_exit_cleanup();
if ((curthread->cancelflags & THR_CANCEL_NEEDED) != 0) {
curthread->cancelflags &= ~THR_CANCEL_NEEDED;
_thr_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
}
}
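A minimal application-side sketch of deferred cancellation, the mode whose bookkeeping the code above implements (standard API only):

#include <pthread.h>

void *
worker(void *arg)
{
	int old;

	(void)pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &old);
	for (;;) {
		/* ... non-blocking work ... */
		pthread_testcancel();	/* explicit cancellation point */
	}
	return (NULL);		/* not reached */
}

A pthread_cancel() against this thread takes effect only at pthread_testcancel() or at another cancellation point, which is exactly what the THR_AT_CANCEL_POINT flag above tracks.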
@ -46,7 +46,8 @@ _pthread_cleanup_push(void (*routine) (void *), void *routine_arg)
struct pthread *curthread = _get_curthread();
struct pthread_cleanup *new;

if ((new = (struct pthread_cleanup *) malloc(sizeof(struct pthread_cleanup))) != NULL) {
if ((new = (struct pthread_cleanup *)
malloc(sizeof(struct pthread_cleanup))) != NULL) {
new->routine = routine;
new->routine_arg = routine_arg;
new->next = curthread->cleanup;
@ -69,4 +70,3 @@ _pthread_cleanup_pop(int execute)
free(old);
}
}

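Usage sketch for the cleanup handlers above (standard API; push and pop must pair lexically because they may expand to a brace pair):

#include <pthread.h>
#include <stdlib.h>

void *
task(void *arg)
{
	char *buf = malloc(1024);

	pthread_cleanup_push(free, buf);
	/* ... work that may hit a cancellation point ... */
	pthread_cleanup_pop(1);		/* 1: run free(buf) on normal exit too */
	return (NULL);
}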
@ -44,11 +44,12 @@ __weak_reference(__close, close);
int
__close(int fd)
{
struct pthread *curthread = _get_curthread();
int ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __sys_close(fd);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
return (ret);
}
@ -37,12 +37,17 @@
#include <pthread.h>
#include "thr_private.h"

#define THR_IN_CONDQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define THR_IN_CONDQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define THR_CONDQ_SET(thr) (thr)->sflags |= THR_FLAGS_IN_SYNCQ
#define THR_CONDQ_CLEAR(thr) (thr)->sflags &= ~THR_FLAGS_IN_SYNCQ

/*
* Prototypes
*/
static inline pthread_t cond_queue_deq(pthread_cond_t);
static inline void cond_queue_remove(pthread_cond_t, pthread_t);
static inline void cond_queue_enq(pthread_cond_t, pthread_t);
static inline struct pthread *cond_queue_deq(pthread_cond_t);
static inline void cond_queue_remove(pthread_cond_t, pthread_t);
static inline void cond_queue_enq(pthread_cond_t, pthread_t);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
@ -52,35 +57,12 @@ __weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);


/* Reinitialize a condition variable to defaults. */
int
_cond_reinit(pthread_cond_t *cond)
{
int ret = 0;

if (cond == NULL)
ret = EINVAL;
else if (*cond == NULL)
ret = pthread_cond_init(cond, NULL);
else {
/*
* Initialize the condition variable structure:
*/
TAILQ_INIT(&(*cond)->c_queue);
(*cond)->c_flags = COND_FLAGS_INITED;
(*cond)->c_type = COND_TYPE_FAST;
(*cond)->c_mutex = NULL;
(*cond)->c_seqno = 0;
memset(&(*cond)->lock, 0, sizeof((*cond)->lock));
}
return (ret);
}

int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
enum pthread_cond_type type;
pthread_cond_t pcond;
int flags;
int rval = 0;

if (cond == NULL)
@ -93,9 +75,11 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
if (cond_attr != NULL && *cond_attr != NULL) {
/* Default to a fast condition variable: */
type = (*cond_attr)->c_type;
flags = (*cond_attr)->c_flags;
} else {
/* Default to a fast condition variable: */
type = COND_TYPE_FAST;
flags = 0;
}

/* Process according to condition variable type: */
@ -117,6 +101,10 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
if ((pcond = (pthread_cond_t)
malloc(sizeof(struct pthread_cond))) == NULL) {
rval = ENOMEM;
} else if (_lock_init(&pcond->c_lock, LCK_ADAPTIVE,
_kse_lock_wait, _kse_lock_wakeup) != 0) {
free(pcond);
rval = ENOMEM;
} else {
/*
* Initialise the condition variable
@ -127,7 +115,6 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
pcond->c_type = type;
pcond->c_mutex = NULL;
pcond->c_seqno = 0;
memset(&pcond->lock,0,sizeof(pcond->lock));
*cond = pcond;
}
}
@ -139,25 +126,32 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
int
_pthread_cond_destroy(pthread_cond_t *cond)
{
int rval = 0;
struct pthread_cond *cv;
struct pthread *curthread = _get_curthread();
int rval = 0;

if (cond == NULL || *cond == NULL)
rval = EINVAL;
else {
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);

/*
* Free the memory allocated for the condition
* variable structure:
*/
free(*cond);
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

/*
* NULL the caller's pointer now that the condition
* variable has been destroyed:
*/
cv = *cond;
*cond = NULL;

/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &cv->c_lock);

/*
* Free the memory allocated for the condition
* variable structure:
*/
free(cv);

}
/* Return the completion status: */
return (rval);
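Before the wait and timedwait implementations that follow, a caller-side sketch of the intended protocol (standard API, not part of this commit): the predicate is always re-tested under the mutex, which tolerates the spurious and multi-waiter wakeups the XXX comments below describe:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ready = PTHREAD_COND_INITIALIZER;
static int available = 0;

void
consume_one(void)
{
	pthread_mutex_lock(&lock);
	while (available == 0)		/* always re-check the predicate */
		pthread_cond_wait(&ready, &lock);
	available--;
	pthread_mutex_unlock(&lock);
}

void
produce_one(void)
{
	pthread_mutex_lock(&lock);
	available++;
	pthread_cond_signal(&ready);
	pthread_mutex_unlock(&lock);
}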
@ -170,20 +164,25 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
int rval = 0;
|
||||
int done = 0;
|
||||
int interrupted = 0;
|
||||
int unlock_mutex = 1;
|
||||
int seqno;
|
||||
|
||||
_thread_enter_cancellation_point();
|
||||
|
||||
if (cond == NULL)
|
||||
_thr_enter_cancellation_point(curthread);
|
||||
|
||||
if (cond == NULL) {
|
||||
_thr_leave_cancellation_point(curthread);
|
||||
return (EINVAL);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the condition variable is statically initialized,
|
||||
* perform the dynamic initialization:
|
||||
*/
|
||||
if (*cond == NULL &&
|
||||
(rval = pthread_cond_init(cond, NULL)) != 0)
|
||||
(rval = pthread_cond_init(cond, NULL)) != 0) {
|
||||
_thr_leave_cancellation_point(curthread);
|
||||
return (rval);
|
||||
}
|
||||
|
||||
/*
|
||||
* Enter a loop waiting for a condition signal or broadcast
|
||||
@ -196,7 +195,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
*/
|
||||
do {
|
||||
/* Lock the condition variable structure: */
|
||||
_SPINLOCK(&(*cond)->lock);
|
||||
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
|
||||
|
||||
/*
|
||||
* If the condvar was statically allocated, properly
|
||||
@ -214,7 +213,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
|
||||
((*cond)->c_mutex != *mutex))) {
|
||||
/* Unlock the condition variable structure: */
|
||||
_SPINUNLOCK(&(*cond)->lock);
|
||||
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
|
||||
|
||||
/* Return invalid argument error: */
|
||||
rval = EINVAL;
|
||||
@ -237,7 +236,8 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
curthread->wakeup_time.tv_sec = -1;
|
||||
|
||||
/* Unlock the mutex: */
|
||||
if ((rval = _mutex_cv_unlock(mutex)) != 0) {
|
||||
if ((unlock_mutex != 0) &&
|
||||
((rval = _mutex_cv_unlock(mutex)) != 0)) {
|
||||
/*
|
||||
* Cannot unlock the mutex, so remove
|
||||
* the running thread from the condition
|
||||
@ -246,45 +246,60 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
cond_queue_remove(*cond, curthread);
|
||||
|
||||
/* Check for no more waiters: */
|
||||
if (TAILQ_FIRST(&(*cond)->c_queue) ==
|
||||
NULL)
|
||||
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
|
||||
(*cond)->c_mutex = NULL;
|
||||
|
||||
/* Unlock the condition variable structure: */
|
||||
_SPINUNLOCK(&(*cond)->lock);
|
||||
} else {
|
||||
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
|
||||
}
|
||||
else {
|
||||
/*
|
||||
* Schedule the next thread and unlock
|
||||
* the condition variable structure:
|
||||
* Don't unlock the mutex the next
|
||||
* time through the loop (if the
|
||||
* thread has to be requeued after
|
||||
* handling a signal).
|
||||
*/
|
||||
_thread_kern_sched_state_unlock(PS_COND_WAIT,
|
||||
&(*cond)->lock, __FILE__, __LINE__);
|
||||
unlock_mutex = 0;
|
||||
|
||||
/*
|
||||
* This thread is active and is in a
|
||||
* critical region (holding the cv
|
||||
* lock); we should be able to safely
|
||||
* set the state.
|
||||
*/
|
||||
THR_SET_STATE(curthread, PS_COND_WAIT);
|
||||
|
||||
/* Remember the CV: */
|
||||
curthread->data.cond = *cond;
|
||||
|
||||
/* Unlock the CV structure: */
|
||||
THR_LOCK_RELEASE(curthread,
|
||||
&(*cond)->c_lock);
|
||||
|
||||
/* Schedule the next thread: */
|
||||
_thr_sched_switch(curthread);
|
||||
|
||||
curthread->data.cond = NULL;
|
||||
|
||||
/*
|
||||
* XXX - This really isn't a good check
|
||||
* since there can be more than one
|
||||
* thread waiting on the CV. Signals
|
||||
* sent to threads waiting on mutexes
|
||||
* or CVs should really be deferred
|
||||
* until the threads are no longer
|
||||
* waiting, but POSIX says that signals
|
||||
* should be sent "as soon as possible".
|
||||
*/
|
||||
done = (seqno != (*cond)->c_seqno);
|
||||
|
||||
interrupted = curthread->interrupted;
|
||||
|
||||
/*
|
||||
* Check if the wait was interrupted
|
||||
* (canceled) or needs to be resumed
|
||||
* after handling a signal.
|
||||
*/
|
||||
if (interrupted != 0) {
|
||||
/*
|
||||
* Lock the mutex and ignore any
|
||||
* errors. Note that even
|
||||
* though this thread may have
|
||||
* been canceled, POSIX requires
|
||||
* that the mutex be reaquired
|
||||
* prior to cancellation.
|
||||
*/
|
||||
(void)_mutex_cv_lock(mutex);
|
||||
} else {
|
||||
if (THR_IN_SYNCQ(curthread)) {
|
||||
/*
|
||||
* Lock the condition variable
|
||||
* while removing the thread.
|
||||
*/
|
||||
_SPINLOCK(&(*cond)->lock);
|
||||
THR_LOCK_ACQUIRE(curthread,
|
||||
&(*cond)->c_lock);
|
||||
|
||||
cond_queue_remove(*cond,
|
||||
curthread);
|
||||
@ -293,11 +308,24 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
|
||||
(*cond)->c_mutex = NULL;
|
||||
|
||||
_SPINUNLOCK(&(*cond)->lock);
|
||||
|
||||
/* Lock the mutex: */
|
||||
rval = _mutex_cv_lock(mutex);
|
||||
THR_LOCK_RELEASE(curthread,
|
||||
&(*cond)->c_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Save the interrupted flag; locking
|
||||
* the mutex may destroy it.
|
||||
*/
|
||||
interrupted = curthread->interrupted;
|
||||
|
||||
/*
|
||||
* Note that even though this thread may
|
||||
* have been canceled, POSIX requires
|
||||
* that the mutex be reaquired prior to
|
||||
* cancellation.
|
||||
*/
|
||||
if (done != 0)
|
||||
rval = _mutex_cv_lock(mutex);
|
||||
}
|
||||
}
|
||||
break;
|
||||
@ -305,7 +333,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
/* Trap invalid condition variable types: */
|
||||
default:
|
||||
/* Unlock the condition variable structure: */
|
||||
_SPINUNLOCK(&(*cond)->lock);
|
||||
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
|
||||
|
||||
/* Return an invalid argument error: */
|
||||
rval = EINVAL;
|
||||
@ -316,12 +344,24 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
curthread->continuation((void *) curthread);
|
||||
} while ((done == 0) && (rval == 0));
|
||||
|
||||
_thread_leave_cancellation_point();
|
||||
_thr_leave_cancellation_point(curthread);
|
||||
|
||||
/* Return the completion status: */
|
||||
return (rval);
|
||||
}
|
||||
|
||||
int
|
||||
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
{
|
||||
struct pthread *curthread = _get_curthread();
|
||||
int ret;
|
||||
|
||||
_thr_enter_cancellation_point(curthread);
|
||||
ret = _pthread_cond_wait(cond, mutex);
|
||||
_thr_leave_cancellation_point(curthread);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
int
|
||||
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
|
||||
const struct timespec * abstime)
|
||||
@@ -330,19 +370,24 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
int rval = 0;
int done = 0;
int interrupted = 0;
int unlock_mutex = 1;
int seqno;

_thread_enter_cancellation_point();

_thr_enter_cancellation_point(curthread);

if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
abstime->tv_nsec >= 1000000000)
abstime->tv_nsec >= 1000000000) {
_thr_leave_cancellation_point(curthread);
return (EINVAL);
}
/*
* If the condition variable is statically initialized, perform dynamic
* initialization.
*/
if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0)
if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) {
_thr_leave_cancellation_point(curthread);
return (rval);
}

/*
* Enter a loop waiting for a condition signal or broadcast
@@ -355,7 +400,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
*/
do {
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

/*
* If the condvar was statically allocated, properly
@@ -376,11 +421,10 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
rval = EINVAL;

/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
} else {
/* Set the wakeup time: */
curthread->wakeup_time.tv_sec =
abstime->tv_sec;
curthread->wakeup_time.tv_sec = abstime->tv_sec;
curthread->wakeup_time.tv_nsec =
abstime->tv_nsec;

@@ -399,10 +443,11 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
seqno = (*cond)->c_seqno;

/* Unlock the mutex: */
if ((rval = _mutex_cv_unlock(mutex)) != 0) {
if ((unlock_mutex != 0) &&
((rval = _mutex_cv_unlock(mutex)) != 0)) {
/*
* Cannot unlock the mutex, so remove
* the running thread from the condition
* Cannot unlock the mutex; remove the
* running thread from the condition
* variable queue:
*/
cond_queue_remove(*cond, curthread);
@@ -412,40 +457,55 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
(*cond)->c_mutex = NULL;

/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
} else {
/*
* Schedule the next thread and unlock
* the condition variable structure:
* Don't unlock the mutex the next
* time through the loop (if the
* thread has to be requeued after
* handling a signal).
*/
_thread_kern_sched_state_unlock(PS_COND_WAIT,
&(*cond)->lock, __FILE__, __LINE__);

done = (seqno != (*cond)->c_seqno);

interrupted = curthread->interrupted;
unlock_mutex = 0;

/*
* Check if the wait was interrupted
* (canceled) or needs to be resumed
* after handling a signal.
* This thread is active and is in a
* critical region (holding the cv
* lock); we should be able to safely
* set the state.
*/
if (interrupted != 0) {
/*
* Lock the mutex and ignore any
* errors. Note that even
* though this thread may have
* been canceled, POSIX requires
* that the mutex be reacquired
* prior to cancellation.
*/
(void)_mutex_cv_lock(mutex);
} else {
THR_SET_STATE(curthread, PS_COND_WAIT);

/* Remember the CV: */
curthread->data.cond = *cond;

/* Unlock the CV structure: */
THR_LOCK_RELEASE(curthread,
&(*cond)->c_lock);

/* Schedule the next thread: */
_thr_sched_switch(curthread);

curthread->data.cond = NULL;

/*
* XXX - This really isn't a good check
* since there can be more than one
* thread waiting on the CV. Signals
* sent to threads waiting on mutexes
* or CVs should really be deferred
* until the threads are no longer
* waiting, but POSIX says that signals
* should be sent "as soon as possible".
*/
done = (seqno != (*cond)->c_seqno);

if (THR_IN_CONDQ(curthread)) {
/*
* Lock the condition variable
* while removing the thread.
*/
_SPINLOCK(&(*cond)->lock);
THR_LOCK_ACQUIRE(curthread,
&(*cond)->c_lock);

cond_queue_remove(*cond,
curthread);
@@ -454,21 +514,22 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;

_SPINUNLOCK(&(*cond)->lock);

/* Lock the mutex: */
rval = _mutex_cv_lock(mutex);

/*
* Return ETIMEDOUT if the wait
* timed out and there wasn't an
* error locking the mutex:
*/
if ((curthread->timeout != 0)
&& rval == 0)
rval = ETIMEDOUT;

THR_LOCK_RELEASE(curthread,
&(*cond)->c_lock);
}

/*
* Save the interrupted flag; locking
* the mutex may destroy it.
*/
interrupted = curthread->interrupted;
if (curthread->timeout != 0) {
/* The wait timed out. */
rval = ETIMEDOUT;
(void)_mutex_cv_lock(mutex);
} else if ((interrupted == 0) ||
(done != 0))
rval = _mutex_cv_lock(mutex);
}
}
break;
@@ -476,7 +537,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
/* Trap invalid condition variable types: */
default:
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

/* Return an invalid argument error: */
rval = EINVAL;
@@ -484,20 +545,35 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
}

if ((interrupted != 0) && (curthread->continuation != NULL))
curthread->continuation((void *) curthread);
curthread->continuation((void *)curthread);
} while ((done == 0) && (rval == 0));

_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

/* Return the completion status: */
return (rval);
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
const struct timespec *abstime)
{
struct pthread *curthread = _get_curthread();
int ret;

_thr_enter_cancellation_point(curthread);
ret = _pthread_cond_timedwait(cond, mutex, abstime);
_thr_leave_cancellation_point(curthread);
return (ret);
}


int
_pthread_cond_signal(pthread_cond_t * cond)
{
int rval = 0;
pthread_t pthread;
struct pthread *curthread = _get_curthread();
struct pthread *pthread;
int rval = 0;

if (cond == NULL)
rval = EINVAL;
@@ -506,14 +582,8 @@ _pthread_cond_signal(pthread_cond_t * cond)
* initialization.
*/
else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
/*
* Defer signals to protect the scheduling queues
* from access by the signal handler:
*/
_thread_kern_sig_defer();

/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

/* Process according to condition variable type: */
switch ((*cond)->c_type) {
@@ -522,13 +592,19 @@ _pthread_cond_signal(pthread_cond_t * cond)
/* Increment the sequence number: */
(*cond)->c_seqno++;

if ((pthread = cond_queue_deq(*cond)) != NULL) {
/*
* Wake up the signaled thread:
*/
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
/*
* Wakeups have to be done with the CV lock held;
* otherwise there is a race condition where the
* thread can time out, run on another KSE, and enter
* another blocking state (including blocking on a CV).
*/
if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
!= NULL) {
THR_SCHED_LOCK(curthread, pthread);
cond_queue_remove(*cond, pthread);
_thr_setrunnable_unlocked(pthread);
THR_SCHED_UNLOCK(curthread, pthread);
}

/* Check for no more waiters: */
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;
@@ -542,13 +618,7 @@ _pthread_cond_signal(pthread_cond_t * cond)
}

/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);

/*
* Undefer and handle pending signals, yielding if
* necessary:
*/
_thread_kern_sig_undefer();
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
}

/* Return the completion status: */
@@ -558,8 +628,9 @@ _pthread_cond_signal(pthread_cond_t * cond)
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
int rval = 0;
pthread_t pthread;
struct pthread *curthread = _get_curthread();
struct pthread *pthread;
int rval = 0;

if (cond == NULL)
rval = EINVAL;
@@ -568,14 +639,8 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
* initialization.
*/
else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
/*
* Defer signals to protect the scheduling queues
* from access by the signal handler:
*/
_thread_kern_sig_defer();

/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

/* Process according to condition variable type: */
switch ((*cond)->c_type) {
@@ -588,11 +653,12 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
* Enter a loop to bring all threads off the
* condition queue:
*/
while ((pthread = cond_queue_deq(*cond)) != NULL) {
/*
* Wake up the signaled thread:
*/
PTHREAD_NEW_STATE(pthread, PS_RUNNING);
while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
!= NULL) {
THR_SCHED_LOCK(curthread, pthread);
cond_queue_remove(*cond, pthread);
_thr_setrunnable_unlocked(pthread);
THR_SCHED_UNLOCK(curthread, pthread);
}

/* There are no more waiting threads: */
@@ -607,13 +673,7 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
}

/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);

/*
* Undefer and handle pending signals, yielding if
* necessary:
*/
_thread_kern_sig_undefer();
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
}

/* Return the completion status: */
@@ -621,26 +681,20 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
}

void
_cond_wait_backout(pthread_t pthread)
_cond_wait_backout(struct pthread *curthread)
{
pthread_cond_t cond;

cond = pthread->data.cond;
cond = curthread->data.cond;
if (cond != NULL) {
/*
* Defer signals to protect the scheduling queues
* from access by the signal handler:
*/
_thread_kern_sig_defer();

/* Lock the condition variable structure: */
_SPINLOCK(&cond->lock);
THR_LOCK_ACQUIRE(curthread, &cond->c_lock);

/* Process according to condition variable type: */
switch (cond->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
cond_queue_remove(cond, pthread);
cond_queue_remove(cond, curthread);

/* Check for no more waiters: */
if (TAILQ_FIRST(&cond->c_queue) == NULL)
@@ -652,13 +706,7 @@ _cond_wait_backout(pthread_t pthread)
}

/* Unlock the condition variable structure: */
_SPINUNLOCK(&cond->lock);

/*
* Undefer and handle pending signals, yielding if
* necessary:
*/
_thread_kern_sig_undefer();
THR_LOCK_RELEASE(curthread, &cond->c_lock);
}
}

@@ -666,14 +714,14 @@ _cond_wait_backout(pthread_t pthread)
* Dequeue a waiting thread from the head of a condition queue in
* descending priority order.
*/
static inline pthread_t
static inline struct pthread *
cond_queue_deq(pthread_cond_t cond)
{
pthread_t pthread;
struct pthread *pthread;

while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
THR_CONDQ_SET(pthread);
if ((pthread->timeout == 0) && (pthread->interrupted == 0))
/*
* Only exit the loop when we find a thread
@@ -684,7 +732,7 @@ cond_queue_deq(pthread_cond_t cond)
break;
}

return(pthread);
return (pthread);
}

/*
@@ -692,7 +740,7 @@ cond_queue_deq(pthread_cond_t cond)
* order.
*/
static inline void
cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
cond_queue_remove(pthread_cond_t cond, struct pthread *pthread)
{
/*
* Because pthread_cond_timedwait() can time out as well
@@ -700,9 +748,9 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* guard against removing the thread from the queue if
* it isn't in the queue.
*/
if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) {
if (THR_IN_CONDQ(pthread)) {
TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
THR_CONDQ_CLEAR(pthread);
}
}

@@ -711,11 +759,12 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* order.
*/
static inline void
cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
cond_queue_enq(pthread_cond_t cond, struct pthread *pthread)
{
pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head);
struct pthread *tid = TAILQ_LAST(&cond->c_queue, cond_head);

PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
THR_ASSERT(!THR_IN_SYNCQ(pthread),
"cond_queue_enq: thread already queued!");

/*
* For the common case of all threads having equal priority,
@@ -730,6 +779,6 @@ cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
tid = TAILQ_NEXT(tid, sqe);
TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;
THR_CONDQ_SET(pthread);
pthread->data.cond = cond;
}
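The retry loop and abstime validation above implement the standard POSIX contract for pthread_cond_timedwait(). For reference, a minimal caller-side sketch, assuming hypothetical lock, cv, and ready names:

#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int ready = 0;

/* Wait up to five seconds for ready to become nonzero. */
int
wait_ready(void)
{
        struct timespec abstime;
        int rc = 0;

        clock_gettime(CLOCK_REALTIME, &abstime);
        abstime.tv_sec += 5;    /* keep tv_nsec in [0, 1000000000) */
        pthread_mutex_lock(&lock);
        while (ready == 0 && rc == 0)
                rc = pthread_cond_timedwait(&cv, &lock, &abstime);
        pthread_mutex_unlock(&lock);
        return (rc);            /* 0, ETIMEDOUT, or EINVAL for a bad abstime */
}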
@@ -46,13 +46,13 @@ _pthread_condattr_init(pthread_condattr_t *attr)
pthread_condattr_t pattr;

if ((pattr = (pthread_condattr_t)
malloc(sizeof(struct pthread_cond_attr))) == NULL) {
malloc(sizeof(struct pthread_cond_attr))) == NULL) {
ret = ENOMEM;
} else {
memcpy(pattr, &pthread_condattr_default,
sizeof(struct pthread_cond_attr));
memcpy(pattr, &_pthread_condattr_default,
sizeof(struct pthread_cond_attr));
*attr = pattr;
ret = 0;
}
return(ret);
return (ret);
}
@@ -38,11 +38,12 @@ __weak_reference(___creat, creat);
int
___creat(const char *path, mode_t mode)
{
int ret;
struct pthread *curthread = _get_curthread();
int ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __creat(path, mode);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
}
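Like the other syscall wrappers touched in this commit, creat() now takes the explicit per-thread cancellation-point calls. A sketch of the recurring bracket, using close(2) as a stand-in (the wrapper name is hypothetical; the internal calls are the ones introduced here):

#include "thr_private.h"

int
example_close(int fd)
{
        struct pthread *curthread = _get_curthread();
        int ret;

        _thr_enter_cancellation_point(curthread);  /* may act on a pending cancel */
        ret = __sys_close(fd);                     /* the raw system call */
        _thr_leave_cancellation_point(curthread);
        return (ret);
}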
@@ -50,102 +50,150 @@ int _thread_next_offset = OFF(tle.tqe_next);
int _thread_uniqueid_offset = OFF(uniqueid);
int _thread_state_offset = OFF(state);
int _thread_name_offset = OFF(name);
int _thread_ctx_offset = OFF(mailbox.tm_context);
int _thread_ctx_offset = OFF(tmbx.tm_context);
#undef OFF

int _thread_PS_RUNNING_value = PS_RUNNING;
int _thread_PS_DEAD_value = PS_DEAD;

static int create_stack(struct pthread_attr *pattr);
static void thread_start(struct pthread *curthread,
void *(*start_routine) (void *), void *arg);

__weak_reference(_pthread_create, pthread_create);

/*
* Some notes on new thread creation and first time initialization
* to enable multi-threading.
*
* There are basically two things that need to be done.
*
* 1) The internal library variables must be initialized.
* 2) Upcalls need to be enabled to allow multiple threads
* to be run.
*
* The first may be done as a result of other pthread functions
* being called. When _thr_initial is null, _libpthread_init is
* called to initialize the internal variables; this also creates
* or sets the initial thread. It'd be nice to automatically
* have _libpthread_init called on program execution so we don't
* have to have checks throughout the library.
*
* The second part is only triggered by the creation of the first
* thread (other than the initial/main thread). If the thread
* being created is a scope system thread, then a new KSE/KSEG
* pair needs to be allocated. Also, if upcalls haven't been
* enabled on the initial thread's KSE, they must be now that
* there is more than one thread; this could be delayed until
* the initial KSEG has more than one thread.
*/
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
void *(*start_routine) (void *), void *arg)
{
struct pthread *curthread = _get_curthread();
struct itimerval itimer;
int f_gc = 0;
int ret = 0;
pthread_t gc_thread;
pthread_t new_thread;
pthread_attr_t pattr;
void *stack;
struct kse *curkse;
struct pthread *curthread, *new_thread;
struct kse *kse = NULL;
struct kse_group *kseg = NULL;
kse_critical_t crit;
int i;
int ret = 0;

/*
* Locking functions in libc are required when there are
* threads other than the initial thread.
*/
__isthreaded = 1;
if (_thr_initial == NULL)
_libpthread_init(NULL);

crit = _kse_critical_enter();
curthread = _get_curthread();
curkse = curthread->kse;

/* Allocate memory for the thread structure: */
if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
if ((new_thread = _thr_alloc(curkse)) == NULL) {
/* Insufficient memory to create a thread: */
ret = EAGAIN;
} else {
/* Initialize the thread structure: */
memset(new_thread, 0, sizeof(struct pthread));

/* Check if default thread attributes are required: */
if (attr == NULL || *attr == NULL) {
if (attr == NULL || *attr == NULL)
/* Use the default thread attributes: */
pattr = &pthread_attr_default;
} else {
pattr = *attr;
new_thread->attr = _pthread_attr_default;
else
new_thread->attr = *(*attr);

if (create_stack(&new_thread->attr) != 0) {
/* Insufficient memory to create a stack: */
ret = EAGAIN;
_thr_free(curkse, new_thread);
}
/* Check if a stack was specified in the thread attributes: */
if ((stack = pattr->stackaddr_attr) != NULL) {
}
/* Allocate a stack: */
else {
stack = _thread_stack_alloc(pattr->stacksize_attr,
pattr->guardsize_attr);
if (stack == NULL) {
ret = EAGAIN;
free(new_thread);
else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
(((kse = _kse_alloc(curkse)) == NULL)
|| ((kseg = _kseg_alloc(curkse)) == NULL))) {
/* Insufficient memory to create a new KSE/KSEG: */
ret = EAGAIN;
if (kse != NULL)
_kse_free(curkse, kse);
if ((new_thread->attr.flags & THR_STACK_USER) == 0) {
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
_thr_stack_free(&new_thread->attr);
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
}
_thr_free(curkse, new_thread);
}

/* Check for errors: */
if (ret != 0) {
} else {
/* Initialise the thread structure: */
memset(new_thread, 0, sizeof(struct pthread));
new_thread->slice_usec = -1;
new_thread->stack = stack;
new_thread->start_routine = start_routine;
new_thread->arg = arg;

new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
PTHREAD_CANCEL_DEFERRED;

else {
if (kseg != NULL) {
/* Add the KSE to the KSEG's list of KSEs. */
TAILQ_INSERT_HEAD(&kseg->kg_kseq, kse, k_qe);
kse->k_kseg = kseg;
kse->k_schedq = &kseg->kg_schedq;
}
/*
* Write a magic value to the thread structure
* to help identify valid ones:
*/
new_thread->magic = PTHREAD_MAGIC;
new_thread->magic = THR_MAGIC;

/* Initialise the machine context: */
getcontext(&new_thread->mailbox.tm_context);
new_thread->mailbox.tm_context.uc_stack.ss_sp =
new_thread->stack;
new_thread->mailbox.tm_context.uc_stack.ss_size =
pattr->stacksize_attr;
makecontext(&new_thread->mailbox.tm_context,
_thread_start, 1);
new_thread->mailbox.tm_udata = (void *)new_thread;
new_thread->slice_usec = -1;
new_thread->start_routine = start_routine;
new_thread->arg = arg;
new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
PTHREAD_CANCEL_DEFERRED;

/* Copy the thread attributes: */
memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));
/* Initialize the thread for signals: */
new_thread->sigmask = curthread->sigmask;

/* No thread is wanting to join to this one: */
new_thread->joiner = NULL;

/* Initialize the signal frame: */
new_thread->curframe = NULL;

/* Initialize the machine context: */
THR_GETCONTEXT(&new_thread->tmbx.tm_context);
new_thread->tmbx.tm_udata = new_thread;
new_thread->tmbx.tm_context.uc_sigmask =
new_thread->sigmask;
new_thread->tmbx.tm_context.uc_stack.ss_size =
new_thread->attr.stacksize_attr;
new_thread->tmbx.tm_context.uc_stack.ss_sp =
new_thread->attr.stackaddr_attr;

makecontext(&new_thread->tmbx.tm_context,
(void (*)(void))thread_start, 4, new_thread,
start_routine, arg);

/*
* Check if this thread is to inherit the scheduling
* attributes from its parent:
*/
if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
if ((new_thread->attr.flags & PTHREAD_INHERIT_SCHED) != 0) {
/* Copy the scheduling attributes: */
new_thread->base_priority =
curthread->base_priority &
~PTHREAD_SIGNAL_PRIORITY;
~THR_SIGNAL_PRIORITY;
new_thread->attr.prio =
curthread->base_priority &
~PTHREAD_SIGNAL_PRIORITY;
~THR_SIGNAL_PRIORITY;
new_thread->attr.sched_policy =
curthread->attr.sched_policy;
} else {
@@ -160,23 +208,49 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->active_priority = new_thread->base_priority;
new_thread->inherited_priority = 0;

/* Initialize joiner to NULL (no joiner): */
new_thread->joiner = NULL;

/* Initialize the mutex queue: */
TAILQ_INIT(&new_thread->mutexq);

/* Initialize thread locking. */
if (_lock_init(&new_thread->lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize thread lock");
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_init(&new_thread->lockusers[i],
(void *)new_thread);
_LCK_SET_PRIVATE2(&new_thread->lockusers[i],
(void *)new_thread);
}

/* Initialise hooks in the thread structure: */
new_thread->specific = NULL;
new_thread->cleanup = NULL;
new_thread->flags = 0;
new_thread->continuation = NULL;

if (new_thread->attr.suspend == THR_CREATE_SUSPENDED)
new_thread->state = PS_SUSPENDED;
else
new_thread->state = PS_RUNNING;

/*
* Defer signals to protect the scheduling queues
* from access by the signal handler:
* System scope threads have their own kse and
* kseg. Process scope threads are all hung
* off the main process kseg.
*/
_thread_kern_sig_defer();
if ((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0) {
new_thread->kseg = _kse_initial->k_kseg;
new_thread->kse = _kse_initial;
}
else {
kse->k_curthread = NULL;
kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD;
new_thread->kse = kse;
new_thread->kseg = kse->k_kseg;
kse->k_mbx.km_udata = kse;
kse->k_mbx.km_curthread = NULL;
}
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);

/*
* Initialise the unique id which GDB uses to
@@ -184,57 +258,53 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
new_thread->uniqueid = next_uniqueid++;

/*
* Check if the garbage collector thread
* needs to be started.
*/
f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);

/* Add the thread to the linked list of all threads: */
TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);
THR_LIST_ADD(new_thread);

if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
new_thread->state = PS_SUSPENDED;
} else {
new_thread->state = PS_RUNNING;
PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
}
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);

/*
* Undefer and handle pending signals, yielding
* if necessary.
* Schedule the new thread starting a new KSEG/KSE
* pair if necessary.
*/
_thread_kern_sig_undefer();
_thr_schedule_add(curthread, new_thread);

/* Return a pointer to the thread structure: */
(*thread) = new_thread;

/* Schedule the new user thread: */
_thread_kern_sched();

/*
* Start a garbage collector thread
* if necessary.
*/
if (f_gc && pthread_create(&gc_thread,NULL,
_thread_gc,NULL) != 0)
PANIC("Can't create gc thread");

}
}
_kse_critical_leave(crit);

if ((ret == 0) && (_kse_isthreaded() == 0))
_kse_setthreaded(1);

/* Return the status: */
return (ret);
}

void
_thread_start(void)
static int
create_stack(struct pthread_attr *pattr)
{
struct pthread *curthread = _get_curthread();
int ret;

/* Check if a stack was specified in the thread attributes: */
if ((pattr->stackaddr_attr) != NULL) {
pattr->guardsize_attr = 0;
pattr->flags = THR_STACK_USER;
ret = 0;
}
else
ret = _thr_stack_alloc(pattr);
return (ret);
}


static void
thread_start(struct pthread *curthread, void *(*start_routine) (void *),
void *arg)
{
/* Run the current thread's start routine with argument: */
pthread_exit(curthread->start_routine(curthread->arg));
pthread_exit(start_routine(arg));

/* This point should never be reached. */
PANIC("Thread has resumed after exit");
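Per the notes above, a PTHREAD_SCOPE_SYSTEM thread is the case that allocates a fresh KSE/KSEG pair; process-scope threads hang off the shared kseg. A caller requests that scope through the standard attribute API; a minimal sketch (the worker routine is hypothetical):

#include <pthread.h>

static void *
worker(void *arg)
{
        return (arg);
}

int
spawn_system_scope(pthread_t *tid)
{
        pthread_attr_t attr;
        int ret;

        pthread_attr_init(&attr);
        /* Ask for a dedicated KSE/KSEG instead of the shared process kseg. */
        ret = pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
        if (ret == 0)
                ret = pthread_create(tid, &attr, worker, NULL);
        pthread_attr_destroy(&attr);
        return (ret);
}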
@@ -31,6 +31,8 @@
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <machine/atomic.h>
#include <errno.h>
#include <pthread.h>
#include "thr_private.h"
@@ -40,50 +42,60 @@ __weak_reference(_pthread_detach, pthread_detach);
int
_pthread_detach(pthread_t pthread)
{
int rval = 0;
struct pthread *curthread, *joiner;
int rval = 0;

/* Check for invalid calling parameters: */
if (pthread == NULL || pthread->magic != PTHREAD_MAGIC)
if (pthread == NULL || pthread->magic != THR_MAGIC)
/* Return an invalid argument error: */
rval = EINVAL;

/* Check if the thread has not been detached: */
else if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) {
/* Check if the thread is already detached: */
else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0)
/* Return an error: */
rval = EINVAL;
else {
/* Lock the detached thread: */
curthread = _get_curthread();
THR_SCHED_LOCK(curthread, pthread);

/* Flag the thread as detached: */
pthread->attr.flags |= PTHREAD_DETACHED;

/*
* Defer signals to protect the scheduling queues from
* access by the signal handler:
*/
_thread_kern_sig_defer();
/* Retrieve any joining thread and remove it: */
joiner = pthread->joiner;
pthread->joiner = NULL;

/* Check if there is a joiner: */
if (pthread->joiner != NULL) {
struct pthread *joiner = pthread->joiner;

/* Make the thread runnable: */
PTHREAD_NEW_STATE(joiner, PS_RUNNING);

/* Set the return value for the woken thread: */
joiner->join_status.error = ESRCH;
joiner->join_status.ret = NULL;
joiner->join_status.thread = NULL;

/*
* Disconnect the joiner from the thread being detached:
*/
pthread->joiner = NULL;
/* We are already in a critical region. */
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
if ((pthread->flags & THR_FLAGS_GC_SAFE) != 0) {
THR_LIST_REMOVE(pthread);
THR_GCLIST_ADD(pthread);
atomic_store_rel_int(&_gc_check, 1);
if (KSE_WAITING(_kse_initial))
KSE_WAKEUP(_kse_initial);
}
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);

/*
* Undefer and handle pending signals, yielding if a
* scheduling signal occurred while in the critical region.
*/
_thread_kern_sig_undefer();
} else
/* Return an error: */
rval = EINVAL;
THR_SCHED_UNLOCK(curthread, pthread);

/* See if there is a thread waiting in pthread_join(): */
if (joiner != NULL) {
/* Lock the joiner before fiddling with it. */
THR_SCHED_LOCK(curthread, joiner);
if (joiner->join_status.thread == pthread) {
/*
* Set the return value for the woken thread:
*/
joiner->join_status.error = ESRCH;
joiner->join_status.ret = NULL;
joiner->join_status.thread = NULL;

_thr_setrunnable_unlocked(joiner);
}
THR_SCHED_UNLOCK(curthread, joiner);
}
}

/* Return the completion status: */
return (rval);
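The reworked path above makes a second pthread_detach() fail with EINVAL rather than succeed silently, so exactly one of pthread_detach() or pthread_join() reaps a thread. A small usage sketch (fire_and_forget() is a hypothetical helper):

#include <pthread.h>

static int
fire_and_forget(void *(*fn)(void *), void *arg)
{
        pthread_t tid;
        int ret;

        ret = pthread_create(&tid, NULL, fn, arg);
        if (ret == 0)
                ret = pthread_detach(tid);  /* detaching again would return EINVAL */
        return (ret);
}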
@@ -40,31 +40,24 @@
#include <pthread.h>
#include "thr_private.h"

#define FLAGS_IN_SCHEDQ \
(PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ)
void _pthread_exit(void *status);

__weak_reference(_pthread_exit, pthread_exit);

void
_thread_exit(char *fname, int lineno, char *string)
_thr_exit(char *fname, int lineno, char *msg)
{
char s[256];
char s[256];

/* Prepare an error message string: */
snprintf(s, sizeof(s),
"Fatal error '%s' at line %d in file %s (errno = %d)\n",
string, lineno, fname, errno);
msg, lineno, fname, errno);

/* Write the string to the standard error file descriptor: */
__sys_write(2, s, strlen(s));

/* Force this process to exit: */
/* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */
#if defined(_PTHREADS_INVARIANTS)
abort();
#else
__sys_exit(1);
#endif
}

/*
@@ -73,7 +66,7 @@ _thread_exit(char *fname, int lineno, char *string)
* abnormal thread termination can be found.
*/
void
_thread_exit_cleanup(void)
_thr_exit_cleanup(void)
{
struct pthread *curthread = _get_curthread();
@@ -96,22 +89,25 @@ _thread_exit_cleanup(void)
void
_pthread_exit(void *status)
{
struct pthread *curthread = _get_curthread();
pthread_t pthread;
struct pthread *curthread = _get_curthread();

/* Check if this thread is already in the process of exiting: */
if ((curthread->flags & PTHREAD_EXITING) != 0) {
if ((curthread->flags & THR_FLAGS_EXITING) != 0) {
char msg[128];
snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread);
snprintf(msg, sizeof(msg), "Thread %p has called "
"pthread_exit() from a destructor. POSIX 1003.1 "
"1996 s16.2.5.2 does not allow this!", curthread);
PANIC(msg);
}

/* Flag this thread as exiting: */
curthread->flags |= PTHREAD_EXITING;
/*
* Flag this thread as exiting. Threads should now be prevented
* from joining to this thread.
*/
curthread->flags |= THR_FLAGS_EXITING;

/* Save the return value: */
curthread->ret = status;

while (curthread->cleanup != NULL) {
pthread_cleanup_pop(1);
}
@@ -124,58 +120,11 @@ _pthread_exit(void *status)
_thread_cleanupspecific();
}

/*
* Lock the garbage collector mutex to ensure that the garbage
* collector is not using the dead thread list.
*/
if (pthread_mutex_lock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");

/* Add this thread to the list of dead threads. */
TAILQ_INSERT_HEAD(&_dead_list, curthread, dle);

/*
* Signal the garbage collector thread that there is something
* to clean up.
*/
if (pthread_cond_signal(&_gc_cond) != 0)
PANIC("Cannot signal gc cond");

/*
* Avoid a race condition where a scheduling signal can occur
* causing the garbage collector thread to run. If this happens,
* the current thread can be cleaned out from under us.
*/
_thread_kern_sig_defer();

/* Unlock the garbage collector mutex: */
if (pthread_mutex_unlock(&_gc_mutex) != 0)
PANIC("Cannot unlock gc mutex");

/* Check if there is a thread joining this one: */
if (curthread->joiner != NULL) {
pthread = curthread->joiner;
curthread->joiner = NULL;

/* Make the joining thread runnable: */
PTHREAD_NEW_STATE(pthread, PS_RUNNING);

/* Set the return value for the joining thread: */
pthread->join_status.ret = curthread->ret;
pthread->join_status.error = 0;
pthread->join_status.thread = NULL;

/* Make this thread collectable by the garbage collector. */
PTHREAD_ASSERT(((curthread->attr.flags & PTHREAD_DETACHED) ==
0), "Cannot join a detached thread");
curthread->attr.flags |= PTHREAD_DETACHED;
}

/* Remove this thread from the thread list: */
TAILQ_REMOVE(&_thread_list, curthread, tle);

/* This thread will never be re-scheduled. */
_thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__);
THR_SCHED_LOCK(curthread, curthread);
THR_SET_STATE(curthread, PS_DEAD);
THR_SCHED_UNLOCK(curthread, curthread);
_thr_sched_switch(curthread);

/* This point should not be reached. */
PANIC("Dead thread has resumed");
@@ -32,8 +32,9 @@
* $FreeBSD$
*/
#include <stdarg.h>
#include <unistd.h>
#include "namespace.h"
#include <fcntl.h>
#include "un-namespace.h"
#include <pthread.h>
#include "thr_private.h"

@@ -42,28 +43,29 @@ __weak_reference(__fcntl, fcntl);
int
__fcntl(int fd, int cmd,...)
{
struct pthread *curthread = _get_curthread();
int ret;
va_list ap;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);

va_start(ap, cmd);
switch (cmd) {
case F_DUPFD:
case F_SETFD:
case F_SETFL:
ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
break;
case F_GETFD:
case F_GETFL:
ret = __sys_fcntl(fd, cmd);
break;
default:
ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
case F_DUPFD:
case F_SETFD:
case F_SETFL:
ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
break;
case F_GETFD:
case F_GETFL:
ret = __sys_fcntl(fd, cmd);
break;
default:
ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
}
va_end(ap);

_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
return (ret);
}
@@ -1,4 +1,5 @@
/*
* Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
* Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
@@ -35,32 +36,65 @@
#include <pthread.h>
#include "thr_private.h"

/* Find a thread in the linked list of active threads: */
/*
* Find a thread in the linked list of active threads and add a reference
* to it. Threads with positive reference counts will not be deallocated
* until all references are released.
*/
int
_find_thread(pthread_t pthread)
_thr_ref_add(struct pthread *curthread, struct pthread *thread,
int include_dead)
{
pthread_t pthread1;
kse_critical_t crit;
struct pthread *pthread;

/* Check if the caller has specified an invalid thread: */
if (pthread == NULL || pthread->magic != PTHREAD_MAGIC)
if (thread == NULL)
/* Invalid thread: */
return(EINVAL);
return (EINVAL);

/*
* Defer signals to protect the thread list from access
* by the signal handler:
*/
_thread_kern_sig_defer();

/* Search for the specified thread: */
TAILQ_FOREACH(pthread1, &_thread_list, tle) {
if (pthread == pthread1)
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
TAILQ_FOREACH(pthread, &_thread_list, tle) {
if (pthread == thread) {
if ((include_dead == 0) &&
((pthread->state == PS_DEAD) ||
((pthread->state == PS_DEADLOCK) ||
((pthread->flags & THR_FLAGS_EXITING) != 0))))
pthread = NULL;
else {
thread->refcount++;
curthread->critical_count++;
}
break;
}
}

/* Undefer and handle pending signals, yielding if necessary: */
_thread_kern_sig_undefer();
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);

/* Return zero if the thread exists: */
return ((pthread1 != NULL) ? 0:ESRCH);
return ((pthread != NULL) ? 0 : ESRCH);
}

void
_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
{
kse_critical_t crit;

if (thread != NULL) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
thread->refcount--;
curthread->critical_count--;
if (((thread->flags & THR_FLAGS_GC_SAFE) != 0) &&
(thread->refcount == 0) &&
((thread->attr.flags & PTHREAD_DETACHED) != 0)) {
THR_LIST_REMOVE(thread);
THR_GCLIST_ADD(thread);
_gc_check = 1;
if (KSE_WAITING(_kse_initial))
KSE_WAKEUP(_kse_initial);
}
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);
}
}
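Callers are expected to bracket any access to another thread with these two routines so the target cannot be reclaimed mid-operation; _pthread_getschedparam() later in this commit does exactly that. A sketch of the pattern (get_base_priority() is a hypothetical accessor):

#include "thr_private.h"

int
get_base_priority(struct pthread *curthread, struct pthread *thread,
    int *prio)
{
        int ret;

        /* Take a reference; 0 means the thread exists and is not dead. */
        if ((ret = _thr_ref_add(curthread, thread, /*include_dead*/0)) == 0) {
                THR_SCHED_LOCK(curthread, thread);
                *prio = thread->base_priority;
                THR_SCHED_UNLOCK(curthread, thread);
                _thr_ref_delete(curthread, thread);
        }
        return (ret);
}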
@@ -31,7 +31,6 @@
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
@@ -40,141 +39,21 @@
#include <pthread.h>
#include "thr_private.h"

static void free_thread_resources(struct pthread *thread);

__weak_reference(_fork, fork);

pid_t
_fork(void)
{
struct pthread *curthread = _get_curthread();
int i, flags, use_deadlist = 0;
pid_t ret;
pthread_t pthread;
pthread_t pthread_save;
struct pthread *curthread;
pid_t ret;

/*
* Defer signals to protect the scheduling queues from access
* by the signal handler:
*/
_thread_kern_sig_defer();
curthread = _get_curthread();

/* Fork a new process: */
if ((ret = __sys_fork()) != 0) {
/* Parent process or error. Nothing to do here. */
} else {
/* Reinitialize the GC mutex: */
if (_mutex_reinit(&_gc_mutex) != 0) {
/* Abort this application: */
PANIC("Cannot initialize GC mutex for forked process");
}
/* Reinitialize the GC condition variable: */
else if (_cond_reinit(&_gc_cond) != 0) {
/* Abort this application: */
PANIC("Cannot initialize GC condvar for forked process");
}
/* Initialize the ready queue: */
else if (_pq_init(&_readyq) != 0) {
/* Abort this application: */
PANIC("Cannot initialize priority ready queue.");
} else {
/*
* Enter a loop to remove all threads other than
* the running thread from the thread list:
*/
if ((pthread = TAILQ_FIRST(&_thread_list)) == NULL) {
pthread = TAILQ_FIRST(&_dead_list);
use_deadlist = 1;
}
while (pthread != NULL) {
/* Save the thread to be freed: */
pthread_save = pthread;

/*
* Advance to the next thread before
* destroying the current thread:
*/
if (use_deadlist != 0)
pthread = TAILQ_NEXT(pthread, dle);
else
pthread = TAILQ_NEXT(pthread, tle);

/* Make sure this isn't the running thread: */
if (pthread_save != curthread) {
/*
* Remove this thread from the
* appropriate list:
*/
if (use_deadlist != 0)
TAILQ_REMOVE(&_thread_list,
pthread_save, dle);
else
TAILQ_REMOVE(&_thread_list,
pthread_save, tle);

free_thread_resources(pthread_save);
}

/*
* Switch to the deadlist when the active
* thread list has been consumed. This can't
* be at the top of the loop because it is
* used to determine to which list the thread
* belongs (when it is removed from the list).
*/
if (pthread == NULL) {
pthread = TAILQ_FIRST(&_dead_list);
use_deadlist = 1;
}
}

/* Treat the current thread as the initial thread: */
_thread_initial = curthread;

/* Re-init the dead thread list: */
TAILQ_INIT(&_dead_list);

/* Re-init the waiting and work queues. */
TAILQ_INIT(&_waitingq);
TAILQ_INIT(&_workq);

/* Re-init the threads mutex queue: */
TAILQ_INIT(&curthread->mutexq);

/* No spinlocks yet: */
_spinblock_count = 0;

/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;
}
}

/*
* Undefer and handle pending signals, yielding if necessary:
*/
_thread_kern_sig_undefer();
if ((ret = __sys_fork()) == 0)
/* Child process */
_kse_single_thread(curthread);

/* Return the process ID: */
return (ret);
}

static void
free_thread_resources(struct pthread *thread)
{

/* Check to see if the threads library allocated the stack. */
if ((thread->attr.stackaddr_attr == NULL) && (thread->stack != NULL)) {
/*
* Since this is being called from fork, we are currently single
* threaded so there is no need to protect the call to
* _thread_stack_free() with _gc_mutex.
*/
_thread_stack_free(thread->stack, thread->attr.stacksize_attr,
thread->attr.guardsize_attr);
}

if (thread->specific != NULL)
free(thread->specific);

free(thread);
}
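With the fork path reduced to _kse_single_thread(), the child returns with only the calling thread. The usual safe pattern in a threaded program is to exec promptly in the child; a minimal sketch (run_child() is a hypothetical helper):

#include <sys/types.h>
#include <unistd.h>

static pid_t
run_child(char *const argv[])
{
        pid_t pid = fork();     /* the child comes back single-threaded */

        if (pid == 0) {
                execv(argv[0], argv);
                _exit(127);     /* only reached if exec failed */
        }
        return (pid);           /* parent: child pid, or -1 on error */
}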
@@ -40,11 +40,12 @@ __weak_reference(__fsync, fsync);
int
__fsync(int fd)
{
struct pthread *curthread = _get_curthread();
int ret;

_thread_enter_cancellation_point();
_thr_enter_cancellation_point(curthread);
ret = __sys_fsync(fd);
_thread_leave_cancellation_point();
_thr_leave_cancellation_point(curthread);

return ret;
return (ret);
}
@@ -1,219 +0,0 @@
/*
* Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by John Birrell.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
* Garbage collector thread. Frees memory allocated for dead threads.
*
*/
#include <sys/param.h>
#include <errno.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <pthread.h>
#include "thr_private.h"
pthread_addr_t
_thread_gc(pthread_addr_t arg)
{
struct pthread *curthread = _get_curthread();
int f_debug;
int f_done = 0;
int ret;
sigset_t mask;
pthread_t pthread;
pthread_t pthread_cln;
struct timespec abstime;
void *p_stack;

/* Block all signals */
sigfillset(&mask);
pthread_sigmask(SIG_BLOCK, &mask, NULL);

/* Mark this thread as a library thread (not a user thread). */
curthread->flags |= PTHREAD_FLAGS_PRIVATE;

/* Set a debug flag based on an environment variable. */
f_debug = (getenv("LIBC_R_DEBUG") != NULL);

/* Set the name of this thread. */
pthread_set_name_np(curthread,"GC");

while (!f_done) {
/* Check if debugging this application. */
if (f_debug)
/* Dump thread info to file. */
_thread_dump_info();

/*
* Defer signals to protect the scheduling queues from
* access by the signal handler:
*/
_thread_kern_sig_defer();

/* Check if this is the last running thread: */
if (TAILQ_FIRST(&_thread_list) == curthread &&
TAILQ_NEXT(curthread, tle) == NULL)
/*
* This is the last thread, so it can exit
* now.
*/
f_done = 1;

/*
* Undefer and handle pending signals, yielding if
* necessary:
*/
_thread_kern_sig_undefer();

/* No stack of thread structure to free yet: */
p_stack = NULL;
pthread_cln = NULL;

/*
* Lock the garbage collector mutex which ensures that
* this thread sees another thread exit:
*/
if (pthread_mutex_lock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");

/*
* Enter a loop to search for the first dead thread that
* has memory to free.
*/
for (pthread = TAILQ_FIRST(&_dead_list);
p_stack == NULL && pthread_cln == NULL && pthread != NULL;
pthread = TAILQ_NEXT(pthread, dle)) {
/* Check if the initial thread: */
if (pthread == _thread_initial) {
/* Don't destroy the initial thread. */
}
/*
* Check if this thread has detached:
*/
else if ((pthread->attr.flags &
PTHREAD_DETACHED) != 0) {
/* Remove this thread from the dead list: */
TAILQ_REMOVE(&_dead_list, pthread, dle);

/*
* Check if the stack was not specified by
* the caller to pthread_create() and has not
* been destroyed yet:
*/
if (pthread->attr.stackaddr_attr == NULL &&
pthread->stack != NULL) {
_thread_stack_free(pthread->stack,
pthread->attr.stacksize_attr,
pthread->attr.guardsize_attr);
}

/*
* Point to the thread structure that must
* be freed outside the locks:
*/
pthread_cln = pthread;

} else {
/*
* This thread has not detached, so do
* not destroy it.
*
* Check if the stack was not specified by
* the caller to pthread_create() and has not
* been destroyed yet:
*/
if (pthread->attr.stackaddr_attr == NULL &&
pthread->stack != NULL) {
_thread_stack_free(pthread->stack,
pthread->attr.stacksize_attr,
pthread->attr.guardsize_attr);

/*
* NULL the stack pointer now that the
* memory has been freed:
*/
pthread->stack = NULL;
}
}
}

/*
* Check if this is not the last thread and there is no
* memory to free this time around.
*/
if (!f_done && p_stack == NULL && pthread_cln == NULL) {
/* Get the current time. */
if (clock_gettime(CLOCK_REALTIME,&abstime) != 0)
PANIC("gc cannot get time");

/*
* Do a backup poll in 10 seconds if no threads
* die before then.
*/
abstime.tv_sec += 10;

/*
* Wait for a signal from a dying thread or a
* timeout (for a backup poll).
*/
if ((ret = pthread_cond_timedwait(&_gc_cond,
&_gc_mutex, &abstime)) != 0 && ret != ETIMEDOUT)
PANIC("gc cannot wait for a signal");
}

/* Unlock the garbage collector mutex: */
if (pthread_mutex_unlock(&_gc_mutex) != 0)
PANIC("Cannot unlock gc mutex");

/*
* If there is memory to free, do it now. The call to
* free() might block, so this must be done outside the
* locks.
*/
if (p_stack != NULL)
free(p_stack);
if (pthread_cln != NULL) {
if (pthread_cln->name != NULL) {
/* Free the thread name string. */
free(pthread_cln->name);
}
/*
* Free the memory allocated for the thread
* structure.
*/
free(pthread_cln);
}
}
return (NULL);
}
@@ -41,19 +41,33 @@ int
_pthread_getschedparam(pthread_t pthread, int *policy,
struct sched_param *param)
{
struct pthread *curthread = _get_curthread();
int ret;

if ((param == NULL) || (policy == NULL))
/* Return an invalid argument error: */
ret = EINVAL;

/* Find the thread in the list of active threads: */
else if ((ret = _find_thread(pthread)) == 0) {
/* Return the thread's base priority and scheduling policy: */
else if (pthread == curthread) {
/*
* Avoid searching the thread list when it is the current
* thread.
*/
THR_SCHED_LOCK(curthread, curthread);
param->sched_priority =
PTHREAD_BASE_PRIORITY(pthread->base_priority);
THR_BASE_PRIORITY(pthread->base_priority);
*policy = pthread->attr.sched_policy;
THR_SCHED_UNLOCK(curthread, curthread);
ret = 0;
}

return(ret);
/* Find the thread in the list of active threads. */
else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
== 0) {
THR_SCHED_LOCK(curthread, pthread);
param->sched_priority =
THR_BASE_PRIORITY(pthread->base_priority);
*policy = pthread->attr.sched_policy;
THR_SCHED_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);
}
return (ret);
}
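From the application's point of view the fast self-query path and the reference-counted path above are indistinguishable; both serve the standard API. A short usage sketch:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void
show_sched(pthread_t tid)
{
        struct sched_param param;
        int policy;

        if (pthread_getschedparam(tid, &policy, &param) == 0)
                printf("policy %d, priority %d\n", policy,
                    param.sched_priority);
}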
@@ -56,11 +56,12 @@ struct s_thread_info {
/* Static variables: */
static const struct s_thread_info thread_info[] = {
{PS_RUNNING , "Running"},
{PS_LOCKWAIT , "Waiting on an internal lock"},
{PS_MUTEX_WAIT , "Waiting on a mutex"},
{PS_COND_WAIT , "Waiting on a condition variable"},
{PS_SLEEP_WAIT , "Sleeping"},
{PS_WAIT_WAIT , "Waiting process"},
{PS_SPINBLOCK , "Waiting for a spinlock"},
{PS_SIGSUSPEND , "Suspended, waiting for a signal"},
{PS_SIGWAIT , "Waiting for a signal"},
{PS_JOIN , "Waiting to join"},
{PS_SUSPENDED , "Suspended"},
{PS_DEAD , "Dead"},
@@ -71,12 +72,9 @@ static const struct s_thread_info thread_info[] = {
void
_thread_dump_info(void)
{
char s[512];
int fd;
int i;
pthread_t pthread;
char tmpfile[128];
pq_list_t *pq_list;
char s[512], tmpfile[128];
pthread_t pthread;
int fd, i;

for (i = 0; i < 100000; i++) {
snprintf(tmpfile, sizeof(tmpfile), "/tmp/uthread.dump.%u.%i",
@ -102,64 +100,34 @@ _thread_dump_info(void)
        /* all 100000 possibilities are in use :( */
        return;
    } else {
        /* Output a header for active threads: */
        strcpy(s, "\n\n=============\nACTIVE THREADS\n\n");
        /* Dump the active threads. */
        strcpy(s, "\n\n========\nACTIVE THREADS\n\n");
        __sys_write(fd, s, strlen(s));

        /* Enter a loop to report each thread in the global list: */
        TAILQ_FOREACH(pthread, &_thread_list, tle) {
            dump_thread(fd, pthread, /*long_version*/ 1);
            if (pthread->state != PS_DEAD)
                dump_thread(fd, pthread, /*long_version*/ 1);
        }

        /* Output a header for ready threads: */
        strcpy(s, "\n\n=============\nREADY THREADS\n\n");
        /*
         * Dump the ready threads.
         * XXX - We can't easily do this because the run queues
         * are per-KSEG.
         */
        strcpy(s, "\n\n========\nREADY THREADS - unimplemented\n\n");
        __sys_write(fd, s, strlen(s));

        /* Enter a loop to report each thread in the ready queue: */
        TAILQ_FOREACH (pq_list, &_readyq.pq_queue, pl_link) {
            TAILQ_FOREACH(pthread, &pq_list->pl_head, pqe) {
                dump_thread(fd, pthread, /*long_version*/ 0);
            }
        }

        /* Output a header for waiting threads: */
        strcpy(s, "\n\n=============\nWAITING THREADS\n\n");
        /*
         * Dump the waiting threads.
         * XXX - We can't easily do this because the wait queues
         * are per-KSEG.
         */
        strcpy(s, "\n\n========\nWAITING THREADS - unimplemented\n\n");
        __sys_write(fd, s, strlen(s));

        /* Enter a loop to report each thread in the waiting queue: */
        TAILQ_FOREACH (pthread, &_waitingq, pqe) {
            dump_thread(fd, pthread, /*long_version*/ 0);
        }

        /* Output a header for threads in the work queue: */
        strcpy(s, "\n\n=============\nTHREADS IN WORKQ\n\n");
        __sys_write(fd, s, strlen(s));

        /* Enter a loop to report each thread in the work queue: */
        TAILQ_FOREACH (pthread, &_workq, qe) {
            dump_thread(fd, pthread, /*long_version*/ 0);
        }

        /* Check if there are no dead threads: */
        if (TAILQ_FIRST(&_dead_list) == NULL) {
            /* Output a record: */
            strcpy(s, "\n\nTHERE ARE NO DEAD THREADS\n");
            __sys_write(fd, s, strlen(s));
        } else {
            /* Output a header for dead threads: */
            strcpy(s, "\n\nDEAD THREADS\n\n");
            __sys_write(fd, s, strlen(s));

            /*
             * Enter a loop to report each thread in the global
             * dead thread list:
             */
            TAILQ_FOREACH(pthread, &_dead_list, dle) {
                dump_thread(fd, pthread, /*long_version*/ 0);
            }
        }

        /* Close the dump file: */
        /* Close the dump file. */
        __sys_close(fd);
    }
}
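_thread_dump_info() obtains its output file with the loop shown at the top of the function: candidate names are generated until open() with O_CREAT | O_EXCL succeeds, so a dump never clobbers an existing file, and the function gives up once all names are taken. A standalone sketch of that idiom follows; only the "/tmp/uthread.dump.%u.%i" format string appears in the diff itself, so the getpid() argument and the 0666 mode are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Try numbered names until O_CREAT|O_EXCL succeeds; EXCL guarantees we
 * created the file ourselves rather than opening someone else's.
 */
static int
open_unique_dump(char *path, size_t len)
{
    int fd, i;

    for (i = 0; i < 100000; i++) {
        snprintf(path, len, "/tmp/uthread.dump.%u.%i",
            (unsigned)getpid(), i);
        fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0666);
        if (fd >= 0)
            return (fd);    /* name was free; the file is ours */
    }
    return (-1);            /* all 100000 possibilities are in use */
}

int
main(void)
{
    char path[128];
    int fd;

    if ((fd = open_unique_dump(path, sizeof(path))) >= 0) {
        printf("dumping to %s\n", path);
        close(fd);
    }
    return (0);
}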
@ -167,9 +135,9 @@ _thread_dump_info(void)
static void
dump_thread(int fd, pthread_t pthread, int long_version)
{
    struct pthread *curthread = _get_curthread();
    char s[512];
    int i;
    struct pthread  *curthread = _get_curthread();
    char            s[512];
    int             i;

    /* Find the state: */
    for (i = 0; i < NELEMENTS(thread_info) - 1; i++)
@ -178,10 +146,11 @@ dump_thread(int fd, pthread_t pthread, int long_version)
    /* Output a record for the thread: */
    snprintf(s, sizeof(s),
        "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
        "--------------------\n"
        "Thread %p (%s) prio %3d, blocked %s, state %s [%s:%d]\n",
        pthread, (pthread->name == NULL) ? "" : pthread->name,
        pthread->active_priority, thread_info[i].name, pthread->fname,
        pthread->lineno);
        pthread->active_priority, (pthread->blocked != 0) ? "yes" : "no",
        thread_info[i].name, pthread->fname, pthread->lineno);
    __sys_write(fd, s, strlen(s));

    if (long_version != 0) {
@ -192,13 +161,24 @@ dump_thread(int fd, pthread_t pthread, int long_version)
        __sys_write(fd, s, strlen(s));
    }
    /* Check if this is the initial thread: */
    if (pthread == _thread_initial) {
    if (pthread == _thr_initial) {
        /* Output a record for the initial thread: */
        strcpy(s, "This is the initial thread\n");
        __sys_write(fd, s, strlen(s));
    }
    /* Process according to thread state: */
    switch (pthread->state) {
    case PS_SIGWAIT:
        snprintf(s, sizeof(s), "sigmask (hi)");
        __sys_write(fd, s, strlen(s));
        for (i = _SIG_WORDS - 1; i >= 0; i--) {
            snprintf(s, sizeof(s), "%08x\n",
                pthread->sigmask.__bits[i]);
            __sys_write(fd, s, strlen(s));
        }
        snprintf(s, sizeof(s), "(lo)\n");
        __sys_write(fd, s, strlen(s));
        break;
    /*
     * Trap other states that are not explicitly
     * coded to dump information:
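The PS_SIGWAIT case added above walks the signal mask from the most significant word down, printing each 32-bit chunk in hex. A sketch of the same traversal follows; note that _SIG_WORDS and the __bits[] member come from FreeBSD's sigset_t layout and are assumed here, so this fragment is FreeBSD-specific.

#include <signal.h>
#include <stdio.h>

/* Print a signal mask one 32-bit word at a time, high word first. */
static void
print_sigmask(const sigset_t *set)
{
    int i;

    printf("sigmask (hi) ");
    for (i = _SIG_WORDS - 1; i >= 0; i--)
        printf("%08x", set->__bits[i]);
    printf(" (lo)\n");
}

int
main(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGINT);
    print_sigmask(&set);
    return (0);
}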
@ -212,10 +192,10 @@ dump_thread(int fd, pthread_t pthread, int long_version)
/* Set the thread name for debug: */
void
_pthread_set_name_np(pthread_t thread, const char *name)
_pthread_set_name_np(pthread_t thread, char *name)
{
    /* Check if the caller has specified a valid thread: */
    if (thread != NULL && thread->magic == PTHREAD_MAGIC) {
    if (thread != NULL && thread->magic == THR_MAGIC) {
        if (thread->name != NULL) {
            /* Free space for previous name. */
            free(thread->name);
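The final hunk validates the thread handle against its magic number before touching it, then frees any previously stored name. A hedged sketch of where that code is headed, with an invented magic constant and a plain struct in place of struct pthread; the strdup() of the new name is an assumption about the lines cut off below.

#include <stdlib.h>
#include <string.h>

struct thr {
    unsigned magic;
    char    *name;
};

#define THR_MAGIC 0xd09ba115u   /* illustrative value only */

/* Validate the handle, drop the old name, store a private copy. */
static void
thr_set_name(struct thr *t, const char *name)
{
    if (t == NULL || t->magic != THR_MAGIC)
        return;                 /* not a valid thread handle */
    free(t->name);              /* free(NULL) is a no-op */
    t->name = (name != NULL) ? strdup(name) : NULL;
}

int
main(void)
{
    struct thr t = { THR_MAGIC, NULL };

    thr_set_name(&t, "worker");
    thr_set_name(&t, "worker-renamed");  /* previous name is freed */
    free(t.name);
    return (0);
}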