/*
 * Copyright (C) 2005 Daniel M. Eischen <deischen@freebsd.org>
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _THR_PRIVATE_H
#define _THR_PRIVATE_H

/*
 * Include files.
 */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/cdefs.h>
#include <sys/queue.h>
#include <sys/param.h>
#include <sys/cpuset.h>
#include <machine/atomic.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <ucontext.h>
#include <sys/thr.h>
#include <pthread.h>

#ifndef __hidden
#define __hidden	__attribute__((visibility("hidden")))
#endif

#include "pthread_md.h"
#include "thr_umtx.h"
#include "thread_db.h"

typedef TAILQ_HEAD(pthreadlist, pthread) pthreadlist;
typedef TAILQ_HEAD(atfork_head, pthread_atfork) atfork_head;
TAILQ_HEAD(mutex_queue, pthread_mutex);

/* Signal used for cancellation. */
#define	SIGCANCEL		32

/*
 * Kernel fatal error handler macro.
 */
#define PANIC(string)		_thread_exit(__FILE__, __LINE__, string)

/* Output debug messages like this: */
#define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, ##args)
#define stderr_debug(args...)	_thread_printf(STDERR_FILENO, ##args)

#ifdef _PTHREADS_INVARIANTS
#define THR_ASSERT(cond, msg) do {	\
	if (__predict_false(!(cond)))	\
		PANIC(msg);		\
} while (0)
#else
#define THR_ASSERT(cond, msg)
#endif

#ifdef PIC
# define STATIC_LIB_REQUIRE(name)
#else
# define STATIC_LIB_REQUIRE(name)	__asm (".globl " #name)
#endif

#define	TIMESPEC_ADD(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec + (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
		if ((dst)->tv_nsec >= 1000000000) {		\
			(dst)->tv_sec++;			\
			(dst)->tv_nsec -= 1000000000;		\
		}						\
	} while (0)

#define	TIMESPEC_SUB(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
		if ((dst)->tv_nsec < 0) {			\
			(dst)->tv_sec--;			\
			(dst)->tv_nsec += 1000000000;		\
		}						\
	} while (0)
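
/*
 * Example with hypothetical values: turning a relative timeout into an
 * absolute deadline with the helpers above.  Both macros normalize
 * tv_nsec by at most one second, so they assume both inputs are already
 * normalized timespecs (0 <= tv_nsec < 1000000000).
 *
 *	struct timespec now, rel = { 2, 500000000 }, abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &now);
 *	TIMESPEC_ADD(&abstime, &now, &rel);	(abstime = now + 2.5s)
 */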

struct pthread_mutex {
	/*
	 * Lock for accesses to this structure.
	 */
	struct umutex			m_lock;
	enum pthread_mutextype		m_type;
	struct pthread			*m_owner;
	int				m_count;
	int				m_refcount;
	int				m_spinloops;
	int				m_yieldloops;
	/*
	 * Link for all mutexes a thread currently owns.
	 */
	TAILQ_ENTRY(pthread_mutex)	m_qe;
};

struct pthread_mutex_attr {
	enum pthread_mutextype	m_type;
	int			m_protocol;
	int			m_ceiling;
};

#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }

struct pthread_cond {
	struct umutex	c_lock;
	struct ucond	c_kerncv;
	int		c_pshared;
	int		c_clockid;
};

struct pthread_cond_attr {
	int		c_pshared;
	int		c_clockid;
};

struct pthread_barrier {
	struct umutex		b_lock;
	struct ucond		b_cv;
	volatile int64_t	b_cycle;
	volatile int		b_count;
	volatile int		b_waiters;
};

struct pthread_barrierattr {
	int		pshared;
};

struct pthread_spinlock {
	struct umutex	s_lock;
};

/*
 * Flags for condition variables.
 */
#define COND_FLAGS_PRIVATE	0x01
#define COND_FLAGS_INITED	0x02
#define COND_FLAGS_BUSY		0x04

/*
 * Cleanup definitions.
 */
struct pthread_cleanup {
	struct pthread_cleanup	*next;
	void			(*routine)(void *args);
	void			*routine_arg;
	int			onstack;
};

#define	THR_CLEANUP_PUSH(td, func, arg)		{	\
	struct pthread_cleanup __cup;			\
							\
	__cup.routine = func;				\
	__cup.routine_arg = arg;			\
	__cup.onstack = 1;				\
	__cup.next = (td)->cleanup;			\
	(td)->cleanup = &__cup;

#define	THR_CLEANUP_POP(td, exec)			\
	(td)->cleanup = __cup.next;			\
	if ((exec) != 0)				\
		__cup.routine(__cup.routine_arg);	\
}
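
/*
 * Note that THR_CLEANUP_PUSH opens a brace-delimited block that
 * THR_CLEANUP_POP closes, so the two must always be used as a pair, at
 * the same nesting level, within a single function.  A minimal sketch
 * (the names curthread, cleanup_fn and cleanup_arg are illustrative):
 *
 *	THR_CLEANUP_PUSH(curthread, cleanup_fn, cleanup_arg);
 *	... code protected by the cleanup handler ...
 *	THR_CLEANUP_POP(curthread, 0);
 */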

struct pthread_atfork {
	TAILQ_ENTRY(pthread_atfork) qe;
	void (*prepare)(void);
	void (*parent)(void);
	void (*child)(void);
};

struct pthread_attr {
	int	sched_policy;
	int	sched_inherit;
	int	prio;
	int	suspend;
#define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
	int	flags;
	void	*stackaddr_attr;
	size_t	stacksize_attr;
	size_t	guardsize_attr;
	cpuset_t	*cpuset;
	size_t	cpusetsize;
};

/*
 * Thread creation state attributes.
 */
#define THR_CREATE_RUNNING		0
#define THR_CREATE_SUSPENDED		1

/*
 * Miscellaneous definitions.
 */
#define THR_STACK_DEFAULT		(sizeof(void *) / 4 * 1024 * 1024)

/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be larger
 * than the stacks of other threads, since many applications are likely to run
 * almost entirely on this stack.
 */
#define THR_STACK_INITIAL		(THR_STACK_DEFAULT * 2)

/*
 * Define priorities returned by kernel.
 */
#define THR_MIN_PRIORITY		(_thr_priorities[SCHED_OTHER-1].pri_min)
#define THR_MAX_PRIORITY		(_thr_priorities[SCHED_OTHER-1].pri_max)
#define THR_DEF_PRIORITY		(_thr_priorities[SCHED_OTHER-1].pri_default)

#define THR_MIN_RR_PRIORITY		(_thr_priorities[SCHED_RR-1].pri_min)
#define THR_MAX_RR_PRIORITY		(_thr_priorities[SCHED_RR-1].pri_max)
#define THR_DEF_RR_PRIORITY		(_thr_priorities[SCHED_RR-1].pri_default)

/* XXX SCHED_FIFO should have the same priority range as SCHED_RR. */
#define THR_MIN_FIFO_PRIORITY		(_thr_priorities[SCHED_FIFO-1].pri_min)
#define THR_MAX_FIFO_PRIORITY		(_thr_priorities[SCHED_FIFO-1].pri_max)
#define THR_DEF_FIFO_PRIORITY		(_thr_priorities[SCHED_FIFO-1].pri_default)

struct pthread_prio {
	int	pri_min;
	int	pri_max;
	int	pri_default;
};

struct pthread_rwlockattr {
	int		pshared;
};

struct pthread_rwlock {
	struct urwlock	lock;
	struct pthread	*owner;
};

/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_DEAD
};

struct pthread_specific_elem {
	const void	*data;
	int		seqno;
};

struct pthread_key {
	volatile int	allocated;
	int		seqno;
	void		(*destructor)(void *);
};

/*
 * lwpid_t is 32-bit, but the kernel thr API exported the tid as a long
 * in its very early days.
 */
#define TID(thread)	((uint32_t) ((thread)->tid))

/*
 * Thread structure.
 */
struct pthread {
	/* Kernel thread id. */
	long			tid;
#define	TID_TERMINATED		1

	/*
	 * Lock for accesses to this thread structure.
	 */
	struct umutex		lock;

	/* Internal condition variable cycle number. */
	uint32_t		cycle;

	/* How many low-level locks the thread holds. */
	int			locklevel;

	/*
	 * Set to non-zero when this thread has entered a critical
	 * region.  We allow for recursive entries into critical regions.
	 */
	int			critical_count;

	/* Signal blocked counter. */
	int			sigblock;

	/* Queue entry for list of all threads. */
	TAILQ_ENTRY(pthread)	tle;	/* link for all threads in process */

	/* Queue entry for GC lists. */
	TAILQ_ENTRY(pthread)	gcle;

	/* Hash queue entry. */
	LIST_ENTRY(pthread)	hle;

	/* Thread's reference count. */
	int			refcount;

	/*
	 * Thread start routine, argument, stack pointer and thread
	 * attributes.
	 */
	void			*(*start_routine)(void *);
	void			*arg;
	struct pthread_attr	attr;

#define	SHOULD_CANCEL(thr)					\
	((thr)->cancel_pending &&				\
	 ((thr)->cancel_point || (thr)->cancel_async) &&	\
	 (thr)->cancel_enable && (thr)->cancelling == 0)

	/* Cancellation is enabled. */
	int			cancel_enable;

	/* Cancellation request is pending. */
	int			cancel_pending;

	/* Thread is at a cancellation point. */
	int			cancel_point;

	/* Cancellation should be synchronized (deferred). */
	int			cancel_defer;

	/* Asynchronous cancellation is enabled. */
	int			cancel_async;

	/* Cancellation is in progress. */
	int			cancelling;

	/* Thread temporary signal mask. */
	sigset_t		sigmask;

	/* Thread is in SIGCANCEL handler. */
	int			in_sigcancel_handler;

	/* New thread should unblock SIGCANCEL. */
	int			unblock_sigcancel;

	/* Force new thread to exit. */
	int			force_exit;

	/* Thread state: */
	enum pthread_state	state;

	/*
	 * Error variable used instead of errno.  The function __error()
	 * returns a pointer to this.
	 */
	int			error;

	/* The joiner is the thread that is waiting to join this thread. */
	struct pthread		*joiner;

	/* Miscellaneous flags; only set with scheduling lock held. */
	int			flags;
#define THR_FLAGS_PRIVATE	0x0001
#define	THR_FLAGS_NEED_SUSPEND	0x0002	/* thread should be suspended */
#define	THR_FLAGS_SUSPENDED	0x0004	/* thread is suspended */

	/* Thread list flags; only set with thread list lock held. */
	int			tlflags;
#define	TLFLAGS_GC_SAFE		0x0001	/* thread safe for cleaning */
#define	TLFLAGS_IN_TDLIST	0x0002	/* thread in all thread list */
#define	TLFLAGS_IN_GCLIST	0x0004	/* thread in gc list */
#define	TLFLAGS_DETACHED	0x0008	/* thread is detached */

	/* Queue of currently owned NORMAL or PRIO_INHERIT type mutexes. */
	struct mutex_queue	mutexq;

	/* Queue of all owned PRIO_PROTECT mutexes. */
	struct mutex_queue	pp_mutexq;

	void			*ret;
	struct pthread_specific_elem	*specific;
	int			specific_data_count;

	/* Number of read locks held on rwlocks. */
	int			rdlock_count;

	/* Current locks bitmap for rtld. */
	int			rtld_bits;

	/* Thread control block. */
	struct tcb		*tcb;

	/* Linked list of cleanup handlers. */
	struct pthread_cleanup	*cleanup;

	/*
	 * Magic value to help recognize a valid thread structure
	 * from an invalid one:
	 */
#define	THR_MAGIC		((u_int32_t) 0xd09ba115)
	u_int32_t		magic;

	/* Enable event reporting. */
	int			report_events;

	/* Event mask. */
	int			event_mask;

	/* Event buffer. */
	td_event_msg_t		event_buf;
};

#define THR_IN_CRITICAL(thrd)				\
	(((thrd)->locklevel > 0) ||			\
	 ((thrd)->critical_count > 0))

#define THR_CRITICAL_ENTER(thrd)			\
	(thrd)->critical_count++

#define THR_CRITICAL_LEAVE(thrd)			\
	do {						\
		(thrd)->critical_count--;		\
		_thr_ast(thrd);				\
	} while (0)
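
/*
 * Critical regions nest: every THR_CRITICAL_ENTER must be balanced by a
 * THR_CRITICAL_LEAVE, and THR_IN_CRITICAL stays true while either the
 * critical count or the low-level lock level is non-zero.  Each leave
 * decrements the count and then calls _thr_ast(), presumably so that
 * work deferred while inside the region (such as suspension or
 * cancellation handling) gets a chance to run; see _thr_ast() below.
 */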

#define THR_UMUTEX_TRYLOCK(thrd, lck)			\
	_thr_umutex_trylock((lck), TID(thrd))

#define	THR_UMUTEX_LOCK(thrd, lck)			\
	_thr_umutex_lock((lck), TID(thrd))

#define	THR_UMUTEX_TIMEDLOCK(thrd, lck, timo)		\
	_thr_umutex_timedlock((lck), TID(thrd), (timo))

#define	THR_UMUTEX_UNLOCK(thrd, lck)			\
	_thr_umutex_unlock((lck), TID(thrd))

#define	THR_LOCK_ACQUIRE(thrd, lck)			\
	do {						\
		(thrd)->locklevel++;			\
		_thr_umutex_lock(lck, TID(thrd));	\
	} while (0)

#ifdef	_PTHREADS_INVARIANTS
#define	THR_ASSERT_LOCKLEVEL(thrd)				\
	do {							\
		if (__predict_false((thrd)->locklevel <= 0))	\
			_thr_assert_lock_level();		\
	} while (0)
#else
#define THR_ASSERT_LOCKLEVEL(thrd)
#endif

#define	THR_LOCK_RELEASE(thrd, lck)			\
	do {						\
		THR_ASSERT_LOCKLEVEL(thrd);		\
		_thr_umutex_unlock((lck), TID(thrd));	\
		(thrd)->locklevel--;			\
		_thr_ast(thrd);				\
	} while (0)

#define	THR_LOCK(curthrd)		THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
#define	THR_UNLOCK(curthrd)		THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
#define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
#define	THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)
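
/*
 * A minimal usage sketch for the lock wrappers above; "curthread" stands
 * for the calling thread's own struct pthread, however it is obtained on
 * a given platform, and "target" for some other thread:
 *
 *	THR_LOCK(curthread);			lock our own structure
 *	... update fields of curthread ...
 *	THR_UNLOCK(curthread);
 *
 *	THR_THREAD_LOCK(curthread, target);	lock another thread
 *	... inspect or update target ...
 *	THR_THREAD_UNLOCK(curthread, target);
 *
 * Acquire/release bump and drop curthread->locklevel, so
 * THR_IN_CRITICAL(curthread) is true while any such lock is held.
 */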

#define	THREAD_LIST_LOCK(curthrd)				\
	do {							\
		THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock);	\
	} while (0)

#define	THREAD_LIST_UNLOCK(curthrd)				\
	do {							\
		THR_LOCK_RELEASE((curthrd), &_thr_list_lock);	\
	} while (0)

/*
 * Macros to insert/remove threads into/from the all-threads list and
 * the GC list.
 */
#define THR_LIST_ADD(thrd) do {					\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle);	\
		_thr_hash_add(thrd);				\
		(thrd)->tlflags |= TLFLAGS_IN_TDLIST;		\
	}							\
} while (0)
#define THR_LIST_REMOVE(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_list, thrd, tle);		\
		_thr_hash_remove(thrd);				\
		(thrd)->tlflags &= ~TLFLAGS_IN_TDLIST;		\
	}							\
} while (0)
#define THR_GCLIST_ADD(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
		(thrd)->tlflags |= TLFLAGS_IN_GCLIST;		\
		_gc_count++;					\
	}							\
} while (0)
#define THR_GCLIST_REMOVE(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle);	\
		(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST;		\
		_gc_count--;					\
	}							\
} while (0)
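
/*
 * The list/GC macros above manipulate tlflags, the global thread list,
 * the thread hash and _gc_count, and provide no locking of their own;
 * per the "only set with thread list lock held" rule on tlflags, callers
 * are expected to wrap them in THREAD_LIST_LOCK/THREAD_LIST_UNLOCK.
 */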

#define GC_NEEDED()	(_gc_count >= 5)

#define SHOULD_REPORT_EVENT(curthr, e)			\
	(curthr->report_events &&			\
	 (((curthr)->event_mask | _thread_event_mask) & e) != 0)

extern int __isthreaded;

/*
 * Global variables for the pthread kernel.
 */

extern char		*_usrstack __hidden;
extern struct pthread	*_thr_initial __hidden;

/* For debugger */
extern int		_libthr_debug;
extern int		_thread_event_mask;
extern struct pthread	*_thread_last_event;

/* List of all threads: */
extern pthreadlist	_thread_list;

/* List of threads needing GC: */
extern pthreadlist	_thread_gc_list __hidden;

extern int		_thread_active_threads;
extern atfork_head	_thr_atfork_list __hidden;
extern struct umutex	_thr_atfork_lock __hidden;

/* Default thread attributes: */
extern struct pthread_attr _pthread_attr_default __hidden;

/* Default mutex attributes: */
extern struct pthread_mutex_attr _pthread_mutexattr_default __hidden;

/* Default condition variable attributes: */
extern struct pthread_cond_attr _pthread_condattr_default __hidden;

extern struct pthread_prio _thr_priorities[] __hidden;

extern pid_t	_thr_pid __hidden;
extern int	_thr_is_smp __hidden;

extern size_t	_thr_guard_default __hidden;
extern size_t	_thr_stack_default __hidden;
extern size_t	_thr_stack_initial __hidden;
extern int	_thr_page_size __hidden;

/*
 * Adaptive mutex spinning tunables, settable through the
 * LIBPTHREAD_SPINLOOPS and LIBPTHREAD_YIELDLOOPS environment variables;
 * both default to 0, i.e. spinning/yielding before sleeping in the
 * kernel is disabled.
 */
extern int	_thr_spinloops __hidden;
extern int	_thr_yieldloops __hidden;

/* Garbage thread count. */
extern int	_gc_count __hidden;

extern struct umutex	_mutex_static_lock __hidden;
extern struct umutex	_cond_static_lock __hidden;
extern struct umutex	_rwlock_static_lock __hidden;
extern struct umutex	_keytable_lock __hidden;
extern struct umutex	_thr_list_lock __hidden;
extern struct umutex	_thr_event_lock __hidden;

/*
 * Function prototype definitions.
 */
__BEGIN_DECLS
int	_thr_setthreaded(int) __hidden;
int	_mutex_cv_lock(pthread_mutex_t *, int count) __hidden;
int	_mutex_cv_unlock(pthread_mutex_t *, int *count) __hidden;
int	_mutex_reinit(pthread_mutex_t *) __hidden;
void	_mutex_fork(struct pthread *curthread) __hidden;
void	_libpthread_init(struct pthread *) __hidden;
struct pthread *_thr_alloc(struct pthread *) __hidden;
void	_thread_exit(const char *, int, const char *) __hidden __dead2;
void	_thr_exit_cleanup(void) __hidden;
int	_thr_ref_add(struct pthread *, struct pthread *, int) __hidden;
void	_thr_ref_delete(struct pthread *, struct pthread *) __hidden;
void	_thr_ref_delete_unlocked(struct pthread *, struct pthread *) __hidden;
int	_thr_find_thread(struct pthread *, struct pthread *, int) __hidden;
void	_thr_rtld_init(void) __hidden;
void	_thr_rtld_fini(void) __hidden;
int	_thr_stack_alloc(struct pthread_attr *) __hidden;
void	_thr_stack_free(struct pthread_attr *) __hidden;
void	_thr_free(struct pthread *, struct pthread *) __hidden;
void	_thr_gc(struct pthread *) __hidden;
void	_thread_cleanupspecific(void) __hidden;
void	_thread_dump_info(void) __hidden;
void	_thread_printf(int, const char *, ...) __hidden;
void	_thr_spinlock_init(void) __hidden;
void	_thr_cancel_enter(struct pthread *) __hidden;
void	_thr_cancel_leave(struct pthread *) __hidden;
void	_thr_cancel_enter_defer(struct pthread *) __hidden;
void	_thr_cancel_leave_defer(struct pthread *, int) __hidden;
void	_thr_testcancel(struct pthread *) __hidden;
void	_thr_signal_block(struct pthread *) __hidden;
void	_thr_signal_unblock(struct pthread *) __hidden;
void	_thr_signal_init(void) __hidden;
void	_thr_signal_deinit(void) __hidden;
int	_thr_send_sig(struct pthread *, int sig) __hidden;
void	_thr_list_init(void) __hidden;
void	_thr_hash_add(struct pthread *) __hidden;
void	_thr_hash_remove(struct pthread *) __hidden;
struct pthread *_thr_hash_find(struct pthread *) __hidden;
void	_thr_link(struct pthread *, struct pthread *) __hidden;
void	_thr_unlink(struct pthread *, struct pthread *) __hidden;
void	_thr_suspend_check(struct pthread *) __hidden;
void	_thr_assert_lock_level(void) __hidden __dead2;
void	_thr_ast(struct pthread *) __hidden;
void	_thr_once_init(void) __hidden;
void	_thr_report_creation(struct pthread *curthread,
	    struct pthread *newthread) __hidden;
void	_thr_report_death(struct pthread *curthread) __hidden;
int	_thr_getscheduler(lwpid_t, int *, struct sched_param *) __hidden;
int	_thr_setscheduler(lwpid_t, int, const struct sched_param *) __hidden;
int	_rtp_to_schedparam(const struct rtprio *rtp, int *policy,
	    struct sched_param *param) __hidden;
int	_schedparam_to_rtp(int policy, const struct sched_param *param,
	    struct rtprio *rtp) __hidden;
void	_thread_bp_create(void);
void	_thread_bp_death(void);
int	_sched_yield(void);

/* #include <fcntl.h> */
#ifdef _SYS_FCNTL_H_
int	__sys_fcntl(int, int, ...);
int	__sys_open(const char *, int, ...);
#endif

/* #include <signal.h> */
#ifdef _SIGNAL_H_
int	__sys_kill(pid_t, int);
int	__sys_sigaction(int, const struct sigaction *, struct sigaction *);
int	__sys_sigpending(sigset_t *);
int	__sys_sigprocmask(int, const sigset_t *, sigset_t *);
int	__sys_sigsuspend(const sigset_t *);
int	__sys_sigreturn(ucontext_t *);
int	__sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
int	__sys_sigwait(const sigset_t *, int *);
int	__sys_sigtimedwait(const sigset_t *, siginfo_t *,
	    const struct timespec *);
int	__sys_sigwaitinfo(const sigset_t *set, siginfo_t *info);
#endif

/* #include <time.h> */
#ifdef	_TIME_H_
int	__sys_nanosleep(const struct timespec *, struct timespec *);
#endif

/* #include <unistd.h> */
#ifdef	_UNISTD_H_
int	__sys_close(int);
int	__sys_fork(void);
pid_t	__sys_getpid(void);
ssize_t	__sys_read(int, void *, size_t);
ssize_t	__sys_write(int, const void *, size_t);
void	__sys_exit(int);
#endif

int	_umtx_op_err(void *, int op, u_long, void *, void *) __hidden;

static inline int
_thr_isthreaded(void)
{
	return (__isthreaded != 0);
}

static inline int
_thr_is_inited(void)
{
	return (_thr_initial != NULL);
}

static inline void
_thr_check_init(void)
{
	if (_thr_initial == NULL)
		_libpthread_init(NULL);
}

__END_DECLS

#endif /* !_THR_PRIVATE_H */