/*
 * Copyright (c) 2003 Daniel M. Eischen <deischen@freebsd.org>
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include "namespace.h"
|
|
|
|
#include <sys/types.h>
|
2005-04-02 01:20:00 +00:00
|
|
|
#include <sys/signalvar.h>
|
2003-04-01 03:46:29 +00:00
|
|
|
#include <sys/ioctl.h>
|
|
|
|
#include <sys/sysctl.h>
|
|
|
|
#include <sys/ttycom.h>
|
|
|
|
#include <sys/mman.h>
|
2006-07-12 06:13:18 +00:00
|
|
|
#include <sys/rtprio.h>
|
2003-04-01 03:46:29 +00:00
|
|
|
#include <errno.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
#include <paths.h>
|
|
|
|
#include <pthread.h>
|
2005-04-02 01:20:00 +00:00
|
|
|
#include <pthread_np.h>
|
2003-04-01 03:46:29 +00:00
|
|
|
#include <signal.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
2006-01-10 04:53:03 +00:00
|
|
|
#include <time.h>
|
2003-04-01 03:46:29 +00:00
|
|
|
#include <unistd.h>
|
|
|
|
#include "un-namespace.h"
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
#include "libc_private.h"
|
2003-04-01 03:46:29 +00:00
|
|
|
#include "thr_private.h"
|
|
|
|
|
2006-04-04 02:57:49 +00:00
|
|
|
char *_usrstack;
|
2005-12-21 03:14:06 +00:00
|
|
|
struct pthread *_thr_initial;
|
|
|
|
int _libthr_debug;
|
|
|
|
int _thread_event_mask;
|
|
|
|
struct pthread *_thread_last_event;
|
|
|
|
pthreadlist _thread_list = TAILQ_HEAD_INITIALIZER(_thread_list);
|
|
|
|
pthreadlist _thread_gc_list = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
|
|
|
|
int _thread_active_threads = 1;
|
|
|
|
atfork_head _thr_atfork_list = TAILQ_HEAD_INITIALIZER(_thr_atfork_list);
|
2010-09-01 03:11:21 +00:00
|
|
|
struct urwlock _thr_atfork_lock = DEFAULT_URWLOCK;
|
2005-12-21 03:14:06 +00:00
|
|
|
|
2006-04-27 08:18:23 +00:00
|
|
|
struct pthread_prio _thr_priorities[3] = {
|
2006-07-12 06:13:18 +00:00
|
|
|
{RTP_PRIO_MIN, RTP_PRIO_MAX, 0}, /* FIFO */
|
|
|
|
{0, 0, 63}, /* OTHER */
|
|
|
|
{RTP_PRIO_MIN, RTP_PRIO_MAX, 0} /* RR */
|
2006-04-27 08:18:23 +00:00
|
|
|
};
|
|
|
|
|
2005-12-21 03:14:06 +00:00
|
|
|
struct pthread_attr _pthread_attr_default = {
|
2006-03-27 23:50:21 +00:00
|
|
|
.sched_policy = SCHED_OTHER,
|
2008-03-05 07:01:20 +00:00
|
|
|
.sched_inherit = PTHREAD_INHERIT_SCHED,
|
2006-04-27 08:18:23 +00:00
|
|
|
.prio = 0,
|
2005-12-21 03:14:06 +00:00
|
|
|
.suspend = THR_CREATE_RUNNING,
|
2006-03-20 03:14:14 +00:00
|
|
|
.flags = PTHREAD_SCOPE_SYSTEM,
|
2005-12-21 03:14:06 +00:00
|
|
|
.stackaddr_attr = NULL,
|
|
|
|
.stacksize_attr = THR_STACK_DEFAULT,
|
2008-03-04 03:03:24 +00:00
|
|
|
.guardsize_attr = 0,
|
|
|
|
.cpusetsize = 0,
|
|
|
|
.cpuset = NULL
|
2005-12-21 03:14:06 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
struct pthread_mutex_attr _pthread_mutexattr_default = {
|
|
|
|
.m_type = PTHREAD_MUTEX_DEFAULT,
|
|
|
|
.m_protocol = PTHREAD_PRIO_NONE,
|
2008-05-29 07:57:33 +00:00
|
|
|
.m_ceiling = 0
|
2005-12-21 03:14:06 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/* Default condition variable attributes: */
|
|
|
|
struct pthread_cond_attr _pthread_condattr_default = {
|
|
|
|
.c_pshared = PTHREAD_PROCESS_PRIVATE,
|
|
|
|
.c_clockid = CLOCK_REALTIME
|
|
|
|
};
|
|
|
|
|
|
|
|
pid_t _thr_pid;
|
2006-12-15 11:52:01 +00:00
|
|
|
int _thr_is_smp = 0;
|
2006-04-04 02:57:49 +00:00
|
|
|
size_t _thr_guard_default;
|
|
|
|
size_t _thr_stack_default = THR_STACK_DEFAULT;
|
|
|
|
size_t _thr_stack_initial = THR_STACK_INITIAL;
|
2005-12-21 03:14:06 +00:00
|
|
|
int _thr_page_size;
|
Add my recent work of adaptive spin mutex code. Use two environments variable
to tune pthread mutex performance:
1. LIBPTHREAD_SPINLOOPS
If a pthread mutex is being locked by another thread, this environment
variable sets total number of spin loops before the current thread
sleeps in kernel, this saves a syscall overhead if the mutex will be
unlocked very soon (well written application code).
2. LIBPTHREAD_YIELDLOOPS
If a pthread mutex is being locked by other threads, this environment
variable sets total number of sched_yield() loops before the currrent
thread sleeps in kernel. if a pthread mutex is locked, the current thread
gives up cpu, but will not sleep in kernel, this means, current thread
does not set contention bit in mutex, but let lock owner to run again
if the owner is on kernel's run queue, and when lock owner unlocks the
mutex, it does not need to enter kernel and do lots of work to resume
mutex waiters, in some cases, this saves lots of syscall overheads for
mutex owner.
In my practice, sometimes LIBPTHREAD_YIELDLOOPS can massively improve performance
than LIBPTHREAD_SPINLOOPS, this depends on application. These two environments
are global to all pthread mutex, there is no interface to set them for each
pthread mutex, the default values are zero, this means spinning is turned off
by default.
2007-10-30 05:57:37 +00:00
|
|
|
int _thr_spinloops;
|
|
|
|
int _thr_yieldloops;
|
2005-12-21 03:14:06 +00:00
|
|
|
int _gc_count;
|
2006-09-06 04:04:10 +00:00
|
|
|
struct umutex _mutex_static_lock = DEFAULT_UMUTEX;
|
|
|
|
struct umutex _cond_static_lock = DEFAULT_UMUTEX;
|
|
|
|
struct umutex _rwlock_static_lock = DEFAULT_UMUTEX;
|
|
|
|
struct umutex _keytable_lock = DEFAULT_UMUTEX;
|
|
|
|
struct umutex _thr_list_lock = DEFAULT_UMUTEX;
|
|
|
|
struct umutex _thr_event_lock = DEFAULT_UMUTEX;
|
2005-12-21 03:14:06 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
|
|
|
|
int __pthread_mutex_lock(pthread_mutex_t *);
|
|
|
|
int __pthread_mutex_trylock(pthread_mutex_t *);
|
|
|
|
void _thread_init_hack(void) __attribute__ ((constructor));
|
|
|
|
|
|
|
|
static void init_private(void);
|
|
|
|
static void init_main_thread(struct pthread *thread);
|
2003-06-04 08:23:05 +00:00
|
|
|
|
/*
 * All weak references used within libc should be in this table.
 * This is so that static libraries will work.
 */

STATIC_LIB_REQUIRE(_fork);
STATIC_LIB_REQUIRE(_pthread_getspecific);
STATIC_LIB_REQUIRE(_pthread_key_create);
STATIC_LIB_REQUIRE(_pthread_key_delete);
STATIC_LIB_REQUIRE(_pthread_mutex_destroy);
STATIC_LIB_REQUIRE(_pthread_mutex_init);
STATIC_LIB_REQUIRE(_pthread_mutex_lock);
STATIC_LIB_REQUIRE(_pthread_mutex_trylock);
STATIC_LIB_REQUIRE(_pthread_mutex_unlock);
STATIC_LIB_REQUIRE(_pthread_mutexattr_init);
STATIC_LIB_REQUIRE(_pthread_mutexattr_destroy);
STATIC_LIB_REQUIRE(_pthread_mutexattr_settype);
STATIC_LIB_REQUIRE(_pthread_once);
STATIC_LIB_REQUIRE(_pthread_setspecific);
STATIC_LIB_REQUIRE(_raise);
STATIC_LIB_REQUIRE(_sem_destroy);
STATIC_LIB_REQUIRE(_sem_getvalue);
STATIC_LIB_REQUIRE(_sem_init);
STATIC_LIB_REQUIRE(_sem_post);
STATIC_LIB_REQUIRE(_sem_timedwait);
STATIC_LIB_REQUIRE(_sem_trywait);
STATIC_LIB_REQUIRE(_sem_wait);
STATIC_LIB_REQUIRE(_sigaction);
STATIC_LIB_REQUIRE(_sigprocmask);
STATIC_LIB_REQUIRE(_sigsuspend);
STATIC_LIB_REQUIRE(_sigtimedwait);
STATIC_LIB_REQUIRE(_sigwait);
STATIC_LIB_REQUIRE(_sigwaitinfo);
STATIC_LIB_REQUIRE(_spinlock);
STATIC_LIB_REQUIRE(_spinlock_debug);
STATIC_LIB_REQUIRE(_spinunlock);
STATIC_LIB_REQUIRE(_thread_init_hack);
/*
 * These are needed when linking statically.  All references within
 * libgcc (and in the future libc) to these routines are weak, but
 * if they are not (strongly) referenced by the application or other
 * libraries, then the actual functions will not be loaded.
 */
STATIC_LIB_REQUIRE(_pthread_once);
STATIC_LIB_REQUIRE(_pthread_key_create);
STATIC_LIB_REQUIRE(_pthread_key_delete);
STATIC_LIB_REQUIRE(_pthread_getspecific);
STATIC_LIB_REQUIRE(_pthread_setspecific);
STATIC_LIB_REQUIRE(_pthread_mutex_init);
STATIC_LIB_REQUIRE(_pthread_mutex_destroy);
STATIC_LIB_REQUIRE(_pthread_mutex_lock);
STATIC_LIB_REQUIRE(_pthread_mutex_trylock);
STATIC_LIB_REQUIRE(_pthread_mutex_unlock);
STATIC_LIB_REQUIRE(_pthread_create);

/* Pull in all symbols required by libthread_db */
STATIC_LIB_REQUIRE(_thread_state_running);
2005-04-02 01:20:00 +00:00
|
|
|
#define DUAL_ENTRY(entry) \
|
|
|
|
(pthread_func_t)entry, (pthread_func_t)entry
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
static pthread_func_t jmp_table[][2] = {
|
2006-03-05 18:10:28 +00:00
|
|
|
{DUAL_ENTRY(_pthread_atfork)}, /* PJT_ATFORK */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_destroy)}, /* PJT_ATTR_DESTROY */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_getdetachstate)}, /* PJT_ATTR_GETDETACHSTATE */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_getguardsize)}, /* PJT_ATTR_GETGUARDSIZE */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_getinheritsched)}, /* PJT_ATTR_GETINHERITSCHED */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_getschedparam)}, /* PJT_ATTR_GETSCHEDPARAM */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_getschedpolicy)}, /* PJT_ATTR_GETSCHEDPOLICY */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_getscope)}, /* PJT_ATTR_GETSCOPE */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_getstackaddr)}, /* PJT_ATTR_GETSTACKADDR */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_getstacksize)}, /* PJT_ATTR_GETSTACKSIZE */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_init)}, /* PJT_ATTR_INIT */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_setdetachstate)}, /* PJT_ATTR_SETDETACHSTATE */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_setguardsize)}, /* PJT_ATTR_SETGUARDSIZE */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_setinheritsched)}, /* PJT_ATTR_SETINHERITSCHED */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_setschedparam)}, /* PJT_ATTR_SETSCHEDPARAM */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_setschedpolicy)}, /* PJT_ATTR_SETSCHEDPOLICY */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_setscope)}, /* PJT_ATTR_SETSCOPE */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_setstackaddr)}, /* PJT_ATTR_SETSTACKADDR */
|
|
|
|
{DUAL_ENTRY(_pthread_attr_setstacksize)}, /* PJT_ATTR_SETSTACKSIZE */
|
|
|
|
{DUAL_ENTRY(_pthread_cancel)}, /* PJT_CANCEL */
|
|
|
|
{DUAL_ENTRY(_pthread_cleanup_pop)}, /* PJT_CLEANUP_POP */
|
|
|
|
{DUAL_ENTRY(_pthread_cleanup_push)}, /* PJT_CLEANUP_PUSH */
|
2005-04-02 01:20:00 +00:00
|
|
|
{DUAL_ENTRY(_pthread_cond_broadcast)}, /* PJT_COND_BROADCAST */
|
|
|
|
{DUAL_ENTRY(_pthread_cond_destroy)}, /* PJT_COND_DESTROY */
|
|
|
|
{DUAL_ENTRY(_pthread_cond_init)}, /* PJT_COND_INIT */
|
|
|
|
{DUAL_ENTRY(_pthread_cond_signal)}, /* PJT_COND_SIGNAL */
|
2006-03-05 18:10:28 +00:00
|
|
|
{DUAL_ENTRY(_pthread_cond_timedwait)}, /* PJT_COND_TIMEDWAIT */
|
2005-04-02 01:20:00 +00:00
|
|
|
{(pthread_func_t)__pthread_cond_wait,
|
|
|
|
(pthread_func_t)_pthread_cond_wait}, /* PJT_COND_WAIT */
|
2006-03-05 18:10:28 +00:00
|
|
|
{DUAL_ENTRY(_pthread_detach)}, /* PJT_DETACH */
|
|
|
|
{DUAL_ENTRY(_pthread_equal)}, /* PJT_EQUAL */
|
|
|
|
{DUAL_ENTRY(_pthread_exit)}, /* PJT_EXIT */
|
2005-04-02 01:20:00 +00:00
|
|
|
{DUAL_ENTRY(_pthread_getspecific)}, /* PJT_GETSPECIFIC */
|
2006-03-05 18:10:28 +00:00
|
|
|
{DUAL_ENTRY(_pthread_join)}, /* PJT_JOIN */
|
2005-04-02 01:20:00 +00:00
|
|
|
{DUAL_ENTRY(_pthread_key_create)}, /* PJT_KEY_CREATE */
|
|
|
|
{DUAL_ENTRY(_pthread_key_delete)}, /* PJT_KEY_DELETE*/
|
2006-03-05 18:10:28 +00:00
|
|
|
{DUAL_ENTRY(_pthread_kill)}, /* PJT_KILL */
|
2005-04-02 01:20:00 +00:00
|
|
|
{DUAL_ENTRY(_pthread_main_np)}, /* PJT_MAIN_NP */
|
2006-03-05 18:10:28 +00:00
|
|
|
{DUAL_ENTRY(_pthread_mutexattr_destroy)}, /* PJT_MUTEXATTR_DESTROY */
|
|
|
|
{DUAL_ENTRY(_pthread_mutexattr_init)}, /* PJT_MUTEXATTR_INIT */
|
|
|
|
{DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */
|
2005-04-02 01:20:00 +00:00
|
|
|
{DUAL_ENTRY(_pthread_mutex_destroy)}, /* PJT_MUTEX_DESTROY */
|
|
|
|
{DUAL_ENTRY(_pthread_mutex_init)}, /* PJT_MUTEX_INIT */
|
|
|
|
{(pthread_func_t)__pthread_mutex_lock,
|
|
|
|
(pthread_func_t)_pthread_mutex_lock}, /* PJT_MUTEX_LOCK */
|
|
|
|
{(pthread_func_t)__pthread_mutex_trylock,
|
|
|
|
(pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */
|
|
|
|
{DUAL_ENTRY(_pthread_mutex_unlock)}, /* PJT_MUTEX_UNLOCK */
|
|
|
|
{DUAL_ENTRY(_pthread_once)}, /* PJT_ONCE */
|
|
|
|
{DUAL_ENTRY(_pthread_rwlock_destroy)}, /* PJT_RWLOCK_DESTROY */
|
|
|
|
{DUAL_ENTRY(_pthread_rwlock_init)}, /* PJT_RWLOCK_INIT */
|
|
|
|
{DUAL_ENTRY(_pthread_rwlock_rdlock)}, /* PJT_RWLOCK_RDLOCK */
|
|
|
|
{DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */
|
|
|
|
{DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */
|
|
|
|
{DUAL_ENTRY(_pthread_rwlock_unlock)}, /* PJT_RWLOCK_UNLOCK */
|
|
|
|
{DUAL_ENTRY(_pthread_rwlock_wrlock)}, /* PJT_RWLOCK_WRLOCK */
|
|
|
|
{DUAL_ENTRY(_pthread_self)}, /* PJT_SELF */
|
2006-03-05 18:10:28 +00:00
|
|
|
{DUAL_ENTRY(_pthread_setcancelstate)}, /* PJT_SETCANCELSTATE */
|
|
|
|
{DUAL_ENTRY(_pthread_setcanceltype)}, /* PJT_SETCANCELTYPE */
|
2005-04-02 01:20:00 +00:00
|
|
|
{DUAL_ENTRY(_pthread_setspecific)}, /* PJT_SETSPECIFIC */
|
2006-03-05 18:10:28 +00:00
|
|
|
{DUAL_ENTRY(_pthread_sigmask)}, /* PJT_SIGMASK */
|
Use umtx to implement process sharable semaphore, to make this work,
now type sema_t is a structure which can be put in a shared memory area,
and multiple processes can operate it concurrently.
User can either use mmap(MAP_SHARED) + sem_init(pshared=1) or use sem_open()
to initialize a shared semaphore.
Named semaphore uses file system and is located in /tmp directory, and its
file name is prefixed with 'SEMD', so now it is chroot or jail friendly.
In simplist cases, both for named and un-named semaphore, userland code
does not have to enter kernel to reduce/increase semaphore's count.
The semaphore is designed to be crash-safe, it means even if an application
is crashed in the middle of operating semaphore, the semaphore state is
still safely recovered by later use, there is no waiter counter maintained
by userland code.
The main semaphore code is in libc and libthr only has some necessary stubs,
this makes it possible that a non-threaded application can use semaphore
without linking to thread library.
Old semaphore implementation is kept libc to maintain binary compatibility.
The kernel ksem API is no longer used in the new implemenation.
Discussed on: threads@
2010-01-05 02:37:59 +00:00
|
|
|
{DUAL_ENTRY(_pthread_testcancel)}, /* PJT_TESTCANCEL */
|
|
|
|
{DUAL_ENTRY(__pthread_cleanup_pop_imp)},/* PJT_CLEANUP_POP_IMP */
|
|
|
|
{DUAL_ENTRY(__pthread_cleanup_push_imp)}/* PJT_CLEANUP_PUSH_IMP */
|
2005-04-02 01:20:00 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static int init_once = 0;
|
/*
 * For the shared version of the threads library, the above is sufficient.
 * But for the archive version of the library, we need a little bit more.
 * Namely, we must arrange for this particular module to be pulled in from
 * the archive library at link time.  To accomplish that, we define and
 * initialize a variable, "_thread_autoinit_dummy_decl".  This variable is
 * referenced (as an extern) from libc/stdlib/exit.c.  This will always
 * create a need for this module, ensuring that it is present in the
 * executable.
 */
extern int _thread_autoinit_dummy_decl;
int _thread_autoinit_dummy_decl = 0;
2003-12-26 08:16:17 +00:00
|
|
|
void
|
2005-04-02 01:20:00 +00:00
|
|
|
_thread_init_hack(void)
|
2003-12-26 08:16:17 +00:00
|
|
|
{
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
_libpthread_init(NULL);
|
2003-12-26 08:16:17 +00:00
|
|
|
}
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
|
2003-04-01 03:46:29 +00:00
|
|
|
/*
|
2005-04-02 01:20:00 +00:00
|
|
|
* Threaded process initialization.
|
|
|
|
*
|
|
|
|
* This is only called under two conditions:
|
|
|
|
*
|
|
|
|
* 1) Some thread routines have detected that the library hasn't yet
|
|
|
|
* been initialized (_thr_initial == NULL && curthread == NULL), or
|
|
|
|
*
|
|
|
|
* 2) An explicit call to reinitialize after a fork (indicated
|
|
|
|
* by curthread != NULL)
|
2003-04-01 03:46:29 +00:00
|
|
|
*/
|
|
|
|
void
|
2005-04-02 01:20:00 +00:00
|
|
|
_libpthread_init(struct pthread *curthread)
|
2003-04-01 03:46:29 +00:00
|
|
|
{
|
2005-04-02 01:20:00 +00:00
|
|
|
int fd, first = 0;
|
2003-04-01 03:46:29 +00:00
|
|
|
|
|
|
|
/* Check if this function has already been called: */
|
2005-04-02 01:20:00 +00:00
|
|
|
if ((_thr_initial != NULL) && (curthread == NULL))
|
|
|
|
/* Only initialize the threaded application once. */
|
2003-04-01 03:46:29 +00:00
|
|
|
return;
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
/*
|
|
|
|
* Check the size of the jump table to make sure it is preset
|
|
|
|
* with the correct number of entries.
|
|
|
|
*/
|
|
|
|
if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2))
|
|
|
|
PANIC("Thread jump table not properly initialized");
|
|
|
|
memcpy(__thr_jtable, jmp_table, sizeof(jmp_table));
|
|
|
|
|
2003-04-01 03:46:29 +00:00
|
|
|
/*
|
|
|
|
* Check for the special case of this process running as
|
|
|
|
* or in place of init as pid = 1:
|
|
|
|
*/
|
2005-04-02 01:20:00 +00:00
|
|
|
if ((_thr_pid = getpid()) == 1) {
|
2003-04-01 03:46:29 +00:00
|
|
|
/*
|
|
|
|
* Setup a new session for this process which is
|
|
|
|
* assumed to be running as root.
|
|
|
|
*/
|
|
|
|
if (setsid() == -1)
|
|
|
|
PANIC("Can't set session ID");
|
|
|
|
if (revoke(_PATH_CONSOLE) != 0)
|
|
|
|
PANIC("Can't revoke console");
|
|
|
|
if ((fd = __sys_open(_PATH_CONSOLE, O_RDWR)) < 0)
|
|
|
|
PANIC("Can't open console");
|
|
|
|
if (setlogin("root") == -1)
|
|
|
|
PANIC("Can't set login to root");
|
2006-04-04 02:57:49 +00:00
|
|
|
if (_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1)
|
2003-04-01 03:46:29 +00:00
|
|
|
PANIC("Can't set controlling terminal");
|
|
|
|
}
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
/* Initialize pthread private data. */
|
|
|
|
init_private();
|
|
|
|
|
|
|
|
/* Set the initial thread. */
|
|
|
|
if (curthread == NULL) {
|
|
|
|
first = 1;
|
|
|
|
/* Create and initialize the initial thread. */
|
|
|
|
curthread = _thr_alloc(NULL);
|
|
|
|
if (curthread == NULL)
|
|
|
|
PANIC("Can't allocate initial thread");
|
|
|
|
init_main_thread(curthread);
|
2003-04-01 03:46:29 +00:00
|
|
|
}
|
2005-04-02 01:20:00 +00:00
|
|
|
/*
|
|
|
|
* Add the thread to the thread list queue.
|
|
|
|
*/
|
|
|
|
THR_LIST_ADD(curthread);
|
|
|
|
_thread_active_threads = 1;
|
2003-05-21 03:22:36 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
/* Setup the thread specific data */
|
|
|
|
_tcb_set(curthread->tcb);
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
if (first) {
|
|
|
|
_thr_initial = curthread;
|
Add signal handler wrapper, the reason to add it becauses there are
some cases we want to improve:
1) if a thread signal got a signal while in cancellation point,
it is possible the TDP_WAKEUP may be eaten by signal handler
if the handler called some interruptibly system calls.
2) In signal handler, we want to disable cancellation.
3) When thread holding some low level locks, it is better to
disable signal, those code need not to worry reentrancy,
sigprocmask system call is avoided because it is a bit expensive.
The signal handler wrapper works in this way:
1) libthr installs its signal handler if user code invokes sigaction
to install its handler, the user handler is recorded in internal
array.
2) when a signal is delivered, libthr's signal handler is invoke,
libthr checks if thread holds some low level lock or is in critical
region, if it is true, the signal is buffered, and all signals are
masked, once the thread leaves critical region, correct signal
mask is restored and buffered signal is processed.
3) before user signal handler is invoked, cancellation is temporarily
disabled, after user signal handler is returned, cancellation state
is restored, and pending cancellation is rescheduled.
2010-09-01 02:18:33 +00:00
|
|
|
_thr_signal_init();
|
2005-04-12 03:13:49 +00:00
|
|
|
if (_thread_event_mask & TD_CREATE)
|
|
|
|
_thr_report_creation(curthread, curthread);
|
2005-04-02 01:20:00 +00:00
|
|
|
}
|
|
|
|
}
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
/*
|
|
|
|
* This function and pthread_create() do a lot of the same things.
|
|
|
|
* It'd be nice to consolidate the common stuff in one place.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
init_main_thread(struct pthread *thread)
|
|
|
|
{
|
2006-07-13 22:45:19 +00:00
|
|
|
struct sched_param sched_param;
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
/* Setup the thread attributes. */
|
|
|
|
thr_self(&thread->tid);
|
|
|
|
thread->attr = _pthread_attr_default;
|
2003-04-01 03:46:29 +00:00
|
|
|
/*
|
2005-04-02 01:20:00 +00:00
|
|
|
* Set up the thread stack.
|
|
|
|
*
|
|
|
|
* Create a red zone below the main stack. All other stacks
|
|
|
|
* are constrained to a maximum size by the parameters
|
|
|
|
* passed to mmap(), but this stack is only limited by
|
|
|
|
* resource limits, so this stack needs an explicitly mapped
|
|
|
|
* red zone to protect the thread stack that is just beyond.
|
2003-04-01 03:46:29 +00:00
|
|
|
*/
|
2006-04-04 02:57:49 +00:00
|
|
|
if (mmap(_usrstack - _thr_stack_initial -
|
2005-04-02 01:20:00 +00:00
|
|
|
_thr_guard_default, _thr_guard_default, 0, MAP_ANON,
|
|
|
|
-1, 0) == MAP_FAILED)
|
2003-04-01 03:46:29 +00:00
|
|
|
PANIC("Cannot allocate red zone for initial thread");
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
/*
|
|
|
|
* Mark the stack as an application supplied stack so that it
|
|
|
|
* isn't deallocated.
|
|
|
|
*
|
|
|
|
* XXX - I'm not sure it would hurt anything to deallocate
|
|
|
|
* the main thread stack because deallocation doesn't
|
|
|
|
* actually free() it; it just puts it in the free
|
|
|
|
* stack queue for later reuse.
|
|
|
|
*/
|
2006-04-04 02:57:49 +00:00
|
|
|
thread->attr.stackaddr_attr = _usrstack - _thr_stack_initial;
|
2005-04-02 01:20:00 +00:00
|
|
|
thread->attr.stacksize_attr = _thr_stack_initial;
|
|
|
|
thread->attr.guardsize_attr = _thr_guard_default;
|
|
|
|
thread->attr.flags |= THR_STACK_USER;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Write a magic value to the thread structure
|
|
|
|
* to help identify valid ones:
|
|
|
|
*/
|
|
|
|
thread->magic = THR_MAGIC;
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2006-11-24 09:57:38 +00:00
|
|
|
thread->cancel_enable = 1;
|
|
|
|
thread->cancel_async = 0;
|
2006-02-05 03:04:54 +00:00
|
|
|
thr_set_name(thread->tid, "initial thread");
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
/* Initialize the mutex queue: */
|
|
|
|
TAILQ_INIT(&thread->mutexq);
|
2006-08-28 04:52:50 +00:00
|
|
|
TAILQ_INIT(&thread->pp_mutexq);
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
thread->state = PS_RUNNING;
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2006-09-21 04:21:30 +00:00
|
|
|
_thr_getscheduler(thread->tid, &thread->attr.sched_policy,
|
|
|
|
&sched_param);
|
2006-07-13 22:45:19 +00:00
|
|
|
thread->attr.prio = sched_param.sched_priority;
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
/* Others cleared to zero by thr_alloc() */
|
2003-04-01 03:46:29 +00:00
|
|
|
}
|
2005-04-02 01:20:00 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
init_private(void)
|
|
|
|
{
|
|
|
|
size_t len;
|
|
|
|
int mib[2];
|
Add my recent work of adaptive spin mutex code. Use two environments variable
to tune pthread mutex performance:
1. LIBPTHREAD_SPINLOOPS
If a pthread mutex is being locked by another thread, this environment
variable sets total number of spin loops before the current thread
sleeps in kernel, this saves a syscall overhead if the mutex will be
unlocked very soon (well written application code).
2. LIBPTHREAD_YIELDLOOPS
If a pthread mutex is being locked by other threads, this environment
variable sets total number of sched_yield() loops before the currrent
thread sleeps in kernel. if a pthread mutex is locked, the current thread
gives up cpu, but will not sleep in kernel, this means, current thread
does not set contention bit in mutex, but let lock owner to run again
if the owner is on kernel's run queue, and when lock owner unlocks the
mutex, it does not need to enter kernel and do lots of work to resume
mutex waiters, in some cases, this saves lots of syscall overheads for
mutex owner.
In my practice, sometimes LIBPTHREAD_YIELDLOOPS can massively improve performance
than LIBPTHREAD_SPINLOOPS, this depends on application. These two environments
are global to all pthread mutex, there is no interface to set them for each
pthread mutex, the default values are zero, this means spinning is turned off
by default.
2007-10-30 05:57:37 +00:00
|
|
|
char *env;
|
2006-12-20 05:05:44 +00:00
|
|
|
|
2006-09-06 04:04:10 +00:00
|
|
|
_thr_umutex_init(&_mutex_static_lock);
|
|
|
|
_thr_umutex_init(&_cond_static_lock);
|
|
|
|
_thr_umutex_init(&_rwlock_static_lock);
|
|
|
|
_thr_umutex_init(&_keytable_lock);
|
2010-09-01 03:11:21 +00:00
|
|
|
_thr_urwlock_init(&_thr_atfork_lock);
|
2006-09-06 04:04:10 +00:00
|
|
|
_thr_umutex_init(&_thr_event_lock);
|
2006-02-15 23:05:03 +00:00
|
|
|
_thr_once_init();
|
2005-04-02 01:20:00 +00:00
|
|
|
_thr_spinlock_init();
|
|
|
|
_thr_list_init();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Avoid reinitializing some things if they don't need to be,
|
|
|
|
* e.g. after a fork().
|
|
|
|
*/
|
|
|
|
if (init_once == 0) {
|
|
|
|
/* Find the stack top */
|
|
|
|
mib[0] = CTL_KERN;
|
|
|
|
mib[1] = KERN_USRSTACK;
|
|
|
|
len = sizeof (_usrstack);
|
|
|
|
if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1)
|
|
|
|
PANIC("Cannot get kern.usrstack from sysctl");
|
2006-12-15 11:52:01 +00:00
|
|
|
len = sizeof(_thr_is_smp);
|
|
|
|
sysctlbyname("kern.smp.cpus", &_thr_is_smp, &len, NULL, 0);
|
|
|
|
_thr_is_smp = (_thr_is_smp > 1);
|
2005-04-02 01:20:00 +00:00
|
|
|
_thr_page_size = getpagesize();
|
|
|
|
_thr_guard_default = _thr_page_size;
|
|
|
|
_pthread_attr_default.guardsize_attr = _thr_guard_default;
|
|
|
|
_pthread_attr_default.stacksize_attr = _thr_stack_default;
|
Add my recent work of adaptive spin mutex code. Use two environments variable
to tune pthread mutex performance:
1. LIBPTHREAD_SPINLOOPS
If a pthread mutex is being locked by another thread, this environment
variable sets total number of spin loops before the current thread
sleeps in kernel, this saves a syscall overhead if the mutex will be
unlocked very soon (well written application code).
2. LIBPTHREAD_YIELDLOOPS
If a pthread mutex is being locked by other threads, this environment
variable sets total number of sched_yield() loops before the currrent
thread sleeps in kernel. if a pthread mutex is locked, the current thread
gives up cpu, but will not sleep in kernel, this means, current thread
does not set contention bit in mutex, but let lock owner to run again
if the owner is on kernel's run queue, and when lock owner unlocks the
mutex, it does not need to enter kernel and do lots of work to resume
mutex waiters, in some cases, this saves lots of syscall overheads for
mutex owner.
In my practice, sometimes LIBPTHREAD_YIELDLOOPS can massively improve performance
than LIBPTHREAD_SPINLOOPS, this depends on application. These two environments
are global to all pthread mutex, there is no interface to set them for each
pthread mutex, the default values are zero, this means spinning is turned off
by default.
2007-10-30 05:57:37 +00:00
|
|
|
env = getenv("LIBPTHREAD_SPINLOOPS");
|
|
|
|
if (env)
|
|
|
|
_thr_spinloops = atoi(env);
|
|
|
|
env = getenv("LIBPTHREAD_YIELDLOOPS");
|
|
|
|
if (env)
|
|
|
|
_thr_yieldloops = atoi(env);
|
2005-04-02 01:20:00 +00:00
|
|
|
TAILQ_INIT(&_thr_atfork_list);
|
|
|
|
}
|
|
|
|
init_once = 1;
|
|
|
|
}
|