/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
 * Copyright (c) 1995-1998 by John Birrell <jb@cimlogic.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Chris Provenzano.
 * 4. The name of Chris Provenzano may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _PTHREAD_H_
#define _PTHREAD_H_

/*
 * Header files.
 */
#include <sys/cdefs.h>
#include <sys/_pthreadtypes.h>
#include <machine/_limits.h>
#include <sys/_types.h>
#include <sys/_sigset.h>
#include <sched.h>
#include <time.h>

/*
 * Run-time invariant values:
 */
#define PTHREAD_DESTRUCTOR_ITERATIONS	4
#define PTHREAD_KEYS_MAX		256
#define PTHREAD_STACK_MIN		__MINSIGSTKSZ
#define PTHREAD_THREADS_MAX		__ULONG_MAX
#define PTHREAD_BARRIER_SERIAL_THREAD	-1

/*
 * Flags for threads and thread attributes.
 */
#define PTHREAD_DETACHED		0x1
#define PTHREAD_SCOPE_SYSTEM		0x2
#define PTHREAD_INHERIT_SCHED		0x4
#define PTHREAD_NOFLOAT			0x8

#define PTHREAD_CREATE_DETACHED		PTHREAD_DETACHED
#define PTHREAD_CREATE_JOINABLE		0
#define PTHREAD_SCOPE_PROCESS		0
#define PTHREAD_EXPLICIT_SCHED		0

/*
 * Values for process shared/private attributes.
 */
#define PTHREAD_PROCESS_PRIVATE		0
#define PTHREAD_PROCESS_SHARED		1

/*
 * Flags for cancelling threads.
 */
#define PTHREAD_CANCEL_ENABLE		0
#define PTHREAD_CANCEL_DISABLE		1
#define PTHREAD_CANCEL_DEFERRED		0
#define PTHREAD_CANCEL_ASYNCHRONOUS	2
#define PTHREAD_CANCELED		((void *) 1)

/*
 * Flags for once initialization.
 */
#define PTHREAD_NEEDS_INIT	0
#define PTHREAD_DONE_INIT	1

/*
 * Static once initialization values.
 */
#define PTHREAD_ONCE_INIT	{ PTHREAD_NEEDS_INIT, NULL }
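
/*
 * Example (illustrative sketch): one-time initialization with
 * PTHREAD_ONCE_INIT and pthread_once().  The names init_once, init_state()
 * and get_state() below are hypothetical; init_state() runs exactly once,
 * no matter how many threads call get_state().
 *
 *	static pthread_once_t init_once = PTHREAD_ONCE_INIT;
 *	static int state;
 *
 *	static void init_state(void) { state = 42; }
 *
 *	int get_state(void)
 *	{
 *		(void)pthread_once(&init_once, init_state);
 *		return (state);
 *	}
 */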

/*
 * Static initialization values.
 */
#define PTHREAD_MUTEX_INITIALIZER	NULL
#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP	((pthread_mutex_t)1)
#define PTHREAD_COND_INITIALIZER	NULL
#define PTHREAD_RWLOCK_INITIALIZER	NULL

/*
 * Default attribute arguments (draft 4, deprecated).
 */
#ifndef PTHREAD_KERNEL
#define pthread_condattr_default	NULL
#define pthread_mutexattr_default	NULL
#define pthread_attr_default		NULL
#endif

#define PTHREAD_PRIO_NONE	0
#define PTHREAD_PRIO_INHERIT	1
#define PTHREAD_PRIO_PROTECT	2

/*
 * Mutex types (Single UNIX Specification, Version 2, 1997).
 *
 * Note that a mutex attribute with one of the following types:
 *
 *	PTHREAD_MUTEX_NORMAL
 *	PTHREAD_MUTEX_RECURSIVE
 *
 * will deviate from POSIX specified semantics.
 */
enum pthread_mutextype {
	PTHREAD_MUTEX_ERRORCHECK	= 1,	/* Default POSIX mutex */
	PTHREAD_MUTEX_RECURSIVE		= 2,	/* Recursive mutex */
	PTHREAD_MUTEX_NORMAL		= 3,	/* No error checking */
	PTHREAD_MUTEX_ADAPTIVE_NP	= 4,	/* Adaptive mutex, spins briefly before blocking on lock */
	PTHREAD_MUTEX_TYPE_MAX
};

#define PTHREAD_MUTEX_DEFAULT	PTHREAD_MUTEX_ERRORCHECK
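
/*
 * Example (illustrative sketch): PTHREAD_MUTEX_DEFAULT maps to
 * PTHREAD_MUTEX_ERRORCHECK here, so a recursive mutex has to be requested
 * explicitly through a mutex attribute (error handling omitted):
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t mtx;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&mtx, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */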

#define PTHREAD_MUTEX_STALLED	0
#define PTHREAD_MUTEX_ROBUST	1
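
/*
 * Example (illustrative sketch): a robust, process-shared mutex.  If the
 * previous owner terminated while holding the lock, pthread_mutex_lock()
 * returns EOWNERDEAD (<errno.h>) and the new owner can repair the protected
 * state and mark the mutex consistent before unlocking.  Here mtx is assumed
 * to point into shared memory and recover_state() is a hypothetical
 * application routine.
 *
 *	pthread_mutexattr_t attr;
 *	int error;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
 *	pthread_mutex_init(mtx, &attr);
 *
 *	error = pthread_mutex_lock(mtx);
 *	if (error == EOWNERDEAD) {
 *		recover_state();
 *		pthread_mutex_consistent(mtx);
 *	}
 *	pthread_mutex_unlock(mtx);
 */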

struct _pthread_cleanup_info {
	__uintptr_t	pthread_cleanup_pad[8];
};

/*
 * Thread function prototype definitions:
 */
__BEGIN_DECLS
int pthread_atfork(void (*)(void), void (*)(void), void (*)(void));
int pthread_attr_destroy(pthread_attr_t *);
int pthread_attr_getstack(
    const pthread_attr_t * __restrict, void ** __restrict,
    size_t * __restrict);
int pthread_attr_getstacksize(const pthread_attr_t * __restrict,
    size_t * __restrict);
int pthread_attr_getguardsize(const pthread_attr_t * __restrict,
    size_t * __restrict);
int pthread_attr_getstackaddr(const pthread_attr_t *, void **);
int pthread_attr_getdetachstate(const pthread_attr_t *,
    int *);
int pthread_attr_init(pthread_attr_t *);
int pthread_attr_setstacksize(pthread_attr_t *, size_t);
int pthread_attr_setguardsize(pthread_attr_t *, size_t);
int pthread_attr_setstack(pthread_attr_t *, void *,
    size_t);
int pthread_attr_setstackaddr(pthread_attr_t *, void *);
int pthread_attr_setdetachstate(pthread_attr_t *, int);
int pthread_barrier_destroy(pthread_barrier_t *);
int pthread_barrier_init(pthread_barrier_t * __restrict,
    const pthread_barrierattr_t * __restrict, unsigned);
int pthread_barrier_wait(pthread_barrier_t *);
int pthread_barrierattr_destroy(pthread_barrierattr_t *);
int pthread_barrierattr_getpshared(
    const pthread_barrierattr_t * __restrict, int * __restrict);
int pthread_barrierattr_init(pthread_barrierattr_t *);
int pthread_barrierattr_setpshared(pthread_barrierattr_t *, int);

#define pthread_cleanup_push(cleanup_routine, cleanup_arg) \
	{ \
		struct _pthread_cleanup_info __cleanup_info__; \
		__pthread_cleanup_push_imp(cleanup_routine, cleanup_arg, \
		    &__cleanup_info__); \
		{

#define pthread_cleanup_pop(execute) \
			(void)0; \
		} \
		__pthread_cleanup_pop_imp(execute); \
	}
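
/*
 * Example (illustrative sketch): pthread_cleanup_push() opens a block and
 * pthread_cleanup_pop() closes it, so the two must appear as a lexically
 * balanced pair in the same function.  The handler runs if the thread is
 * cancelled or exits inside the block, or when pthread_cleanup_pop(1)
 * executes it directly.  unlock_mutex(), do_cancellable_work() and the
 * mutex m are hypothetical.
 *
 *	pthread_mutex_lock(&m);
 *	pthread_cleanup_push(unlock_mutex, &m);
 *	do_cancellable_work();
 *	pthread_cleanup_pop(1);
 */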

int pthread_condattr_destroy(pthread_condattr_t *);
int pthread_condattr_getclock(const pthread_condattr_t * __restrict,
    clockid_t * __restrict);
int pthread_condattr_getpshared(const pthread_condattr_t *, int *);
int pthread_condattr_init(pthread_condattr_t *);
int pthread_condattr_setclock(pthread_condattr_t *, clockid_t);
int pthread_condattr_setpshared(pthread_condattr_t *, int);
int pthread_cond_broadcast(pthread_cond_t *);
int pthread_cond_destroy(pthread_cond_t *);
int pthread_cond_init(pthread_cond_t * __restrict,
    const pthread_condattr_t * __restrict);
int pthread_cond_signal(pthread_cond_t *);
int pthread_cond_timedwait(pthread_cond_t *,
    pthread_mutex_t * __mutex,
    const struct timespec *)
    __requires_exclusive(*__mutex);
int pthread_cond_wait(pthread_cond_t * __restrict,
    pthread_mutex_t * __restrict __mutex)
    __requires_exclusive(*__mutex);
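
/*
 * Example (illustrative sketch): waiting on a condition variable.  The
 * predicate is rechecked in a loop because pthread_cond_wait() may wake
 * spuriously, and the mutex is held around both the check and the wait.
 * The lock, cond and ready variables are assumed to be defined by the
 * application.
 *
 *	pthread_mutex_lock(&lock);
 *	while (!ready)
 *		pthread_cond_wait(&cond, &lock);
 *	pthread_mutex_unlock(&lock);
 */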

int pthread_create(pthread_t * __restrict,
    const pthread_attr_t * __restrict, void *(*) (void *),
    void * __restrict);
int pthread_detach(pthread_t);
int pthread_equal(pthread_t, pthread_t);
void pthread_exit(void *) __dead2;
void *pthread_getspecific(pthread_key_t);
int pthread_getcpuclockid(pthread_t, clockid_t *);
int pthread_join(pthread_t, void **);
int pthread_key_create(pthread_key_t *, void (*) (void *));
int pthread_key_delete(pthread_key_t);
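
/*
 * Example (illustrative sketch): thread-specific data.  Each thread sees
 * its own value for the key, and the destructor runs on non-NULL values at
 * thread exit (retried for at most PTHREAD_DESTRUCTOR_ITERATIONS passes if
 * values are re-set).  The free_buf() destructor and BUF_SIZE constant are
 * hypothetical.
 *
 *	static pthread_key_t buf_key;
 *
 *	pthread_key_create(&buf_key, free_buf);
 *	pthread_setspecific(buf_key, malloc(BUF_SIZE));
 *	...
 *	char *buf = pthread_getspecific(buf_key);
 */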

int pthread_mutexattr_init(pthread_mutexattr_t *);
int pthread_mutexattr_destroy(pthread_mutexattr_t *);
int pthread_mutexattr_getpshared(
    const pthread_mutexattr_t * __restrict,
    int * __restrict);
int pthread_mutexattr_gettype(
    const pthread_mutexattr_t * __restrict, int * __restrict);
int pthread_mutexattr_settype(pthread_mutexattr_t *, int);
int pthread_mutexattr_setpshared(pthread_mutexattr_t *, int);
int pthread_mutex_consistent(pthread_mutex_t * __mutex)
    __requires_exclusive(*__mutex);
int pthread_mutex_destroy(pthread_mutex_t * __mutex)
    __requires_unlocked(*__mutex);
int pthread_mutex_init(pthread_mutex_t * __restrict __mutex,
    const pthread_mutexattr_t * __restrict)
    __requires_unlocked(*__mutex);
int pthread_mutex_lock(pthread_mutex_t * __mutex)
    __locks_exclusive(*__mutex);
int pthread_mutex_trylock(pthread_mutex_t * __mutex)
    __trylocks_exclusive(0, *__mutex);
int pthread_mutex_timedlock(pthread_mutex_t * __restrict __mutex,
    const struct timespec * __restrict)
    __trylocks_exclusive(0, *__mutex);
int pthread_mutex_unlock(pthread_mutex_t * __mutex)
    __unlocks(*__mutex);
int pthread_once(pthread_once_t *, void (*) (void));
int pthread_rwlock_destroy(pthread_rwlock_t * __rwlock)
    __requires_unlocked(*__rwlock);
int pthread_rwlock_init(pthread_rwlock_t * __restrict __rwlock,
    const pthread_rwlockattr_t * __restrict)
    __requires_unlocked(*__rwlock);
int pthread_rwlock_rdlock(pthread_rwlock_t * __rwlock)
    __locks_shared(*__rwlock);
int pthread_rwlock_timedrdlock(
    pthread_rwlock_t * __restrict __rwlock,
    const struct timespec * __restrict)
    __trylocks_shared(0, *__rwlock);
int pthread_rwlock_timedwrlock(
    pthread_rwlock_t * __restrict __rwlock,
    const struct timespec * __restrict)
    __trylocks_exclusive(0, *__rwlock);
int pthread_rwlock_tryrdlock(pthread_rwlock_t * __rwlock)
    __trylocks_shared(0, *__rwlock);
int pthread_rwlock_trywrlock(pthread_rwlock_t * __rwlock)
    __trylocks_exclusive(0, *__rwlock);
int pthread_rwlock_unlock(pthread_rwlock_t * __rwlock)
    __unlocks(*__rwlock);
int pthread_rwlock_wrlock(pthread_rwlock_t * __rwlock)
    __locks_exclusive(*__rwlock);
int pthread_rwlockattr_destroy(pthread_rwlockattr_t *);
int pthread_rwlockattr_getkind_np(const pthread_rwlockattr_t *,
    int *);
int pthread_rwlockattr_getpshared(
    const pthread_rwlockattr_t * __restrict,
    int * __restrict);
int pthread_rwlockattr_init(pthread_rwlockattr_t *);
int pthread_rwlockattr_setkind_np(pthread_rwlockattr_t *,
    int);
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *, int);
pthread_t pthread_self(void);
int pthread_setspecific(pthread_key_t, const void *);

int pthread_spin_init(pthread_spinlock_t * __spin, int)
    __requires_unlocked(*__spin);
int pthread_spin_destroy(pthread_spinlock_t * __spin)
    __requires_unlocked(*__spin);
int pthread_spin_lock(pthread_spinlock_t * __spin)
    __locks_exclusive(*__spin);
int pthread_spin_trylock(pthread_spinlock_t * __spin)
    __trylocks_exclusive(0, *__spin);
int pthread_spin_unlock(pthread_spinlock_t * __spin)
    __unlocks(*__spin);
int pthread_cancel(pthread_t);
int pthread_setcancelstate(int, int *);
int pthread_setcanceltype(int, int *);
void pthread_testcancel(void);

#if __BSD_VISIBLE
int pthread_getprio(pthread_t);
int pthread_setprio(pthread_t, int);
void pthread_yield(void);

int pthread_getname_np(pthread_t, char *, size_t);
int pthread_setname_np(pthread_t, const char *);
#endif

int pthread_mutexattr_getprioceiling(
    const pthread_mutexattr_t * __restrict,
    int * __restrict);
int pthread_mutexattr_setprioceiling(pthread_mutexattr_t *, int);
int pthread_mutex_getprioceiling(const pthread_mutex_t * __restrict,
    int * __restrict);
int pthread_mutex_setprioceiling(pthread_mutex_t * __restrict, int,
    int * __restrict);

int pthread_mutexattr_getprotocol(
    const pthread_mutexattr_t * __restrict,
    int * __restrict);
int pthread_mutexattr_setprotocol(pthread_mutexattr_t *, int);

int pthread_mutexattr_getrobust(
    pthread_mutexattr_t * __restrict, int * __restrict);
int pthread_mutexattr_setrobust(pthread_mutexattr_t *, int);

int pthread_attr_getinheritsched(const pthread_attr_t * __restrict,
    int * __restrict);
int pthread_attr_getschedparam(const pthread_attr_t *,
    struct sched_param *);
int pthread_attr_getschedpolicy(const pthread_attr_t * __restrict,
    int * __restrict);
int pthread_attr_getscope(const pthread_attr_t * __restrict,
    int * __restrict);
int pthread_attr_setinheritsched(pthread_attr_t *, int);
int pthread_attr_setschedparam(pthread_attr_t *,
    const struct sched_param *);
int pthread_attr_setschedpolicy(pthread_attr_t *, int);
int pthread_attr_setscope(pthread_attr_t *, int);
int pthread_getschedparam(pthread_t pthread, int * __restrict,
    struct sched_param * __restrict);
int pthread_setschedparam(pthread_t, int,
    const struct sched_param *);
#if __XSI_VISIBLE
int pthread_getconcurrency(void);
int pthread_setconcurrency(int);
#endif

void __pthread_cleanup_push_imp(void (*)(void *), void *,
    struct _pthread_cleanup_info *);
void __pthread_cleanup_pop_imp(int);
__END_DECLS

#endif /* !_PTHREAD_H_ */