Implement process-shared locks support for libthr.so.3, without
breaking the ABI.  A special value is stored in the lock pointer to
indicate a shared lock, and an off-page in the shared memory is
allocated to store the actual lock.

Reviewed by:	vangyzen (previous version)
Discussed with:	deischen, emaste, jhb, rwatson,
	Martin Simmons <martin@lispworks.com>
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Author:	Konstantin Belousov
Date:	2016-02-28 17:52:33 +00:00
parent 3f3af790f9
commit 1bdbd70599
27 changed files with 1169 additions and 326 deletions
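To make the intent concrete, here is a minimal sketch of what the change
enables (illustration only, not part of the commit; error handling
trimmed).  Before this commit, pthread_mutexattr_setpshared() rejected
PTHREAD_PROCESS_SHARED outright; with it, a mutex whose pthread_mutex_t
handle lives in MAP_SHARED memory can synchronize two processes, libthr
keeping the real lock on a kernel-managed off-page:

	#include <sys/mman.h>
	#include <sys/wait.h>
	#include <pthread.h>
	#include <unistd.h>

	int
	main(void)
	{
		pthread_mutexattr_t ma;
		pthread_mutex_t *mp;

		/*
		 * The user-visible handle sits in shared memory; libthr
		 * stores THR_PSHARED_PTR in it and keeps the lock itself
		 * on a shared off-page.
		 */
		mp = mmap(NULL, sizeof(*mp), PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_ANON, -1, 0);

		pthread_mutexattr_init(&ma);
		pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
		pthread_mutex_init(mp, &ma);
		pthread_mutexattr_destroy(&ma);

		if (fork() == 0) {
			pthread_mutex_lock(mp);	/* contends with parent */
			pthread_mutex_unlock(mp);
			_exit(0);
		}
		pthread_mutex_lock(mp);
		pthread_mutex_unlock(mp);
		wait(NULL);
		return (0);
	}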

======================================================================

@ -413,6 +413,9 @@ static const struct limits limits[] = {
#endif
#ifdef RLIMIT_KQUEUES
{ "kqueues", (char *)0, RLIMIT_KQUEUES, 1, 'k' },
#endif
#ifdef RLIMIT_UMTXP
{ "umtxp", (char *)0, RLIMIT_UMTXP, 1, 'o' },
#endif
{ (char *) 0, (char *)0, 0, 0, '\0' }
};

======================================================================

@ -69,7 +69,7 @@
#define PTHREAD_EXPLICIT_SCHED 0
/*
* Flags for read/write lock attributes
* Values for process shared/private attributes.
*/
#define PTHREAD_PROCESS_PRIVATE 0
#define PTHREAD_PROCESS_SHARED 1

======================================================================

@ -112,7 +112,7 @@ typedef __useconds_t useconds_t;
#define _POSIX_THREAD_PRIO_INHERIT 200112L
#define _POSIX_THREAD_PRIO_PROTECT 200112L
#define _POSIX_THREAD_PRIORITY_SCHEDULING 200112L
#define _POSIX_THREAD_PROCESS_SHARED -1
#define _POSIX_THREAD_PROCESS_SHARED 200112L
#define _POSIX_THREAD_SAFE_FUNCTIONS -1
#define _POSIX_THREAD_SPORADIC_SERVER -1
#define _POSIX_THREADS 200112L
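With the option now advertised, applications can also probe it at run
time; a minimal check using only standard sysconf(3):

	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		long v = sysconf(_SC_THREAD_PROCESS_SHARED);

		printf("process-shared sync: %s (%ld)\n",
		    v > 0 ? "supported" : "unsupported", v);
		return (0);
	}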

======================================================================

@ -36,6 +36,7 @@ SRCS+= \
thr_mutexattr.c \
thr_once.c \
thr_printf.c \
thr_pshared.c \
thr_pspinlock.c \
thr_resume_np.c \
thr_rtld.c \

======================================================================

@ -41,14 +41,25 @@ __weak_reference(_pthread_barrier_destroy, pthread_barrier_destroy);
int
_pthread_barrier_destroy(pthread_barrier_t *barrier)
{
pthread_barrier_t bar;
struct pthread *curthread;
int pshared;
if (barrier == NULL || *barrier == NULL)
return (EINVAL);
if (*barrier == THR_PSHARED_PTR) {
bar = __thr_pshared_offpage(barrier, 0);
if (bar == NULL) {
*barrier = NULL;
return (0);
}
pshared = 1;
} else {
bar = *barrier;
pshared = 0;
}
curthread = _get_curthread();
bar = *barrier;
THR_UMUTEX_LOCK(curthread, &bar->b_lock);
if (bar->b_destroying) {
THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
@ -71,37 +82,52 @@ _pthread_barrier_destroy(pthread_barrier_t *barrier)
THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
*barrier = NULL;
free(bar);
if (pshared)
__thr_pshared_destroy(barrier);
else
free(bar);
return (0);
}
int
_pthread_barrier_init(pthread_barrier_t *barrier,
const pthread_barrierattr_t *attr, unsigned count)
{
pthread_barrier_t bar;
(void)attr;
pthread_barrier_t bar;
int pshared;
if (barrier == NULL || count <= 0)
return (EINVAL);
bar = calloc(1, sizeof(struct pthread_barrier));
if (bar == NULL)
return (ENOMEM);
if (attr == NULL || *attr == NULL ||
(*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
bar = calloc(1, sizeof(struct pthread_barrier));
if (bar == NULL)
return (ENOMEM);
*barrier = bar;
pshared = 0;
} else {
bar = __thr_pshared_offpage(barrier, 1);
if (bar == NULL)
return (EFAULT);
*barrier = THR_PSHARED_PTR;
pshared = 1;
}
_thr_umutex_init(&bar->b_lock);
_thr_ucond_init(&bar->b_cv);
bar->b_count = count;
*barrier = bar;
if (pshared) {
bar->b_lock.m_flags |= USYNC_PROCESS_SHARED;
bar->b_cv.c_flags |= USYNC_PROCESS_SHARED;
}
bar->b_count = count;
return (0);
}
int
_pthread_barrier_wait(pthread_barrier_t *barrier)
{
struct pthread *curthread = _get_curthread();
struct pthread *curthread;
pthread_barrier_t bar;
int64_t cycle;
int ret;
@ -109,7 +135,14 @@ _pthread_barrier_wait(pthread_barrier_t *barrier)
if (barrier == NULL || *barrier == NULL)
return (EINVAL);
bar = *barrier;
if (*barrier == THR_PSHARED_PTR) {
bar = __thr_pshared_offpage(barrier, 0);
if (bar == NULL)
return (EINVAL);
} else {
bar = *barrier;
}
curthread = _get_curthread();
THR_UMUTEX_LOCK(curthread, &bar->b_lock);
if (++bar->b_waiters == bar->b_count) {
/* Current thread is the last thread */

======================================================================

@ -56,7 +56,7 @@ _pthread_barrierattr_destroy(pthread_barrierattr_t *attr)
int
_pthread_barrierattr_getpshared(const pthread_barrierattr_t *attr,
int *pshared)
{
if (attr == NULL || *attr == NULL)
@ -84,11 +84,9 @@ int
_pthread_barrierattr_setpshared(pthread_barrierattr_t *attr, int pshared)
{
if (attr == NULL || *attr == NULL)
return (EINVAL);
/* Only PTHREAD_PROCESS_PRIVATE is supported. */
if (pshared != PTHREAD_PROCESS_PRIVATE)
if (attr == NULL || *attr == NULL ||
(pshared != PTHREAD_PROCESS_PRIVATE &&
pshared != PTHREAD_PROCESS_SHARED))
return (EINVAL);
(*attr)->pshared = pshared;

======================================================================

@ -1,7 +1,11 @@
/*
* Copyright (c) 2005 David Xu <davidxu@freebsd.org>
* Copyright (c) 2015 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by Konstantin Belousov
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@ -63,29 +67,45 @@ __weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
#define CV_PSHARED(cvp) (((cvp)->__flags & USYNC_PROCESS_SHARED) != 0)
static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{
if (cattr == NULL) {
cvp->__clock_id = CLOCK_REALTIME;
} else {
if (cattr->c_pshared)
cvp->__flags |= USYNC_PROCESS_SHARED;
cvp->__clock_id = cattr->c_clockid;
}
}
static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
struct pthread_cond *cvp;
int error = 0;
const struct pthread_cond_attr *cattr;
int pshared;
if ((cvp = (pthread_cond_t)
calloc(1, sizeof(struct pthread_cond))) == NULL) {
error = ENOMEM;
cattr = cond_attr != NULL ? *cond_attr : NULL;
if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
pshared = 0;
cvp = calloc(1, sizeof(struct pthread_cond));
if (cvp == NULL)
return (ENOMEM);
} else {
/*
* Initialise the condition variable structure:
*/
if (cond_attr == NULL || *cond_attr == NULL) {
cvp->__clock_id = CLOCK_REALTIME;
} else {
if ((*cond_attr)->c_pshared)
cvp->__flags |= USYNC_PROCESS_SHARED;
cvp->__clock_id = (*cond_attr)->c_clockid;
}
*cond = cvp;
pshared = 1;
cvp = __thr_pshared_offpage(cond, 1);
if (cvp == NULL)
return (EFAULT);
}
return (error);
/*
* Initialise the condition variable structure:
*/
cond_init_body(cvp, cattr);
*cond = pshared ? THR_PSHARED_PTR : cvp;
return (0);
}
static int
@ -106,7 +126,11 @@ init_static(struct pthread *thread, pthread_cond_t *cond)
}
#define CHECK_AND_INIT_COND \
if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
if (*cond == THR_PSHARED_PTR) { \
cvp = __thr_pshared_offpage(cond, 0); \
if (cvp == NULL) \
return (EINVAL); \
} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
if (cvp == THR_COND_INITIALIZER) { \
int ret; \
ret = init_static(_get_curthread(), cond); \
@ -129,21 +153,22 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
int
_pthread_cond_destroy(pthread_cond_t *cond)
{
struct pthread_cond *cvp;
int error = 0;
int error;
if ((cvp = *cond) == THR_COND_INITIALIZER)
error = 0;
else if (cvp == THR_COND_DESTROYED)
error = 0;
if (*cond == THR_PSHARED_PTR) {
cvp = __thr_pshared_offpage(cond, 0);
if (cvp != NULL)
__thr_pshared_destroy(cond);
*cond = THR_COND_DESTROYED;
} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
/* nothing */
} else if (cvp == THR_COND_DESTROYED) {
error = EINVAL;
else {
} else {
cvp = *cond;
*cond = THR_COND_DESTROYED;
/*
* Free the memory allocated for the condition
* variable structure:
*/
free(cvp);
}
return (error);
@ -297,7 +322,13 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
CHECK_AND_INIT_COND
mp = *mutex;
if (*mutex == THR_PSHARED_PTR) {
mp = __thr_pshared_offpage(mutex, 0);
if (mp == NULL)
return (EINVAL);
} else {
mp = *mutex;
}
if ((error = _mutex_owned(curthread, mp)) != 0)
return (error);
@ -385,7 +416,7 @@ cond_signal_common(pthread_cond_t *cond)
td = _sleepq_first(sq);
mp = td->mutex_obj;
cvp->__has_user_waiters = _sleepq_remove(sq, td);
if (mp->m_owner == curthread) {
if (mp->m_owner == TID(curthread)) {
if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
_thr_wake_all(curthread->defer_waiters,
curthread->nwaiter_defer);
@ -417,7 +448,7 @@ drop_cb(struct pthread *td, void *arg)
struct pthread *curthread = ba->curthread;
mp = td->mutex_obj;
if (mp->m_owner == curthread) {
if (mp->m_owner == TID(curthread)) {
if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
_thr_wake_all(curthread->defer_waiters,
curthread->nwaiter_defer);

======================================================================

@ -105,20 +105,21 @@ _pthread_condattr_setclock(pthread_condattr_t *attr, clockid_t clock_id)
int
_pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared)
{
if (attr == NULL || *attr == NULL)
return (EINVAL);
*pshared = PTHREAD_PROCESS_PRIVATE;
*pshared = (*attr)->c_pshared;
return (0);
}
int
_pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
if (attr == NULL || *attr == NULL)
return (EINVAL);
if (pshared != PTHREAD_PROCESS_PRIVATE)
if (attr == NULL || *attr == NULL ||
(pshared != PTHREAD_PROCESS_PRIVATE &&
pshared != PTHREAD_PROCESS_SHARED))
return (EINVAL);
(*attr)->c_pshared = pshared;
return (0);
}

======================================================================

@ -56,12 +56,12 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
struct thr_param param;
struct sched_param sched_param;
struct rtprio rtp;
int ret = 0, locked, create_suspended;
sigset_t set, oset;
cpuset_t *cpusetp = NULL;
int cpusetsize = 0;
int old_stack_prot;
cpuset_t *cpusetp;
int i, cpusetsize, create_suspended, locked, old_stack_prot, ret;
cpusetp = NULL;
ret = cpusetsize = 0;
_thr_check_init();
/*
@ -118,8 +118,8 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->cancel_enable = 1;
new_thread->cancel_async = 0;
/* Initialize the mutex queue: */
TAILQ_INIT(&new_thread->mutexq);
TAILQ_INIT(&new_thread->pp_mutexq);
for (i = 0; i < TMQ_NITEMS; i++)
TAILQ_INIT(&new_thread->mq[i]);
/* Initialise hooks in the thread structure: */
if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {

======================================================================

@ -91,13 +91,15 @@ struct pthread_attr _pthread_attr_default = {
struct pthread_mutex_attr _pthread_mutexattr_default = {
.m_type = PTHREAD_MUTEX_DEFAULT,
.m_protocol = PTHREAD_PRIO_NONE,
.m_ceiling = 0
.m_ceiling = 0,
.m_pshared = PTHREAD_PROCESS_PRIVATE,
};
struct pthread_mutex_attr _pthread_mutexattr_adaptive_default = {
.m_type = PTHREAD_MUTEX_ADAPTIVE_NP,
.m_protocol = PTHREAD_PRIO_NONE,
.m_ceiling = 0
.m_ceiling = 0,
.m_pshared = PTHREAD_PROCESS_PRIVATE,
};
/* Default condition variable attributes: */
@ -387,6 +389,7 @@ static void
init_main_thread(struct pthread *thread)
{
struct sched_param sched_param;
int i;
/* Setup the thread attributes. */
thr_self(&thread->tid);
@ -428,9 +431,9 @@ init_main_thread(struct pthread *thread)
thread->cancel_enable = 1;
thread->cancel_async = 0;
/* Initialize the mutex queue: */
TAILQ_INIT(&thread->mutexq);
TAILQ_INIT(&thread->pp_mutexq);
/* Initialize the mutex queues */
for (i = 0; i < TMQ_NITEMS; i++)
TAILQ_INIT(&thread->mq[i]);
thread->state = PS_RUNNING;
@ -463,6 +466,7 @@ init_private(void)
_thr_once_init();
_thr_spinlock_init();
_thr_list_init();
__thr_pshared_init();
_thr_wake_addr_init();
_sleepq_init();
_single_thread = NULL;

======================================================================

@ -1,8 +1,13 @@
/*
* Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
* Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
* Copyright (c) 2015 The FreeBSD Foundation
*
* All rights reserved.
*
* Portions of this software were developed by Konstantin Belousov
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@ -45,26 +50,6 @@
#include "thr_private.h"
#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) do { \
(m)->m_qe.tqe_prev = NULL; \
(m)->m_qe.tqe_next = NULL; \
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m) do { \
if (__predict_false((m)->m_qe.tqe_prev == NULL))\
PANIC("mutex is not on list"); \
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m) do { \
if (__predict_false((m)->m_qe.tqe_prev != NULL || \
(m)->m_qe.tqe_next != NULL)) \
PANIC("mutex is on list"); \
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif
/*
* For adaptive mutexes, how many times to spin doing trylock2
* before entering the kernel to block
@ -122,36 +107,71 @@ __strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloop
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);
static int
mutex_init(pthread_mutex_t *mutex,
const struct pthread_mutex_attr *mutex_attr,
void *(calloc_cb)(size_t, size_t))
static void
mutex_init_link(struct pthread_mutex *m)
{
const struct pthread_mutex_attr *attr;
struct pthread_mutex *pmutex;
if (mutex_attr == NULL) {
attr = &_pthread_mutexattr_default;
} else {
attr = mutex_attr;
if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
return (EINVAL);
if (attr->m_protocol < PTHREAD_PRIO_NONE ||
attr->m_protocol > PTHREAD_PRIO_PROTECT)
return (EINVAL);
}
if ((pmutex = (pthread_mutex_t)
calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
return (ENOMEM);
#if defined(_PTHREADS_INVARIANTS)
m->m_qe.tqe_prev = NULL;
m->m_qe.tqe_next = NULL;
m->m_pqe.tqe_prev = NULL;
m->m_pqe.tqe_next = NULL;
#endif
}
static void
mutex_assert_is_owned(struct pthread_mutex *m)
{
#if defined(_PTHREADS_INVARIANTS)
if (__predict_false(m->m_qe.tqe_prev == NULL))
PANIC("mutex is not on list");
#endif
}
static void
mutex_assert_not_owned(struct pthread_mutex *m)
{
#if defined(_PTHREADS_INVARIANTS)
if (__predict_false(m->m_qe.tqe_prev != NULL ||
m->m_qe.tqe_next != NULL))
PANIC("mutex is on list");
#endif
}
static int
is_pshared_mutex(struct pthread_mutex *m)
{
return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
}
static int
mutex_check_attr(const struct pthread_mutex_attr *attr)
{
if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
return (EINVAL);
if (attr->m_protocol < PTHREAD_PRIO_NONE ||
attr->m_protocol > PTHREAD_PRIO_PROTECT)
return (EINVAL);
return (0);
}
static void
mutex_init_body(struct pthread_mutex *pmutex,
const struct pthread_mutex_attr *attr)
{
pmutex->m_flags = attr->m_type;
pmutex->m_owner = NULL;
pmutex->m_owner = 0;
pmutex->m_count = 0;
pmutex->m_spinloops = 0;
pmutex->m_yieldloops = 0;
MUTEX_INIT_LINK(pmutex);
switch(attr->m_protocol) {
mutex_init_link(pmutex);
switch (attr->m_protocol) {
case PTHREAD_PRIO_NONE:
pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
pmutex->m_lock.m_flags = 0;
@ -166,13 +186,37 @@ mutex_init(pthread_mutex_t *mutex,
pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
break;
}
if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;
if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
pmutex->m_spinloops =
_thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
pmutex->m_yieldloops = _thr_yieldloops;
}
}
static int
mutex_init(pthread_mutex_t *mutex,
const struct pthread_mutex_attr *mutex_attr,
void *(calloc_cb)(size_t, size_t))
{
const struct pthread_mutex_attr *attr;
struct pthread_mutex *pmutex;
int error;
if (mutex_attr == NULL) {
attr = &_pthread_mutexattr_default;
} else {
attr = mutex_attr;
error = mutex_check_attr(attr);
if (error != 0)
return (error);
}
if ((pmutex = (pthread_mutex_t)
calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
return (ENOMEM);
mutex_init_body(pmutex, attr);
*mutex = pmutex;
return (0);
}
@ -187,7 +231,8 @@ init_static(struct pthread *thread, pthread_mutex_t *mutex)
if (*mutex == THR_MUTEX_INITIALIZER)
ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc);
ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
calloc);
else
ret = 0;
THR_LOCK_RELEASE(thread, &_mutex_static_lock);
@ -200,7 +245,7 @@ set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
struct pthread_mutex *m2;
m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
m2 = TAILQ_LAST(&curthread->mq[TMQ_NORM_PP], mutex_queue);
if (m2 != NULL)
m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
else
@ -211,7 +256,25 @@ int
__pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *mutex_attr)
{
return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc);
struct pthread_mutex *pmtx;
int ret;
if (mutex_attr != NULL) {
ret = mutex_check_attr(*mutex_attr);
if (ret != 0)
return (ret);
}
if (mutex_attr == NULL ||
(*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
calloc));
}
pmtx = __thr_pshared_offpage(mutex, 1);
if (pmtx == NULL)
return (EFAULT);
*mutex = THR_PSHARED_PTR;
mutex_init_body(pmtx, *mutex_attr);
return (0);
}
/* This function is used internally by malloc. */
@ -222,7 +285,8 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
static const struct pthread_mutex_attr attr = {
.m_type = PTHREAD_MUTEX_NORMAL,
.m_protocol = PTHREAD_PRIO_NONE,
.m_ceiling = 0
.m_ceiling = 0,
.m_pshared = PTHREAD_PROCESS_PRIVATE,
};
int ret;
@ -232,31 +296,44 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
return (ret);
}
void
_mutex_fork(struct pthread *curthread)
/*
* Fix mutex ownership for child process.
*
* Process private mutex ownership is transmitted from the forking
* thread to the child process.
*
* Process shared mutex should not be inherited because owner is
* forking thread which is in parent process, they are removed from
* the owned mutex list.
*/
static void
queue_fork(struct pthread *curthread, struct mutex_queue *q,
struct mutex_queue *qp, uint bit)
{
struct pthread_mutex *m;
/*
* Fix mutex ownership for child process.
* note that process shared mutex should not
* be inherited because owner is forking thread
* which is in parent process, they should be
* removed from the owned mutex list, current,
* process shared mutex is not supported, so I
* am not worried.
*/
TAILQ_INIT(q);
TAILQ_FOREACH(m, qp, m_pqe) {
TAILQ_INSERT_TAIL(q, m, m_qe);
m->m_lock.m_owner = TID(curthread) | bit;
m->m_owner = TID(curthread);
}
}
TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
m->m_lock.m_owner = TID(curthread);
TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
void
_mutex_fork(struct pthread *curthread)
{
queue_fork(curthread, &curthread->mq[TMQ_NORM],
&curthread->mq[TMQ_NORM_PRIV], 0);
queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
&curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
}
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
pthread_mutex_t m;
pthread_mutex_t m, m1;
int ret;
m = *mutex;
@ -265,11 +342,20 @@ _pthread_mutex_destroy(pthread_mutex_t *mutex)
} else if (m == THR_MUTEX_DESTROYED) {
ret = EINVAL;
} else {
if (m->m_owner != NULL) {
if (m == THR_PSHARED_PTR) {
m1 = __thr_pshared_offpage(mutex, 0);
if (m1 != NULL) {
mutex_assert_not_owned(m1);
__thr_pshared_destroy(mutex);
}
*mutex = THR_MUTEX_DESTROYED;
return (0);
}
if (m->m_owner != 0) {
ret = EBUSY;
} else {
*mutex = THR_MUTEX_DESTROYED;
MUTEX_ASSERT_NOT_OWNED(m);
mutex_assert_not_owned(m);
free(m);
ret = 0;
}
@ -278,69 +364,92 @@ _pthread_mutex_destroy(pthread_mutex_t *mutex)
return (ret);
}
#define ENQUEUE_MUTEX(curthread, m) \
do { \
(m)->m_owner = curthread; \
/* Add to the list of owned mutexes: */ \
MUTEX_ASSERT_NOT_OWNED((m)); \
if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) \
TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
else \
TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
} while (0)
static int
mutex_qidx(struct pthread_mutex *m)
{
#define DEQUEUE_MUTEX(curthread, m) \
(m)->m_owner = NULL; \
MUTEX_ASSERT_IS_OWNED(m); \
if (__predict_true(((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) \
TAILQ_REMOVE(&curthread->mutexq, (m), m_qe); \
else { \
TAILQ_REMOVE(&curthread->pp_mutexq, (m), m_qe); \
set_inherited_priority(curthread, m); \
} \
MUTEX_INIT_LINK(m);
if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
return (TMQ_NORM);
return (TMQ_NORM_PP);
}
#define CHECK_AND_INIT_MUTEX \
if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) { \
if (m == THR_MUTEX_DESTROYED) \
return (EINVAL); \
int ret; \
ret = init_static(_get_curthread(), mutex); \
if (ret) \
return (ret); \
m = *mutex; \
}
static void
enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
int qidx;
m->m_owner = TID(curthread);
/* Add to the list of owned mutexes: */
mutex_assert_not_owned(m);
qidx = mutex_qidx(m);
TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
if (!is_pshared_mutex(m))
TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
}
static void
dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
int qidx;
m->m_owner = 0;
mutex_assert_is_owned(m);
qidx = mutex_qidx(m);
TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
if (!is_pshared_mutex(m))
TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
set_inherited_priority(curthread, m);
mutex_init_link(m);
}
static int
mutex_trylock_common(pthread_mutex_t *mutex)
check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
{
struct pthread *curthread = _get_curthread();
struct pthread_mutex *m = *mutex;
uint32_t id;
int ret;
id = TID(curthread);
if (m->m_flags & PMUTEX_FLAG_PRIVATE)
THR_CRITICAL_ENTER(curthread);
ret = _thr_umutex_trylock(&m->m_lock, id);
if (__predict_true(ret == 0)) {
ENQUEUE_MUTEX(curthread, m);
} else if (m->m_owner == curthread) {
ret = mutex_self_trylock(m);
} /* else {} */
if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
THR_CRITICAL_LEAVE(curthread);
*m = *mutex;
ret = 0;
if (*m == THR_PSHARED_PTR) {
*m = __thr_pshared_offpage(mutex, 0);
if (*m == NULL)
ret = EINVAL;
} else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
if (*m == THR_MUTEX_DESTROYED) {
ret = EINVAL;
} else {
ret = init_static(_get_curthread(), mutex);
if (ret == 0)
*m = *mutex;
}
}
return (ret);
}
int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
struct pthread *curthread;
struct pthread_mutex *m;
uint32_t id;
int ret;
CHECK_AND_INIT_MUTEX
return (mutex_trylock_common(mutex));
ret = check_and_init_mutex(mutex, &m);
if (ret != 0)
return (ret);
curthread = _get_curthread();
id = TID(curthread);
if (m->m_flags & PMUTEX_FLAG_PRIVATE)
THR_CRITICAL_ENTER(curthread);
ret = _thr_umutex_trylock(&m->m_lock, id);
if (__predict_true(ret == 0)) {
enqueue_mutex(curthread, m);
} else if (m->m_owner == id) {
ret = mutex_self_trylock(m);
} /* else {} */
if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
THR_CRITICAL_LEAVE(curthread);
return (ret);
}
static int
@ -351,10 +460,10 @@ mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
int count;
int ret;
if (m->m_owner == curthread)
return mutex_self_lock(m, abstime);
id = TID(curthread);
if (m->m_owner == id)
return (mutex_self_lock(m, abstime));
/*
* For adaptive mutexes, spin for a bit in the expectation
* that if the application requests this mutex type then
@ -406,7 +515,7 @@ mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
}
done:
if (ret == 0)
ENQUEUE_MUTEX(curthread, m);
enqueue_mutex(curthread, m);
return (ret);
}
@ -421,7 +530,7 @@ mutex_lock_common(struct pthread_mutex *m,
if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
THR_CRITICAL_ENTER(curthread);
if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
ENQUEUE_MUTEX(curthread, m);
enqueue_mutex(curthread, m);
ret = 0;
} else {
ret = mutex_lock_sleep(curthread, m, abstime);
@ -434,25 +543,28 @@ mutex_lock_common(struct pthread_mutex *m,
int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
struct pthread_mutex *m;
int ret;
_thr_check_init();
CHECK_AND_INIT_MUTEX
return (mutex_lock_common(m, NULL, 0));
ret = check_and_init_mutex(mutex, &m);
if (ret == 0)
ret = mutex_lock_common(m, NULL, 0);
return (ret);
}
int
__pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
__pthread_mutex_timedlock(pthread_mutex_t *mutex,
const struct timespec *abstime)
{
struct pthread_mutex *m;
int ret;
_thr_check_init();
CHECK_AND_INIT_MUTEX
return (mutex_lock_common(m, abstime, 0));
ret = check_and_init_mutex(mutex, &m);
if (ret == 0)
ret = mutex_lock_common(m, abstime, 0);
return (ret);
}
int
@ -460,7 +572,13 @@ _pthread_mutex_unlock(pthread_mutex_t *mutex)
{
struct pthread_mutex *mp;
mp = *mutex;
if (*mutex == THR_PSHARED_PTR) {
mp = __thr_pshared_offpage(mutex, 0);
if (mp == NULL)
return (EINVAL);
} else {
mp = *mutex;
}
return (mutex_unlock_common(mp, 0, NULL));
}
@ -493,7 +611,7 @@ _mutex_cv_attach(struct pthread_mutex *m, int count)
{
struct pthread *curthread = _get_curthread();
ENQUEUE_MUTEX(curthread, m);
enqueue_mutex(curthread, m);
m->m_count = count;
return (0);
}
@ -513,7 +631,7 @@ _mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
*/
*recurse = mp->m_count;
mp->m_count = 0;
DEQUEUE_MUTEX(curthread, mp);
dequeue_mutex(curthread, mp);
/* Will this happen in real-world ? */
if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
@ -641,14 +759,15 @@ mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
return (EPERM);
}
id = TID(curthread);
/*
* Check if the running thread is not the owner of the mutex.
*/
if (__predict_false(m->m_owner != curthread))
if (__predict_false(m->m_owner != id))
return (EPERM);
error = 0;
id = TID(curthread);
if (__predict_false(
PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
m->m_count > 0)) {
@ -660,7 +779,7 @@ mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
} else
defered = 0;
DEQUEUE_MUTEX(curthread, m);
dequeue_mutex(curthread, m);
error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);
if (mtx_defer == NULL && defered) {
@ -676,54 +795,85 @@ mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
int *prioceiling)
{
struct pthread_mutex *m;
int ret;
m = *mutex;
if ((m <= THR_MUTEX_DESTROYED) ||
(m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
ret = EINVAL;
else {
*prioceiling = m->m_lock.m_ceilings[0];
ret = 0;
if (*mutex == THR_PSHARED_PTR) {
m = __thr_pshared_offpage(mutex, 0);
if (m == NULL)
return (EINVAL);
} else {
m = *mutex;
if (m <= THR_MUTEX_DESTROYED)
return (EINVAL);
}
return (ret);
if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
return (EINVAL);
*prioceiling = m->m_lock.m_ceilings[0];
return (0);
}
int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
int ceiling, int *old_ceiling)
{
struct pthread *curthread = _get_curthread();
struct pthread *curthread;
struct pthread_mutex *m, *m1, *m2;
struct mutex_queue *q, *qp;
int ret;
m = *mutex;
if ((m <= THR_MUTEX_DESTROYED) ||
(m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
if (*mutex == THR_PSHARED_PTR) {
m = __thr_pshared_offpage(mutex, 0);
if (m == NULL)
return (EINVAL);
} else {
m = *mutex;
if (m <= THR_MUTEX_DESTROYED)
return (EINVAL);
}
if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
return (EINVAL);
ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
if (ret != 0)
return (ret);
if (m->m_owner == curthread) {
MUTEX_ASSERT_IS_OWNED(m);
curthread = _get_curthread();
if (m->m_owner == TID(curthread)) {
mutex_assert_is_owned(m);
m1 = TAILQ_PREV(m, mutex_queue, m_qe);
m2 = TAILQ_NEXT(m, m_qe);
if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
(m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
q = &curthread->mq[TMQ_NORM_PP];
qp = &curthread->mq[TMQ_NORM_PP_PRIV];
TAILQ_REMOVE(q, m, m_qe);
if (!is_pshared_mutex(m))
TAILQ_REMOVE(qp, m, m_pqe);
TAILQ_FOREACH(m2, q, m_qe) {
if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
TAILQ_INSERT_BEFORE(m2, m, m_qe);
if (!is_pshared_mutex(m)) {
while (m2 != NULL &&
is_pshared_mutex(m2)) {
m2 = TAILQ_PREV(m2,
mutex_queue, m_qe);
}
if (m2 == NULL) {
TAILQ_INSERT_HEAD(qp,
m, m_pqe);
} else {
TAILQ_INSERT_BEFORE(m2,
m, m_pqe);
}
}
return (0);
}
}
TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
TAILQ_INSERT_TAIL(q, m, m_qe);
if (!is_pshared_mutex(m))
TAILQ_INSERT_TAIL(qp, m, m_pqe);
}
}
return (0);
@ -732,44 +882,48 @@ _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
struct pthread_mutex *m;
int ret;
CHECK_AND_INIT_MUTEX
*count = m->m_spinloops;
return (0);
ret = check_and_init_mutex(mutex, &m);
if (ret == 0)
*count = m->m_spinloops;
return (ret);
}
int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
struct pthread_mutex *m;
int ret;
CHECK_AND_INIT_MUTEX
m->m_spinloops = count;
return (0);
ret = check_and_init_mutex(mutex, &m);
if (ret == 0)
m->m_spinloops = count;
return (ret);
}
int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
struct pthread_mutex *m;
int ret;
CHECK_AND_INIT_MUTEX
*count = m->m_yieldloops;
return (0);
ret = check_and_init_mutex(mutex, &m);
if (ret == 0)
*count = m->m_yieldloops;
return (ret);
}
int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
struct pthread_mutex *m;
int ret;
CHECK_AND_INIT_MUTEX
m->m_yieldloops = count;
ret = check_and_init_mutex(mutex, &m);
if (ret == 0)
m->m_yieldloops = count;
return (0);
}
@ -778,10 +932,16 @@ _pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
struct pthread_mutex *m;
m = *mutex;
if (m <= THR_MUTEX_DESTROYED)
return (0);
return (m->m_owner == _get_curthread());
if (*mutex == THR_PSHARED_PTR) {
m = __thr_pshared_offpage(mutex, 0);
if (m == NULL)
return (0);
} else {
m = *mutex;
if (m <= THR_MUTEX_DESTROYED)
return (0);
}
return (m->m_owner == TID(_get_curthread()));
}
int
@ -792,7 +952,7 @@ _mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
return (EINVAL);
return (EPERM);
}
if (mp->m_owner != curthread)
if (mp->m_owner != TID(curthread))
return (EPERM);
return (0);
}

======================================================================

@ -176,8 +176,7 @@ _pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr,
if (attr == NULL || *attr == NULL)
return (EINVAL);
*pshared = PTHREAD_PROCESS_PRIVATE;
*pshared = (*attr)->m_pshared;
return (0);
}
@ -185,13 +184,11 @@ int
_pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
if (attr == NULL || *attr == NULL)
if (attr == NULL || *attr == NULL ||
(pshared != PTHREAD_PROCESS_PRIVATE &&
pshared != PTHREAD_PROCESS_SHARED))
return (EINVAL);
/* Only PTHREAD_PROCESS_PRIVATE is supported. */
if (pshared != PTHREAD_PROCESS_PRIVATE)
return (EINVAL);
(*attr)->m_pshared = pshared;
return (0);
}

======================================================================

@ -126,6 +126,10 @@ TAILQ_HEAD(mutex_queue, pthread_mutex);
} \
} while (0)
/* Magic cookie set for shared pthread locks and cv's pointers */
#define THR_PSHARED_PTR \
((void *)(uintptr_t)((1ULL << (NBBY * sizeof(long) - 1)) | 1))
/* XXX These values should be same as those defined in pthread.h */
#define THR_MUTEX_INITIALIZER ((struct pthread_mutex *)NULL)
#define THR_ADAPTIVE_MUTEX_INITIALIZER ((struct pthread_mutex *)1)
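For concreteness, the value of the cookie (derived from the definition
above; an illustration, not diff text):

	/*
	 * With NBBY == 8 and sizeof(long) == 8 (LP64):
	 *	THR_PSHARED_PTR == (void *)0x8000000000000001
	 * and on ILP32:
	 *	THR_PSHARED_PTR == (void *)0x80000001
	 * i.e. an odd (misaligned) address that no properly allocated
	 * lock object can occupy, so storing it in the user-visible
	 * pointer cannot collide with a real private lock.
	 */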
@ -148,20 +152,24 @@ struct pthread_mutex {
*/
struct umutex m_lock;
int m_flags;
struct pthread *m_owner;
uint32_t m_owner;
int m_count;
int m_spinloops;
int m_yieldloops;
/*
* Link for all mutexes a thread currently owns.
* Link for all mutexes a thread currently owns, of the same
* prio type.
*/
TAILQ_ENTRY(pthread_mutex) m_qe;
/* Link for all private mutexes a thread currently owns. */
TAILQ_ENTRY(pthread_mutex) m_pqe;
};
struct pthread_mutex_attr {
enum pthread_mutextype m_type;
int m_protocol;
int m_ceiling;
int m_pshared;
};
#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
@ -313,7 +321,7 @@ struct pthread_rwlockattr {
struct pthread_rwlock {
struct urwlock lock;
struct pthread *owner;
uint32_t owner;
};
/*
@ -467,11 +475,16 @@ struct pthread {
#define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */
#define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */
/* Queue of currently owned NORMAL or PRIO_INHERIT type mutexes. */
struct mutex_queue mutexq;
/* Queue of all owned PRIO_PROTECT mutexes. */
struct mutex_queue pp_mutexq;
/*
* Queues of the owned mutexes. Private queue must have index
* + 1 of the corresponding full queue.
*/
#define TMQ_NORM 0 /* NORMAL or PRIO_INHERIT normal */
#define TMQ_NORM_PRIV 1 /* NORMAL or PRIO_INHERIT normal priv */
#define TMQ_NORM_PP 2 /* PRIO_PROTECT normal mutexes */
#define TMQ_NORM_PP_PRIV 3 /* PRIO_PROTECT normal priv */
#define TMQ_NITEMS 4
struct mutex_queue mq[TMQ_NITEMS];
void *ret;
struct pthread_specific_elem *specific;
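Spelling the index invariant out (illustrative comment, not diff text):

	/*
	 * mq[TMQ_NORM]        all owned NORMAL/PRIO_INHERIT mutexes
	 * mq[TMQ_NORM + 1]    == mq[TMQ_NORM_PRIV], the non-pshared subset
	 * mq[TMQ_NORM_PP]     all owned PRIO_PROTECT mutexes
	 * mq[TMQ_NORM_PP + 1] == mq[TMQ_NORM_PP_PRIV], the non-pshared subset
	 *
	 * This is what lets enqueue_mutex()/dequeue_mutex() reach the
	 * private queue as mq[mutex_qidx(m) + 1] without a second switch.
	 */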
@ -936,6 +949,10 @@ void __thr_spinlock(struct _spinlock *lck);
struct tcb *_tcb_ctor(struct pthread *, int);
void _tcb_dtor(struct tcb *);
void __thr_pshared_init(void) __hidden;
void *__thr_pshared_offpage(void *key, int doalloc) __hidden;
void __thr_pshared_destroy(void *key) __hidden;
__END_DECLS
#endif /* !_THR_PRIVATE_H */

======================================================================

@ -0,0 +1,223 @@
/*-
* Copyright (c) 2015 The FreeBSD Foundation
*
* This software was developed by Konstantin Belousov
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include "namespace.h"
#include <stdlib.h>
#include "un-namespace.h"
#include "thr_private.h"
struct psh {
LIST_ENTRY(psh) link;
void *key;
void *val;
};
LIST_HEAD(pshared_hash_head, psh);
#define HASH_SIZE 128
static struct pshared_hash_head pshared_hash[HASH_SIZE];
#define PSHARED_KEY_HASH(key) (((unsigned long)(key) >> 8) % HASH_SIZE)
/* XXXKIB: lock could be split to per-hash chain, if appears contested */
static struct urwlock pshared_lock = DEFAULT_URWLOCK;
void
__thr_pshared_init(void)
{
int i;
_thr_urwlock_init(&pshared_lock);
for (i = 0; i < HASH_SIZE; i++)
LIST_INIT(&pshared_hash[i]);
}
static void
pshared_rlock(struct pthread *curthread)
{
curthread->locklevel++;
_thr_rwl_rdlock(&pshared_lock);
}
static void
pshared_wlock(struct pthread *curthread)
{
curthread->locklevel++;
_thr_rwl_wrlock(&pshared_lock);
}
static void
pshared_unlock(struct pthread *curthread)
{
_thr_rwl_unlock(&pshared_lock);
curthread->locklevel--;
_thr_ast(curthread);
}
static void
pshared_gc(struct pthread *curthread)
{
struct pshared_hash_head *hd;
struct psh *h, *h1;
int error, i;
pshared_wlock(curthread);
for (i = 0; i < HASH_SIZE; i++) {
hd = &pshared_hash[i];
LIST_FOREACH_SAFE(h, hd, link, h1) {
error = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_ALIVE,
h->val, NULL);
if (error == 0)
continue;
LIST_REMOVE(h, link);
munmap(h->val, PAGE_SIZE);
free(h);
}
}
pshared_unlock(curthread);
}
static void *
pshared_lookup(void *key)
{
struct pshared_hash_head *hd;
struct psh *h;
hd = &pshared_hash[PSHARED_KEY_HASH(key)];
LIST_FOREACH(h, hd, link) {
if (h->key == key)
return (h->val);
}
return (NULL);
}
static int
pshared_insert(void *key, void **val)
{
struct pshared_hash_head *hd;
struct psh *h;
hd = &pshared_hash[PSHARED_KEY_HASH(key)];
LIST_FOREACH(h, hd, link) {
if (h->key == key) {
if (h->val != *val) {
munmap(*val, PAGE_SIZE);
*val = h->val;
}
return (1);
}
}
h = malloc(sizeof(*h));
if (h == NULL)
return (0);
h->key = key;
h->val = *val;
LIST_INSERT_HEAD(hd, h, link);
return (1);
}
static void *
pshared_remove(void *key)
{
struct pshared_hash_head *hd;
struct psh *h;
void *val;
hd = &pshared_hash[PSHARED_KEY_HASH(key)];
LIST_FOREACH(h, hd, link) {
if (h->key == key) {
LIST_REMOVE(h, link);
val = h->val;
free(h);
return (val);
}
}
return (NULL);
}
static void
pshared_clean(void *key, void *val)
{
if (val != NULL)
munmap(val, PAGE_SIZE);
_umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_DESTROY, key, NULL);
}
void *
__thr_pshared_offpage(void *key, int doalloc)
{
struct pthread *curthread;
void *res;
int fd, ins_done;
curthread = _get_curthread();
pshared_rlock(curthread);
res = pshared_lookup(key);
pshared_unlock(curthread);
if (res != NULL)
return (res);
fd = _umtx_op(NULL, UMTX_OP_SHM, doalloc ? UMTX_SHM_CREAT :
UMTX_SHM_LOOKUP, key, NULL);
if (fd == -1)
return (NULL);
res = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
close(fd);
if (res == MAP_FAILED)
return (NULL);
pshared_wlock(curthread);
ins_done = pshared_insert(key, &res);
pshared_unlock(curthread);
if (!ins_done) {
pshared_clean(key, res);
res = NULL;
}
return (res);
}
void
__thr_pshared_destroy(void *key)
{
struct pthread *curthread;
void *val;
curthread = _get_curthread();
pshared_wlock(curthread);
val = pshared_remove(key);
pshared_unlock(curthread);
pshared_clean(key, val);
pshared_gc(curthread);
}
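Taken together, the life cycle implemented above is: the first
initialization of a shared lock registers the lock's address with the
kernel (UMTX_SHM_CREAT) and maps a page over the returned descriptor;
later lookups from any process that maps the same backing object resolve
through the hash, or through UMTX_SHM_LOOKUP, to the same page; and
__thr_pshared_destroy() unhashes and unmaps the page, drops the kernel
registration (UMTX_SHM_DESTROY), then sweeps the hash with
UMTX_SHM_ALIVE to collect entries whose backing objects have died.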

======================================================================

@ -46,7 +46,12 @@ __weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
#define CHECK_AND_INIT_RWLOCK \
if (__predict_false((prwlock = (*rwlock)) <= THR_RWLOCK_DESTROYED)) { \
if (*rwlock == THR_PSHARED_PTR) { \
prwlock = __thr_pshared_offpage(rwlock, 0); \
if (prwlock == NULL) \
return (EINVAL); \
} else if (__predict_false((prwlock = (*rwlock)) <= \
THR_RWLOCK_DESTROYED)) { \
if (prwlock == THR_RWLOCK_INITIALIZER) { \
int ret; \
ret = init_static(_get_curthread(), rwlock); \
@ -63,14 +68,23 @@ __weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
*/
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
pthread_rwlock_t prwlock;
prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock));
if (prwlock == NULL)
return (ENOMEM);
*rwlock = prwlock;
if (attr == NULL || *attr == NULL ||
(*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
prwlock = calloc(1, sizeof(struct pthread_rwlock));
if (prwlock == NULL)
return (ENOMEM);
*rwlock = prwlock;
} else {
prwlock = __thr_pshared_offpage(rwlock, 1);
if (prwlock == NULL)
return (EFAULT);
prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
*rwlock = THR_PSHARED_PTR;
}
return (0);
}
@ -85,9 +99,12 @@ _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
ret = 0;
else if (prwlock == THR_RWLOCK_DESTROYED)
ret = EINVAL;
else {
else if (prwlock == THR_PSHARED_PTR) {
*rwlock = THR_RWLOCK_DESTROYED;
__thr_pshared_destroy(rwlock);
ret = 0;
} else {
*rwlock = THR_RWLOCK_DESTROYED;
free(prwlock);
ret = 0;
}
@ -112,8 +129,9 @@ init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
}
int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
*rwlock = NULL;
return (rwlock_init(rwlock, attr));
}
@ -235,7 +253,7 @@ _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
ret = _thr_rwlock_trywrlock(&prwlock->lock);
if (ret == 0)
prwlock->owner = curthread;
prwlock->owner = TID(curthread);
return (ret);
}
@ -254,19 +272,19 @@ rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
*/
ret = _thr_rwlock_trywrlock(&prwlock->lock);
if (ret == 0) {
prwlock->owner = curthread;
prwlock->owner = TID(curthread);
return (ret);
}
if (__predict_false(abstime &&
(abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
return (EINVAL);
for (;;) {
/* goto kernel and lock it */
ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
if (ret == 0) {
prwlock->owner = curthread;
prwlock->owner = TID(curthread);
break;
}
@ -276,7 +294,7 @@ rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
/* if interrupted, try to lock it in userland again. */
if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
ret = 0;
prwlock->owner = curthread;
prwlock->owner = TID(curthread);
break;
}
}
@ -297,23 +315,29 @@ _pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
}
int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
struct pthread *curthread = _get_curthread();
pthread_rwlock_t prwlock;
int ret;
int32_t state;
prwlock = *rwlock;
if (*rwlock == THR_PSHARED_PTR) {
prwlock = __thr_pshared_offpage(rwlock, 0);
if (prwlock == NULL)
return (EINVAL);
} else {
prwlock = *rwlock;
}
if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
return (EINVAL);
state = prwlock->lock.rw_state;
if (state & URWLOCK_WRITE_OWNER) {
if (__predict_false(prwlock->owner != curthread))
if (__predict_false(prwlock->owner != TID(curthread)))
return (EPERM);
prwlock->owner = NULL;
prwlock->owner = 0;
}
ret = _thr_rwlock_unlock(&prwlock->lock);

======================================================================

@ -45,25 +45,21 @@ _pthread_rwlockattr_destroy(pthread_rwlockattr_t *rwlockattr)
pthread_rwlockattr_t prwlockattr;
if (rwlockattr == NULL)
return(EINVAL);
return (EINVAL);
prwlockattr = *rwlockattr;
if (prwlockattr == NULL)
return(EINVAL);
return (EINVAL);
free(prwlockattr);
return(0);
return (0);
}
int
_pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *rwlockattr,
int *pshared)
{
*pshared = (*rwlockattr)->pshared;
return(0);
return (0);
}
int
@ -72,28 +68,24 @@ _pthread_rwlockattr_init(pthread_rwlockattr_t *rwlockattr)
pthread_rwlockattr_t prwlockattr;
if (rwlockattr == NULL)
return(EINVAL);
prwlockattr = (pthread_rwlockattr_t)
malloc(sizeof(struct pthread_rwlockattr));
return (EINVAL);
prwlockattr = malloc(sizeof(struct pthread_rwlockattr));
if (prwlockattr == NULL)
return(ENOMEM);
return (ENOMEM);
prwlockattr->pshared = PTHREAD_PROCESS_PRIVATE;
*rwlockattr = prwlockattr;
return(0);
return (0);
}
int
_pthread_rwlockattr_setpshared(pthread_rwlockattr_t *rwlockattr, int pshared)
{
/* Only PTHREAD_PROCESS_PRIVATE is supported. */
if (pshared != PTHREAD_PROCESS_PRIVATE)
return(EINVAL);
if (pshared != PTHREAD_PROCESS_PRIVATE &&
pshared != PTHREAD_PROCESS_SHARED)
return (EINVAL);
(*rwlockattr)->pshared = pshared;
return(0);
return (0);
}

======================================================================

@ -1432,3 +1432,10 @@ chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
return (chglimit(uip, &uip->ui_kqcnt, diff, max, "kqcnt"));
}
int
chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
{
return (chglimit(uip, &uip->ui_umtxcnt, diff, max, "umtxcnt"));
}

======================================================================

@ -1,8 +1,12 @@
/*-
* Copyright (c) 2015 The FreeBSD Foundation
* Copyright (c) 2004, David Xu <davidxu@freebsd.org>
* Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
* All rights reserved.
*
* Portions of this software were developed by Konstantin Belousov
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@ -33,12 +37,19 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
@ -47,9 +58,12 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>
#include <sys/umtx.h>
#include <security/mac/mac_framework.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
@ -213,6 +227,7 @@ SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_
static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0, "umtx chain stats");
#endif
static void umtx_shm_init(void);
static void umtxq_sysinit(void *);
static void umtxq_hash(struct umtx_key *key);
static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
@ -399,6 +414,7 @@ umtxq_sysinit(void *arg __unused)
mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
EVENTHANDLER_PRI_ANY);
umtx_shm_init();
}
struct umtx_q *
@ -3440,6 +3456,310 @@ __umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap)
return (do_sem2_wake(td, uap->obj));
}
#define USHM_OBJ_UMTX(o) \
((struct umtx_shm_obj_list *)(&(o)->umtx_data))
#define USHMF_REG_LINKED 0x0001
#define USHMF_OBJ_LINKED 0x0002
struct umtx_shm_reg {
TAILQ_ENTRY(umtx_shm_reg) ushm_reg_link;
LIST_ENTRY(umtx_shm_reg) ushm_obj_link;
struct umtx_key ushm_key;
struct ucred *ushm_cred;
struct shmfd *ushm_obj;
u_int ushm_refcnt;
u_int ushm_flags;
};
LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg);
TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg);
static uma_zone_t umtx_shm_reg_zone;
static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS];
static struct mtx umtx_shm_lock;
static struct umtx_shm_reg_head umtx_shm_reg_delfree =
TAILQ_HEAD_INITIALIZER(umtx_shm_reg_delfree);
static void umtx_shm_free_reg(struct umtx_shm_reg *reg);
static void
umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused)
{
struct umtx_shm_reg_head d;
struct umtx_shm_reg *reg, *reg1;
TAILQ_INIT(&d);
mtx_lock(&umtx_shm_lock);
TAILQ_CONCAT(&d, &umtx_shm_reg_delfree, ushm_reg_link);
mtx_unlock(&umtx_shm_lock);
TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) {
TAILQ_REMOVE(&d, reg, ushm_reg_link);
umtx_shm_free_reg(reg);
}
}
static struct task umtx_shm_reg_delfree_task =
TASK_INITIALIZER(0, umtx_shm_reg_delfree_tq, NULL);
static struct umtx_shm_reg *
umtx_shm_find_reg_locked(const struct umtx_key *key)
{
struct umtx_shm_reg *reg;
struct umtx_shm_reg_head *reg_head;
KASSERT(key->shared, ("umtx_p_find_rg: private key"));
mtx_assert(&umtx_shm_lock, MA_OWNED);
reg_head = &umtx_shm_registry[key->hash];
TAILQ_FOREACH(reg, reg_head, ushm_reg_link) {
KASSERT(reg->ushm_key.shared,
("non-shared key on reg %p %d", reg, reg->ushm_key.shared));
if (reg->ushm_key.info.shared.object ==
key->info.shared.object &&
reg->ushm_key.info.shared.offset ==
key->info.shared.offset) {
KASSERT(reg->ushm_key.type == TYPE_SHM, ("TYPE_USHM"));
KASSERT(reg->ushm_refcnt > 0,
("reg %p refcnt 0 onlist", reg));
KASSERT((reg->ushm_flags & USHMF_REG_LINKED) != 0,
("reg %p not linked", reg));
reg->ushm_refcnt++;
return (reg);
}
}
return (NULL);
}
static struct umtx_shm_reg *
umtx_shm_find_reg(const struct umtx_key *key)
{
struct umtx_shm_reg *reg;
mtx_lock(&umtx_shm_lock);
reg = umtx_shm_find_reg_locked(key);
mtx_unlock(&umtx_shm_lock);
return (reg);
}
static void
umtx_shm_free_reg(struct umtx_shm_reg *reg)
{
chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
crfree(reg->ushm_cred);
shm_drop(reg->ushm_obj);
uma_zfree(umtx_shm_reg_zone, reg);
}
static bool
umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
{
bool res;
mtx_assert(&umtx_shm_lock, MA_OWNED);
KASSERT(reg->ushm_refcnt > 0, ("ushm_reg %p refcnt 0", reg));
reg->ushm_refcnt--;
res = reg->ushm_refcnt == 0;
if (res || force) {
if ((reg->ushm_flags & USHMF_REG_LINKED) != 0) {
TAILQ_REMOVE(&umtx_shm_registry[reg->ushm_key.hash],
reg, ushm_reg_link);
reg->ushm_flags &= ~USHMF_REG_LINKED;
}
if ((reg->ushm_flags & USHMF_OBJ_LINKED) != 0) {
LIST_REMOVE(reg, ushm_obj_link);
reg->ushm_flags &= ~USHMF_OBJ_LINKED;
}
}
return (res);
}
static void
umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
{
vm_object_t object;
bool dofree;
if (force) {
object = reg->ushm_obj->shm_object;
VM_OBJECT_WLOCK(object);
object->flags |= OBJ_UMTXDEAD;
VM_OBJECT_WUNLOCK(object);
}
mtx_lock(&umtx_shm_lock);
dofree = umtx_shm_unref_reg_locked(reg, force);
mtx_unlock(&umtx_shm_lock);
if (dofree)
umtx_shm_free_reg(reg);
}
void
umtx_shm_object_init(vm_object_t object)
{
LIST_INIT(USHM_OBJ_UMTX(object));
}
void
umtx_shm_object_terminated(vm_object_t object)
{
struct umtx_shm_reg *reg, *reg1;
bool dofree;
dofree = false;
mtx_lock(&umtx_shm_lock);
LIST_FOREACH_SAFE(reg, USHM_OBJ_UMTX(object), ushm_obj_link, reg1) {
if (umtx_shm_unref_reg_locked(reg, true)) {
TAILQ_INSERT_TAIL(&umtx_shm_reg_delfree, reg,
ushm_reg_link);
dofree = true;
}
}
mtx_unlock(&umtx_shm_lock);
if (dofree)
taskqueue_enqueue(taskqueue_thread, &umtx_shm_reg_delfree_task);
}
static int
umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
struct umtx_shm_reg **res)
{
struct umtx_shm_reg *reg, *reg1;
struct ucred *cred;
int error;
reg = umtx_shm_find_reg(key);
if (reg != NULL) {
*res = reg;
return (0);
}
cred = td->td_ucred;
if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
return (ENOMEM);
reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
reg->ushm_refcnt = 1;
bcopy(key, &reg->ushm_key, sizeof(*key));
reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR);
reg->ushm_cred = crhold(cred);
error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
if (error != 0) {
umtx_shm_free_reg(reg);
return (error);
}
mtx_lock(&umtx_shm_lock);
reg1 = umtx_shm_find_reg_locked(key);
if (reg1 != NULL) {
mtx_unlock(&umtx_shm_lock);
umtx_shm_free_reg(reg);
*res = reg1;
return (0);
}
reg->ushm_refcnt++;
TAILQ_INSERT_TAIL(&umtx_shm_registry[key->hash], reg, ushm_reg_link);
LIST_INSERT_HEAD(USHM_OBJ_UMTX(key->info.shared.object), reg,
ushm_obj_link);
reg->ushm_flags = USHMF_REG_LINKED | USHMF_OBJ_LINKED;
mtx_unlock(&umtx_shm_lock);
*res = reg;
return (0);
}
static int
umtx_shm_alive(struct thread *td, void *addr)
{
vm_map_t map;
vm_map_entry_t entry;
vm_object_t object;
vm_pindex_t pindex;
vm_prot_t prot;
int res, ret;
boolean_t wired;
map = &td->td_proc->p_vmspace->vm_map;
res = vm_map_lookup(&map, (uintptr_t)addr, VM_PROT_READ, &entry,
&object, &pindex, &prot, &wired);
if (res != KERN_SUCCESS)
return (EFAULT);
if (object == NULL)
ret = EINVAL;
else
ret = (object->flags & OBJ_UMTXDEAD) != 0 ? ENOTTY : 0;
vm_map_lookup_done(map, entry);
return (ret);
}
static void
umtx_shm_init(void)
{
int i;
umtx_shm_reg_zone = uma_zcreate("umtx_shm", sizeof(struct umtx_shm_reg),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
mtx_init(&umtx_shm_lock, "umtxshm", NULL, MTX_DEF);
for (i = 0; i < nitems(umtx_shm_registry); i++)
TAILQ_INIT(&umtx_shm_registry[i]);
}
static int
umtx_shm(struct thread *td, void *addr, u_int flags)
{
struct umtx_key key;
struct umtx_shm_reg *reg;
struct file *fp;
int error, fd;
if (__bitcount(flags & (UMTX_SHM_CREAT | UMTX_SHM_LOOKUP |
UMTX_SHM_DESTROY | UMTX_SHM_ALIVE)) != 1)
return (EINVAL);
if ((flags & UMTX_SHM_ALIVE) != 0)
return (umtx_shm_alive(td, addr));
error = umtx_key_get(addr, TYPE_SHM, PROCESS_SHARE, &key);
if (error != 0)
return (error);
KASSERT(key.shared == 1, ("non-shared key"));
if ((flags & UMTX_SHM_CREAT) != 0) {
error = umtx_shm_create_reg(td, &key, &reg);
} else {
reg = umtx_shm_find_reg(&key);
if (reg == NULL)
error = ESRCH;
}
umtx_key_release(&key);
if (error != 0)
return (error);
KASSERT(reg != NULL, ("no reg"));
if ((flags & UMTX_SHM_DESTROY) != 0) {
umtx_shm_unref_reg(reg, true);
} else {
#if 0
#ifdef MAC
error = mac_posixshm_check_open(td->td_ucred,
reg->ushm_obj, FFLAGS(O_RDWR));
if (error == 0)
#endif
error = shm_access(reg->ushm_obj, td->td_ucred,
FFLAGS(O_RDWR));
if (error == 0)
#endif
error = falloc_caps(td, &fp, &fd, O_CLOEXEC, NULL);
if (error == 0) {
shm_hold(reg->ushm_obj);
finit(fp, FFLAGS(O_RDWR), DTYPE_SHM, reg->ushm_obj,
&shm_ops);
td->td_retval[0] = fd;
fdrop(fp, td);
}
}
umtx_shm_unref_reg(reg, false);
return (error);
}
static int
__umtx_op_shm(struct thread *td, struct _umtx_op_args *uap)
{
return (umtx_shm(td, uap->uaddr1, uap->val));
}
typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);
static const _umtx_op_func op_table[] = {
@ -3473,6 +3793,7 @@ static const _umtx_op_func op_table[] = {
[UMTX_OP_MUTEX_WAKE2] = __umtx_op_wake2_umutex,
[UMTX_OP_SEM2_WAIT] = __umtx_op_sem2_wait,
[UMTX_OP_SEM2_WAKE] = __umtx_op_sem2_wake,
[UMTX_OP_SHM] = __umtx_op_shm,
};
int
@ -3768,6 +4089,7 @@ static const _umtx_op_func op_table_compat32[] = {
[UMTX_OP_MUTEX_WAKE2] = __umtx_op_wake2_umutex,
[UMTX_OP_SEM2_WAIT] = __umtx_op_sem2_wait_compat32,
[UMTX_OP_SEM2_WAKE] = __umtx_op_sem2_wake,
[UMTX_OP_SHM] = __umtx_op_shm,
};
int

======================================================================

@ -109,15 +109,10 @@ static dev_t shm_dev_ino;
#define SHM_HASH(fnv) (&shm_dictionary[(fnv) & shm_hash])
static int shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags);
static struct shmfd *shm_alloc(struct ucred *ucred, mode_t mode);
static void shm_init(void *arg);
static void shm_drop(struct shmfd *shmfd);
static struct shmfd *shm_hold(struct shmfd *shmfd);
static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int shm_dotruncate(struct shmfd *shmfd, off_t length);
static fo_rdwr_t shm_read;
static fo_rdwr_t shm_write;
@ -131,7 +126,7 @@ static fo_fill_kinfo_t shm_fill_kinfo;
static fo_mmap_t shm_mmap;
/* File descriptor operations. */
static struct fileops shm_ops = {
struct fileops shm_ops = {
.fo_read = shm_read,
.fo_write = shm_write,
.fo_truncate = shm_truncate,
@ -412,7 +407,7 @@ shm_close(struct file *fp, struct thread *td)
return (0);
}
static int
int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
vm_object_t object;
@ -521,7 +516,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
* shmfd object management including creation and reference counting
* routines.
*/
static struct shmfd *
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
struct shmfd *shmfd;
@ -559,7 +554,7 @@ shm_alloc(struct ucred *ucred, mode_t mode)
return (shmfd);
}
static struct shmfd *
struct shmfd *
shm_hold(struct shmfd *shmfd)
{
@ -567,7 +562,7 @@ shm_hold(struct shmfd *shmfd)
return (shmfd);
}
static void
void
shm_drop(struct shmfd *shmfd)
{
@ -588,7 +583,7 @@ shm_drop(struct shmfd *shmfd)
* Determine if the credentials have sufficient permissions for a
* specified combination of FREAD and FWRITE.
*/
static int
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
accmode_t accmode;

======================================================================

@ -233,6 +233,13 @@ struct shmfd {
int shm_map(struct file *fp, size_t size, off_t offset, void **memp);
int shm_unmap(struct file *fp, void *mem, size_t size);
int shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags);
struct shmfd *shm_alloc(struct ucred *ucred, mode_t mode);
struct shmfd *shm_hold(struct shmfd *shmfd);
void shm_drop(struct shmfd *shmfd);
int shm_dotruncate(struct shmfd *shmfd, off_t length);
extern struct fileops shm_ops;
#else /* !_KERNEL */
__BEGIN_DECLS

======================================================================

@ -104,8 +104,9 @@ struct __wrusage {
#define RLIMIT_NPTS 11 /* pseudo-terminals */
#define RLIMIT_SWAP 12 /* swap used */
#define RLIMIT_KQUEUES 13 /* kqueues allocated */
#define RLIMIT_UMTXP 14 /* process-shared umtx */
#define RLIM_NLIMITS 14 /* number of resource limits */
#define RLIM_NLIMITS 15 /* number of resource limits */
#define RLIM_INFINITY ((rlim_t)(((uint64_t)1 << 63) - 1))
/* XXX Missing: RLIM_SAVED_MAX, RLIM_SAVED_CUR */
@ -131,6 +132,7 @@ static const char *rlimit_ident[RLIM_NLIMITS] = {
"npts",
"swap",
"kqueues",
"umtx",
};
#endif
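A hedged sketch of working with the new limit from userland
(RLIMIT_UMTXP is introduced by this commit; everything else is standard
getrlimit(2)/setrlimit(2)):

	#include <sys/types.h>
	#include <sys/resource.h>
	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		struct rlimit rl;

		/* Per-uid cap on pages used for process-shared locks. */
		getrlimit(RLIMIT_UMTXP, &rl);
		printf("umtxp: cur %jd max %jd\n",
		    (intmax_t)rl.rlim_cur, (intmax_t)rl.rlim_max);
		rl.rlim_cur = rl.rlim_max;
		return (setrlimit(RLIMIT_UMTXP, &rl) == 0 ? 0 : 1);
	}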

======================================================================

@ -101,6 +101,7 @@ struct uidinfo {
long ui_proccnt; /* (b) number of processes */
long ui_ptscnt; /* (b) number of pseudo-terminals */
long ui_kqcnt; /* (b) number of kqueues */
long ui_umtxcnt; /* (b) number of shared umtxs */
uid_t ui_uid; /* (a) uid */
u_int ui_ref; /* (b) reference count */
#ifdef RACCT
@ -124,6 +125,7 @@ int chgproccnt(struct uidinfo *uip, int diff, rlim_t maxval);
int chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to,
rlim_t maxval);
int chgptscnt(struct uidinfo *uip, int diff, rlim_t maxval);
int chgumtxcnt(struct uidinfo *uip, int diff, rlim_t maxval);
int fuswintr(void *base);
int kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
struct rlimit *limp);

======================================================================

@ -83,6 +83,7 @@
#define UMTX_OP_MUTEX_WAKE2 22
#define UMTX_OP_SEM2_WAIT 23
#define UMTX_OP_SEM2_WAKE 24
#define UMTX_OP_SHM 25
/* Flags for UMTX_OP_CV_WAIT */
#define CVWAIT_CHECK_UNPARKING 0x01
@ -93,6 +94,12 @@
#define UMTX_CHECK_UNPARKING CVWAIT_CHECK_UNPARKING
/* Flags for UMTX_OP_SHM */
#define UMTX_SHM_CREAT 0x0001
#define UMTX_SHM_LOOKUP 0x0002
#define UMTX_SHM_DESTROY 0x0004
#define UMTX_SHM_ALIVE 0x0008
#ifndef _KERNEL
int _umtx_op(void *obj, int op, u_long val, void *uaddr, void *uaddr2);
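For illustration, the raw protocol these flags define, as driven by
__thr_pshared_offpage() earlier in this diff (a sketch, not a supported
interface of its own):

	#include <sys/types.h>
	#include <sys/mman.h>
	#include <sys/umtx.h>
	#include <unistd.h>

	static void *
	offpage_create(void *key)
	{
		void *p;
		int fd;

		/*
		 * The kernel hands back a descriptor for the per-key
		 * shared page, which the caller maps and may then close.
		 */
		fd = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_CREAT, key, NULL);
		if (fd == -1)
			return (NULL);
		p = mmap(NULL, (size_t)getpagesize(),
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		close(fd);
		return (p == MAP_FAILED ? NULL : p);
	}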
@ -113,7 +120,8 @@ enum {
TYPE_PI_UMUTEX,
TYPE_PP_UMUTEX,
TYPE_RWLOCK,
TYPE_FUTEX
TYPE_FUTEX,
TYPE_SHM,
};
/* Key to represent a unique userland synchronous object */

======================================================================

@ -264,6 +264,7 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
#if VM_NRESERVLEVEL > 0
LIST_INIT(&object->rvq);
#endif
umtx_shm_object_init(object);
}
/*
@ -475,6 +476,9 @@ vm_object_vndeallocate(vm_object_t object)
}
#endif
if (object->ref_count == 1)
umtx_shm_object_terminated(object);
/*
* The test for text of vp vnode does not need a bypass to
* reach right VV_TEXT there, since it is obtained from
@ -647,6 +651,7 @@ vm_object_deallocate(vm_object_t object)
return;
}
doterm:
umtx_shm_object_terminated(object);
temp = object->backing_object;
if (temp != NULL) {
KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,

======================================================================

@ -174,6 +174,7 @@ struct vm_object {
} un_pager;
struct ucred *cred;
vm_ooffset_t charge;
void *umtx_data;
};
/*
@ -184,6 +185,7 @@ struct vm_object {
#define OBJ_ACTIVE 0x0004 /* active objects */
#define OBJ_DEAD 0x0008 /* dead objects (during rundown) */
#define OBJ_NOSPLIT 0x0010 /* dont split this object */
#define OBJ_UMTXDEAD 0x0020 /* umtx pshared was terminated */
#define OBJ_PIPWNT 0x0040 /* paging in progress wanted */
#define OBJ_MIGHTBEDIRTY 0x0100 /* object might be dirty, only for vnode */
#define OBJ_TMPFS_NODE 0x0200 /* object belongs to tmpfs VREG node */
@ -296,6 +298,9 @@ vm_object_cache_is_empty(vm_object_t object)
return (vm_radix_is_empty(&object->cache));
}
void umtx_shm_object_init(vm_object_t object);
void umtx_shm_object_terminated(vm_object_t object);
vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
boolean_t);

======================================================================

@ -92,6 +92,7 @@ static struct {
{ " pseudo-terminals%-4s %8s", "\n", 1 },
{ " swapuse%-4s %8s", " kB\n", 1024 },
{ " kqueues%-4s %8s", "\n", 1 },
{ " umtxp%-4s %8s", "\n", 1 },
}
},
{ "sh", "unlimited", "", " -H", " -S", "",
@ -110,6 +111,7 @@ static struct {
{ "ulimit%s -p %s", ";\n", 1 },
{ "ulimit%s -w %s", ";\n", 1024 },
{ "ulimit%s -k %s", ";\n", 1 },
{ "ulimit%s -o %s", ";\n", 1 },
}
},
{ "csh", "unlimited", "", " -h", "", NULL,
@ -128,6 +130,7 @@ static struct {
{ "limit%s pseudoterminals %s", ";\n", 1 },
{ "limit%s swapsize %s", ";\n", 1024 },
{ "limit%s kqueues %s", ";\n", 1 },
{ "limit%s umtxp %s", ";\n", 1 },
}
},
{ "bash|bash2", "unlimited", "", " -H", " -S", "",
@ -163,6 +166,7 @@ static struct {
{ "limit%s pseudoterminals %s", ";\n", 1 },
{ "limit%s swapsize %s", ";\n", 1024 },
{ "limit%s kqueues %s", ";\n", 1 },
{ "limit%s umtxp %s", ";\n", 1 },
}
},
{ "ksh|pdksh", "unlimited", "", " -H", " -S", "",
@ -239,6 +243,7 @@ static struct {
{ "pseudoterminals",login_getcapnum },
{ "swapuse", login_getcapsize },
{ "kqueues", login_getcapnum },
{ "umtxp", login_getcapnum },
};
/*
@ -289,7 +294,7 @@ main(int argc, char *argv[])
pid = -1;
optarg = NULL;
while ((ch = getopt(argc, argv,
":EeC:U:BSHP:ab:c:d:f:l:m:n:s:t:u:v:p:w:k:")) != -1) {
":EeC:U:BSHP:ab:c:d:f:l:m:n:s:t:u:v:p:w:k:o:")) != -1) {
switch(ch) {
case 'a':
doall = 1;

======================================================================

@ -47,7 +47,7 @@
static struct {
const char *name;
const char *suffix;
} rlimit_param[14] = {
} rlimit_param[15] = {
{"cputime", "sec"},
{"filesize", "B "},
{"datasize", "B "},
@ -62,9 +62,10 @@ static struct {
{"pseudo-terminals", " "},
{"swapuse", "B "},
{"kqueues", " "},
{"umtxp", " "},
};
#if RLIM_NLIMITS > 14
#if RLIM_NLIMITS > 15
#error "Resource limits have grown. Add new entries to rlimit_param[]."
#endif