2003-04-01 03:46:29 +00:00
|
|
|
/*
|
|
|
|
* Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
|
2006-03-27 23:50:21 +00:00
|
|
|
* Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
|
2003-04-01 03:46:29 +00:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by John Birrell.
|
|
|
|
* 4. Neither the name of the author nor the names of any co-contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* $FreeBSD$
|
|
|
|
*/
|
2005-04-02 01:20:00 +00:00
|
|
|
|
2006-04-04 02:57:49 +00:00
|
|
|
#include "namespace.h"
|
2003-04-01 03:46:29 +00:00
|
|
|
#include <stdlib.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/queue.h>
|
|
|
|
#include <pthread.h>
|
2008-03-25 09:48:10 +00:00
|
|
|
#include <pthread_np.h>
|
2006-04-04 02:57:49 +00:00
|
|
|
#include "un-namespace.h"
|
|
|
|
|
2003-04-01 03:46:29 +00:00
|
|
|
#include "thr_private.h"
|
|
|
|
|
2003-04-01 22:39:31 +00:00
|
|
|
#if defined(_PTHREADS_INVARIANTS)
/*
 * Debug builds keep the TAILQ link pointers NULLed whenever a mutex is
 * not on an owner's queue, so list-membership violations can PANIC early.
 */
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev == NULL))\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev != NULL ||	\
	    (m)->m_qe.tqe_next != NULL))	\
		PANIC("mutex is on list");		\
} while (0)
#else
/* Production builds: the invariants checks compile away entirely. */
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block
 */
#define MUTEX_ADAPTIVE_SPINS	2000
|
2007-10-31 01:37:13 +00:00
|
|
|
|
2003-04-01 03:46:29 +00:00
|
|
|
/*
|
|
|
|
* Prototypes
|
|
|
|
*/
|
2006-04-04 02:57:49 +00:00
|
|
|
int __pthread_mutex_init(pthread_mutex_t *mutex,
|
|
|
|
const pthread_mutexattr_t *mutex_attr);
|
|
|
|
int __pthread_mutex_trylock(pthread_mutex_t *mutex);
|
|
|
|
int __pthread_mutex_lock(pthread_mutex_t *mutex);
|
|
|
|
int __pthread_mutex_timedlock(pthread_mutex_t *mutex,
|
|
|
|
const struct timespec *abstime);
|
2007-12-17 02:53:11 +00:00
|
|
|
int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
|
|
|
|
void *(calloc_cb)(size_t, size_t));
|
|
|
|
int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
|
|
|
|
int _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
|
|
|
|
int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
|
|
|
|
int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
|
2007-12-14 06:25:57 +00:00
|
|
|
int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
|
2007-12-17 02:53:11 +00:00
|
|
|
int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
|
2006-04-04 02:57:49 +00:00
|
|
|
|
|
|
|
static int mutex_self_trylock(pthread_mutex_t);
|
|
|
|
static int mutex_self_lock(pthread_mutex_t,
|
2005-04-02 01:20:00 +00:00
|
|
|
const struct timespec *abstime);
|
2012-08-11 23:17:02 +00:00
|
|
|
static int mutex_unlock_common(struct pthread_mutex *, int, int *);
|
2008-06-24 07:32:12 +00:00
|
|
|
static int mutex_lock_sleep(struct pthread *, pthread_mutex_t,
|
|
|
|
const struct timespec *);
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);
|
2007-12-14 06:25:57 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
static int
|
|
|
|
mutex_init(pthread_mutex_t *mutex,
|
2010-09-28 04:57:56 +00:00
|
|
|
const struct pthread_mutex_attr *mutex_attr,
|
2007-11-27 03:16:44 +00:00
|
|
|
void *(calloc_cb)(size_t, size_t))
|
2003-04-01 03:46:29 +00:00
|
|
|
{
|
2006-02-28 06:06:19 +00:00
|
|
|
const struct pthread_mutex_attr *attr;
|
2005-04-02 01:20:00 +00:00
|
|
|
struct pthread_mutex *pmutex;
|
|
|
|
|
2006-02-28 06:06:19 +00:00
|
|
|
if (mutex_attr == NULL) {
|
2006-03-27 23:50:21 +00:00
|
|
|
attr = &_pthread_mutexattr_default;
|
2006-02-28 06:06:19 +00:00
|
|
|
} else {
|
2010-09-28 04:57:56 +00:00
|
|
|
attr = mutex_attr;
|
2006-02-28 06:06:19 +00:00
|
|
|
if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
|
|
|
|
attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
|
|
|
|
return (EINVAL);
|
|
|
|
if (attr->m_protocol < PTHREAD_PRIO_NONE ||
|
|
|
|
attr->m_protocol > PTHREAD_PRIO_PROTECT)
|
|
|
|
return (EINVAL);
|
2005-04-02 01:20:00 +00:00
|
|
|
}
|
2006-02-28 06:06:19 +00:00
|
|
|
if ((pmutex = (pthread_mutex_t)
|
2007-11-27 03:16:44 +00:00
|
|
|
calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
|
2006-02-28 06:06:19 +00:00
|
|
|
return (ENOMEM);
|
|
|
|
|
2010-12-22 05:01:52 +00:00
|
|
|
pmutex->m_flags = attr->m_type;
|
2006-02-28 06:06:19 +00:00
|
|
|
pmutex->m_owner = NULL;
|
|
|
|
pmutex->m_count = 0;
|
2007-12-14 06:25:57 +00:00
|
|
|
pmutex->m_spinloops = 0;
|
|
|
|
pmutex->m_yieldloops = 0;
|
2006-02-28 06:06:19 +00:00
|
|
|
MUTEX_INIT_LINK(pmutex);
|
2006-08-28 04:52:50 +00:00
|
|
|
switch(attr->m_protocol) {
|
2010-09-28 04:57:56 +00:00
|
|
|
case PTHREAD_PRIO_NONE:
|
|
|
|
pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
|
|
|
|
pmutex->m_lock.m_flags = 0;
|
|
|
|
break;
|
2006-08-28 04:52:50 +00:00
|
|
|
case PTHREAD_PRIO_INHERIT:
|
|
|
|
pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
|
|
|
|
pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
|
|
|
|
break;
|
|
|
|
case PTHREAD_PRIO_PROTECT:
|
|
|
|
pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
|
|
|
|
pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
|
|
|
|
pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
|
|
|
|
break;
|
|
|
|
}
|
2007-12-14 06:25:57 +00:00
|
|
|
|
2010-12-22 05:01:52 +00:00
|
|
|
if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
|
2007-12-14 06:25:57 +00:00
|
|
|
pmutex->m_spinloops =
|
|
|
|
_thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
|
|
|
|
pmutex->m_yieldloops = _thr_yieldloops;
|
|
|
|
}
|
|
|
|
|
2006-02-28 06:06:19 +00:00
|
|
|
*mutex = pmutex;
|
|
|
|
return (0);
|
2003-04-01 03:46:29 +00:00
|
|
|
}
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
static int
|
|
|
|
init_static(struct pthread *thread, pthread_mutex_t *mutex)
|
2003-04-01 03:46:29 +00:00
|
|
|
{
|
2005-04-02 01:20:00 +00:00
|
|
|
int ret;
|
2003-04-01 22:39:31 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
|
2004-01-19 15:00:57 +00:00
|
|
|
|
2010-09-28 04:57:56 +00:00
|
|
|
if (*mutex == THR_MUTEX_INITIALIZER)
|
|
|
|
ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
|
|
|
|
else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
|
|
|
|
ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc);
|
2005-04-02 01:20:00 +00:00
|
|
|
else
|
|
|
|
ret = 0;
|
|
|
|
THR_LOCK_RELEASE(thread, &_mutex_static_lock);
|
2003-04-01 22:39:31 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2006-11-11 13:33:47 +00:00
|
|
|
static void
|
|
|
|
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
|
|
|
|
{
|
|
|
|
struct pthread_mutex *m2;
|
|
|
|
|
|
|
|
m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
|
|
|
|
if (m2 != NULL)
|
|
|
|
m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
|
|
|
|
else
|
|
|
|
m->m_lock.m_ceilings[1] = -1;
|
|
|
|
}
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
int
|
|
|
|
__pthread_mutex_init(pthread_mutex_t *mutex,
|
|
|
|
const pthread_mutexattr_t *mutex_attr)
|
|
|
|
{
|
2010-09-28 04:57:56 +00:00
|
|
|
return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc);
|
2007-11-27 03:16:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* This function is used internally by malloc. */
|
|
|
|
int
|
|
|
|
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
|
|
|
|
void *(calloc_cb)(size_t, size_t))
|
|
|
|
{
|
|
|
|
static const struct pthread_mutex_attr attr = {
|
2007-11-28 00:16:24 +00:00
|
|
|
.m_type = PTHREAD_MUTEX_NORMAL,
|
2007-11-27 03:16:44 +00:00
|
|
|
.m_protocol = PTHREAD_PRIO_NONE,
|
2008-05-29 07:57:33 +00:00
|
|
|
.m_ceiling = 0
|
2007-11-27 03:16:44 +00:00
|
|
|
};
|
2010-09-01 03:11:21 +00:00
|
|
|
int ret;
|
2007-11-27 03:16:44 +00:00
|
|
|
|
2010-09-28 04:57:56 +00:00
|
|
|
ret = mutex_init(mutex, &attr, calloc_cb);
|
2010-09-01 03:11:21 +00:00
|
|
|
if (ret == 0)
|
2010-12-22 05:01:52 +00:00
|
|
|
(*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
|
2010-09-01 03:11:21 +00:00
|
|
|
return (ret);
|
2005-04-02 01:20:00 +00:00
|
|
|
}
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
void
|
|
|
|
_mutex_fork(struct pthread *curthread)
|
2003-04-01 03:46:29 +00:00
|
|
|
{
|
2005-04-02 01:20:00 +00:00
|
|
|
struct pthread_mutex *m;
|
|
|
|
|
2006-01-14 11:33:43 +00:00
|
|
|
/*
|
|
|
|
* Fix mutex ownership for child process.
|
|
|
|
* note that process shared mutex should not
|
|
|
|
* be inherited because owner is forking thread
|
|
|
|
* which is in parent process, they should be
|
2006-01-16 05:33:48 +00:00
|
|
|
* removed from the owned mutex list, current,
|
2006-01-14 11:33:43 +00:00
|
|
|
* process shared mutex is not supported, so I
|
|
|
|
* am not worried.
|
|
|
|
*/
|
2006-08-28 04:52:50 +00:00
|
|
|
|
2006-03-27 23:50:21 +00:00
|
|
|
TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
|
2006-08-28 04:52:50 +00:00
|
|
|
m->m_lock.m_owner = TID(curthread);
|
|
|
|
TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
|
|
|
|
m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
|
2003-04-01 03:46:29 +00:00
|
|
|
}
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
int
|
|
|
|
_pthread_mutex_destroy(pthread_mutex_t *mutex)
|
2004-02-18 15:16:31 +00:00
|
|
|
{
|
2006-11-11 13:33:47 +00:00
|
|
|
pthread_mutex_t m;
|
2010-10-27 04:19:07 +00:00
|
|
|
int ret;
|
2005-04-02 01:20:00 +00:00
|
|
|
|
2010-09-28 04:57:56 +00:00
|
|
|
m = *mutex;
|
|
|
|
if (m < THR_MUTEX_DESTROYED) {
|
|
|
|
ret = 0;
|
|
|
|
} else if (m == THR_MUTEX_DESTROYED) {
|
2005-04-02 01:20:00 +00:00
|
|
|
ret = EINVAL;
|
2010-09-28 04:57:56 +00:00
|
|
|
} else {
|
2010-12-22 05:01:52 +00:00
|
|
|
if (m->m_owner != NULL) {
|
2005-04-02 01:20:00 +00:00
|
|
|
ret = EBUSY;
|
|
|
|
} else {
|
2010-09-28 04:57:56 +00:00
|
|
|
*mutex = THR_MUTEX_DESTROYED;
|
2005-04-02 01:20:00 +00:00
|
|
|
MUTEX_ASSERT_NOT_OWNED(m);
|
2006-03-27 23:50:21 +00:00
|
|
|
free(m);
|
2010-10-27 04:19:07 +00:00
|
|
|
ret = 0;
|
2005-04-02 01:20:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (ret);
|
2004-02-18 15:16:31 +00:00
|
|
|
}
|
|
|
|
|
Add my recent work of adaptive spin mutex code. Use two environments variable
to tune pthread mutex performance:
1. LIBPTHREAD_SPINLOOPS
If a pthread mutex is being locked by another thread, this environment
variable sets total number of spin loops before the current thread
sleeps in kernel, this saves a syscall overhead if the mutex will be
unlocked very soon (well written application code).
2. LIBPTHREAD_YIELDLOOPS
If a pthread mutex is being locked by other threads, this environment
variable sets total number of sched_yield() loops before the currrent
thread sleeps in kernel. if a pthread mutex is locked, the current thread
gives up cpu, but will not sleep in kernel, this means, current thread
does not set contention bit in mutex, but let lock owner to run again
if the owner is on kernel's run queue, and when lock owner unlocks the
mutex, it does not need to enter kernel and do lots of work to resume
mutex waiters, in some cases, this saves lots of syscall overheads for
mutex owner.
In my practice, sometimes LIBPTHREAD_YIELDLOOPS can massively improve performance
than LIBPTHREAD_SPINLOOPS, this depends on application. These two environments
are global to all pthread mutex, there is no interface to set them for each
pthread mutex, the default values are zero, this means spinning is turned off
by default.
2007-10-30 05:57:37 +00:00
|
|
|
/*
 * Record ownership: stamp the owner and append the mutex to the owning
 * thread's plain or priority-protect queue as appropriate.
 */
#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)
2007-10-30 05:57:37 +00:00
|
|
|
|
2010-12-22 05:01:52 +00:00
|
|
|
/*
 * Drop ownership: clear the owner, unlink the mutex from the owning
 * thread's queue, and (for priority-protect locks) recompute the
 * thread's inherited ceiling.
 */
#define DEQUEUE_MUTEX(curthread, m)					\
	(m)->m_owner = NULL;						\
	MUTEX_ASSERT_IS_OWNED(m);					\
	if (__predict_true(((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) \
		TAILQ_REMOVE(&curthread->mutexq, (m), m_qe);		\
	else {								\
		TAILQ_REMOVE(&curthread->pp_mutexq, (m), m_qe);		\
		set_inherited_priority(curthread, m);			\
	}								\
	MUTEX_INIT_LINK(m);
|
|
|
|
|
2010-09-28 04:57:56 +00:00
|
|
|
/*
 * Resolve *mutex into a usable struct pointer in `m`, lazily
 * initializing static-initializer sentinels; returns EINVAL from the
 * enclosing function for a destroyed mutex.
 */
#define CHECK_AND_INIT_MUTEX						\
	if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) {	\
		if (m == THR_MUTEX_DESTROYED)				\
			return (EINVAL);				\
		int ret;						\
		ret = init_static(_get_curthread(), mutex);		\
		if (ret)						\
			return (ret);					\
		m = *mutex;						\
	}
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
static int
|
2010-09-28 04:57:56 +00:00
|
|
|
mutex_trylock_common(pthread_mutex_t *mutex)
|
2004-02-18 15:16:31 +00:00
|
|
|
{
|
2010-09-28 04:57:56 +00:00
|
|
|
struct pthread *curthread = _get_curthread();
|
|
|
|
struct pthread_mutex *m = *mutex;
|
2006-08-28 04:52:50 +00:00
|
|
|
uint32_t id;
|
2006-03-27 23:50:21 +00:00
|
|
|
int ret;
|
2005-04-02 01:20:00 +00:00
|
|
|
|
2006-08-28 04:52:50 +00:00
|
|
|
id = TID(curthread);
|
2010-12-22 05:01:52 +00:00
|
|
|
if (m->m_flags & PMUTEX_FLAG_PRIVATE)
|
2010-09-01 03:11:21 +00:00
|
|
|
THR_CRITICAL_ENTER(curthread);
|
2006-08-28 04:52:50 +00:00
|
|
|
ret = _thr_umutex_trylock(&m->m_lock, id);
|
2010-09-28 04:57:56 +00:00
|
|
|
if (__predict_true(ret == 0)) {
|
Add my recent work of adaptive spin mutex code. Use two environments variable
to tune pthread mutex performance:
1. LIBPTHREAD_SPINLOOPS
If a pthread mutex is being locked by another thread, this environment
variable sets total number of spin loops before the current thread
sleeps in kernel, this saves a syscall overhead if the mutex will be
unlocked very soon (well written application code).
2. LIBPTHREAD_YIELDLOOPS
If a pthread mutex is being locked by other threads, this environment
variable sets total number of sched_yield() loops before the currrent
thread sleeps in kernel. if a pthread mutex is locked, the current thread
gives up cpu, but will not sleep in kernel, this means, current thread
does not set contention bit in mutex, but let lock owner to run again
if the owner is on kernel's run queue, and when lock owner unlocks the
mutex, it does not need to enter kernel and do lots of work to resume
mutex waiters, in some cases, this saves lots of syscall overheads for
mutex owner.
In my practice, sometimes LIBPTHREAD_YIELDLOOPS can massively improve performance
than LIBPTHREAD_SPINLOOPS, this depends on application. These two environments
are global to all pthread mutex, there is no interface to set them for each
pthread mutex, the default values are zero, this means spinning is turned off
by default.
2007-10-30 05:57:37 +00:00
|
|
|
ENQUEUE_MUTEX(curthread, m);
|
2006-03-27 23:50:21 +00:00
|
|
|
} else if (m->m_owner == curthread) {
|
2006-04-04 02:57:49 +00:00
|
|
|
ret = mutex_self_trylock(m);
|
2006-03-27 23:50:21 +00:00
|
|
|
} /* else {} */
|
2010-12-22 05:01:52 +00:00
|
|
|
if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
|
2010-09-01 03:11:21 +00:00
|
|
|
THR_CRITICAL_LEAVE(curthread);
|
2005-04-02 01:20:00 +00:00
|
|
|
return (ret);
|
2004-02-18 15:16:31 +00:00
|
|
|
}
|
|
|
|
|
2003-04-01 03:46:29 +00:00
|
|
|
int
|
|
|
|
__pthread_mutex_trylock(pthread_mutex_t *mutex)
|
|
|
|
{
|
2010-09-28 04:57:56 +00:00
|
|
|
struct pthread_mutex *m;
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2010-09-28 04:57:56 +00:00
|
|
|
CHECK_AND_INIT_MUTEX
|
|
|
|
|
|
|
|
return (mutex_trylock_common(mutex));
|
2003-04-01 03:46:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2008-06-24 07:32:12 +00:00
|
|
|
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
|
|
|
|
const struct timespec *abstime)
|
2003-04-01 03:46:29 +00:00
|
|
|
{
|
2008-06-24 07:32:12 +00:00
|
|
|
uint32_t id, owner;
|
Add my recent work of adaptive spin mutex code. Use two environments variable
to tune pthread mutex performance:
1. LIBPTHREAD_SPINLOOPS
If a pthread mutex is being locked by another thread, this environment
variable sets total number of spin loops before the current thread
sleeps in kernel, this saves a syscall overhead if the mutex will be
unlocked very soon (well written application code).
2. LIBPTHREAD_YIELDLOOPS
If a pthread mutex is being locked by other threads, this environment
variable sets total number of sched_yield() loops before the currrent
thread sleeps in kernel. if a pthread mutex is locked, the current thread
gives up cpu, but will not sleep in kernel, this means, current thread
does not set contention bit in mutex, but let lock owner to run again
if the owner is on kernel's run queue, and when lock owner unlocks the
mutex, it does not need to enter kernel and do lots of work to resume
mutex waiters, in some cases, this saves lots of syscall overheads for
mutex owner.
In my practice, sometimes LIBPTHREAD_YIELDLOOPS can massively improve performance
than LIBPTHREAD_SPINLOOPS, this depends on application. These two environments
are global to all pthread mutex, there is no interface to set them for each
pthread mutex, the default values are zero, this means spinning is turned off
by default.
2007-10-30 05:57:37 +00:00
|
|
|
int count;
|
2008-06-24 07:32:12 +00:00
|
|
|
int ret;
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2008-06-24 07:32:12 +00:00
|
|
|
if (m->m_owner == curthread)
|
|
|
|
return mutex_self_lock(m, abstime);
|
2007-10-29 21:01:47 +00:00
|
|
|
|
2008-06-24 07:32:12 +00:00
|
|
|
id = TID(curthread);
|
2008-05-29 07:57:33 +00:00
|
|
|
/*
|
|
|
|
* For adaptive mutexes, spin for a bit in the expectation
|
|
|
|
* that if the application requests this mutex type then
|
|
|
|
* the lock is likely to be released quickly and it is
|
|
|
|
* faster than entering the kernel
|
|
|
|
*/
|
2010-09-28 04:57:56 +00:00
|
|
|
if (__predict_false(
|
|
|
|
(m->m_lock.m_flags &
|
|
|
|
(UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
|
|
|
|
goto sleep_in_kernel;
|
2008-05-29 07:57:33 +00:00
|
|
|
|
|
|
|
if (!_thr_is_smp)
|
|
|
|
goto yield_loop;
|
|
|
|
|
|
|
|
count = m->m_spinloops;
|
|
|
|
while (count--) {
|
2008-06-24 07:32:12 +00:00
|
|
|
owner = m->m_lock.m_owner;
|
|
|
|
if ((owner & ~UMUTEX_CONTESTED) == 0) {
|
|
|
|
if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
|
|
|
|
ret = 0;
|
2007-12-14 06:25:57 +00:00
|
|
|
goto done;
|
2008-06-24 07:32:12 +00:00
|
|
|
}
|
2007-10-29 21:01:47 +00:00
|
|
|
}
|
2008-05-29 07:57:33 +00:00
|
|
|
CPU_SPINWAIT;
|
|
|
|
}
|
2007-10-29 21:01:47 +00:00
|
|
|
|
2008-05-29 07:57:33 +00:00
|
|
|
yield_loop:
|
|
|
|
count = m->m_yieldloops;
|
|
|
|
while (count--) {
|
|
|
|
_sched_yield();
|
2008-06-24 07:32:12 +00:00
|
|
|
owner = m->m_lock.m_owner;
|
|
|
|
if ((owner & ~UMUTEX_CONTESTED) == 0) {
|
|
|
|
if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
|
|
|
|
ret = 0;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
2008-05-29 07:57:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
sleep_in_kernel:
|
|
|
|
if (abstime == NULL) {
|
2008-06-24 07:32:12 +00:00
|
|
|
ret = __thr_umutex_lock(&m->m_lock, id);
|
2008-05-29 07:57:33 +00:00
|
|
|
} else if (__predict_false(
|
2008-06-24 07:32:12 +00:00
|
|
|
abstime->tv_nsec < 0 ||
|
2008-05-29 07:57:33 +00:00
|
|
|
abstime->tv_nsec >= 1000000000)) {
|
|
|
|
ret = EINVAL;
|
|
|
|
} else {
|
2008-06-24 07:32:12 +00:00
|
|
|
ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
|
2004-02-18 15:16:31 +00:00
|
|
|
}
|
2008-05-29 07:57:33 +00:00
|
|
|
done:
|
|
|
|
if (ret == 0)
|
|
|
|
ENQUEUE_MUTEX(curthread, m);
|
2008-06-24 07:32:12 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
return (ret);
|
2004-02-18 15:16:31 +00:00
|
|
|
}
|
2003-04-01 22:39:31 +00:00
|
|
|
|
2008-05-29 07:57:33 +00:00
|
|
|
static inline int
|
2010-09-28 04:57:56 +00:00
|
|
|
mutex_lock_common(struct pthread_mutex *m,
|
2010-12-22 05:01:52 +00:00
|
|
|
const struct timespec *abstime, int cvattach)
|
2004-02-18 15:16:31 +00:00
|
|
|
{
|
2010-09-28 04:57:56 +00:00
|
|
|
struct pthread *curthread = _get_curthread();
|
2010-09-01 03:11:21 +00:00
|
|
|
int ret;
|
2003-04-01 22:39:31 +00:00
|
|
|
|
2010-12-22 05:01:52 +00:00
|
|
|
if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
|
2010-09-01 03:11:21 +00:00
|
|
|
THR_CRITICAL_ENTER(curthread);
|
2008-06-24 07:32:12 +00:00
|
|
|
if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
|
2008-05-29 07:57:33 +00:00
|
|
|
ENQUEUE_MUTEX(curthread, m);
|
2010-09-01 03:11:21 +00:00
|
|
|
ret = 0;
|
|
|
|
} else {
|
|
|
|
ret = mutex_lock_sleep(curthread, m, abstime);
|
2008-06-24 07:32:12 +00:00
|
|
|
}
|
2010-12-22 05:01:52 +00:00
|
|
|
if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
|
2010-09-01 03:11:21 +00:00
|
|
|
THR_CRITICAL_LEAVE(curthread);
|
|
|
|
return (ret);
|
2003-04-01 03:46:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2008-05-29 07:57:33 +00:00
|
|
|
__pthread_mutex_lock(pthread_mutex_t *mutex)
|
2003-04-01 03:46:29 +00:00
|
|
|
{
|
2010-09-28 04:57:56 +00:00
|
|
|
struct pthread_mutex *m;
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
_thr_check_init();
|
|
|
|
|
2010-09-28 04:57:56 +00:00
|
|
|
CHECK_AND_INIT_MUTEX
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2010-12-22 05:01:52 +00:00
|
|
|
return (mutex_lock_common(m, NULL, 0));
|
2003-04-01 03:46:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2008-05-29 07:57:33 +00:00
|
|
|
__pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
|
2003-04-01 03:46:29 +00:00
|
|
|
{
|
2010-09-28 04:57:56 +00:00
|
|
|
struct pthread_mutex *m;
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
_thr_check_init();
|
|
|
|
|
2010-09-28 04:57:56 +00:00
|
|
|
CHECK_AND_INIT_MUTEX
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2010-12-22 05:01:52 +00:00
|
|
|
return (mutex_lock_common(m, abstime, 0));
|
2003-12-30 08:44:55 +00:00
|
|
|
}
|
|
|
|
|
2003-04-01 22:39:31 +00:00
|
|
|
int
|
2010-12-22 05:01:52 +00:00
|
|
|
_pthread_mutex_unlock(pthread_mutex_t *mutex)
|
2003-04-01 22:39:31 +00:00
|
|
|
{
|
2010-12-22 05:01:52 +00:00
|
|
|
struct pthread_mutex *mp;
|
|
|
|
|
|
|
|
mp = *mutex;
|
2012-08-11 23:17:02 +00:00
|
|
|
return (mutex_unlock_common(mp, 0, NULL));
|
2003-07-02 02:05:23 +00:00
|
|
|
}
|
|
|
|
|
2003-04-01 03:46:29 +00:00
|
|
|
int
|
2010-12-22 05:01:52 +00:00
|
|
|
_mutex_cv_lock(struct pthread_mutex *m, int count)
|
2003-04-01 03:46:29 +00:00
|
|
|
{
|
2010-12-22 05:01:52 +00:00
|
|
|
int error;
|
2005-04-02 01:20:00 +00:00
|
|
|
|
2010-12-22 05:01:52 +00:00
|
|
|
error = mutex_lock_common(m, NULL, 1);
|
|
|
|
if (error == 0)
|
|
|
|
m->m_count = count;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2012-08-11 23:17:02 +00:00
|
|
|
_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
|
2010-12-22 05:01:52 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Clear the count in case this is a recursive mutex.
|
|
|
|
*/
|
|
|
|
*count = m->m_count;
|
|
|
|
m->m_count = 0;
|
2012-08-11 23:17:02 +00:00
|
|
|
(void)mutex_unlock_common(m, 1, defer);
|
2010-12-22 05:01:52 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Attach an already-acquired low-level lock to the calling thread's
 * bookkeeping: enqueue the mutex on the thread's owned-mutex list and
 * restore the recursion count saved by the matching detach.  Always
 * reports success.
 */
int
_mutex_cv_attach(struct pthread_mutex *m, int count)
{
	struct pthread *curthread = _get_curthread();

	ENQUEUE_MUTEX(curthread, m);
	m->m_count = count;
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Detach an owned mutex from the calling thread's bookkeeping without
 * releasing the underlying lock: verify ownership, save and clear the
 * recursion count through *recurse, and dequeue the mutex from the
 * thread's owned list.  Any wakeups that were deferred on this mutex
 * are flushed immediately, since the mutex is leaving this thread's
 * control.  Returns 0, or the ownership error from _mutex_owned().
 */
int
_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
{
	struct pthread *curthread = _get_curthread();
	int defered;
	int error;

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*recurse = mp->m_count;
	mp->m_count = 0;
	DEQUEUE_MUTEX(curthread, mp);

	/* Will this happen in real-world ? */
	if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
		defered = 1;
		mp->m_flags &= ~PMUTEX_FLAG_DEFERED;
	} else
		defered = 0;

	if (defered) {
		/* Wake everyone whose wakeup was postponed on this thread. */
		_thr_wake_all(curthread->defer_waiters,
			curthread->nwaiter_defer);
		curthread->nwaiter_defer = 0;
	}
	return (0);
}
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
static int
|
2010-09-28 04:57:56 +00:00
|
|
|
mutex_self_trylock(struct pthread_mutex *m)
|
2003-04-01 03:46:29 +00:00
|
|
|
{
|
|
|
|
int ret;
|
2005-04-02 01:20:00 +00:00
|
|
|
|
2010-12-22 05:01:52 +00:00
|
|
|
switch (PMUTEX_TYPE(m->m_flags)) {
|
2005-04-02 01:20:00 +00:00
|
|
|
case PTHREAD_MUTEX_ERRORCHECK:
|
|
|
|
case PTHREAD_MUTEX_NORMAL:
|
2012-05-27 01:24:51 +00:00
|
|
|
case PTHREAD_MUTEX_ADAPTIVE_NP:
|
2005-04-02 01:20:00 +00:00
|
|
|
ret = EBUSY;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PTHREAD_MUTEX_RECURSIVE:
|
|
|
|
/* Increment the lock count: */
|
|
|
|
if (m->m_count + 1 > 0) {
|
|
|
|
m->m_count++;
|
|
|
|
ret = 0;
|
|
|
|
} else
|
|
|
|
ret = EAGAIN;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
/* Trap invalid mutex types; */
|
|
|
|
ret = EINVAL;
|
|
|
|
}
|
|
|
|
|
2003-04-01 22:39:31 +00:00
|
|
|
return (ret);
|
|
|
|
}
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
static int
|
2010-09-28 04:57:56 +00:00
|
|
|
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
|
2003-04-01 22:39:31 +00:00
|
|
|
{
|
2006-03-27 23:50:21 +00:00
|
|
|
struct timespec ts1, ts2;
|
|
|
|
int ret;
|
2005-04-02 01:20:00 +00:00
|
|
|
|
2010-12-22 05:01:52 +00:00
|
|
|
switch (PMUTEX_TYPE(m->m_flags)) {
|
2003-04-01 22:39:31 +00:00
|
|
|
case PTHREAD_MUTEX_ERRORCHECK:
|
2007-10-30 09:24:23 +00:00
|
|
|
case PTHREAD_MUTEX_ADAPTIVE_NP:
|
2005-04-02 01:20:00 +00:00
|
|
|
if (abstime) {
|
2008-05-29 07:57:33 +00:00
|
|
|
if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
|
|
|
|
abstime->tv_nsec >= 1000000000) {
|
|
|
|
ret = EINVAL;
|
|
|
|
} else {
|
|
|
|
clock_gettime(CLOCK_REALTIME, &ts1);
|
|
|
|
TIMESPEC_SUB(&ts2, abstime, &ts1);
|
|
|
|
__sys_nanosleep(&ts2, NULL);
|
|
|
|
ret = ETIMEDOUT;
|
|
|
|
}
|
2005-04-02 01:20:00 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* POSIX specifies that mutexes should return
|
|
|
|
* EDEADLK if a recursive lock is detected.
|
|
|
|
*/
|
|
|
|
ret = EDEADLK;
|
|
|
|
}
|
2003-04-01 22:39:31 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case PTHREAD_MUTEX_NORMAL:
|
|
|
|
/*
|
|
|
|
* What SS2 define as a 'normal' mutex. Intentionally
|
|
|
|
* deadlock on attempts to get a lock you already own.
|
|
|
|
*/
|
2005-04-02 01:20:00 +00:00
|
|
|
ret = 0;
|
|
|
|
if (abstime) {
|
2008-05-29 07:57:33 +00:00
|
|
|
if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
|
|
|
|
abstime->tv_nsec >= 1000000000) {
|
|
|
|
ret = EINVAL;
|
|
|
|
} else {
|
|
|
|
clock_gettime(CLOCK_REALTIME, &ts1);
|
|
|
|
TIMESPEC_SUB(&ts2, abstime, &ts1);
|
|
|
|
__sys_nanosleep(&ts2, NULL);
|
|
|
|
ret = ETIMEDOUT;
|
|
|
|
}
|
2005-04-02 01:20:00 +00:00
|
|
|
} else {
|
|
|
|
ts1.tv_sec = 30;
|
|
|
|
ts1.tv_nsec = 0;
|
|
|
|
for (;;)
|
|
|
|
__sys_nanosleep(&ts1, NULL);
|
|
|
|
}
|
2003-04-01 22:39:31 +00:00
|
|
|
break;
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
case PTHREAD_MUTEX_RECURSIVE:
|
|
|
|
/* Increment the lock count: */
|
|
|
|
if (m->m_count + 1 > 0) {
|
|
|
|
m->m_count++;
|
|
|
|
ret = 0;
|
|
|
|
} else
|
|
|
|
ret = EAGAIN;
|
2004-05-20 11:55:04 +00:00
|
|
|
break;
|
2005-04-02 01:20:00 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
/* Trap invalid mutex types; */
|
|
|
|
ret = EINVAL;
|
2003-04-01 22:39:31 +00:00
|
|
|
}
|
2005-04-02 01:20:00 +00:00
|
|
|
|
|
|
|
return (ret);
|
2003-04-01 22:39:31 +00:00
|
|
|
}
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
/*
 * Common unlock path for all unlock entry points.
 *
 * 'cv' is non-zero when called from the condition-variable code; in
 * that case the THR_CRITICAL_LEAVE for private mutexes is skipped.
 * 'mtx_defer', when non-NULL, is passed to _thr_umutex_unlock2() to
 * collect wakeups rather than issue them, and also suppresses the
 * immediate flush of this thread's deferred waiters below.
 *
 * Returns EINVAL for a destroyed mutex, EPERM for other sentinel
 * values or if the caller does not own the mutex, otherwise the
 * result of the low-level unlock (0 if only a recursion level was
 * dropped).
 */
static int
mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
{
	struct pthread *curthread = _get_curthread();
	uint32_t id;
	int defered, error;

	/* Sentinel values sort below any real mutex pointer. */
	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	error = 0;
	id = TID(curthread);
	if (__predict_false(
		PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		/* Recursive hold: just drop one level, keep the lock. */
		m->m_count--;
	} else {
		/*
		 * Latch-and-clear the deferred-wakeup flag before the
		 * lock is actually released below.
		 */
		if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
			defered = 1;
			m->m_flags &= ~PMUTEX_FLAG_DEFERED;
		} else
			defered = 0;

		DEQUEUE_MUTEX(curthread, m);
		error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);

		/*
		 * If the caller did not ask to accumulate wakeups,
		 * flush the ones deferred on this thread now.
		 */
		if (mtx_defer == NULL && defered) {
			_thr_wake_all(curthread->defer_waiters,
				curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
	}
	/* Leave the critical region entered when a private mutex was locked. */
	if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_LEAVE(curthread);
	return (error);
}
|
|
|
|
|
2006-03-27 23:50:21 +00:00
|
|
|
int
|
|
|
|
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
|
|
|
|
int *prioceiling)
|
2003-04-01 22:39:31 +00:00
|
|
|
{
|
2010-09-28 04:57:56 +00:00
|
|
|
struct pthread_mutex *m;
|
2006-03-27 23:50:21 +00:00
|
|
|
int ret;
|
2003-04-01 22:39:31 +00:00
|
|
|
|
2010-09-28 04:57:56 +00:00
|
|
|
m = *mutex;
|
|
|
|
if ((m <= THR_MUTEX_DESTROYED) ||
|
|
|
|
(m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
|
2006-03-27 23:50:21 +00:00
|
|
|
ret = EINVAL;
|
2006-04-04 02:57:49 +00:00
|
|
|
else {
|
2010-09-28 04:57:56 +00:00
|
|
|
*prioceiling = m->m_lock.m_ceilings[0];
|
2006-04-04 02:57:49 +00:00
|
|
|
ret = 0;
|
|
|
|
}
|
2005-04-02 01:20:00 +00:00
|
|
|
|
2010-09-28 04:57:56 +00:00
|
|
|
return (ret);
|
2003-04-01 22:39:31 +00:00
|
|
|
}
|
|
|
|
|
2006-03-27 23:50:21 +00:00
|
|
|
/*
 * Change the priority ceiling of a priority-protect mutex, returning
 * the old ceiling through *old_ceiling.  If the calling thread owns
 * the mutex, it is repositioned within the thread's pp_mutexq, which
 * the insertion logic below keeps sorted by ascending ceiling.
 * Returns EINVAL for sentinel values or a non-priority-protect mutex,
 * or the error from the low-level ceiling update.
 */
int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
	int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		/* Re-sort only if the new ceiling breaks the queue order. */
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			/* Insert before the first entry with a larger ceiling. */
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			/* No larger ceiling found: this mutex goes last. */
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}
|
2007-12-14 06:25:57 +00:00
|
|
|
|
|
|
|
/*
 * Non-portable: report the mutex's spin-loop count through *count.
 */
int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	/* File-local macro (no semicolon by design): loads *mutex into 'm'. */
	CHECK_AND_INIT_MUTEX

	*count = m->m_spinloops;
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Non-portable: set the mutex's spin-loop count.  The value is stored
 * unvalidated.
 */
int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	/* File-local macro (no semicolon by design): loads *mutex into 'm'. */
	CHECK_AND_INIT_MUTEX

	m->m_spinloops = count;
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Non-portable: report the mutex's yield-loop count through *count.
 */
int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	/* File-local macro (no semicolon by design): loads *mutex into 'm'. */
	CHECK_AND_INIT_MUTEX

	*count = m->m_yieldloops;
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Non-portable: set the mutex's yield-loop count.  The value is
 * stored unvalidated.
 */
int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	/* File-local macro (no semicolon by design): loads *mutex into 'm'. */
	CHECK_AND_INIT_MUTEX

	m->m_yieldloops = count;
	return (0);
}
|
2008-02-03 22:38:10 +00:00
|
|
|
|
|
|
|
int
|
2008-02-06 19:34:31 +00:00
|
|
|
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
|
2008-02-03 22:38:10 +00:00
|
|
|
{
|
2010-09-28 04:57:56 +00:00
|
|
|
struct pthread_mutex *m;
|
2008-02-03 22:38:10 +00:00
|
|
|
|
2010-09-28 04:57:56 +00:00
|
|
|
m = *mutex;
|
|
|
|
if (m <= THR_MUTEX_DESTROYED)
|
|
|
|
return (0);
|
|
|
|
return (m->m_owner == _get_curthread());
|
2008-02-03 22:38:10 +00:00
|
|
|
}
|
2010-12-22 05:01:52 +00:00
|
|
|
|
|
|
|
int
|
|
|
|
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
|
|
|
|
{
|
|
|
|
if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
|
|
|
|
if (mp == THR_MUTEX_DESTROYED)
|
|
|
|
return (EINVAL);
|
|
|
|
return (EPERM);
|
|
|
|
}
|
|
|
|
if (mp->m_owner != curthread)
|
|
|
|
return (EPERM);
|
|
|
|
return (0);
|
|
|
|
}
|