/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {			\
	(m)->m_qe.tqe_prev = NULL;				\
	(m)->m_qe.tqe_next = NULL;				\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {			\
	if (__predict_false((m)->m_qe.tqe_prev == NULL))	\
		PANIC("mutex is not on list");			\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {			\
	if (__predict_false((m)->m_qe.tqe_prev != NULL ||	\
	    (m)->m_qe.tqe_next != NULL))			\
		PANIC("mutex is on list");			\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	2000
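
/*
 * Per the commit notes accompanying this code: the spin and yield counts
 * can be tuned process-wide through the environment variables
 * LIBPTHREAD_SPINLOOPS and LIBPTHREAD_YIELDLOOPS (surfaced below as
 * _thr_spinloops and _thr_yieldloops).  Spinning avoids a syscall when
 * the owner releases the mutex quickly; yielding gives up the CPU without
 * sleeping in the kernel, so the waiter never sets the contention bit and
 * the owner can unlock without entering the kernel to wake waiters.  Both
 * default to zero, i.e. spinning is off unless requested.  A usage sketch
 * (shell session, not part of this file):
 *
 *	$ LIBPTHREAD_SPINLOOPS=2000 LIBPTHREAD_YIELDLOOPS=2 ./app
 */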

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
			const struct timespec *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch(attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}

	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}
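
/*
 * Usage sketch (application side; an assumption about standard pthread
 * attribute usage, not code from this file): the m_type and m_protocol
 * fields validated above are set through the usual pthread_mutexattr_*
 * calls, e.g.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 *	pthread_mutex_init(&m, &attr);	// takes the adaptive-spin path
 *	pthread_mutexattr_destroy(&attr);
 */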

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
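
/*
 * A sketch of what triggers the path above (application side; assumes
 * PTHREAD_MUTEX_INITIALIZER leaves the pointer NULL, as it does in this
 * implementation): a statically initialized mutex holds no structure
 * until first use, so the first lock/trylock call lands in init_static()
 * to allocate the real mutex under _mutex_static_lock.
 *
 *	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *
 *	pthread_mutex_lock(&m);		// *mutex == NULL -> init_static()
 *	pthread_mutex_unlock(&m);
 */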

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, calloc);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0
	};
	static const struct pthread_mutex_attr *pattr = &attr;

	return mutex_init(mutex, (pthread_mutexattr_t *)&pattr, calloc_cb);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited: their owner
	 * is the forking thread, which lives in the parent process,
	 * so they would have to be removed from the owned-mutex list.
	 * Process-shared mutexes are currently not supported, so this
	 * is not a concern here.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is in
		 * use, mostly for priority-protocol mutex types or
		 * when condition variables still reference it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}

#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
	const struct timespec *abstime)
{
	uint32_t id, owner;
	int count;
	int ret;

	if (m->m_owner == curthread)
		return mutex_self_lock(m, abstime);

	id = TID(curthread);
	/*
	 * For adaptive mutexes, spin for a bit in the expectation
	 * that if the application requests this mutex type then
	 * the lock is likely to be released quickly and it is
	 * faster than entering the kernel.
	 */
	if (m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT))
		goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(
		abstime->tv_nsec < 0 ||
		abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		ENQUEUE_MUTEX(curthread, m);

	return (ret);
}
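
/*
 * A note on the lock-word protocol used by the spin and yield loops
 * above (inferred from the code; the exact umutex layout lives in
 * sys/umtx.h): m_lock.m_owner holds the owner's TID, with
 * UMUTEX_CONTESTED as the only flag bit.  A word whose non-CONTESTED
 * bits are zero means "unowned", so
 *
 *	atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id | owner)
 *
 * installs our TID while preserving any contested marker already set,
 * keeping kernel wakeups intact for waiters that are already asleep.
 */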

static inline int
mutex_lock_common(struct pthread *curthread, struct pthread_mutex *m,
	const struct timespec *abstime)
{

	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		ENQUEUE_MUTEX(curthread, m);
		return (0);
	}

	return (mutex_lock_sleep(curthread, m, abstime));
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread *curthread;
	struct pthread_mutex *m;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false((m = *mutex) == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
		m = *mutex;
	}

	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
{
	struct pthread *curthread;
	struct pthread_mutex *m;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false((m = *mutex) == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
		m = *mutex;
	}
	return (mutex_lock_common(curthread, m, abstime));
}
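
/*
 * Usage sketch (application side; an assumption about standard POSIX
 * usage rather than code from this file): abstime is an absolute
 * CLOCK_REALTIME instant, so a "wait up to two seconds" caller builds
 * it from the current time:
 *
 *	struct timespec abst;
 *
 *	clock_gettime(CLOCK_REALTIME, &abst);
 *	abst.tv_sec += 2;
 *	if (pthread_mutex_timedlock(&m, &abst) == ETIMEDOUT)
 *		;	// gave up after roughly two seconds
 */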

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int ret;

	ret = mutex_lock_common(_get_curthread(), *m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
	    m->m_type == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}
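
/*
 * Design note (inferred from the code above, not from the original
 * comments): pp_mutexq is kept sorted in ascending ceiling order, and
 * set_inherited_priority() reads the tail entry as the thread's highest
 * remaining ceiling.  That invariant is why changing the ceiling of a
 * held mutex may force the TAILQ_REMOVE/TAILQ_INSERT_BEFORE reshuffle
 * above.
 */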

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_spinloops;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_yieldloops;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_yieldloops = count;
	return (0);
}
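
/*
 * Usage sketch for the per-mutex tuning knobs above (application side;
 * assumes the pthread_mutex_*_np wrappers exported via the weak
 * references earlier in this file):
 *
 *	#include <pthread_np.h>
 *
 *	pthread_mutex_setspinloops_np(&m, 2000); // spin before sleeping
 *	pthread_mutex_setyieldloops_np(&m, 2);	 // then yield before sleeping
 */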

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return ((*mutex)->m_owner == curthread);
}