/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include "thr_private.h"
#include "thr_umtx.h"
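
/*
 * Fallback for platforms that lack a dedicated _umtx_op_err() syscall
 * stub: emulate it with plain _umtx_op(), translating the usual
 * -1/errno convention into a direct error-number return, which is the
 * convention the rest of this file relies on.
 */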
#ifndef HAS__UMTX_OP_ERR
int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
{
	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
		return (errno);
	return (0);
}
#endif

void
_thr_umutex_init(struct umutex *mtx)
{
	static struct umutex default_mtx = DEFAULT_UMUTEX;

	*mtx = default_mtx;
}

void
_thr_urwlock_init(struct urwlock *rwl)
{
	static struct urwlock default_rwl = DEFAULT_URWLOCK;
	*rwl = default_rwl;
}
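
/*
 * Lock a umutex from userland.  For mutexes without priority
 * protection or priority inheritance, contention is resolved here:
 * the thread sleeps in the kernel via UMTX_OP_MUTEX_WAIT and, on each
 * wakeup, tries to install its own thread id as owner with a single
 * compare-and-set.  PP/PI mutexes must be managed by the kernel, so
 * those fall through to UMTX_OP_MUTEX_LOCK.
 */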
int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);

			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
				return (0);
		}
	}

	return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}
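
/*
 * Adaptive variant of the lock above: on SMP systems, spin for up to
 * SPINLOOPS iterations trying to grab the mutex before each trip into
 * the kernel.  Spinning only pays off when another CPU can release the
 * lock while we busy-wait, so uniprocessor systems go straight to the
 * sleeping path.
 */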
#define SPINLOOPS 1000

int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if (!_thr_is_smp)
		return __thr_umutex_lock(mtx, id);

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			int count = SPINLOOPS;
			while (count--) {
				owner = mtx->m_owner;
				if ((owner & ~UMUTEX_CONTESTED) == 0) {
					if (atomic_cmpset_acq_32(
					    &mtx->m_owner,
					    owner, id|owner)) {
						return (0);
					}
				}
				CPU_SPINWAIT;
			}

			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
		}
	}

	return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}
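
/*
 * Timed lock.  The timeout is handed to the kernel packed in a
 * struct _umtx_time, which carries the timespec together with a clock
 * id and flags (UMTX_ABSTIME here).  The structure's size goes in the
 * syscall's fourth argument: zero means the old plain-timespec
 * interface, a non-zero size selects the extended one, which leaves
 * room to grow and lets a thread sleep on any clock the kernel
 * supports.
 */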
int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *abstime)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;
	uint32_t owner;
	int ret;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = CLOCK_REALTIME;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {

			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));

			/* now try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
				return (0);
		} else {
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
			if (ret == 0)
				break;
		}
		if (ret == ETIMEDOUT)
			break;
	}
	return (ret);
}

int
__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
}

int
__thr_umutex_trylock(struct umutex *mtx)
{
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0);
}

int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling)
{
	return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0);
}
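
/*
 * Sleep until *mtx no longer contains the value 'id'.  The timeout is
 * relative here; a zero or negative interval is reported as ETIMEDOUT
 * immediately instead of being handed to the kernel.
 */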
int
_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
	    __DECONST(void *, timeout));
}

int
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
	const struct timespec *timeout, int shared)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return _umtx_op_err(__DEVOLATILE(void *, mtx),
	    shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
	    __DECONST(void *, timeout));
}
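
/*
 * As above, but with an absolute deadline measured on a caller-chosen
 * clock, again packed into a struct _umtx_time whose size is passed as
 * the fourth syscall argument.
 */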
int
_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
	const struct timespec *abstime, int shared)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = clockid;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	return _umtx_op_err(__DEVOLATILE(void *, mtx),
	    shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
	    (void *)tm_size, __DECONST(void *, tm_p));
}

int
_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
{
	return _umtx_op_err(__DEVOLATILE(void *, mtx),
	    shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE,
	    nr_wakeup, 0, 0);
}
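
/*
 * A typical pairing of the wait/wake primitives above (a minimal
 * sketch for illustration, not code from this library): the waiter
 * sleeps only while the word still holds the value it saw, and the
 * waker updates the word before issuing the wakeup, so a wakeup can
 * never be lost between the check and the sleep.
 *
 *	static volatile u_int gate = 0;
 *
 *	void
 *	waiter(void)
 *	{
 *		while (gate == 0)
 *			_thr_umtx_wait_uint(&gate, 0, NULL, 0);
 *	}
 *
 *	void
 *	waker(void)
 *	{
 *		gate = 1;
 *		_thr_umtx_wake(&gate, 1, 0);
 *	}
 */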

void
_thr_ucond_init(struct ucond *cv)
{
	bzero(cv, sizeof(struct ucond));
}
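
/*
 * Kernel-assisted condition wait: the kernel queues the thread on the
 * ucond and releases the umutex as one operation.  A zero or negative
 * relative timeout is an immediate ETIMEDOUT, but the mutex must still
 * be dropped here so the caller sees normal condvar semantics.
 */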
int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
	const struct timespec *timeout, int flags)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		struct pthread *curthread = _get_curthread();
		_thr_umutex_unlock(m, TID(curthread));
		return (ETIMEDOUT);
	}
	return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags,
	    m, __DECONST(void *, timeout));
}

int
_thr_ucond_signal(struct ucond *cv)
{
	if (!cv->c_has_waiters)
		return (0);
	return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
}

int
_thr_ucond_broadcast(struct ucond *cv)
{
	if (!cv->c_has_waiters)
		return (0);
	return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL);
}
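
/*
 * Read- and write-lock a urwlock in the kernel, optionally with an
 * absolute CLOCK_REALTIME deadline delivered via struct _umtx_time,
 * using the same size-as-discriminator convention as the mutex paths
 * above.
 */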
int
__thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
	const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags,
	    (void *)tm_size, tm_p);
}

int
__thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0,
	    (void *)tm_size, tm_p);
}

int
__thr_rwlock_unlock(struct urwlock *rwlock)
{
	return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL);
}

/*
 * No-fail wrappers used for libthr's own low-level locks.  They were
 * introduced together with the in-library signal handler wrapper:
 * while a thread holds one of these locks, delivered signals are
 * buffered and only processed after the critical region is left, so
 * the code holding them need not worry about reentrancy and avoids
 * expensive sigprocmask() calls.  Since no caller can handle failure
 * here, each operation retries on EINTR and panics on any other
 * error.
 */
void
_thr_rwl_rdlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
			return;
		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("rdlock error");
	}
}

void
_thr_rwl_wrlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_trywrlock(rwlock) == 0)
			return;
		ret = __thr_rwlock_wrlock(rwlock, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("wrlock error");
	}
}

void
_thr_rwl_unlock(struct urwlock *rwlock)
{
	if (_thr_rwlock_unlock(rwlock))
		PANIC("unlock error");
}