Introduce a kernel-based userland rwlock. Each umtx chain now has two
lists, one for readers and one for writers; other types of
synchronization objects just use the first list.

Asked by: jeff
Author: David Xu
Date:   2008-04-02 04:08:37 +00:00
parent c709e029e7
commit 1a30511c61
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=177848
2 changed files with 518 additions and 56 deletions
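
For context: userland reaches the new handlers through the _umtx_op(2)
system call. The sketch below shows one plausible way a thread library
could wrap the new operations; _umtx_op(), struct urwlock and the
UMTX_OP_RW_* constants are introduced by this commit, while the wrapper
names and the trimmed error handling are illustrative assumptions only.

    #include <sys/types.h>
    #include <sys/umtx.h>

    /* Hypothetical wrappers, not part of this commit. */
    static int
    rw_rdlock(struct urwlock *rw, struct timespec *timo)
    {
            /* timo == NULL sleeps forever; val carries read flags. */
            return (_umtx_op(rw, UMTX_OP_RW_RDLOCK, 0, NULL, timo));
    }

    static int
    rw_wrlock(struct urwlock *rw, struct timespec *timo)
    {
            return (_umtx_op(rw, UMTX_OP_RW_WRLOCK, 0, NULL, timo));
    }

    static int
    rw_unlock(struct urwlock *rw)
    {
            return (_umtx_op(rw, UMTX_OP_RW_UNLOCK, 0, NULL, NULL));
    }

A lock would be initialized by zeroing the structure (optionally setting
URWLOCK_PREFER_READER in rw_flags); the timeout, when given, is a
relative timespec, validated by the syscall wrappers in the diff below.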

sys/kern/kern_umtx.c

@@ -64,6 +64,7 @@ __FBSDID("$FreeBSD$");
 #define TYPE_PI_UMUTEX 3
 #define TYPE_PP_UMUTEX 4
 #define TYPE_CV 5
+#define TYPE_RWLOCK 6

 /* Key to represent a unique userland synchronous object */
 struct umtx_key {
@@ -147,7 +148,9 @@ struct umtxq_chain {
 	struct mtx uc_lock;

 	/* List of sleep queues. */
-	struct umtxq_head uc_queue;
+	struct umtxq_head uc_queue[2];
+#define UMTX_SHARED_QUEUE 0
+#define UMTX_EXCLUSIVE_QUEUE 1

 	/* Busy flag */
 	char uc_busy;
@@ -185,6 +188,8 @@ struct umtxq_chain {
 #define GET_SHARE(flags) \
     (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)

+#define BUSY_SPINS 200
+
 static uma_zone_t umtx_pi_zone;
 static struct umtxq_chain umtxq_chains[UMTX_CHAINS];
 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
@@ -201,11 +206,10 @@ static void umtxq_lock(struct umtx_key *key);
 static void umtxq_unlock(struct umtx_key *key);
 static void umtxq_busy(struct umtx_key *key);
 static void umtxq_unbusy(struct umtx_key *key);
-static void umtxq_insert(struct umtx_q *uq);
-static void umtxq_remove(struct umtx_q *uq);
+static void umtxq_insert_queue(struct umtx_q *uq, int q);
+static void umtxq_remove_queue(struct umtx_q *uq, int q);
 static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, int timo);
 static int umtxq_count(struct umtx_key *key);
-static int umtxq_signal(struct umtx_key *key, int nr_wakeup);
 static int umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2);
 static int umtx_key_get(void *addr, int type, int share,
 	struct umtx_key *key);
@@ -219,6 +223,10 @@ static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
 	struct image_params *imgp __unused);
 SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);

+#define umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
+#define umtxq_insert(uq)	umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
+#define umtxq_remove(uq)	umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)
+
 static struct mtx umtx_lock;

 static void
@@ -231,7 +239,8 @@ umtxq_sysinit(void *arg __unused)
 	for (i = 0; i < UMTX_CHAINS; ++i) {
 		mtx_init(&umtxq_chains[i].uc_lock, "umtxql", NULL,
 		    MTX_DEF | MTX_DUPOK);
-		TAILQ_INIT(&umtxq_chains[i].uc_queue);
+		TAILQ_INIT(&umtxq_chains[i].uc_queue[0]);
+		TAILQ_INIT(&umtxq_chains[i].uc_queue[1]);
 		TAILQ_INIT(&umtxq_chains[i].uc_pi_list);
 		umtxq_chains[i].uc_busy = 0;
 		umtxq_chains[i].uc_waiters = 0;
@@ -279,41 +288,6 @@ umtxq_getchain(struct umtx_key *key)
 	return (&umtxq_chains[key->hash]);
 }

-/*
- * Set chain to busy state when following operation
- * may be blocked (kernel mutex can not be used).
- */
-static inline void
-umtxq_busy(struct umtx_key *key)
-{
-	struct umtxq_chain *uc;
-
-	uc = umtxq_getchain(key);
-	mtx_assert(&uc->uc_lock, MA_OWNED);
-	while (uc->uc_busy != 0) {
-		uc->uc_waiters++;
-		msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
-		uc->uc_waiters--;
-	}
-	uc->uc_busy = 1;
-}
-
-/*
- * Unbusy a chain.
- */
-static inline void
-umtxq_unbusy(struct umtx_key *key)
-{
-	struct umtxq_chain *uc;
-
-	uc = umtxq_getchain(key);
-	mtx_assert(&uc->uc_lock, MA_OWNED);
-	KASSERT(uc->uc_busy != 0, ("not busy"));
-	uc->uc_busy = 0;
-	if (uc->uc_waiters)
-		wakeup_one(uc);
-}
-
 /*
  * Lock a chain.
  */
@@ -339,31 +313,69 @@ umtxq_unlock(struct umtx_key *key)
 }

 /*
- * Insert a thread onto the umtx queue.
+ * Set chain to busy state when following operation
+ * may be blocked (kernel mutex can not be used).
  */
 static inline void
-umtxq_insert(struct umtx_q *uq)
+umtxq_busy(struct umtx_key *key)
+{
+	struct umtxq_chain *uc;
+
+	uc = umtxq_getchain(key);
+	mtx_assert(&uc->uc_lock, MA_OWNED);
+	if (uc->uc_busy) {
+		int count = BUSY_SPINS;
+		if (count > 0) {
+			umtxq_unlock(key);
+			while (uc->uc_busy && --count > 0)
+				cpu_spinwait();
+			umtxq_lock(key);
+		}
+		while (uc->uc_busy != 0) {
+			uc->uc_waiters++;
+			msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
+			uc->uc_waiters--;
+		}
+	}
+	uc->uc_busy = 1;
+}
+
+/*
+ * Unbusy a chain.
+ */
+static inline void
+umtxq_unbusy(struct umtx_key *key)
+{
+	struct umtxq_chain *uc;
+
+	uc = umtxq_getchain(key);
+	mtx_assert(&uc->uc_lock, MA_OWNED);
+	KASSERT(uc->uc_busy != 0, ("not busy"));
+	uc->uc_busy = 0;
+	if (uc->uc_waiters)
+		wakeup_one(uc);
+}
+
+static inline void
+umtxq_insert_queue(struct umtx_q *uq, int q)
 {
 	struct umtxq_chain *uc;

 	uc = umtxq_getchain(&uq->uq_key);
 	UMTXQ_LOCKED_ASSERT(uc);
-	TAILQ_INSERT_TAIL(&uc->uc_queue, uq, uq_link);
+	TAILQ_INSERT_TAIL(&uc->uc_queue[q], uq, uq_link);
 	uq->uq_flags |= UQF_UMTXQ;
 }

-/*
- * Remove thread from the umtx queue.
- */
 static inline void
-umtxq_remove(struct umtx_q *uq)
+umtxq_remove_queue(struct umtx_q *uq, int q)
 {
 	struct umtxq_chain *uc;

 	uc = umtxq_getchain(&uq->uq_key);
 	UMTXQ_LOCKED_ASSERT(uc);
 	if (uq->uq_flags & UQF_UMTXQ) {
-		TAILQ_REMOVE(&uc->uc_queue, uq, uq_link);
+		TAILQ_REMOVE(&uc->uc_queue[q], uq, uq_link);
 		uq->uq_flags &= ~UQF_UMTXQ;
 	}
 }
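
A note on the relocated busy/unbusy pair: umtxq_busy() now spins up to
BUSY_SPINS times with cpu_spinwait() before sleeping, betting that a
chain is only busied for short, bounded sections. The rwlock paths below
all follow the same pattern around it (distilled from do_rw_rdlock(),
not new code):

    umtxq_lock(&uq->uq_key);
    umtxq_busy(&uq->uq_key);        /* may spin, then msleep "umtxqb" */
    umtxq_unlock(&uq->uq_key);
    /* ... fuword32()/casuword32() on user memory, which may fault ... */
    umtxq_lock(&uq->uq_key);
    umtxq_unbusy(&uq->uq_key);      /* wakes one "umtxqb" sleeper */
    umtxq_unlock(&uq->uq_key);

The busy flag exists because the chain mutex cannot be held across
operations that touch pageable userland memory.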
@@ -380,7 +392,7 @@ umtxq_count(struct umtx_key *key)
 	uc = umtxq_getchain(key);
 	UMTXQ_LOCKED_ASSERT(uc);
-	TAILQ_FOREACH(uq, &uc->uc_queue, uq_link) {
+	TAILQ_FOREACH(uq, &uc->uc_queue[UMTX_SHARED_QUEUE], uq_link) {
 		if (umtx_key_match(&uq->uq_key, key)) {
 			if (++count > 1)
 				break;
@@ -403,7 +415,7 @@ umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
 	*first = NULL;
 	uc = umtxq_getchain(key);
 	UMTXQ_LOCKED_ASSERT(uc);
-	TAILQ_FOREACH(uq, &uc->uc_queue, uq_link) {
+	TAILQ_FOREACH(uq, &uc->uc_queue[UMTX_SHARED_QUEUE], uq_link) {
 		if (umtx_key_match(&uq->uq_key, key)) {
 			if (++count > 1)
 				break;
@@ -416,8 +428,9 @@ umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
 /*
  * Wake up threads waiting on an userland object.
  */
 static int
-umtxq_signal(struct umtx_key *key, int n_wake)
+umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
 {
 	struct umtxq_chain *uc;
 	struct umtx_q *uq, *next;
@@ -426,9 +439,9 @@ umtxq_signal(struct umtx_key *key, int n_wake)
 	ret = 0;
 	uc = umtxq_getchain(key);
 	UMTXQ_LOCKED_ASSERT(uc);
-	TAILQ_FOREACH_SAFE(uq, &uc->uc_queue, uq_link, next) {
+	TAILQ_FOREACH_SAFE(uq, &uc->uc_queue[q], uq_link, next) {
 		if (umtx_key_match(&uq->uq_key, key)) {
-			umtxq_remove(uq);
+			umtxq_remove_queue(uq, q);
 			wakeup(uq);
 			if (++ret >= n_wake)
 				break;
@@ -437,6 +450,7 @@ umtxq_signal(struct umtx_key *key, int n_wake)
 	return (ret);
 }

 /*
  * Wake up specified thread.
  */
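
Existing callers are unaffected by the rename: the macros added earlier
make the old names expand to the shared queue, so for example

    umtxq_signal(&key, 1);
    /* is now literally */
    umtxq_signal_queue(&key, 1, UMTX_SHARED_QUEUE);

and the per-key match inside the loop still keeps wakeups scoped to a
single object even when several objects hash onto the same chain.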
@@ -2343,6 +2357,329 @@ do_cv_broadcast(struct thread *td, struct ucond *cv)
 	return (error);
 }

+static int
+do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag, int timo)
+{
+	struct umtx_q *uq;
+	uint32_t flags, wrflags;
+	int32_t state, oldstate;
+	int32_t blocked_readers;
+	int error;
+
+	uq = td->td_umtxq;
+	flags = fuword32(&rwlock->rw_flags);
+	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
+	if (error != 0)
+		return (error);
+
+	wrflags = URWLOCK_WRITE_OWNER;
+	if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
+		wrflags |= URWLOCK_WRITE_WAITERS;
+
+	for (;;) {
+		state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
+		/* try to lock it */
+		while (!(state & wrflags)) {
+			if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) {
+				umtx_key_release(&uq->uq_key);
+				return (EAGAIN);
+			}
+			oldstate = casuword32(&rwlock->rw_state, state, state + 1);
+			if (oldstate == state) {
+				umtx_key_release(&uq->uq_key);
+				return (0);
+			}
+			state = oldstate;
+		}
+
+		if (error)
+			break;
+
+		/* grab monitor lock */
+		umtxq_lock(&uq->uq_key);
+		umtxq_busy(&uq->uq_key);
+		umtxq_unlock(&uq->uq_key);
+
+		/* set read contention bit */
+		while ((state & wrflags) && !(state & URWLOCK_READ_WAITERS)) {
+			oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_READ_WAITERS);
+			if (oldstate == state)
+				goto sleep;
+			state = oldstate;
+		}
+
+		/* state is changed while setting flags, restart */
+		if (!(state & wrflags)) {
+			umtxq_lock(&uq->uq_key);
+			umtxq_unbusy(&uq->uq_key);
+			umtxq_unlock(&uq->uq_key);
+			continue;
+		}
+
+sleep:
+		/* contention bit is set, before sleeping, increase read waiter count */
+		blocked_readers = fuword32(&rwlock->rw_blocked_readers);
+		suword32(&rwlock->rw_blocked_readers, blocked_readers+1);
+		while (state & wrflags) {
+			umtxq_lock(&uq->uq_key);
+			umtxq_insert(uq);
+			umtxq_unbusy(&uq->uq_key);
+
+			error = umtxq_sleep(uq, "urdlck", timo);
+
+			umtxq_busy(&uq->uq_key);
+			umtxq_remove(uq);
+			umtxq_unlock(&uq->uq_key);
+			if (error)
+				break;
+			state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
+		}
+
+		/* decrease read waiter count, and may clear read contention bit */
+		blocked_readers = fuword32(&rwlock->rw_blocked_readers);
+		suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
+		if (blocked_readers == 1) {
+			state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
+			for (;;) {
+				oldstate = casuword32(&rwlock->rw_state, state,
+				    state & ~URWLOCK_READ_WAITERS);
+				if (oldstate == state)
+					break;
+				state = oldstate;
+			}
+		}
+
+		umtxq_lock(&uq->uq_key);
+		umtxq_unbusy(&uq->uq_key);
+		umtxq_unlock(&uq->uq_key);
+	}
+	umtx_key_release(&uq->uq_key);
+	return (error);
+}
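
The acquire CAS deserves a worked example (state values illustrative).
With rw_state == 0 the lock is unowned, so casuword32(&rw_state, 0, 1)
installs a reader count of 1 and rdlock returns 0. With rw_state ==
0x40000005 (five readers holding, one writer queued), the default
wrflags of URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS makes a new
reader block behind the writer, which is what keeps readers from
starving writers; under URWLOCK_PREFER_READER, wrflags is
URWLOCK_WRITE_OWNER alone, so the same CAS would move the state to
0x40000006 and succeed.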
+static int
+do_rw_rdlock2(struct thread *td, void *obj, long val, struct timespec *timeout)
+{
+	struct timespec ts, ts2, ts3;
+	struct timeval tv;
+	int error;
+
+	getnanouptime(&ts);
+	timespecadd(&ts, timeout);
+	TIMESPEC_TO_TIMEVAL(&tv, timeout);
+	for (;;) {
+		error = do_rw_rdlock(td, obj, val, tvtohz(&tv));
+		if (error != ETIMEDOUT)
+			break;
+		getnanouptime(&ts2);
+		if (timespeccmp(&ts2, &ts, >=)) {
+			error = ETIMEDOUT;
+			break;
+		}
+		ts3 = ts;
+		timespecsub(&ts3, &ts2);
+		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
+	}
+	return (error);
+}
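
The timeout handling converts the caller's relative timespec into an
absolute uptime deadline so retries cannot stretch the total wait.
Assuming hz = 1000 and a 1.5 s timeout (illustrative numbers):

    /*
     * ts = getnanouptime() + 1.5s          absolute deadline
     * tv = 1.5s, tvtohz(&tv) ~ 1501        first sleep, in ticks
     *
     * If the sleep returns ETIMEDOUT before the deadline is really
     * past (tick rounding, early wakeup), ts3 = deadline - now is
     * converted and slept again, so the loop converges on the
     * deadline instead of restarting a full 1.5s each pass.
     */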
+static int
+do_rw_wrlock(struct thread *td, struct urwlock *rwlock, int timo)
+{
+	struct umtx_q *uq;
+	uint32_t flags;
+	int32_t state, oldstate;
+	int32_t blocked_writers;
+	int error;
+
+	uq = td->td_umtxq;
+	flags = fuword32(&rwlock->rw_flags);
+	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
+	if (error != 0)
+		return (error);
+
+	for (;;) {
+		state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
+		while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
+			oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER);
+			if (oldstate == state) {
+				umtx_key_release(&uq->uq_key);
+				return (0);
+			}
+			state = oldstate;
+		}
+
+		if (error)
+			break;
+
+		/* grab monitor lock */
+		umtxq_lock(&uq->uq_key);
+		umtxq_busy(&uq->uq_key);
+		umtxq_unlock(&uq->uq_key);
+
+		while (((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) &&
+		       (state & URWLOCK_WRITE_WAITERS) == 0) {
+			oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_WAITERS);
+			if (oldstate == state)
+				goto sleep;
+			state = oldstate;
+		}
+
+		if (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
+			umtxq_lock(&uq->uq_key);
+			umtxq_unbusy(&uq->uq_key);
+			umtxq_unlock(&uq->uq_key);
+			continue;
+		}
+
+sleep:
+		blocked_writers = fuword32(&rwlock->rw_blocked_writers);
+		suword32(&rwlock->rw_blocked_writers, blocked_writers+1);
+
+		while ((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) {
+			umtxq_lock(&uq->uq_key);
+			umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
+			umtxq_unbusy(&uq->uq_key);
+
+			error = umtxq_sleep(uq, "uwrlck", timo);
+
+			umtxq_busy(&uq->uq_key);
+			umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
+			umtxq_unlock(&uq->uq_key);
+			if (error)
+				break;
+			state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
+		}
+
+		blocked_writers = fuword32(&rwlock->rw_blocked_writers);
+		suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
+		if (blocked_writers == 1) {
+			state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
+			for (;;) {
+				oldstate = casuword32(&rwlock->rw_state, state,
+				    state & ~URWLOCK_WRITE_WAITERS);
+				if (oldstate == state)
+					break;
+				state = oldstate;
+			}
+		}
+
+		umtxq_lock(&uq->uq_key);
+		umtxq_unbusy(&uq->uq_key);
+		umtxq_unlock(&uq->uq_key);
+	}
+
+	umtx_key_release(&uq->uq_key);
+	return (error);
+}
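
One subtlety worth calling out: rw_blocked_writers lives in user memory
but is only read-modify-written while the chain is busied, so plain
fuword32()/suword32() suffice without atomics. The last writer to leave
the queue must also clear the contention bit:

    /*
     * blocked_writers == 1 means this was the last queued writer;
     * after the decrement it must CAS URWLOCK_WRITE_WAITERS away,
     * otherwise unlock would keep preferring an empty exclusive
     * queue and sleeping readers would never be signalled.
     */

The reader path above maintains rw_blocked_readers and
URWLOCK_READ_WAITERS in exactly the same way.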
+static int
+do_rw_wrlock2(struct thread *td, void *obj, struct timespec *timeout)
+{
+	struct timespec ts, ts2, ts3;
+	struct timeval tv;
+	int error;
+
+	getnanouptime(&ts);
+	timespecadd(&ts, timeout);
+	TIMESPEC_TO_TIMEVAL(&tv, timeout);
+	for (;;) {
+		error = do_rw_wrlock(td, obj, tvtohz(&tv));
+		if (error != ETIMEDOUT)
+			break;
+		getnanouptime(&ts2);
+		if (timespeccmp(&ts2, &ts, >=)) {
+			error = ETIMEDOUT;
+			break;
+		}
+		ts3 = ts;
+		timespecsub(&ts3, &ts2);
+		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
+	}
+	return (error);
+}
+
+static int
+do_rwlock_unlock(struct thread *td, struct urwlock *rwlock)
+{
+	struct umtx_q *uq;
+	uint32_t flags;
+	int32_t state, oldstate;
+	int error, q, count;
+
+	uq = td->td_umtxq;
+	flags = fuword32(&rwlock->rw_flags);
+	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
+	if (error != 0)
+		return (error);
+
+	state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
+	if (state & URWLOCK_WRITE_OWNER) {
+		for (;;) {
+			oldstate = casuword32(&rwlock->rw_state, state,
+			    state & ~URWLOCK_WRITE_OWNER);
+			if (oldstate != state) {
+				state = oldstate;
+				if (!(oldstate & URWLOCK_WRITE_OWNER)) {
+					error = EPERM;
+					goto out;
+				}
+			} else
+				break;
+		}
+	} else if (URWLOCK_READER_COUNT(state) != 0) {
+		for (;;) {
+			oldstate = casuword32(&rwlock->rw_state, state,
+			    state - 1);
+			if (oldstate != state) {
+				state = oldstate;
+				if (URWLOCK_READER_COUNT(oldstate) == 0) {
+					error = EPERM;
+					goto out;
+				}
+			} else
+				break;
+		}
+	} else {
+		error = EPERM;
+		goto out;
+	}
+
+	count = 0;
+
+	if (!(flags & URWLOCK_PREFER_READER)) {
+		if (state & URWLOCK_WRITE_WAITERS) {
+			count = 1;
+			q = UMTX_EXCLUSIVE_QUEUE;
+		} else if (state & URWLOCK_READ_WAITERS) {
+			count = INT_MAX;
+			q = UMTX_SHARED_QUEUE;
+		}
+	} else {
+		if (state & URWLOCK_READ_WAITERS) {
+			count = INT_MAX;
+			q = UMTX_SHARED_QUEUE;
+		} else if (state & URWLOCK_WRITE_WAITERS) {
+			count = 1;
+			q = UMTX_EXCLUSIVE_QUEUE;
+		}
+	}
+
+	if (count) {
+		umtxq_lock(&uq->uq_key);
+		umtxq_busy(&uq->uq_key);
+		umtxq_signal_queue(&uq->uq_key, count, q);
+		umtxq_unbusy(&uq->uq_key);
+		umtxq_unlock(&uq->uq_key);
+	}
+out:
+	umtx_key_release(&uq->uq_key);
+	return (error);
+}
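
The count/q selection encodes the lock's fairness policy; in table form
(URWLOCK_PREFER_READER swaps the two rows):

    waiters present            woken by unlock
    ----------------------     ----------------------------------------
    writers (readers too)      one writer  (UMTX_EXCLUSIVE_QUEUE, 1)
    readers only               all readers (UMTX_SHARED_QUEUE, INT_MAX)

Waking a single writer avoids a thundering herd on a lock only one of
them can win; woken readers can all proceed, so they are released
together.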
 int
 _umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
 	/* struct umtx *umtx */
@@ -2507,6 +2844,59 @@ __umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
 	return do_cv_broadcast(td, uap->obj);
 }

+static int
+__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
+{
+	struct timespec timeout;
+	int error;
+
+	/* Allow a null timespec (wait forever). */
+	if (uap->uaddr2 == NULL) {
+		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
+	} else {
+		error = copyin(uap->uaddr2, &timeout,
+		    sizeof(timeout));
+		if (error != 0)
+			return (error);
+		if (timeout.tv_nsec >= 1000000000 ||
+		    timeout.tv_nsec < 0) {
+			return (EINVAL);
+		}
+		error = do_rw_rdlock2(td, uap->obj, uap->val, &timeout);
+	}
+	return (error);
+}
+
+static int
+__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
+{
+	struct timespec timeout;
+	int error;
+
+	/* Allow a null timespec (wait forever). */
+	if (uap->uaddr2 == NULL) {
+		error = do_rw_wrlock(td, uap->obj, 0);
+	} else {
+		error = copyin(uap->uaddr2, &timeout,
+		    sizeof(timeout));
+		if (error != 0)
+			return (error);
+		if (timeout.tv_nsec >= 1000000000 ||
+		    timeout.tv_nsec < 0) {
+			return (EINVAL);
+		}
+		error = do_rw_wrlock2(td, uap->obj, &timeout);
+	}
+	return (error);
+}
+
+static int
+__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
+{
+	return do_rwlock_unlock(td, uap->obj);
+}
+
 typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);

 static _umtx_op_func op_table[] = {
@@ -2521,7 +2911,10 @@ static _umtx_op_func op_table[] = {
 	__umtx_op_cv_wait,		/* UMTX_OP_CV_WAIT*/
 	__umtx_op_cv_signal,		/* UMTX_OP_CV_SIGNAL */
 	__umtx_op_cv_broadcast,		/* UMTX_OP_CV_BROADCAST */
-	__umtx_op_wait_uint		/* UMTX_OP_WAIT_UINT */
+	__umtx_op_wait_uint,		/* UMTX_OP_WAIT_UINT */
+	__umtx_op_rw_rdlock,		/* UMTX_OP_RW_RDLOCK */
+	__umtx_op_rw_wrlock,		/* UMTX_OP_RW_WRLOCK */
+	__umtx_op_rw_unlock		/* UMTX_OP_RW_UNLOCK */
 };

 int
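
Dispatch works by direct array indexing, so op_table[] must stay in step
with the UMTX_OP_* numbering in sys/umtx.h. A compile-time guard one
could add (not part of this commit; CTASSERT is the kernel's existing
static-assert macro):

    CTASSERT(sizeof(op_table) / sizeof(op_table[0]) == UMTX_OP_MAX);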
@@ -2656,6 +3049,52 @@ __umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
 	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
 }

+static int
+__umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
+{
+	struct timespec timeout;
+	int error;
+
+	/* Allow a null timespec (wait forever). */
+	if (uap->uaddr2 == NULL) {
+		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
+	} else {
+		/* a 32-bit timespec differs in layout, so copyin_timeout32()
+		 * is used here rather than plain copyin() */
+		error = copyin_timeout32(uap->uaddr2, &timeout);
+		if (error != 0)
+			return (error);
+		if (timeout.tv_nsec >= 1000000000 ||
+		    timeout.tv_nsec < 0) {
+			return (EINVAL);
+		}
+		error = do_rw_rdlock2(td, uap->obj, uap->val, &timeout);
+	}
+	return (error);
+}
+
+static int
+__umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
+{
+	struct timespec timeout;
+	int error;
+
+	/* Allow a null timespec (wait forever). */
+	if (uap->uaddr2 == NULL) {
+		error = do_rw_wrlock(td, uap->obj, 0);
+	} else {
+		error = copyin_timeout32(uap->uaddr2, &timeout);
+		if (error != 0)
+			return (error);
+		if (timeout.tv_nsec >= 1000000000 ||
+		    timeout.tv_nsec < 0) {
+			return (EINVAL);
+		}
+		error = do_rw_wrlock2(td, uap->obj, &timeout);
+	}
+	return (error);
+}
+
 static _umtx_op_func op_table_compat32[] = {
 	__umtx_op_lock_umtx_compat32,	/* UMTX_OP_LOCK */
 	__umtx_op_unlock_umtx_compat32,	/* UMTX_OP_UNLOCK */
@@ -2668,7 +3107,10 @@ static _umtx_op_func op_table_compat32[] = {
 	__umtx_op_cv_wait_compat32,	/* UMTX_OP_CV_WAIT*/
 	__umtx_op_cv_signal,		/* UMTX_OP_CV_SIGNAL */
 	__umtx_op_cv_broadcast,		/* UMTX_OP_CV_BROADCAST */
-	__umtx_op_wait_compat32		/* UMTX_OP_WAIT_UINT */
+	__umtx_op_wait_compat32,	/* UMTX_OP_WAIT_UINT */
+	__umtx_op_rw_rdlock_compat32,	/* UMTX_OP_RW_RDLOCK */
+	__umtx_op_rw_wrlock_compat32,	/* UMTX_OP_RW_WRLOCK */
+	__umtx_op_rw_unlock		/* UMTX_OP_RW_UNLOCK */
 };

 int

sys/sys/umtx.h

@@ -66,6 +66,23 @@ struct ucond {
 	uint32_t c_spare[2];		/* Spare space */
 };

+struct urwlock {
+	volatile int32_t rw_state;
+	uint32_t rw_flags;
+	uint32_t rw_blocked_readers;
+	uint32_t rw_blocked_writers;
+	uint32_t rw_spare[4];
+};
+
+/* urwlock flags */
+#define URWLOCK_PREFER_READER	0x0002
+
+#define URWLOCK_WRITE_OWNER	0x80000000U
+#define URWLOCK_WRITE_WAITERS	0x40000000U
+#define URWLOCK_READ_WAITERS	0x20000000U
+#define URWLOCK_MAX_READERS	0x1fffffffU
+#define URWLOCK_READER_COUNT(c)	((c) & URWLOCK_MAX_READERS)
+
 /* op code for _umtx_op */
 #define UMTX_OP_LOCK		0
 #define UMTX_OP_UNLOCK		1
@@ -79,7 +96,10 @@ struct ucond {
 #define UMTX_OP_CV_SIGNAL	9
 #define UMTX_OP_CV_BROADCAST	10
 #define UMTX_OP_WAIT_UINT	11
-#define UMTX_OP_MAX		12
+#define UMTX_OP_RW_RDLOCK	12
+#define UMTX_OP_RW_WRLOCK	13
+#define UMTX_OP_RW_UNLOCK	14
+#define UMTX_OP_MAX		15

 /* flags for UMTX_OP_CV_WAIT */
 #define UMTX_CHECK_UNPARKING	0x01
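
For reference, the masks above imply this layout for the 32-bit
rw_state word:

    /*
     *  bit 31        bit 30          bit 29         bits 28..0
     *  WRITE_OWNER | WRITE_WAITERS | READ_WAITERS | reader count
     *
     * Example: 0x40000003 = unowned, one writer queued, three readers
     * holding the lock; URWLOCK_READER_COUNT(0x40000003) == 3. The
     * rdlock fast path's state + 1 can never carry into the flag bits
     * because acquisition is refused at URWLOCK_MAX_READERS.
     */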