2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
2004-12-18 12:52:44 +00:00
|
|
|
* Copyright (c) 2004, David Xu <davidxu@freebsd.org>
|
2003-04-01 01:10:42 +00:00
|
|
|
* Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice unmodified, this list of conditions, and the following
|
|
|
|
* disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2003-04-01 01:10:42 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/kernel.h>
|
2004-07-02 00:40:07 +00:00
|
|
|
#include <sys/limits.h>
|
2003-04-01 01:10:42 +00:00
|
|
|
#include <sys/lock.h>
|
2003-06-03 05:24:46 +00:00
|
|
|
#include <sys/malloc.h>
|
2003-04-01 01:10:42 +00:00
|
|
|
#include <sys/mutex.h>
|
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/sysent.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/sysproto.h>
|
2004-12-18 12:52:44 +00:00
|
|
|
#include <sys/eventhandler.h>
|
2003-04-01 01:10:42 +00:00
|
|
|
#include <sys/thr.h>
|
|
|
|
#include <sys/umtx.h>
|
|
|
|
|
2004-12-18 12:52:44 +00:00
|
|
|
#include <vm/vm.h>
|
|
|
|
#include <vm/vm_param.h>
|
|
|
|
#include <vm/pmap.h>
|
|
|
|
#include <vm/vm_map.h>
|
|
|
|
#include <vm/vm_object.h>
|
|
|
|
|
|
|
|
/* Key types: a umtx is either process-private or shared between processes. */
#define UMTX_PRIVATE 0
#define UMTX_SHARED 1

/*
 * Key used to look up the sleep-queue chain for a umtx.  For shared
 * mappings the key is (vm_object, offset) so all processes mapping the
 * same object agree; for private mappings it is (uva, pid).  The "both"
 * arm overlays the two representations as a generic (ptr, word) pair so
 * hashing and comparison need not distinguish the key type.
 */
struct umtx_key {
	int type;		/* UMTX_SHARED or UMTX_PRIVATE. */
	union {
		struct {
			vm_object_t object;	/* Backing VM object. */
			long offset;		/* Offset of umtx in object. */
		} shared;
		struct {
			struct umtx *umtx;	/* User virtual address. */
			long pid;		/* Owning process id. */
		} private;
		struct {
			void *ptr;	/* Generic view of object/umtx. */
			long word;	/* Generic view of offset/pid. */
		} both;
	} info;
};
|
|
|
|
|
2003-06-03 05:24:46 +00:00
|
|
|
/*
 * Per-thread sleep-queue entry: records the key a thread is blocked on
 * and links the thread into the hash chain of waiters.
 */
struct umtx_q {
	LIST_ENTRY(umtx_q) uq_next;	/* Linked list for the hash. */
	struct umtx_key uq_key;		/* Umtx key. */
	int uq_flags;			/* Umtx flags. */
#define UQF_UMTXQ 0x0001
	struct thread *uq_thread;	/* The thread waits on. */
	vm_offset_t uq_addr;		/* Umtx's virtual address. */
};

/* Head type for a chain of queued umtx_q entries. */
LIST_HEAD(umtx_head, umtx_q);
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
/*
 * One hash-chain bucket: a per-chain mutex (instead of a single global
 * lock) plus the list of sleeping threads hashed to this bucket.
 */
struct umtxq_chain {
	struct mtx uc_lock;		/* Lock for this chain. */
	struct umtx_head uc_queue;	/* List of sleep queues. */
#define UCF_BUSY 0x01
	int uc_flags;			/* UCF_* flags. */
	int uc_waiters;			/* Threads sleeping on UCF_BUSY. */
};
|
2003-06-03 05:24:46 +00:00
|
|
|
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
/* Multiplicative-hash constant and chain-table geometry. */
#define GOLDEN_RATIO_PRIME 2654404609U
#define UMTX_CHAINS 128
/* Keep the top 7 bits of the product: 2^7 == UMTX_CHAINS. */
#define UMTX_SHIFTS (__WORD_BIT - 7)

/* The hash table of sleep-queue chains. */
static struct umtxq_chain umtxq_chains[UMTX_CHAINS];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
|
2003-05-25 18:18:32 +00:00
|
|
|
|
2004-12-18 12:52:44 +00:00
|
|
|
/* Forward declarations for the chain/queue helpers defined below. */
static void umtxq_init_chains(void *);
static int umtxq_hash(struct umtx_key *key);
static struct mtx *umtxq_mtx(int chain);
static void umtxq_lock(struct umtx_key *key);
static void umtxq_unlock(struct umtx_key *key);
static void umtxq_busy(struct umtx_key *key);
static void umtxq_unbusy(struct umtx_key *key);
static void umtxq_insert(struct umtx_q *uq);
static void umtxq_remove(struct umtx_q *uq);
static int umtxq_sleep(struct thread *td, struct umtx_key *key,
	int prio, const char *wmesg, int timo);
static int umtxq_count(struct umtx_key *key);
static int umtxq_signal(struct umtx_key *key, int nr_wakeup);
static int umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2);
static int umtx_key_get(struct thread *td, struct umtx *umtx,
	struct umtx_key *key);
static void umtx_key_release(struct umtx_key *key);

/* Initialize the chain table early at boot, after event handlers. */
SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_init_chains, NULL);
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
|
2005-03-05 09:15:03 +00:00
|
|
|
struct umtx_q *
|
|
|
|
umtxq_alloc(void)
|
|
|
|
{
|
2006-05-18 08:43:46 +00:00
|
|
|
return (malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK|M_ZERO));
|
2005-03-05 09:15:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
umtxq_free(struct umtx_q *uq)
|
|
|
|
{
|
|
|
|
free(uq, M_UMTX);
|
|
|
|
}
|
|
|
|
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
static void
|
2004-12-18 12:52:44 +00:00
|
|
|
umtxq_init_chains(void *arg __unused)
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < UMTX_CHAINS; ++i) {
|
|
|
|
mtx_init(&umtxq_chains[i].uc_lock, "umtxq_lock", NULL,
|
|
|
|
MTX_DEF | MTX_DUPOK);
|
2004-12-18 12:52:44 +00:00
|
|
|
LIST_INIT(&umtxq_chains[i].uc_queue);
|
2004-12-24 11:30:55 +00:00
|
|
|
umtxq_chains[i].uc_flags = 0;
|
2006-05-09 13:00:46 +00:00
|
|
|
umtxq_chains[i].uc_waiters = 0;
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
2004-12-18 12:52:44 +00:00
|
|
|
umtxq_hash(struct umtx_key *key)
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
{
|
2004-12-18 12:52:44 +00:00
|
|
|
unsigned n = (uintptr_t)key->info.both.ptr + key->info.both.word;
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
return (((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS);
|
|
|
|
}
|
|
|
|
|
2004-12-18 12:52:44 +00:00
|
|
|
static inline int
|
|
|
|
umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2)
|
|
|
|
{
|
|
|
|
return (k1->type == k2->type &&
|
|
|
|
k1->info.both.ptr == k2->info.both.ptr &&
|
|
|
|
k1->info.both.word == k2->info.both.word);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline struct mtx *
|
|
|
|
umtxq_mtx(int chain)
|
|
|
|
{
|
|
|
|
return (&umtxq_chains[chain].uc_lock);
|
|
|
|
}
|
|
|
|
|
2004-12-24 11:30:55 +00:00
|
|
|
static inline void
|
|
|
|
umtxq_busy(struct umtx_key *key)
|
|
|
|
{
|
|
|
|
int chain = umtxq_hash(key);
|
|
|
|
|
|
|
|
mtx_assert(umtxq_mtx(chain), MA_OWNED);
|
|
|
|
while (umtxq_chains[chain].uc_flags & UCF_BUSY) {
|
2006-05-09 13:00:46 +00:00
|
|
|
umtxq_chains[chain].uc_waiters++;
|
2004-12-24 11:30:55 +00:00
|
|
|
msleep(&umtxq_chains[chain], umtxq_mtx(chain),
|
2006-04-17 18:20:38 +00:00
|
|
|
0, "umtxq_busy", 0);
|
2006-05-09 13:00:46 +00:00
|
|
|
umtxq_chains[chain].uc_waiters--;
|
2004-12-24 11:30:55 +00:00
|
|
|
}
|
|
|
|
umtxq_chains[chain].uc_flags |= UCF_BUSY;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
umtxq_unbusy(struct umtx_key *key)
|
|
|
|
{
|
|
|
|
int chain = umtxq_hash(key);
|
|
|
|
|
|
|
|
mtx_assert(umtxq_mtx(chain), MA_OWNED);
|
2004-12-24 11:59:20 +00:00
|
|
|
KASSERT(umtxq_chains[chain].uc_flags & UCF_BUSY, ("not busy"));
|
2004-12-24 11:30:55 +00:00
|
|
|
umtxq_chains[chain].uc_flags &= ~UCF_BUSY;
|
2006-05-09 13:00:46 +00:00
|
|
|
if (umtxq_chains[chain].uc_waiters)
|
|
|
|
wakeup_one(&umtxq_chains[chain]);
|
2004-12-24 11:30:55 +00:00
|
|
|
}
|
|
|
|
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
/*
 * Acquire the chain mutex for the chain this key hashes to.
 */
static inline void
umtxq_lock(struct umtx_key *key)
{
	mtx_lock(umtxq_mtx(umtxq_hash(key)));
}
|
|
|
|
|
2004-11-30 12:18:53 +00:00
|
|
|
/*
 * Release the chain mutex for the chain this key hashes to.
 */
static inline void
umtxq_unlock(struct umtx_key *key)
{
	mtx_unlock(umtxq_mtx(umtxq_hash(key)));
}
|
2003-06-03 05:24:46 +00:00
|
|
|
|
2004-12-18 12:52:44 +00:00
|
|
|
/*
|
|
|
|
* Insert a thread onto the umtx queue.
|
|
|
|
*/
|
|
|
|
static inline void
|
|
|
|
umtxq_insert(struct umtx_q *uq)
|
2003-06-03 05:24:46 +00:00
|
|
|
{
|
|
|
|
struct umtx_head *head;
|
2004-12-18 12:52:44 +00:00
|
|
|
int chain = umtxq_hash(&uq->uq_key);
|
|
|
|
|
2004-12-24 11:30:55 +00:00
|
|
|
mtx_assert(umtxq_mtx(chain), MA_OWNED);
|
2004-12-18 12:52:44 +00:00
|
|
|
head = &umtxq_chains[chain].uc_queue;
|
|
|
|
LIST_INSERT_HEAD(head, uq, uq_next);
|
2006-05-18 08:43:46 +00:00
|
|
|
uq->uq_flags |= UQF_UMTXQ;
|
2004-12-18 12:52:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove thread from the umtx queue.
|
|
|
|
*/
|
|
|
|
static inline void
|
|
|
|
umtxq_remove(struct umtx_q *uq)
|
|
|
|
{
|
2004-12-24 11:30:55 +00:00
|
|
|
mtx_assert(umtxq_mtx(umtxq_hash(&uq->uq_key)), MA_OWNED);
|
2006-05-18 08:43:46 +00:00
|
|
|
if (uq->uq_flags & UQF_UMTXQ) {
|
2004-12-18 12:52:44 +00:00
|
|
|
LIST_REMOVE(uq, uq_next);
|
2006-05-18 08:43:46 +00:00
|
|
|
/* turning off UQF_UMTXQ should be the last thing. */
|
|
|
|
uq->uq_flags &= ~UQF_UMTXQ;
|
2004-12-18 12:52:44 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
umtxq_count(struct umtx_key *key)
|
|
|
|
{
|
2003-06-03 05:24:46 +00:00
|
|
|
struct umtx_q *uq;
|
2004-12-18 12:52:44 +00:00
|
|
|
struct umtx_head *head;
|
|
|
|
int chain, count = 0;
|
2003-06-03 05:24:46 +00:00
|
|
|
|
2004-12-18 12:52:44 +00:00
|
|
|
chain = umtxq_hash(key);
|
2004-12-24 11:30:55 +00:00
|
|
|
mtx_assert(umtxq_mtx(chain), MA_OWNED);
|
2004-12-18 12:52:44 +00:00
|
|
|
head = &umtxq_chains[chain].uc_queue;
|
2003-06-03 05:24:46 +00:00
|
|
|
LIST_FOREACH(uq, head, uq_next) {
|
2004-12-18 12:52:44 +00:00
|
|
|
if (umtx_key_match(&uq->uq_key, key)) {
|
|
|
|
if (++count > 1)
|
|
|
|
break;
|
|
|
|
}
|
2003-06-03 05:24:46 +00:00
|
|
|
}
|
2004-12-18 12:52:44 +00:00
|
|
|
return (count);
|
2003-06-03 05:24:46 +00:00
|
|
|
}
|
|
|
|
|
2004-12-24 11:30:55 +00:00
|
|
|
static int
|
|
|
|
umtxq_signal(struct umtx_key *key, int n_wake)
|
2003-06-03 05:24:46 +00:00
|
|
|
{
|
2004-12-18 12:52:44 +00:00
|
|
|
struct umtx_q *uq, *next;
|
|
|
|
struct umtx_head *head;
|
2004-12-24 11:30:55 +00:00
|
|
|
struct thread *blocked = NULL;
|
|
|
|
int chain, ret;
|
2003-06-03 05:24:46 +00:00
|
|
|
|
2004-12-24 11:30:55 +00:00
|
|
|
ret = 0;
|
2004-12-18 12:52:44 +00:00
|
|
|
chain = umtxq_hash(key);
|
2004-12-24 11:30:55 +00:00
|
|
|
mtx_assert(umtxq_mtx(chain), MA_OWNED);
|
2004-12-18 12:52:44 +00:00
|
|
|
head = &umtxq_chains[chain].uc_queue;
|
2004-12-24 11:30:55 +00:00
|
|
|
for (uq = LIST_FIRST(head); uq; uq = next) {
|
2004-12-18 12:52:44 +00:00
|
|
|
next = LIST_NEXT(uq, uq_next);
|
|
|
|
if (umtx_key_match(&uq->uq_key, key)) {
|
|
|
|
blocked = uq->uq_thread;
|
|
|
|
umtxq_remove(uq);
|
|
|
|
wakeup(blocked);
|
2004-12-24 11:30:55 +00:00
|
|
|
if (++ret >= n_wake)
|
|
|
|
break;
|
2004-12-18 12:52:44 +00:00
|
|
|
}
|
|
|
|
}
|
2004-12-24 11:30:55 +00:00
|
|
|
return (ret);
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
2004-12-18 12:52:44 +00:00
|
|
|
umtxq_sleep(struct thread *td, struct umtx_key *key, int priority,
|
|
|
|
const char *wmesg, int timo)
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
{
|
2004-12-18 12:52:44 +00:00
|
|
|
int chain = umtxq_hash(key);
|
2005-01-06 02:08:34 +00:00
|
|
|
int error = msleep(td, umtxq_mtx(chain), priority, wmesg, timo);
|
|
|
|
if (error == EWOULDBLOCK)
|
|
|
|
error = ETIMEDOUT;
|
|
|
|
return (error);
|
2004-12-18 12:52:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Build a lookup key for the umtx at a user address.  The address is
 * resolved through the process's VM map: if the backing map entry is
 * inheritance-shared, the key is (object, offset) and a reference is
 * taken on the object (released by umtx_key_release); otherwise the key
 * is the private (address, pid) pair.  Returns 0 on success or EFAULT
 * when the address is not writably mapped.
 */
static int
umtx_key_get(struct thread *td, struct umtx *umtx, struct umtx_key *key)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;

	map = &td->td_proc->p_vmspace->vm_map;
	if (vm_map_lookup(&map, (vm_offset_t)umtx, VM_PROT_WRITE,
	    &entry, &key->info.shared.object, &pindex, &prot,
	    &wired) != KERN_SUCCESS) {
		return EFAULT;
	}

	if (VM_INHERIT_SHARE == entry->inheritance) {
		key->type = UMTX_SHARED;
		/* Object-relative offset of the umtx word. */
		key->info.shared.offset = entry->offset + entry->start -
			(vm_offset_t)umtx;
		/* Hold the object so the key stays valid across unmap. */
		vm_object_reference(key->info.shared.object);
	} else {
		key->type = UMTX_PRIVATE;
		key->info.private.umtx = umtx;
		key->info.private.pid = td->td_proc->p_pid;
	}
	/* Drop the map lock taken by vm_map_lookup(). */
	vm_map_lookup_done(map, entry);
	return (0);
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
umtx_key_release(struct umtx_key *key)
|
|
|
|
{
|
|
|
|
if (key->type == UMTX_SHARED)
|
|
|
|
vm_object_deallocate(key->info.shared.object);
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
2004-12-18 12:52:44 +00:00
|
|
|
umtxq_queue_me(struct thread *td, struct umtx *umtx, struct umtx_q *uq)
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
{
|
2004-12-18 12:52:44 +00:00
|
|
|
int error;
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
|
2004-12-18 12:52:44 +00:00
|
|
|
if ((error = umtx_key_get(td, umtx, &uq->uq_key)) != 0)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
uq->uq_addr = (vm_offset_t)umtx;
|
|
|
|
uq->uq_thread = td;
|
|
|
|
umtxq_lock(&uq->uq_key);
|
2004-12-24 11:30:55 +00:00
|
|
|
/* hmm, for condition variable, we don't need busy flag. */
|
|
|
|
umtxq_busy(&uq->uq_key);
|
2004-12-18 12:52:44 +00:00
|
|
|
umtxq_insert(uq);
|
2004-12-24 11:30:55 +00:00
|
|
|
umtxq_unbusy(&uq->uq_key);
|
2004-12-18 12:52:44 +00:00
|
|
|
umtxq_unlock(&uq->uq_key);
|
|
|
|
return (0);
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
}
|
|
|
|
|
2004-12-18 12:52:44 +00:00
|
|
|
static int
|
|
|
|
_do_lock(struct thread *td, struct umtx *umtx, long id, int timo)
|
2003-04-01 01:10:42 +00:00
|
|
|
{
|
2005-03-05 09:15:03 +00:00
|
|
|
struct umtx_q *uq;
|
2003-04-01 01:10:42 +00:00
|
|
|
intptr_t owner;
|
2003-04-02 08:02:27 +00:00
|
|
|
intptr_t old;
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
int error = 0;
|
2003-04-01 01:10:42 +00:00
|
|
|
|
2005-03-05 09:15:03 +00:00
|
|
|
uq = td->td_umtxq;
|
2003-04-01 01:10:42 +00:00
|
|
|
/*
|
2004-12-18 12:52:44 +00:00
|
|
|
* Care must be exercised when dealing with umtx structure. It
|
2003-04-01 01:10:42 +00:00
|
|
|
* can fault on any access.
|
|
|
|
*/
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
/*
|
|
|
|
* Try the uncontested case. This should be done in userland.
|
|
|
|
*/
|
|
|
|
owner = casuptr((intptr_t *)&umtx->u_owner,
|
2004-12-18 12:52:44 +00:00
|
|
|
UMTX_UNOWNED, id);
|
2003-04-01 01:10:42 +00:00
|
|
|
|
|
|
|
/* The acquire succeeded. */
|
2003-06-03 05:24:46 +00:00
|
|
|
if (owner == UMTX_UNOWNED)
|
|
|
|
return (0);
|
2003-04-01 01:10:42 +00:00
|
|
|
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
/* The address was invalid. */
|
|
|
|
if (owner == -1)
|
|
|
|
return (EFAULT);
|
|
|
|
|
2003-06-03 05:24:46 +00:00
|
|
|
/* If no one owns it but it is contested try to acquire it. */
|
|
|
|
if (owner == UMTX_CONTESTED) {
|
|
|
|
owner = casuptr((intptr_t *)&umtx->u_owner,
|
2004-12-18 12:52:44 +00:00
|
|
|
UMTX_CONTESTED, id | UMTX_CONTESTED);
|
2003-06-03 05:24:46 +00:00
|
|
|
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
if (owner == UMTX_CONTESTED)
|
|
|
|
return (0);
|
|
|
|
|
2003-06-03 05:24:46 +00:00
|
|
|
/* The address was invalid. */
|
|
|
|
if (owner == -1)
|
|
|
|
return (EFAULT);
|
|
|
|
|
|
|
|
/* If this failed the lock has changed, restart. */
|
|
|
|
continue;
|
2003-04-01 01:10:42 +00:00
|
|
|
}
|
|
|
|
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
/*
|
|
|
|
* If we caught a signal, we have retried and now
|
|
|
|
* exit immediately.
|
|
|
|
*/
|
2005-03-05 09:15:03 +00:00
|
|
|
if (error || (error = umtxq_queue_me(td, umtx, uq)) != 0)
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
return (error);
|
2003-06-03 05:24:46 +00:00
|
|
|
|
2003-04-01 01:10:42 +00:00
|
|
|
/*
|
|
|
|
* Set the contested bit so that a release in user space
|
|
|
|
* knows to use the system call for unlock. If this fails
|
|
|
|
* either some one else has acquired the lock or it has been
|
|
|
|
* released.
|
|
|
|
*/
|
2003-04-02 08:02:27 +00:00
|
|
|
old = casuptr((intptr_t *)&umtx->u_owner, owner,
|
|
|
|
owner | UMTX_CONTESTED);
|
2003-04-01 01:10:42 +00:00
|
|
|
|
|
|
|
/* The address was invalid. */
|
2003-04-02 08:02:27 +00:00
|
|
|
if (old == -1) {
|
2005-03-05 09:15:03 +00:00
|
|
|
umtxq_lock(&uq->uq_key);
|
|
|
|
umtxq_busy(&uq->uq_key);
|
|
|
|
umtxq_remove(uq);
|
|
|
|
umtxq_unbusy(&uq->uq_key);
|
|
|
|
umtxq_unlock(&uq->uq_key);
|
|
|
|
umtx_key_release(&uq->uq_key);
|
2003-06-03 05:24:46 +00:00
|
|
|
return (EFAULT);
|
2003-04-01 01:10:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-06-03 05:24:46 +00:00
|
|
|
* We set the contested bit, sleep. Otherwise the lock changed
|
Fix umtx locking, for libthr, in the kernel.
1. There was a race condition between a thread unlocking
a umtx and the thread contesting it. If the unlocking
thread won the race it may try to wakeup a thread that
was not yet in msleep(). The contesting thread would then
go to sleep to await a wakeup that would never come. It's
not possible to close the race by using a lock because
calls to casuptr() may have to fault a page in from swap.
Instead, the race was closed by introducing a flag that
the unlocking thread will set when waking up a thread.
The contesting thread will check for this flag before
going to sleep. For now the flag is kept in td_flags,
but it may be better to use some other member or create
a new one because of the possible performance/contention
issues of having to own sched_lock. Thanks to jhb for
pointing me in the right direction on this one.
2. Once a umtx was contested all future locks and unlocks
were happening in the kernel, regardless of whether it
was contested or not. To prevent this from happening,
when a thread locks a umtx it checks the queue for that
umtx and unsets the contested bit if there are no other
threads waiting on it. Again, this is slightly more
complicated than it needs to be because we can't hold
a lock across casuptr(). So, the thread has to check
the queue again after unseting the bit, and reset the
contested bit if it finds that another thread has put
itself on the queue in the mean time.
3. Remove the if... block for unlocking an uncontested
umtx, and replace it with a KASSERT. The _only_ time
a thread should be unlocking a umtx in the kernel is
if it is contested.
2003-07-17 11:06:40 +00:00
|
|
|
* and we need to retry or we lost a race to the thread
|
|
|
|
* unlocking the umtx.
|
2003-04-01 01:10:42 +00:00
|
|
|
*/
|
2005-03-05 09:15:03 +00:00
|
|
|
umtxq_lock(&uq->uq_key);
|
2006-05-18 08:43:46 +00:00
|
|
|
if (old == owner && (uq->uq_flags & UQF_UMTXQ)) {
|
2006-04-17 18:20:38 +00:00
|
|
|
error = umtxq_sleep(td, &uq->uq_key, PCATCH,
|
2004-12-18 12:52:44 +00:00
|
|
|
"umtx", timo);
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
}
|
2005-03-05 09:15:03 +00:00
|
|
|
umtxq_busy(&uq->uq_key);
|
|
|
|
umtxq_remove(uq);
|
|
|
|
umtxq_unbusy(&uq->uq_key);
|
|
|
|
umtxq_unlock(&uq->uq_key);
|
|
|
|
umtx_key_release(&uq->uq_key);
|
2003-04-01 01:10:42 +00:00
|
|
|
}
|
2003-07-18 17:58:37 +00:00
|
|
|
|
|
|
|
return (0);
|
2003-04-01 01:10:42 +00:00
|
|
|
}
|
|
|
|
|
2004-12-18 12:52:44 +00:00
|
|
|
static int
|
|
|
|
do_lock(struct thread *td, struct umtx *umtx, long id,
|
2005-01-14 13:38:15 +00:00
|
|
|
struct timespec *timeout)
|
2004-12-18 12:52:44 +00:00
|
|
|
{
|
2005-01-14 13:38:15 +00:00
|
|
|
struct timespec ts, ts2, ts3;
|
2004-12-18 12:52:44 +00:00
|
|
|
struct timeval tv;
|
2005-01-14 13:38:15 +00:00
|
|
|
int error;
|
2004-12-18 12:52:44 +00:00
|
|
|
|
2005-01-14 13:38:15 +00:00
|
|
|
if (timeout == NULL) {
|
2004-12-18 12:52:44 +00:00
|
|
|
error = _do_lock(td, umtx, id, 0);
|
|
|
|
} else {
|
2005-01-14 13:38:15 +00:00
|
|
|
getnanouptime(&ts);
|
|
|
|
timespecadd(&ts, timeout);
|
|
|
|
TIMESPEC_TO_TIMEVAL(&tv, timeout);
|
2004-12-18 12:52:44 +00:00
|
|
|
for (;;) {
|
2005-01-14 13:38:15 +00:00
|
|
|
error = _do_lock(td, umtx, id, tvtohz(&tv));
|
|
|
|
if (error != ETIMEDOUT)
|
|
|
|
break;
|
|
|
|
getnanouptime(&ts2);
|
|
|
|
if (timespeccmp(&ts2, &ts, >=)) {
|
2005-01-06 02:08:34 +00:00
|
|
|
error = ETIMEDOUT;
|
2004-12-18 12:52:44 +00:00
|
|
|
break;
|
|
|
|
}
|
2005-01-14 13:38:15 +00:00
|
|
|
ts3 = ts;
|
|
|
|
timespecsub(&ts3, &ts2);
|
|
|
|
TIMESPEC_TO_TIMEVAL(&tv, &ts3);
|
2004-12-18 12:52:44 +00:00
|
|
|
}
|
|
|
|
}
|
2004-12-24 11:59:20 +00:00
|
|
|
/*
|
|
|
|
* This lets userland back off critical region if needed.
|
|
|
|
*/
|
|
|
|
if (error == ERESTART)
|
|
|
|
error = EINTR;
|
2004-12-18 12:52:44 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
do_unlock(struct thread *td, struct umtx *umtx, long id)
|
2003-04-01 01:10:42 +00:00
|
|
|
{
|
2004-12-18 12:52:44 +00:00
|
|
|
struct umtx_key key;
|
2003-04-01 01:10:42 +00:00
|
|
|
intptr_t owner;
|
2003-04-02 08:02:27 +00:00
|
|
|
intptr_t old;
|
2004-12-24 11:30:55 +00:00
|
|
|
int error;
|
|
|
|
int count;
|
2003-04-01 01:10:42 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Make sure we own this mtx.
|
|
|
|
*
|
|
|
|
* XXX Need a {fu,su}ptr this is not correct on arch where
|
|
|
|
* sizeof(intptr_t) != sizeof(long).
|
|
|
|
*/
|
2003-06-03 05:24:46 +00:00
|
|
|
if ((owner = fuword(&umtx->u_owner)) == -1)
|
|
|
|
return (EFAULT);
|
|
|
|
|
2004-12-18 12:52:44 +00:00
|
|
|
if ((owner & ~UMTX_CONTESTED) != id)
|
2003-06-03 05:24:46 +00:00
|
|
|
return (EPERM);
|
2003-04-01 01:10:42 +00:00
|
|
|
|
Fix umtx locking, for libthr, in the kernel.
1. There was a race condition between a thread unlocking
a umtx and the thread contesting it. If the unlocking
thread won the race it may try to wakeup a thread that
was not yet in msleep(). The contesting thread would then
go to sleep to await a wakeup that would never come. It's
not possible to close the race by using a lock because
calls to casuptr() may have to fault a page in from swap.
Instead, the race was closed by introducing a flag that
the unlocking thread will set when waking up a thread.
The contesting thread will check for this flag before
going to sleep. For now the flag is kept in td_flags,
but it may be better to use some other member or create
a new one because of the possible performance/contention
issues of having to own sched_lock. Thanks to jhb for
pointing me in the right direction on this one.
2. Once a umtx was contested all future locks and unlocks
were happening in the kernel, regardless of whether it
was contested or not. To prevent this from happening,
when a thread locks a umtx it checks the queue for that
umtx and unsets the contested bit if there are no other
threads waiting on it. Again, this is slightly more
complicated than it needs to be because we can't hold
a lock across casuptr(). So, the thread has to check
the queue again after unseting the bit, and reset the
contested bit if it finds that another thread has put
itself on the queue in the mean time.
3. Remove the if... block for unlocking an uncontested
umtx, and replace it with a KASSERT. The _only_ time
a thread should be unlocking a umtx in the kernel is
if it is contested.
2003-07-17 11:06:40 +00:00
|
|
|
/* We should only ever be in here for contested locks */
|
2003-09-07 11:14:52 +00:00
|
|
|
if ((owner & UMTX_CONTESTED) == 0)
|
|
|
|
return (EINVAL);
|
2003-04-01 01:10:42 +00:00
|
|
|
|
2004-12-24 11:30:55 +00:00
|
|
|
if ((error = umtx_key_get(td, umtx, &key)) != 0)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
umtxq_lock(&key);
|
|
|
|
umtxq_busy(&key);
|
|
|
|
count = umtxq_count(&key);
|
|
|
|
umtxq_unlock(&key);
|
|
|
|
|
2003-07-18 17:58:37 +00:00
|
|
|
/*
|
|
|
|
* When unlocking the umtx, it must be marked as unowned if
|
|
|
|
* there is zero or one thread only waiting for it.
|
|
|
|
* Otherwise, it must be marked as contested.
|
|
|
|
*/
|
2004-12-24 11:30:55 +00:00
|
|
|
old = casuptr((intptr_t *)&umtx->u_owner, owner,
|
|
|
|
count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
|
|
|
|
umtxq_lock(&key);
|
|
|
|
umtxq_signal(&key, 0);
|
|
|
|
umtxq_unbusy(&key);
|
|
|
|
umtxq_unlock(&key);
|
|
|
|
umtx_key_release(&key);
|
2003-06-03 05:24:46 +00:00
|
|
|
if (old == -1)
|
|
|
|
return (EFAULT);
|
1. use per-chain mutex instead of global mutex to reduce
lock collision.
2. Fix two race conditions. One is between _umtx_unlock and signal,
also a thread was marked TDF_UMTXWAKEUP by _umtx_unlock, it is
possible a signal delivered to the thread will cause msleep
returns EINTR, and the thread breaks out of loop, this causes
umtx ownership is not transfered to the thread. Another is in
_umtx_unlock itself, when the function sets the umtx to
UMTX_UNOWNED state, a new thread can come in and lock the umtx,
also the function tries to set contested bit flag, but it will
fail. Although the function will wake a blocked thread, if that
thread breaks out of loop by signal, no contested bit will be set.
2004-11-30 12:02:53 +00:00
|
|
|
if (old != owner)
|
|
|
|
return (EINVAL);
|
2003-06-03 05:24:46 +00:00
|
|
|
return (0);
|
2003-04-01 01:10:42 +00:00
|
|
|
}
|
2004-12-18 12:52:44 +00:00
|
|
|
|
|
|
|
static int
|
2005-01-14 13:38:15 +00:00
|
|
|
do_wait(struct thread *td, struct umtx *umtx, long id, struct timespec *timeout)
|
2004-12-18 12:52:44 +00:00
|
|
|
{
|
2005-03-05 09:15:03 +00:00
|
|
|
struct umtx_q *uq;
|
2005-01-14 13:38:15 +00:00
|
|
|
struct timespec ts, ts2, ts3;
|
2004-12-18 12:52:44 +00:00
|
|
|
struct timeval tv;
|
2004-12-30 02:56:17 +00:00
|
|
|
long tmp;
|
2005-01-14 13:38:15 +00:00
|
|
|
int error = 0;
|
2004-12-18 12:52:44 +00:00
|
|
|
|
2005-03-05 09:15:03 +00:00
|
|
|
uq = td->td_umtxq;
|
|
|
|
if ((error = umtxq_queue_me(td, umtx, uq)) != 0)
|
2004-12-18 12:52:44 +00:00
|
|
|
return (error);
|
2004-12-30 02:56:17 +00:00
|
|
|
tmp = fuword(&umtx->u_owner);
|
|
|
|
if (tmp != id) {
|
2005-03-05 09:15:03 +00:00
|
|
|
umtxq_lock(&uq->uq_key);
|
|
|
|
umtxq_remove(uq);
|
|
|
|
umtxq_unlock(&uq->uq_key);
|
2005-01-14 13:38:15 +00:00
|
|
|
} else if (timeout == NULL) {
|
2005-03-05 09:15:03 +00:00
|
|
|
umtxq_lock(&uq->uq_key);
|
2006-05-18 08:43:46 +00:00
|
|
|
if (uq->uq_flags & UQF_UMTXQ)
|
2005-03-05 09:15:03 +00:00
|
|
|
error = umtxq_sleep(td, &uq->uq_key,
|
2006-04-17 18:20:38 +00:00
|
|
|
PCATCH, "ucond", 0);
|
2006-05-18 08:43:46 +00:00
|
|
|
if (!(uq->uq_flags & UQF_UMTXQ))
|
2004-12-24 11:30:55 +00:00
|
|
|
error = 0;
|
|
|
|
else
|
2005-03-05 09:15:03 +00:00
|
|
|
umtxq_remove(uq);
|
|
|
|
umtxq_unlock(&uq->uq_key);
|
2004-12-18 12:52:44 +00:00
|
|
|
} else {
|
2005-01-14 13:38:15 +00:00
|
|
|
getnanouptime(&ts);
|
|
|
|
timespecadd(&ts, timeout);
|
|
|
|
TIMESPEC_TO_TIMEVAL(&tv, timeout);
|
2004-12-18 12:52:44 +00:00
|
|
|
for (;;) {
|
2005-03-05 09:15:03 +00:00
|
|
|
umtxq_lock(&uq->uq_key);
|
2006-05-18 08:43:46 +00:00
|
|
|
if (uq->uq_flags & UQF_UMTXQ) {
|
2006-04-17 18:20:38 +00:00
|
|
|
error = umtxq_sleep(td, &uq->uq_key, PCATCH,
|
2005-01-14 13:38:15 +00:00
|
|
|
"ucond", tvtohz(&tv));
|
|
|
|
}
|
2006-05-18 08:43:46 +00:00
|
|
|
if (!(uq->uq_flags & UQF_UMTXQ)) {
|
2005-03-05 09:15:03 +00:00
|
|
|
umtxq_unlock(&uq->uq_key);
|
2005-01-14 13:38:15 +00:00
|
|
|
goto out;
|
|
|
|
}
|
2005-03-05 09:15:03 +00:00
|
|
|
umtxq_unlock(&uq->uq_key);
|
2005-01-14 13:38:15 +00:00
|
|
|
if (error != ETIMEDOUT)
|
|
|
|
break;
|
|
|
|
getnanouptime(&ts2);
|
|
|
|
if (timespeccmp(&ts2, &ts, >=)) {
|
|
|
|
error = ETIMEDOUT;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
ts3 = ts;
|
|
|
|
timespecsub(&ts3, &ts2);
|
|
|
|
TIMESPEC_TO_TIMEVAL(&tv, &ts3);
|
2004-12-18 12:52:44 +00:00
|
|
|
}
|
2005-03-05 09:15:03 +00:00
|
|
|
umtxq_lock(&uq->uq_key);
|
|
|
|
umtxq_remove(uq);
|
|
|
|
umtxq_unlock(&uq->uq_key);
|
2004-12-18 12:52:44 +00:00
|
|
|
}
|
2005-01-14 13:38:15 +00:00
|
|
|
out:
|
2005-03-05 09:15:03 +00:00
|
|
|
umtx_key_release(&uq->uq_key);
|
2004-12-24 11:30:55 +00:00
|
|
|
if (error == ERESTART)
|
|
|
|
error = EINTR;
|
2004-12-18 12:52:44 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2005-10-26 06:55:46 +00:00
|
|
|
int
|
|
|
|
kern_umtx_wake(struct thread *td, void *uaddr, int n_wake)
|
2004-12-18 12:52:44 +00:00
|
|
|
{
|
|
|
|
struct umtx_key key;
|
2004-12-24 11:30:55 +00:00
|
|
|
int ret;
|
2004-12-18 12:52:44 +00:00
|
|
|
|
2004-12-24 11:30:55 +00:00
|
|
|
if ((ret = umtx_key_get(td, uaddr, &key)) != 0)
|
|
|
|
return (ret);
|
2004-12-24 11:59:20 +00:00
|
|
|
umtxq_lock(&key);
|
2004-12-24 11:30:55 +00:00
|
|
|
ret = umtxq_signal(&key, n_wake);
|
2004-12-24 11:59:20 +00:00
|
|
|
umtxq_unlock(&key);
|
2004-12-24 11:30:55 +00:00
|
|
|
umtx_key_release(&key);
|
2004-12-18 12:52:44 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
|
|
|
|
/* struct umtx *umtx */
|
|
|
|
{
|
|
|
|
return _do_lock(td, uap->umtx, td->td_tid, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
|
|
|
|
/* struct umtx *umtx */
|
|
|
|
{
|
|
|
|
return do_unlock(td, uap->umtx, td->td_tid);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
_umtx_op(struct thread *td, struct _umtx_op_args *uap)
|
|
|
|
{
|
2005-01-14 13:38:15 +00:00
|
|
|
struct timespec timeout;
|
2004-12-18 12:52:44 +00:00
|
|
|
struct timespec *ts;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
switch(uap->op) {
|
|
|
|
case UMTX_OP_LOCK:
|
|
|
|
/* Allow a null timespec (wait forever). */
|
2004-12-25 13:02:50 +00:00
|
|
|
if (uap->uaddr2 == NULL)
|
2004-12-18 12:52:44 +00:00
|
|
|
ts = NULL;
|
|
|
|
else {
|
2005-01-14 13:38:15 +00:00
|
|
|
error = copyin(uap->uaddr2, &timeout, sizeof(timeout));
|
2004-12-18 12:52:44 +00:00
|
|
|
if (error != 0)
|
2005-01-12 05:55:52 +00:00
|
|
|
break;
|
2005-01-14 13:38:15 +00:00
|
|
|
if (timeout.tv_nsec >= 1000000000 ||
|
|
|
|
timeout.tv_nsec < 0) {
|
2005-01-12 05:55:52 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
2005-01-14 13:38:15 +00:00
|
|
|
ts = &timeout;
|
2004-12-18 12:52:44 +00:00
|
|
|
}
|
2005-01-12 05:55:52 +00:00
|
|
|
error = do_lock(td, uap->umtx, uap->id, ts);
|
|
|
|
break;
|
2004-12-18 12:52:44 +00:00
|
|
|
case UMTX_OP_UNLOCK:
|
2005-01-12 05:55:52 +00:00
|
|
|
error = do_unlock(td, uap->umtx, uap->id);
|
|
|
|
break;
|
2004-12-30 02:56:17 +00:00
|
|
|
case UMTX_OP_WAIT:
|
2004-12-18 12:52:44 +00:00
|
|
|
/* Allow a null timespec (wait forever). */
|
2004-12-25 13:02:50 +00:00
|
|
|
if (uap->uaddr2 == NULL)
|
2004-12-18 12:52:44 +00:00
|
|
|
ts = NULL;
|
|
|
|
else {
|
2005-01-14 13:38:15 +00:00
|
|
|
error = copyin(uap->uaddr2, &timeout, sizeof(timeout));
|
2004-12-18 12:52:44 +00:00
|
|
|
if (error != 0)
|
2005-01-12 05:55:52 +00:00
|
|
|
break;
|
2005-01-14 13:38:15 +00:00
|
|
|
if (timeout.tv_nsec >= 1000000000 ||
|
|
|
|
timeout.tv_nsec < 0) {
|
2005-01-12 05:55:52 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
2005-01-14 13:38:15 +00:00
|
|
|
ts = &timeout;
|
2004-12-18 12:52:44 +00:00
|
|
|
}
|
2005-01-12 05:55:52 +00:00
|
|
|
error = do_wait(td, uap->umtx, uap->id, ts);
|
|
|
|
break;
|
2004-12-18 12:52:44 +00:00
|
|
|
case UMTX_OP_WAKE:
|
2005-10-26 06:55:46 +00:00
|
|
|
error = kern_umtx_wake(td, uap->umtx, uap->id);
|
2005-01-12 05:55:52 +00:00
|
|
|
break;
|
2004-12-18 12:52:44 +00:00
|
|
|
default:
|
2005-01-12 05:55:52 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
2004-12-18 12:52:44 +00:00
|
|
|
}
|
2005-01-18 13:53:10 +00:00
|
|
|
return (error);
|
2004-12-18 12:52:44 +00:00
|
|
|
}
|