/*
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

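/*
 * Kernel support for userland mutexes (umtx), as used by libthr.  A umtx
 * lives in user memory; the uncontested lock and unlock paths are meant to
 * be handled in userland with atomic operations, and the _umtx_lock() and
 * _umtx_unlock() syscalls below deal with contention.  Threads blocked on
 * a umtx are queued on umtx_q structures kept in a small hash table keyed
 * by process id and umtx address.
 */
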
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/sx.h>
#include <sys/thr.h>
#include <sys/umtx.h>

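/*
 * One umtx_q exists for each (pid, umtx address) pair that currently has
 * threads blocked on it.  It hangs off a hash bucket and carries the tail
 * queue of threads sleeping on the umtx.
 */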
struct umtx_q {
        LIST_ENTRY(umtx_q) uq_next;     /* Linked list for the hash. */
        TAILQ_HEAD(, thread) uq_tdq;    /* List of threads blocked here. */
        struct umtx *uq_umtx;           /* Pointer key component. */
        pid_t uq_pid;                   /* Pid key component. */
};

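/*
 * Hash table of umtx_q entries.  The bucket index is derived from the
 * process id and the user address of the umtx (with the low 16 bits
 * masked off), so a lookup only walks the entries that hash together.
 */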
#define UMTX_QUEUES     128
#define UMTX_HASH(pid, umtx)    \
    (((uintptr_t)pid + ((uintptr_t)umtx & ~65535)) % UMTX_QUEUES)

LIST_HEAD(umtx_head, umtx_q);
static struct umtx_head queues[UMTX_QUEUES];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");

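/*
 * A single mutex protects every umtx queue.  It must be dropped around
 * anything that may sleep or fault on user memory, such as malloc() and
 * casuptr().
 */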
static struct mtx umtx_lock;
MTX_SYSINIT(umtx, &umtx_lock, "umtx", MTX_DEF);

#define UMTX_LOCK()     mtx_lock(&umtx_lock)
#define UMTX_UNLOCK()   mtx_unlock(&umtx_lock)

static struct umtx_q *umtx_lookup(struct thread *, struct umtx *umtx);
static struct umtx_q *umtx_insert(struct thread *, struct umtx *umtx);

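/*
 * Look up the queue for the given umtx in the current process, or return
 * NULL if no thread is blocked on it.  The umtx lock must be held.
 */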
static struct umtx_q *
umtx_lookup(struct thread *td, struct umtx *umtx)
{
        struct umtx_head *head;
        struct umtx_q *uq;
        pid_t pid;

        pid = td->td_proc->p_pid;

        head = &queues[UMTX_HASH(pid, umtx)];

        LIST_FOREACH(uq, head, uq_next) {
                if (uq->uq_pid == pid && uq->uq_umtx == umtx)
                        return (uq);
        }

        return (NULL);
}

/*
 * Insert a thread onto the umtx queue.
 */
static struct umtx_q *
umtx_insert(struct thread *td, struct umtx *umtx)
{
        struct umtx_head *head;
        struct umtx_q *uq;
        pid_t pid;

        pid = td->td_proc->p_pid;

        if ((uq = umtx_lookup(td, umtx)) == NULL) {
                struct umtx_q *ins;

                UMTX_UNLOCK();
                ins = malloc(sizeof(*uq), M_UMTX, M_ZERO | M_WAITOK);
                UMTX_LOCK();

                /*
                 * Someone else could have succeeded while we were blocked
                 * waiting on memory.
                 */
                if ((uq = umtx_lookup(td, umtx)) == NULL) {
                        head = &queues[UMTX_HASH(pid, umtx)];
                        uq = ins;
                        uq->uq_pid = pid;
                        uq->uq_umtx = umtx;
                        LIST_INSERT_HEAD(head, uq, uq_next);
                        TAILQ_INIT(&uq->uq_tdq);
                } else
                        free(ins, M_UMTX);
        }

        /*
         * Insert us onto the end of the TAILQ.
         */
        TAILQ_INSERT_TAIL(&uq->uq_tdq, td, td_umtx);

        return (uq);
}

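/*
 * Remove a thread from the queue it is blocked on, and free the queue
 * once the last waiter is gone.  The umtx lock must be held.
 */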
static void
umtx_remove(struct umtx_q *uq, struct thread *td)
{
        TAILQ_REMOVE(&uq->uq_tdq, td, td_umtx);

        if (TAILQ_EMPTY(&uq->uq_tdq)) {
                LIST_REMOVE(uq, uq_next);
                free(uq, M_UMTX);
        }
}

int
_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
    /* struct umtx *umtx */
{
        struct umtx_q *uq;
        struct umtx *umtx;
        intptr_t owner;
        intptr_t old;
        int error;

        uq = NULL;

        /*
         * Care must be exercised when dealing with this structure.  It
         * can fault on any access.
         */
        umtx = uap->umtx;

        for (;;) {
                /*
                 * Try the uncontested case.  This should be done in userland.
                 */
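                /*
                 * The intended userland fast path is roughly the following
                 * (illustrative sketch only, assuming an atomic
                 * compare-and-swap primitive such as atomic_cmpset_acq_ptr()
                 * and using `self' for the caller's thread identifier; these
                 * names are not defined in this file):
                 *
                 *      if (atomic_cmpset_acq_ptr(&umtx->u_owner,
                 *          UMTX_UNOWNED, self))
                 *              return (0);         acquired, no syscall
                 *      return (_umtx_lock(umtx));  contested, enter kernel
                 */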
                owner = casuptr((intptr_t *)&umtx->u_owner,
                    UMTX_UNOWNED, (intptr_t)td);

                /* The address was invalid. */
                if (owner == -1)
                        return (EFAULT);

                /* The acquire succeeded. */
                if (owner == UMTX_UNOWNED)
                        return (0);

                /* If no one owns it but it is contested, try to acquire it. */
                if (owner == UMTX_CONTESTED) {
                        owner = casuptr((intptr_t *)&umtx->u_owner,
                            UMTX_CONTESTED, ((intptr_t)td | UMTX_CONTESTED));

                        /* The address was invalid. */
                        if (owner == -1)
                                return (EFAULT);

                        if (owner == UMTX_CONTESTED)
                                goto out;

                        /* If this failed the lock has changed, restart. */
                        continue;
                }

                UMTX_LOCK();
                uq = umtx_insert(td, umtx);
                UMTX_UNLOCK();

                /*
                 * Set the contested bit so that a release in user space
                 * knows to use the system call for unlock.  If this fails
                 * either someone else has acquired the lock or it has been
                 * released.
                 */
                old = casuptr((intptr_t *)&umtx->u_owner, owner,
                    owner | UMTX_CONTESTED);

                /* The address was invalid. */
                if (old == -1) {
                        UMTX_LOCK();
                        umtx_remove(uq, td);
                        UMTX_UNLOCK();
                        return (EFAULT);
                }

                /*
                 * If we set the contested bit, sleep.  Otherwise the lock
                 * changed and we need to retry, or we lost a race to the
                 * thread unlocking the umtx.
                 */
                UMTX_LOCK();
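                /*
                 * The unlocking thread sets TDF_UMTXWAKEUP under sched_lock
                 * before calling wakeup().  If the flag is already set, our
                 * wakeup has come and gone and we must not sleep; checking
                 * it under sched_lock closes the race with a waker that
                 * fires before we reach msleep().
                 */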
                mtx_lock_spin(&sched_lock);
                if (old == owner && (td->td_flags & TDF_UMTXWAKEUP) == 0) {
                        mtx_unlock_spin(&sched_lock);
                        error = msleep(td, &umtx_lock,
                            td->td_priority | PCATCH, "umtx", 0);
                        mtx_lock_spin(&sched_lock);
                } else
                        error = 0;
                td->td_flags &= ~TDF_UMTXWAKEUP;
                mtx_unlock_spin(&sched_lock);

                umtx_remove(uq, td);
                UMTX_UNLOCK();

                /*
                 * If we caught a signal we might have to retry or exit
                 * immediately.
                 */
                if (error)
                        return (error);
        }
out:
        /*
         * We reach here only if we just acquired a contested umtx.
         *
         * If there are no other threads on this umtx's queue, clear the
         * contested bit.  However, we cannot hold a lock across casuptr(),
         * so after we unset the bit we have to recheck, and set it again
         * if another thread has put itself on the queue in the meantime.
         */
        error = 0;
        UMTX_LOCK();
        uq = umtx_lookup(td, umtx);
        UMTX_UNLOCK();
        if (uq == NULL)
                old = casuptr((intptr_t *)&umtx->u_owner,
                    ((intptr_t)td | UMTX_CONTESTED), (intptr_t)td);
        if (uq == NULL && old == ((intptr_t)td | UMTX_CONTESTED)) {
                UMTX_LOCK();
                uq = umtx_lookup(td, umtx);
                UMTX_UNLOCK();
                if (uq != NULL) {
                        old = casuptr((intptr_t *)&umtx->u_owner,
                            (intptr_t)td, ((intptr_t)td | UMTX_CONTESTED));
                        if (old == -1)
                                error = EFAULT;
                        else if (old != (intptr_t)td)
                                error = EINVAL;
                }
        }
        return (error);
}

int
_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
        struct thread *blocked;
        struct umtx *umtx;
        struct umtx_q *uq;
        intptr_t owner;
        intptr_t old;

        umtx = uap->umtx;

        /*
         * Make sure we own this umtx.
         *
         * XXX Need a {fu,su}ptr; this is not correct on arches where
         * sizeof(intptr_t) != sizeof(long).
         */
        if ((owner = fuword(&umtx->u_owner)) == -1)
                return (EFAULT);

        if ((struct thread *)(owner & ~UMTX_CONTESTED) != td)
                return (EPERM);

        /* We should only ever be in here for contested locks. */
        KASSERT((owner & UMTX_CONTESTED) != 0, ("contested umtx is not."));

        old = casuptr((intptr_t *)&umtx->u_owner, owner, UMTX_CONTESTED);

        if (old == -1)
                return (EFAULT);

        /*
         * This will only happen if someone modifies the lock without going
         * through this api.
         */
        if (old != owner)
                return (EINVAL);

        /*
         * We have to wake up one of the blocked threads.
         */
        UMTX_LOCK();
        uq = umtx_lookup(td, umtx);
        if (uq != NULL) {
                blocked = TAILQ_FIRST(&uq->uq_tdq);
                KASSERT(blocked != NULL, ("umtx_q with no waiting threads."));
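                /*
                 * Flag the thread we are about to wake so that, if it has
                 * not yet reached msleep(), it will notice that the wakeup
                 * already happened and will not sleep waiting for one that
                 * never comes.  The flag must be set under sched_lock.
                 */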
                mtx_lock_spin(&sched_lock);
                blocked->td_flags |= TDF_UMTXWAKEUP;
                mtx_unlock_spin(&sched_lock);
                wakeup(blocked);
        }

        UMTX_UNLOCK();

        return (0);
}