/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
    "pthread_cond too large");

/*
 * Prototypes
 */
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec * abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double underscore versions are cancellation points. Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
__weak_reference(__thr_cond_wait, pthread_cond_wait);
__weak_reference(__thr_cond_wait, __pthread_cond_wait);
__weak_reference(_thr_cond_wait, _pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
__weak_reference(_thr_cond_init, pthread_cond_init);
__weak_reference(_thr_cond_init, _pthread_cond_init);
__weak_reference(_thr_cond_destroy, pthread_cond_destroy);
__weak_reference(_thr_cond_destroy, _pthread_cond_destroy);
__weak_reference(_thr_cond_signal, pthread_cond_signal);
__weak_reference(_thr_cond_signal, _pthread_cond_signal);
__weak_reference(_thr_cond_broadcast, pthread_cond_broadcast);
__weak_reference(_thr_cond_broadcast, _pthread_cond_broadcast);

#define CV_PSHARED(cvp)	(((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)

static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{

	if (cattr == NULL) {
		cvp->kcond.c_clockid = CLOCK_REALTIME;
	} else {
		if (cattr->c_pshared)
			cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
		cvp->kcond.c_clockid = cattr->c_clockid;
	}
}
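
/*
 * Illustrative sketch, not part of the library: c_clockid above is the
 * value installed by pthread_condattr_setclock(), so a hypothetical
 * caller can have pthread_cond_timedwait() measure its timeout against
 * the monotonic clock:
 *
 *	pthread_condattr_t attr;
 *
 *	pthread_condattr_init(&attr);
 *	pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
 *	pthread_cond_init(&cv, &attr);
 */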

static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	const struct pthread_cond_attr *cattr;
	int pshared;

	cattr = cond_attr != NULL ? *cond_attr : NULL;
	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
		pshared = 0;
		cvp = calloc(1, sizeof(struct pthread_cond));
		if (cvp == NULL)
			return (ENOMEM);
	} else {
		pshared = 1;
		cvp = __thr_pshared_offpage(cond, 1);
		if (cvp == NULL)
			return (EFAULT);
	}

	/*
	 * Initialise the condition variable structure:
	 */
	cond_init_body(cvp, cattr);
	*cond = pshared ? THR_PSHARED_PTR : cvp;
	return (0);
}
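
/*
 * Illustrative sketch, not part of the library: the off-page path above
 * is taken only for process-shared condition variables.  A hypothetical
 * caller requests one through the standard attribute API:
 *
 *	pthread_condattr_t attr;
 *	pthread_cond_t cv;
 *
 *	pthread_condattr_init(&attr);
 *	pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_cond_init(&cv, &attr);	(cv now holds THR_PSHARED_PTR)
 *	pthread_condattr_destroy(&attr);
 */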

static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}
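
/*
 * Illustrative sketch, not part of the library: a condition variable
 * declared with PTHREAD_COND_INITIALIZER stores a NULL pointer until
 * its first use; CHECK_AND_INIT_COND below then routes that first use
 * through init_static(), which allocates the real structure under
 * _cond_static_lock.  A hypothetical caller never sees the lazy step:
 *
 *	static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
 *	static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
 *
 *	pthread_mutex_lock(&mtx);
 *	while (!ready)
 *		pthread_cond_wait(&cv, &mtx);	(first wait runs init_static())
 *	pthread_mutex_unlock(&mtx);
 */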

#define CHECK_AND_INIT_COND						\
	if (*cond == THR_PSHARED_PTR) {					\
		cvp = __thr_pshared_offpage(cond, 0);			\
		if (cvp == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

int
_thr_cond_init(pthread_cond_t * __restrict cond,
    const pthread_condattr_t * __restrict cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_thr_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error;

	error = 0;
	if (*cond == THR_PSHARED_PTR) {
		cvp = __thr_pshared_offpage(cond, 0);
		if (cvp != NULL) {
			if (cvp->kcond.c_has_waiters)
				error = EBUSY;
			else
				__thr_pshared_destroy(cond);
		}
		if (error == 0)
			*cond = THR_COND_DESTROYED;
	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
		/* nothing */
	} else if (cvp == THR_COND_DESTROYED) {
		error = EINVAL;
	} else {
		cvp = *cond;
		if (cvp->__has_user_waiters || cvp->kcond.c_has_waiters)
			error = EBUSY;
		else {
			*cond = THR_COND_DESTROYED;
			free(cvp);
		}
	}
	return (error);
}

/*
 * Cancellation behavior:
 *   A thread may be canceled at the start of the wait; if it is
 *   canceled, it did not consume a wakeup from pthread_cond_signal(),
 *   otherwise it is not canceled.
 *   Thread cancellation never causes a wakeup sent by
 *   pthread_cond_signal() to be lost.
 */
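
/*
 * Illustrative sketch, not part of the library: because the wait is a
 * cancellation point and the mutex is reacquired before a pending
 * cancel is acted upon, portable callers pair the wait with a cleanup
 * handler that releases the mutex.  The names cleanup_unlock, cv, mtx
 * and ready are hypothetical:
 *
 *	static void
 *	cleanup_unlock(void *arg)
 *	{
 *		pthread_mutex_unlock(arg);
 *	}
 *
 *	pthread_mutex_lock(&mtx);
 *	pthread_cleanup_push(cleanup_unlock, &mtx);
 *	while (!ready)
 *		pthread_cond_wait(&cv, &mtx);
 *	pthread_cleanup_pop(1);
 */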
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	int error, error2, recurse, robust;

	curthread = _get_curthread();
	robust = _mutex_enter_robust(curthread, mp);

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0) {
		if (robust)
			_mutex_leave_robust(curthread, mp);
		return (error);
	}

	if (cancel)
		_thr_cancel_enter2(curthread, 0);
	error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
	    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
	if (cancel)
		_thr_cancel_leave(curthread, 0);

	/*
	 * Note that PP mutex and ROBUST mutex may return
	 * interesting error codes.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse, true);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse, true);
		/*
		 * Do not do cancellation on EOWNERDEAD there.  The
		 * cancellation cleanup handler will use the protected
		 * state and unlock the mutex without making the state
		 * consistent and the state will be unrecoverable.
		 */
		if (error2 == 0 && cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}

		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		_mutex_cv_attach(mp, recurse);
		if (cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}
		error2 = 0;
	}
	if (robust)
		_mutex_leave_robust(curthread, mp);
	return (error2 != 0 ? error2 : error);
}

/*
 * The thread waits in a userland queue whenever possible.  When the
 * thread is signaled or broadcast, it is removed from the queue and
 * saved in curthread's defer_waiters[] buffer, but it is not woken up
 * until the mutex is unlocked.
 */
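
/*
 * Illustrative sketch, not part of the library: deferring the wakeup
 * keeps a signaled waiter from being woken only to block again on the
 * still-held mutex.  A hypothetical caller-side timeline:
 *
 *	pthread_mutex_lock(&mtx);
 *	ready = 1;
 *	pthread_cond_signal(&cv);	(waiter dequeued, wakeup deferred)
 *	pthread_mutex_unlock(&mtx);	(deferred waiter woken here)
 */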
static int
|
|
|
|
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
|
Add implementation of robust mutexes, hopefully close enough to the
intention of the POSIX IEEE Std 1003.1TM-2008/Cor 1-2013.
A robust mutex is guaranteed to be cleared by the system upon either
thread or process owner termination while the mutex is held. The next
mutex locker is then notified about inconsistent mutex state and can
execute (or abandon) corrective actions.
The patch mostly consists of small changes here and there, adding
neccessary checks for the inconsistent and abandoned conditions into
existing paths. Additionally, the thread exit handler was extended to
iterate over the userspace-maintained list of owned robust mutexes,
unlocking and marking as terminated each of them.
The list of owned robust mutexes cannot be maintained atomically
synchronous with the mutex lock state (it is possible in kernel, but
is too expensive). Instead, for the duration of lock or unlock
operation, the current mutex is remembered in a special slot that is
also checked by the kernel at thread termination.
Kernel must be aware about the per-thread location of the heads of
robust mutex lists and the current active mutex slot. When a thread
touches a robust mutex for the first time, a new umtx op syscall is
issued which informs about location of lists heads.
The umtx sleep queues for PP and PI mutexes are split between
non-robust and robust.
Somewhat unrelated changes in the patch:
1. Style.
2. The fix for proper tdfind() call use in umtxq_sleep_pi() for shared
pi mutexes.
3. Removal of the userspace struct pthread_mutex m_owner field.
4. The sysctl kern.ipc.umtx_vnode_persistent is added, which controls
the lifetime of the shared mutex associated with a vnode' page.
Reviewed by: jilles (previous version, supposedly the objection was fixed)
Discussed with: brooks, Martin Simmons <martin@lispworks.com> (some aspects)
Tested by: pho
Sponsored by: The FreeBSD Foundation
2016-05-17 09:56:22 +00:00
|
|
|
const struct timespec *abstime, int cancel)
|
2010-12-22 05:01:52 +00:00
|
|
|
{
|
Add implementation of robust mutexes, hopefully close enough to the
intention of the POSIX IEEE Std 1003.1TM-2008/Cor 1-2013.
A robust mutex is guaranteed to be cleared by the system upon either
thread or process owner termination while the mutex is held. The next
mutex locker is then notified about inconsistent mutex state and can
execute (or abandon) corrective actions.
The patch mostly consists of small changes here and there, adding
neccessary checks for the inconsistent and abandoned conditions into
existing paths. Additionally, the thread exit handler was extended to
iterate over the userspace-maintained list of owned robust mutexes,
unlocking and marking as terminated each of them.
The list of owned robust mutexes cannot be maintained atomically
synchronous with the mutex lock state (it is possible in kernel, but
is too expensive). Instead, for the duration of lock or unlock
operation, the current mutex is remembered in a special slot that is
also checked by the kernel at thread termination.
Kernel must be aware about the per-thread location of the heads of
robust mutex lists and the current active mutex slot. When a thread
touches a robust mutex for the first time, a new umtx op syscall is
issued which informs about location of lists heads.
The umtx sleep queues for PP and PI mutexes are split between
non-robust and robust.
Somewhat unrelated changes in the patch:
1. Style.
2. The fix for proper tdfind() call use in umtxq_sleep_pi() for shared
pi mutexes.
3. Removal of the userspace struct pthread_mutex m_owner field.
4. The sysctl kern.ipc.umtx_vnode_persistent is added, which controls
the lifetime of the shared mutex associated with a vnode' page.
Reviewed by: jilles (previous version, supposedly the objection was fixed)
Discussed with: brooks, Martin Simmons <martin@lispworks.com> (some aspects)
Tested by: pho
Sponsored by: The FreeBSD Foundation
2016-05-17 09:56:22 +00:00
|
|
|
struct pthread *curthread;
|
2010-12-22 05:01:52 +00:00
|
|
|
struct sleepqueue *sq;
|
Add implementation of robust mutexes, hopefully close enough to the
intention of the POSIX IEEE Std 1003.1TM-2008/Cor 1-2013.
A robust mutex is guaranteed to be cleared by the system upon either
thread or process owner termination while the mutex is held. The next
mutex locker is then notified about inconsistent mutex state and can
execute (or abandon) corrective actions.
The patch mostly consists of small changes here and there, adding
neccessary checks for the inconsistent and abandoned conditions into
existing paths. Additionally, the thread exit handler was extended to
iterate over the userspace-maintained list of owned robust mutexes,
unlocking and marking as terminated each of them.
The list of owned robust mutexes cannot be maintained atomically
synchronous with the mutex lock state (it is possible in kernel, but
is too expensive). Instead, for the duration of lock or unlock
operation, the current mutex is remembered in a special slot that is
also checked by the kernel at thread termination.
Kernel must be aware about the per-thread location of the heads of
robust mutex lists and the current active mutex slot. When a thread
touches a robust mutex for the first time, a new umtx op syscall is
issued which informs about location of lists heads.
The umtx sleep queues for PP and PI mutexes are split between
non-robust and robust.
Somewhat unrelated changes in the patch:
1. Style.
2. The fix for proper tdfind() call use in umtxq_sleep_pi() for shared
pi mutexes.
3. Removal of the userspace struct pthread_mutex m_owner field.
4. The sysctl kern.ipc.umtx_vnode_persistent is added, which controls
the lifetime of the shared mutex associated with a vnode' page.
Reviewed by: jilles (previous version, supposedly the objection was fixed)
Discussed with: brooks, Martin Simmons <martin@lispworks.com> (some aspects)
Tested by: pho
Sponsored by: The FreeBSD Foundation
2016-05-17 09:56:22 +00:00
|
|
|
int deferred, error, error2, recurse;
|
2006-12-04 14:20:41 +00:00
|
|
|
|
Add implementation of robust mutexes, hopefully close enough to the
intention of the POSIX IEEE Std 1003.1TM-2008/Cor 1-2013.
A robust mutex is guaranteed to be cleared by the system upon either
thread or process owner termination while the mutex is held. The next
mutex locker is then notified about inconsistent mutex state and can
execute (or abandon) corrective actions.
The patch mostly consists of small changes here and there, adding
neccessary checks for the inconsistent and abandoned conditions into
existing paths. Additionally, the thread exit handler was extended to
iterate over the userspace-maintained list of owned robust mutexes,
unlocking and marking as terminated each of them.
The list of owned robust mutexes cannot be maintained atomically
synchronous with the mutex lock state (it is possible in kernel, but
is too expensive). Instead, for the duration of lock or unlock
operation, the current mutex is remembered in a special slot that is
also checked by the kernel at thread termination.
Kernel must be aware about the per-thread location of the heads of
robust mutex lists and the current active mutex slot. When a thread
touches a robust mutex for the first time, a new umtx op syscall is
issued which informs about location of lists heads.
The umtx sleep queues for PP and PI mutexes are split between
non-robust and robust.
Somewhat unrelated changes in the patch:
1. Style.
2. The fix for proper tdfind() call use in umtxq_sleep_pi() for shared
pi mutexes.
3. Removal of the userspace struct pthread_mutex m_owner field.
4. The sysctl kern.ipc.umtx_vnode_persistent is added, which controls
the lifetime of the shared mutex associated with a vnode' page.
Reviewed by: jilles (previous version, supposedly the objection was fixed)
Discussed with: brooks, Martin Simmons <martin@lispworks.com> (some aspects)
Tested by: pho
Sponsored by: The FreeBSD Foundation
2016-05-17 09:56:22 +00:00
|
|
|
curthread = _get_curthread();
|
2010-12-22 05:01:52 +00:00
|
|
|
if (curthread->wchan != NULL)
|
2016-06-01 16:12:26 +00:00
|
|
|
PANIC("thread %p was already on queue.", curthread);
|
2003-04-01 03:46:29 +00:00
|
|
|
|
2010-12-22 05:01:52 +00:00
|
|
|
if (cancel)
|
|
|
|
_thr_testcancel(curthread);
|
2006-12-04 14:20:41 +00:00
|
|
|
|
2010-12-22 05:01:52 +00:00
|
|
|
_sleepq_lock(cvp);
|
|
|
|
/*
|
|
|
|
* set __has_user_waiters before unlocking mutex, this allows
|
|
|
|
* us to check it without locking in pthread_cond_signal().
|
|
|
|
*/
|
|
|
|
cvp->__has_user_waiters = 1;
|
Add implementation of robust mutexes, hopefully close enough to the
intention of the POSIX IEEE Std 1003.1TM-2008/Cor 1-2013.
A robust mutex is guaranteed to be cleared by the system upon either
thread or process owner termination while the mutex is held. The next
mutex locker is then notified about inconsistent mutex state and can
execute (or abandon) corrective actions.
The patch mostly consists of small changes here and there, adding
neccessary checks for the inconsistent and abandoned conditions into
existing paths. Additionally, the thread exit handler was extended to
iterate over the userspace-maintained list of owned robust mutexes,
unlocking and marking as terminated each of them.
The list of owned robust mutexes cannot be maintained atomically
synchronous with the mutex lock state (it is possible in kernel, but
is too expensive). Instead, for the duration of lock or unlock
operation, the current mutex is remembered in a special slot that is
also checked by the kernel at thread termination.
Kernel must be aware about the per-thread location of the heads of
robust mutex lists and the current active mutex slot. When a thread
touches a robust mutex for the first time, a new umtx op syscall is
issued which informs about location of lists heads.
The umtx sleep queues for PP and PI mutexes are split between
non-robust and robust.
Somewhat unrelated changes in the patch:
1. Style.
2. The fix for proper tdfind() call use in umtxq_sleep_pi() for shared
pi mutexes.
3. Removal of the userspace struct pthread_mutex m_owner field.
4. The sysctl kern.ipc.umtx_vnode_persistent is added, which controls
the lifetime of the shared mutex associated with a vnode' page.
Reviewed by: jilles (previous version, supposedly the objection was fixed)
Discussed with: brooks, Martin Simmons <martin@lispworks.com> (some aspects)
Tested by: pho
Sponsored by: The FreeBSD Foundation
2016-05-17 09:56:22 +00:00
|
|
|
deferred = 0;
|
|
|
|
(void)_mutex_cv_unlock(mp, &recurse, &deferred);
|
2010-12-22 05:01:52 +00:00
|
|
|
curthread->mutex_obj = mp;
|
|
|
|
_sleepq_add(cvp, curthread);
|
|
|
|
for(;;) {
|
|
|
|
_thr_clear_wake(curthread);
|
|
|
|
_sleepq_unlock(cvp);
|
Add implementation of robust mutexes, hopefully close enough to the
intention of the POSIX IEEE Std 1003.1TM-2008/Cor 1-2013.
A robust mutex is guaranteed to be cleared by the system upon either
thread or process owner termination while the mutex is held. The next
mutex locker is then notified about inconsistent mutex state and can
execute (or abandon) corrective actions.
The patch mostly consists of small changes here and there, adding
neccessary checks for the inconsistent and abandoned conditions into
existing paths. Additionally, the thread exit handler was extended to
iterate over the userspace-maintained list of owned robust mutexes,
unlocking and marking as terminated each of them.
The list of owned robust mutexes cannot be maintained atomically
synchronous with the mutex lock state (it is possible in kernel, but
is too expensive). Instead, for the duration of lock or unlock
operation, the current mutex is remembered in a special slot that is
also checked by the kernel at thread termination.
Kernel must be aware about the per-thread location of the heads of
robust mutex lists and the current active mutex slot. When a thread
touches a robust mutex for the first time, a new umtx op syscall is
issued which informs about location of lists heads.
The umtx sleep queues for PP and PI mutexes are split between
non-robust and robust.
Somewhat unrelated changes in the patch:
1. Style.
2. The fix for proper tdfind() call use in umtxq_sleep_pi() for shared
pi mutexes.
3. Removal of the userspace struct pthread_mutex m_owner field.
4. The sysctl kern.ipc.umtx_vnode_persistent is added, which controls
the lifetime of the shared mutex associated with a vnode' page.
Reviewed by: jilles (previous version, supposedly the objection was fixed)
Discussed with: brooks, Martin Simmons <martin@lispworks.com> (some aspects)
Tested by: pho
Sponsored by: The FreeBSD Foundation
2016-05-17 09:56:22 +00:00
|
|
|
if (deferred) {
|
|
|
|
deferred = 0;
|
2012-08-11 23:17:02 +00:00
|
|
|
if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
|
Add implementation of robust mutexes, hopefully close enough to the
intention of the POSIX IEEE Std 1003.1TM-2008/Cor 1-2013.
A robust mutex is guaranteed to be cleared by the system upon either
thread or process owner termination while the mutex is held. The next
mutex locker is then notified about inconsistent mutex state and can
execute (or abandon) corrective actions.
The patch mostly consists of small changes here and there, adding
neccessary checks for the inconsistent and abandoned conditions into
existing paths. Additionally, the thread exit handler was extended to
iterate over the userspace-maintained list of owned robust mutexes,
unlocking and marking as terminated each of them.
The list of owned robust mutexes cannot be maintained atomically
synchronous with the mutex lock state (it is possible in kernel, but
is too expensive). Instead, for the duration of lock or unlock
operation, the current mutex is remembered in a special slot that is
also checked by the kernel at thread termination.
Kernel must be aware about the per-thread location of the heads of
robust mutex lists and the current active mutex slot. When a thread
touches a robust mutex for the first time, a new umtx op syscall is
issued which informs about location of lists heads.
The umtx sleep queues for PP and PI mutexes are split between
non-robust and robust.
Somewhat unrelated changes in the patch:
1. Style.
2. The fix for proper tdfind() call use in umtxq_sleep_pi() for shared
pi mutexes.
3. Removal of the userspace struct pthread_mutex m_owner field.
4. The sysctl kern.ipc.umtx_vnode_persistent is added, which controls
the lifetime of the shared mutex associated with a vnode' page.
Reviewed by: jilles (previous version, supposedly the objection was fixed)
Discussed with: brooks, Martin Simmons <martin@lispworks.com> (some aspects)
Tested by: pho
Sponsored by: The FreeBSD Foundation
2016-05-17 09:56:22 +00:00
|
|
|
(void)_umtx_op_err(&mp->m_lock,
|
|
|
|
UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
|
|
|
|
0, 0);
|
2012-08-11 23:17:02 +00:00
|
|
|
}
|
|
|
|
if (curthread->nwaiter_defer > 0) {
|
|
|
|
_thr_wake_all(curthread->defer_waiters,
|
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		/* Open the cancellation window only around the sleep. */
		if (cancel)
			_thr_cancel_enter2(curthread, 0);
		error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
		if (cancel)
			_thr_cancel_leave(curthread, 0);

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			error2 = _mutex_cv_lock(mp, recurse, false);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (error2);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	error2 = _mutex_cv_lock(mp, recurse, false);
	if (error == 0)
		error = error2;
	return (error);
}

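/*
 * Common code for the wait family.  A waiter may sleep on the
 * userspace sleep queue only when the condvar is process-private,
 * the mutex is a plain (non-PP, non-PI, non-pshared) mutex and the
 * thread runs under SCHED_OTHER; every other combination must sleep
 * in the kernel.
 */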
static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime, int cancel)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int	error;

	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
	    USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}

int
_thr_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__thr_cond_wait(pthread_cond_t * __restrict cond,
    pthread_mutex_t * __restrict mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

int
_thr_cond_timedwait(pthread_cond_t * __restrict cond,
    pthread_mutex_t * __restrict mutex,
    const struct timespec * __restrict abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
Make libthr async-signal-safe without costly signal masking. The guidelines I
followed are: only 3 functions (pthread_cancel, pthread_setcancelstate,
pthread_setcanceltype) are required to be async-signal-safe by POSIX. None of
the rest of the pthread API is required to be async-signal-safe. This means
that only the three mentioned functions are safe to use from inside
signal handlers.
However, there are certain system/libc calls that are
cancellation points that a caller may call from within a signal handler,
and since they are cancellation points, calls have to be made into libthr
to test for cancellation and exit the thread if necessary. So, the
cancellation test and thread exit code paths must be async-signal-safe
as well. A summary of the changes follows:
o Almost all of the code paths that masked signals, as well as locking the
pthread structure, now lock only the pthread structure.
o Signals are masked (and left that way) as soon as a thread enters
pthread_exit().
o The active and dead threads locks now explicitly require that signals
are masked.
o Access to the isdead field of the pthread structure is protected by both
the active and dead list locks for writing. Either one is sufficient for
reading.
o The thread state and type fields have been combined into one three-state
switch to make it easier to read without requiring a lock. It doesn't need
a lock for writing (and therefore for reading either) because only the
current thread can write to it and it is an integer value.
o The thread state field of the pthread structure has been eliminated. It
was an unnecessary field that mostly duplicated the flags field, but
required additional locking that would make a lot more code paths require
signal masking. Any truly unique values (such as PS_DEAD) have been
reborn as separate members of the pthread structure.
o Since the mutex and condvar pthread functions are not async-signal-safe
there is no need to muck about with the wait queues when handling
a signal ...
o ... which also removes the need for wrapping signal handlers and sigaction(2).
o The condvar and mutex async-cancellation code had to be revised as a result
of some of these changes, which resulted in semi-unrelated changes which
would have been difficult to work on as a separate commit, so they are
included as well.
The only part of the changes I am worried about is related to locking for
the pthread joining fields. But, I will take a closer look at them once this
mega-patch is committed.
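
A minimal sketch of what a conforming signal handler may therefore do
(the handler itself is hypothetical; only the three functions named
above are async-signal-safe):

	#include <pthread.h>
	#include <signal.h>

	static void
	handler(int sig)
	{
		int ostate;

		(void)sig;
		/* Safe: pthread_setcancelstate() is async-signal-safe. */
		(void)pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &ostate);
		/* ... async-signal-safe work only; no mutex/condvar calls ... */
		(void)pthread_setcancelstate(ostate, NULL);
	}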
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
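
/*
 * Usage sketch (not part of libthr): building an abstime that passes
 * the checks above.  The default condvar clock is CLOCK_REALTIME
 * unless changed with pthread_condattr_setclock(); ms, cv, mtx and
 * pred are hypothetical, and the caller holds mtx.
 *
 *	struct timespec abstime;
 *	int error = 0;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += ms / 1000;
 *	abstime.tv_nsec += (ms % 1000) * 1000000;
 *	if (abstime.tv_nsec >= 1000000000) {	// keep tv_nsec in range
 *		abstime.tv_sec++;
 *		abstime.tv_nsec -= 1000000000;
 *	}
 *	while (*pred == 0 && error == 0)
 *		error = pthread_cond_timedwait(cv, mtx, &abstime);
 */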
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread	*td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int	*waddr;
	int	pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	curthread = _get_curthread();
	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
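	/*
	 * If the current thread holds the waiter's mutex, defer the
	 * wakeup until the mutex is unlocked, so the waiter does not
	 * wake up only to block on the mutex immediately.
	 */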
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

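/*
 * State for the broadcast drop callback: wake addresses of waiters
 * whose mutex is not held by the broadcaster are batched here and
 * woken up to MAX_DEFER_WAITERS at a time with _thr_wake_all().
 */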
struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}

static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int	pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

int
_thr_cond_signal(pthread_cond_t * cond)
{

	return (cond_signal_common(cond));
}

int
_thr_cond_broadcast(pthread_cond_t * cond)
{

	return (cond_broadcast_common(cond));
}
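
/*
 * Usage sketch (not part of libthr): the deferred-wakeup path above is
 * what makes the common pattern of signalling with the mutex held
 * cheap; the wakeup is postponed to the unlock, so the woken thread
 * does not immediately block on the mutex.  qlock, qcond and qlen are
 * hypothetical.
 *
 *	// producer
 *	pthread_mutex_lock(&qlock);
 *	qlen++;
 *	pthread_cond_signal(&qcond);	// wakeup deferred to unlock
 *	pthread_mutex_unlock(&qlock);
 *
 *	// consumer
 *	pthread_mutex_lock(&qlock);
 *	while (qlen == 0)
 *		pthread_cond_wait(&qcond, &qlock);
 *	qlen--;
 *	pthread_mutex_unlock(&qlock);
 */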