MFp4:
Further decreases unexpected context switches by deferring the mutex wakeup until the internal sleep-queue lock is released.
parent decb55dbb5
commit 72ce06de36
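Note: the win here is avoiding a wake/block bounce. If a mutex waiter is woken while the waker still holds the internal sleep-queue lock, the woken thread often runs just long enough to block on that same lock, costing two extra context switches. The same principle applies one level up in application code; a minimal illustrative sketch in C (my own names, not libthr code):

	#include <pthread.h>

	/* Illustrative only: signalling after the mutex is released keeps
	 * the woken thread from immediately colliding with a lock the
	 * waker still holds.  This commit applies the same idea inside
	 * libthr to its internal sleep-queue lock. */
	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
	static int ready;

	static void
	post_ready(void)
	{
		pthread_mutex_lock(&m);
		ready = 1;
		pthread_mutex_unlock(&m);	/* drop the lock first...     */
		pthread_cond_signal(&c);	/* ...then wake: no collision */
	}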
lib/libthr/thread/thr_cond.c

@@ -217,6 +217,7 @@ cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
 	struct sleepqueue *sq;
 	int	recurse;
 	int	error;
+	int	defered;
 
 	if (curthread->wchan != NULL)
 		PANIC("thread was already on queue.");
@@ -230,13 +231,23 @@ cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
 	 * us to check it without locking in pthread_cond_signal().
 	 */
 	cvp->__has_user_waiters = 1;
-	curthread->will_sleep = 1;
-	(void)_mutex_cv_unlock(mp, &recurse);
+	defered = 0;
+	(void)_mutex_cv_unlock(mp, &recurse, &defered);
 	curthread->mutex_obj = mp;
 	_sleepq_add(cvp, curthread);
 	for(;;) {
 		_thr_clear_wake(curthread);
 		_sleepq_unlock(cvp);
+		if (defered) {
+			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
+				(void)_umtx_op_err(&mp->m_lock, UMTX_OP_MUTEX_WAKE2,
+				    mp->m_lock.m_flags, 0, 0);
+		}
+		if (curthread->nwaiter_defer > 0) {
+			_thr_wake_all(curthread->defer_waiters,
+			    curthread->nwaiter_defer);
+			curthread->nwaiter_defer = 0;
+		}
 
 		if (cancel) {
 			_thr_cancel_enter2(curthread, 0);
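Note on the hunk above: _mutex_cv_unlock() now reports a pending mutex wakeup through an out-parameter instead of issuing it, and the waiter posts it with UMTX_OP_MUTEX_WAKE2 only after _sleepq_unlock(). A reduced sketch of that calling convention (hypothetical helper names, not the real sources):

	#include <stddef.h>

	/* Stand-in for the kernel wake (_umtx_op_err() in libthr). */
	static void
	wake_one_waiter(void *lockword)
	{
		(void)lockword;
		/* a kernel wakeup on the lock word would go here */
	}

	static void
	unlock_word(void *lockword, int *defer)
	{
		/* ... release the lock word ... */
		if (defer != NULL)
			*defer = 1;		/* record the wake for the caller */
		else
			wake_one_waiter(lockword);	/* eager: wake now */
	}

	static void
	wait_path(void *lockword)
	{
		int defered = 0;

		unlock_word(lockword, &defered);	/* no syscall yet */
		/* ... enqueue on the sleep queue, drop the sleep-queue lock ... */
		if (defered)
			wake_one_waiter(lockword);	/* safe: no internal lock held */
	}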
lib/libthr/thread/thr_kern.c

@@ -199,13 +199,6 @@ _thr_sleep(struct pthread *curthread, int clockid,
 	const struct timespec *abstime)
 {
 
-	curthread->will_sleep = 0;
-	if (curthread->nwaiter_defer > 0) {
-		_thr_wake_all(curthread->defer_waiters,
-		    curthread->nwaiter_defer);
-		curthread->nwaiter_defer = 0;
-	}
-
 	if (curthread->wake_addr->value != 0)
 		return (0);
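Note: this flush is removed because cond_wait_user() now posts its deferred wakeups itself, right after dropping the sleep-queue lock, so nothing can still be pending when a thread parks; the will_sleep handshake on this path goes away with it. As a sketch of the resulting invariant (illustrative stand-in types, not libthr code):

	#include <assert.h>

	struct thr {
		int nwaiter_defer;	/* wakeups collected but not yet posted */
	};

	/* Illustrative invariant: the wait paths post their deferred
	 * wakeups before parking, so the sleep routine has nothing to
	 * flush. */
	static void
	park(struct thr *t)
	{
		assert(t->nwaiter_defer == 0);
		/* ... sleep on the thread's wait channel ... */
	}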
lib/libthr/thread/thr_mutex.c

@@ -92,7 +92,7 @@ int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
 static int	mutex_self_trylock(pthread_mutex_t);
 static int	mutex_self_lock(pthread_mutex_t,
 			const struct timespec *abstime);
-static int	mutex_unlock_common(struct pthread_mutex *, int);
+static int	mutex_unlock_common(struct pthread_mutex *, int, int *);
 static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
 			const struct timespec *);
 
@@ -461,7 +461,7 @@ _pthread_mutex_unlock(pthread_mutex_t *mutex)
 	struct pthread_mutex *mp;
 
 	mp = *mutex;
-	return (mutex_unlock_common(mp, 0));
+	return (mutex_unlock_common(mp, 0, NULL));
 }
 
 int
@@ -476,7 +476,7 @@ _mutex_cv_lock(struct pthread_mutex *m, int count)
 }
 
 int
-_mutex_cv_unlock(struct pthread_mutex *m, int *count)
+_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
 {
 
 	/*
@@ -484,7 +484,7 @@ _mutex_cv_unlock(struct pthread_mutex *m, int *count)
 	 */
 	*count = m->m_count;
 	m->m_count = 0;
-	(void)mutex_unlock_common(m, 1);
+	(void)mutex_unlock_common(m, 1, defer);
 	return (0);
 }
 
@@ -629,7 +629,7 @@ mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
 }
 
 static int
-mutex_unlock_common(struct pthread_mutex *m, int cv)
+mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
 {
 	struct pthread *curthread = _get_curthread();
 	uint32_t id;
@@ -657,12 +657,12 @@ mutex_unlock_common(struct pthread_mutex *m, int cv)
 		defered = 1;
 		m->m_flags &= ~PMUTEX_FLAG_DEFERED;
 	} else
 		defered = 0;
 
 	DEQUEUE_MUTEX(curthread, m);
-	_thr_umutex_unlock(&m->m_lock, id);
+	_thr_umutex_unlock2(&m->m_lock, id, mtx_defer);
 
-	if (curthread->will_sleep == 0 && defered) {
+	if (mtx_defer == NULL && defered) {
 		_thr_wake_all(curthread->defer_waiters,
 		    curthread->nwaiter_defer);
 		curthread->nwaiter_defer = 0;
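Note: the per-thread will_sleep flag is replaced by the new parameter itself; whether the queued waiter wakeups are flushed inside mutex_unlock_common() now depends only on how it was called. A compact restatement (stand-in code, not the sources):

	#include <stddef.h>

	/* A NULL mtx_defer means an ordinary unlock that flushes eagerly
	 * (the old behaviour); a non-NULL one means a wait path owns the
	 * flush and will do it after dropping its internal sleep-queue
	 * lock. */
	static int
	should_flush_here(const int *mtx_defer, int defered)
	{
		return (mtx_defer == NULL && defered);
	}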
lib/libthr/thread/thr_private.h

@@ -727,10 +727,10 @@ extern struct umutex _thr_event_lock __hidden;
  */
 __BEGIN_DECLS
 int	_thr_setthreaded(int) __hidden;
-int	_mutex_cv_lock(struct pthread_mutex *, int count) __hidden;
-int	_mutex_cv_unlock(struct pthread_mutex *, int *count) __hidden;
-int	_mutex_cv_attach(struct pthread_mutex *, int count) __hidden;
-int	_mutex_cv_detach(struct pthread_mutex *, int *count) __hidden;
+int	_mutex_cv_lock(struct pthread_mutex *, int) __hidden;
+int	_mutex_cv_unlock(struct pthread_mutex *, int *, int *) __hidden;
+int	_mutex_cv_attach(struct pthread_mutex *, int) __hidden;
+int	_mutex_cv_detach(struct pthread_mutex *, int *) __hidden;
 int	_mutex_owned(struct pthread *, const struct pthread_mutex *) __hidden;
 int	_mutex_reinit(pthread_mutex_t *) __hidden;
 void	_mutex_fork(struct pthread *curthread) __hidden;
lib/libthr/thread/thr_umtx.h

@@ -120,7 +120,7 @@ _thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
 }
 
 static inline int
-_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
+_thr_umutex_unlock2(struct umutex *mtx, uint32_t id, int *defer)
 {
 	uint32_t flags = mtx->m_flags;
 
@@ -132,8 +132,12 @@ _thr_umutex_unlock(struct umutex *mtx, uint32_t id)
 			return (EPERM);
 		} while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner,
 			owner, UMUTEX_UNOWNED)));
-		if ((owner & UMUTEX_CONTESTED))
-			(void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2, flags, 0, 0);
+		if ((owner & UMUTEX_CONTESTED)) {
+			if (defer == NULL)
+				(void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2, flags, 0, 0);
+			else
+				*defer = 1;
+		}
 		return (0);
 	}
 	if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED))
@@ -141,6 +145,12 @@ _thr_umutex_unlock(struct umutex *mtx, uint32_t id)
 	return (__thr_umutex_unlock(mtx, id));
 }
 
+static inline int
+_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
+{
+	return _thr_umutex_unlock2(mtx, id, NULL);
+}
+
 static inline int
 _thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
 {
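For reference, the unlock protocol in this header restated with portable C11 atomics (an illustrative model with invented names; libthr itself uses atomic_cmpset_rel_32 and the _umtx_op_err() wrapper around the _umtx_op system call):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	#define CONTESTED	0x80000000u	/* models UMUTEX_CONTESTED */

	static _Atomic unsigned lockword;	/* models mtx->m_owner */

	/* Stand-in for the UMTX_OP_MUTEX_WAKE2 syscall. */
	static void
	kernel_wake(void)
	{
	}

	static bool
	unlock2_model(unsigned id, int *defer)
	{
		unsigned old = atomic_load(&lockword);

		/* release the lock word; retry if it changes under us */
		do {
			if ((old & ~CONTESTED) != id)
				return (false);		/* EPERM: not the owner */
		} while (!atomic_compare_exchange_weak_explicit(&lockword,
		    &old, 0u, memory_order_release, memory_order_relaxed));

		if (old & CONTESTED) {			/* a waiter is parked */
			if (defer == NULL)
				kernel_wake();		/* old behaviour */
			else
				*defer = 1;		/* caller wakes later */
		}
		return (true);
	}

The _thr_umutex_unlock() wrapper added at the end of the hunk passes defer == NULL, so every existing caller keeps the eager-wake behaviour.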