From bcee8cb651dd780e5203bfdfc254532c327be80c Mon Sep 17 00:00:00 2001
From: mjg
Date: Thu, 9 Feb 2017 08:19:30 +0000
Subject: [PATCH] locks: tidy up unlock fallback paths

Update comments to note these functions are reachable if lockstat is
enabled.

Check if the lock has any bits set before attempting the unlock, which
saves an unnecessary atomic operation when the lock is recursed or
contested.
---
 sys/kern/kern_mutex.c  | 17 ++++++++++-------
 sys/kern/kern_rwlock.c | 22 ++++++++++++++----------
 sys/kern/kern_sx.c     | 14 ++++++++------
 3 files changed, 30 insertions(+), 23 deletions(-)

diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 76617cf6b76f..581d99b711be 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -853,25 +853,24 @@ thread_lock_set(struct thread *td, struct mtx *new)
 /*
  * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
  *
- * We are only called here if the lock is recursed or contested (i.e. we
- * need to wake up a blocked thread).
+ * We are only called here if the lock is recursed, contested (i.e. we
+ * need to wake up a blocked thread) or a lockstat probe is active.
  */
 void
 __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
 {
 	struct mtx *m;
 	struct turnstile *ts;
+	uintptr_t tid, v;
 
 	if (SCHEDULER_STOPPED())
 		return;
 
+	tid = (uintptr_t)curthread;
 	m = mtxlock2mtx(c);
+	v = MTX_READ_VALUE(m);
 
-	if (!mtx_recursed(m)) {
-		LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
-		if (_mtx_release_lock(m, (uintptr_t)curthread))
-			return;
-	} else {
+	if (v & MTX_RECURSED) {
 		if (--(m->mtx_recurse) == 0)
 			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
 		if (LOCK_LOG_TEST(&m->lock_object, opts))
@@ -879,6 +878,10 @@ __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
 		return;
 	}
 
+	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
+	if (v == tid && _mtx_release_lock(m, tid))
+		return;
+
 	/*
 	 * We have to lock the chain before the turnstile so this turnstile
 	 * can be removed from the hash list if it is empty.
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 5a9cdae27ca4..be1c42e8ee0a 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -1030,9 +1030,10 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
 }
 
 /*
- * This function is called if the first try at releasing a write lock failed.
- * This means that one of the 2 waiter bits must be set indicating that at
- * least one thread is waiting on this lock.
+ * This function is called if lockstat is active or the first try at releasing
+ * a write lock failed. The latter means that the lock is recursed or one of
+ * the 2 waiter bits must be set, indicating that at least one thread is waiting
+ * on this lock.
  */
 void
 __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
@@ -1047,18 +1048,19 @@ __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
 		return;
 
 	rw = rwlock2rw(c);
-
-	if (!rw_recursed(rw)) {
-		LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw,
-		    LOCKSTAT_WRITER);
-		if (_rw_write_unlock(rw, tid))
-			return;
-	} else {
+	v = RW_READ_VALUE(rw);
+	if (v & RW_LOCK_WRITER_RECURSED) {
 		if (--(rw->rw_recurse) == 0)
 			atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
+		if (LOCK_LOG_TEST(&rw->lock_object, 0))
+			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
 		return;
 	}
 
+	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_WRITER);
+	if (v == tid && _rw_write_unlock(rw, tid))
+		return;
+
 	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
 	    ("%s: neither of the waiter flags are set", __func__));
 
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 760c44e665ff..96ca4dffef28 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -749,12 +749,8 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
 
 	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
 
-	if (!sx_recursed(sx)) {
-		LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx,
-		    LOCKSTAT_WRITER);
-		if (atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
-			return;
-	} else {
+	x = SX_READ_VALUE(sx);
+	if (x & SX_LOCK_RECURSED) {
 		/* The lock is recursed, unrecurse one level. */
 		if ((--sx->sx_recurse) == 0)
 			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
@@ -762,6 +758,12 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
 			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
 		return;
 	}
+
+	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
+	if (x == tid &&
+	    atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
+		return;
+
 	MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
 	    SX_LOCK_EXCLUSIVE_WAITERS));
 	if (LOCK_LOG_TEST(&sx->lock_object, 0))
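
As a rough illustration for readers outside the tree, the sketch below mimics the
structure this patch gives the unlock fallback paths. It is not FreeBSD code: it
uses C11 atomics in place of atomic(9) and the turnstile machinery, and every name
in it (toy_lock, TOY_RECURSED, toy_unlock_slow, ...) is invented for the example.
It only demonstrates the pattern described above: read the lock word once, handle
recursion first, and attempt the releasing compare-and-swap only when the word
still holds nothing but the owning thread id, falling through to the wakeup path
otherwise.

/*
 * Minimal sketch (not FreeBSD code) of the unlock fallback pattern from the
 * patch, written with C11 atomics.  All names are invented for illustration.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define	TOY_RECURSED	0x1	/* owner re-acquired the lock */
#define	TOY_WAITERS	0x2	/* at least one thread is blocked */

struct toy_lock {
	_Atomic uintptr_t word;	/* owner tid | flag bits */
	unsigned int recurse;	/* recursion depth, touched only by the owner */
};

/* Stand-in for the turnstile/wakeup path; the real work is elided. */
static void
toy_wakeup_waiters(struct toy_lock *lk)
{
	printf("slow path: waking waiters, word=%#lx\n",
	    (unsigned long)atomic_load(&lk->word));
}

/*
 * Fallback unlock, mirroring the patched structure: one plain read of the
 * lock word, recursion handled first, and the releasing CAS attempted only
 * when the word is exactly the owner tid.
 */
static void
toy_unlock_slow(struct toy_lock *lk, uintptr_t tid)
{
	uintptr_t v;

	v = atomic_load_explicit(&lk->word, memory_order_relaxed);

	if (v & TOY_RECURSED) {
		/* Unrecurse one level; clear the bit on the last level. */
		if (--lk->recurse == 0)
			atomic_fetch_and(&lk->word, ~(uintptr_t)TOY_RECURSED);
		return;
	}

	/* Uncontested: a single release CAS drops the lock and we are done. */
	if (v == tid &&
	    atomic_compare_exchange_strong_explicit(&lk->word, &v, 0,
	    memory_order_release, memory_order_relaxed))
		return;

	/* Waiter bits (or a lost race) force the full wakeup path. */
	toy_wakeup_waiters(lk);
}

int
main(void)
{
	struct toy_lock lk = { .word = 0x1000, .recurse = 0 };

	toy_unlock_slow(&lk, 0x1000);	/* uncontested release */
	return (0);
}

The detail carried over from the patch is the v == tid guard: if any waiter bit is
set, the compare-and-swap cannot succeed, so skipping it saves one guaranteed-to-fail
atomic operation on the contested path.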