diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 183726c8e558..4cce16d301b8 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -310,6 +310,7 @@ sx_try_slock_(struct sx *sx, const char *file, int line)
 int
 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
 {
+	uintptr_t tid, x;
 	int error = 0;
 
 	if (SCHEDULER_STOPPED())
@@ -321,7 +322,13 @@ _sx_xlock(struct sx *sx, int opts, const char *file, int line)
 	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
 	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
 	    line, NULL);
-	error = __sx_xlock(sx, curthread, opts, file, line);
+	tid = (uintptr_t)curthread;
+	x = SX_LOCK_UNLOCKED;
+	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
+		error = _sx_xlock_hard(sx, x, tid, opts, file, line);
+	else
+		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
+		    0, 0, file, line, LOCKSTAT_WRITER);
 	if (!error) {
 		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
 		    file, line);
@@ -379,7 +386,7 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
 	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
 	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
 	    line);
-	__sx_xunlock(sx, curthread, file, line);
+	_sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
 	TD_LOCKS_DEC(curthread);
 }
 
@@ -757,8 +764,13 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
 
 	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
 
-	/* If the lock is recursed, then unrecurse one level. */
-	if (sx_xlocked(sx) && sx_recursed(sx)) {
+	if (!sx_recursed(sx)) {
+		LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx,
+		    LOCKSTAT_WRITER);
+		if (atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
+			return;
+	} else {
+		/* The lock is recursed, unrecurse one level. */
 		if ((--sx->sx_recurse) == 0)
 			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
diff --git a/sys/sys/sx.h b/sys/sys/sx.h
index a676d2a3deb8..50b0a2415f28 100644
--- a/sys/sys/sx.h
+++ b/sys/sys/sx.h
@@ -145,21 +145,19 @@ struct sx_args {
  * deferred to 'tougher' functions.
  */
 
+#if (LOCK_DEBUG == 0) && !defined(SX_NOINLINE)
 /* Acquire an exclusive lock. */
 static __inline int
 __sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,
     int line)
 {
 	uintptr_t tid = (uintptr_t)td;
-	uintptr_t v;
+	uintptr_t v = SX_LOCK_UNLOCKED;
 	int error = 0;
 
-	v = SX_LOCK_UNLOCKED;
-	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &v, tid))
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
+	    !atomic_fcmpset_acq_ptr(&sx->sx_lock, &v, tid)))
 		error = _sx_xlock_hard(sx, v, tid, opts, file, line);
-	else
-		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
-		    0, 0, file, line, LOCKSTAT_WRITER);
 
 	return (error);
 }
@@ -170,12 +168,11 @@ __sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
 {
 	uintptr_t tid = (uintptr_t)td;
 
-	if (sx->sx_recurse == 0)
-		LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx,
-		    LOCKSTAT_WRITER);
-	if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
+	    !atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED)))
 		_sx_xunlock_hard(sx, tid, file, line);
 }
+#endif
 
 /*
  * Public interface for lock operations.
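
Note on the pattern: the change gates the inlined fast path on LOCKSTAT_PROFILE_ENABLED(), so the common case (probes off, lock uncontended) is a single fcmpset plus a predicted-not-taken branch, and any lockstat bookkeeping is funneled into the _hard functions. Below is a minimal user-space sketch of that fcmpset fast-path/slow-path split, built on C11 atomics. The names (struct xlock, xlock, xlock_hard, XLOCK_UNLOCKED) are hypothetical and not the kernel API; the real _sx_xlock_hard() additionally handles recursion, sleeping, adaptive spinning, and lockstat, all omitted here.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define	XLOCK_UNLOCKED	((uintptr_t)0)

	struct xlock {
		_Atomic uintptr_t owner;	/* 0 when free, owner tid otherwise */
	};

	/* Slow path: the kernel blocks or spins adaptively; this sketch just retries. */
	static int
	xlock_hard(struct xlock *lk, uintptr_t v, uintptr_t tid)
	{

		for (;;) {
			if (v == XLOCK_UNLOCKED &&
			    atomic_compare_exchange_weak_explicit(&lk->owner, &v,
			    tid, memory_order_acquire, memory_order_relaxed))
				return (0);
			v = XLOCK_UNLOCKED;	/* reset expected value and retry */
		}
	}

	/*
	 * Fast path: one CAS. Like atomic_fcmpset_acq_ptr(), the C11
	 * compare-exchange writes the observed lock word back into 'v' on
	 * failure, so the slow path starts from a fresh snapshot without
	 * issuing an extra load.
	 */
	static int
	xlock(struct xlock *lk, uintptr_t tid)
	{
		uintptr_t v = XLOCK_UNLOCKED;

		if (!atomic_compare_exchange_strong_explicit(&lk->owner, &v, tid,
		    memory_order_acquire, memory_order_relaxed))
			return (xlock_hard(lk, v, tid));
		return (0);
	}

	int
	main(void)
	{
		struct xlock lk = { .owner = XLOCK_UNLOCKED };

		xlock(&lk, (uintptr_t)1);
		printf("owner after xlock: %#lx\n", (unsigned long)lk.owner);
		return (0);
	}

The fcmpset ("fused" cmpset) variant is what makes handing the observed value to the hard path cheap: a plain cmpset would force the caller to re-read the lock word before retrying, while fcmpset returns it as a side effect of the failed CAS.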