sx: move lockstat handling out of inline primitives

See r313275 for details.
Mateusz Guzik 2017-02-05 09:54:16 +00:00
parent dc0896512c
commit 6ebb77b6a6
2 changed files with 23 additions and 14 deletions
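
In outline: _sx_xlock() now open-codes the uncontended acquire instead of calling the inline __sx_xlock(), and the remaining inline fast paths punt to the _hard functions whenever lockstat probes are enabled, so the LOCKSTAT_* machinery only has to exist out of line. A minimal user-space sketch of the resulting shape (hypothetical names — xlock, probe_enabled, xlock_acquire_hard — using C11 atomics; not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define	LOCK_UNLOCKED	((uintptr_t)0)

struct xlock {
	_Atomic uintptr_t lock;		/* 0 when free, owner tid when held */
};

/* Stand-in for LOCKSTAT_PROFILE_ENABLED(): true while a consumer is attached. */
extern bool probe_enabled;

/* Out-of-line slow path; the only place probe-firing code needs to live. */
void xlock_acquire_hard(struct xlock *xl, uintptr_t v, uintptr_t tid);

static inline void
xlock_acquire(struct xlock *xl, uintptr_t tid)
{
	uintptr_t v = LOCK_UNLOCKED;

	/*
	 * Probes off: one compare-and-set and done.  Probes on (or lock
	 * contended): take the hard path, which can also fire the
	 * acquire probe once it has the lock.
	 */
	if (probe_enabled ||
	    !atomic_compare_exchange_strong_explicit(&xl->lock, &v, tid,
	    memory_order_acquire, memory_order_relaxed))
		xlock_acquire_hard(xl, v, tid);
}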

sys/kern/kern_sx.c

@@ -310,6 +310,7 @@ sx_try_slock_(struct sx *sx, const char *file, int line)
 int
 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
 {
+	uintptr_t tid, x;
 	int error = 0;
 
 	if (SCHEDULER_STOPPED())
@@ -321,7 +322,13 @@ _sx_xlock(struct sx *sx, int opts, const char *file, int line)
 	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
 	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
 	    line, NULL);
-	error = __sx_xlock(sx, curthread, opts, file, line);
+	tid = (uintptr_t)curthread;
+	x = SX_LOCK_UNLOCKED;
+	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
+		error = _sx_xlock_hard(sx, x, tid, opts, file, line);
+	else
+		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
+		    0, 0, file, line, LOCKSTAT_WRITER);
 	if (!error) {
 		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
 		    file, line);
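
Worth noting: atomic_fcmpset_acq_ptr() differs from the older atomic_cmpset family in that, on failure, it writes the value it observed back through its second argument, which is why x can be handed straight to _sx_xlock_hard() without re-reading the lock word. C11's compare-exchange has the same failure semantics; a sketch of the equivalence (hypothetical wrapper name):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * C11 analogue of atomic_fcmpset_acq_ptr(): on failure, *expected is
 * updated with the value actually read, saving the caller a reload.
 */
static inline bool
fcmpset_acq_ptr(_Atomic uintptr_t *p, uintptr_t *expected, uintptr_t new)
{
	return (atomic_compare_exchange_strong_explicit(p, expected, new,
	    memory_order_acquire, memory_order_relaxed));
}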
@@ -379,7 +386,7 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
 	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
 	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
 	    line);
-	__sx_xunlock(sx, curthread, file, line);
+	_sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
 	TD_LOCKS_DEC(curthread);
 }
 
@@ -757,8 +764,13 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
 
 	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
 
-	/* If the lock is recursed, then unrecurse one level. */
-	if (sx_xlocked(sx) && sx_recursed(sx)) {
+	if (!sx_recursed(sx)) {
+		LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx,
+		    LOCKSTAT_WRITER);
+		if (atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
+			return;
+	} else {
+		/* The lock is recursed, unrecurse one level. */
 		if ((--sx->sx_recurse) == 0)
 			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
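
The unlock side mirrors this: _sx_xunlock() above now always enters _sx_xunlock_hard(), and the hard function absorbs the uncontested release, firing the release probe in one place before falling through to the contended path. Continuing the user-space sketch from the top of this commit (same hypothetical names):

/* Assumed probe hook, standing in for LOCKSTAT_PROFILE_RELEASE_RWLOCK(). */
void record_release_probe(struct xlock *xl);

/*
 * Out-of-line release: fire the probe, then try the same single
 * compare-and-set the inline fast path would have done.
 */
void
xlock_release_hard(struct xlock *xl, uintptr_t tid)
{
	uintptr_t v = tid;

	record_release_probe(xl);
	if (atomic_compare_exchange_strong_explicit(&xl->lock, &v,
	    LOCK_UNLOCKED, memory_order_release, memory_order_relaxed))
		return;			/* uncontested release */
	/* ... contended: clear flags, wake waiters, etc. ... */
}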

sys/sys/sx.h

@@ -145,21 +145,19 @@ struct sx_args {
  * deferred to 'tougher' functions.
  */
 
 #if (LOCK_DEBUG == 0) && !defined(SX_NOINLINE)
 /* Acquire an exclusive lock. */
 static __inline int
 __sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,
     int line)
 {
 	uintptr_t tid = (uintptr_t)td;
-	uintptr_t v;
+	uintptr_t v = SX_LOCK_UNLOCKED;
 	int error = 0;
 
-	v = SX_LOCK_UNLOCKED;
-	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &v, tid))
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
+	    !atomic_fcmpset_acq_ptr(&sx->sx_lock, &v, tid)))
 		error = _sx_xlock_hard(sx, v, tid, opts, file, line);
-	else
-		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
-		    0, 0, file, line, LOCKSTAT_WRITER);
 
 	return (error);
 }
@@ -170,12 +168,11 @@ __sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
 {
 	uintptr_t tid = (uintptr_t)td;
 
-	if (sx->sx_recurse == 0)
-		LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx,
-		    LOCKSTAT_WRITER);
-	if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
+	    !atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED)))
 		_sx_xunlock_hard(sx, tid, file, line);
 }
 #endif
 
 /*
  * Public interface for lock operations.
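
A closing note on cost: the rewrite only pays off if LOCKSTAT_PROFILE_ENABLED() is nearly free while lockstat is idle — ideally a single load plus a branch hinted not-taken via __predict_false(), leaving the inline fast path with just a test and a predictable branch on top of its compare-and-set. One plausible way to model such a gate (assumed names, not the kernel's lockstat implementation):

/* FreeBSD's branch hint macro, shown here for self-containment. */
#define	__predict_false(exp)	__builtin_expect((exp), 0)

/* Nonzero only while some consumer has the probe enabled. */
extern volatile unsigned int acquire_probe_count;

#define	PROBE_ENABLED()	__predict_false(acquire_probe_count != 0)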