Undo LOCK_PROFILING pessimisation after r313454 and r313455

With the option compiled into the kernel, both sx and rw shared ops would
always go to the slow path, which added avoidable overhead even when the
facility was disabled.
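
As an illustration, a reduced user-space sketch of the pre-change dispatch
(the macro values mirror the LOCKSTAT_OOL_PROFILE_ENABLED definitions removed
from the header below; rlock_try(), rlock_hard() and main() are invented
stand-ins, not kernel code):

#include <stdio.h>

#define LOCK_PROFILING	/* pretend the kernel option is set */

#ifndef LOCK_PROFILING
#define LOCKSTAT_OOL_PROFILE_ENABLED(probe)	0 /* really a runtime check */
#else
#define LOCKSTAT_OOL_PROFILE_ENABLED(probe)	1 /* constant: fast path is dead code */
#endif

static int rlock_try(void) { puts("fast path"); return 1; }
static void rlock_hard(void) { puts("slow path"); }

int main(void)
{
	/*
	 * With the macro hardwired to 1, short-circuit evaluation means
	 * rlock_try() is never called: every acquire goes out of line,
	 * whether or not profiling is collecting anything.
	 */
	if (LOCKSTAT_OOL_PROFILE_ENABLED(rw__acquire) || !rlock_try())
		rlock_hard();
	return 0;
}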

Furthermore, the increased time spent on uncontested shared lock acquires
would be bogusly added to the total wait time, somewhat skewing the results.

Restore the old behaviour of taking the slow path only when profiling is enabled.
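
A minimal sketch of the restored fast path, mirroring the rwlock hunk below;
the if/else skeleton and the LOCKSTAT_PROFILE_ENABLED definition come from
the diff, while the user-space harness around them is invented:

#include <stdbool.h>
#include <stdio.h>

#define __predict_false(exp)	__builtin_expect((exp), 0)

static volatile int lockstat_enabled;	/* nonzero while a probe is armed */
#define LOCKSTAT_PROFILE_ENABLED(probe)	__predict_false(lockstat_enabled)

static bool rlock_try(void) { puts("fast path"); return true; }
static void rlock_hard(void) { puts("slow path"); }
static void profile_success(void) { } /* lock_profile_obtain_lock_success() stub */

static void rlock(void)
{
	/* Slow path only when a probe is armed or the inline attempt fails. */
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__acquire) ||
	    !rlock_try()))
		rlock_hard();
	else
		profile_success();	/* fast path still feeds the profiler */
}

int main(void)
{
	rlock();		/* prints "fast path" */
	lockstat_enabled = 1;
	rlock();		/* prints "slow path" */
	return 0;
}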

This change is a no-op for kernels without LOCK_PROFILING (which is the
default).
Mateusz Guzik	2018-02-17 12:07:09 +00:00
parent ad58e5e86c
commit e4ccf57fdc
Notes:	svn2git	2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=329451
3 changed files with 14 additions and 12 deletions

sys/kern/kern_rwlock.c

@@ -645,9 +645,12 @@ __rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
 	v = RW_READ_VALUE(rw);
-	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(rw__acquire) ||
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__acquire) ||
 	    !__rw_rlock_try(rw, td, &v, true LOCK_FILE_LINE_ARG)))
 		__rw_rlock_hard(rw, td, v LOCK_FILE_LINE_ARG);
+	else
+		lock_profile_obtain_lock_success(&rw->lock_object, 0, 0,
+		    file, line);
 	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
 	WITNESS_LOCK(&rw->lock_object, 0, file, line);
@@ -839,9 +842,11 @@ _rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
 	td = curthread;
 	v = RW_READ_VALUE(rw);
-	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(rw__release) ||
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__release) ||
 	    !__rw_runlock_try(rw, td, &v)))
 		__rw_runlock_hard(rw, td, v LOCK_FILE_LINE_ARG);
+	else
+		lock_profile_release_lock(&rw->lock_object);
 	TD_LOCKS_DEC(curthread);
 }

sys/kern/kern_sx.c

@@ -1122,9 +1122,12 @@ _sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
 	error = 0;
 	x = SX_READ_VALUE(sx);
-	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__acquire) ||
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
 	    !__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG)))
 		error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
+	else
+		lock_profile_obtain_lock_success(&sx->lock_object, 0, 0,
+		    file, line);
 	if (error == 0) {
 		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
 		WITNESS_LOCK(&sx->lock_object, 0, file, line);
@@ -1252,9 +1255,11 @@ _sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
 	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
 	x = SX_READ_VALUE(sx);
-	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__release) ||
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
 	    !_sx_sunlock_try(sx, &x)))
 		_sx_sunlock_hard(sx, x LOCK_FILE_LINE_ARG);
+	else
+		lock_profile_release_lock(&sx->lock_object);
 	TD_LOCKS_DEC(curthread);
 }

sys/sys/lockstat.h

@@ -109,12 +109,7 @@ extern volatile int lockstat_enabled;
 	LOCKSTAT_RECORD1(probe, lp, a);					\
 } while (0)
 
-#ifndef LOCK_PROFILING
 #define	LOCKSTAT_PROFILE_ENABLED(probe)	__predict_false(lockstat_enabled)
-#define	LOCKSTAT_OOL_PROFILE_ENABLED(probe)	LOCKSTAT_PROFILE_ENABLED(probe)
-#else
-#define	LOCKSTAT_OOL_PROFILE_ENABLED(probe)	1
-#endif
 
 struct lock_object;
 uint64_t lockstat_nsecs(struct lock_object *);
@@ -139,10 +134,7 @@ uint64_t lockstat_nsecs(struct lock_object *);
 #define	LOCKSTAT_PROFILE_RELEASE_RWLOCK(probe, lp, a)		\
 	LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp)
 
-#ifndef LOCK_PROFILING
 #define	LOCKSTAT_PROFILE_ENABLED(probe)	0
-#endif
-#define	LOCKSTAT_OOL_PROFILE_ENABLED(probe)	1
 
 #endif /* !KDTRACE_HOOKS */
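
For reference, a reconstruction of the net result of the two header hunks
(assembled from the diff, not quoted verbatim from the file): the
LOCKSTAT_OOL_PROFILE_ENABLED variant is gone and the remaining macro no
longer depends on LOCK_PROFILING.

/* With KDTRACE_HOOKS: the check is a runtime flag test. */
#define	LOCKSTAT_PROFILE_ENABLED(probe)	__predict_false(lockstat_enabled)

/* Without KDTRACE_HOOKS: the check compiles away entirely. */
#define	LOCKSTAT_PROFILE_ENABLED(probe)	0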