- Wrap lock profiling state variables in #ifdef LOCK_PROFILING blocks.

Jeff Roberson 2009-03-15 08:03:54 +00:00
parent 2e6b8de462
commit 1723a06485
Notes (svn2git, 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=189846
4 changed files with 34 additions and 14 deletions
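The pattern is the same in every function touched: the profiling-only locals (contested, waittime) move inside an #ifdef LOCK_PROFILING block, so kernels built without the option (enabled via "options LOCK_PROFILING" in the kernel config) no longer declare and zero state that nothing reads, since the lock_profile_* hooks compile away in that case. Below is a minimal standalone sketch of the idiom, not kernel code; lock_acquire(), try_acquire(), and lock_word are hypothetical stand-ins:

    #include <stdint.h>

    static int lock_word;                  /* hypothetical lock state */

    static int
    try_acquire(void)                      /* hypothetical: one acquisition attempt */
    {
            if (lock_word != 0)
                    return (0);            /* lock is held by someone else */
            lock_word = 1;
            return (1);
    }

    static void
    lock_acquire(void)
    {
    #ifdef LOCK_PROFILING
            uint64_t waittime = 0;         /* profiling-only state, as in the diff */
            int contested = 0;
    #endif

            while (try_acquire() == 0) {
    #ifdef LOCK_PROFILING
                    contested = 1;         /* lock was found held at least once */
                    waittime++;            /* stand-in for a measured wait interval */
    #endif
            }
    }

    int
    main(void)
    {
            lock_acquire();
            return (0);
    }

Without the guards, non-profiling builds pay for the initializations and carry set-but-never-read variables; with them, the state exists only when profiling is compiled in. The diffs below apply exactly this guard in the lockmgr, mutex, rwlock, and sx implementations.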

sys/kern/kern_lock.c

@@ -333,16 +333,17 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
     const char *wmesg, int pri, int timo, const char *file, int line)
 {
 	GIANT_DECLARE;
-	uint64_t waittime;
 	struct lock_class *class;
 	const char *iwmesg;
 	uintptr_t tid, v, x;
 	u_int op;
-	int contested, error, ipri, itimo, queue, wakeup_swapper;
+	int error, ipri, itimo, queue, wakeup_swapper;
+#ifdef LOCK_PROFILING
+	uint64_t waittime = 0;
+	int contested = 0;
+#endif
 
-	contested = 0;
 	error = 0;
-	waittime = 0;
 	tid = (uintptr_t)curthread;
 	op = (flags & LK_TYPE_MASK);
 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;

sys/kern/kern_mutex.c

@@ -254,8 +254,11 @@ _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
 int
 _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
 {
-	int rval, contested = 0;
+#ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
+	int contested = 0;
+#endif
+	int rval;
 
 	MPASS(curthread != NULL);
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
@@ -296,15 +299,17 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
     int line)
 {
 	struct turnstile *ts;
+	uintptr_t v;
 #ifdef ADAPTIVE_MUTEXES
 	volatile struct thread *owner;
 #endif
 #ifdef KTR
 	int cont_logged = 0;
 #endif
+#ifdef LOCK_PROFILING
 	int contested = 0;
 	uint64_t waittime = 0;
-	uintptr_t v;
+#endif
 
 	if (mtx_owned(m)) {
 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
@@ -448,8 +453,11 @@ void
 _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
     int line)
 {
-	int i = 0, contested = 0;
+	int i = 0;
+#ifdef LOCK_PROFILING
+	int contested = 0;
 	uint64_t waittime = 0;
+#endif
 
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
@@ -486,11 +494,13 @@ _thread_lock_flags(struct thread *td, int opts, const char *file, int line)
 {
 	struct mtx *m;
 	uintptr_t tid;
-	int i, contested;
-	uint64_t waittime;
+	int i;
+#ifdef LOCK_PROFILING
+	int contested = 0;
+	uint64_t waittime = 0;
+#endif
 
-	contested = i = 0;
-	waittime = 0;
+	i = 0;
 	tid = (uintptr_t)curthread;
 	for (;;) {
 retry:

sys/kern/kern_rwlock.c

@@ -282,8 +282,10 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
 	int spintries = 0;
 	int i;
 #endif
+#ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
 	int contested = 0;
+#endif
 	uintptr_t v;
 
 	KASSERT(rw->rw_lock != RW_DESTROYED,
@@ -584,9 +586,11 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
 	int spintries = 0;
 	int i;
 #endif
-	uint64_t waittime = 0;
 	uintptr_t v, x;
+#ifdef LOCK_PROFILING
+	uint64_t waittime = 0;
 	int contested = 0;
+#endif
 
 	if (rw_wlocked(rw)) {
 		KASSERT(rw->lock_object.lo_flags & RW_RECURSE,

sys/kern/kern_sx.c

@@ -431,9 +431,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
 #endif
-	uint64_t waittime = 0;
 	uintptr_t x;
-	int contested = 0, error = 0;
+#ifdef LOCK_PROFILING
+	uint64_t waittime = 0;
+	int contested = 0;
+#endif
+	int error = 0;
 
 	/* If we already hold an exclusive lock, then recurse. */
 	if (sx_xlocked(sx)) {
@@ -652,8 +655,10 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
 #endif
+#ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
 	int contested = 0;
+#endif
 	uintptr_t x;
 	int error = 0;
 