MUTEX_PROFILING has been generalized to LOCK_PROFILING. We now profile
wait (time waited to acquire) and hold times for *all* kernel locks. If
the architecture has a system-synchronized TSC, the profiling code will
use that, thereby minimizing profiling overhead. Large chunks of the
profiling code have been moved out of line; the overhead measured on the
T1 when it is compiled in but not enabled is < 1%.
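
The conversion applies one pattern to every lock primitive in the diff below: timestamp
the start of a potentially contended acquire with lock_profile_waitstart(), credit the
wait and start the hold timer with lock_profile_obtain_lock_success(), and close out the
hold time with lock_profile_release_lock() on release. A minimal sketch of that pattern,
modeled on the _mtx_lock_flags()/_mtx_unlock_flags() hunks in kern_mutex.c (the
my_acquire/my_release wrappers are illustrative names, not part of this commit):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/lock_profile.h>

/*
 * Illustrative sketch: how a converted lock primitive calls the new
 * profiling hooks.  With LOCK_PROFILING unconfigured the hooks are
 * empty inlines, so the common case costs nothing extra.
 */
static void
my_acquire(struct mtx *m, int opts, const char *file, int line)
{
	uint64_t waittime;

	lock_profile_waitstart(&waittime);	/* timestamp before contending */
	_get_sleep_lock(m, curthread, opts, file, line);
	/* record any wait time and start the hold timer */
	lock_profile_obtain_lock_success(&m->mtx_object, waittime, file, line);
}

static void
my_release(struct mtx *m, int opts, const char *file, int line)
{
	/* fold the hold time into the per-acquisition-site record */
	lock_profile_release_lock(&m->mtx_object);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

Profiling is compiled in with 'options LOCK_PROFILING' (see the sys/conf/options hunk),
enabled at run time through the debug.lock.prof.enable sysctl added in subr_lock.c, and
read back via debug.lock.prof.stats. On sun4v, nanoseconds() is defined as rd(tick) via
USE_CPU_NANOSECONDS, so timestamps come from the synchronized tick register rather than
nanotime().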

Approved by: scottl (standing in for mentor rwatson)
Reviewed by: des and jhb
Kip Macy 2006-11-11 03:18:07 +00:00
parent 1cede0c9bd
commit 7c0435b933
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=164159
15 changed files with 599 additions and 281 deletions

View File

@ -20,6 +20,12 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 7.x IS SLOW:
in userland, and various verbose features in the kernel. Many
developers choose to disable these features on build machines
to maximize performance.
20061110:
The MUTEX_PROFILING option has been renamed to LOCK_PROFILING.
The lockmgr object layout has been changed as a result of having
a lock_object embedded in it. As a consequence all file system
kernel modules must be re-compiled. The mutex profiling man page
has not yet been updated to reflect this change.
20061026:
KSE in the kernel has now been made optional and turned on by

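The recompile requirement in the UPDATING entry above follows from the layout change
made in sys/sys/lockmgr.h (the last hunks of this diff): struct lock now starts with an
embedded struct lock_object, and the #defines at the end of that hunk indicate that the
old standalone lk_flags and lk_wmesg members give way to aliases into it, so both the
size and the field offsets of the structure change for every file system module. A
condensed, illustrative view of the new layout (most fields elided; diff markers are
stripped in this view, so see the full hunk below):

/* Condensed sketch of the new lockmgr layout; see sys/sys/lockmgr.h. */
struct lock {
	struct lock_object lk_object;	/* common lock properties, now embedded */
	struct mtx *lk_interlock;	/* lock on remaining fields */
	int	lk_sharecount;		/* # of accepted shared locks */
	/* ... remaining fields as before ... */
};

/* The former standalone members become aliases into lk_object. */
#define	lk_flags	lk_object.lo_flags
#define	lk_wmesg	lk_object.lo_name
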
View File

@ -545,7 +545,7 @@ INVARIANTS opt_global.h
MCLSHIFT opt_global.h
MUTEX_DEBUG opt_global.h
MUTEX_NOINLINE opt_global.h
MUTEX_PROFILING opt_global.h
LOCK_PROFILING opt_global.h
MSIZE opt_global.h
REGRESSION opt_global.h
RESTARTABLE_PANICS opt_global.h

View File

@ -44,6 +44,7 @@
__FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include "opt_global.h"
#include <sys/param.h>
#include <sys/kdb.h>
@ -54,6 +55,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock_profile.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
@ -148,15 +150,14 @@ acquire(struct lock **lkpp, int extflags, int wanted)
* accepted shared locks and shared-to-exclusive upgrades to go away.
*/
int
lockmgr(lkp, flags, interlkp, td)
struct lock *lkp;
u_int flags;
struct mtx *interlkp;
struct thread *td;
_lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
struct thread *td, char *file, int line)
{
int error;
struct thread *thr;
int extflags, lockflags;
uint64_t waitstart;
error = 0;
if (td == NULL)
@ -164,6 +165,7 @@ lockmgr(lkp, flags, interlkp, td)
else
thr = td;
lock_profile_waitstart(&waitstart);
if ((flags & LK_INTERNAL) == 0)
mtx_lock(lkp->lk_interlock);
CTR6(KTR_LOCK,
@ -219,6 +221,9 @@ lockmgr(lkp, flags, interlkp, td)
if (error)
break;
sharelock(td, lkp, 1);
if (lkp->lk_sharecount == 1)
lock_profile_obtain_lock_success(&lkp->lk_object, waitstart, file, line);
#if defined(DEBUG_LOCKS)
stack_save(&lkp->lk_stack);
#endif
@ -229,6 +234,8 @@ lockmgr(lkp, flags, interlkp, td)
* An alternative would be to fail with EDEADLK.
*/
sharelock(td, lkp, 1);
if (lkp->lk_sharecount == 1)
lock_profile_obtain_lock_success(&lkp->lk_object, waitstart, file, line);
/* FALLTHROUGH downgrade */
case LK_DOWNGRADE:
@ -272,6 +279,8 @@ lockmgr(lkp, flags, interlkp, td)
if (lkp->lk_sharecount <= 0)
panic("lockmgr: upgrade without shared");
shareunlock(td, lkp, 1);
if (lkp->lk_sharecount == 0)
lock_profile_release_lock(&lkp->lk_object);
/*
* If we are just polling, check to see if we will block.
*/
@ -302,6 +311,7 @@ lockmgr(lkp, flags, interlkp, td)
lkp->lk_lockholder = thr;
lkp->lk_exclusivecount = 1;
COUNT(td, 1);
lock_profile_obtain_lock_success(&lkp->lk_object, waitstart, file, line);
#if defined(DEBUG_LOCKS)
stack_save(&lkp->lk_stack);
#endif
@ -361,6 +371,7 @@ lockmgr(lkp, flags, interlkp, td)
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
COUNT(td, 1);
lock_profile_obtain_lock_success(&lkp->lk_object, waitstart, file, line);
#if defined(DEBUG_LOCKS)
stack_save(&lkp->lk_stack);
#endif
@ -380,6 +391,7 @@ lockmgr(lkp, flags, interlkp, td)
lkp->lk_flags &= ~LK_HAVE_EXCL;
lkp->lk_lockholder = LK_NOPROC;
lkp->lk_exclusivecount = 0;
lock_profile_release_lock(&lkp->lk_object);
} else {
lkp->lk_exclusivecount--;
}
@ -509,6 +521,7 @@ lockinit(lkp, prio, wmesg, timo, flags)
#ifdef DEBUG_LOCKS
stack_zero(&lkp->lk_stack);
#endif
lock_profile_object_init(&lkp->lk_object, wmesg);
}
/*
@ -520,6 +533,7 @@ lockdestroy(lkp)
{
CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
lkp, lkp->lk_wmesg);
lock_profile_object_destroy(&lkp->lk_object);
}
/*

View File

@ -38,7 +38,7 @@ __FBSDID("$FreeBSD$");
#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mprof.h"
#include "opt_global.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"
@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>
#include <machine/atomic.h>
#include <machine/bus.h>
@ -115,158 +116,6 @@ struct lock_class lock_class_mtx_spin = {
struct mtx sched_lock;
struct mtx Giant;
#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
&mutex_prof_enable, 0, "Enable tracing of mutex holdtime");
struct mutex_prof {
const char *name;
const char *file;
int line;
uintmax_t cnt_max;
uintmax_t cnt_tot;
uintmax_t cnt_cur;
uintmax_t cnt_contest_holding;
uintmax_t cnt_contest_locking;
struct mutex_prof *next;
};
/*
* mprof_buf is a static pool of profiling records to avoid possible
* reentrance of the memory allocation functions.
*
* Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
*/
#ifdef MPROF_BUFFERS
#define NUM_MPROF_BUFFERS MPROF_BUFFERS
#else
#define NUM_MPROF_BUFFERS 1000
#endif
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#ifndef MPROF_HASH_SIZE
#define MPROF_HASH_SIZE 1009
#endif
#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
#error MPROF_BUFFERS must be larger than MPROF_HASH_SIZE
#endif
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE 256 * 400
static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
&mutex_prof_acquisitions, 0, "Number of mutex acquistions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
&mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
&mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
&mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
&mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
&mutex_prof_collisions, 0, "Number of hash collisions");
/*
* mprof_mtx protects the profiling buffers and the hash.
*/
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);
static u_int64_t
nanoseconds(void)
{
struct timespec tv;
nanotime(&tv);
return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}
static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
struct sbuf *sb;
int error, i;
static int multiplier = 1;
if (first_free_mprof_buf == 0)
return (SYSCTL_OUT(req, "No locking recorded",
sizeof("No locking recorded")));
retry_sbufops:
sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
sbuf_printf(sb, "\n%6s %12s %11s %5s %12s %12s %s\n",
"max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
/*
* XXX this spinlock seems to be by far the largest perpetrator
* of spinlock latency (1.6 msec on an Athlon1600 was recorded
* even before I pessimized it further by moving the average
* computation here).
*/
mtx_lock_spin(&mprof_mtx);
for (i = 0; i < first_free_mprof_buf; ++i) {
sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
mprof_buf[i].cnt_max / 1000,
mprof_buf[i].cnt_tot / 1000,
mprof_buf[i].cnt_cur,
mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
mprof_buf[i].cnt_contest_holding,
mprof_buf[i].cnt_contest_locking,
mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
if (sbuf_overflowed(sb)) {
mtx_unlock_spin(&mprof_mtx);
sbuf_delete(sb);
multiplier++;
goto retry_sbufops;
}
}
mtx_unlock_spin(&mprof_mtx);
sbuf_finish(sb);
error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
sbuf_delete(sb);
return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
int error, v;
if (first_free_mprof_buf == 0)
return (0);
v = 0;
error = sysctl_handle_int(oidp, &v, 0, req);
if (error)
return (error);
if (req->newptr == NULL)
return (error);
if (v == 0)
return (0);
mtx_lock_spin(&mprof_mtx);
bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
bzero(mprof_hash, sizeof(struct mtx *) * MPROF_HASH_SIZE);
first_free_mprof_buf = 0;
mtx_unlock_spin(&mprof_mtx);
return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
#endif
/*
* Function versions of the inlined __mtx_* macros. These are used by
* modules and can also be called from assembly language if needed.
@ -274,6 +123,7 @@ SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{
uint64_t waittime;
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
@ -283,20 +133,14 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
file, line));
WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
file, line);
lock_profile_waitstart(&waittime);
_get_sleep_lock(m, curthread, opts, file, line);
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
#ifdef MUTEX_PROFILING
/* don't reset the timer when/if recursing */
if (m->mtx_acqtime == 0) {
m->mtx_filename = file;
m->mtx_lineno = line;
m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
++mutex_prof_acquisitions;
}
#endif
lock_profile_obtain_lock_success(&m->mtx_object, waittime, file, line);
}
void
@ -314,76 +158,16 @@ _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
if (m->mtx_acqtime != 0) {
static const char *unknown = "(unknown)";
struct mutex_prof *mpp;
u_int64_t acqtime, now;
const char *p, *q;
volatile u_int hash;
now = nanoseconds();
acqtime = m->mtx_acqtime;
m->mtx_acqtime = 0;
if (now <= acqtime)
goto out;
for (p = m->mtx_filename;
p != NULL && strncmp(p, "../", 3) == 0; p += 3)
/* nothing */ ;
if (p == NULL || *p == '\0')
p = unknown;
for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
mtx_lock_spin(&mprof_mtx);
for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
if (mpp->line == m->mtx_lineno &&
strcmp(mpp->file, p) == 0)
break;
if (mpp == NULL) {
/* Just exit if we cannot get a trace buffer */
if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
++mutex_prof_rejected;
goto unlock;
}
mpp = &mprof_buf[first_free_mprof_buf++];
mpp->name = mtx_name(m);
mpp->file = p;
mpp->line = m->mtx_lineno;
mpp->next = mprof_hash[hash];
if (mprof_hash[hash] != NULL)
++mutex_prof_collisions;
mprof_hash[hash] = mpp;
++mutex_prof_records;
}
/*
* Record if the mutex has been held longer now than ever
* before.
*/
if (now - acqtime > mpp->cnt_max)
mpp->cnt_max = now - acqtime;
mpp->cnt_tot += now - acqtime;
mpp->cnt_cur++;
/*
* There's a small race, really we should cmpxchg
* 0 with the current value, but that would bill
* the contention to the wrong lock instance if
* it followed this also.
*/
mpp->cnt_contest_holding += m->mtx_contest_holding;
m->mtx_contest_holding = 0;
mpp->cnt_contest_locking += m->mtx_contest_locking;
m->mtx_contest_locking = 0;
unlock:
mtx_unlock_spin(&mprof_mtx);
}
out:
#endif
lock_profile_release_lock(&m->mtx_object);
_rel_sleep_lock(m, curthread, opts, file, line);
}
void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{
uint64_t waittime;
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
@ -393,10 +177,12 @@ _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
m->mtx_object.lo_name, file, line));
WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
file, line);
lock_profile_waitstart(&waittime);
_get_spin_lock(m, curthread, opts, file, line);
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
lock_profile_obtain_lock_success(&m->mtx_object, waittime, file, line);
}
void
@ -413,6 +199,7 @@ _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
mtx_assert(m, MA_OWNED);
lock_profile_release_lock(&m->mtx_object);
_rel_spin_lock(m);
}
@ -425,6 +212,7 @@ int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
int rval;
uint64_t waittime = 0;
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
@ -445,6 +233,8 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
curthread->td_locks++;
lock_profile_obtain_lock_success(&m->mtx_object, waittime, file, line);
}
return (rval);
@ -463,13 +253,11 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
volatile struct thread *owner;
#endif
uintptr_t v;
#ifdef KTR
int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
uintptr_t v;
int contested;
#endif
if (mtx_owned(m)) {
KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
@ -487,14 +275,8 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
"_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef MUTEX_PROFILING
contested = 0;
#endif
while (!_obtain_lock(m, tid)) {
#ifdef MUTEX_PROFILING
contested = 1;
atomic_add_int(&m->mtx_contest_holding, 1);
#endif
lock_profile_obtain_lock_failed(&m->mtx_object, &contested);
turnstile_lock(&m->mtx_object);
v = m->mtx_lock;
@ -585,10 +367,10 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
m->mtx_object.lo_name, (void *)tid, file, line);
}
#endif
#ifdef MUTEX_PROFILING
#ifdef LOCK_PROFILING
if (contested)
m->mtx_contest_locking++;
m->mtx_contest_holding = 0;
m->mtx_object.lo_profile_obj.lpo_contest_locking++;
m->mtx_object.lo_profile_obj.lpo_contest_holding = 0;
#endif
return;
}
@ -605,12 +387,13 @@ _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
int line)
{
struct thread *td;
int i = 0;
int contested, i = 0;
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
while (!_obtain_lock(m, tid)) {
lock_profile_obtain_lock_failed(&m->mtx_object, &contested);
/* Give interrupts a chance while we spin. */
spinlock_exit();
@ -845,7 +628,7 @@ mtx_init(struct mtx *m, const char *name, const char *type, int opts)
int flags;
MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
MTX_NOWITNESS | MTX_DUPOK)) == 0);
MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
#ifdef MUTEX_DEBUG
/* Diagnostic and error correction */
@ -866,18 +649,14 @@ mtx_init(struct mtx *m, const char *name, const char *type, int opts)
flags |= LO_WITNESS;
if (opts & MTX_DUPOK)
flags |= LO_DUPOK;
if (opts & MTX_NOPROFILE)
flags |= LO_NOPROFILE;
/* Initialize mutex. */
m->mtx_lock = MTX_UNOWNED;
m->mtx_recurse = 0;
#ifdef MUTEX_PROFILING
m->mtx_acqtime = 0;
m->mtx_filename = NULL;
m->mtx_lineno = 0;
m->mtx_contest_holding = 0;
m->mtx_contest_locking = 0;
#endif
lock_profile_object_init(&m->mtx_object, name);
lock_init(&m->mtx_object, class, name, type, flags);
}
@ -908,6 +687,7 @@ mtx_destroy(struct mtx *m)
}
m->mtx_lock = MTX_DESTROYED;
lock_profile_object_destroy(&m->mtx_object);
lock_destroy(&m->mtx_object);
}
@ -931,6 +711,8 @@ mutex_init(void)
mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
mtx_lock(&Giant);
lock_profile_init();
}
#ifdef DDB

View File

@ -44,7 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>
#ifdef DDB
@ -86,6 +86,7 @@ rw_init(struct rwlock *rw, const char *name)
rw->rw_lock = RW_UNLOCKED;
lock_profile_object_init(&rw->rw_object, name);
lock_init(&rw->rw_object, &lock_class_rw, name, NULL, LO_WITNESS |
LO_RECURSABLE | LO_UPGRADABLE);
}
@ -95,6 +96,7 @@ rw_destroy(struct rwlock *rw)
{
KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
lock_profile_object_destroy(&rw->rw_object);
lock_destroy(&rw->rw_object);
}
@ -109,6 +111,7 @@ rw_sysinit(void *arg)
void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{
uint64_t waitstart;
MPASS(curthread != NULL);
KASSERT(rw_wowner(rw) != curthread,
@ -116,7 +119,9 @@ _rw_wlock(struct rwlock *rw, const char *file, int line)
rw->rw_object.lo_name, file, line));
WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
line);
lock_profile_waitstart(&waitstart);
__rw_wlock(rw, curthread, file, line);
lock_profile_obtain_lock_success(&rw->rw_object, waitstart, file, line);
LOCK_LOG_LOCK("WLOCK", &rw->rw_object, 0, 0, file, line);
WITNESS_LOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
@ -131,6 +136,7 @@ _rw_wunlock(struct rwlock *rw, const char *file, int line)
curthread->td_locks--;
WITNESS_UNLOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("WUNLOCK", &rw->rw_object, 0, 0, file, line);
lock_profile_release_lock(&rw->rw_object);
__rw_wunlock(rw, curthread, file, line);
}
@ -140,6 +146,8 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
#ifdef SMP
volatile struct thread *owner;
#endif
uint64_t waitstart;
int contested;
uintptr_t x;
KASSERT(rw_wowner(rw) != curthread,
@ -158,6 +166,7 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
* be blocked on the writer, and the writer would be blocked
* waiting for the reader to release its original read lock.
*/
lock_profile_waitstart(&waitstart);
for (;;) {
/*
* Handle the easy case. If no other thread has a write
@ -180,6 +189,7 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
MPASS((x & RW_LOCK_READ_WAITERS) == 0);
if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
x + RW_ONE_READER)) {
lock_profile_obtain_lock_success(&rw->rw_object, waitstart, file, line);
if (LOCK_LOG_TEST(&rw->rw_object, 0))
CTR4(KTR_LOCK,
"%s: %p succeed %p -> %p", __func__,
@ -188,6 +198,7 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
break;
}
cpu_spinwait();
lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
continue;
}
@ -236,6 +247,7 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
*/
owner = (struct thread *)RW_OWNER(x);
if (TD_IS_RUNNING(owner)) {
lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
turnstile_release(&rw->rw_object);
if (LOCK_LOG_TEST(&rw->rw_object, 0))
CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
@ -301,7 +313,9 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
break;
}
continue;
}
} else
lock_profile_release_lock(&rw->rw_object);
/*
* We should never have read waiters while at least one
@ -397,6 +411,7 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
#ifdef SMP
volatile struct thread *owner;
#endif
int contested;
uintptr_t v;
if (LOCK_LOG_TEST(&rw->rw_object, 0))
@ -438,6 +453,7 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
}
turnstile_release(&rw->rw_object);
cpu_spinwait();
lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
continue;
}
@ -451,6 +467,7 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
v | RW_LOCK_WRITE_WAITERS)) {
turnstile_release(&rw->rw_object);
cpu_spinwait();
lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
continue;
}
if (LOCK_LOG_TEST(&rw->rw_object, 0))
@ -466,6 +483,7 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
*/
owner = (struct thread *)RW_OWNER(v);
if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
turnstile_release(&rw->rw_object);
if (LOCK_LOG_TEST(&rw->rw_object, 0))
CTR3(KTR_LOCK, "%s: spinning on %p held by %p",

View File

@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/lock_profile.h>
#ifdef DDB
#include <ddb/ddb.h>
@ -85,6 +86,7 @@ sx_init(struct sx *sx, const char *description)
cv_init(&sx->sx_excl_cv, description);
sx->sx_excl_wcnt = 0;
sx->sx_xholder = NULL;
lock_profile_object_init(&sx->sx_object, description);
lock_init(&sx->sx_object, &lock_class_sx, description, NULL,
LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
}
@ -100,13 +102,16 @@ sx_destroy(struct sx *sx)
sx->sx_lock = NULL;
cv_destroy(&sx->sx_shrd_cv);
cv_destroy(&sx->sx_excl_cv);
lock_profile_object_destroy(&sx->sx_object);
lock_destroy(&sx->sx_object);
}
void
_sx_slock(struct sx *sx, const char *file, int line)
{
uint64_t waittime = 0;
int contested;
mtx_lock(sx->sx_lock);
KASSERT(sx->sx_xholder != curthread,
@ -117,8 +122,11 @@ _sx_slock(struct sx *sx, const char *file, int line)
/*
* Loop in case we lose the race for lock acquisition.
*/
if (sx->sx_cnt < 0)
lock_profile_waitstart(&waittime);
while (sx->sx_cnt < 0) {
sx->sx_shrd_wcnt++;
lock_profile_obtain_lock_failed(&sx->sx_object, &contested);
cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
sx->sx_shrd_wcnt--;
}
@ -126,6 +134,9 @@ _sx_slock(struct sx *sx, const char *file, int line)
/* Acquire a shared lock. */
sx->sx_cnt++;
if (sx->sx_cnt == 1)
lock_profile_obtain_lock_success(&sx->sx_object, waittime, file, line);
LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
WITNESS_LOCK(&sx->sx_object, 0, file, line);
curthread->td_locks++;
@ -155,6 +166,8 @@ _sx_try_slock(struct sx *sx, const char *file, int line)
void
_sx_xlock(struct sx *sx, const char *file, int line)
{
int contested;
uint64_t waittime = 0;
mtx_lock(sx->sx_lock);
@ -171,9 +184,12 @@ _sx_xlock(struct sx *sx, const char *file, int line)
WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
line);
if (sx->sx_cnt)
lock_profile_waitstart(&waittime);
/* Loop in case we lose the race for lock acquisition. */
while (sx->sx_cnt != 0) {
sx->sx_excl_wcnt++;
lock_profile_obtain_lock_failed(&sx->sx_object, &contested);
cv_wait(&sx->sx_excl_cv, sx->sx_lock);
sx->sx_excl_wcnt--;
}
@ -184,6 +200,7 @@ _sx_xlock(struct sx *sx, const char *file, int line)
sx->sx_cnt--;
sx->sx_xholder = curthread;
lock_profile_obtain_lock_success(&sx->sx_object, waittime, file, line);
LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
@ -225,6 +242,8 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
/* Release. */
sx->sx_cnt--;
if (sx->sx_cnt == 0)
lock_profile_release_lock(&sx->sx_object);
/*
* If we just released the last shared lock, wake any waiters up, giving
* exclusive lockers precedence. In order to make sure that exclusive
@ -257,6 +276,7 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
sx->sx_cnt++;
sx->sx_xholder = NULL;
lock_profile_release_lock(&sx->sx_object);
/*
* Wake up waiters if there are any. Give precedence to slock waiters.
*/

View File

@ -36,12 +36,16 @@
__FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include "opt_mprof.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/lock_profile.h>
#ifdef DDB
#include <ddb/ddb.h>
@ -56,6 +60,136 @@ struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
&lock_class_rw,
};
#ifdef LOCK_PROFILING
#include <machine/cpufunc.h>
SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling");
int lock_prof_enable = 0;
SYSCTL_INT(_debug_lock_prof, OID_AUTO, enable, CTLFLAG_RW,
&lock_prof_enable, 0, "Enable lock profiling");
/*
* lprof_buf is a static pool of profiling records to avoid possible
* reentrance of the memory allocation functions.
*
* Note: NUM_LPROF_BUFFERS must be smaller than LPROF_HASH_SIZE.
*/
struct lock_prof lprof_buf[LPROF_HASH_SIZE];
static int allocated_lprof_buf;
struct mtx lprof_locks[LPROF_LOCK_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define LPROF_SBUF_SIZE 256 * 400
static int lock_prof_acquisitions;
SYSCTL_INT(_debug_lock_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
&lock_prof_acquisitions, 0, "Number of mutex acquistions recorded");
static int lock_prof_records;
SYSCTL_INT(_debug_lock_prof, OID_AUTO, records, CTLFLAG_RD,
&lock_prof_records, 0, "Number of profiling records");
static int lock_prof_maxrecords = LPROF_HASH_SIZE;
SYSCTL_INT(_debug_lock_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
&lock_prof_maxrecords, 0, "Maximum number of profiling records");
static int lock_prof_rejected;
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
&lock_prof_rejected, 0, "Number of rejected profiling records");
static int lock_prof_hashsize = LPROF_HASH_SIZE;
SYSCTL_INT(_debug_lock_prof, OID_AUTO, hashsize, CTLFLAG_RD,
&lock_prof_hashsize, 0, "Hash size");
static int lock_prof_collisions = 0;
SYSCTL_INT(_debug_lock_prof, OID_AUTO, collisions, CTLFLAG_RD,
&lock_prof_collisions, 0, "Number of hash collisions");
#ifndef USE_CPU_NANOSECONDS
static u_int64_t
nanoseconds(void)
{
struct timespec tv;
nanotime(&tv);
return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}
#endif
static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
struct sbuf *sb;
int error, i;
static int multiplier = 1;
const char *p;
if (allocated_lprof_buf == 0)
return (SYSCTL_OUT(req, "No locking recorded",
sizeof("No locking recorded")));
retry_sbufops:
sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
sbuf_printf(sb, "\n%6s %12s %12s %11s %5s %5s %12s %12s %s\n",
"max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cn\
t_lock", "name");
for (i = 0; i < LPROF_HASH_SIZE; ++i) {
if (lprof_buf[i].name == NULL)
continue;
for (p = lprof_buf[i].file;
p != NULL && strncmp(p, "../", 3) == 0; p += 3)
/* nothing */ ;
sbuf_printf(sb, "%6ju %12ju %12ju %11ju %5ju %5ju %12ju %12ju %s:%d (\
%s)\n",
lprof_buf[i].cnt_max / 1000,
lprof_buf[i].cnt_tot / 1000,
lprof_buf[i].cnt_wait / 1000,
lprof_buf[i].cnt_cur,
lprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
lprof_buf[i].cnt_tot / (lprof_buf[i].cnt_cur * 1000),
lprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
lprof_buf[i].cnt_wait / (lprof_buf[i].cnt_cur * 1000),
lprof_buf[i].cnt_contest_holding,
lprof_buf[i].cnt_contest_locking,
p, lprof_buf[i].line, lprof_buf[i].name);
if (sbuf_overflowed(sb)) {
sbuf_delete(sb);
multiplier++;
goto retry_sbufops;
}
}
sbuf_finish(sb);
error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
sbuf_delete(sb);
return (error);
}
static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
int error, v;
if (allocated_lprof_buf == 0)
return (0);
v = 0;
error = sysctl_handle_int(oidp, &v, 0, req);
if (error)
return (error);
if (req->newptr == NULL)
return (error);
if (v == 0)
return (0);
bzero(lprof_buf, LPROF_HASH_SIZE*sizeof(*lprof_buf));
allocated_lprof_buf = 0;
return (0);
}
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
NULL, 0, dump_lock_prof_stats, "A", "Mutex profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
NULL, 0, reset_lock_prof_stats, "I", "Reset mutex profiling statistics");
#endif
void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
const char *type, int flags)
@ -113,3 +247,160 @@ DB_SHOW_COMMAND(lock, db_show_lock)
class->lc_ddb_show(lock);
}
#endif
#ifdef LOCK_PROFILING
void _lock_profile_obtain_lock_success(struct lock_object *lo, uint64_t waittime, const char *file, int line)
{
struct lock_profile_object *l = &lo->lo_profile_obj;
/* don't reset the timer when/if recursing */
if (l->lpo_acqtime == 0) {
l->lpo_filename = file;
l->lpo_lineno = line;
l->lpo_acqtime = nanoseconds();
if (waittime) {
if (l->lpo_acqtime > waittime)
l->lpo_waittime = l->lpo_acqtime - waittime;
}
}
}
void _lock_profile_update_wait(struct lock_object *lo, uint64_t waitstart)
{
struct lock_profile_object *l = &lo->lo_profile_obj;
if (lock_prof_enable && waitstart) {
uint64_t now, waittime;
struct lock_prof *mpp;
u_int hash;
const char *p = l->lpo_filename;
int collision = 0;
now = nanoseconds();
if (now < waitstart)
return;
waittime = now - waitstart;
hash = (l->lpo_namehash * 31 * 31 + (uintptr_t)p * 31 + l->lpo_lineno) & LPROF_HASH_MASK;
mpp = &lprof_buf[hash];
while (mpp->name != NULL) {
if (mpp->line == l->lpo_lineno &&
mpp->file == p &&
mpp->namehash == l->lpo_namehash)
break;
/* If the lprof_hash entry is allocated to someone else, try the next one */
collision = 1;
CTR4(KTR_SPARE1, "Hash collision, %s:%d %s(%x)", mpp->file, mpp->line, mpp->name, mpp->namehash);
hash = (hash + 1) & LPROF_HASH_MASK;
mpp = &lprof_buf[hash];
}
if (mpp->name == NULL) {
int buf;
buf = atomic_fetchadd_int(&allocated_lprof_buf, 1);
/* Just exit if we cannot get a trace buffer */
if (buf >= LPROF_HASH_SIZE) {
++lock_prof_rejected;
return;
}
mpp->file = p;
mpp->line = l->lpo_lineno;
mpp->name = lo->lo_name;
mpp->namehash = l->lpo_namehash;
if (collision)
++lock_prof_collisions;
/* We might have raced someone else but who cares, they'll try again next time */
++lock_prof_records;
}
LPROF_LOCK(hash);
mpp->cnt_wait += waittime;
LPROF_UNLOCK(hash);
}
}
void _lock_profile_release_lock(struct lock_object *lo)
{
struct lock_profile_object *l = &lo->lo_profile_obj;
if (l->lpo_acqtime && !(lo->lo_flags & LO_NOPROFILE)) {
const char *unknown = "(unknown)";
u_int64_t acqtime, now, waittime;
struct lock_prof *mpp;
u_int hash;
const char *p = l->lpo_filename;
int collision = 0;
now = nanoseconds();
acqtime = l->lpo_acqtime;
waittime = l->lpo_waittime;
if (now <= acqtime)
return;
if (p == NULL || *p == '\0')
p = unknown;
hash = (l->lpo_namehash * 31 * 31 + (uintptr_t)p * 31 + l->lpo_lineno) & LPROF_HASH_MASK;
CTR5(KTR_SPARE1, "Hashing %s(%x) %s:%d to %d", l->lpo_name,
l->lpo_namehash, p, l->lpo_lineno, hash);
mpp = &lprof_buf[hash];
while (mpp->name != NULL) {
if (mpp->line == l->lpo_lineno &&
mpp->file == p &&
mpp->namehash == l->lpo_namehash)
break;
/* If the lprof_hash entry is allocated to someone
* else, try the next one
*/
collision = 1;
CTR4(KTR_SPARE1, "Hash collision, %s:%d %s(%x)", mpp->file,
mpp->line, mpp->name, mpp->namehash);
hash = (hash + 1) & LPROF_HASH_MASK;
mpp = &lprof_buf[hash];
}
if (mpp->name == NULL) {
int buf;
buf = atomic_fetchadd_int(&allocated_lprof_buf, 1);
/* Just exit if we cannot get a trace buffer */
if (buf >= LPROF_HASH_SIZE) {
++lock_prof_rejected;
return;
}
mpp->file = p;
mpp->line = l->lpo_lineno;
mpp->name = lo->lo_name;
mpp->namehash = l->lpo_namehash;
if (collision)
++lock_prof_collisions;
/*
* We might have raced someone else but who cares,
* they'll try again next time
*/
++lock_prof_records;
}
LPROF_LOCK(hash);
/*
* Record if the mutex has been held longer now than ever
* before.
*/
if (now - acqtime > mpp->cnt_max)
mpp->cnt_max = now - acqtime;
mpp->cnt_tot += now - acqtime;
mpp->cnt_wait += waittime;
mpp->cnt_cur++;
/*
* There's a small race, really we should cmpxchg
* 0 with the current value, but that would bill
* the contention to the wrong lock instance if
* it followed this also.
*/
mpp->cnt_contest_holding += l->lpo_contest_holding;
mpp->cnt_contest_locking += l->lpo_contest_locking;
LPROF_UNLOCK(hash);
}
l->lpo_acqtime = 0;
l->lpo_waittime = 0;
l->lpo_contest_locking = 0;
l->lpo_contest_holding = 0;
}
#endif

View File

@ -482,7 +482,7 @@ witness_initialize(void *dummy __unused)
CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
MTX_NOWITNESS);
MTX_NOWITNESS | MTX_NOPROFILE);
for (i = 0; i < WITNESS_COUNT; i++)
witness_free(&w_data[i]);
for (i = 0; i < WITNESS_CHILDCOUNT; i++)

View File

@ -249,6 +249,9 @@ void aszero(u_long asi, vm_offset_t dst, size_t len);
#include <machine/sun4v_cpufunc.h>
#define USE_CPU_NANOSECONDS
#define nanoseconds() rd(tick)
#undef LDNC_GEN
#undef STNC_GEN

View File

@ -31,10 +31,34 @@
#ifndef _SYS__LOCK_H_
#define _SYS__LOCK_H_
struct lock_profile_object {
/*
* This does not result in variant structure sizes because
* LOCK_PROFILING is in opt_global.h
*/
u_int64_t lpo_acqtime;
u_int64_t lpo_waittime;
const char *lpo_filename;
u_int lpo_namehash;
int lpo_lineno;
/*
* Fields relating to measuring contention on mutexes.
* holding must be accessed atomically since it's
* modified by threads that don't yet hold the mutex.
* locking is only modified and referenced while
* the mutex is held.
*/
u_int lpo_contest_holding;
u_int lpo_contest_locking;
};
struct lock_object {
const char *lo_name; /* Individual lock name. */
const char *lo_type; /* General lock type. */
u_int lo_flags;
#ifdef LOCK_PROFILING
struct lock_profile_object lo_profile_obj;
#endif
union { /* Data for witness. */
STAILQ_ENTRY(lock_object) lod_list;
struct witness *lod_witness;

View File

@ -38,25 +38,6 @@ struct mtx {
struct lock_object mtx_object; /* Common lock properties. */
volatile uintptr_t mtx_lock; /* Owner and flags. */
volatile u_int mtx_recurse; /* Number of recursive holds. */
#ifdef MUTEX_PROFILING
/*
* This does not result in variant structure sizes because
* MUTEX_PROFILING is in opt_global.h
*/
u_int64_t mtx_acqtime;
const char *mtx_filename;
int mtx_lineno;
/*
* Fields relating to measuring contention on mutexes.
* holding must be accessed atomically since it's
* modified by threads that don't yet hold the mutex.
* locking is only modified and referenced while
* the mutex is held.
*/
u_int mtx_contest_holding;
u_int mtx_contest_locking;
#endif
};
#endif /* !_SYS__MUTEX_H_ */

View File

@ -69,6 +69,7 @@ struct lock_class {
#define LO_DUPOK 0x00400000 /* Don't check for duplicate acquires */
#define LO_ENROLLPEND 0x00800000 /* On the pending enroll list. */
#define LO_CLASSMASK 0x0f000000 /* Class index bitmask. */
#define LO_NOPROFILE 0x10000000 /* Don't profile this lock */
/*
* Lock classes are statically assigned an index into the gobal lock_classes
@ -142,7 +143,7 @@ struct lock_list_entry {
* calling conventions for this debugging code in modules so that modules can
* work with both debug and non-debug kernels.
*/
#if defined(KLD_MODULE) || defined(WITNESS) || defined(INVARIANTS) || defined(INVARIANT_SUPPORT) || defined(KTR) || defined(MUTEX_PROFILING)
#if defined(KLD_MODULE) || defined(WITNESS) || defined(INVARIANTS) || defined(INVARIANT_SUPPORT) || defined(KTR) || defined(LOCK_PROFILING)
#define LOCK_DEBUG 1
#else
#define LOCK_DEBUG 0

sys/sys/lock_profile.h (new file, 170 lines)
View File

@ -0,0 +1,170 @@
/*-
* Copyright (c) 2006 Kip Macy kmacy@FreeBSD.org
* Copyright (c) 2006 Dag-Erling Smorgrav des@des.no
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_LOCK_PROFILE_H_
#define _SYS_LOCK_PROFILE_H_
#ifdef LOCK_PROFILING
#include <sys/stdint.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#ifndef LPROF_HASH_SIZE
#define LPROF_HASH_SIZE 4096
#define LPROF_HASH_MASK (LPROF_HASH_SIZE - 1)
#endif
struct lock_prof {
const char *name;
const char *file;
u_int namehash;
int line;
uintmax_t cnt_max;
uintmax_t cnt_tot;
uintmax_t cnt_wait;
uintmax_t cnt_cur;
uintmax_t cnt_contest_holding;
uintmax_t cnt_contest_locking;
};
extern struct lock_prof lprof_buf[LPROF_HASH_SIZE];
extern int allocated_lprof_buf;
#define LPROF_SBUF_SIZE 256 * 400
/* We keep a smaller pool of spin mutexes for protecting the lprof hash entries */
#define LPROF_LOCK_SIZE 16
#define LPROF_LOCK_MASK (LPROF_LOCK_SIZE - 1)
#define LPROF_LHASH(hash) ((hash) & LPROF_LOCK_MASK)
#define LPROF_LOCK(hash) mtx_lock_spin(&lprof_locks[LPROF_LHASH(hash)])
#define LPROF_UNLOCK(hash) mtx_unlock_spin(&lprof_locks[LPROF_LHASH(hash)])
extern struct mtx lprof_locks[LPROF_LOCK_SIZE];
extern int lock_prof_enable;
extern int lock_prof_records;
extern int lock_prof_rejected;
extern int lock_prof_collisions;
void _lock_profile_obtain_lock_success(struct lock_object *lo, uint64_t waittime, const char *file, int line);
void _lock_profile_update_wait(struct lock_object *lo, uint64_t waitstart);
void _lock_profile_release_lock(struct lock_object *lo);
static inline void lock_profile_init(void)
{
int i;
/* Initialize the mutex profiling locks */
for (i = 0; i < LPROF_LOCK_SIZE; i++) {
mtx_init(&lprof_locks[i], "mprof lock",
NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
}
}
static inline void lock_profile_object_init(struct lock_object *lo, const char *name) {
const char *p;
u_int hash = 0;
struct lock_profile_object *l = &lo->lo_profile_obj;
lo->lo_flags = 0;
lo->lo_name = name;
l->lpo_acqtime = 0;
l->lpo_waittime = 0;
l->lpo_filename = NULL;
l->lpo_lineno = 0;
l->lpo_contest_holding = 0;
l->lpo_contest_locking = 0;
/* Hash the mutex name to an int so we don't have to strcmp() it repeatedly */
for (p = name; *p != '\0'; p++)
hash = 31 * hash + *p;
l->lpo_namehash = hash;
#if 0
if (opts & MTX_PROFILE)
l->lpo_stack = stack_create();
#endif
}
static inline void
lock_profile_object_destroy(struct lock_object *lo)
{
#if 0
struct lock_profile_object *l = &lo->lo_profile_obj;
if (lo->lo_flags & LO_PROFILE)
stack_destroy(l->lpo_stack);
#endif
}
static inline void lock_profile_waitstart(uint64_t *waittime)
{
if (lock_prof_enable)
*waittime = rd(tick);
}
static inline void lock_profile_obtain_lock_failed(struct lock_object *lo, int *contested)
{
struct lock_profile_object *l = &lo->lo_profile_obj;
if (lock_prof_enable) {
*contested = 1;
atomic_add_int(&l->lpo_contest_holding, 1);
}
}
static inline void lock_profile_obtain_lock_success(struct lock_object *lo, uint64_t waittime, const char *file, int line)
{
if (lock_prof_enable)
_lock_profile_obtain_lock_success(lo, waittime, file, line);
}
static inline void lock_profile_update_wait(struct lock_object *lo, uint64_t waitstart)
{
if (lock_prof_enable)
_lock_profile_update_wait(lo, waitstart);
}
static inline void lock_profile_release_lock(struct lock_object *lo)
{
struct lock_profile_object *l = &lo->lo_profile_obj;
if (lock_prof_enable || l->lpo_acqtime)
_lock_profile_release_lock(lo);
}
#else /* !LOCK_PROFILING */
static inline void lock_profile_init(void) {;}
static inline void lock_profile_update_wait(struct lock_object *lo, uint64_t waitstart) {;}
static inline void lock_profile_waitstart(uint64_t *waittime) {;}
static inline void lock_profile_release_lock(struct lock_object *lo) {;}
static inline void lock_profile_obtain_lock_failed(struct lock_object *lo, int *contested) {;}
static inline void lock_profile_obtain_lock_success(struct lock_object *lo, uint64_t waittime,
const char *file, int line) {;}
static inline void lock_profile_object_destroy(struct lock_object *lo) {;}
static inline void lock_profile_object_init(struct lock_object *lo, const char *name) {;}
#endif /* !LOCK_PROFILING */
#endif /* _SYS_LOCK_PROFILE_H_ */

View File

@ -40,6 +40,8 @@
#ifdef DEBUG_LOCKS
#include <sys/stack.h> /* XXX */
#endif
#include <sys/queue.h>
#include <sys/_lock.h>
struct mtx;
@ -49,20 +51,23 @@ struct mtx;
* can be gained.
*/
struct lock {
struct lock_object lk_object; /* common lock properties */
struct mtx *lk_interlock; /* lock on remaining fields */
u_int lk_flags; /* see below */
int lk_sharecount; /* # of accepted shared locks */
int lk_waitcount; /* # of processes sleeping for lock */
short lk_exclusivecount; /* # of recursive exclusive locks */
short lk_prio; /* priority at which to sleep */
const char *lk_wmesg; /* resource sleeping (for tsleep) */
int lk_timo; /* maximum sleep time (for tsleep) */
struct thread *lk_lockholder; /* thread of exclusive lock holder */
struct lock *lk_newlock; /* lock taking over this lock */
#ifdef DEBUG_LOCKS
struct stack lk_stack;
#endif
};
#define lk_flags lk_object.lo_flags
#define lk_wmesg lk_object.lo_name
/*
* Lock request types:
* LK_SHARED - get one of many possible shared locks. If a process
@ -197,13 +202,15 @@ void lockinit(struct lock *, int prio, const char *wmesg,
int timo, int flags);
void lockdestroy(struct lock *);
int lockmgr(struct lock *, u_int flags,
struct mtx *, struct thread *p);
int _lockmgr(struct lock *, int flags,
struct mtx *, struct thread *p, char *file, int line);
void transferlockers(struct lock *, struct lock *);
void lockmgr_printinfo(struct lock *);
int lockstatus(struct lock *, struct thread *);
int lockcount(struct lock *);
int lockwaiters(struct lock *);
#define lockmgr(lock, flags, mtx, td) _lockmgr((lock), (flags), (mtx), (td), __FILE__, __LINE__)
#ifdef DDB
int lockmgr_chain(struct thread *td, struct thread **ownerp);
#endif

View File

@ -56,6 +56,7 @@
#define MTX_SPIN 0x00000001 /* Spin lock (disables interrupts) */
#define MTX_RECURSE 0x00000004 /* Option: lock allowed to recurse */
#define MTX_NOWITNESS 0x00000008 /* Don't do any witness checking. */
#define MTX_NOPROFILE 0x00000020
/*
* Option flags passed to certain lock/unlock routines, through the use