- Fix some gcc warnings in lock_profile.h

- add cnt_hold/cnt_lock support for spin mutexes
- make sure contested is initialized to zero so that it is only bumped when appropriate
- move the initialization function to kern_mutex.c to avoid a cyclic dependency
  between mutex.h and lock_profile.h
Kip Macy 2006-12-16 02:37:58 +00:00
parent 6a751174bd
commit 1364a812e7
3 changed files with 40 additions and 24 deletions
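
Review note: the core behavioral change is that "contested" now starts at
zero, lpo_contest_holding is bumped at most once per acquisition attempt, and
lpo_contest_locking is bumped once the lock is finally held. A minimal
user-space sketch of that pattern, with C11 atomics standing in for the
kernel's atomic_add_int (names mirror the diff, but this is illustrative
code, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for the two profiling counters this commit touches. */
struct lock_profile_object {
	atomic_int lpo_contest_holding;	/* waiters observed right now */
	int lpo_contest_locking;	/* acquisitions that contended */
};

/* Mirrors lock_profile_obtain_lock_failed(): bump at most once. */
static void
obtain_lock_failed(struct lock_profile_object *l, int *contested)
{
	if (*contested == 0) {
		atomic_fetch_add(&l->lpo_contest_holding, 1);
		*contested = 1;
	}
}

/* Mirrors lock_profile_update_contest_locking(): run once the lock is held. */
static void
update_contest_locking(struct lock_profile_object *l, int contested)
{
	atomic_store(&l->lpo_contest_holding, 0);
	if (contested)
		l->lpo_contest_locking++;
}

int
main(void)
{
	struct lock_profile_object l = { 0, 0 };
	int contested = 0;

	/* Three failed attempts still count as one contested acquisition. */
	obtain_lock_failed(&l, &contested);
	obtain_lock_failed(&l, &contested);
	obtain_lock_failed(&l, &contested);
	update_contest_locking(&l, contested);
	printf("contest_locking = %d\n", l.lpo_contest_locking);	/* 1 */
	return (0);
}

Without the "*contested == 0" guard, and without zero-initializing contested
in the callers, a single slow-path acquisition could inflate the holding
count arbitrarily; that is what the contested-initialization bullet above
addresses.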

sys/kern/kern_mutex.c

@@ -116,6 +116,20 @@ struct lock_class lock_class_mtx_spin = {
 struct mtx sched_lock;
 struct mtx Giant;
 
+#ifdef LOCK_PROFILING
+static inline void lock_profile_init(void)
+{
+	int i;
+	/* Initialize the mutex profiling locks */
+	for (i = 0; i < LPROF_LOCK_SIZE; i++) {
+		mtx_init(&lprof_locks[i], "mprof lock",
+		    NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
+	}
+}
+#else
+static inline void lock_profile_init(void) {;}
+#endif
+
 /*
  * Function versions of the inlined __mtx_* macros.  These are used by
  * modules and can also be called from assembly language if needed.
@@ -257,7 +271,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
 	int cont_logged = 0;
 #endif
 	uintptr_t v;
-	int contested;
+	int contested = 0;
 
 	if (mtx_owned(m)) {
 		KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
@@ -325,10 +339,11 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
 		 */
 		owner = (struct thread *)(v & ~MTX_FLAGMASK);
 #ifdef ADAPTIVE_GIANT
-		if (TD_IS_RUNNING(owner)) {
+		if (TD_IS_RUNNING(owner))
 #else
-		if (m != &Giant && TD_IS_RUNNING(owner)) {
+		if (m != &Giant && TD_IS_RUNNING(owner))
 #endif
+		{
 			turnstile_release(&m->mtx_object);
 			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
 				cpu_spinwait();
@@ -359,7 +374,6 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
 		turnstile_wait(&m->mtx_object, mtx_owner(m),
 		    TS_EXCLUSIVE_QUEUE);
 	}
-
 #ifdef KTR
 	if (cont_logged) {
 		CTR4(KTR_CONTENTION,
@@ -368,9 +382,9 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
 	}
 #endif
 #ifdef LOCK_PROFILING
-	m->mtx_object.lo_profile_obj.lpo_contest_holding = 0;
 	if (contested)
 		m->mtx_object.lo_profile_obj.lpo_contest_locking++;
+	m->mtx_object.lo_profile_obj.lpo_contest_holding = 0;
 #endif
 	return;
 }
@@ -387,7 +401,7 @@ _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
     int line)
 {
 	struct thread *td;
-	int contested, i = 0;
+	int contested = 0, i = 0;
 
 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
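
An aside on the _mtx_lock_sleep hunk above: the adaptive-spin body is
identical in the ADAPTIVE_GIANT and non-ADAPTIVE_GIANT builds, so the commit
hoists the opening brace below the #endif and keeps a single copy of the
body. A compilable toy version of the idiom (the condition and variable
names are placeholders, not kernel identifiers):

#include <stdbool.h>
#include <stdio.h>

#define ADAPTIVE_GIANT		/* comment out to build the other branch */

int
main(void)
{
	bool owner_running = true;	/* placeholder for TD_IS_RUNNING(owner) */
	bool is_giant = false;		/* placeholder for m == &Giant */

	(void)is_giant;			/* unused in the ADAPTIVE_GIANT build */
#ifdef ADAPTIVE_GIANT
	if (owner_running)
#else
	if (!is_giant && owner_running)
#endif
	{
		/* One shared body; only the condition varies per build. */
		puts("adaptively spinning on the owner");
	}
	return (0);
}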

sys/sys/lock_profile.h

@@ -35,6 +35,7 @@
 #include <sys/stdint.h>
 #include <sys/ktr.h>
 #include <sys/mutex.h>
+#include <machine/atomic.h>
 #include <machine/cpufunc.h>
 
 #ifndef LPROF_HASH_SIZE
@@ -61,7 +62,6 @@ struct lock_prof {
 };
 extern struct lock_prof lprof_buf[LPROF_HASH_SIZE];
 extern int allocated_lprof_buf;
-
 #define LPROF_SBUF_SIZE 256 * 400
 
 /* We keep a smaller pool of spin mutexes for protecting the lprof hash entries */
@@ -74,24 +74,11 @@ extern int allocated_lprof_buf;
 extern struct mtx lprof_locks[LPROF_LOCK_SIZE];
 extern int lock_prof_enable;
 extern int lock_prof_records;
 extern int lock_prof_rejected;
 extern int lock_prof_collisions;
 void _lock_profile_obtain_lock_success(struct lock_object *lo, uint64_t waittime, const char *file, int line);
 void _lock_profile_update_wait(struct lock_object *lo, uint64_t waitstart);
 void _lock_profile_release_lock(struct lock_object *lo);
-
-static inline void lock_profile_init(void)
-{
-	int i;
-
-	/* Initialize the mutex profiling locks */
-	for (i = 0; i < LPROF_LOCK_SIZE; i++) {
-		mtx_init(&lprof_locks[i], "mprof lock",
-		    NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
-	}
-}
-
-
 static inline void lock_profile_object_init(struct lock_object *lo, struct lock_class *class, const char *name) {
 	const char *p;
 	u_int hash = 0;
@@ -138,8 +125,11 @@ static inline void lock_profile_obtain_lock_failed(struct lock_object *lo, int *contested)
 {
 	struct lock_profile_object *l = &lo->lo_profile_obj;
 
 	if (lock_prof_enable) {
-		*contested = 1;
-		atomic_add_int(&l->lpo_contest_holding, 1);
+		if (*contested == 0) {
+			atomic_add_int(&l->lpo_contest_holding, 1);
+			*contested = 1;
+		}
+
 	}
 }
@@ -155,6 +145,15 @@ static inline void lock_profile_update_wait(struct lock_object *lo, uint64_t waitstart)
 	_lock_profile_update_wait(lo, waitstart);
 }
 
+static inline void lock_profile_update_contest_locking(struct lock_object *lo, int contested)
+{
+	if (lock_prof_enable) {
+		lo->lo_profile_obj.lpo_contest_holding = 0;
+		if (contested)
+			lo->lo_profile_obj.lpo_contest_locking++;
+	}
+}
+
 static inline void lock_profile_release_lock(struct lock_object *lo)
 {
 	struct lock_profile_object *l = &lo->lo_profile_obj;
@@ -163,9 +162,8 @@ static inline void lock_profile_release_lock(struct lock_object *lo)
 }
 
 #else /* !LOCK_PROFILING */
-static inline void lock_profile_init(void) {;}
-
 static inline void lock_profile_update_wait(struct lock_object *lo, uint64_t waitstart) {;}
+static inline void lock_profile_update_contest_locking(struct lock_object *lo, int contested) {;}
 static inline void lock_profile_waitstart(uint64_t *waittime) {;}
 static inline void lock_profile_release_lock(struct lock_object *lo) {;}
 static inline void lock_profile_obtain_lock_failed(struct lock_object *lo, int *contested) {;}
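
The comment in this header explains that a small pool of spin mutexes
(lprof_locks, LPROF_LOCK_SIZE entries) protects the much larger lprof hash.
A sketch of that many-to-few mapping follows; the sizes and the modulo
mapping are illustrative assumptions, since the real hash function is not
part of this diff:

#include <stdio.h>

#define LPROF_HASH_SIZE	4096	/* illustrative: many profiling entries */
#define LPROF_LOCK_SIZE	16	/* illustrative: few guarding spin locks */

/* Map a hash-table slot to the pool lock that guards it. */
static unsigned int
lprof_lock_index(unsigned int slot)
{
	return (slot % LPROF_LOCK_SIZE);
}

int
main(void)
{
	/* Slots 5 and 21 share a lock; slot 6 uses a different one. */
	printf("%u %u %u\n", lprof_lock_index(5), lprof_lock_index(21),
	    lprof_lock_index(6));
	return (0);
}

The trade-off is the usual one for lock pools: far less memory than one
mutex per hash entry, at the cost of occasional false sharing when two hot
entries map to the same pool lock.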

sys/sys/mutex.h

@@ -39,6 +39,7 @@
 #ifdef _KERNEL
 #include <sys/pcpu.h>
 #include <sys/lock_profile.h>
+#include <machine/atomic.h>
 #include <machine/cpufunc.h>
 #endif /* _KERNEL_ */
@@ -172,14 +173,17 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
 #ifdef SMP
 #define _get_spin_lock(mp, tid, opts, file, line) do {			\
 	uintptr_t _tid = (uintptr_t)(tid);				\
+	int contested = 0;						\
 									\
 	spinlock_enter();						\
 	if (!_obtain_lock((mp), _tid)) {				\
+		lock_profile_obtain_lock_failed(&mp->mtx_object, &contested);\
 		if ((mp)->mtx_lock == _tid)				\
 			(mp)->mtx_recurse++;				\
 		else							\
 			_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
 	}								\
+	lock_profile_update_contest_locking(&mp->mtx_object, contested);\
 } while (0)
 #else /* SMP */
 #define _get_spin_lock(mp, tid, opts, file, line) do {			\
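
Taken together, the SMP _get_spin_lock macro now follows this pattern: try
the atomic owner swap, record the first failure in "contested", and fold the
result into the lock's profile once the lock is held. A single-threaded
user-space approximation, with a C11 compare-and-swap standing in for
_obtain_lock() (purely to show the control flow, not kernel code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uintptr_t mtx_lock;	/* 0 means unowned */

/* Stand-in for _obtain_lock(): CAS unowned -> owned by tid. */
static int
obtain_lock(uintptr_t tid)
{
	uintptr_t unowned = 0;

	return (atomic_compare_exchange_strong(&mtx_lock, &unowned, tid));
}

int
main(void)
{
	uintptr_t tid = 42;
	int contested = 0;

	atomic_store(&mtx_lock, (uintptr_t)7);	/* pretend another CPU owns it */
	if (!obtain_lock(tid)) {
		contested = 1;	/* lock_profile_obtain_lock_failed() */
		/* the real macro recurses or enters _mtx_lock_spin() here */
		atomic_store(&mtx_lock, tid);	/* pretend the spin succeeded */
	}
	/* lock_profile_update_contest_locking() runs with the lock held */
	printf("acquired, contested = %d\n", contested);
	return (0);
}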