Add the OpenSolaris dtrace lockstat provider. The lockstat provider

adds probes for mutexes, reader/writer, and shared/exclusive locks to
gather contention statistics and other locking information for
DTrace scripts, the lockstat(1M) command, and other potential
consumers.

Reviewed by:	attilio jhb jb
Approved by:	gnn (mentor)
This commit is contained in:
Stacey Son 2009-05-26 20:28:22 +00:00
parent e8cdb7739f
commit a5aedd68b4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=192853
16 changed files with 967 additions and 96 deletions

View File

@ -122,6 +122,7 @@
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
@ -3168,14 +3169,11 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
uintptr_t rw;
} r;
#else
struct thread *lowner;
union {
struct mtx *mi;
uintptr_t mx;
} m;
union {
struct sx *si;
uintptr_t sx;
} s;
struct lock_object *li;
uintptr_t lx;
} l;
#endif
switch (subr) {
@ -3272,75 +3270,83 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
break;
#else
/*
* XXX - The following code works because mutex, rwlocks, & sxlocks
* all have similar data structures in FreeBSD. This may not be
* good if someone changes one of the lock data structures.
* Ideally, it would be nice if all these shared a common lock
* object.
*/
case DIF_SUBR_MUTEX_OWNED:
/* XXX - need to use dtrace_canload() and dtrace_loadptr() */
m.mx = tupregs[0].dttk_value;
#ifdef DOODAD
if (LO_CLASSINDEX(&(m.mi->lock_object)) < 2) {
regs[rd] = !(m.mi->mtx_lock & MTX_UNOWNED);
} else {
regs[rd] = !(m.mi->mtx_lock & SX_UNLOCKED);
if (!dtrace_canload(tupregs[0].dttk_value,
sizeof (struct lock_object), mstate, vstate)) {
regs[rd] = 0;
break;
}
#endif
l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
break;
case DIF_SUBR_MUTEX_OWNER:
/* XXX - need to use dtrace_canload() and dtrace_loadptr() */
m.mx = tupregs[0].dttk_value;
if (LO_CLASSINDEX(&(m.mi->lock_object)) < 2) {
regs[rd] = m.mi->mtx_lock & ~MTX_FLAGMASK;
} else {
if (!(m.mi->mtx_lock & SX_LOCK_SHARED))
regs[rd] = SX_OWNER(m.mi->mtx_lock);
else
regs[rd] = 0;
if (!dtrace_canload(tupregs[0].dttk_value,
sizeof (struct lock_object), mstate, vstate)) {
regs[rd] = 0;
break;
}
l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
regs[rd] = (uintptr_t)lowner;
break;
case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
/* XXX - need to use dtrace_canload() and dtrace_loadptr() */
m.mx = tupregs[0].dttk_value;
regs[rd] = (LO_CLASSINDEX(&(m.mi->lock_object)) != 0);
if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
mstate, vstate)) {
regs[rd] = 0;
break;
}
l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
/* XXX - should be only LC_SLEEPABLE? */
regs[rd] = (LOCK_CLASS(l.li)->lc_flags &
(LC_SLEEPLOCK | LC_SLEEPABLE)) != 0;
break;
case DIF_SUBR_MUTEX_TYPE_SPIN:
/* XXX - need to use dtrace_canload() and dtrace_loadptr() */
m.mx = tupregs[0].dttk_value;
regs[rd] = (LO_CLASSINDEX(&(m.mi->lock_object)) == 0);
if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
mstate, vstate)) {
regs[rd] = 0;
break;
}
l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0;
break;
case DIF_SUBR_RW_READ_HELD:
case DIF_SUBR_SX_SHARED_HELD:
/* XXX - need to use dtrace_canload() and dtrace_loadptr() */
s.sx = tupregs[0].dttk_value;
regs[rd] = ((s.si->sx_lock & SX_LOCK_SHARED) &&
(SX_OWNER(s.si->sx_lock) >> SX_SHARERS_SHIFT) != 0);
if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
mstate, vstate)) {
regs[rd] = 0;
break;
}
l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
lowner == NULL;
break;
case DIF_SUBR_RW_WRITE_HELD:
case DIF_SUBR_SX_EXCLUSIVE_HELD:
/* XXX - need to use dtrace_canload() and dtrace_loadptr() */
s.sx = tupregs[0].dttk_value;
regs[rd] = (SX_OWNER(s.si->sx_lock) == (uintptr_t) curthread);
if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
mstate, vstate)) {
regs[rd] = 0;
break;
}
l.lx = dtrace_loadptr(tupregs[0].dttk_value);
LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
regs[rd] = (lowner == curthread);
break;
case DIF_SUBR_RW_ISWRITER:
case DIF_SUBR_SX_ISEXCLUSIVE:
/* XXX - need to use dtrace_canload() and dtrace_loadptr() */
s.sx = tupregs[0].dttk_value;
regs[rd] = ((s.si->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS) ||
!(s.si->sx_lock & SX_LOCK_SHARED));
if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
mstate, vstate)) {
regs[rd] = 0;
break;
}
l.lx = dtrace_loadptr(tupregs[0].dttk_value);
regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
lowner != NULL;
break;
#endif /* ! defined(sun) */

View File

@ -0,0 +1,327 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*
* Portions Copyright (c) 2008-2009 Stacey Son <sson@FreeBSD.org>
*
* $FreeBSD$
*
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include "opt_kdtrace.h"
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/dtrace.h>
#include <sys/lockstat.h>
#if defined(__i386__) || defined(__amd64__)
#define LOCKSTAT_AFRAMES 1
#else
#error "architecture not supported"
#endif
static d_open_t lockstat_open;
static void lockstat_provide(void *, dtrace_probedesc_t *);
static void lockstat_destroy(void *, dtrace_id_t, void *);
static void lockstat_enable(void *, dtrace_id_t, void *);
static void lockstat_disable(void *, dtrace_id_t, void *);
static void lockstat_load(void *);
static int lockstat_unload(void);
typedef struct lockstat_probe {
char *lsp_func;
char *lsp_name;
int lsp_probe;
dtrace_id_t lsp_id;
#ifdef __FreeBSD__
int lsp_frame;
#endif
} lockstat_probe_t;
#ifdef __FreeBSD__
/*
 * Table of lockstat probes, one entry per (function, probe-name) pair.
 * lsp_probe indexes lockstat_probemap[]; lsp_id starts as DTRACE_IDNONE
 * and is filled in by lockstat_provide() when the probe is created.
 * The FreeBSD-specific lsp_frame field is the number of artificial
 * stack frames to skip for this probe (see LOCKSTAT_AFRAMES); the
 * adaptive-mutex entries skip one extra frame.
 */
lockstat_probe_t lockstat_probes[] =
{
        /* Spin Locks */
        { LS_MTX_SPIN_LOCK, LSS_ACQUIRE, LS_MTX_SPIN_LOCK_ACQUIRE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_MTX_SPIN_LOCK, LSS_SPIN, LS_MTX_SPIN_LOCK_SPIN,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_MTX_SPIN_UNLOCK, LSS_RELEASE, LS_MTX_SPIN_UNLOCK_RELEASE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        /* Adaptive Locks */
        { LS_MTX_LOCK, LSA_ACQUIRE, LS_MTX_LOCK_ACQUIRE,
            DTRACE_IDNONE, (LOCKSTAT_AFRAMES + 1) },
        { LS_MTX_LOCK, LSA_BLOCK, LS_MTX_LOCK_BLOCK,
            DTRACE_IDNONE, (LOCKSTAT_AFRAMES + 1) },
        { LS_MTX_LOCK, LSA_SPIN, LS_MTX_LOCK_SPIN,
            DTRACE_IDNONE, (LOCKSTAT_AFRAMES + 1) },
        { LS_MTX_UNLOCK, LSA_RELEASE, LS_MTX_UNLOCK_RELEASE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_MTX_TRYLOCK, LSA_ACQUIRE, LS_MTX_TRYLOCK_ACQUIRE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        /* Reader/Writer Locks */
        { LS_RW_RLOCK, LSR_ACQUIRE, LS_RW_RLOCK_ACQUIRE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_RW_RLOCK, LSR_BLOCK, LS_RW_RLOCK_BLOCK,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_RW_RLOCK, LSR_SPIN, LS_RW_RLOCK_SPIN,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_RW_RUNLOCK, LSR_RELEASE, LS_RW_RUNLOCK_RELEASE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_RW_WLOCK, LSR_ACQUIRE, LS_RW_WLOCK_ACQUIRE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_RW_WLOCK, LSR_BLOCK, LS_RW_WLOCK_BLOCK,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_RW_WLOCK, LSR_SPIN, LS_RW_WLOCK_SPIN,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_RW_WUNLOCK, LSR_RELEASE, LS_RW_WUNLOCK_RELEASE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_RW_TRYUPGRADE, LSR_UPGRADE, LS_RW_TRYUPGRADE_UPGRADE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_RW_DOWNGRADE, LSR_DOWNGRADE, LS_RW_DOWNGRADE_DOWNGRADE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        /* Shared/Exclusive Locks */
        { LS_SX_SLOCK, LSX_ACQUIRE, LS_SX_SLOCK_ACQUIRE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_SX_SLOCK, LSX_BLOCK, LS_SX_SLOCK_BLOCK,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_SX_SLOCK, LSX_SPIN, LS_SX_SLOCK_SPIN,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_SX_SUNLOCK, LSX_RELEASE, LS_SX_SUNLOCK_RELEASE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_SX_XLOCK, LSX_ACQUIRE, LS_SX_XLOCK_ACQUIRE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_SX_XLOCK, LSX_BLOCK, LS_SX_XLOCK_BLOCK,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_SX_XLOCK, LSX_SPIN, LS_SX_XLOCK_SPIN,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_SX_XUNLOCK, LSX_RELEASE, LS_SX_XUNLOCK_RELEASE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_SX_TRYUPGRADE, LSX_UPGRADE, LS_SX_TRYUPGRADE_UPGRADE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { LS_SX_DOWNGRADE, LSX_DOWNGRADE, LS_SX_DOWNGRADE_DOWNGRADE,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        /* Thread Locks */
        { LS_THREAD_LOCK, LST_SPIN, LS_THREAD_LOCK_SPIN,
            DTRACE_IDNONE, LOCKSTAT_AFRAMES },
        { NULL }        /* terminator for lockstat_provide() loop */
};
#else
#error "OS not supported"
#endif
/*
 * Character device switch for /dev/dtrace/lockstat.  Only open() is
 * implemented; it is a no-op stub (see lockstat_open()).
 */
static struct cdevsw lockstat_cdevsw = {
        .d_version = D_VERSION,
        .d_open = lockstat_open,
        .d_name = "lockstat",
};

/* Device node created by lockstat_load(), destroyed by lockstat_unload(). */
static struct cdev *lockstat_cdev;
/* Provider id assigned by dtrace_register() in lockstat_load(). */
static dtrace_provider_id_t lockstat_id;
/*ARGSUSED*/
/*
 * DTrace "enable" method: activate one lockstat probe by recording its
 * DTrace id in lockstat_probemap[] and pointing the lockstat hook at
 * dtrace_probe().  The membar_producer()/lockstat_hot_patch() sequence
 * from the OpenSolaris original is stubbed out under DOODAD (not yet
 * implemented on FreeBSD).
 */
static void
lockstat_enable(void *arg, dtrace_id_t id, void *parg)
{
        lockstat_probe_t *probe = parg;

        /* A probe must not be enabled twice. */
        ASSERT(!lockstat_probemap[probe->lsp_probe]);

        lockstat_probemap[probe->lsp_probe] = id;
#ifdef DOODAD
        membar_producer();
#endif

        lockstat_probe_func = dtrace_probe;
#ifdef DOODAD
        membar_producer();

        lockstat_hot_patch();
        membar_producer();
#endif
}
/*ARGSUSED*/
/*
 * DTrace "disable" method: deactivate one lockstat probe by clearing its
 * slot in lockstat_probemap[].  If any other lockstat probe remains
 * enabled we return immediately; otherwise the function simply falls
 * through (no global teardown is performed here).
 */
static void
lockstat_disable(void *arg, dtrace_id_t id, void *parg)
{
        lockstat_probe_t *probe = parg;
        int i;

        /* Only an enabled probe may be disabled. */
        ASSERT(lockstat_probemap[probe->lsp_probe]);

        lockstat_probemap[probe->lsp_probe] = 0;
#ifdef DOODAD
        lockstat_hot_patch();
        membar_producer();
#endif

        /*
         * See if we have any probes left enabled.
         */
        for (i = 0; i < LS_NPROBES; i++) {
                if (lockstat_probemap[i]) {
                        /*
                         * This probe is still enabled. We don't need to deal
                         * with waiting for all threads to be out of the
                         * lockstat critical sections; just return.
                         */
                        return;
                }
        }
}
/*ARGSUSED*/
/*
 * open(2) handler for /dev/dtrace/lockstat.  The device exists only so
 * that consumers can detect the provider; opening it always succeeds
 * and has no side effects.
 */
static int
lockstat_open(struct cdev *dev __unused, int oflags __unused,
    int devtype __unused, struct thread *td __unused)
{
        return (0);
}
/*ARGSUSED*/
/*
 * DTrace "provide" method: walk lockstat_probes[] and create each probe
 * under module "kernel", skipping entries that already exist.  On
 * FreeBSD the per-probe artificial frame count (lsp_frame) is passed to
 * dtrace_probe_create(); other platforms use the fixed LOCKSTAT_AFRAMES.
 */
static void
lockstat_provide(void *arg, dtrace_probedesc_t *desc)
{
        int i = 0;

        for (i = 0; lockstat_probes[i].lsp_func != NULL; i++) {
                lockstat_probe_t *probe = &lockstat_probes[i];

                /* Already created on a previous provide pass? */
                if (dtrace_probe_lookup(lockstat_id, "kernel",
                    probe->lsp_func, probe->lsp_name) != 0)
                        continue;

                ASSERT(!probe->lsp_id);
#ifdef __FreeBSD__
                probe->lsp_id = dtrace_probe_create(lockstat_id,
                    "kernel", probe->lsp_func, probe->lsp_name,
                    probe->lsp_frame, probe);
#else
                probe->lsp_id = dtrace_probe_create(lockstat_id,
                    "kernel", probe->lsp_func, probe->lsp_name,
                    LOCKSTAT_AFRAMES, probe);
#endif
        }
}
/*ARGSUSED*/
/*
 * DTrace "destroy" method: forget the probe id so that a later provide
 * pass may recreate it.  The probe must already be disabled.
 */
static void
lockstat_destroy(void *arg, dtrace_id_t id, void *parg)
{
        lockstat_probe_t *probe = parg;

        ASSERT(!lockstat_probemap[probe->lsp_probe]);
        probe->lsp_id = 0;
}
/*
 * Stability attributes for the lockstat provider.  NOTE(review): the
 * rows presumably follow the usual dtrace_pattr_t order (provider,
 * module, function, name, args) — confirm against the dtrace_pattr_t
 * definition.
 */
static dtrace_pattr_t lockstat_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
};
/*
 * Provider operations.  Only provide/enable/disable/destroy are
 * implemented; the remaining slots (presumably provide_module, suspend,
 * resume, getargdesc, getargval and usermode in dtrace_pops_t order —
 * confirm against the struct definition) are left NULL.
 */
static dtrace_pops_t lockstat_pops = {
        lockstat_provide,
        NULL,
        lockstat_enable,
        lockstat_disable,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        lockstat_destroy
};
/*
 * SYSINIT hook: create /dev/dtrace/lockstat and register the "lockstat"
 * DTrace provider.  If registration fails, the freshly created device
 * node is destroyed again so no stale /dev entry is left behind.
 */
static void
lockstat_load(void *dummy)
{
        /* Create the /dev/dtrace/lockstat entry. */
        lockstat_cdev = make_dev(&lockstat_cdevsw, 0, UID_ROOT, GID_WHEEL,
            0600, "dtrace/lockstat");

        if (dtrace_register("lockstat", &lockstat_attr, DTRACE_PRIV_USER,
            NULL, &lockstat_pops, NULL, &lockstat_id) != 0) {
                /* Undo the device creation; the provider is unusable. */
                destroy_dev(lockstat_cdev);
                lockstat_cdev = NULL;
        }
}
/*
 * SYSUNINIT hook: unregister the provider and remove the device node.
 *
 * Returns 0 on success, or the error from dtrace_unregister() (in which
 * case the device node is intentionally left in place, since the
 * provider is still registered).
 *
 * Note: the definition now uses (void) to match the forward declaration;
 * empty parentheses are an old-style (unprototyped) declarator in C.
 */
static int
lockstat_unload(void)
{
        int error;

        if ((error = dtrace_unregister(lockstat_id)) != 0)
                return (error);

        destroy_dev(lockstat_cdev);

        return (0);
}
/* ARGSUSED */
/*
 * Module event handler.  All real setup and teardown happens in the
 * SYSINIT/SYSUNINIT hooks (lockstat_load()/lockstat_unload()), so the
 * known events are simply acknowledged; anything else is unsupported.
 */
static int
lockstat_modevent(module_t mod __unused, int type, void *data __unused)
{

        switch (type) {
        case MOD_LOAD:
        case MOD_UNLOAD:
        case MOD_SHUTDOWN:
                return (0);
        default:
                return (EOPNOTSUPP);
        }
}
SYSINIT(lockstat_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, lockstat_load, NULL);
SYSUNINIT(lockstat_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, lockstat_unload, NULL);
DEV_MODULE(lockstat, lockstat_modevent, NULL);
MODULE_VERSION(lockstat, 1);
MODULE_DEPEND(lockstat, dtrace, 1, 1, 1);
MODULE_DEPEND(lockstat, opensolaris, 1, 1, 1);

View File

@ -1917,6 +1917,7 @@ kern/kern_ktrace.c standard
kern/kern_linker.c standard
kern/kern_lock.c standard
kern/kern_lockf.c standard
kern/kern_lockstat.c optional kdtrace_hooks
kern/kern_malloc.c standard
kern/kern_mbuf.c standard
kern/kern_mib.c standard

View File

@ -27,6 +27,7 @@
*/
#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@ -126,6 +127,9 @@ static void assert_lockmgr(struct lock_object *lock, int how);
static void db_show_lockmgr(struct lock_object *lock);
#endif
static void lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int owner_lockmgr(struct lock_object *lock, struct thread **owner);
#endif
static int unlock_lockmgr(struct lock_object *lock);
struct lock_class lock_class_lockmgr = {
@ -136,7 +140,10 @@ struct lock_class lock_class_lockmgr = {
.lc_ddb_show = db_show_lockmgr,
#endif
.lc_lock = lock_lockmgr,
.lc_unlock = unlock_lockmgr
.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
.lc_owner = owner_lockmgr,
#endif
};
static __inline struct thread *
@ -293,6 +300,15 @@ unlock_lockmgr(struct lock_object *lock)
panic("lockmgr locks do not support sleep interlocking");
}
#ifdef KDTRACE_HOOKS
/*
 * lc_owner method for the lockmgr lock class.  Owner inquiry is not
 * implemented for lockmgr locks, so any call is a caller bug.
 */
static int
owner_lockmgr(struct lock_object *lock, struct thread **owner)
{
        panic("lockmgr locks do not support owner inquiring");
}
#endif
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{

64
sys/kern/kern_lockstat.c Normal file
View File

@ -0,0 +1,64 @@
/*-
* Copyright 2008-2009 Stacey Son <sson@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Backend for the lock tracing (lockstat) kernel support. This is required
* to allow a module to load even though DTrace kernel support may not be
* present.
*
*/
#include "opt_kdtrace.h"
#ifdef KDTRACE_HOOKS
#include <sys/time.h>
#include <sys/types.h>
#include <sys/lockstat.h>
/*
 * The following must match the type definition of dtrace_probe. It is
 * defined this way to avoid having to rely on CDDL code.
 */
/* Map from lockstat probe index (LS_*) to the enabled DTrace probe id. */
uint32_t lockstat_probemap[LS_NPROBES];
/*
 * Hook the lockstat provider points at dtrace_probe() when a probe is
 * enabled; unused while no lockstat probes are active.
 */
void (*lockstat_probe_func)(uint32_t, uintptr_t, uintptr_t,
    uintptr_t, uintptr_t, uintptr_t);
/*
 * Return the current uptime in nanoseconds.
 *
 * binuptime() yields a struct bintime: whole seconds (bt.sec) plus a
 * 64-bit binary fraction of a second (bt.frac).  The seconds are scaled
 * directly by 10^9; for the fraction, only the top 32 bits are
 * multiplied by 10^9 and then shifted back down by 32, which converts
 * the binary fraction to nanoseconds without overflowing 64-bit
 * arithmetic.
 */
uint64_t
lockstat_nsecs(void)
{
        struct bintime bt;
        uint64_t ns;

        binuptime(&bt);
        ns = bt.sec * (uint64_t)1000000000;
        ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
        return (ns);
}
#endif /* KDTRACE_HOOKS */

View File

@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"
#include <sys/param.h>
@ -90,6 +91,9 @@ static void db_show_mtx(struct lock_object *lock);
#endif
static void lock_mtx(struct lock_object *lock, int how);
static void lock_spin(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int owner_mtx(struct lock_object *lock, struct thread **owner);
#endif
static int unlock_mtx(struct lock_object *lock);
static int unlock_spin(struct lock_object *lock);
@ -105,6 +109,9 @@ struct lock_class lock_class_mtx_sleep = {
#endif
.lc_lock = lock_mtx,
.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
.lc_name = "spin mutex",
@ -115,6 +122,9 @@ struct lock_class lock_class_mtx_spin = {
#endif
.lc_lock = lock_spin,
.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
.lc_owner = owner_mtx,
#endif
};
/*
@ -162,6 +172,17 @@ unlock_spin(struct lock_object *lock)
panic("spin locks can only use msleep_spin");
}
#ifdef KDTRACE_HOOKS
/*
 * lc_owner method for the mutex lock classes: report the owning thread
 * (if any) via *owner and return non-zero iff the mutex is currently
 * owned.  Marked static to match the forward declaration; it is only
 * referenced through the lock_class method tables in this file.
 */
static int
owner_mtx(struct lock_object *lock, struct thread **owner)
{
        struct mtx *m = (struct mtx *)lock;

        *owner = mtx_owner(m);
        return (mtx_unowned(m) == 0);
}
#endif
/*
* Function versions of the inlined __mtx_* macros. These are used by
* modules and can also be called from assembly language if needed.
@ -202,7 +223,7 @@ _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
mtx_assert(m, MA_OWNED);
if (m->mtx_recurse == 0)
lock_profile_release_lock(&m->lock_object);
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
_rel_sleep_lock(m, curthread, opts, file, line);
}
@ -280,8 +301,8 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
file, line);
curthread->td_locks++;
if (m->mtx_recurse == 0)
lock_profile_obtain_lock_success(&m->lock_object, contested,
waittime, file, line);
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
m, contested, waittime, file, line);
}
@ -310,6 +331,11 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
int contested = 0;
uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
uint64_t spin_cnt = 0;
uint64_t sleep_cnt = 0;
int64_t sleep_time = 0;
#endif
if (mtx_owned(m)) {
KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
@ -330,6 +356,9 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
while (!_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
/*
* If the owner is running on another CPU, spin until the
@ -344,8 +373,12 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
"%s: spinning on %p held by %p",
__func__, m, owner);
while (mtx_owner(m) == owner &&
TD_IS_RUNNING(owner))
TD_IS_RUNNING(owner)) {
cpu_spinwait();
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
}
continue;
}
}
@ -408,7 +441,14 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
/*
* Block on the turnstile.
*/
#ifdef KDTRACE_HOOKS
sleep_time -= lockstat_nsecs();
#endif
turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
sleep_time += lockstat_nsecs();
sleep_cnt++;
#endif
}
#ifdef KTR
if (cont_logged) {
@ -417,8 +457,18 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
m->lock_object.lo_name, (void *)tid, file, line);
}
#endif
lock_profile_obtain_lock_success(&m->lock_object, contested,
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
waittime, file, line);
#ifdef KDTRACE_HOOKS
if (sleep_time)
LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);
/*
* Only record the loops spinning and not sleeping.
*/
if (spin_cnt > sleep_cnt)
LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
#endif
}
static void
@ -482,8 +532,9 @@ _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
lock_profile_obtain_lock_success(&m->lock_object, contested,
waittime, (file), (line));
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
contested, waittime, (file), (line));
LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
#endif /* SMP */
@ -497,6 +548,9 @@ _thread_lock_flags(struct thread *td, int opts, const char *file, int line)
int contested = 0;
uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
uint64_t spin_cnt = 0;
#endif
i = 0;
tid = (uintptr_t)curthread;
@ -516,6 +570,9 @@ _thread_lock_flags(struct thread *td, int opts, const char *file, int line)
WITNESS_CHECKORDER(&m->lock_object,
opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
while (!_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
if (m->mtx_lock == tid) {
m->mtx_recurse++;
break;
@ -541,13 +598,17 @@ _thread_lock_flags(struct thread *td, int opts, const char *file, int line)
if (m == td->td_lock)
break;
_rel_spin_lock(m); /* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
}
if (m->mtx_recurse == 0)
lock_profile_obtain_lock_success(&m->lock_object, contested,
waittime, (file), (line));
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
m, contested, waittime, (file), (line));
LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
line);
WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}
struct mtx *

View File

@ -35,6 +35,7 @@
__FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -71,6 +72,9 @@ static __inline void compiler_memory_barrier(void) {
static void assert_rm(struct lock_object *lock, int what);
static void lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int owner_rm(struct lock_object *lock, struct thread **owner);
#endif
static int unlock_rm(struct lock_object *lock);
struct lock_class lock_class_rm = {
@ -84,6 +88,9 @@ struct lock_class lock_class_rm = {
#endif
.lc_lock = lock_rm,
.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
.lc_owner = owner_rm,
#endif
};
static void
@ -107,6 +114,15 @@ unlock_rm(struct lock_object *lock)
panic("unlock_rm called");
}
#ifdef KDTRACE_HOOKS
/*
 * lc_owner method for the rm (read-mostly) lock class.  Owner inquiry
 * is not implemented for rmlocks, so any call is a caller bug.
 */
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{
        panic("owner_rm called");
}
#endif
static struct mtx rm_spinlock;
MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

View File

@ -35,6 +35,7 @@
__FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_no_adaptive_rwlocks.h"
#include <sys/param.h>
@ -71,6 +72,9 @@ static void db_show_rwlock(struct lock_object *lock);
#endif
static void assert_rw(struct lock_object *lock, int what);
static void lock_rw(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int owner_rw(struct lock_object *lock, struct thread **owner);
#endif
static int unlock_rw(struct lock_object *lock);
struct lock_class lock_class_rw = {
@ -82,6 +86,9 @@ struct lock_class lock_class_rw = {
#endif
.lc_lock = lock_rw,
.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
.lc_owner = owner_rw,
#endif
};
/*
@ -149,6 +156,19 @@ unlock_rw(struct lock_object *lock)
}
}
#ifdef KDTRACE_HOOKS
/*
 * lc_owner method for rwlocks.  For a write-locked lock, *owner is the
 * owning thread (rw_wowner()) and the result is non-zero; for a
 * read-locked lock there is no single owner, so *owner is NULL and the
 * result says whether any readers hold the lock.  Marked static to
 * match the forward declaration.
 */
static int
owner_rw(struct lock_object *lock, struct thread **owner)
{
        struct rwlock *rw = (struct rwlock *)lock;
        uintptr_t x = rw->rw_lock;

        *owner = rw_wowner(rw);
        return ((x & RW_LOCK_READ) != 0 ? (RW_READERS(x) != 0) :
            (*owner != NULL));
}
#endif
void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
{
@ -258,7 +278,7 @@ _rw_wunlock(struct rwlock *rw, const char *file, int line)
LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
line);
if (!rw_recursed(rw))
lock_profile_release_lock(&rw->lock_object);
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_WUNLOCK_RELEASE, rw);
__rw_wunlock(rw, curthread, file, line);
}
/*
@ -287,6 +307,11 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
int contested = 0;
#endif
uintptr_t v;
#ifdef KDTRACE_HOOKS
uint64_t spin_cnt = 0;
uint64_t sleep_cnt = 0;
int64_t sleep_time = 0;
#endif
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
@ -296,6 +321,9 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
for (;;) {
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
/*
* Handle the easy case. If no other thread has a write
* lock, then try to bump up the count of read locks. Note
@ -342,8 +370,12 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
"%s: spinning on %p held by %p",
__func__, rw, owner);
while ((struct thread*)RW_OWNER(rw->rw_lock) ==
owner && TD_IS_RUNNING(owner))
owner && TD_IS_RUNNING(owner)) {
cpu_spinwait();
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
}
continue;
}
} else if (spintries < rowner_retries) {
@ -423,7 +455,14 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
rw);
#ifdef KDTRACE_HOOKS
sleep_time -= lockstat_nsecs();
#endif
turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
sleep_time += lockstat_nsecs();
sleep_cnt++;
#endif
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
__func__, rw);
@ -434,12 +473,22 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
* however. turnstiles don't like owners changing between calls to
* turnstile_wait() currently.
*/
lock_profile_obtain_lock_success( &rw->lock_object, contested,
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE, rw, contested,
waittime, file, line);
LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
WITNESS_LOCK(&rw->lock_object, 0, file, line);
curthread->td_locks++;
curthread->td_rw_rlocks++;
#ifdef KDTRACE_HOOKS
if (sleep_time)
LOCKSTAT_RECORD1(LS_RW_RLOCK_BLOCK, rw, sleep_time);
/*
* Record only the loops spinning and not sleeping.
*/
if (spin_cnt > sleep_cnt)
LOCKSTAT_RECORD1(LS_RW_RLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
#endif
}
int
@ -569,7 +618,7 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
turnstile_chain_unlock(&rw->lock_object);
break;
}
lock_profile_release_lock(&rw->lock_object);
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_RUNLOCK_RELEASE, rw);
}
/*
@ -591,6 +640,11 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
uint64_t waittime = 0;
int contested = 0;
#endif
#ifdef KDTRACE_HOOKS
uint64_t spin_cnt = 0;
uint64_t sleep_cnt = 0;
int64_t sleep_time = 0;
#endif
if (rw_wlocked(rw)) {
KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
@ -607,6 +661,9 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
while (!_rw_write_lock(rw, tid)) {
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
lock_profile_obtain_lock_failed(&rw->lock_object,
&contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
@ -622,8 +679,12 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
__func__, rw, owner);
while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
TD_IS_RUNNING(owner))
TD_IS_RUNNING(owner)) {
cpu_spinwait();
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
}
continue;
}
if ((v & RW_LOCK_READ) && RW_READERS(v) &&
@ -641,6 +702,9 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
break;
cpu_spinwait();
}
#ifdef KDTRACE_HOOKS
spin_cnt += rowner_loops - i;
#endif
if (i != rowner_loops)
continue;
}
@ -706,7 +770,14 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
rw);
#ifdef KDTRACE_HOOKS
sleep_time -= lockstat_nsecs();
#endif
turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
sleep_time += lockstat_nsecs();
sleep_cnt++;
#endif
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
__func__, rw);
@ -714,8 +785,18 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
spintries = 0;
#endif
}
lock_profile_obtain_lock_success(&rw->lock_object, contested, waittime,
file, line);
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested,
waittime, file, line);
#ifdef KDTRACE_HOOKS
if (sleep_time)
LOCKSTAT_RECORD1(LS_RW_WLOCK_BLOCK, rw, sleep_time);
/*
* Record only the loops spinning and not sleeping.
*/
if (spin_cnt > sleep_cnt)
LOCKSTAT_RECORD1(LS_RW_WLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
#endif
}
/*
@ -847,6 +928,7 @@ _rw_try_upgrade(struct rwlock *rw, const char *file, int line)
curthread->td_rw_rlocks--;
WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, rw);
}
return (success);
}
@ -912,6 +994,7 @@ _rw_downgrade(struct rwlock *rw, const char *file, int line)
out:
curthread->td_rw_rlocks++;
LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, rw);
}
#ifdef INVARIANT_SUPPORT

View File

@ -38,6 +38,7 @@
#include "opt_adaptive_sx.h"
#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@ -109,6 +110,9 @@ static void assert_sx(struct lock_object *lock, int what);
static void db_show_sx(struct lock_object *lock);
#endif
static void lock_sx(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int owner_sx(struct lock_object *lock, struct thread **owner);
#endif
static int unlock_sx(struct lock_object *lock);
struct lock_class lock_class_sx = {
@ -120,6 +124,9 @@ struct lock_class lock_class_sx = {
#endif
.lc_lock = lock_sx,
.lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
.lc_owner = owner_sx,
#endif
};
#ifndef INVARIANTS
@ -161,6 +168,19 @@ unlock_sx(struct lock_object *lock)
}
}
#ifdef KDTRACE_HOOKS
/*
 * lc_owner method for sx locks.  For an exclusively held lock, *owner
 * is the owning thread and the result is non-zero; for a shared lock
 * there is no single owner, so the result says whether any shared
 * holders exist.  Marked static to match the forward declaration.
 */
static int
owner_sx(struct lock_object *lock, struct thread **owner)
{
        struct sx *sx = (struct sx *)lock;
        uintptr_t x = sx->sx_lock;

        *owner = (struct thread *)SX_OWNER(x);
        return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
            (*owner != NULL));
}
#endif
void
sx_sysinit(void *arg)
{
@ -304,7 +324,7 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
__sx_sunlock(sx, file, line);
lock_profile_release_lock(&sx->lock_object);
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
}
void
@ -320,7 +340,7 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
line);
if (!sx_recursed(sx))
lock_profile_release_lock(&sx->lock_object);
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
__sx_xunlock(sx, curthread, file, line);
}
@ -348,9 +368,11 @@ _sx_try_upgrade(struct sx *sx, const char *file, int line)
success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
(uintptr_t)curthread | x);
LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
if (success)
if (success) {
WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
LOCKSTAT_RECORD0(LS_SX_TRYUPGRADE_UPGRADE, sx);
}
return (success);
}
@ -412,6 +434,7 @@ _sx_downgrade(struct sx *sx, const char *file, int line)
sleepq_release(&sx->lock_object);
LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);
if (wakeup_swapper)
kick_proc0();
@ -437,6 +460,11 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
int contested = 0;
#endif
int error = 0;
#ifdef KDTRACE_HOOKS
uint64_t spin_cnt = 0;
uint64_t sleep_cnt = 0;
int64_t sleep_time = 0;
#endif
/* If we already hold an exclusive lock, then recurse. */
if (sx_xlocked(sx)) {
@ -455,6 +483,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
&waittime);
#ifdef ADAPTIVE_SX
@ -475,8 +506,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
__func__, sx, owner);
GIANT_SAVE();
while (SX_OWNER(sx->sx_lock) == x &&
TD_IS_RUNNING(owner))
TD_IS_RUNNING(owner)) {
cpu_spinwait();
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
}
continue;
}
}
@ -559,6 +594,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
__func__, sx);
#ifdef KDTRACE_HOOKS
sleep_time -= lockstat_nsecs();
#endif
GIANT_SAVE();
sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
@ -567,7 +605,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
sleepq_wait(&sx->lock_object, 0);
else
error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
sleep_time += lockstat_nsecs();
sleep_cnt++;
#endif
if (error) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK,
@ -582,8 +623,14 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
GIANT_RESTORE();
if (!error)
lock_profile_obtain_lock_success(&sx->lock_object, contested,
waittime, file, line);
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
if (sleep_time)
LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
if (spin_cnt > sleep_cnt)
LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
#endif
return (error);
}
@ -661,12 +708,20 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
#endif
uintptr_t x;
int error = 0;
#ifdef KDTRACE_HOOKS
uint64_t spin_cnt = 0;
uint64_t sleep_cnt = 0;
int64_t sleep_time = 0;
#endif
/*
* As with rwlocks, we don't make any attempt to try to block
* shared locks once there is an exclusive waiter.
*/
for (;;) {
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
x = sx->sx_lock;
/*
@ -707,8 +762,12 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
__func__, sx, owner);
GIANT_SAVE();
while (SX_OWNER(sx->sx_lock) == x &&
TD_IS_RUNNING(owner))
TD_IS_RUNNING(owner)) {
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
cpu_spinwait();
}
continue;
}
}
@ -770,6 +829,9 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
__func__, sx);
#ifdef KDTRACE_HOOKS
sleep_time -= lockstat_nsecs();
#endif
GIANT_SAVE();
sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
@ -778,7 +840,10 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
sleepq_wait(&sx->lock_object, 0);
else
error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
sleep_time += lockstat_nsecs();
sleep_cnt++;
#endif
if (error) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK,
@ -791,9 +856,14 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
__func__, sx);
}
if (error == 0)
lock_profile_obtain_lock_success(&sx->lock_object, contested,
waittime, file, line);
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	/*
	 * Record block/spin statistics against the *shared*-lock probes:
	 * this is _sx_slock_hard (note LS_SX_SLOCK_ACQUIRE just above).
	 * The previous code fired LS_SX_XLOCK_BLOCK/LS_SX_XLOCK_SPIN,
	 * a copy-paste from _sx_xlock_hard that misattributed shared-lock
	 * contention to the exclusive path.
	 */
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_SX_SLOCK_BLOCK, sx, sleep_time);
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_SX_SLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
#endif
GIANT_RESTORE();
return (error);
}

View File

@ -8,6 +8,7 @@ SUBDIR= dtmalloc \
dtrace \
dtraceall \
dtrace_test \
lockstat \
profile \
prototype \
sdt \

View File

@ -69,6 +69,7 @@ MODULE_DEPEND(dtraceall, dtnfsclient, 1, 1, 1);
#if defined(__amd64__) || defined(__i386__)
MODULE_DEPEND(dtraceall, fbt, 1, 1, 1);
#endif
MODULE_DEPEND(dtraceall, lockstat, 1, 1, 1);
MODULE_DEPEND(dtraceall, sdt, 1, 1, 1);
MODULE_DEPEND(dtraceall, systrace, 1, 1, 1);
MODULE_DEPEND(dtraceall, profile, 1, 1, 1);

View File

@ -61,6 +61,7 @@ struct lock_class {
void (*lc_assert)(struct lock_object *lock, int what);
void (*lc_ddb_show)(struct lock_object *lock);
void (*lc_lock)(struct lock_object *lock, int how);
int (*lc_owner)(struct lock_object *lock, struct thread **owner);
int (*lc_unlock)(struct lock_object *lock);
};

220
sys/sys/lockstat.h Normal file
View File

@ -0,0 +1,220 @@
/*-
* Copyright (c) 2008-2009 Stacey Son <sson@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* DTrace lockstat provider definitions
*
*/
#ifndef _SYS_LOCKSTAT_H
#define _SYS_LOCKSTAT_H
#ifdef _KERNEL
/*
 * Probe identifiers for the lockstat DTrace provider.  These index
 * lockstat_probemap[] below, so they must form a dense range
 * 0 .. LS_NPROBES-1 with no gaps.
 */
/*
 * Spin Locks
 */
#define LS_MTX_SPIN_LOCK_ACQUIRE 0
#define LS_MTX_SPIN_UNLOCK_RELEASE 1
#define LS_MTX_SPIN_LOCK_SPIN 2
/*
 * Adaptive Locks
 */
#define LS_MTX_LOCK_ACQUIRE 3
#define LS_MTX_UNLOCK_RELEASE 4
#define LS_MTX_LOCK_SPIN 5
#define LS_MTX_LOCK_BLOCK 6
#define LS_MTX_TRYLOCK_ACQUIRE 7
/*
 * Reader/Writer Locks
 */
#define LS_RW_RLOCK_ACQUIRE 8
#define LS_RW_RUNLOCK_RELEASE 9
#define LS_RW_WLOCK_ACQUIRE 10
#define LS_RW_WUNLOCK_RELEASE 11
#define LS_RW_RLOCK_SPIN 12
#define LS_RW_RLOCK_BLOCK 13
#define LS_RW_WLOCK_SPIN 14
#define LS_RW_WLOCK_BLOCK 15
#define LS_RW_TRYUPGRADE_UPGRADE 16
#define LS_RW_DOWNGRADE_DOWNGRADE 17
/*
 * Shared/Exclusive Locks
 */
#define LS_SX_SLOCK_ACQUIRE 18
#define LS_SX_SUNLOCK_RELEASE 19
#define LS_SX_XLOCK_ACQUIRE 20
#define LS_SX_XUNLOCK_RELEASE 21
#define LS_SX_SLOCK_SPIN 22
#define LS_SX_SLOCK_BLOCK 23
#define LS_SX_XLOCK_SPIN 24
#define LS_SX_XLOCK_BLOCK 25
#define LS_SX_TRYUPGRADE_UPGRADE 26
#define LS_SX_DOWNGRADE_DOWNGRADE 27
/*
 * Thread Locks
 */
#define LS_THREAD_LOCK_SPIN 28
/*
 * Lockmanager Locks
 * According to locking(9) Lockmgr locks are "Largely deprecated"
 * so no support for these have been added in the lockstat provider.
 */
#define LS_NPROBES 29
/*
 * Probe function-name components (the lock operation in which the
 * event occurred), used when registering probes with DTrace.
 */
#define LS_MTX_LOCK "mtx_lock"
#define LS_MTX_UNLOCK "mtx_unlock"
#define LS_MTX_SPIN_LOCK "mtx_lock_spin"
#define LS_MTX_SPIN_UNLOCK "mtx_unlock_spin"
#define LS_MTX_TRYLOCK "mtx_trylock"
#define LS_RW_RLOCK "rw_rlock"
#define LS_RW_WLOCK "rw_wlock"
#define LS_RW_RUNLOCK "rw_runlock"
#define LS_RW_WUNLOCK "rw_wunlock"
#define LS_RW_TRYUPGRADE "rw_try_upgrade"
#define LS_RW_DOWNGRADE "rw_downgrade"
#define LS_SX_SLOCK "sx_slock"
#define LS_SX_XLOCK "sx_xlock"
#define LS_SX_SUNLOCK "sx_sunlock"
#define LS_SX_XUNLOCK "sx_xunlock"
#define LS_SX_TRYUPGRADE "sx_try_upgrade"
#define LS_SX_DOWNGRADE "sx_downgrade"
#define LS_THREAD_LOCK "thread_lock"
/* Event-kind name components. */
#define LS_ACQUIRE "acquire"
#define LS_RELEASE "release"
#define LS_SPIN "spin"
#define LS_BLOCK "block"
#define LS_UPGRADE "upgrade"
#define LS_DOWNGRADE "downgrade"
/* Lock-type name components. */
#define LS_TYPE_ADAPTIVE "adaptive"
#define LS_TYPE_SPIN "spin"
#define LS_TYPE_THREAD "thread"
#define LS_TYPE_RW "rw"
#define LS_TYPE_SX "sx"
/*
 * Fully-composed probe names, e.g. "adaptive-acquire", "sx-block",
 * built by string-literal concatenation of the components above.
 */
#define LSA_ACQUIRE (LS_TYPE_ADAPTIVE "-" LS_ACQUIRE)
#define LSA_RELEASE (LS_TYPE_ADAPTIVE "-" LS_RELEASE)
#define LSA_SPIN (LS_TYPE_ADAPTIVE "-" LS_SPIN)
#define LSA_BLOCK (LS_TYPE_ADAPTIVE "-" LS_BLOCK)
#define LSS_ACQUIRE (LS_TYPE_SPIN "-" LS_ACQUIRE)
#define LSS_RELEASE (LS_TYPE_SPIN "-" LS_RELEASE)
#define LSS_SPIN (LS_TYPE_SPIN "-" LS_SPIN)
#define LSR_ACQUIRE (LS_TYPE_RW "-" LS_ACQUIRE)
#define LSR_RELEASE (LS_TYPE_RW "-" LS_RELEASE)
#define LSR_BLOCK (LS_TYPE_RW "-" LS_BLOCK)
#define LSR_SPIN (LS_TYPE_RW "-" LS_SPIN)
#define LSR_UPGRADE (LS_TYPE_RW "-" LS_UPGRADE)
#define LSR_DOWNGRADE (LS_TYPE_RW "-" LS_DOWNGRADE)
#define LSX_ACQUIRE (LS_TYPE_SX "-" LS_ACQUIRE)
#define LSX_RELEASE (LS_TYPE_SX "-" LS_RELEASE)
#define LSX_BLOCK (LS_TYPE_SX "-" LS_BLOCK)
#define LSX_SPIN (LS_TYPE_SX "-" LS_SPIN)
#define LSX_UPGRADE (LS_TYPE_SX "-" LS_UPGRADE)
#define LSX_DOWNGRADE (LS_TYPE_SX "-" LS_DOWNGRADE)
#define LST_SPIN (LS_TYPE_THREAD "-" LS_SPIN)
/*
 * The following must match the type definition of dtrace_probe. It is
 * defined this way to avoid having to rely on CDDL code.
 */
/* Per-probe enable map: nonzero (the DTrace probe ID) while enabled. */
extern uint32_t lockstat_probemap[LS_NPROBES];
typedef void (*lockstat_probe_func_t)(uint32_t, uintptr_t arg0, uintptr_t arg1,
    uintptr_t arg2, uintptr_t arg3, uintptr_t arg4);
/* Hook installed by the lockstat provider module; fires the probe. */
extern lockstat_probe_func_t lockstat_probe_func;
/* High-resolution timestamp source used for block/spin timing. */
extern uint64_t lockstat_nsecs(void);
#ifdef KDTRACE_HOOKS
/*
 * Macros to record lockstat probes.
 */
/*
 * Fire probe `probe' for lock `lp' with up to four payload arguments.
 * lockstat_probemap[probe] is nonzero only while the DTrace probe is
 * enabled, so the disabled fast path costs one load and one branch.
 */
#define LOCKSTAT_RECORD4(probe, lp, arg1, arg2, arg3, arg4) do { \
	uint32_t id; \
 \
	if ((id = lockstat_probemap[(probe)])) \
		(*lockstat_probe_func)(id, (uintptr_t)(lp), (arg1), (arg2), \
		    (arg3), (arg4)); \
} while (0)
/* NOTE(review): LOCKSTAT_RECORD is an exact alias of LOCKSTAT_RECORD1. */
#define LOCKSTAT_RECORD(probe, lp, arg1) \
	LOCKSTAT_RECORD4(probe, lp, arg1, 0, 0, 0)
#define LOCKSTAT_RECORD0(probe, lp) \
	LOCKSTAT_RECORD4(probe, lp, 0, 0, 0, 0)
#define LOCKSTAT_RECORD1(probe, lp, arg1) \
	LOCKSTAT_RECORD4(probe, lp, arg1, 0, 0, 0)
#define LOCKSTAT_RECORD2(probe, lp, arg1, arg2) \
	LOCKSTAT_RECORD4(probe, lp, arg1, arg2, 0, 0)
#define LOCKSTAT_RECORD3(probe, lp, arg1, arg2, arg3) \
	LOCKSTAT_RECORD4(probe, lp, arg1, arg2, arg3, 0)
/*
 * Combined lock_profile(9) + lockstat hooks for the acquire/release
 * fast paths: always run the lock-profiling call, then fire the
 * lockstat probe if it is enabled.
 */
#define LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) do { \
	uint32_t id; \
 \
	lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l); \
	if ((id = lockstat_probemap[(probe)])) \
		(*lockstat_probe_func)(id, (uintptr_t)(lp), 0, 0, 0, 0); \
} while (0)
#define LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) do { \
	uint32_t id; \
 \
	lock_profile_release_lock(&(lp)->lock_object); \
	if ((id = lockstat_probemap[(probe)])) \
		(*lockstat_probe_func)(id, (uintptr_t)(lp), 0, 0, 0, 0); \
} while (0)
#else /* !KDTRACE_HOOKS */
/* Without KDTRACE_HOOKS the probes compile away; only lock_profile remains. */
#define LOCKSTAT_RECORD(probe, lp, arg1)
#define LOCKSTAT_RECORD0(probe, lp)
#define LOCKSTAT_RECORD1(probe, lp, arg1)
#define LOCKSTAT_RECORD2(probe, lp, arg1, arg2)
#define LOCKSTAT_RECORD3(probe, lp, arg1, arg2, arg3)
#define LOCKSTAT_RECORD4(probe, lp, arg1, arg2, arg3, arg4)
#define LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) \
	lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l)
#define LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) \
	lock_profile_release_lock(&(lp)->lock_object)
#endif /* !KDTRACE_HOOKS */
#endif /* _KERNEL */
#endif /* _SYS_LOCKSTAT_H */

View File

@ -40,6 +40,7 @@
#ifdef _KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#endif /* _KERNEL_ */
@ -166,11 +167,11 @@ void _thread_lock_flags(struct thread *, int, const char *, int);
#ifndef _get_sleep_lock
#define _get_sleep_lock(mp, tid, opts, file, line) do { \
uintptr_t _tid = (uintptr_t)(tid); \
if (!_obtain_lock((mp), _tid)) { \
if (!_obtain_lock((mp), _tid)) \
_mtx_lock_sleep((mp), _tid, (opts), (file), (line)); \
} else \
lock_profile_obtain_lock_success(&(mp)->lock_object, 0, \
0, (file), (line)); \
else \
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, \
mp, 0, 0, (file), (line)); \
} while (0)
#endif
@ -193,8 +194,8 @@ void _thread_lock_flags(struct thread *, int, const char *, int);
_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
} \
} else \
lock_profile_obtain_lock_success(&(mp)->lock_object, 0, \
0, (file), (line)); \
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, \
mp, 0, 0, (file), (line)); \
} while (0)
#else /* SMP */
#define _get_spin_lock(mp, tid, opts, file, line) do { \
@ -240,7 +241,8 @@ void _thread_lock_flags(struct thread *, int, const char *, int);
if (mtx_recursed((mp))) \
(mp)->mtx_recurse--; \
else { \
lock_profile_release_lock(&(mp)->lock_object); \
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_SPIN_UNLOCK_RELEASE, \
mp); \
_release_lock_quick((mp)); \
} \
spinlock_exit(); \

View File

@ -35,6 +35,7 @@
#include <sys/_lock.h>
#include <sys/_rwlock.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#ifdef _KERNEL
#include <sys/pcpu.h>
@ -107,9 +108,9 @@
\
if (!_rw_write_lock((rw), _tid)) \
_rw_wlock_hard((rw), _tid, (file), (line)); \
else \
lock_profile_obtain_lock_success(&(rw)->lock_object, 0, \
0, (file), (line)); \
else \
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, \
rw, 0, 0, (file), (line)); \
} while (0)
/* Release a write lock. */

View File

@ -38,6 +38,7 @@
#ifdef _KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#include <machine/atomic.h>
#endif
@ -152,9 +153,9 @@ __sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,
if (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
error = _sx_xlock_hard(sx, tid, opts, file, line);
else
lock_profile_obtain_lock_success(&sx->lock_object, 0, 0, file,
line);
else
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE,
sx, 0, 0, file, line);
return (error);
}
@ -180,8 +181,8 @@ __sx_slock(struct sx *sx, int opts, const char *file, int line)
!atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER))
error = _sx_slock_hard(sx, opts, file, line);
else
lock_profile_obtain_lock_success(&sx->lock_object, 0, 0, file,
line);
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx, 0,
0, file, line);
return (error);
}