Implement the lockstat provider using SDT(9) instead of the custom provider
in lockstat.ko. This means that lockstat probes now have typed arguments and
will utilize SDT probe hot-patching support when it arrives.

Reviewed by:	gnn
Differential Revision:	https://reviews.freebsd.org/D2993
This commit is contained in:
Mark Johnston 2015-07-19 22:14:09 +00:00
parent dbfbf7ae92
commit 32cd0147fa
20 changed files with 153 additions and 602 deletions

@ -22,7 +22,6 @@ options KDTRACE_HOOKS
#device dtrace
# DTrace modules
#device dtrace_lockstat
#device dtrace_profile
#device dtrace_sdt
#device dtrace_fbt

@ -32,7 +32,7 @@ makeoptions MODULES_EXTRA="dtb/am335x"
options KDTRACE_HOOKS # Kernel DTrace hooks
options DDB_CTF # all architectures - kernel ELF linker loads CTF data
makeoptions WITH_CTF=1
makeoptions MODULES_EXTRA+="opensolaris dtrace dtrace/lockstat dtrace/profile dtrace/fbt"
makeoptions MODULES_EXTRA+="opensolaris dtrace dtrace/profile dtrace/fbt"
options HZ=100
options SCHED_4BSD # 4BSD scheduler

@ -98,7 +98,6 @@ options KDTRACE_HOOKS
#device dtrace
# DTrace modules
#device dtrace_lockstat
#device dtrace_profile
#device dtrace_sdt
#device dtrace_fbt

@ -1,330 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*
* Portions Copyright (c) 2008-2009 Stacey Son <sson@FreeBSD.org>
*
* $FreeBSD$
*
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/dtrace.h>
#include <sys/lockstat.h>
#if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
defined(__i386__) || defined(__mips__) || defined(__powerpc__)
#define LOCKSTAT_AFRAMES 1
#else
#error "architecture not supported"
#endif
/* Device open handler for /dev/dtrace/lockstat. */
static d_open_t lockstat_open;
/* DTrace provider ops (dtrace_pops_t) implemented by this provider. */
static void lockstat_provide(void *, dtrace_probedesc_t *);
static void lockstat_destroy(void *, dtrace_id_t, void *);
static void lockstat_enable(void *, dtrace_id_t, void *);
static void lockstat_disable(void *, dtrace_id_t, void *);
/* Module load/unload hooks driven by SYSINIT/SYSUNINIT below. */
static void lockstat_load(void *);
static int lockstat_unload(void);
/*
 * One entry per lockstat probe.  lsp_func/lsp_name form the DTrace
 * function/name components, lsp_probe indexes lockstat_probemap[], and
 * lsp_id holds the id assigned by dtrace_probe_create().
 */
typedef struct lockstat_probe {
char *lsp_func;
char *lsp_name;
int lsp_probe;
dtrace_id_t lsp_id;
#ifdef __FreeBSD__
/* Number of artificial stack frames to skip when walking the stack. */
int lsp_frame;
#endif
} lockstat_probe_t;
#ifdef __FreeBSD__
/*
 * Static table of every lockstat probe, keyed by the LS_* constants from
 * sys/lockstat.h.  The mtx_lock entries use an extra aframe because the
 * acquisition path has one more level of call nesting.
 */
lockstat_probe_t lockstat_probes[] =
{
/* Spin Locks */
{ LS_MTX_SPIN_LOCK, LSS_ACQUIRE, LS_MTX_SPIN_LOCK_ACQUIRE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_MTX_SPIN_LOCK, LSS_SPIN, LS_MTX_SPIN_LOCK_SPIN,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_MTX_SPIN_UNLOCK, LSS_RELEASE, LS_MTX_SPIN_UNLOCK_RELEASE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
/* Adaptive Locks */
{ LS_MTX_LOCK, LSA_ACQUIRE, LS_MTX_LOCK_ACQUIRE,
DTRACE_IDNONE, (LOCKSTAT_AFRAMES + 1) },
{ LS_MTX_LOCK, LSA_BLOCK, LS_MTX_LOCK_BLOCK,
DTRACE_IDNONE, (LOCKSTAT_AFRAMES + 1) },
{ LS_MTX_LOCK, LSA_SPIN, LS_MTX_LOCK_SPIN,
DTRACE_IDNONE, (LOCKSTAT_AFRAMES + 1) },
{ LS_MTX_UNLOCK, LSA_RELEASE, LS_MTX_UNLOCK_RELEASE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_MTX_TRYLOCK, LSA_ACQUIRE, LS_MTX_TRYLOCK_ACQUIRE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
/* Reader/Writer Locks */
{ LS_RW_RLOCK, LSR_ACQUIRE, LS_RW_RLOCK_ACQUIRE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_RW_RLOCK, LSR_BLOCK, LS_RW_RLOCK_BLOCK,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_RW_RLOCK, LSR_SPIN, LS_RW_RLOCK_SPIN,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_RW_RUNLOCK, LSR_RELEASE, LS_RW_RUNLOCK_RELEASE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_RW_WLOCK, LSR_ACQUIRE, LS_RW_WLOCK_ACQUIRE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_RW_WLOCK, LSR_BLOCK, LS_RW_WLOCK_BLOCK,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_RW_WLOCK, LSR_SPIN, LS_RW_WLOCK_SPIN,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_RW_WUNLOCK, LSR_RELEASE, LS_RW_WUNLOCK_RELEASE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_RW_TRYUPGRADE, LSR_UPGRADE, LS_RW_TRYUPGRADE_UPGRADE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_RW_DOWNGRADE, LSR_DOWNGRADE, LS_RW_DOWNGRADE_DOWNGRADE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
/* Shared/Exclusive Locks */
{ LS_SX_SLOCK, LSX_ACQUIRE, LS_SX_SLOCK_ACQUIRE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_SX_SLOCK, LSX_BLOCK, LS_SX_SLOCK_BLOCK,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_SX_SLOCK, LSX_SPIN, LS_SX_SLOCK_SPIN,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_SX_SUNLOCK, LSX_RELEASE, LS_SX_SUNLOCK_RELEASE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_SX_XLOCK, LSX_ACQUIRE, LS_SX_XLOCK_ACQUIRE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_SX_XLOCK, LSX_BLOCK, LS_SX_XLOCK_BLOCK,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_SX_XLOCK, LSX_SPIN, LS_SX_XLOCK_SPIN,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_SX_XUNLOCK, LSX_RELEASE, LS_SX_XUNLOCK_RELEASE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_SX_TRYUPGRADE, LSX_UPGRADE, LS_SX_TRYUPGRADE_UPGRADE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
{ LS_SX_DOWNGRADE, LSX_DOWNGRADE, LS_SX_DOWNGRADE_DOWNGRADE,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
/* Thread Locks */
{ LS_THREAD_LOCK, LST_SPIN, LS_THREAD_LOCK_SPIN,
DTRACE_IDNONE, LOCKSTAT_AFRAMES },
/* NULL lsp_func terminates the table (see lockstat_provide()). */
{ NULL }
};
#else
#error "OS not supported"
#endif
/* Character device switch backing /dev/dtrace/lockstat. */
static struct cdevsw lockstat_cdevsw = {
.d_version = D_VERSION,
.d_open = lockstat_open,
.d_name = "lockstat",
};
/* Device node created in lockstat_load(), destroyed in lockstat_unload(). */
static struct cdev *lockstat_cdev;
/* Provider id handed back by dtrace_register(). */
static dtrace_provider_id_t lockstat_id;
/*ARGSUSED*/
/*
 * Provider ops: enable the probe described by 'parg'.  Publishes the DTrace
 * probe id in lockstat_probemap[] (which the in-kernel LOCKSTAT_* macros
 * consult) and points lockstat_probe_func at dtrace_probe.  The DOODAD
 * sections are Solaris hot-patch/membar code not built on FreeBSD.
 */
static void
lockstat_enable(void *arg, dtrace_id_t id, void *parg)
{
lockstat_probe_t *probe = parg;
ASSERT(!lockstat_probemap[probe->lsp_probe]);
lockstat_enabled++;
lockstat_probemap[probe->lsp_probe] = id;
#ifdef DOODAD
membar_producer();
#endif
lockstat_probe_func = dtrace_probe;
#ifdef DOODAD
membar_producer();
lockstat_hot_patch();
membar_producer();
#endif
}
/*ARGSUSED*/
/*
 * Provider ops: disable the probe described by 'parg'.  Clears its slot in
 * lockstat_probemap[]; the trailing scan only checks whether any probe is
 * still enabled and returns early if so (no further teardown is done here).
 */
static void
lockstat_disable(void *arg, dtrace_id_t id, void *parg)
{
lockstat_probe_t *probe = parg;
int i;
ASSERT(lockstat_probemap[probe->lsp_probe]);
lockstat_enabled--;
lockstat_probemap[probe->lsp_probe] = 0;
#ifdef DOODAD
lockstat_hot_patch();
membar_producer();
#endif
/*
 * See if we have any probes left enabled.
 */
for (i = 0; i < LS_NPROBES; i++) {
if (lockstat_probemap[i]) {
/*
 * This probe is still enabled. We don't need to deal
 * with waiting for all threads to be out of the
 * lockstat critical sections; just return.
 */
return;
}
}
}
/*ARGSUSED*/
/*
 * open(2) handler for /dev/dtrace/lockstat: the node exists only so
 * userland can detect the provider; opening it always succeeds.
 */
static int
lockstat_open(struct cdev *dev __unused, int oflags __unused,
int devtype __unused, struct thread *td __unused)
{
return (0);
}
/*ARGSUSED*/
/*
 * Provider ops: create a DTrace probe for every entry in lockstat_probes[]
 * that does not already exist.  All probes live under the "kernel" module
 * name; dtrace_probe_lookup() makes this idempotent across repeat calls.
 */
static void
lockstat_provide(void *arg, dtrace_probedesc_t *desc)
{
int i = 0;
for (i = 0; lockstat_probes[i].lsp_func != NULL; i++) {
lockstat_probe_t *probe = &lockstat_probes[i];
if (dtrace_probe_lookup(lockstat_id, "kernel",
probe->lsp_func, probe->lsp_name) != 0)
continue;
ASSERT(!probe->lsp_id);
#ifdef __FreeBSD__
/* FreeBSD carries a per-probe aframe count in the table. */
probe->lsp_id = dtrace_probe_create(lockstat_id,
"kernel", probe->lsp_func, probe->lsp_name,
probe->lsp_frame, probe);
#else
probe->lsp_id = dtrace_probe_create(lockstat_id,
"kernel", probe->lsp_func, probe->lsp_name,
LOCKSTAT_AFRAMES, probe);
#endif
}
}
/*ARGSUSED*/
/*
 * Provider ops: destroy a probe.  The probe must already be disabled
 * (asserted via lockstat_probemap); only the cached id is reset.
 */
static void
lockstat_destroy(void *arg, dtrace_id_t id, void *parg)
{
lockstat_probe_t *probe = parg;
ASSERT(!lockstat_probemap[probe->lsp_probe]);
probe->lsp_id = 0;
}
/* Stability attributes advertised for the lockstat provider. */
static dtrace_pattr_t lockstat_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
};
/*
 * Provider operations vector passed to dtrace_register().  Unused hooks
 * (suspend/resume, getargdesc, etc.) are left NULL.
 */
static dtrace_pops_t lockstat_pops = {
lockstat_provide,
NULL,
lockstat_enable,
lockstat_disable,
NULL,
NULL,
NULL,
NULL,
NULL,
lockstat_destroy
};
/*
 * SYSINIT hook: create the device node and register the provider.
 * NOTE(review): if dtrace_register() fails, the function returns without
 * destroying the cdev created just above — confirm this leak is acceptable.
 */
static void
lockstat_load(void *dummy)
{
/* Create the /dev/dtrace/lockstat entry. */
lockstat_cdev = make_dev(&lockstat_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
"dtrace/lockstat");
if (dtrace_register("lockstat", &lockstat_attr, DTRACE_PRIV_USER,
NULL, &lockstat_pops, NULL, &lockstat_id) != 0)
return;
}
/*
 * SYSUNINIT hook: unregister the provider and tear down the device node.
 * Fails (and keeps the cdev) if dtrace_unregister() reports the provider
 * is still busy.
 */
static int
lockstat_unload()
{
int error = 0;
if ((error = dtrace_unregister(lockstat_id)) != 0)
return (error);
destroy_dev(lockstat_cdev);
return (error);
}
/* ARGSUSED */
/*
 * Module event handler.  Load/unload work is driven by the SYSINIT/
 * SYSUNINIT hooks below, so every recognized event is a no-op here;
 * anything else is rejected with EOPNOTSUPP.
 */
static int
lockstat_modevent(module_t mod __unused, int type, void *data __unused)
{
int error = 0;
switch (type) {
case MOD_LOAD:
break;
case MOD_UNLOAD:
break;
case MOD_SHUTDOWN:
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
/*
 * Module glue: run load/unload at DTrace-provider SYSINIT time and declare
 * the module plus its dependencies on the dtrace and opensolaris modules.
 */
SYSINIT(lockstat_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, lockstat_load, NULL);
SYSUNINIT(lockstat_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, lockstat_unload, NULL);
DEV_MODULE(lockstat, lockstat_modevent, NULL);
MODULE_VERSION(lockstat, 1);
MODULE_DEPEND(lockstat, dtrace, 1, 1, 1);
MODULE_DEPEND(lockstat, opensolaris, 1, 1, 1);

@ -50,6 +50,7 @@
#include <sys/linker.h>
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/lockstat.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
@ -197,6 +198,8 @@ sdt_enable(void *arg __unused, dtrace_id_t id, void *parg)
probe->id = id;
probe->sdtp_lf->nenabled++;
if (strcmp(probe->prov->name, "lockstat") == 0)
lockstat_enabled++;
}
static void
@ -206,6 +209,8 @@ sdt_disable(void *arg __unused, dtrace_id_t id, void *parg)
KASSERT(probe->sdtp_lf->nenabled > 0, ("no probes enabled"));
if (strcmp(probe->prov->name, "lockstat") == 0)
lockstat_enabled--;
probe->id = 0;
probe->sdtp_lf->nenabled--;
}

@ -246,7 +246,6 @@ cddl/contrib/opensolaris/uts/common/zmod/zutil.c optional zfs compile-with "${
cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c optional dtrace compile-with "${DTRACE_C}" \
warning "kernel contains CDDL licensed DTRACE"
cddl/dev/dtmalloc/dtmalloc.c optional dtmalloc | dtraceall compile-with "${CDDL_C}"
cddl/dev/lockstat/lockstat.c optional dtrace_lockstat | dtraceall compile-with "${CDDL_C}"
cddl/dev/profile/profile.c optional dtrace_profile | dtraceall compile-with "${CDDL_C}"
cddl/dev/sdt/sdt.c optional dtrace_sdt | dtraceall compile-with "${CDDL_C}"
cddl/dev/fbt/fbt.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}"

@ -22,7 +22,6 @@ options KDTRACE_HOOKS
#device dtrace
# DTrace modules
#device dtrace_lockstat
#device dtrace_profile
#device dtrace_sdt
#device dtrace_fbt

@ -21,31 +21,46 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Backend for the lock tracing (lockstat) kernel support. This is required
* to allow a module to load even though DTrace kernel support may not be
* present.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifdef KDTRACE_HOOKS
#include <sys/types.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/lockstat.h>
#include <sys/sdt.h>
#include <sys/time.h>
/*
* The following must match the type definition of dtrace_probe. It is
* defined this way to avoid having to rely on CDDL code.
*/
uint32_t lockstat_probemap[LS_NPROBES];
void (*lockstat_probe_func)(uint32_t, uintptr_t, uintptr_t,
uintptr_t, uintptr_t, uintptr_t);
SDT_PROVIDER_DEFINE(lockstat);
SDT_PROBE_DEFINE1(lockstat, , , adaptive__acquire, "struct mtx *");
SDT_PROBE_DEFINE1(lockstat, , , adaptive__release, "struct mtx *");
SDT_PROBE_DEFINE2(lockstat, , , adaptive__spin, "struct mtx *", "uint64_t");
SDT_PROBE_DEFINE2(lockstat, , , adaptive__block, "struct mtx *", "uint64_t");
SDT_PROBE_DEFINE1(lockstat, , , spin__acquire, "struct mtx *");
SDT_PROBE_DEFINE1(lockstat, , , spin__release, "struct mtx *");
SDT_PROBE_DEFINE2(lockstat, , , spin__spin, "struct mtx *", "uint64_t");
SDT_PROBE_DEFINE1(lockstat, , , rw__acquire, "struct rwlock *");
SDT_PROBE_DEFINE1(lockstat, , , rw__release, "struct rwlock *");
SDT_PROBE_DEFINE5(lockstat, , , rw__block, "struct rwlock *", "uint64_t", "int",
"int", "int");
SDT_PROBE_DEFINE2(lockstat, , , rw__spin, "struct rwlock *", "uint64_t");
SDT_PROBE_DEFINE1(lockstat, , , rw__upgrade, "struct rwlock *");
SDT_PROBE_DEFINE1(lockstat, , , rw__downgrade, "struct rwlock *");
SDT_PROBE_DEFINE1(lockstat, , , sx__acquire, "struct sx *");
SDT_PROBE_DEFINE1(lockstat, , , sx__release, "struct sx *");
SDT_PROBE_DEFINE5(lockstat, , , sx__block, "struct sx *", "uint64_t", "int",
"int", "int");
SDT_PROBE_DEFINE2(lockstat, , , sx__spin, "struct sx *", "uint64_t");
SDT_PROBE_DEFINE1(lockstat, , , sx__upgrade, "struct sx *");
SDT_PROBE_DEFINE1(lockstat, , , sx__downgrade, "struct sx *");
SDT_PROBE_DEFINE2(lockstat, , , thread__spin, "struct mtx *", "uint64_t");
int lockstat_enabled = 0;
uint64_t
@ -64,5 +79,3 @@ lockstat_nsecs(struct lock_object *lo)
ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
return (ns);
}
#endif /* KDTRACE_HOOKS */

@ -349,7 +349,7 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
file, line);
curthread->td_locks++;
if (m->mtx_recurse == 0)
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
m, contested, waittime, file, line);
}
@ -531,17 +531,17 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
m->lock_object.lo_name, (void *)tid, file, line);
}
#endif
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
waittime, file, line);
#ifdef KDTRACE_HOOKS
if (sleep_time)
LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);
LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);
/*
* Only record the loops spinning and not sleeping.
*/
if (spin_cnt > sleep_cnt)
LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (all_time - sleep_time));
LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
#endif
}
@ -628,11 +628,11 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
"running");
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
contested, waittime, (file), (line));
#ifdef KDTRACE_HOOKS
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
contested, waittime, file, line);
if (spin_time != 0)
LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, spin_time);
LOCKSTAT_RECORD1(spin__spin, m, spin_time);
#endif
}
#endif /* SMP */
@ -709,12 +709,12 @@ retry:
spin_time += lockstat_nsecs(&m->lock_object);
#endif
if (m->mtx_recurse == 0)
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
m, contested, waittime, (file), (line));
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
contested, waittime, file, line);
LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
line);
WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_time);
LOCKSTAT_RECORD1(thread__spin, m, spin_time);
}
struct mtx *

@ -301,7 +301,7 @@ __rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
if (!rw_recursed(rw))
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE,
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(rw__acquire,
rw, 0, 0, file, line);
curthread->td_locks++;
}
@ -546,13 +546,13 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
#ifdef KDTRACE_HOOKS
all_time += lockstat_nsecs(&rw->lock_object);
if (sleep_time)
LOCKSTAT_RECORD4(LS_RW_RLOCK_BLOCK, rw, sleep_time,
LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
/* Record only the loops spinning and not sleeping. */
if (spin_cnt > sleep_cnt)
LOCKSTAT_RECORD4(LS_RW_RLOCK_SPIN, rw, all_time - sleep_time,
LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
@ -561,7 +561,7 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
* however. turnstiles don't like owners changing between calls to
* turnstile_wait() currently.
*/
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE, rw, contested,
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(rw__acquire, rw, contested,
waittime, file, line);
LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
WITNESS_LOCK(&rw->lock_object, 0, file, line);
@ -594,7 +594,7 @@ __rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
line);
WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE,
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(rw__acquire,
rw, 0, 0, file, line);
curthread->td_locks++;
curthread->td_rw_rlocks++;
@ -713,7 +713,7 @@ _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
turnstile_chain_unlock(&rw->lock_object);
break;
}
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_RUNLOCK_RELEASE, rw);
LOCKSTAT_PROFILE_RELEASE_LOCK(rw__release, rw);
curthread->td_locks--;
curthread->td_rw_rlocks--;
}
@ -910,17 +910,17 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
#ifdef KDTRACE_HOOKS
all_time += lockstat_nsecs(&rw->lock_object);
if (sleep_time)
LOCKSTAT_RECORD4(LS_RW_WLOCK_BLOCK, rw, sleep_time,
LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
/* Record only the loops spinning and not sleeping. */
if (spin_cnt > sleep_cnt)
LOCKSTAT_RECORD4(LS_RW_WLOCK_SPIN, rw, all_time - sleep_time,
LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested,
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(rw__acquire, rw, contested,
waittime, file, line);
}
@ -1066,7 +1066,7 @@ __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
curthread->td_rw_rlocks--;
WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, rw);
LOCKSTAT_RECORD0(rw__upgrade, rw);
}
return (success);
}
@ -1138,7 +1138,7 @@ __rw_downgrade(volatile uintptr_t *c, const char *file, int line)
out:
curthread->td_rw_rlocks++;
LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, rw);
LOCKSTAT_RECORD0(rw__downgrade, rw);
}
#ifdef INVARIANT_SUPPORT

@ -288,7 +288,7 @@ sx_try_slock_(struct sx *sx, const char *file, int line)
if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE,
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(sx__acquire,
sx, 0, 0, file, line);
curthread->td_locks++;
return (1);
@ -351,7 +351,7 @@ sx_try_xlock_(struct sx *sx, const char *file, int line)
WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
if (!sx_recursed(sx))
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE,
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(sx__acquire,
sx, 0, 0, file, line);
curthread->td_locks++;
}
@ -420,7 +420,7 @@ sx_try_upgrade_(struct sx *sx, const char *file, int line)
if (success) {
WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
LOCKSTAT_RECORD0(LS_SX_TRYUPGRADE_UPGRADE, sx);
LOCKSTAT_RECORD0(sx__upgrade, sx);
}
return (success);
}
@ -486,7 +486,7 @@ sx_downgrade_(struct sx *sx, const char *file, int line)
sleepq_release(&sx->lock_object);
LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);
LOCKSTAT_RECORD0(sx__downgrade, sx);
if (wakeup_swapper)
kick_proc0();
@ -719,16 +719,16 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
#ifdef KDTRACE_HOOKS
all_time += lockstat_nsecs(&sx->lock_object);
if (sleep_time)
LOCKSTAT_RECORD4(LS_SX_XLOCK_BLOCK, sx, sleep_time,
LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
(state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
if (spin_cnt > sleep_cnt)
LOCKSTAT_RECORD4(LS_SX_XLOCK_SPIN, sx, all_time - sleep_time,
LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
(state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
if (!error)
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(sx__acquire, sx,
contested, waittime, file, line);
GIANT_RESTORE();
return (error);
@ -983,17 +983,17 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
#ifdef KDTRACE_HOOKS
all_time += lockstat_nsecs(&sx->lock_object);
if (sleep_time)
LOCKSTAT_RECORD4(LS_SX_SLOCK_BLOCK, sx, sleep_time,
LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
(state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
if (spin_cnt > sleep_cnt)
LOCKSTAT_RECORD4(LS_SX_SLOCK_SPIN, sx, all_time - sleep_time,
LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
(state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
if (error == 0)
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
contested, waittime, file, line);
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(sx__acquire, sx, contested,
waittime, file, line);
GIANT_RESTORE();
return (error);
}

@ -7,7 +7,6 @@ SUBDIR= dtmalloc \
dtrace \
dtraceall \
dtrace_test \
lockstat \
profile \
prototype \
sdt \

@ -6,7 +6,6 @@ load :
-kldload dtrace
.if ${MACHINE_CPUARCH} == "i386"
-kldload sdt
-kldload lockstat
-kldload fbt
-kldload prototype
.endif
@ -20,7 +19,6 @@ unload :
.if ${MACHINE_CPUARCH} == "i386"
-kldunload prototype
-kldunload fbt
-kldunload lockstat
-kldunload sdt
.endif
-kldunload dtrace

@ -76,7 +76,6 @@ MODULE_DEPEND(dtraceall, fbt, 1, 1, 1);
#if defined(__amd64__) || defined(__i386__)
MODULE_DEPEND(dtraceall, fasttrap, 1, 1, 1);
#endif
MODULE_DEPEND(dtraceall, lockstat, 1, 1, 1);
MODULE_DEPEND(dtraceall, sdt, 1, 1, 1);
MODULE_DEPEND(dtraceall, systrace, 1, 1, 1);
#if defined(COMPAT_FREEBSD32)

@ -1,18 +0,0 @@
# $FreeBSD$
SYSDIR?= ${.CURDIR}/../../..
.PATH: ${SYSDIR}/cddl/dev/lockstat
KMOD= lockstat
SRCS= lockstat.c
SRCS+= vnode_if.h
CFLAGS+= -I${SYSDIR}/cddl/compat/opensolaris \
-I${SYSDIR}/cddl/contrib/opensolaris/uts/common \
-I${SYSDIR}
.include <bsd.kmod.mk>
CFLAGS+= -include ${SYSDIR}/cddl/compat/opensolaris/sys/debug_compat.h

@ -14,7 +14,6 @@ options KDTRACE_HOOKS
#device dtrace
# DTrace modules
#device dtrace_lockstat
#device dtrace_profile
#device dtrace_sdt
#device dtrace_fbt

@ -27,185 +27,81 @@
/*
* DTrace lockstat provider definitions
*
*/
#ifndef _SYS_LOCKSTAT_H
#ifndef _SYS_LOCKSTAT_H
#define _SYS_LOCKSTAT_H
#ifdef _KERNEL
#ifdef _KERNEL
/*
* Spin Locks
*/
#define LS_MTX_SPIN_LOCK_ACQUIRE 0
#define LS_MTX_SPIN_UNLOCK_RELEASE 1
#define LS_MTX_SPIN_LOCK_SPIN 2
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/sdt.h>
/*
* Adaptive Locks
*/
#define LS_MTX_LOCK_ACQUIRE 3
#define LS_MTX_UNLOCK_RELEASE 4
#define LS_MTX_LOCK_SPIN 5
#define LS_MTX_LOCK_BLOCK 6
#define LS_MTX_TRYLOCK_ACQUIRE 7
SDT_PROVIDER_DECLARE(lockstat);
/*
* Reader/Writer Locks
*/
#define LS_RW_RLOCK_ACQUIRE 8
#define LS_RW_RUNLOCK_RELEASE 9
#define LS_RW_WLOCK_ACQUIRE 10
#define LS_RW_WUNLOCK_RELEASE 11
#define LS_RW_RLOCK_SPIN 12
#define LS_RW_RLOCK_BLOCK 13
#define LS_RW_WLOCK_SPIN 14
#define LS_RW_WLOCK_BLOCK 15
#define LS_RW_TRYUPGRADE_UPGRADE 16
#define LS_RW_DOWNGRADE_DOWNGRADE 17
SDT_PROBE_DECLARE(lockstat, , , adaptive__acquire);
SDT_PROBE_DECLARE(lockstat, , , adaptive__release);
SDT_PROBE_DECLARE(lockstat, , , adaptive__spin);
SDT_PROBE_DECLARE(lockstat, , , adaptive__block);
/*
* Shared/Exclusive Locks
*/
#define LS_SX_SLOCK_ACQUIRE 18
#define LS_SX_SUNLOCK_RELEASE 19
#define LS_SX_XLOCK_ACQUIRE 20
#define LS_SX_XUNLOCK_RELEASE 21
#define LS_SX_SLOCK_SPIN 22
#define LS_SX_SLOCK_BLOCK 23
#define LS_SX_XLOCK_SPIN 24
#define LS_SX_XLOCK_BLOCK 25
#define LS_SX_TRYUPGRADE_UPGRADE 26
#define LS_SX_DOWNGRADE_DOWNGRADE 27
SDT_PROBE_DECLARE(lockstat, , , spin__acquire);
SDT_PROBE_DECLARE(lockstat, , , spin__release);
SDT_PROBE_DECLARE(lockstat, , , spin__spin);
/*
* Thread Locks
*/
#define LS_THREAD_LOCK_SPIN 28
SDT_PROBE_DECLARE(lockstat, , , rw__acquire);
SDT_PROBE_DECLARE(lockstat, , , rw__release);
SDT_PROBE_DECLARE(lockstat, , , rw__block);
SDT_PROBE_DECLARE(lockstat, , , rw__spin);
SDT_PROBE_DECLARE(lockstat, , , rw__upgrade);
SDT_PROBE_DECLARE(lockstat, , , rw__downgrade);
/*
* Lockmanager Locks
* According to locking(9) Lockmgr locks are "Largely deprecated"
* so no support for these have been added in the lockstat provider.
*/
SDT_PROBE_DECLARE(lockstat, , , sx__acquire);
SDT_PROBE_DECLARE(lockstat, , , sx__release);
SDT_PROBE_DECLARE(lockstat, , , sx__block);
SDT_PROBE_DECLARE(lockstat, , , sx__spin);
SDT_PROBE_DECLARE(lockstat, , , sx__upgrade);
SDT_PROBE_DECLARE(lockstat, , , sx__downgrade);
#define LS_NPROBES 29
#define LS_MTX_LOCK "mtx_lock"
#define LS_MTX_UNLOCK "mtx_unlock"
#define LS_MTX_SPIN_LOCK "mtx_lock_spin"
#define LS_MTX_SPIN_UNLOCK "mtx_unlock_spin"
#define LS_MTX_TRYLOCK "mtx_trylock"
#define LS_RW_RLOCK "rw_rlock"
#define LS_RW_WLOCK "rw_wlock"
#define LS_RW_RUNLOCK "rw_runlock"
#define LS_RW_WUNLOCK "rw_wunlock"
#define LS_RW_TRYUPGRADE "rw_try_upgrade"
#define LS_RW_DOWNGRADE "rw_downgrade"
#define LS_SX_SLOCK "sx_slock"
#define LS_SX_XLOCK "sx_xlock"
#define LS_SX_SUNLOCK "sx_sunlock"
#define LS_SX_XUNLOCK "sx_xunlock"
#define LS_SX_TRYUPGRADE "sx_try_upgrade"
#define LS_SX_DOWNGRADE "sx_downgrade"
#define LS_THREAD_LOCK "thread_lock"
#define LS_ACQUIRE "acquire"
#define LS_RELEASE "release"
#define LS_SPIN "spin"
#define LS_BLOCK "block"
#define LS_UPGRADE "upgrade"
#define LS_DOWNGRADE "downgrade"
#define LS_TYPE_ADAPTIVE "adaptive"
#define LS_TYPE_SPIN "spin"
#define LS_TYPE_THREAD "thread"
#define LS_TYPE_RW "rw"
#define LS_TYPE_SX "sx"
#define LSA_ACQUIRE (LS_TYPE_ADAPTIVE "-" LS_ACQUIRE)
#define LSA_RELEASE (LS_TYPE_ADAPTIVE "-" LS_RELEASE)
#define LSA_SPIN (LS_TYPE_ADAPTIVE "-" LS_SPIN)
#define LSA_BLOCK (LS_TYPE_ADAPTIVE "-" LS_BLOCK)
#define LSS_ACQUIRE (LS_TYPE_SPIN "-" LS_ACQUIRE)
#define LSS_RELEASE (LS_TYPE_SPIN "-" LS_RELEASE)
#define LSS_SPIN (LS_TYPE_SPIN "-" LS_SPIN)
#define LSR_ACQUIRE (LS_TYPE_RW "-" LS_ACQUIRE)
#define LSR_RELEASE (LS_TYPE_RW "-" LS_RELEASE)
#define LSR_BLOCK (LS_TYPE_RW "-" LS_BLOCK)
#define LSR_SPIN (LS_TYPE_RW "-" LS_SPIN)
#define LSR_UPGRADE (LS_TYPE_RW "-" LS_UPGRADE)
#define LSR_DOWNGRADE (LS_TYPE_RW "-" LS_DOWNGRADE)
#define LSX_ACQUIRE (LS_TYPE_SX "-" LS_ACQUIRE)
#define LSX_RELEASE (LS_TYPE_SX "-" LS_RELEASE)
#define LSX_BLOCK (LS_TYPE_SX "-" LS_BLOCK)
#define LSX_SPIN (LS_TYPE_SX "-" LS_SPIN)
#define LSX_UPGRADE (LS_TYPE_SX "-" LS_UPGRADE)
#define LSX_DOWNGRADE (LS_TYPE_SX "-" LS_DOWNGRADE)
#define LST_SPIN (LS_TYPE_THREAD "-" LS_SPIN)
/*
* The following must match the type definition of dtrace_probe. It is
* defined this way to avoid having to rely on CDDL code.
*/
struct lock_object;
extern uint32_t lockstat_probemap[LS_NPROBES];
typedef void (*lockstat_probe_func_t)(uint32_t, uintptr_t arg0, uintptr_t arg1,
uintptr_t arg2, uintptr_t arg3, uintptr_t arg4);
extern lockstat_probe_func_t lockstat_probe_func;
extern uint64_t lockstat_nsecs(struct lock_object *);
extern int lockstat_enabled;
#ifdef KDTRACE_HOOKS
/*
* Macros to record lockstat probes.
*/
#define LOCKSTAT_RECORD4(probe, lp, arg1, arg2, arg3, arg4) do { \
uint32_t id; \
\
if ((id = lockstat_probemap[(probe)])) \
(*lockstat_probe_func)(id, (uintptr_t)(lp), (arg1), (arg2), \
(arg3), (arg4)); \
} while (0)
#define LOCKSTAT_RECORD(probe, lp, arg1) \
LOCKSTAT_RECORD4(probe, lp, arg1, 0, 0, 0)
#define LOCKSTAT_RECORD0(probe, lp) \
LOCKSTAT_RECORD4(probe, lp, 0, 0, 0, 0)
#define LOCKSTAT_RECORD1(probe, lp, arg1) \
LOCKSTAT_RECORD4(probe, lp, arg1, 0, 0, 0)
#define LOCKSTAT_RECORD2(probe, lp, arg1, arg2) \
LOCKSTAT_RECORD4(probe, lp, arg1, arg2, 0, 0)
#define LOCKSTAT_RECORD3(probe, lp, arg1, arg2, arg3) \
LOCKSTAT_RECORD4(probe, lp, arg1, arg2, arg3, 0)
#define LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) do { \
uint32_t id; \
\
lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l); \
if ((id = lockstat_probemap[(probe)])) \
(*lockstat_probe_func)(id, (uintptr_t)(lp), 0, 0, 0, 0); \
} while (0)
#define LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) do { \
uint32_t id; \
\
lock_profile_release_lock(&(lp)->lock_object); \
if ((id = lockstat_probemap[(probe)])) \
(*lockstat_probe_func)(id, (uintptr_t)(lp), 0, 0, 0, 0); \
} while (0)
SDT_PROBE_DECLARE(lockstat, , , thread__spin);
#define LOCKSTAT_WRITER 0
#define LOCKSTAT_READER 1
#else /* !KDTRACE_HOOKS */
#ifdef KDTRACE_HOOKS
#define LOCKSTAT_RECORD0(probe, lp) \
SDT_PROBE1(lockstat, , , probe, lp)
#define LOCKSTAT_RECORD1(probe, lp, arg1) \
SDT_PROBE2(lockstat, , , probe, lp, arg1)
#define LOCKSTAT_RECORD2(probe, lp, arg1, arg2) \
SDT_PROBE3(lockstat, , , probe, lp, arg1, arg2)
#define LOCKSTAT_RECORD3(probe, lp, arg1, arg2, arg3) \
SDT_PROBE4(lockstat, , , probe, lp, arg1, arg2, arg3)
#define LOCKSTAT_RECORD4(probe, lp, arg1, arg2, arg3, arg4) \
SDT_PROBE5(lockstat, , , probe, lp, arg1, arg2, arg3, arg4)
#define LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) do { \
lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l); \
LOCKSTAT_RECORD0(probe, lp); \
} while (0)
#define LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) do { \
lock_profile_release_lock(&(lp)->lock_object); \
LOCKSTAT_RECORD0(probe, lp); \
} while (0)
extern int lockstat_enabled;
struct lock_object;
extern uint64_t lockstat_nsecs(struct lock_object *);
#else /* !KDTRACE_HOOKS */
#define LOCKSTAT_RECORD(probe, lp, arg1)
#define LOCKSTAT_RECORD0(probe, lp)
#define LOCKSTAT_RECORD1(probe, lp, arg1)
#define LOCKSTAT_RECORD2(probe, lp, arg1, arg2)
@ -218,8 +114,6 @@ extern int lockstat_enabled;
#define LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) \
lock_profile_release_lock(&(lp)->lock_object)
#endif /* !KDTRACE_HOOKS */
#endif /* _KERNEL */
#endif /* _SYS_LOCKSTAT_H */
#endif /* !KDTRACE_HOOKS */
#endif /* _KERNEL */
#endif /* _SYS_LOCKSTAT_H */

@ -188,8 +188,8 @@ void thread_lock_flags_(struct thread *, int, const char *, int);
if (!_mtx_obtain_lock((mp), _tid)) \
_mtx_lock_sleep((mp), _tid, (opts), (file), (line)); \
else \
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, \
mp, 0, 0, (file), (line)); \
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, \
mp, 0, 0, file, line); \
} while (0)
/*
@ -209,8 +209,8 @@ void thread_lock_flags_(struct thread *, int, const char *, int);
else \
_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
} else \
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, \
mp, 0, 0, (file), (line)); \
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, \
mp, 0, 0, file, line); \
} while (0)
#else /* SMP */
#define __mtx_lock_spin(mp, tid, opts, file, line) do { \
@ -231,8 +231,7 @@ void thread_lock_flags_(struct thread *, int, const char *, int);
uintptr_t _tid = (uintptr_t)(tid); \
\
if ((mp)->mtx_recurse == 0) \
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, \
(mp)); \
LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, mp); \
if (!_mtx_release_lock((mp), _tid)) \
_mtx_unlock_sleep((mp), (opts), (file), (line)); \
} while (0)
@ -252,21 +251,19 @@ void thread_lock_flags_(struct thread *, int, const char *, int);
if (mtx_recursed((mp))) \
(mp)->mtx_recurse--; \
else { \
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_SPIN_UNLOCK_RELEASE, \
mp); \
LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp); \
_mtx_release_lock_quick((mp)); \
} \
spinlock_exit(); \
} \
spinlock_exit(); \
} while (0)
#else /* SMP */
#define __mtx_unlock_spin(mp) do { \
if (mtx_recursed((mp))) \
(mp)->mtx_recurse--; \
else { \
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_SPIN_UNLOCK_RELEASE, \
mp); \
LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp); \
(mp)->mtx_lock = MTX_UNOWNED; \
} \
} \
spinlock_exit(); \
} while (0)
#endif /* SMP */

@ -95,12 +95,12 @@
/* Acquire a write lock. */
#define __rw_wlock(rw, tid, file, line) do { \
uintptr_t _tid = (uintptr_t)(tid); \
\
\
if (!_rw_write_lock((rw), _tid)) \
_rw_wlock_hard((rw), _tid, (file), (line)); \
else \
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, \
rw, 0, 0, (file), (line)); \
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(rw__acquire, rw, \
0, 0, file, line); \
} while (0)
/* Release a write lock. */
@ -110,8 +110,7 @@
if ((rw)->rw_recurse) \
(rw)->rw_recurse--; \
else { \
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_WUNLOCK_RELEASE, \
(rw)); \
LOCKSTAT_PROFILE_RELEASE_LOCK(rw__release, rw); \
if (!_rw_write_unlock((rw), _tid)) \
_rw_wunlock_hard((rw), _tid, (file), (line)); \
} \

@ -153,8 +153,8 @@ __sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,
if (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
error = _sx_xlock_hard(sx, tid, opts, file, line);
else
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE,
sx, 0, 0, file, line);
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(sx__acquire, sx,
0, 0, file, line);
return (error);
}
@ -166,7 +166,7 @@ __sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
uintptr_t tid = (uintptr_t)td;
if (sx->sx_recurse == 0)
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
LOCKSTAT_PROFILE_RELEASE_LOCK(sx__release, sx);
if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
_sx_xunlock_hard(sx, tid, file, line);
}
@ -182,8 +182,8 @@ __sx_slock(struct sx *sx, int opts, const char *file, int line)
!atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER))
error = _sx_slock_hard(sx, opts, file, line);
else
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx, 0,
0, file, line);
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(sx__acquire, sx,
0, 0, file, line);
return (error);
}
@ -200,7 +200,7 @@ __sx_sunlock(struct sx *sx, const char *file, int line)
{
uintptr_t x = sx->sx_lock;
LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
LOCKSTAT_PROFILE_RELEASE_LOCK(sx__release, sx);
if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) ||
!atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
_sx_sunlock_hard(sx, file, line);