Rework the witness code to work with sx locks as well as mutexes.

- Introduce lock classes and lock objects.  Each lock class specifies a
  name and set of flags (or properties) shared by all locks of a given
  type.  Currently there are three lock classes: spin mutexes, sleep
  mutexes, and sx locks.  A lock object specifies properties of an
  individual lock along with a lock name and all of the extra stuff needed
  to make witness work with a given lock.  This abstract lock stuff is
  defined in sys/lock.h.  The lockmgr constants, types, and prototypes have
  been moved to sys/lockmgr.h.  For temporary backwards compatibility,
  sys/lock.h includes sys/lockmgr.h.  (A sketch of the new class/object
  relationship follows this list.)
- Replace proc->p_spinlocks with a per-CPU list, PCPU(spinlocks), of spin
  locks held.  By making this per-CPU, we do not have to jump through
  magic hoops to deal with sched_lock changing ownership during context
  switches.  (See the list-walking sketch after this list.)
- Replace proc->p_heldmtx, formerly a list of held sleep mutexes, with
  proc->p_sleeplocks, which is a list of held sleep locks including sleep
  mutexes and sx locks.
- Add helper macros for logging lock events via the KTR_LOCK logging
  level so that the log messages are consistent.  (An example expansion
  follows this list.)
- Add some new flags that can be passed to mtx_init():
  - MTX_NOWITNESS - specifies that this lock should be ignored by witness.
    This is used, for example, for the mutex that backs an sx lock.
  - MTX_QUIET - this flag is not new, but it can now be passed to
    mtx_init() so that no events are logged for this lock, sparing one
    from changing every individual mtx_lock()/mtx_unlock() operation.
  (Both flags are shown together in a snippet after this list.)
- All lock objects maintain an initialized flag.  Use this flag to export
  a mtx_initialized() macro that can be safely called from drivers.  Also,
  we no longer walk the all_mtx list if MUTEX_DEBUG is defined, as witness
  performs the corresponding checks using the initialized flag.  (A driver
  usage sketch follows this list.)
- The lock order reversal messages have been improved to output slightly
  more accurate file and line numbers.
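
As a concrete illustration of the class/object split, here is a minimal
sketch in the style of the kern_sx.c hunk further down; it invents no new
API and only abridges the sx_init() shown in the diff:

    struct lock_class lock_class_sx = {
            "sx",                                       /* lc_name */
            LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE /* lc_flags */
    };

    void
    sx_init(struct sx *sx, const char *description)
    {
            struct lock_object *lock;

            lock = &sx->sx_object;
            lock->lo_class = &lock_class_sx;    /* per-type properties */
            lock->lo_name = description;        /* per-lock name */
            lock->lo_flags = LO_WITNESS | LO_SLEEPABLE;
            /* ... remaining sx-specific initialization elided ... */
            WITNESS_INIT(lock);                 /* register with witness */
    }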
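
The held-lock lists are chains of struct lock_list_entry (see the
sys/lock.h diff below).  A hypothetical walker over PCPU(spinlocks) would
read each entry's children back to front, since locks are pushed at the
logical head of the list:

    /*
     * Hypothetical helper: visit every spin lock held by this CPU,
     * most recently acquired first.
     */
    static void
    spinlock_walk(void (*visit)(struct lock_object *))
    {
            struct lock_list_entry *lle;
            u_int i;

            for (lle = PCPU_GET(spinlocks); lle != NULL; lle = lle->ll_next)
                    for (i = lle->ll_count; i > 0; i--)
                            visit(lle->ll_children[i - 1]);
    }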
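
The logging helpers all funnel into CTR5() with one fixed format.  For
example, the LOCK_LOG_LOCK() call made by _sx_slock() in the diff below:

    LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);

expands, unless LOP_QUIET or LO_QUIET suppresses it, to roughly:

    CTR5(KTR_LOCK, "SLOCK (%s) %s r = %d at %s:%d",
        sx->sx_object.lo_class->lc_name, sx->sx_object.lo_name,
        0, file, line);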
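
Both mtx_init() flags appear together in the sx code, which hides its
backing mutex from witness and from the KTR log (taken from the kern_sx.c
hunk below):

    mtx_init(&sx->sx_lock, "sx backing lock",
        MTX_DEF | MTX_NOWITNESS | MTX_QUIET);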
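
With the initialized flag a driver can test a mutex before touching it;
the softc and lock names in this fragment are hypothetical:

    if (!mtx_initialized(&sc->sc_mtx))
            mtx_init(&sc->sc_mtx, "foo driver lock", MTX_DEF);
    mtx_lock(&sc->sc_mtx);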
John Baldwin 2001-03-28 09:03:24 +00:00
parent c31146a14e
commit 192846463a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=74912
33 changed files with 1297 additions and 3870 deletions

View File

@ -988,7 +988,6 @@ alpha_init(pfn, ptb, bim, bip, biv)
(u_int64_t)proc0paddr + USPACE - sizeof(struct trapframe);
proc0.p_md.md_tf =
(struct trapframe *)proc0paddr->u_pcb.pcb_hw.apcb_ksp;
PCPU_SET(curproc, &proc0);
/*
* Get the right value for the boot cpu's idle ptbr.
@ -1003,8 +1002,8 @@ alpha_init(pfn, ptb, bim, bip, biv)
/* Setup curproc so that mutexes work */
PCPU_SET(curproc, &proc0);
PCPU_SET(spinlocks, NULL);
LIST_INIT(&proc0.p_heldmtx);
LIST_INIT(&proc0.p_contested);
/*

View File

@ -58,7 +58,7 @@ struct globaldata {
u_int32_t gd_current_asngen; /* ASN rollover check */
SLIST_ENTRY(globaldata) gd_allcpu;
int gd_witness_spin_check;
struct lock_list_entry *gd_spinlocks;
#ifdef KTR_PERCPU
volatile int gd_ktr_idx; /* Index into trace table */
char *gd_ktr_buf;

View File

@ -58,7 +58,7 @@ struct globaldata {
u_int32_t gd_current_asngen; /* ASN rollover check */
SLIST_ENTRY(globaldata) gd_allcpu;
int gd_witness_spin_check;
struct lock_list_entry *gd_spinlocks;
#ifdef KTR_PERCPU
volatile int gd_ktr_idx; /* Index into trace table */
char *gd_ktr_buf;

View File

@ -59,6 +59,7 @@
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
@ -1895,8 +1896,8 @@ init386(first)
/* setup curproc so that mutexes work */
PCPU_SET(curproc, &proc0);
PCPU_SET(spinlocks, NULL);
LIST_INIT(&proc0.p_heldmtx);
LIST_INIT(&proc0.p_contested);
mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);

View File

@ -2268,6 +2268,7 @@ ap_init(void)
* something unique to lock with.
*/
PCPU_SET(curproc, PCPU_GET(idleproc));
PCPU_SET(spinlocks, NULL);
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);

View File

@ -2268,6 +2268,7 @@ ap_init(void)
* something unique to lock with.
*/
PCPU_SET(curproc, PCPU_GET(idleproc));
PCPU_SET(spinlocks, NULL);
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);

View File

@ -2268,6 +2268,7 @@ ap_init(void)
* something unique to lock with.
*/
PCPU_SET(curproc, PCPU_GET(idleproc));
PCPU_SET(spinlocks, NULL);
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);

View File

@ -65,6 +65,7 @@ struct globaldata {
u_int gd_other_cpus;
SLIST_ENTRY(globaldata) gd_allcpu;
int gd_witness_spin_check;
struct lock_list_entry *gd_spinlocks;
#ifdef KTR_PERCPU
#ifdef KTR
volatile int gd_ktr_idx;

View File

@ -715,6 +715,7 @@ kern/subr_rman.c standard
kern/subr_sbuf.c standard
kern/subr_scanf.c standard
kern/subr_taskqueue.c standard
kern/subr_witness.c optional witness
kern/subr_xxx.c standard
kern/sys_generic.c standard
kern/sys_pipe.c standard

View File

@ -59,6 +59,7 @@
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
@ -1895,8 +1896,8 @@ init386(first)
/* setup curproc so that mutexes work */
PCPU_SET(curproc, &proc0);
PCPU_SET(spinlocks, NULL);
LIST_INIT(&proc0.p_heldmtx);
LIST_INIT(&proc0.p_contested);
mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);

View File

@ -2268,6 +2268,7 @@ ap_init(void)
* something unique to lock with.
*/
PCPU_SET(curproc, PCPU_GET(idleproc));
PCPU_SET(spinlocks, NULL);
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);

View File

@ -2268,6 +2268,7 @@ ap_init(void)
* something unique to lock with.
*/
PCPU_SET(curproc, PCPU_GET(idleproc));
PCPU_SET(spinlocks, NULL);
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);

View File

@ -65,6 +65,7 @@ struct globaldata {
u_int gd_other_cpus;
SLIST_ENTRY(globaldata) gd_allcpu;
int gd_witness_spin_check;
struct lock_list_entry *gd_spinlocks;
#ifdef KTR_PERCPU
#ifdef KTR
volatile int gd_ktr_idx;

View File

@ -2268,6 +2268,7 @@ ap_init(void)
* something unique to lock with.
*/
PCPU_SET(curproc, PCPU_GET(idleproc));
PCPU_SET(spinlocks, NULL);
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);

View File

@ -65,6 +65,7 @@ struct globaldata {
u_int gd_other_cpus;
SLIST_ENTRY(globaldata) gd_allcpu;
int gd_witness_spin_check;
struct lock_list_entry *gd_spinlocks;
#ifdef KTR_PERCPU
#ifdef KTR
volatile int gd_ktr_idx;

View File

@ -587,8 +587,8 @@ ia64_init()
/* Setup curproc so that mutexes work */
PCPU_SET(curproc, &proc0);
PCPU_SET(spinlocks, NULL);
LIST_INIT(&proc0.p_heldmtx);
LIST_INIT(&proc0.p_contested);
/*

View File

@ -58,7 +58,7 @@ struct globaldata {
u_int32_t gd_current_asngen; /* ASN rollover check */
SLIST_ENTRY(globaldata) gd_allcpu;
int gd_witness_spin_check;
struct lock_list_entry *gd_spinlocks;
#ifdef KTR_PERCPU
volatile int gd_ktr_idx; /* Index into trace table */
char *gd_ktr_buf;

View File

@ -58,7 +58,7 @@ struct globaldata {
u_int32_t gd_current_asngen; /* ASN rollover check */
SLIST_ENTRY(globaldata) gd_allcpu;
int gd_witness_spin_check;
struct lock_list_entry *gd_spinlocks;
#ifdef KTR_PERCPU
volatile int gd_ktr_idx; /* Index into trace table */
char *gd_ktr_buf;

View File

@ -197,7 +197,7 @@ cv_wait(struct cv *cvp, struct mtx *mp)
#endif
CV_ASSERT(cvp, mp, p);
WITNESS_SLEEP(0, mp);
WITNESS_SAVE(mp, mp);
WITNESS_SAVE(&mp->mtx_object, mp);
mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
@ -225,7 +225,7 @@ cv_wait(struct cv *cvp, struct mtx *mp)
#endif
PICKUP_GIANT();
mtx_lock(mp);
WITNESS_RESTORE(mp, mp);
WITNESS_RESTORE(&mp->mtx_object, mp);
}
/*
@ -250,7 +250,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
#endif
CV_ASSERT(cvp, mp, p);
WITNESS_SLEEP(0, mp);
WITNESS_SAVE(mp, mp);
WITNESS_SAVE(&mp->mtx_object, mp);
mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
@ -290,7 +290,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
ktrcsw(p->p_tracep, 0, 0);
#endif
mtx_lock(mp);
WITNESS_RESTORE(mp, mp);
WITNESS_RESTORE(&mp->mtx_object, mp);
return (rval);
}
@ -315,7 +315,7 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
#endif
CV_ASSERT(cvp, mp, p);
WITNESS_SLEEP(0, mp);
WITNESS_SAVE(mp, mp);
WITNESS_SAVE(&mp->mtx_object, mp);
mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
@ -350,7 +350,7 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
#endif
PICKUP_GIANT();
mtx_lock(mp);
WITNESS_RESTORE(mp, mp);
WITNESS_RESTORE(&mp->mtx_object, mp);
return (rval);
}
@ -377,7 +377,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
#endif
CV_ASSERT(cvp, mp, p);
WITNESS_SLEEP(0, mp);
WITNESS_SAVE(mp, mp);
WITNESS_SAVE(&mp->mtx_object, mp);
mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
@ -424,7 +424,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
ktrcsw(p->p_tracep, 0, 0);
#endif
mtx_lock(mp);
WITNESS_RESTORE(mp, mp);
WITNESS_RESTORE(&mp->mtx_object, mp);
return (rval);
}

View File

@ -47,6 +47,7 @@
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
@ -62,7 +63,6 @@
#include <sys/sx.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
@ -524,7 +524,6 @@ fork1(p1, flags, procp)
PROCTREE_LOCK(PT_RELEASE);
PROC_LOCK(p2);
LIST_INIT(&p2->p_children);
LIST_INIT(&p2->p_heldmtx);
LIST_INIT(&p2->p_contested);
callout_init(&p2->p_itcallout, 0);

File diff suppressed because it is too large.

View File

@ -46,43 +46,68 @@
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
/*
* XXX: We don't implement the LO_RECURSED flag for this lock yet.
* We could do this by walking p_sleeplocks if we really wanted to.
*/
struct lock_class lock_class_sx = {
"sx",
LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE
};
void
sx_init(struct sx *sx, const char *description)
{
struct lock_object *lock;
mtx_init(&sx->sx_lock, description, MTX_DEF);
bzero(sx, sizeof(*sx));
lock = &sx->sx_object;
lock->lo_class = &lock_class_sx;
lock->lo_name = description;
lock->lo_flags = LO_WITNESS | LO_SLEEPABLE;
mtx_init(&sx->sx_lock, "sx backing lock",
MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
sx->sx_cnt = 0;
cv_init(&sx->sx_shrd_cv, description);
sx->sx_shrd_wcnt = 0;
cv_init(&sx->sx_excl_cv, description);
sx->sx_descr = description;
sx->sx_excl_wcnt = 0;
sx->sx_xholder = NULL;
LOCK_LOG_INIT(lock, 0);
WITNESS_INIT(lock);
}
void
sx_destroy(struct sx *sx)
{
LOCK_LOG_DESTROY(&sx->sx_object, 0);
KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
0), ("%s (%s): holders or waiters\n", __FUNCTION__, sx->sx_descr));
0), ("%s (%s): holders or waiters\n", __FUNCTION__,
sx->sx_object.lo_name));
mtx_destroy(&sx->sx_lock);
cv_destroy(&sx->sx_shrd_cv);
cv_destroy(&sx->sx_excl_cv);
WITNESS_DESTROY(&sx->sx_object);
}
void
sx_slock(struct sx *sx)
_sx_slock(struct sx *sx, const char *file, int line)
{
mtx_lock(&sx->sx_lock);
KASSERT(sx->sx_xholder != curproc,
("%s (%s): trying to get slock while xlock is held\n", __FUNCTION__,
sx->sx_descr));
sx->sx_object.lo_name));
/*
* Loop in case we lose the race for lock acquisition.
@ -96,11 +121,17 @@ sx_slock(struct sx *sx)
/* Acquire a shared lock. */
sx->sx_cnt++;
#ifdef WITNESS
sx->sx_object.lo_flags |= LO_LOCKED;
#endif
LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
WITNESS_LOCK(&sx->sx_object, 0, file, line);
mtx_unlock(&sx->sx_lock);
}
void
sx_xlock(struct sx *sx)
_sx_xlock(struct sx *sx, const char *file, int line)
{
mtx_lock(&sx->sx_lock);
@ -113,7 +144,8 @@ sx_xlock(struct sx *sx)
* INVARIANTS.
*/
KASSERT(sx->sx_xholder != curproc,
("%s (%s): xlock already held", __FUNCTION__, sx->sx_descr));
("%s (%s): xlock already held @ %s:%d", __FUNCTION__,
sx->sx_object.lo_name, file, line));
/* Loop in case we lose the race for lock acquisition. */
while (sx->sx_cnt != 0) {
@ -128,16 +160,28 @@ sx_xlock(struct sx *sx)
sx->sx_cnt--;
sx->sx_xholder = curproc;
#ifdef WITNESS
sx->sx_object.lo_flags |= LO_LOCKED;
#endif
LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
WITNESS_LOCK(&sx->sx_object, 0, file, line);
mtx_unlock(&sx->sx_lock);
}
void
sx_sunlock(struct sx *sx)
_sx_sunlock(struct sx *sx, const char *file, int line)
{
mtx_lock(&sx->sx_lock);
_SX_ASSERT_SLOCKED(sx);
#ifdef WITNESS
if (sx->sx_cnt == 0)
sx->sx_object.lo_flags &= ~LO_LOCKED;
#endif
WITNESS_UNLOCK(&sx->sx_object, 0, file, line);
/* Release. */
sx->sx_cnt--;
@ -153,17 +197,24 @@ sx_sunlock(struct sx *sx)
} else if (sx->sx_shrd_wcnt > 0)
cv_broadcast(&sx->sx_shrd_cv);
LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);
mtx_unlock(&sx->sx_lock);
}
void
sx_xunlock(struct sx *sx)
_sx_xunlock(struct sx *sx, const char *file, int line)
{
mtx_lock(&sx->sx_lock);
_SX_ASSERT_XLOCKED(sx);
MPASS(sx->sx_cnt == -1);
#ifdef WITNESS
sx->sx_object.lo_flags &= ~LO_LOCKED;
#endif
WITNESS_UNLOCK(&sx->sx_object, 0, file, line);
/* Release. */
sx->sx_cnt++;
sx->sx_xholder = NULL;
@ -176,5 +227,7 @@ sx_xunlock(struct sx *sx)
else if (sx->sx_excl_wcnt > 0)
cv_signal(&sx->sx_excl_cv);
LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);
mtx_unlock(&sx->sx_lock);
}
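
Callers of the renamed sx functions above are unchanged: as the sys/sx.h
diff further down shows, the old names become macros that capture the
call site, e.g.

    #define sx_slock(sx)    _sx_slock((sx), __FILE__, __LINE__)

so an existing call such as sx_slock(&foo_sx) (the lock name here is
illustrative) now reaches _sx_slock() with the caller's file and line.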

View File

@ -382,7 +382,7 @@ msleep(ident, mtx, priority, wmesg, timo)
if (p && KTRPOINT(p, KTR_CSW))
ktrcsw(p->p_tracep, 1, 0);
#endif
WITNESS_SLEEP(0, mtx);
WITNESS_SLEEP(0, &mtx->mtx_object);
mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
/*
@ -401,7 +401,7 @@ msleep(ident, mtx, priority, wmesg, timo)
if (mtx != NULL) {
mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
WITNESS_SAVE(mtx, mtx);
WITNESS_SAVE(&mtx->mtx_object, mtx);
mtx_unlock_flags(mtx, MTX_NOSWITCH);
if (priority & PDROP)
mtx = NULL;
@ -498,7 +498,7 @@ msleep(ident, mtx, priority, wmesg, timo)
PICKUP_GIANT();
if (mtx != NULL) {
mtx_lock(mtx);
WITNESS_RESTORE(mtx, mtx);
WITNESS_RESTORE(&mtx->mtx_object, mtx);
}
return (rval);
}
@ -573,12 +573,12 @@ mawait(struct mtx *mtx, int priority, int timo)
int s;
WITNESS_SAVE_DECL(mtx);
WITNESS_SLEEP(0, mtx);
WITNESS_SLEEP(0, &mtx->mtx_object);
mtx_lock_spin(&sched_lock);
DROP_GIANT_NOSWITCH();
if (mtx != NULL) {
mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
WITNESS_SAVE(mtx, mtx);
WITNESS_SAVE(&mtx->mtx_object, mtx);
mtx_unlock_flags(mtx, MTX_NOSWITCH);
if (priority & PDROP)
mtx = NULL;
@ -691,7 +691,7 @@ mawait(struct mtx *mtx, int priority, int timo)
PICKUP_GIANT();
if (mtx != NULL) {
mtx_lock(mtx);
WITNESS_RESTORE(mtx, mtx);
WITNESS_RESTORE(&mtx->mtx_object, mtx);
}
return (rval);
}

View File

@ -2268,6 +2268,7 @@ ap_init(void)
* something unique to lock with.
*/
PCPU_SET(curproc, PCPU_GET(idleproc));
PCPU_SET(spinlocks, NULL);
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@ -58,7 +58,7 @@ struct globaldata {
u_int32_t gd_current_asngen; /* ASN rollover check */
SLIST_ENTRY(globaldata) gd_allcpu;
int gd_witness_spin_check;
struct lock_list_entry *gd_spinlocks;
#ifdef KTR_PERCPU
volatile int gd_ktr_idx; /* Index into trace table */
char *gd_ktr_buf;

View File

@ -58,7 +58,7 @@ struct globaldata {
u_int32_t gd_current_asngen; /* ASN rollover check */
SLIST_ENTRY(globaldata) gd_allcpu;
int gd_witness_spin_check;
struct lock_list_entry *gd_spinlocks;
#ifdef KTR_PERCPU
volatile int gd_ktr_idx; /* Index into trace table */
char *gd_ktr_buf;

View File

@ -119,7 +119,7 @@ enum sysinit_sub_id {
SI_SUB_VM = 0x1000000, /* virtual memory system init*/
SI_SUB_KMEM = 0x1800000, /* kernel memory*/
SI_SUB_KVM_RSRC = 0x1A00000, /* kvm operational limits*/
SI_SUB_MUTEX = 0x1A80000, /* mutex (witness) fixup */
SI_SUB_WITNESS = 0x1A80000, /* witness initialization */
SI_SUB_LOCK = 0x1B00000, /* lockmgr locks */
SI_SUB_EVENTHANDLER = 0x1C00000, /* eventhandler init */
SI_SUB_CPU = 0x2000000, /* CPU resource(s)*/

View File

@ -1,10 +1,5 @@
/*
* Copyright (c) 1995
* The Regents of the University of California. All rights reserved.
*
* This code contains ideas from software contributed to Berkeley by
* Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
* System project at Carnegie-Mellon University.
/*-
* Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -14,18 +9,14 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* 3. Berkeley Software Design Inc's name may not be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@ -34,189 +25,184 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lock.h 8.12 (Berkeley) 5/19/95
* from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
* $FreeBSD$
*/
#ifndef _LOCK_H_
#define _LOCK_H_
struct mtx;
#ifndef _SYS_LOCK_H_
#define _SYS_LOCK_H_
/*
* The general lock structure. Provides for multiple shared locks,
* upgrading from shared to exclusive, and sleeping until the lock
* can be gained.
* XXX - compatibility until lockmgr() goes away or all the #includes are
* updated.
*/
struct lock {
struct mtx *lk_interlock; /* lock on remaining fields */
u_int lk_flags; /* see below */
int lk_sharecount; /* # of accepted shared locks */
int lk_waitcount; /* # of processes sleeping for lock */
short lk_exclusivecount; /* # of recursive exclusive locks */
short lk_prio; /* priority at which to sleep */
char *lk_wmesg; /* resource sleeping (for tsleep) */
int lk_timo; /* maximum sleep time (for tsleep) */
pid_t lk_lockholder; /* pid of exclusive lock holder */
#ifdef DEBUG_LOCKS
const char *lk_filename;
const char *lk_lockername;
int lk_lineno;
#endif
#include <sys/lockmgr.h>
#include <sys/queue.h>
/*
* Lock classes. Each lock has a class which describes characteristics
* common to all types of locks of a given class.
*
* Spin locks in general must always protect against preemption, as it is
* an error to perform any type of context switch while holding a spin lock.
* Also, for an individual lock to be recursable, its class must allow
* recursion and the lock itself must explicitly allow recursion.
*/
struct lock_class {
const char *lc_name;
u_int lc_flags;
};
/*
* Lock request types:
* LK_SHARED - get one of many possible shared locks. If a process
* holding an exclusive lock requests a shared lock, the exclusive
* lock(s) will be downgraded to shared locks.
* LK_EXCLUSIVE - stop further shared locks, when they are cleared,
* grant a pending upgrade if it exists, then grant an exclusive
* lock. Only one exclusive lock may exist at a time, except that
* a process holding an exclusive lock may get additional exclusive
* locks if it explicitly sets the LK_CANRECURSE flag in the lock
* request, or if the LK_CANRECUSE flag was set when the lock was
* initialized.
* LK_UPGRADE - the process must hold a shared lock that it wants to
* have upgraded to an exclusive lock. Other processes may get
* exclusive access to the resource between the time that the upgrade
* is requested and the time that it is granted.
* LK_EXCLUPGRADE - the process must hold a shared lock that it wants to
* have upgraded to an exclusive lock. If the request succeeds, no
* other processes will have gotten exclusive access to the resource
* between the time that the upgrade is requested and the time that
* it is granted. However, if another process has already requested
* an upgrade, the request will fail (see error returns below).
* LK_DOWNGRADE - the process must hold an exclusive lock that it wants
* to have downgraded to a shared lock. If the process holds multiple
* (recursive) exclusive locks, they will all be downgraded to shared
* locks.
* LK_RELEASE - release one instance of a lock.
* LK_DRAIN - wait for all activity on the lock to end, then mark it
* decommissioned. This feature is used before freeing a lock that
* is part of a piece of memory that is about to be freed.
* LK_EXCLOTHER - return for lockstatus(). Used when another process
* holds the lock exclusively.
*
* These are flags that are passed to the lockmgr routine.
*/
#define LK_TYPE_MASK 0x0000000f /* type of lock sought */
#define LK_SHARED 0x00000001 /* shared lock */
#define LK_EXCLUSIVE 0x00000002 /* exclusive lock */
#define LK_UPGRADE 0x00000003 /* shared-to-exclusive upgrade */
#define LK_EXCLUPGRADE 0x00000004 /* first shared-to-exclusive upgrade */
#define LK_DOWNGRADE 0x00000005 /* exclusive-to-shared downgrade */
#define LK_RELEASE 0x00000006 /* release any type of lock */
#define LK_DRAIN 0x00000007 /* wait for all lock activity to end */
#define LK_EXCLOTHER 0x00000008 /* other process holds lock */
/*
* External lock flags.
*
* The first three flags may be set in lock_init to set their mode permanently,
* or passed in as arguments to the lock manager. The LK_REENABLE flag may be
* set only at the release of a lock obtained by drain.
*/
#define LK_EXTFLG_MASK 0x01000070 /* mask of external flags */
#define LK_NOWAIT 0x00000010 /* do not sleep to await lock */
#define LK_SLEEPFAIL 0x00000020 /* sleep, then return failure */
#define LK_CANRECURSE 0x00000040 /* allow recursive exclusive lock */
#define LK_REENABLE 0x00000080 /* lock is be reenabled after drain */
#define LK_NOPAUSE 0x01000000 /* no spinloop */
/*
* Internal lock flags.
*
* These flags are used internally to the lock manager.
*/
#define LK_WANT_UPGRADE 0x00000100 /* waiting for share-to-excl upgrade */
#define LK_WANT_EXCL 0x00000200 /* exclusive lock sought */
#define LK_HAVE_EXCL 0x00000400 /* exclusive lock obtained */
#define LK_WAITDRAIN 0x00000800 /* process waiting for lock to drain */
#define LK_DRAINING 0x00004000 /* lock is being drained */
/*
* Control flags
*
* Non-persistent external flags.
*/
#define LK_INTERLOCK 0x00010000 /*
* unlock passed mutex after getting
* lk_interlock
*/
#define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
#define LK_NOOBJ 0x00040000 /* vget: don't create object */
#define LK_THISLAYER 0x00080000 /* vn_lock: lock/unlock only current layer */
#define LC_SLEEPLOCK 0x00000001 /* Sleep lock. */
#define LC_SPINLOCK 0x00000002 /* Spin lock. */
#define LC_SLEEPABLE 0x00000004 /* Sleeping allowed with this lock. */
#define LC_RECURSABLE 0x00000008 /* Locks of this type may recurse. */
struct witness;
struct lock_object {
struct lock_class *lo_class;
const char *lo_name;
const char *lo_file; /* File and line of last acquire. */
int lo_line;
u_int lo_flags;
STAILQ_ENTRY(lock_object) lo_list; /* List of all locks in system. */
struct witness *lo_witness;
};
#define LO_CLASSFLAGS 0x0000ffff /* Class specific flags. */
#define LO_INITIALIZED 0x00010000 /* Lock has been initialized. */
#define LO_WITNESS 0x00020000 /* Should witness monitor this lock. */
#define LO_QUIET 0x00040000 /* Don't log locking operations. */
#define LO_RECURSABLE 0x00080000 /* Lock may recurse. */
#define LO_SLEEPABLE 0x00100000 /* Lock may be held while sleeping. */
#define LO_LOCKED 0x01000000 /* Someone holds this lock. */
#define LO_RECURSED 0x02000000 /* Someone has recursed on this lock. */
/*
* Internal state flags corresponding to lk_sharecount, and lk_waitcount
* Option flags passed to lock operations that witness also needs to know
* about or that are generic across all locks.
*/
#define LK_SHARE_NONZERO 0x00100000
#define LK_WAIT_NONZERO 0x00200000
#define LOP_NOSWITCH 0x00000001 /* Lock doesn't switch on release. */
#define LOP_QUIET 0x00000002 /* Don't log locking operations. */
#define LOP_TRYLOCK 0x00000004 /* Don't check lock order. */
#ifdef _KERNEL
/*
* A simple list type used to build the list of locks held by a process
* or CPU. We can't simply embed the list in struct lock_object since a
* lock may be held by more than one process if it is a shared lock. Locks
* are added to the head of the list, so we fill up each list entry from
* "the back" logically. To ease some of the arithmetic, we actually fill
* in each list entry the normal way (children[0] then children[1], etc.) but
* when we traverse the list we read children[count-1] as the first entry
* down to children[0] as the final entry.
*/
#define LOCK_NCHILDREN 6
struct lock_list_entry {
struct lock_list_entry *ll_next;
struct lock_object *ll_children[LOCK_NCHILDREN];
u_int ll_count;
};
/*
* Lock return status.
* Macros for KTR_LOCK tracing.
*
* Successfully obtained locks return 0. Locks will always succeed
* unless one of the following is true:
* LK_FORCEUPGRADE is requested and some other process has already
* requested a lock upgrade (returns EBUSY).
* LK_WAIT is set and a sleep would be required (returns EBUSY).
* LK_SLEEPFAIL is set and a sleep was done (returns ENOLCK).
* PCATCH is set in lock priority and a signal arrives (returns
* either EINTR or ERESTART if system calls is to be restarted).
* Non-null lock timeout and timeout expires (returns EWOULDBLOCK).
* A failed lock attempt always returns a non-zero error value. No lock
* is held after an error return (in particular, a failed LK_UPGRADE
* or LK_FORCEUPGRADE will have released its shared access lock).
* opname - name of this operation (LOCK/UNLOCK/SLOCK, etc.)
* lo - struct lock_object * for this lock
* flags - flags passed to the lock operation
* recurse - this lock's recursion level (or 0 if class is not recursable)
* result - result of a try lock operation
* file - file name
* line - line number
*/
#define LOCK_LOG_TEST(lo, flags) \
(((flags) & LOP_QUIET) == 0 && ((lo)->lo_flags & LO_QUIET) == 0)
/*
* Indicator that no process holds exclusive lock
*/
#define LK_KERNPROC ((pid_t) -2)
#define LK_NOPROC ((pid_t) -1)
#ifdef INVARIANTS
#define LOCKMGR_ASSERT(lkp, what, p) do { \
switch ((what)) { \
case LK_SHARED: \
if (lockstatus((lkp), (p)) == LK_SHARED) \
break; \
/* fall into exclusive */ \
case LK_EXCLUSIVE: \
if (lockstatus((lkp), (p)) != LK_EXCLUSIVE) \
panic("lock %s %s not held at %s:%d", \
(lkp)->lk_wmesg, #what, __FILE__, \
__LINE__); \
break; \
default: \
panic("unknown LOCKMGR_ASSERT at %s:%d", __FILE__, \
__LINE__); \
} \
#define LOCK_LOG_LOCK(opname, lo, flags, recurse, file, line) do { \
if (LOCK_LOG_TEST((lo), (flags))) \
CTR5(KTR_LOCK, opname " (%s) %s r = %d at %s:%d", \
(lo)->lo_class->lc_name, (lo)->lo_name, \
(u_int)(recurse), (file), (line)); \
} while (0)
#else /* INVARIANTS */
#define LOCKMGR_ASSERT(lkp, p, what)
#endif /* INVARIANTS */
void dumplockinfo(struct lock *lkp);
struct proc;
#define LOCK_LOG_TRY(opname, lo, flags, result, file, line) do { \
if (LOCK_LOG_TEST((lo), (flags))) \
CTR5(KTR_LOCK, "TRY_" opname " (%s) %s result=%d at %s:%d",\
(lo)->lo_class->lc_name, (lo)->lo_name, \
(u_int)(result), (file), (line)); \
} while (0)
void lockinit __P((struct lock *, int prio, char *wmesg, int timo,
int flags));
void lockdestroy __P((struct lock *));
#define LOCK_LOG_INIT(lo, flags) do { \
if (LOCK_LOG_TEST((lo), (flags))) \
CTR3(KTR_LOCK, __func__ ": %p (%s) %s", (lo), \
(lo)->lo_class->lc_name, (lo)->lo_name); \
} while (0)
#ifdef DEBUG_LOCKS
int debuglockmgr __P((struct lock *, u_int flags,
struct mtx *, struct proc *p,
const char *,
const char *,
int));
#define lockmgr(lockp, flags, slockp, proc) \
debuglockmgr((lockp), (flags), (slockp), (proc), \
"lockmgr", __FILE__, __LINE__)
#else
int lockmgr __P((struct lock *, u_int flags,
struct mtx *, struct proc *p));
#endif
void lockmgr_printinfo __P((struct lock *));
int lockstatus __P((struct lock *, struct proc *));
int lockcount __P((struct lock *));
#define LOCK_LOG_DESTROY(lo, flags) LOCK_LOG_INIT(lo, flags)
#endif /* !_LOCK_H_ */
/*
* Helpful macros for quickly coming up with assertions with informative
* panic messages.
*/
#define MPASS(ex) MPASS4(ex, #ex, __FILE__, __LINE__)
#define MPASS2(ex, what) MPASS4(ex, what, __FILE__, __LINE__)
#define MPASS3(ex, file, line) MPASS4(ex, #ex, file, line)
#define MPASS4(ex, what, file, line) \
KASSERT((ex), ("Assertion %s failed at %s:%d", what, file, line))
extern struct lock_class lock_class_mtx_sleep;
extern struct lock_class lock_class_mtx_spin;
extern struct lock_class lock_class_sx;
void witness_init(struct lock_object *);
void witness_destroy(struct lock_object *);
void witness_lock(struct lock_object *, int, const char *, int);
void witness_unlock(struct lock_object *, int, const char *, int);
void witness_save(struct lock_object *, const char **, int *);
void witness_restore(struct lock_object *, const char *, int);
int witness_list(struct proc *);
int witness_sleep(int, struct lock_object *, const char *, int);
#ifdef WITNESS
#define WITNESS_INIT(lock) \
witness_init((lock))
#define WITNESS_DESTROY(lock) \
witness_destroy(lock)
#define WITNESS_LOCK(lock, flags, file, line) \
witness_lock((lock), (flags), (file), (line))
#define WITNESS_UNLOCK(lock, flags, file, line) \
witness_unlock((lock), (flags), (file), (line))
#define WITNESS_SLEEP(check, lock) \
witness_sleep((check), (lock), __FILE__, __LINE__)
#define WITNESS_SAVE_DECL(n) \
const char * __CONCAT(n, __wf); \
int __CONCAT(n, __wl)
#define WITNESS_SAVE(lock, n) \
witness_save((lock), &__CONCAT(n, __wf), &__CONCAT(n, __wl))
#define WITNESS_RESTORE(lock, n) \
witness_restore((lock), __CONCAT(n, __wf), __CONCAT(n, __wl))
#else /* WITNESS */
#define WITNESS_INIT(lock) (lock)->lo_flags |= LO_INITIALIZED
#define WITNESS_DESTROY(lock) (lock)->lo_flags &= ~LO_INITIALIZED
#define WITNESS_LOCK(lock, flags, file, line)
#define WITNESS_UNLOCK(lock, flags, file, line)
#define WITNESS_SLEEP(check, lock)
#define WITNESS_SAVE_DECL(n)
#define WITNESS_SAVE(lock, n)
#define WITNESS_RESTORE(lock, n)
#endif /* WITNESS */
#endif /* _KERNEL */
#endif /* _SYS_LOCK_H_ */
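
The witness save/restore macros now take a struct lock_object pointer
rather than a mutex.  In sketch form, the pattern used throughout the
kern_condvar.c and kern_synch.c hunks above is:

    WITNESS_SAVE_DECL(mp);

    WITNESS_SAVE(&mp->mtx_object, mp);    /* record the acquisition point */
    mtx_unlock(mp);
    /* ... sleep, block, or otherwise run without the lock ... */
    mtx_lock(mp);
    WITNESS_RESTORE(&mp->mtx_object, mp); /* reinstate saved file/line */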

View File

@ -37,7 +37,6 @@
#ifdef _KERNEL
#include <sys/ktr.h>
#include <sys/systm.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/globals.h>
@ -49,25 +48,21 @@
#ifdef _KERNEL
/*
* Mutex types and options stored in mutex->mtx_flags
* Mutex types and options passed to mtx_init(). MTX_QUIET can also be
* passed in.
*/
#define MTX_DEF 0x00000000 /* DEFAULT (sleep) lock */
#define MTX_SPIN 0x00000001 /* Spin lock (disables interrupts) */
#define MTX_RECURSE 0x00000002 /* Option: lock allowed to recurse */
#define MTX_RECURSE 0x00000004 /* Option: lock allowed to recurse */
#define MTX_NOWITNESS 0x00000008 /* Don't do any witness checking. */
#define MTX_SLEEPABLE 0x00000010 /* We can sleep with this lock. */
/*
* Option flags passed to certain lock/unlock routines, through the use
* of corresponding mtx_{lock,unlock}_flags() interface macros.
*
* XXX: The only reason we make these bits not interfere with the above "types
* and options" bits is because we have to pass both to the witness
* routines right now; if/when we clean up the witness interface to
* not check for mutex type from the passed in flag, but rather from
* the mutex lock's mtx_flags field, then we can change these values to
* 0x1, 0x2, ...
*/
#define MTX_NOSWITCH 0x00000004 /* Do not switch on release */
#define MTX_QUIET 0x00000008 /* Don't log a mutex event */
#define MTX_NOSWITCH LOP_NOSWITCH /* Do not switch on release */
#define MTX_QUIET LOP_QUIET /* Don't log a mutex event */
/*
* State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this,
@ -82,22 +77,19 @@
#ifndef LOCORE
struct mtx_debug;
/*
* Sleep/spin mutex
*/
struct mtx {
struct lock_object;
struct mtx {
struct lock_object mtx_object; /* Common lock properties. */
volatile uintptr_t mtx_lock; /* owner (and state for sleep locks) */
volatile u_int mtx_recurse; /* number of recursive holds */
u_int mtx_savecrit; /* saved flags (for spin locks) */
int mtx_flags; /* flags passed to mtx_init() */
const char *mtx_description;
volatile u_int mtx_recurse; /* number of recursive holds */
critical_t mtx_savecrit; /* saved flags (for spin locks) */
TAILQ_HEAD(, proc) mtx_blocked; /* threads blocked on this lock */
LIST_ENTRY(mtx) mtx_contested; /* list of all contested locks */
struct mtx *mtx_next; /* all existing locks */
struct mtx *mtx_prev; /* in system... */
struct mtx_debug *mtx_debug; /* debugging information... */
};
/*
@ -108,21 +100,13 @@ struct mtx {
#ifdef _KERNEL
/*
* Strings for KTR_LOCK tracing.
*/
extern char STR_mtx_lock_slp[];
extern char STR_mtx_lock_spn[];
extern char STR_mtx_unlock_slp[];
extern char STR_mtx_unlock_spn[];
/*
* Prototypes
*
* NOTE: Functions prepended with `_' (underscore) are exported to other parts
* of the kernel via macros, thus allowing us to use the cpp __FILE__
* and __LINE__. These functions should not be called directly by any
* code using the IPI. Their macros cover their functionality.
* code using the API. Their macros cover their functionality.
*
* [See below for descriptions]
*
@ -144,6 +128,9 @@ void _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
#ifdef INVARIANT_SUPPORT
void _mtx_assert(struct mtx *m, int what, const char *file, int line);
#endif
#ifdef WITNESS
void _mtx_update_flags(struct mtx *m, int locking);
#endif
/*
* We define our machine-independent (unoptimized) mutex micro-operations
@ -230,6 +217,15 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
} while (0)
#endif
/*
* Update the lock object flags based on the current mutex state.
*/
#ifdef WITNESS
#define mtx_update_flags(m, locking) _mtx_update_flags((m), (locking))
#else
#define mtx_update_flags(m, locking)
#endif
/*
* Exported lock manipulation interface.
*
@ -255,6 +251,8 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
* mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
* relevant option flags `opts.'
*
* mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
*
* mtx_owned(m) returns non-zero if the current thread owns the lock `m'
*
* mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
@ -290,48 +288,46 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
KASSERT(((opts) & MTX_NOSWITCH) == 0, \
("MTX_NOSWITCH used at %s:%d", (file), (line))); \
_get_sleep_lock((m), curproc, (opts), (file), (line)); \
if (((opts) & MTX_QUIET) == 0) \
CTR5(KTR_LOCK, STR_mtx_lock_slp, \
(m)->mtx_description, (m), (m)->mtx_recurse, \
(file), (line)); \
WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), (file), (line)); \
LOCK_LOG_LOCK("LOCK", &(m)->mtx_object, opts, m->mtx_recurse, \
(file), (line)); \
mtx_update_flags((m), 1); \
WITNESS_LOCK(&(m)->mtx_object, (opts), (file), (line)); \
} while (0)
#define __mtx_lock_spin_flags(m, opts, file, line) do { \
MPASS(curproc != NULL); \
_get_spin_lock((m), curproc, (opts), (file), (line)); \
if (((opts) & MTX_QUIET) == 0) \
CTR5(KTR_LOCK, STR_mtx_lock_spn, \
(m)->mtx_description, (m), (m)->mtx_recurse, \
(file), (line)); \
WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), (file), (line)); \
LOCK_LOG_LOCK("LOCK", &(m)->mtx_object, opts, m->mtx_recurse, \
(file), (line)); \
mtx_update_flags((m), 1); \
WITNESS_LOCK(&(m)->mtx_object, (opts), (file), (line)); \
} while (0)
#define __mtx_unlock_flags(m, opts, file, line) do { \
MPASS(curproc != NULL); \
mtx_assert((m), MA_OWNED); \
WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), (file), (line)); \
mtx_update_flags((m), 0); \
WITNESS_UNLOCK(&(m)->mtx_object, (opts), (file), (line)); \
_rel_sleep_lock((m), curproc, (opts), (file), (line)); \
if (((opts) & MTX_QUIET) == 0) \
CTR5(KTR_LOCK, STR_mtx_unlock_slp, \
(m)->mtx_description, (m), (m)->mtx_recurse, \
(file), (line)); \
LOCK_LOG_LOCK("UNLOCK", &(m)->mtx_object, (opts), \
(m)->mtx_recurse, (file), (line)); \
} while (0)
#define __mtx_unlock_spin_flags(m, opts, file, line) do { \
MPASS(curproc != NULL); \
mtx_assert((m), MA_OWNED); \
WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), (file), (line)); \
mtx_update_flags((m), 0); \
WITNESS_UNLOCK(&(m)->mtx_object, (opts), (file), (line)); \
_rel_spin_lock((m)); \
if (((opts) & MTX_QUIET) == 0) \
CTR5(KTR_LOCK, STR_mtx_unlock_spn, \
(m)->mtx_description, (m), (m)->mtx_recurse, \
(file), (line)); \
LOCK_LOG_LOCK("UNLOCK", &(m)->mtx_object, (opts), \
(m)->mtx_recurse, (file), (line)); \
} while (0)
#define mtx_trylock_flags(m, opts) \
_mtx_trylock((m), (opts), __FILE__, __LINE__)
#define mtx_initialized(m) ((m)->mtx_object.lo_flags & LO_INITIALIZED)
#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curproc)
#define mtx_recursed(m) ((m)->mtx_recurse != 0)
@ -354,7 +350,7 @@ do { \
WITNESS_SAVE_DECL(Giant); \
\
if (mtx_owned(&Giant)) \
WITNESS_SAVE(&Giant, Giant); \
WITNESS_SAVE(&Giant.mtx_object, Giant); \
for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
mtx_unlock_flags(&Giant, MTX_NOSWITCH)
@ -364,7 +360,7 @@ do { \
WITNESS_SAVE_DECL(Giant); \
\
if (mtx_owned(&Giant)) \
WITNESS_SAVE(&Giant, Giant); \
WITNESS_SAVE(&Giant.mtx_object, Giant); \
for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
mtx_unlock(&Giant)
@ -373,7 +369,7 @@ do { \
while (_giantcnt--) \
mtx_lock(&Giant); \
if (mtx_owned(&Giant)) \
WITNESS_RESTORE(&Giant, Giant); \
WITNESS_RESTORE(&Giant.mtx_object, Giant); \
} while (0)
#define PARTIAL_PICKUP_GIANT() \
@ -381,7 +377,7 @@ do { \
while (_giantcnt--) \
mtx_lock(&Giant); \
if (mtx_owned(&Giant)) \
WITNESS_RESTORE(&Giant, Giant)
WITNESS_RESTORE(&Giant.mtx_object, Giant)
/*
* The INVARIANTS-enabled mtx_assert() functionality.
@ -405,58 +401,6 @@ do { \
#define mtx_assert(m, what)
#endif /* INVARIANTS */
#define MPASS(ex) MPASS4(ex, #ex, __FILE__, __LINE__)
#define MPASS2(ex, what) MPASS4(ex, what, __FILE__, __LINE__)
#define MPASS3(ex, file, line) MPASS4(ex, #ex, file, line)
#define MPASS4(ex, what, file, line) \
KASSERT((ex), ("Assertion %s failed at %s:%d", what, file, line))
/*
* Exported WITNESS-enabled functions and corresponding wrapper macros.
*/
#ifdef WITNESS
void witness_save(struct mtx *, const char **, int *);
void witness_restore(struct mtx *, const char *, int);
void witness_enter(struct mtx *, int, const char *, int);
void witness_try_enter(struct mtx *, int, const char *, int);
void witness_exit(struct mtx *, int, const char *, int);
int witness_list(struct proc *);
int witness_sleep(int, struct mtx *, const char *, int);
#define WITNESS_ENTER(m, t, f, l) \
witness_enter((m), (t), (f), (l))
#define WITNESS_EXIT(m, t, f, l) \
witness_exit((m), (t), (f), (l))
#define WITNESS_SLEEP(check, m) \
witness_sleep(check, (m), __FILE__, __LINE__)
#define WITNESS_SAVE_DECL(n) \
const char * __CONCAT(n, __wf); \
int __CONCAT(n, __wl)
#define WITNESS_SAVE(m, n) \
witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl))
#define WITNESS_RESTORE(m, n) \
witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl))
#else /* WITNESS */
#define witness_enter(m, t, f, l)
#define witness_tryenter(m, t, f, l)
#define witness_exit(m, t, f, l)
#define witness_list(p)
#define witness_sleep(c, m, f, l)
#define WITNESS_ENTER(m, t, f, l)
#define WITNESS_EXIT(m, t, f, l)
#define WITNESS_SLEEP(check, m)
#define WITNESS_SAVE_DECL(n)
#define WITNESS_SAVE(m, n)
#define WITNESS_RESTORE(m, n)
#endif /* WITNESS */
#endif /* _KERNEL */
#endif /* !LOCORE */
#endif /* _SYS_MUTEX_H_ */
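
Since MTX_NOSWITCH and MTX_QUIET are now aliases for the generic
LOP_NOSWITCH and LOP_QUIET option flags, existing callers pass them
straight through to the lock-object layer, as in the kern_synch.c hunk
above:

    mtx_unlock_flags(mtx, MTX_NOSWITCH);  /* MTX_NOSWITCH == LOP_NOSWITCH */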

View File

@ -231,7 +231,7 @@ struct proc {
int p_sig; /* (n) For core dump/debugger XXX. */
u_long p_code; /* (n) For core dump/debugger XXX. */
struct klist p_klist; /* (c) Knotes attached to this process. */
LIST_HEAD(, mtx) p_heldmtx; /* (j) For debugging code. */
struct lock_list_entry *p_sleeplocks; /* (k) Held sleep locks. */
struct mtx *p_blocked; /* (j) Mutex process is blocked on. */
const char *p_mtxname; /* (j) Name of mutex blocked on. */
LIST_HEAD(, mtx) p_contested; /* (j) Contested locks. */

View File

@ -31,12 +31,15 @@
#define _SYS_SX_H_
#ifndef LOCORE
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/lock.h> /* XXX */
#include <sys/mutex.h> /* XXX */
#include <sys/condvar.h> /* XXX */
struct lock_object;
struct sx {
struct lock_object sx_object; /* Common lock properties. */
struct mtx sx_lock; /* General protection lock. */
const char *sx_descr; /* sx lock description. */
int sx_cnt; /* -1: xlock, > 0: slock count. */
struct cv sx_shrd_cv; /* slock waiters. */
int sx_shrd_wcnt; /* Number of slock waiters. */
@ -48,10 +51,15 @@ struct sx {
#ifdef _KERNEL
void sx_init(struct sx *sx, const char *description);
void sx_destroy(struct sx *sx);
void sx_slock(struct sx *sx);
void sx_xlock(struct sx *sx);
void sx_sunlock(struct sx *sx);
void sx_xunlock(struct sx *sx);
void _sx_slock(struct sx *sx, const char *file, int line);
void _sx_xlock(struct sx *sx, const char *file, int line);
void _sx_sunlock(struct sx *sx, const char *file, int line);
void _sx_xunlock(struct sx *sx, const char *file, int line);
#define sx_slock(sx) _sx_slock((sx), __FILE__, __LINE__)
#define sx_xlock(sx) _sx_xlock((sx), __FILE__, __LINE__)
#define sx_sunlock(sx) _sx_sunlock((sx), __FILE__, __LINE__)
#define sx_xunlock(sx) _sx_xunlock((sx), __FILE__, __LINE__)
#ifdef INVARIANTS
/*
@ -65,7 +73,7 @@ void sx_xunlock(struct sx *sx);
} while (0)
#define _SX_ASSERT_SLOCKED(sx) do { \
KASSERT(((sx)->sx_cnt > 0), ("%s: lacking slock %s\n", \
__FUNCTION__, (sx)->sx_descr)); \
__FUNCTION__, (sx)->sx_object.lo_name)); \
} while (0)
/*
@ -79,7 +87,7 @@ void sx_xunlock(struct sx *sx);
#define _SX_ASSERT_XLOCKED(sx) do { \
KASSERT(((sx)->sx_xholder == curproc), \
("%s: thread %p lacking xlock %s\n", __FUNCTION__, \
curproc, (sx)->sx_descr)); \
curproc, (sx)->sx_object.lo_name)); \
} while (0)
#else /* INVARIANTS */