Optimize lockmgr in order to get rid of the pool mutex interlock, of the
state transitioning flags and of msleep(9) calls.  Instead, use an
algorithm very similar to what sx(9) and rwlock(9) already do, together
with direct access to the sleepqueue(9) primitive.
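
As a rough sketch of the idea (not the committed kern/kern_lock.c code,
whose diff is suppressed below; the helper name here is invented), the
shared-lock fast path only manipulates the single lk_lock word and
defers to the sleepqueue(9) slow path when it cannot:

static __inline int
lk_try_share(struct lock *lk)
{
        uintptr_t x;

        x = lk->lk_lock;
        /*
         * Fast path: the lock is unheld or share-held and no exclusive
         * waiter is queued, so just bump the sharers count atomically.
         */
        if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS)) == LK_SHARE &&
            atomic_cmpset_acq_ptr(&lk->lk_lock, x, x + LK_ONE_SHARER))
                return (1);

        /*
         * Slow path (omitted here): queue on the sleepqueue(9) with
         * SLEEPQ_LK, re-check lk_lock, sleep and then retry.
         */
        return (0);
}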

In order to avoid writer starvation, a mechanism very similar to the one
rwlock(9) now uses is implemented, with a corresponding per-thread
counter of held shared lockmgr locks.
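
A minimal sketch of the anti-starvation rule (hypothetical helper; the
real check lives in kern/kern_lock.c), using the new td_lk_slocks
counter added to struct thread below:

static __inline int
lk_reader_defers(struct lock *lk, struct thread *td)
{

        /*
         * Defer to queued writers, but never when this thread already
         * holds shared lockmgr locks: sleeping in that case could
         * deadlock against the locks the thread itself pins shared.
         */
        return ((lk->lk_lock & LK_EXCLUSIVE_WAITERS) != 0 &&
            td->td_lk_slocks == 0);
}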

This patch also adds 2 new functions to the lockmgr KPI: lockmgr_rw() and
lockmgr_args_rw().  These two are like the 2 "normal" versions, but they
both accept a rwlock as the interlock.  In order to realize this, the
general lockmgr function __lockmgr_args() has been implemented through
the generic lock layer.  It supports all the blocking primitives, but
currently only these 2 mappers exist.
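
For example (the consumer names below are invented; only lockmgr_rw(),
lockmgr(), lockinit() and the LK_* flags come from the KPI), a structure
whose lookup is protected by a rwlock can now hand that rwlock to
lockmgr as the interlock and have it dropped via LK_INTERLOCK:

static struct rwlock    obj_rwlock;
static struct lock      obj_lock;

static void
obj_init(void)
{

        rw_init(&obj_rwlock, "obj rwlock");
        lockinit(&obj_lock, PVFS, "objlk", 0, 0);
}

static void
obj_modify(void)
{

        rw_wlock(&obj_rwlock);
        /* ... find the object under the rwlock ... */

        /* LK_INTERLOCK: lockmgr_rw() releases obj_rwlock for us. */
        (void)lockmgr_rw(&obj_lock, LK_EXCLUSIVE | LK_INTERLOCK,
            &obj_rwlock);
        /* ... obj_lock is now held exclusively, obj_rwlock is free ... */
        (void)lockmgr(&obj_lock, LK_RELEASE, NULL);
}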

The patch drops support for WITNESS at the moment, but it will probably
be added back soon.  Also, there is a small race in the draining code
which is also present in the current CVS stock implementation: if some
sharers are already on the runqueue when they wake up, they can contend
the lock with the exclusive drainer.  This is hard to fix, but the newly
committed code mitigates the issue much better than the (past) CVS
version.  In addition, the KA_HELD and KA_UNHELD assertions have been
turned into no-ops because they are dangerous and will soon no longer be
supported.

In order to avoid namespace pollution, stack.h is split into two parts:
one which includes only the "struct stack" definition (_stack.h) and one
defining the KPI.  In this way, the newly added _lockmgr.h can just
include _stack.h.
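
As an illustration of the intended usage (hypothetical consumer code,
not part of this commit), a header that only embeds the structure can
include the light-weight header, while code calling the stack(9) KPI
keeps including stack.h:

/* foo.h (hypothetical): only embeds the structure, so the light-weight
 * header is enough and the full stack(9) KPI is not dragged in. */
#include <sys/_stack.h>

struct foo_softc {
        int             foo_unit;
        struct stack    foo_trace;      /* Only the type is needed. */
};

/* foo.c (hypothetical): calls the KPI, so it includes <sys/stack.h>
 * and can record a trace with stack_save(&sc->foo_trace). */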

The kernel ABI is heavily changed by this commit (the now-committed
version of "struct lock" is a lot smaller than the previous one) and the
KPI is broken by the introduction of lockmgr_rw() / lockmgr_args_rw(),
so manpages and __FreeBSD_version will be updated accordingly.

Tested by:      kris, pho, jeff, danger
Reviewed by:    jeff
Sponsored by:   Google, Summer of Code program 2007
Author: Attilio Rao
Date:   2008-04-06 20:08:51 +00:00
Commit: 047dd67e96 (parent 44aab2c3de)
10 changed files with 1100 additions and 883 deletions


@@ -538,12 +538,13 @@ static void
 unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp,
     struct thread *td)
 {
-	int count, lockcnt;
+	unsigned count, lockrec;
 	struct vnode *vp;
 	struct vnode *lvp;
 	vp = UNIONFSTOV(unp);
 	lvp = unp->un_lowervp;
 	ASSERT_VOP_ELOCKED(lvp, "unionfs_node_update");
 	/*
 	 * lock update
@@ -551,11 +552,9 @@ unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp,
 	VI_LOCK(vp);
 	unp->un_uppervp = uvp;
 	vp->v_vnlock = uvp->v_vnlock;
-	lockcnt = lvp->v_vnlock->lk_exclusivecount;
-	if (lockcnt <= 0)
-		panic("unionfs: no exclusive lock");
+	lockrec = lvp->v_vnlock->lk_recurse;
 	VI_UNLOCK(vp);
-	for (count = 1; count < lockcnt; count++)
+	for (count = 0; count < lockrec; count++)
 		vn_lock(uvp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
 }

(File diff suppressed because it is too large.)

sys/sys/_lockmgr.h (new file)

@@ -0,0 +1,49 @@
/*-
* Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice(s), this list of conditions and the following disclaimer as
* the first lines of this file unmodified other than the possible
* addition of one or more copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
* notice(s), this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS__LOCKMGR_H_
#define _SYS__LOCKMGR_H_
#ifdef DEBUG_LOCKS
#include <sys/_stack.h>
#endif
struct lock {
struct lock_object lock_object;
volatile uintptr_t lk_lock;
volatile unsigned lk_recurse;
int lk_timo;
int lk_pri;
#ifdef DEBUG_LOCKS
struct stack lk_stack;
#endif
};
#endif

sys/sys/_stack.h (new file)

@@ -0,0 +1,39 @@
/*
* Copyright (c) 2005 Antoine Brodin
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS__STACK_H_
#define _SYS__STACK_H_
#define STACK_MAX 18 /* Don't change, stack_ktr relies on this. */
struct stack {
int depth;
vm_offset_t pcs[STACK_MAX];
};
#endif


@@ -1,240 +1,189 @@
/*-
* Copyright (c) 1995
* The Regents of the University of California. All rights reserved.
*
* This code contains ideas from software contributed to Berkeley by
* Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
* System project at Carnegie-Mellon University.
* Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* notice(s), this list of conditions and the following disclaimer as
* the first lines of this file unmodified other than the possible
* addition of one or more copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* notice(s), this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* @(#)lock.h 8.12 (Berkeley) 5/19/95
* $FreeBSD$
*/
#ifndef _SYS_LOCKMGR_H_
#define _SYS_LOCKMGR_H_
#ifdef DEBUG_LOCKS
#include <sys/stack.h> /* XXX */
#endif
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_lockmgr.h>
#include <sys/_mutex.h>
#include <sys/_rwlock.h>
struct mtx;
#define LK_SHARE 0x01
#define LK_SHARED_WAITERS 0x02
#define LK_EXCLUSIVE_WAITERS 0x04
#define LK_ALL_WAITERS \
(LK_SHARED_WAITERS | LK_EXCLUSIVE_WAITERS)
#define LK_FLAGMASK \
(LK_SHARE | LK_ALL_WAITERS)
/*
* The general lock structure. Provides for multiple shared locks,
* upgrading from shared to exclusive, and sleeping until the lock
* can be gained.
*/
struct lock {
struct lock_object lk_object; /* common lock properties */
struct mtx *lk_interlock; /* lock on remaining fields */
u_int lk_flags; /* see below */
int lk_sharecount; /* # of accepted shared locks */
int lk_waitcount; /* # of processes sleeping for lock */
short lk_exclusivecount; /* # of recursive exclusive locks */
short lk_prio; /* priority at which to sleep */
int lk_timo; /* maximum sleep time (for tsleep) */
struct thread *lk_lockholder; /* thread of exclusive lock holder */
struct lock *lk_newlock; /* lock taking over this lock */
#ifdef DEBUG_LOCKS
struct stack lk_stack;
#endif
};
#define lk_wmesg lk_object.lo_name
#define LK_HOLDER(x) ((x) & ~LK_FLAGMASK)
#define LK_SHARERS_SHIFT 3
#define LK_SHARERS(x) (LK_HOLDER(x) >> LK_SHARERS_SHIFT)
#define LK_SHARERS_LOCK(x) ((x) << LK_SHARERS_SHIFT | LK_SHARE)
#define LK_ONE_SHARER (1 << LK_SHARERS_SHIFT)
#define LK_UNLOCKED LK_SHARERS_LOCK(0)
#define LK_KERNPROC ((uintptr_t)(-1) & ~LK_FLAGMASK)
#ifdef _KERNEL
#if !defined(LOCK_FILE) || !defined(LOCK_LINE)
#error "LOCK_FILE and LOCK_LINE not defined, include <sys/lock.h> before"
#endif
struct thread;
/*
* Lock request types:
* LK_SHARED - get one of many possible shared locks. If a process
* holding an exclusive lock requests a shared lock, the exclusive
* lock(s) will be downgraded to shared locks.
* LK_EXCLUSIVE - stop further shared locks, when they are cleared,
* grant a pending upgrade if it exists, then grant an exclusive
* lock. Only one exclusive lock may exist at a time, except that
* a process holding an exclusive lock may get additional exclusive
* locks if it explicitly sets the LK_CANRECURSE flag in the lock
* request, or if the LK_CANRECUSE flag was set when the lock was
* initialized.
* LK_UPGRADE - the process must hold a shared lock that it wants to
* have upgraded to an exclusive lock. Other processes may get
* exclusive access to the resource between the time that the upgrade
* is requested and the time that it is granted.
* LK_DOWNGRADE - the process must hold an exclusive lock that it wants
* to have downgraded to a shared lock. If the process holds multiple
* (recursive) exclusive locks, they will all be downgraded to shared
* locks.
* LK_RELEASE - release one instance of a lock.
* LK_DRAIN - wait for all activity on the lock to end, then mark it
* decommissioned. This feature is used before freeing a lock that
* is part of a piece of memory that is about to be freed.
* LK_EXCLOTHER - return for lockstatus(). Used when another process
* holds the lock exclusively.
*
* These are flags that are passed to the lockmgr routine.
* Function prototypes.  Routines that start with an underscore are not part
* of the public interface and might be wrapped with a macro.
*/
#define LK_TYPE_MASK 0x0000000f /* type of lock sought */
#define LK_SHARED 0x00000001 /* shared lock */
#define LK_EXCLUSIVE 0x00000002 /* exclusive lock */
#define LK_UPGRADE 0x00000003 /* shared-to-exclusive upgrade */
#define LK_DOWNGRADE 0x00000005 /* exclusive-to-shared downgrade */
#define LK_RELEASE 0x00000006 /* release any type of lock */
#define LK_DRAIN 0x00000007 /* wait for all lock activity to end */
#define LK_EXCLOTHER 0x00000008 /* other process holds lock */
int __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
const char *wmesg, int prio, int timo, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void _lockmgr_assert(struct lock *lk, int what, const char *file, int line);
#endif
void _lockmgr_disown(struct lock *lk, const char *file, int line);
void lockdestroy(struct lock *lk);
void lockinit(struct lock *lk, int prio, const char *wmesg, int timo,
int flags);
#ifdef DDB
int lockmgr_chain(struct thread *td, struct thread **ownerp);
#endif
void lockmgr_printinfo(struct lock *lk);
int lockstatus(struct lock *lk);
/*
* External lock flags.
*
* These may be set in lock_init to set their mode permanently,
* or passed in as arguments to the lock manager.
* Since the ilk can be a static NULL pointer, these functions need a
* strict prototype in order to safely use the lock_object member.
*/
#define LK_EXTFLG_MASK 0x0000fff0 /* mask of external flags */
#define LK_NOWAIT 0x00000010 /* do not sleep to await lock */
#define LK_SLEEPFAIL 0x00000020 /* sleep, then return failure */
#define LK_CANRECURSE 0x00000040 /* allow recursive exclusive lock */
#define LK_NOSHARE 0x00000080 /* Only allow exclusive locks */
#define LK_TIMELOCK 0x00000100 /* use lk_timo, else no timeout */
#define LK_NOWITNESS 0x00000200 /* disable WITNESS */
#define LK_NODUP 0x00000400 /* enable duplication logging */
#define LK_NOPROFILE 0x00000800 /* disable lock profiling */
#define LK_QUIET 0x00001000 /* disable lock operations tracking */
#define LK_FUNC_MASK (LK_NODUP | LK_NOPROFILE | LK_NOWITNESS | LK_QUIET)
static __inline int
_lockmgr_args(struct lock *lk, u_int flags, struct mtx *ilk, const char *wmesg,
int prio, int timo, const char *file, int line)
{
return (__lockmgr_args(lk, flags, (ilk != NULL) ? &ilk->lock_object :
NULL, wmesg, prio, timo, file, line));
}
static __inline int
_lockmgr_args_rw(struct lock *lk, u_int flags, struct rwlock *ilk,
const char *wmesg, int prio, int timo, const char *file, int line)
{
return (__lockmgr_args(lk, flags, (ilk != NULL) ? &ilk->lock_object :
NULL, wmesg, prio, timo, file, line));
}
/*
* Nonpersistent external flags.
* Define aliases in order to complete lockmgr KPI.
*/
#define LK_RETRY 0x00010000 /* vn_lock: retry until locked */
#define LK_INTERLOCK 0x00020000 /*
* unlock passed mutex after getting
* lk_interlock
*/
#define lockmgr(lk, flags, ilk) \
_lockmgr_args((lk), (flags), (ilk), LK_WMESG_DEFAULT, \
LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE)
#define lockmgr_args(lk, flags, ilk, wmesg, prio, timo) \
_lockmgr_args((lk), (flags), (ilk), (wmesg), (prio), (timo), \
LOCK_FILE, LOCK_LINE)
#define lockmgr_args_rw(lk, flags, ilk, wmesg, prio, timo) \
_lockmgr_args_rw((lk), (flags), (ilk), (wmesg), (prio), (timo), \
LOCK_FILE, LOCK_LINE)
#define lockmgr_disown(lk) \
_lockmgr_disown((lk), LOCK_FILE, LOCK_LINE)
#define lockmgr_recursed(lk) \
((lk)->lk_recurse != 0)
#define lockmgr_rw(lk, flags, ilk) \
_lockmgr_args_rw((lk), (flags), (ilk), LK_WMESG_DEFAULT, \
LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE)
#define lockmgr_waiters(lk) \
((lk)->lk_lock & LK_ALL_WAITERS)
#ifdef INVARIANTS
#define lockmgr_assert(lk, what) \
_lockmgr_assert((lk), (what), LOCK_FILE, LOCK_LINE)
#else
#define lockmgr_assert(lk, what)
#endif
/*
* Flags for lockinit().
*/
#define LK_INIT_MASK 0x000FF
#define LK_CANRECURSE 0x00001
#define LK_NODUP 0x00002
#define LK_NOPROFILE 0x00004
#define LK_NOSHARE 0x00008
#define LK_NOWITNESS 0x00010
#define LK_QUIET 0x00020
/*
* Additional attributes to be used in lockmgr().
*/
#define LK_EATTR_MASK 0x0FF00
#define LK_INTERLOCK 0x00100
#define LK_NOWAIT 0x00200
#define LK_RETRY 0x00400
#define LK_SLEEPFAIL 0x00800
#define LK_TIMELOCK 0x01000
/*
* Operations for lockmgr().
*/
#define LK_TYPE_MASK 0xF0000
#define LK_DOWNGRADE 0x10000
#define LK_DRAIN 0x20000
#define LK_EXCLOTHER 0x30000
#define LK_EXCLUSIVE 0x40000
#define LK_RELEASE 0x50000
#define LK_SHARED 0x60000
#define LK_UPGRADE 0x70000
#define LK_TOTAL_MASK (LK_INIT_MASK | LK_EATTR_MASK | LK_TYPE_MASK)
/*
* Default values for lockmgr_args().
*/
#define LK_WMESG_DEFAULT (NULL)
#define LK_PRIO_DEFAULT (-1)
#define LK_PRIO_DEFAULT (0)
#define LK_TIMO_DEFAULT (0)
/*
* Internal lock flags.
*
* These flags are used internally to the lock manager.
*/
#define LK_WANT_UPGRADE 0x00100000 /* waiting for share-to-excl upgrade */
#define LK_WANT_EXCL 0x00200000 /* exclusive lock sought */
#define LK_HAVE_EXCL 0x00400000 /* exclusive lock obtained */
#define LK_WAITDRAIN 0x00800000 /* process waiting for lock to drain */
#define LK_DRAINING 0x01000000 /* lock is being drained */
#define LK_DESTROYED 0x02000000 /* lock is destroyed */
/*
* Internal state flags corresponding to lk_sharecount, and lk_waitcount
*/
#define LK_SHARE_NONZERO 0x10000000
#define LK_WAIT_NONZERO 0x20000000
#ifndef LOCK_FILE
#error "LOCK_FILE not defined, include <sys/lock.h> before <sys/lockmgr.h>"
#endif
/*
* Assertion flags.
*/
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define KA_BASE (LA_MASKASSERT + 1)
#define KA_LOCKED LA_LOCKED
#define KA_SLOCKED LA_SLOCKED
#define KA_XLOCKED LA_XLOCKED
#define KA_UNLOCKED LA_UNLOCKED
#define KA_RECURSED LA_RECURSED
#define KA_NOTRECURSED LA_NOTRECURSED
#define KA_HELD (KA_BASE << 0x00)
#define KA_UNHELD (KA_BASE << 0x01)
#endif
/*
* Lock return status.
*
* Successfully obtained locks return 0. Locks will always succeed
* unless one of the following is true:
* LK_FORCEUPGRADE is requested and some other process has already
* requested a lock upgrade (returns EBUSY).
* LK_WAIT is set and a sleep would be required (returns EBUSY).
* LK_SLEEPFAIL is set and a sleep was done (returns ENOLCK).
* PCATCH is set in lock priority and a signal arrives (returns
* either EINTR or ERESTART if system calls is to be restarted).
* Non-null lock timeout and timeout expires (returns EWOULDBLOCK).
* A failed lock attempt always returns a non-zero error value. No lock
* is held after an error return (in particular, a failed LK_UPGRADE
* or LK_FORCEUPGRADE will have released its shared access lock).
*/
/*
* Indicator that no process holds exclusive lock
*/
#define LK_KERNPROC ((struct thread *)-2)
#define LK_NOPROC ((struct thread *) -1)
struct thread;
void lockinit(struct lock *, int prio, const char *wmesg,
int timo, int flags);
void lockdestroy(struct lock *);
int _lockmgr_args(struct lock *, u_int flags, struct mtx *,
const char *wmesg, int prio, int timo, char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void _lockmgr_assert(struct lock *, int what, const char *, int);
#endif
void _lockmgr_disown(struct lock *, const char *, int);
void lockmgr_printinfo(struct lock *);
int lockstatus(struct lock *);
#define lockmgr(lock, flags, mtx) \
_lockmgr_args((lock), (flags), (mtx), LK_WMESG_DEFAULT, \
LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE)
#define lockmgr_disown(lock) \
_lockmgr_disown((lock), LOCK_FILE, LOCK_LINE)
#define lockmgr_args(lock, flags, mtx, wmesg, prio, timo) \
_lockmgr_args((lock), (flags), (mtx), (wmesg), (prio), (timo), \
LOCK_FILE, LOCK_LINE)
#define lockmgr_recursed(lkp) \
((lkp)->lk_exclusivecount > 1)
#define lockmgr_waiters(lkp) \
((lkp)->lk_waitcount != 0)
#ifdef INVARIANTS
#define lockmgr_assert(lkp, what) \
_lockmgr_assert((lkp), (what), LOCK_FILE, LOCK_LINE)
#else
#define lockmgr_assert(lkp, what)
#endif
#ifdef DDB
int lockmgr_chain(struct thread *td, struct thread **ownerp);
#define KA_HELD
#define KA_UNHELD
#endif
#endif /* _KERNEL */


@@ -200,7 +200,8 @@ struct thread {
 	volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */
 	u_char td_tsqueue; /* (t) Turnstile queue blocked on. */
 	short td_locks; /* (k) Count of non-spin locks. */
-	short td_rw_rlocks; /* (k) count of rwlock read locks. */
+	short td_rw_rlocks; /* (k) Count of rwlock read locks. */
+	short td_lk_slocks; /* (k) Count of lockmgr shared locks. */
 	struct turnstile *td_blocked; /* (t) Lock thread is blocked on. */
 	const char *td_lockname; /* (t) Name of lock blocked on. */
 	LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */


@@ -87,6 +87,7 @@ struct thread;
 #define SLEEPQ_CONDVAR 0x01 /* Used for a cv. */
 #define SLEEPQ_PAUSE 0x02 /* Used by pause. */
 #define SLEEPQ_SX 0x03 /* Used by an sx lock. */
+#define SLEEPQ_LK 0x04 /* Used by a lockmgr. */
 #define SLEEPQ_INTERRUPTIBLE 0x100 /* Sleep is interruptible. */
 void init_sleepqueues(void);


@@ -29,15 +29,10 @@
 #ifndef _SYS_STACK_H_
 #define _SYS_STACK_H_
-#define STACK_MAX 18 /* Don't change, stack_ktr relies on this. */
+#include <sys/_stack.h>
 struct sbuf;
-struct stack {
-	int depth;
-	vm_offset_t pcs[STACK_MAX];
-};
 /* MI Routines. */
 struct stack *stack_create(void);
 void stack_destroy(struct stack *);


@@ -395,8 +395,10 @@ extern void (*lease_updatetime)(int deltat);
 #define VI_UNLOCK(vp) mtx_unlock(&(vp)->v_interlock)
 #define VI_MTX(vp) (&(vp)->v_interlock)
-#define VN_LOCK_AREC(vp) ((vp)->v_vnlock->lk_flags |= LK_CANRECURSE)
-#define VN_LOCK_ASHARE(vp) ((vp)->v_vnlock->lk_flags &= ~LK_NOSHARE)
+#define VN_LOCK_AREC(vp) \
+	((vp)->v_vnlock->lock_object.lo_flags |= LK_CANRECURSE)
+#define VN_LOCK_ASHARE(vp) \
+	((vp)->v_vnlock->lock_object.lo_flags &= ~LK_NOSHARE)
 #endif /* _KERNEL */


@@ -553,8 +553,8 @@ MTX_SYSINIT(softdep_lock, &lk, "Softdep Lock", MTX_DEF);
 #define ACQUIRE_LOCK(lk) mtx_lock(lk)
 #define FREE_LOCK(lk) mtx_unlock(lk)
-#define BUF_AREC(bp) ((bp)->b_lock.lk_flags |= LK_CANRECURSE)
-#define BUF_NOREC(bp) ((bp)->b_lock.lk_flags &= ~LK_CANRECURSE)
+#define BUF_AREC(bp) ((bp)->b_lock.lock_object.lo_flags |= LK_CANRECURSE)
+#define BUF_NOREC(bp) ((bp)->b_lock.lock_object.lo_flags &= ~LK_CANRECURSE)
 /*
  * Worklist queue management.