2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
1997-03-25 16:27:20 +00:00
|
|
|
* Copyright (c) 1995
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
*
|
1997-08-18 02:06:35 +00:00
|
|
|
* Copyright (C) 1997
|
|
|
|
* John S. Dyson. All rights reserved.
|
|
|
|
*
|
1997-03-25 16:27:20 +00:00
|
|
|
* This code contains ideas from software contributed to Berkeley by
|
|
|
|
* Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
|
|
|
|
* System project at Carnegie-Mellon University.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by the University of
|
|
|
|
* California, Berkeley and its contributors.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
|
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2006-08-15 16:42:16 +00:00
|
|
|
#include "opt_ddb.h"
|
2006-11-11 03:18:07 +00:00
|
|
|
#include "opt_global.h"
|
2006-08-15 16:42:16 +00:00
|
|
|
|
1997-03-25 16:27:20 +00:00
|
|
|
#include <sys/param.h>
|
2005-10-02 10:03:51 +00:00
|
|
|
#include <sys/kdb.h>
|
2000-10-12 22:37:28 +00:00
|
|
|
#include <sys/kernel.h>
|
2001-10-11 17:53:43 +00:00
|
|
|
#include <sys/ktr.h>
|
1997-03-25 16:27:20 +00:00
|
|
|
#include <sys/lock.h>
|
2002-08-27 09:59:47 +00:00
|
|
|
#include <sys/lockmgr.h>
|
2000-10-20 07:28:00 +00:00
|
|
|
#include <sys/mutex.h>
|
2002-08-27 09:59:47 +00:00
|
|
|
#include <sys/proc.h>
|
1997-03-25 16:36:35 +00:00
|
|
|
#include <sys/systm.h>
|
2006-11-11 03:18:07 +00:00
|
|
|
#include <sys/lock_profile.h>
|
2005-08-03 04:48:22 +00:00
|
|
|
#ifdef DEBUG_LOCKS
|
|
|
|
#include <sys/stack.h>
|
|
|
|
#endif
|
1997-03-25 16:27:20 +00:00
|
|
|
|
2008-02-06 00:37:14 +00:00
|
|
|
#define LOCKMGR_TRYOP(x) ((x) & LK_NOWAIT)
|
|
|
|
#define LOCKMGR_TRYW(x) (LOCKMGR_TRYOP((x)) ? LOP_TRYLOCK : 0)
|
2008-02-13 20:44:19 +00:00
|
|
|
#define LOCKMGR_UNHELD(x) (((x) & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0)
|
|
|
|
#define LOCKMGR_NOTOWNER(td) ((td) != curthread && (td) != LK_KERNPROC)
|
2008-02-06 00:37:14 +00:00
|
|
|
|
2007-11-18 14:43:53 +00:00
|
|
|
static void assert_lockmgr(struct lock_object *lock, int what);
|
2006-08-15 16:42:16 +00:00
|
|
|
#ifdef DDB
|
|
|
|
#include <ddb/ddb.h>
|
2006-11-13 05:41:46 +00:00
|
|
|
static void db_show_lockmgr(struct lock_object *lock);
|
2006-08-15 16:42:16 +00:00
|
|
|
#endif
|
2007-03-09 16:27:11 +00:00
|
|
|
static void lock_lockmgr(struct lock_object *lock, int how);
|
|
|
|
static int unlock_lockmgr(struct lock_object *lock);
|
2006-11-13 05:41:46 +00:00
|
|
|
|
|
|
|
struct lock_class lock_class_lockmgr = {
|
2007-03-09 16:19:34 +00:00
|
|
|
.lc_name = "lockmgr",
|
|
|
|
.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
|
2007-11-18 14:43:53 +00:00
|
|
|
.lc_assert = assert_lockmgr,
|
2006-11-13 05:41:46 +00:00
|
|
|
#ifdef DDB
|
2007-03-09 16:27:11 +00:00
|
|
|
.lc_ddb_show = db_show_lockmgr,
|
2006-11-13 05:41:46 +00:00
|
|
|
#endif
|
2007-03-09 16:27:11 +00:00
|
|
|
.lc_lock = lock_lockmgr,
|
|
|
|
.lc_unlock = unlock_lockmgr,
|
2006-11-13 05:41:46 +00:00
|
|
|
};
|
|
|
|
|
2008-02-13 20:44:19 +00:00
|
|
|
#ifndef INVARIANTS
|
|
|
|
#define _lockmgr_assert(lkp, what, file, line)
|
|
|
|
#endif
|
|
|
|
|
1997-03-25 16:27:20 +00:00
|
|
|
/*
|
|
|
|
* Locking primitives implementation.
|
|
|
|
* Locks provide shared/exclusive sychronization.
|
|
|
|
*/
|
|
|
|
|
2007-11-18 14:43:53 +00:00
|
|
|
/*
 * lock_class assertion hook.  lockmgr has no assertion support of its
 * own, so any attempt to assert on a lockmgr lock is a bug in the caller.
 */
void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}
|
|
|
|
|
2007-03-09 16:27:11 +00:00
|
|
|
/*
 * lock_class lock hook used for sleep interlocking; lockmgr locks
 * cannot be used as an interlock for sleeping, so this always panics.
 */
void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}
|
|
|
|
|
|
|
|
/*
 * lock_class unlock hook used for sleep interlocking; lockmgr locks
 * cannot be used as an interlock for sleeping, so this always panics.
 */
int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
	/* NOTREACHED -- panic() does not return. */
}
|
|
|
|
|
2008-01-08 23:48:31 +00:00
|
|
|
#define COUNT(td, x) ((td)->td_locks += (x))
|
1997-09-21 04:24:27 +00:00
|
|
|
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
|
|
|
|
LK_SHARE_NONZERO | LK_WAIT_NONZERO)
|
|
|
|
|
2008-02-15 21:04:36 +00:00
|
|
|
static int acquire(struct lock **lkpp, int extflags, int wanted,
|
|
|
|
const char *wmesg, int prio, int timo, int *contested, uint64_t *waittime);
|
|
|
|
static int acquiredrain(struct lock *lkp, int extflags, const char *wmesg,
|
|
|
|
int prio, int timo);
|
1997-03-25 16:27:20 +00:00
|
|
|
|
2005-04-06 10:11:14 +00:00
|
|
|
static __inline void
|
2005-03-25 00:00:44 +00:00
|
|
|
sharelock(struct thread *td, struct lock *lkp, int incr) {
|
1997-08-18 02:06:35 +00:00
|
|
|
lkp->lk_flags |= LK_SHARE_NONZERO;
|
|
|
|
lkp->lk_sharecount += incr;
|
2005-03-25 00:00:44 +00:00
|
|
|
COUNT(td, incr);
|
1997-08-18 02:06:35 +00:00
|
|
|
}
|
|
|
|
|
2005-04-06 10:11:14 +00:00
|
|
|
static __inline void
|
2005-03-25 00:00:44 +00:00
|
|
|
shareunlock(struct thread *td, struct lock *lkp, int decr) {
|
1999-01-10 01:58:29 +00:00
|
|
|
|
1999-01-08 17:31:30 +00:00
|
|
|
KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));
|
1997-03-25 16:27:20 +00:00
|
|
|
|
2005-03-25 00:00:44 +00:00
|
|
|
COUNT(td, -decr);
|
1998-03-07 19:25:34 +00:00
|
|
|
if (lkp->lk_sharecount == decr) {
|
1997-08-18 02:06:35 +00:00
|
|
|
lkp->lk_flags &= ~LK_SHARE_NONZERO;
|
1998-03-07 19:25:34 +00:00
|
|
|
if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
|
|
|
|
wakeup(lkp);
|
|
|
|
}
|
|
|
|
lkp->lk_sharecount = 0;
|
|
|
|
} else {
|
|
|
|
lkp->lk_sharecount -= decr;
|
|
|
|
}
|
1997-08-18 02:06:35 +00:00
|
|
|
}
|
1997-03-25 16:27:20 +00:00
|
|
|
|
1997-08-18 02:06:35 +00:00
|
|
|
static int
|
2008-02-15 21:04:36 +00:00
|
|
|
acquire(struct lock **lkpp, int extflags, int wanted, const char *wmesg,
|
|
|
|
int prio, int timo, int *contested, uint64_t *waittime)
|
2005-01-24 10:20:59 +00:00
|
|
|
{
|
2002-11-30 19:00:51 +00:00
|
|
|
struct lock *lkp = *lkpp;
|
2008-02-15 21:04:36 +00:00
|
|
|
const char *iwmesg;
|
|
|
|
int error, iprio, itimo;
|
|
|
|
|
|
|
|
iwmesg = (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg;
|
|
|
|
iprio = (prio != LK_PRIO_DEFAULT) ? prio : lkp->lk_prio;
|
|
|
|
itimo = (timo != LK_TIMO_DEFAULT) ? timo : lkp->lk_timo;
|
|
|
|
|
2003-03-11 20:00:37 +00:00
|
|
|
CTR3(KTR_LOCK,
|
2004-07-23 20:12:56 +00:00
|
|
|
"acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
|
2000-10-04 01:29:17 +00:00
|
|
|
lkp, extflags, wanted);
|
|
|
|
|
2005-04-03 11:49:02 +00:00
|
|
|
if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
|
1997-08-18 02:06:35 +00:00
|
|
|
return EBUSY;
|
2005-04-03 11:49:02 +00:00
|
|
|
error = 0;
|
2007-02-26 08:26:44 +00:00
|
|
|
if ((lkp->lk_flags & wanted) != 0)
|
|
|
|
lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime);
|
|
|
|
|
1997-08-18 02:06:35 +00:00
|
|
|
while ((lkp->lk_flags & wanted) != 0) {
|
2005-04-03 11:49:02 +00:00
|
|
|
CTR2(KTR_LOCK,
|
|
|
|
"acquire(): lkp == %p, lk_flags == 0x%x sleeping",
|
|
|
|
lkp, lkp->lk_flags);
|
1997-08-18 02:06:35 +00:00
|
|
|
lkp->lk_flags |= LK_WAIT_NONZERO;
|
|
|
|
lkp->lk_waitcount++;
|
2008-02-15 21:04:36 +00:00
|
|
|
error = msleep(lkp, lkp->lk_interlock, iprio, iwmesg,
|
|
|
|
((extflags & LK_TIMELOCK) ? itimo : 0));
|
2005-04-03 11:49:02 +00:00
|
|
|
lkp->lk_waitcount--;
|
|
|
|
if (lkp->lk_waitcount == 0)
|
1997-08-18 02:06:35 +00:00
|
|
|
lkp->lk_flags &= ~LK_WAIT_NONZERO;
|
2005-04-03 11:49:02 +00:00
|
|
|
if (error)
|
|
|
|
break;
|
1997-08-18 02:06:35 +00:00
|
|
|
if (extflags & LK_SLEEPFAIL) {
|
2005-04-03 11:49:02 +00:00
|
|
|
error = ENOLCK;
|
|
|
|
break;
|
1997-08-18 02:06:35 +00:00
|
|
|
}
|
2002-11-30 19:00:51 +00:00
|
|
|
if (lkp->lk_newlock != NULL) {
|
|
|
|
mtx_lock(lkp->lk_newlock->lk_interlock);
|
|
|
|
mtx_unlock(lkp->lk_interlock);
|
|
|
|
if (lkp->lk_waitcount == 0)
|
|
|
|
wakeup((void *)(&lkp->lk_newlock));
|
|
|
|
*lkpp = lkp = lkp->lk_newlock;
|
|
|
|
}
|
1997-08-18 02:06:35 +00:00
|
|
|
}
|
2005-04-03 11:49:02 +00:00
|
|
|
mtx_assert(lkp->lk_interlock, MA_OWNED);
|
|
|
|
return (error);
|
1997-08-18 02:06:35 +00:00
|
|
|
}
|
|
|
|
|
1997-03-25 16:27:20 +00:00
|
|
|
/*
|
|
|
|
* Set, change, or release a lock.
|
|
|
|
*
|
|
|
|
* Shared requests increment the shared count. Exclusive requests set the
|
|
|
|
* LK_WANT_EXCL flag (preventing further shared locks), and wait for already
|
|
|
|
* accepted shared locks and shared-to-exclusive upgrades to go away.
|
|
|
|
*/
|
|
|
|
int
|
2008-02-15 21:04:36 +00:00
|
|
|
_lockmgr_args(struct lock *lkp, u_int flags, struct mtx *interlkp,
|
|
|
|
const char *wmesg, int prio, int timo, char *file, int line)
|
2006-11-11 03:18:07 +00:00
|
|
|
|
1997-03-25 16:27:20 +00:00
|
|
|
{
|
2008-01-24 12:34:30 +00:00
|
|
|
struct thread *td;
|
1997-03-25 16:27:20 +00:00
|
|
|
int error;
|
2001-02-09 16:27:41 +00:00
|
|
|
int extflags, lockflags;
|
2007-02-26 08:26:44 +00:00
|
|
|
int contested = 0;
|
|
|
|
uint64_t waitstart = 0;
|
2007-12-27 22:56:57 +00:00
|
|
|
|
1997-03-25 16:27:20 +00:00
|
|
|
error = 0;
|
2008-01-24 12:34:30 +00:00
|
|
|
td = curthread;
|
1997-08-18 02:06:35 +00:00
|
|
|
|
2008-02-13 20:44:19 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
if (lkp->lk_flags & LK_DESTROYED) {
|
|
|
|
if (flags & LK_INTERLOCK)
|
|
|
|
mtx_unlock(interlkp);
|
|
|
|
if (panicstr != NULL)
|
|
|
|
return (0);
|
|
|
|
panic("%s: %p lockmgr is destroyed", __func__, lkp);
|
|
|
|
}
|
|
|
|
#endif
|
2008-02-15 21:04:36 +00:00
|
|
|
mtx_lock(lkp->lk_interlock);
|
2005-01-24 10:20:59 +00:00
|
|
|
CTR6(KTR_LOCK,
|
|
|
|
"lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
|
2008-02-15 21:04:36 +00:00
|
|
|
"td == %p", lkp, (wmesg != LK_WMESG_DEFAULT) ? wmesg :
|
|
|
|
lkp->lk_wmesg, lkp->lk_lockholder, lkp->lk_exclusivecount, flags,
|
|
|
|
td);
|
2005-08-03 04:48:22 +00:00
|
|
|
#ifdef DEBUG_LOCKS
|
|
|
|
{
|
|
|
|
struct stack stack; /* XXX */
|
|
|
|
stack_save(&stack);
|
2005-08-29 11:34:08 +00:00
|
|
|
CTRSTACK(KTR_LOCK, &stack, 0, 1);
|
2005-08-03 04:48:22 +00:00
|
|
|
}
|
2005-01-24 10:20:59 +00:00
|
|
|
#endif
|
|
|
|
|
2001-04-20 22:38:40 +00:00
|
|
|
if (flags & LK_INTERLOCK) {
|
2001-04-28 12:11:01 +00:00
|
|
|
mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(interlkp);
|
2001-04-20 22:38:40 +00:00
|
|
|
}
|
1997-08-18 02:06:35 +00:00
|
|
|
|
2003-02-25 03:37:48 +00:00
|
|
|
if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
|
2003-03-04 21:03:05 +00:00
|
|
|
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
|
2007-03-21 21:20:51 +00:00
|
|
|
&lkp->lk_interlock->lock_object,
|
2008-02-15 21:04:36 +00:00
|
|
|
"Acquiring lockmgr lock \"%s\"",
|
|
|
|
(wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg);
|
2003-02-25 03:37:48 +00:00
|
|
|
|
2001-08-10 23:29:15 +00:00
|
|
|
if (panicstr != NULL) {
|
|
|
|
mtx_unlock(lkp->lk_interlock);
|
|
|
|
return (0);
|
|
|
|
}
|
2005-03-31 05:18:19 +00:00
|
|
|
if ((lkp->lk_flags & LK_NOSHARE) &&
|
|
|
|
(flags & LK_TYPE_MASK) == LK_SHARED) {
|
|
|
|
flags &= ~LK_TYPE_MASK;
|
|
|
|
flags |= LK_EXCLUSIVE;
|
|
|
|
}
|
1997-03-25 16:27:20 +00:00
|
|
|
extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
|
|
|
|
|
|
|
|
switch (flags & LK_TYPE_MASK) {
|
|
|
|
|
|
|
|
case LK_SHARED:
|
2008-02-06 00:37:14 +00:00
|
|
|
if (!LOCKMGR_TRYOP(extflags))
|
|
|
|
WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER, file,
|
|
|
|
line);
|
1999-03-12 03:09:29 +00:00
|
|
|
/*
|
|
|
|
* If we are not the exclusive lock holder, we have to block
|
|
|
|
* while there is an exclusive lock holder or while an
|
|
|
|
* exclusive lock request or upgrade request is in progress.
|
|
|
|
*
|
2004-06-03 01:47:37 +00:00
|
|
|
* However, if TDP_DEADLKTREAT is set, we override exclusive
|
1999-03-12 03:09:29 +00:00
|
|
|
* lock requests or upgrade requests ( but not the exclusive
|
|
|
|
* lock itself ).
|
|
|
|
*/
|
2008-01-08 23:48:31 +00:00
|
|
|
if (lkp->lk_lockholder != td) {
|
2001-02-09 16:27:41 +00:00
|
|
|
lockflags = LK_HAVE_EXCL;
|
2008-02-06 13:26:01 +00:00
|
|
|
if (!(td->td_pflags & TDP_DEADLKTREAT))
|
2001-09-13 22:33:37 +00:00
|
|
|
lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
|
2008-02-15 21:04:36 +00:00
|
|
|
error = acquire(&lkp, extflags, lockflags, wmesg,
|
|
|
|
prio, timo, &contested, &waitstart);
|
1997-03-25 16:27:20 +00:00
|
|
|
if (error)
|
|
|
|
break;
|
2005-03-25 00:00:44 +00:00
|
|
|
sharelock(td, lkp, 1);
|
2006-11-11 03:18:07 +00:00
|
|
|
if (lkp->lk_sharecount == 1)
|
2007-02-26 08:26:44 +00:00
|
|
|
lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
|
2008-02-06 00:37:14 +00:00
|
|
|
WITNESS_LOCK(&lkp->lk_object, LOCKMGR_TRYW(extflags),
|
|
|
|
file, line);
|
2006-11-11 03:18:07 +00:00
|
|
|
|
2002-05-30 05:55:22 +00:00
|
|
|
#if defined(DEBUG_LOCKS)
|
2005-08-03 04:59:07 +00:00
|
|
|
stack_save(&lkp->lk_stack);
|
2002-05-30 05:55:22 +00:00
|
|
|
#endif
|
1997-03-25 16:27:20 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* We hold an exclusive lock, so downgrade it to shared.
|
|
|
|
* An alternative would be to fail with EDEADLK.
|
|
|
|
*/
|
2002-08-25 13:23:09 +00:00
|
|
|
/* FALLTHROUGH downgrade */
|
1997-03-25 16:27:20 +00:00
|
|
|
|
|
|
|
case LK_DOWNGRADE:
|
2008-02-13 20:44:19 +00:00
|
|
|
_lockmgr_assert(lkp, KA_XLOCKED, file, line);
|
2005-03-25 00:00:44 +00:00
|
|
|
sharelock(td, lkp, lkp->lk_exclusivecount);
|
2008-02-06 00:37:14 +00:00
|
|
|
WITNESS_DOWNGRADE(&lkp->lk_object, 0, file, line);
|
2005-03-25 00:00:44 +00:00
|
|
|
COUNT(td, -lkp->lk_exclusivecount);
|
1997-03-25 16:27:20 +00:00
|
|
|
lkp->lk_exclusivecount = 0;
|
|
|
|
lkp->lk_flags &= ~LK_HAVE_EXCL;
|
2003-02-01 12:17:09 +00:00
|
|
|
lkp->lk_lockholder = LK_NOPROC;
|
1997-03-25 16:27:20 +00:00
|
|
|
if (lkp->lk_waitcount)
|
|
|
|
wakeup((void *)lkp);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LK_UPGRADE:
|
|
|
|
/*
|
|
|
|
* Upgrade a shared lock to an exclusive one. If another
|
|
|
|
* shared lock has already requested an upgrade to an
|
|
|
|
* exclusive lock, our shared lock is released and an
|
|
|
|
* exclusive lock is requested (which will be granted
|
|
|
|
* after the upgrade). If we return an error, the file
|
|
|
|
* will always be unlocked.
|
|
|
|
*/
|
2008-02-13 20:44:19 +00:00
|
|
|
_lockmgr_assert(lkp, KA_SLOCKED, file, line);
|
2005-03-25 00:00:44 +00:00
|
|
|
shareunlock(td, lkp, 1);
|
2006-11-11 03:18:07 +00:00
|
|
|
if (lkp->lk_sharecount == 0)
|
|
|
|
lock_profile_release_lock(&lkp->lk_object);
|
1997-03-25 16:27:20 +00:00
|
|
|
/*
|
|
|
|
* If we are just polling, check to see if we will block.
|
|
|
|
*/
|
|
|
|
if ((extflags & LK_NOWAIT) &&
|
|
|
|
((lkp->lk_flags & LK_WANT_UPGRADE) ||
|
|
|
|
lkp->lk_sharecount > 1)) {
|
|
|
|
error = EBUSY;
|
2008-02-06 00:37:14 +00:00
|
|
|
WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
|
1997-03-25 16:27:20 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
|
|
|
|
/*
|
|
|
|
* We are first shared lock to request an upgrade, so
|
|
|
|
* request upgrade and wait for the shared count to
|
|
|
|
* drop to zero, then take exclusive lock.
|
|
|
|
*/
|
|
|
|
lkp->lk_flags |= LK_WANT_UPGRADE;
|
2008-02-15 21:04:36 +00:00
|
|
|
error = acquire(&lkp, extflags, LK_SHARE_NONZERO, wmesg,
|
|
|
|
prio, timo, &contested, &waitstart);
|
1997-03-25 16:27:20 +00:00
|
|
|
lkp->lk_flags &= ~LK_WANT_UPGRADE;
|
1998-03-07 19:25:34 +00:00
|
|
|
|
2004-08-27 01:41:28 +00:00
|
|
|
if (error) {
|
|
|
|
if ((lkp->lk_flags & ( LK_WANT_EXCL | LK_WAIT_NONZERO)) == (LK_WANT_EXCL | LK_WAIT_NONZERO))
|
|
|
|
wakeup((void *)lkp);
|
2008-02-06 00:37:14 +00:00
|
|
|
WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
|
2004-08-27 01:41:28 +00:00
|
|
|
break;
|
|
|
|
}
|
1997-03-25 16:27:20 +00:00
|
|
|
if (lkp->lk_exclusivecount != 0)
|
|
|
|
panic("lockmgr: non-zero exclusive count");
|
2004-11-29 22:58:32 +00:00
|
|
|
lkp->lk_flags |= LK_HAVE_EXCL;
|
2008-01-08 23:48:31 +00:00
|
|
|
lkp->lk_lockholder = td;
|
1997-03-25 16:27:20 +00:00
|
|
|
lkp->lk_exclusivecount = 1;
|
2008-02-06 00:37:14 +00:00
|
|
|
WITNESS_UPGRADE(&lkp->lk_object, LOP_EXCLUSIVE |
|
|
|
|
LOP_TRYLOCK, file, line);
|
2005-03-25 00:00:44 +00:00
|
|
|
COUNT(td, 1);
|
2007-02-26 08:26:44 +00:00
|
|
|
lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
|
1999-01-20 14:49:12 +00:00
|
|
|
#if defined(DEBUG_LOCKS)
|
2005-08-03 04:59:07 +00:00
|
|
|
stack_save(&lkp->lk_stack);
|
1999-01-20 14:49:12 +00:00
|
|
|
#endif
|
1997-03-25 16:27:20 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Someone else has requested upgrade. Release our shared
|
|
|
|
* lock, awaken upgrade requestor if we are the last shared
|
|
|
|
* lock, then request an exclusive lock.
|
|
|
|
*/
|
2008-02-06 00:37:14 +00:00
|
|
|
WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
|
1997-08-18 02:06:35 +00:00
|
|
|
if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
|
|
|
|
LK_WAIT_NONZERO)
|
1997-03-25 16:27:20 +00:00
|
|
|
wakeup((void *)lkp);
|
2002-08-25 13:23:09 +00:00
|
|
|
/* FALLTHROUGH exclusive request */
|
1997-03-25 16:27:20 +00:00
|
|
|
|
|
|
|
case LK_EXCLUSIVE:
|
2008-02-06 00:37:14 +00:00
|
|
|
if (!LOCKMGR_TRYOP(extflags))
|
|
|
|
WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER |
|
|
|
|
LOP_EXCLUSIVE, file, line);
|
2008-01-08 23:48:31 +00:00
|
|
|
if (lkp->lk_lockholder == td) {
|
1997-03-25 16:27:20 +00:00
|
|
|
/*
|
|
|
|
* Recursive lock.
|
|
|
|
*/
|
1999-06-28 07:54:58 +00:00
|
|
|
if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
|
1997-03-25 16:27:20 +00:00
|
|
|
panic("lockmgr: locking against myself");
|
1999-06-28 07:54:58 +00:00
|
|
|
if ((extflags & LK_CANRECURSE) != 0) {
|
|
|
|
lkp->lk_exclusivecount++;
|
2008-02-06 00:37:14 +00:00
|
|
|
WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
|
|
|
|
LOCKMGR_TRYW(extflags), file, line);
|
2005-03-25 00:00:44 +00:00
|
|
|
COUNT(td, 1);
|
1999-06-28 07:54:58 +00:00
|
|
|
break;
|
|
|
|
}
|
1997-03-25 16:27:20 +00:00
|
|
|
}
|
|
|
|
/*
|
|
|
|
* If we are just polling, check to see if we will sleep.
|
|
|
|
*/
|
1997-08-18 02:06:35 +00:00
|
|
|
if ((extflags & LK_NOWAIT) &&
|
|
|
|
(lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
|
1997-03-25 16:27:20 +00:00
|
|
|
error = EBUSY;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Try to acquire the want_exclusive flag.
|
|
|
|
*/
|
2008-02-15 21:04:36 +00:00
|
|
|
error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL),
|
|
|
|
wmesg, prio, timo, &contested, &waitstart);
|
1997-03-25 16:27:20 +00:00
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
lkp->lk_flags |= LK_WANT_EXCL;
|
|
|
|
/*
|
|
|
|
* Wait for shared locks and upgrades to finish.
|
|
|
|
*/
|
2008-02-15 21:04:36 +00:00
|
|
|
error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE |
|
|
|
|
LK_SHARE_NONZERO, wmesg, prio, timo,
|
|
|
|
&contested, &waitstart);
|
1997-03-25 16:27:20 +00:00
|
|
|
lkp->lk_flags &= ~LK_WANT_EXCL;
|
2004-08-27 01:41:28 +00:00
|
|
|
if (error) {
|
|
|
|
if (lkp->lk_flags & LK_WAIT_NONZERO)
|
|
|
|
wakeup((void *)lkp);
|
1997-03-25 16:27:20 +00:00
|
|
|
break;
|
2004-08-27 01:41:28 +00:00
|
|
|
}
|
1997-03-25 16:27:20 +00:00
|
|
|
lkp->lk_flags |= LK_HAVE_EXCL;
|
2008-01-08 23:48:31 +00:00
|
|
|
lkp->lk_lockholder = td;
|
1997-03-25 16:27:20 +00:00
|
|
|
if (lkp->lk_exclusivecount != 0)
|
|
|
|
panic("lockmgr: non-zero exclusive count");
|
|
|
|
lkp->lk_exclusivecount = 1;
|
2008-02-06 00:37:14 +00:00
|
|
|
WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
|
|
|
|
LOCKMGR_TRYW(extflags), file, line);
|
2005-03-25 00:00:44 +00:00
|
|
|
COUNT(td, 1);
|
2007-02-26 08:26:44 +00:00
|
|
|
lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
|
1999-01-20 14:49:12 +00:00
|
|
|
#if defined(DEBUG_LOCKS)
|
2005-08-03 04:59:07 +00:00
|
|
|
stack_save(&lkp->lk_stack);
|
1999-01-20 14:49:12 +00:00
|
|
|
#endif
|
1997-03-25 16:27:20 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case LK_RELEASE:
|
2008-02-13 20:44:19 +00:00
|
|
|
_lockmgr_assert(lkp, KA_LOCKED, file, line);
|
1997-03-25 16:27:20 +00:00
|
|
|
if (lkp->lk_exclusivecount != 0) {
|
2008-02-06 00:37:14 +00:00
|
|
|
if (lkp->lk_lockholder != LK_KERNPROC) {
|
|
|
|
WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE,
|
|
|
|
file, line);
|
2005-03-25 00:00:44 +00:00
|
|
|
COUNT(td, -1);
|
2008-02-06 00:37:14 +00:00
|
|
|
}
|
2008-02-13 20:44:19 +00:00
|
|
|
if (lkp->lk_exclusivecount-- == 1) {
|
1997-03-25 16:27:20 +00:00
|
|
|
lkp->lk_flags &= ~LK_HAVE_EXCL;
|
|
|
|
lkp->lk_lockholder = LK_NOPROC;
|
2006-11-11 03:18:07 +00:00
|
|
|
lock_profile_release_lock(&lkp->lk_object);
|
1997-03-25 16:27:20 +00:00
|
|
|
}
|
2008-02-06 00:37:14 +00:00
|
|
|
} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
|
|
|
|
WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
|
2005-03-25 00:00:44 +00:00
|
|
|
shareunlock(td, lkp, 1);
|
2005-09-02 15:56:01 +00:00
|
|
|
}
|
|
|
|
|
1997-08-18 02:06:35 +00:00
|
|
|
if (lkp->lk_flags & LK_WAIT_NONZERO)
|
1997-03-25 16:27:20 +00:00
|
|
|
wakeup((void *)lkp);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LK_DRAIN:
|
|
|
|
/*
|
|
|
|
* Check that we do not already hold the lock, as it can
|
|
|
|
* never drain if we do. Unfortunately, we have no way to
|
|
|
|
* check for holding a shared lock, but at least we can
|
|
|
|
* check for an exclusive one.
|
|
|
|
*/
|
2008-02-06 00:37:14 +00:00
|
|
|
if (!LOCKMGR_TRYOP(extflags))
|
|
|
|
WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER |
|
|
|
|
LOP_EXCLUSIVE, file, line);
|
2008-01-08 23:48:31 +00:00
|
|
|
if (lkp->lk_lockholder == td)
|
1997-03-25 16:27:20 +00:00
|
|
|
panic("lockmgr: draining against myself");
|
1997-08-18 02:06:35 +00:00
|
|
|
|
2008-02-15 21:04:36 +00:00
|
|
|
error = acquiredrain(lkp, extflags, wmesg, prio, timo);
|
1997-08-18 02:06:35 +00:00
|
|
|
if (error)
|
1997-03-25 16:27:20 +00:00
|
|
|
break;
|
|
|
|
lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
|
2008-01-08 23:48:31 +00:00
|
|
|
lkp->lk_lockholder = td;
|
1997-03-25 16:27:20 +00:00
|
|
|
lkp->lk_exclusivecount = 1;
|
2008-02-06 00:37:14 +00:00
|
|
|
WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
|
|
|
|
LOCKMGR_TRYW(extflags), file, line);
|
2005-03-25 00:00:44 +00:00
|
|
|
COUNT(td, 1);
|
1999-01-20 14:49:12 +00:00
|
|
|
#if defined(DEBUG_LOCKS)
|
2005-08-03 04:59:07 +00:00
|
|
|
stack_save(&lkp->lk_stack);
|
1999-01-20 14:49:12 +00:00
|
|
|
#endif
|
1997-03-25 16:27:20 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(lkp->lk_interlock);
|
1997-03-25 16:27:20 +00:00
|
|
|
panic("lockmgr: unknown locktype request %d",
|
|
|
|
flags & LK_TYPE_MASK);
|
|
|
|
/* NOTREACHED */
|
|
|
|
}
|
1997-08-18 02:06:35 +00:00
|
|
|
if ((lkp->lk_flags & LK_WAITDRAIN) &&
|
|
|
|
(lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
|
|
|
|
LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
|
1997-03-25 16:27:20 +00:00
|
|
|
lkp->lk_flags &= ~LK_WAITDRAIN;
|
|
|
|
wakeup((void *)&lkp->lk_flags);
|
|
|
|
}
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(lkp->lk_interlock);
|
1997-03-25 16:27:20 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1997-09-21 04:24:27 +00:00
|
|
|
static int
|
2008-02-15 21:04:36 +00:00
|
|
|
acquiredrain(struct lock *lkp, int extflags, const char *wmesg, int prio,
|
|
|
|
int timo)
|
|
|
|
{
|
|
|
|
const char *iwmesg;
|
|
|
|
int error, iprio, itimo;
|
|
|
|
|
|
|
|
iwmesg = (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg;
|
|
|
|
iprio = (prio != LK_PRIO_DEFAULT) ? prio : lkp->lk_prio;
|
|
|
|
itimo = (timo != LK_TIMO_DEFAULT) ? timo : lkp->lk_timo;
|
1997-09-21 04:24:27 +00:00
|
|
|
|
|
|
|
if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
|
|
|
|
return EBUSY;
|
|
|
|
}
|
|
|
|
while (lkp->lk_flags & LK_ALL) {
|
|
|
|
lkp->lk_flags |= LK_WAITDRAIN;
|
2008-02-15 21:04:36 +00:00
|
|
|
error = msleep(&lkp->lk_flags, lkp->lk_interlock, iprio, iwmesg,
|
|
|
|
((extflags & LK_TIMELOCK) ? itimo : 0));
|
1997-09-21 04:24:27 +00:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
if (extflags & LK_SLEEPFAIL) {
|
|
|
|
return ENOLCK;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Initialize a lock; required before use.
 *
 * prio/wmesg/timo give the default sleep priority, wait message, and
 * timeout used when blocking on the lock; flags carry the external
 * lock flags plus the LK_NODUP/LK_NOWITNESS initialization-only options.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{
	int iflags;

	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	/* The interlock comes from a shared pool rather than being embedded. */
	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	/*
	 * Keep only the external flags; LK_NOWITNESS and LK_NODUP are
	 * consumed below and must not remain set in lk_flags.
	 */
	lkp->lk_flags = (flags & LK_EXTFLG_MASK) & ~(LK_NOWITNESS | LK_NODUP);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
	/* Translate the init-only flags into lock_object (witness) flags. */
	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
	if (!(flags & LK_NODUP))
		iflags |= LO_DUPOK;
	if (!(flags & LK_NOWITNESS))
		iflags |= LO_WITNESS;
#ifdef DEBUG_LOCKS
	stack_zero(&lkp->lk_stack);
#endif
	lock_init(&lkp->lk_object, &lock_class_lockmgr, wmesg, NULL, iflags);
}
|
|
|
|
|
2000-10-04 01:29:17 +00:00
|
|
|
/*
 * Destroy a lock.
 *
 * The lock must be completely released: not held shared or exclusive
 * and not recursed, which the KASSERTs below enforce.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{

	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
	KASSERT((lkp->lk_flags & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0,
	    ("lockmgr still held"));
	KASSERT(lkp->lk_exclusivecount == 0, ("lockmgr still recursed"));
	/* Poison the flags so later operations can assert against use. */
	lkp->lk_flags = LK_DESTROYED;
	lock_destroy(&lkp->lk_object);
}
|
|
|
|
|
2008-01-08 23:48:31 +00:00
|
|
|
/*
 * Disown the lockmgr: transfer ownership of an exclusively-held lock
 * from the current thread to LK_KERNPROC so it can be released later
 * by a different context.
 */
void
_lockmgr_disown(struct lock *lkp, const char *file, int line)
{
	struct thread *td;

	td = curthread;
	KASSERT(panicstr != NULL || (lkp->lk_flags & LK_DESTROYED) == 0,
	    ("%s: %p lockmgr is destroyed", __func__, lkp));
	/* Caller must hold the lock exclusively and non-recursed. */
	_lockmgr_assert(lkp, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * Drop the lock reference and switch the owner.  No extra
	 * synchronization is needed here: td_locks is only accessed by
	 * curthread and lk_lockholder only needs a single write.  Note
	 * also that the lock owner can already be LK_KERNPROC, in which
	 * case the per-thread accounting decrement is skipped.
	 */
	if (lkp->lk_lockholder == td) {
		WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE, file, line);
		td->td_locks--;
	}
	lkp->lk_lockholder = LK_KERNPROC;
}
|
|
|
|
|
1997-09-21 04:24:27 +00:00
|
|
|
/*
 * Determine the status of a lock.
 *
 * Returns LK_EXCLUSIVE if td holds the lock exclusively, LK_EXCLOTHER
 * if another thread does, LK_SHARED if it is held shared, or 0 if it
 * is unheld.  td must be curthread.
 */
int
lockstatus(lkp, td)
	struct lock *lkp;
	struct thread *td;
{
	int lock_type = 0;
	int interlocked;

	KASSERT(td == curthread,
	    ("%s: thread passed argument (%p) is not valid", __func__, td));
	KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
	    ("%s: %p lockmgr is destroyed", __func__, lkp));

	/*
	 * Skip taking the interlock while the kernel debugger is active:
	 * acquiring a mutex from the debugger could hang or recurse.
	 */
	if (!kdb_active) {
		interlocked = 1;
		mtx_lock(lkp->lk_interlock);
	} else
		interlocked = 0;
	if (lkp->lk_exclusivecount != 0) {
		if (lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	if (interlocked)
		mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}
|
|
|
|
|
2006-10-02 02:06:27 +00:00
|
|
|
/*
|
|
|
|
* Determine the number of waiters on a lock.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
lockwaiters(lkp)
|
|
|
|
struct lock *lkp;
|
|
|
|
{
|
|
|
|
int count;
|
|
|
|
|
2008-02-13 20:44:19 +00:00
|
|
|
KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
|
|
|
|
("%s: %p lockmgr is destroyed", __func__, lkp));
|
2006-10-02 02:06:27 +00:00
|
|
|
mtx_lock(lkp->lk_interlock);
|
|
|
|
count = lkp->lk_waitcount;
|
|
|
|
mtx_unlock(lkp->lk_interlock);
|
|
|
|
return (count);
|
|
|
|
}
|
|
|
|
|
1997-03-25 16:27:20 +00:00
|
|
|
/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		/*
		 * NOTE(review): lk_lockholder is dereferenced here; if the
		 * lock was disowned (holder == LK_KERNPROC) this presumably
		 * still points at valid storage — confirm against callers.
		 */
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
#ifdef DEBUG_LOCKS
	stack_print_ddb(&lkp->lk_stack);
#endif
}
|
2006-08-15 16:42:16 +00:00
|
|
|
|
2008-02-13 20:44:19 +00:00
|
|
|
#ifdef INVARIANT_SUPPORT
|
|
|
|
#ifndef INVARIANTS
|
|
|
|
#undef _lockmgr_assert
|
|
|
|
#endif
|
|
|
|
|
|
|
|
void
|
|
|
|
_lockmgr_assert(struct lock *lkp, int what, const char *file, int line)
|
|
|
|
{
|
|
|
|
struct thread *td;
|
|
|
|
u_int x;
|
|
|
|
int slocked = 0;
|
|
|
|
|
|
|
|
x = lkp->lk_flags;
|
|
|
|
td = lkp->lk_lockholder;
|
|
|
|
if (panicstr != NULL)
|
|
|
|
return;
|
|
|
|
switch (what) {
|
|
|
|
case KA_SLOCKED:
|
|
|
|
case KA_SLOCKED | KA_NOTRECURSED:
|
|
|
|
case KA_SLOCKED | KA_RECURSED:
|
|
|
|
slocked = 1;
|
|
|
|
case KA_LOCKED:
|
|
|
|
case KA_LOCKED | KA_NOTRECURSED:
|
|
|
|
case KA_LOCKED | KA_RECURSED:
|
|
|
|
#ifdef WITNESS
|
|
|
|
/*
|
|
|
|
* We cannot trust WITNESS if the lock is held in
|
|
|
|
* exclusive mode and a call to lockmgr_disown() happened.
|
|
|
|
* Workaround this skipping the check if the lock is
|
|
|
|
* held in exclusive mode even for the KA_LOCKED case.
|
|
|
|
*/
|
|
|
|
if (slocked || (x & LK_HAVE_EXCL) == 0) {
|
|
|
|
witness_assert(&lkp->lk_object, what, file, line);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
if (LOCKMGR_UNHELD(x) || ((x & LK_SHARE_NONZERO) == 0 &&
|
|
|
|
(slocked || LOCKMGR_NOTOWNER(td))))
|
|
|
|
panic("Lock %s not %slocked @ %s:%d\n",
|
|
|
|
lkp->lk_object.lo_name, slocked ? "share " : "",
|
|
|
|
file, line);
|
|
|
|
if ((x & LK_SHARE_NONZERO) == 0) {
|
|
|
|
if (lockmgr_recursed(lkp)) {
|
|
|
|
if (what & KA_NOTRECURSED)
|
|
|
|
panic("Lock %s recursed @ %s:%d\n",
|
|
|
|
lkp->lk_object.lo_name, file, line);
|
|
|
|
} else if (what & KA_RECURSED)
|
|
|
|
panic("Lock %s not recursed @ %s:%d\n",
|
|
|
|
lkp->lk_object.lo_name, file, line);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case KA_XLOCKED:
|
|
|
|
case KA_XLOCKED | KA_NOTRECURSED:
|
|
|
|
case KA_XLOCKED | KA_RECURSED:
|
|
|
|
if ((x & LK_HAVE_EXCL) == 0 || LOCKMGR_NOTOWNER(td))
|
|
|
|
panic("Lock %s not exclusively locked @ %s:%d\n",
|
|
|
|
lkp->lk_object.lo_name, file, line);
|
|
|
|
if (lockmgr_recursed(lkp)) {
|
|
|
|
if (what & KA_NOTRECURSED)
|
|
|
|
panic("Lock %s recursed @ %s:%d\n",
|
|
|
|
lkp->lk_object.lo_name, file, line);
|
|
|
|
} else if (what & KA_RECURSED)
|
|
|
|
panic("Lock %s not recursed @ %s:%d\n",
|
|
|
|
lkp->lk_object.lo_name, file, line);
|
|
|
|
break;
|
|
|
|
case KA_UNLOCKED:
|
|
|
|
if (td == curthread || td == LK_KERNPROC)
|
|
|
|
panic("Lock %s exclusively locked @ %s:%d\n",
|
|
|
|
lkp->lk_object.lo_name, file, line);
|
|
|
|
break;
|
|
|
|
case KA_HELD:
|
|
|
|
case KA_UNHELD:
|
|
|
|
if (LOCKMGR_UNHELD(x)) {
|
|
|
|
if (what & KA_HELD)
|
|
|
|
panic("Lock %s not locked by anyone @ %s:%d\n",
|
|
|
|
lkp->lk_object.lo_name, file, line);
|
|
|
|
} else if (what & KA_UNHELD)
|
|
|
|
panic("Lock %s locked by someone @ %s:%d\n",
|
|
|
|
lkp->lk_object.lo_name, file, line);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
panic("Unknown lockmgr lock assertion: 0x%x @ %s:%d", what,
|
|
|
|
file, line);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* INVARIANT_SUPPORT */
|
|
|
|
|
2006-08-15 16:42:16 +00:00
|
|
|
#ifdef DDB
|
2006-08-15 18:29:01 +00:00
|
|
|
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on a 'struct lock'. If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lkp;

	lkp = td->td_wchan;

	/* Simple test to see if wchan points to a lockmgr lock. */
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg)
		goto ok;

	/*
	 * If this thread is doing a DRAIN, then it would be asleep on
	 * &lkp->lk_flags rather than lkp.
	 */
	lkp = (struct lock *)((char *)td->td_wchan -
	    offsetof(struct lock, lk_flags));
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
		goto ok;

	/* Doesn't seem to be a lockmgr lock. */
	return (0);

ok:
	/* Ok, we think we have a lockmgr lock, so output some details. */
	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
	if (lkp->lk_sharecount) {
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
		/* Shared locks have no single owner to chain through. */
		*ownerp = NULL;
	} else {
		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
		*ownerp = lkp->lk_lockholder;
	}
	return (1);
}
|
|
|
|
|
2006-11-13 05:41:46 +00:00
|
|
|
/*
 * DDB "show lock" handler for lockmgr locks: print the wait message,
 * current state (shared/exclusive/unlocked), holder, and waiter count.
 */
void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lkp;

	lkp = (struct lock *)lock;

	db_printf(" lock type: %s\n", lkp->lk_wmesg);
	db_printf(" state: ");
	if (lkp->lk_sharecount)
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		td = lkp->lk_lockholder;
		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
		    td->td_proc->p_pid, td->td_name);
	} else
		db_printf("UNLOCKED\n");
	if (lkp->lk_waitcount > 0)
		db_printf(" waiters: %d\n", lkp->lk_waitcount);
}
|
|
|
|
#endif
|