- Add real assertions to lockmgr locking primitives.

A couple of notes about this:
  * WITNESS support, when enabled, is only used for shared locks, in
    order to avoid problems with "disowned" locks
  * KA_HELD and KA_UNHELD exist only in the lockmgr namespace, in order
    to assert that a generic thread (not necessarily curthread) does or
    does not own the lock.  Strictly speaking this kind of check is
    bogus, but it is very widespread in consumer code.  So, for the
    moment, we cater to this untrusted behaviour until the consumers are
    fixed and the options can be removed (hopefully during the
    8.0-CURRENT lifecycle)
  * Implementing KA_HELD and KA_UNHELD (not supported natively by
    WITNESS) made it necessary to introduce LA_MASKASSERT, which
    specifies the range reserved for the default lock assertion flags
  * In all other respects, lockmgr_assert() follows exactly what the
    other locking primitives offer for this operation (see the sketch
    after this list)
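
  For illustration, this is the consumer-side idiom the new interface is
  meant to support (foo_frob() and struct foo_softc are made up for this
  sketch; lockmgr_assert() and the KA_* flags are the ones introduced
  here):

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/lockmgr.h>

	/* Hypothetical consumer object; only the lock member matters here. */
	struct foo_softc {
		struct lock	sc_lock;
		int		sc_busy;
	};

	static void
	foo_frob(struct foo_softc *sc)
	{

		/*
		 * Panics under INVARIANTS if the lock is not exclusively
		 * held or if it is recursed; compiles away otherwise.
		 */
		lockmgr_assert(&sc->sc_lock, KA_XLOCKED | KA_NOTRECURSED);
		sc->sc_busy = 1;
	}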

- Build real assertions for buffer cache locks on top of
  lockmgr_assert().  They can be used through the BUF_ASSERT_*(bp)
  paradigm (see the sketch below).
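
  A minimal before/after sketch of the conversion this enables (the
  myfs_strategy() name is made up; the macros are the ones added here):

	/* Before: open-coded check (myfs_strategy is a made-up caller). */
	KASSERT(BUF_ISLOCKED(bp), ("myfs_strategy: bp %p not locked", bp));

	/*
	 * After: BUF_ASSERT_HELD() maps to KA_HELD underneath, so it also
	 * accepts buffers whose lock has been disowned to LK_KERNPROC,
	 * something a curthread-based assertion could not express.
	 */
	BUF_ASSERT_HELD(bp);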

- Add checks at lock destruction time and use a cookie for verifying
  lock integrity on every subsequent operation (sketched below).
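
  Concretely, the cookie is the new LK_DESTROYED flag; the pattern added
  to the lockmgr implementation, condensed (interlock handling and the
  panicstr escape are omitted here):

	/* lockdestroy(): the lock must be idle, then stamp the cookie. */
	KASSERT((lkp->lk_flags & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0,
	    ("lockmgr still held"));
	KASSERT(lkp->lk_exclusivecount == 0, ("lockmgr still recursed"));
	lkp->lk_flags = LK_DESTROYED;

	/* _lockmgr() and friends: refuse to touch a destroyed lock. */
	if (lkp->lk_flags & LK_DESTROYED)
		panic("%s: %p lockmgr is destroyed", __func__, lkp);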

- Redefine BUF_LOCKFREE() so that it no longer performs a direct
  assertion but instead relies on the aforementioned destruction-time
  check (see the sketch below).
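
  In sys/buf.h this amounts to (old vs. new form, as in the diff below):

	/* Old: explicit runtime check, then destroy. */
	#define BUF_LOCKFREE(bp)					\
	do {								\
		if (BUF_ISLOCKED(bp))					\
			panic("free locked buf");			\
		lockdestroy(&(bp)->b_lock);				\
	} while (0)

	/*
	 * New: lockdestroy() itself now asserts that the lock is neither
	 * held nor recursed, so the macro is a bare lockdestroy().
	 */
	#define BUF_LOCKFREE(bp)	(lockdestroy(&(bp)->b_lock))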

The KPI is evidently broken by this change, so a __FreeBSD_version bump
and a manpage update are necessary and will be committed soon.

Side note: lockmgr_assert() will soon be used to implement real
assertions in the vnode namespace, replacing the legacy and still bogus
VOP_ISLOCKED() approach.

Tested by:      kris (earlier version)
Reviewed by:    jhb
Author:  Attilio Rao
Date:    2008-02-13 20:44:19 +00:00
Commit:  84887fa362 (parent 89d1d7886a)
Notes:   svn2git 2020-12-20 02:59:44 +00:00
         svn path=/head/; revision=176249

8 changed files with 186 additions and 57 deletions


@ -86,8 +86,7 @@ xfs_buf_get_empty(size_t size, xfs_buftarg_t *target)
bp->b_bufsize = size;
bp->b_bcount = size;
KASSERT(BUF_ISLOCKED(bp),
("xfs_buf_get_empty: bp %p not locked",bp));
BUF_ASSERT_HELD(bp);
xfs_buf_set_target(bp, target);
}
@ -103,8 +102,7 @@ xfs_buf_get_noaddr(size_t len, xfs_buftarg_t *target)
bp = geteblk(len);
if (bp != NULL) {
KASSERT(BUF_ISLOCKED(bp),
("xfs_buf_get_empty: bp %p not locked",bp));
BUF_ASSERT_HELD(bp);
xfs_buf_set_target(bp, target);
}


@ -62,6 +62,8 @@ __FBSDID("$FreeBSD$");
#define LOCKMGR_TRYOP(x) ((x) & LK_NOWAIT)
#define LOCKMGR_TRYW(x) (LOCKMGR_TRYOP((x)) ? LOP_TRYLOCK : 0)
#define LOCKMGR_UNHELD(x) (((x) & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0)
#define LOCKMGR_NOTOWNER(td) ((td) != curthread && (td) != LK_KERNPROC)
static void assert_lockmgr(struct lock_object *lock, int what);
#ifdef DDB
@ -82,6 +84,10 @@ struct lock_class lock_class_lockmgr = {
.lc_unlock = unlock_lockmgr,
};
#ifndef INVARIANTS
#define _lockmgr_assert(lkp, what, file, line)
#endif
/*
* Locking primitives implementation.
* Locks provide shared/exclusive sychronization.
@ -205,6 +211,15 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp, char *file,
error = 0;
td = curthread;
#ifdef INVARIANTS
if (lkp->lk_flags & LK_DESTROYED) {
if (flags & LK_INTERLOCK)
mtx_unlock(interlkp);
if (panicstr != NULL)
return (0);
panic("%s: %p lockmgr is destroyed", __func__, lkp);
}
#endif
if ((flags & LK_INTERNAL) == 0)
mtx_lock(lkp->lk_interlock);
CTR6(KTR_LOCK,
@ -280,10 +295,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp, char *file,
/* FALLTHROUGH downgrade */
case LK_DOWNGRADE:
KASSERT(lkp->lk_lockholder == td && lkp->lk_exclusivecount != 0,
("lockmgr: not holding exclusive lock "
"(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
lkp->lk_lockholder, td, lkp->lk_exclusivecount));
_lockmgr_assert(lkp, KA_XLOCKED, file, line);
sharelock(td, lkp, lkp->lk_exclusivecount);
WITNESS_DOWNGRADE(&lkp->lk_object, 0, file, line);
COUNT(td, -lkp->lk_exclusivecount);
@ -303,10 +315,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp, char *file,
* after the upgrade). If we return an error, the file
* will always be unlocked.
*/
if (lkp->lk_lockholder == td)
panic("lockmgr: upgrade exclusive lock");
if (lkp->lk_sharecount <= 0)
panic("lockmgr: upgrade without shared");
_lockmgr_assert(lkp, KA_SLOCKED, file, line);
shareunlock(td, lkp, 1);
if (lkp->lk_sharecount == 0)
lock_profile_release_lock(&lkp->lk_object);
@ -419,33 +428,21 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp, char *file,
break;
case LK_RELEASE:
_lockmgr_assert(lkp, KA_LOCKED, file, line);
if (lkp->lk_exclusivecount != 0) {
if (lkp->lk_lockholder != td &&
lkp->lk_lockholder != LK_KERNPROC) {
panic("lockmgr: thread %p, not %s %p unlocking",
td, "exclusive lock holder",
lkp->lk_lockholder);
}
if (lkp->lk_lockholder != LK_KERNPROC) {
WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE,
file, line);
COUNT(td, -1);
}
if (lkp->lk_exclusivecount == 1) {
if (lkp->lk_exclusivecount-- == 1) {
lkp->lk_flags &= ~LK_HAVE_EXCL;
lkp->lk_lockholder = LK_NOPROC;
lkp->lk_exclusivecount = 0;
lock_profile_release_lock(&lkp->lk_object);
} else {
lkp->lk_exclusivecount--;
}
} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
shareunlock(td, lkp, 1);
} else {
printf("lockmgr: thread %p unlocking unheld lock\n",
td);
kdb_backtrace();
}
if (lkp->lk_flags & LK_WAIT_NONZERO)
@ -562,6 +559,10 @@ lockdestroy(lkp)
CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
lkp, lkp->lk_wmesg);
KASSERT((lkp->lk_flags & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0,
("lockmgr still held"));
KASSERT(lkp->lk_exclusivecount == 0, ("lockmgr still recursed"));
lkp->lk_flags = LK_DESTROYED;
lock_destroy(&lkp->lk_object);
}
@ -574,12 +575,9 @@ _lockmgr_disown(struct lock *lkp, const char *file, int line)
struct thread *td;
td = curthread;
KASSERT(panicstr != NULL || lkp->lk_exclusivecount,
("%s: %p lockmgr must be exclusively locked", __func__, lkp));
KASSERT(panicstr != NULL || lkp->lk_lockholder == td ||
lkp->lk_lockholder == LK_KERNPROC,
("%s: %p lockmgr must be locked by curthread (%p)", __func__, lkp,
td));
KASSERT(panicstr != NULL || (lkp->lk_flags & LK_DESTROYED) == 0,
("%s: %p lockmgr is destroyed", __func__, lkp));
_lockmgr_assert(lkp, KA_XLOCKED | KA_NOTRECURSED, file, line);
/*
* Drop the lock reference and switch the owner. This will result
@ -608,6 +606,8 @@ lockstatus(lkp, td)
KASSERT(td == curthread,
("%s: thread passed argument (%p) is not valid", __func__, td));
KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
("%s: %p lockmgr is destroyed", __func__, lkp));
if (!kdb_active) {
interlocked = 1;
@ -635,6 +635,8 @@ lockwaiters(lkp)
{
int count;
KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
("%s: %p lockmgr is destroyed", __func__, lkp));
mtx_lock(lkp->lk_interlock);
count = lkp->lk_waitcount;
mtx_unlock(lkp->lk_interlock);
@ -664,6 +666,93 @@ lockmgr_printinfo(lkp)
#endif
}
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _lockmgr_assert
#endif
void
_lockmgr_assert(struct lock *lkp, int what, const char *file, int line)
{
struct thread *td;
u_int x;
int slocked = 0;
x = lkp->lk_flags;
td = lkp->lk_lockholder;
if (panicstr != NULL)
return;
switch (what) {
case KA_SLOCKED:
case KA_SLOCKED | KA_NOTRECURSED:
case KA_SLOCKED | KA_RECURSED:
slocked = 1;
case KA_LOCKED:
case KA_LOCKED | KA_NOTRECURSED:
case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS
/*
* We cannot trust WITNESS if the lock is held in
* exclusive mode and a call to lockmgr_disown() happened.
* Workaround this skipping the check if the lock is
* held in exclusive mode even for the KA_LOCKED case.
*/
if (slocked || (x & LK_HAVE_EXCL) == 0) {
witness_assert(&lkp->lk_object, what, file, line);
break;
}
#endif
if (LOCKMGR_UNHELD(x) || ((x & LK_SHARE_NONZERO) == 0 &&
(slocked || LOCKMGR_NOTOWNER(td))))
panic("Lock %s not %slocked @ %s:%d\n",
lkp->lk_object.lo_name, slocked ? "share " : "",
file, line);
if ((x & LK_SHARE_NONZERO) == 0) {
if (lockmgr_recursed(lkp)) {
if (what & KA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n",
lkp->lk_object.lo_name, file, line);
} else if (what & KA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
lkp->lk_object.lo_name, file, line);
}
break;
case KA_XLOCKED:
case KA_XLOCKED | KA_NOTRECURSED:
case KA_XLOCKED | KA_RECURSED:
if ((x & LK_HAVE_EXCL) == 0 || LOCKMGR_NOTOWNER(td))
panic("Lock %s not exclusively locked @ %s:%d\n",
lkp->lk_object.lo_name, file, line);
if (lockmgr_recursed(lkp)) {
if (what & KA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n",
lkp->lk_object.lo_name, file, line);
} else if (what & KA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
lkp->lk_object.lo_name, file, line);
break;
case KA_UNLOCKED:
if (td == curthread || td == LK_KERNPROC)
panic("Lock %s exclusively locked @ %s:%d\n",
lkp->lk_object.lo_name, file, line);
break;
case KA_HELD:
case KA_UNHELD:
if (LOCKMGR_UNHELD(x)) {
if (what & KA_HELD)
panic("Lock %s not locked by anyone @ %s:%d\n",
lkp->lk_object.lo_name, file, line);
} else if (what & KA_UNHELD)
panic("Lock %s locked by someone @ %s:%d\n",
lkp->lk_object.lo_name, file, line);
break;
default:
panic("Unknown lockmgr lock assertion: 0x%x @ %s:%d", what,
file, line);
}
}
#endif /* INVARIANT_SUPPORT */
#ifdef DDB
/*
* Check to see if a thread that is blocked on a sleep queue is actually


@ -658,11 +658,11 @@ bremfree(struct buf *bp)
{
CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
KASSERT(BUF_ISLOCKED(bp), ("bremfree: buf must be locked."));
KASSERT((bp->b_flags & B_REMFREE) == 0,
("bremfree: buffer %p already marked for delayed removal.", bp));
KASSERT(bp->b_qindex != QUEUE_NONE,
("bremfree: buffer %p not on a queue.", bp));
BUF_ASSERT_HELD(bp);
bp->b_flags |= B_REMFREE;
/* Fixup numfreebuffers count. */
@ -695,9 +695,9 @@ bremfreel(struct buf *bp)
{
CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
bp, bp->b_vp, bp->b_flags);
KASSERT(BUF_ISLOCKED(bp), ("bremfreel: buffer %p not locked.", bp));
KASSERT(bp->b_qindex != QUEUE_NONE,
("bremfreel: buffer %p not on a queue.", bp));
BUF_ASSERT_HELD(bp);
mtx_assert(&bqlock, MA_OWNED);
TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
@ -834,8 +834,7 @@ bufwrite(struct buf *bp)
oldflags = bp->b_flags;
if (!BUF_ISLOCKED(bp))
panic("bufwrite: buffer is not busy???");
BUF_ASSERT_HELD(bp);
if (bp->b_pin_count > 0)
bunpin_wait(bp);
@ -952,7 +951,7 @@ bdwrite(struct buf *bp)
CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
KASSERT(BUF_ISLOCKED(bp), ("bdwrite: buffer is not busy"));
BUF_ASSERT_HELD(bp);
if (bp->b_flags & B_INVAL) {
brelse(bp);
@ -1047,10 +1046,10 @@ bdirty(struct buf *bp)
CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
bp, bp->b_vp, bp->b_flags);
KASSERT(BUF_ISLOCKED(bp), ("bdirty: bp %p not locked",bp));
KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
BUF_ASSERT_HELD(bp);
bp->b_flags &= ~(B_RELBUF);
bp->b_iocmd = BIO_WRITE;
@ -1081,7 +1080,7 @@ bundirty(struct buf *bp)
KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
KASSERT(BUF_ISLOCKED(bp), ("bundirty: bp %p not locked",bp));
BUF_ASSERT_HELD(bp);
if (bp->b_flags & B_DELWRI) {
bp->b_flags &= ~B_DELWRI;
@ -2660,7 +2659,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
bp->b_flags &= ~B_DONE;
}
CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
KASSERT(BUF_ISLOCKED(bp), ("getblk: bp %p not locked",bp));
BUF_ASSERT_HELD(bp);
KASSERT(bp->b_bufobj == bo,
("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
return (bp);
@ -2681,7 +2680,7 @@ geteblk(int size)
continue;
allocbuf(bp, size);
bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
KASSERT(BUF_ISLOCKED(bp), ("geteblk: bp %p not locked",bp));
BUF_ASSERT_HELD(bp);
return (bp);
}
@ -2707,8 +2706,7 @@ allocbuf(struct buf *bp, int size)
int newbsize, mbsize;
int i;
if (!BUF_ISLOCKED(bp))
panic("allocbuf: buffer not busy");
BUF_ASSERT_HELD(bp);
if (bp->b_kvasize < size)
panic("allocbuf: buffer too small");
@ -3150,8 +3148,8 @@ bufdone(struct buf *bp)
CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
dropobj = NULL;
KASSERT(BUF_ISLOCKED(bp), ("biodone: bp %p not busy", bp));
KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
BUF_ASSERT_HELD(bp);
runningbufwakeup(bp);
if (bp->b_iocmd == BIO_WRITE)
@ -3175,7 +3173,7 @@ bufdone(struct buf *bp)
void
bufdone_finish(struct buf *bp)
{
KASSERT(BUF_ISLOCKED(bp), ("biodone: bp %p not busy", bp));
BUF_ASSERT_HELD(bp);
if (!LIST_EMPTY(&bp->b_dep))
buf_complete(bp);


@ -2448,7 +2448,7 @@ nfs4_strategy(struct vop_strategy_args *ap)
KASSERT(!(bp->b_flags & B_DONE),
("nfs4_strategy: buffer %p unexpectedly marked B_DONE", bp));
KASSERT(BUF_ISLOCKED(bp), ("nfs4_strategy: buffer %p not locked", bp));
BUF_ASSERT_HELD(bp);
if (bp->b_iocmd == BIO_READ)
cr = bp->b_rcred;
@ -2808,8 +2808,7 @@ nfs4_writebp(struct buf *bp, int force __unused, struct thread *td)
off_t off;
#endif
if (!BUF_ISLOCKED(bp))
panic("bwrite: buffer is not locked???");
BUF_ASSERT_HELD(bp);
if (bp->b_flags & B_INVAL) {
brelse(bp);


@ -2694,7 +2694,7 @@ nfs_strategy(struct vop_strategy_args *ap)
KASSERT(!(bp->b_flags & B_DONE),
("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
KASSERT(BUF_ISLOCKED(bp), ("nfs_strategy: buffer %p not locked", bp));
BUF_ASSERT_HELD(bp);
if (bp->b_iocmd == BIO_READ)
cr = bp->b_rcred;
@ -3088,8 +3088,7 @@ nfs_writebp(struct buf *bp, int force __unused, struct thread *td)
off_t off;
#endif
if (!BUF_ISLOCKED(bp))
panic("bwrite: buffer is not locked???");
BUF_ASSERT_HELD(bp);
if (bp->b_flags & B_INVAL) {
brelse(bp);


@ -333,12 +333,33 @@ BUF_UNLOCK(struct buf *bp)
/*
* Free a buffer lock.
*/
#define BUF_LOCKFREE(bp) \
do { \
if (BUF_ISLOCKED(bp)) \
panic("free locked buf"); \
lockdestroy(&(bp)->b_lock); \
} while (0)
#define BUF_LOCKFREE(bp) \
(lockdestroy(&(bp)->b_lock))
/*
* Buffer lock assertions.
*/
#if defined(INVARIANTS) && defined(INVARIANT_SUPPORT)
#define BUF_ASSERT_LOCKED(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_LOCKED, LOCK_FILE, LOCK_LINE)
#define BUF_ASSERT_SLOCKED(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_SLOCKED, LOCK_FILE, LOCK_LINE)
#define BUF_ASSERT_XLOCKED(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_XLOCKED, LOCK_FILE, LOCK_LINE)
#define BUF_ASSERT_UNLOCKED(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_UNLOCKED, LOCK_FILE, LOCK_LINE)
#define BUF_ASSERT_HELD(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_HELD, LOCK_FILE, LOCK_LINE)
#define BUF_ASSERT_UNHELD(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_UNHELD, LOCK_FILE, LOCK_LINE)
#else
#define BUF_ASSERT_LOCKED(bp)
#define BUF_ASSERT_SLOCKED(bp)
#define BUF_ASSERT_XLOCKED(bp)
#define BUF_ASSERT_UNLOCKED(bp)
#define BUF_ASSERT_HELD(bp)
#define BUF_ASSERT_UNHELD(bp)
#endif
#ifdef _SYS_PROC_H_ /* Avoid #include <sys/proc.h> pollution */
/*


@ -105,6 +105,7 @@ struct lock_class {
#define LOP_DUPOK 0x00000010 /* Don't check for duplicate acquires */
/* Flags passed to witness_assert. */
#define LA_MASKASSERT 0x000000ff /* Mask for witness defined asserts. */
#define LA_UNLOCKED 0x00000000 /* Lock is unlocked. */
#define LA_LOCKED 0x00000001 /* Lock is at least share locked. */
#define LA_SLOCKED 0x00000002 /* Lock is exactly share locked. */


@ -138,12 +138,27 @@ struct lock {
#define LK_WAITDRAIN 0x00080000 /* process waiting for lock to drain */
#define LK_DRAINING 0x00100000 /* lock is being drained */
#define LK_INTERNAL 0x00200000 /* The internal lock is already held */
#define LK_DESTROYED 0x00400000 /* lock is destroyed */
/*
* Internal state flags corresponding to lk_sharecount, and lk_waitcount
*/
#define LK_SHARE_NONZERO 0x01000000
#define LK_WAIT_NONZERO 0x02000000
/*
* Assertion flags.
*/
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define KA_BASE (LA_MASKASSERT + 1)
#define KA_LOCKED LA_LOCKED
#define KA_SLOCKED LA_SLOCKED
#define KA_XLOCKED LA_XLOCKED
#define KA_UNLOCKED LA_UNLOCKED
#define KA_RECURSED LA_RECURSED
#define KA_NOTRECURSED LA_NOTRECURSED
#define KA_HELD (KA_BASE << 0x00)
#define KA_UNHELD (KA_BASE << 0x01)
#endif
/*
* Lock return status.
@ -176,6 +191,9 @@ void lockdestroy(struct lock *);
int _lockmgr(struct lock *, u_int flags, struct mtx *, char *file,
int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void _lockmgr_assert(struct lock *, int what, const char *, int);
#endif
void _lockmgr_disown(struct lock *, const char *, int);
void lockmgr_printinfo(struct lock *);
int lockstatus(struct lock *, struct thread *);
@ -187,6 +205,12 @@ int lockwaiters(struct lock *);
_lockmgr_disown((lock), LOCK_FILE, LOCK_LINE)
#define lockmgr_recursed(lkp) \
((lkp)->lk_exclusivecount > 1)
#ifdef INVARIANTS
#define lockmgr_assert(lkp, what) \
_lockmgr_assert((lkp), (what), LOCK_FILE, LOCK_LINE)
#else
#define lockmgr_assert(lkp, what)
#endif
#ifdef DDB
int lockmgr_chain(struct thread *td, struct thread **ownerp);
#endif