Handle lock recursion differently by always checking against LO_RECURSABLE
instead of the lock's own flag itself. Tested by: pho
This commit is contained in:
parent
11632ace3a
commit
f083018223
@ -51,8 +51,7 @@ __FBSDID("$FreeBSD$");
|
||||
#include <ddb/ddb.h>
|
||||
#endif
|
||||
|
||||
CTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) ==
|
||||
(LK_CANRECURSE | LK_NOSHARE));
|
||||
CTASSERT((LK_NOSHARE & LO_CLASSFLAGS) == LK_NOSHARE);
|
||||
|
||||
#define SQ_EXCLUSIVE_QUEUE 0
|
||||
#define SQ_SHARED_QUEUE 1
|
||||
@ -316,7 +315,9 @@ lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
|
||||
|
||||
MPASS((flags & ~LK_INIT_MASK) == 0);
|
||||
|
||||
iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
|
||||
iflags = LO_SLEEPABLE | LO_UPGRADABLE;
|
||||
if (flags & LK_CANRECURSE)
|
||||
iflags |= LO_RECURSABLE;
|
||||
if ((flags & LK_NODUP) == 0)
|
||||
iflags |= LO_DUPOK;
|
||||
if (flags & LK_NOPROFILE)
|
||||
@ -325,7 +326,7 @@ lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
|
||||
iflags |= LO_WITNESS;
|
||||
if (flags & LK_QUIET)
|
||||
iflags |= LO_QUIET;
|
||||
iflags |= flags & (LK_CANRECURSE | LK_NOSHARE);
|
||||
iflags |= flags & LK_NOSHARE;
|
||||
|
||||
lk->lk_lock = LK_UNLOCKED;
|
||||
lk->lk_recurse = 0;
|
||||
@ -530,7 +531,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
|
||||
*/
|
||||
if (lockmgr_xlocked(lk)) {
|
||||
if ((flags & LK_CANRECURSE) == 0 &&
|
||||
(lk->lock_object.lo_flags & LK_CANRECURSE) == 0) {
|
||||
(lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
|
||||
|
||||
/*
|
||||
* If the lock is expected to not panic just
|
||||
|
@ -51,8 +51,6 @@ __FBSDID("$FreeBSD$");
|
||||
|
||||
#include <machine/cpu.h>
|
||||
|
||||
CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);
|
||||
|
||||
#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
|
||||
#define ADAPTIVE_RWLOCKS
|
||||
#endif
|
||||
@ -177,16 +175,17 @@ rw_init_flags(struct rwlock *rw, const char *name, int opts)
|
||||
MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
|
||||
RW_RECURSE)) == 0);
|
||||
|
||||
flags = LO_UPGRADABLE | LO_RECURSABLE;
|
||||
flags = LO_UPGRADABLE;
|
||||
if (opts & RW_DUPOK)
|
||||
flags |= LO_DUPOK;
|
||||
if (opts & RW_NOPROFILE)
|
||||
flags |= LO_NOPROFILE;
|
||||
if (!(opts & RW_NOWITNESS))
|
||||
flags |= LO_WITNESS;
|
||||
if (opts & RW_RECURSE)
|
||||
flags |= LO_RECURSABLE;
|
||||
if (opts & RW_QUIET)
|
||||
flags |= LO_QUIET;
|
||||
flags |= opts & RW_RECURSE;
|
||||
|
||||
rw->rw_lock = RW_UNLOCKED;
|
||||
rw->rw_recurse = 0;
|
||||
@ -249,7 +248,8 @@ _rw_try_wlock(struct rwlock *rw, const char *file, int line)
|
||||
KASSERT(rw->rw_lock != RW_DESTROYED,
|
||||
("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
|
||||
|
||||
if (rw_wlocked(rw) && (rw->lock_object.lo_flags & RW_RECURSE) != 0) {
|
||||
if (rw_wlocked(rw) &&
|
||||
(rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
|
||||
rw->rw_recurse++;
|
||||
rval = 1;
|
||||
} else
|
||||
@ -646,7 +646,7 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
|
||||
#endif
|
||||
|
||||
if (rw_wlocked(rw)) {
|
||||
KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
|
||||
KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
|
||||
("%s: recursing but non-recursive rw %s @ %s:%d\n",
|
||||
__func__, rw->lock_object.lo_name, file, line));
|
||||
rw->rw_recurse++;
|
||||
|
@ -66,8 +66,7 @@ __FBSDID("$FreeBSD$");
|
||||
#define ADAPTIVE_SX
|
||||
#endif
|
||||
|
||||
CTASSERT(((SX_NOADAPTIVE | SX_RECURSE) & LO_CLASSFLAGS) ==
|
||||
(SX_NOADAPTIVE | SX_RECURSE));
|
||||
CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
|
||||
|
||||
/* Handy macros for sleep queues. */
|
||||
#define SQ_EXCLUSIVE_QUEUE 0
|
||||
@ -207,17 +206,19 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
|
||||
MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
|
||||
SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
|
||||
|
||||
flags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
|
||||
flags = LO_SLEEPABLE | LO_UPGRADABLE;
|
||||
if (opts & SX_DUPOK)
|
||||
flags |= LO_DUPOK;
|
||||
if (opts & SX_NOPROFILE)
|
||||
flags |= LO_NOPROFILE;
|
||||
if (!(opts & SX_NOWITNESS))
|
||||
flags |= LO_WITNESS;
|
||||
if (opts & SX_RECURSE)
|
||||
flags |= LO_RECURSABLE;
|
||||
if (opts & SX_QUIET)
|
||||
flags |= LO_QUIET;
|
||||
|
||||
flags |= opts & (SX_NOADAPTIVE | SX_RECURSE);
|
||||
flags |= opts & SX_NOADAPTIVE;
|
||||
sx->sx_lock = SX_LOCK_UNLOCKED;
|
||||
sx->sx_recurse = 0;
|
||||
lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
|
||||
@ -305,7 +306,8 @@ _sx_try_xlock(struct sx *sx, const char *file, int line)
|
||||
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
|
||||
("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
|
||||
|
||||
if (sx_xlocked(sx) && (sx->lock_object.lo_flags & SX_RECURSE) != 0) {
|
||||
if (sx_xlocked(sx) &&
|
||||
(sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
|
||||
sx->sx_recurse++;
|
||||
atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
|
||||
rval = 1;
|
||||
@ -479,7 +481,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
|
||||
|
||||
/* If we already hold an exclusive lock, then recurse. */
|
||||
if (sx_xlocked(sx)) {
|
||||
KASSERT((sx->lock_object.lo_flags & SX_RECURSE) != 0,
|
||||
KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
|
||||
("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
|
||||
sx->lock_object.lo_name, file, line));
|
||||
sx->sx_recurse++;
|
||||
|
@ -419,7 +419,7 @@ extern struct vattr va_null; /* predefined null vattr structure */
|
||||
#define VI_MTX(vp) (&(vp)->v_interlock)
|
||||
|
||||
#define VN_LOCK_AREC(vp) \
|
||||
((vp)->v_vnlock->lock_object.lo_flags |= LK_CANRECURSE)
|
||||
((vp)->v_vnlock->lock_object.lo_flags |= LO_RECURSABLE)
|
||||
#define VN_LOCK_ASHARE(vp) \
|
||||
((vp)->v_vnlock->lock_object.lo_flags &= ~LK_NOSHARE)
|
||||
|
||||
|
@ -556,8 +556,8 @@ MTX_SYSINIT(softdep_lock, &lk, "Softdep Lock", MTX_DEF);
|
||||
#define ACQUIRE_LOCK(lk) mtx_lock(lk)
|
||||
#define FREE_LOCK(lk) mtx_unlock(lk)
|
||||
|
||||
#define BUF_AREC(bp) ((bp)->b_lock.lock_object.lo_flags |= LK_CANRECURSE)
|
||||
#define BUF_NOREC(bp) ((bp)->b_lock.lock_object.lo_flags &= ~LK_CANRECURSE)
|
||||
#define BUF_AREC(bp) ((bp)->b_lock.lock_object.lo_flags |= LO_RECURSABLE)
|
||||
#define BUF_NOREC(bp) ((bp)->b_lock.lock_object.lo_flags &= ~LO_RECURSABLE)
|
||||
|
||||
/*
|
||||
* Worklist queue management.
|
||||
|
Loading…
Reference in New Issue
Block a user