- Introduce the function lockmgr_recursed(), which returns true if the
  lockmgr lock lkp, when held in exclusive mode, is recursed
- Introduce the function BUF_LOCKRECURSED(), which does the same for
  bufobj locks, built on top of lockmgr_recursed()
- Introduce the function BUF_ISLOCKED(), which works like its
  counterpart VOP_ISLOCKED(9), reporting the state of the lockmgr lock
  linked with the bufobj

BUF_LOCKRECURSED() and BUF_ISLOCKED() entirely replace the bogus
BUF_REFCNT() in a more explicit and SMP-safe way. This allows us to axe
BUF_REFCNT(), leaving the function lockcount() totally unused in the
stock kernel. Further commits will axe lockcount() as well, as part of
the lockmgr() cleanup.

The KPI is, obviously, broken by this change, so further commits will
update the manpages and the FreeBSD version.
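For reference, the conversion is mechanical; the sketch below is an
editorial illustration, not part of the diff (the helper example() is
hypothetical, and the usual kernel headers are assumed):

	/*
	 * Old refcount idiom      ->  new lock-state idiom:
	 *   BUF_REFCNT(bp) == 0   ->  !BUF_ISLOCKED(bp)
	 *   BUF_REFCNT(bp) > 0    ->  BUF_ISLOCKED(bp)
	 *   BUF_REFCNT(bp) > 1    ->  BUF_LOCKRECURSED(bp)
	 */
	static void
	example(struct buf *bp)
	{
		if (!BUF_ISLOCKED(bp))
			panic("example: buffer is not locked");
		if (BUF_LOCKRECURSED(bp))
			BUF_UNLOCK(bp);	/* drop one recursion level only */
		else
			brelse(bp);	/* release to the free lists */
	}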

Tested by: kris (on UFS and NFS)
Author: Attilio Rao
Date: 2008-01-19 17:36:23 +00:00
Parent: de5910460a
Commit: d638e093d6
Notes (svn2git): svn path=/head/; revision=175486
11 changed files with 46 additions and 57 deletions

--- a/sys/gnu/fs/xfs/FreeBSD/xfs_buf.c
+++ b/sys/gnu/fs/xfs/FreeBSD/xfs_buf.c
@@ -86,7 +86,7 @@ xfs_buf_get_empty(size_t size, xfs_buftarg_t *target)
 	bp->b_bufsize = size;
 	bp->b_bcount = size;
-	KASSERT(BUF_REFCNT(bp) == 1,
+	KASSERT(BUF_ISLOCKED(bp),
 	    ("xfs_buf_get_empty: bp %p not locked",bp));
 	xfs_buf_set_target(bp, target);
@@ -103,7 +103,7 @@ xfs_buf_get_noaddr(size_t len, xfs_buftarg_t *target)
 	bp = geteblk(len);
 	if (bp != NULL) {
-		KASSERT(BUF_REFCNT(bp) == 1,
+		KASSERT(BUF_ISLOCKED(bp),
 		    ("xfs_buf_get_empty: bp %p not locked",bp));
 		xfs_buf_set_target(bp, target);
@@ -163,7 +163,7 @@ XFS_bwrite(xfs_buf_t *bp)
 	if ((bp->b_flags & B_ASYNC) == 0) {
 		error = bufwait(bp);
 #if 0
-		if (BUF_REFCNT(bp) > 1)
+		if (BUF_LOCKRECURSED(bp))
 			BUF_UNLOCK(bp);
 		else
 			brelse(bp);

--- a/sys/gnu/fs/xfs/FreeBSD/xfs_buf.h
+++ b/sys/gnu/fs/xfs/FreeBSD/xfs_buf.h
@@ -160,7 +160,6 @@ xfs_buf_get_error(struct buf *bp)
 #define XFS_BUF_HOLD(x)		((void)0)
 #define XFS_BUF_UNHOLD(x)	((void)0)
-#define XFS_BUF_ISHOLD(x)	BUF_REFCNT(x)
 #define XFS_BUF_READ(x)		((x)->b_iocmd = BIO_READ)
 #define XFS_BUF_UNREAD(x)	((x)->b_iocmd = 0)
@@ -234,7 +233,7 @@ xfs_buf_offset(xfs_buf_t *bp, size_t offset)
 #define XFS_BUF_SET_VTYPE(bp, type)
 #define XFS_BUF_SET_REF(bp, ref)
-#define XFS_BUF_VALUSEMA(bp)	(BUF_REFCNT(bp)? 0 : 1)
+#define XFS_BUF_VALUSEMA(bp)	(BUF_ISLOCKED(bp) ? 0 : 1)
 #define XFS_BUF_CPSEMA(bp)	\
 	(BUF_LOCK(bp, LK_EXCLUSIVE|LK_CANRECURSE | LK_SLEEPFAIL, NULL) == 0)

--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -255,7 +255,7 @@ static int
 isbufbusy(struct buf *bp)
 {
 	if (((bp->b_flags & (B_INVAL | B_PERSISTENT)) == 0 &&
-	    BUF_REFCNT(bp) > 0) ||
+	    BUF_ISLOCKED(bp)) ||
 	    ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
 		return (1);
 	return (0);

--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -658,7 +658,7 @@ bremfree(struct buf *bp)
 {
 	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
-	KASSERT(BUF_REFCNT(bp), ("bremfree: buf must be locked."));
+	KASSERT(BUF_ISLOCKED(bp), ("bremfree: buf must be locked."));
 	KASSERT((bp->b_flags & B_REMFREE) == 0,
 	    ("bremfree: buffer %p already marked for delayed removal.", bp));
 	KASSERT(bp->b_qindex != QUEUE_NONE,
@@ -695,7 +695,7 @@ bremfreel(struct buf *bp)
 {
 	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
 	    bp, bp->b_vp, bp->b_flags);
-	KASSERT(BUF_REFCNT(bp), ("bremfreel: buffer %p not locked.", bp));
+	KASSERT(BUF_ISLOCKED(bp), ("bremfreel: buffer %p not locked.", bp));
 	KASSERT(bp->b_qindex != QUEUE_NONE,
 	    ("bremfreel: buffer %p not on a queue.", bp));
 	mtx_assert(&bqlock, MA_OWNED);
@@ -834,7 +834,7 @@ bufwrite(struct buf *bp)
 	oldflags = bp->b_flags;
 
-	if (BUF_REFCNT(bp) == 0)
+	if (!BUF_ISLOCKED(bp))
 		panic("bufwrite: buffer is not busy???");
 
 	if (bp->b_pin_count > 0)
@@ -952,7 +952,7 @@ bdwrite(struct buf *bp)
 	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
-	KASSERT(BUF_REFCNT(bp) != 0, ("bdwrite: buffer is not busy"));
+	KASSERT(BUF_ISLOCKED(bp), ("bdwrite: buffer is not busy"));
 
 	if (bp->b_flags & B_INVAL) {
 		brelse(bp);
@@ -1047,7 +1047,7 @@ bdirty(struct buf *bp)
 	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
 	    bp, bp->b_vp, bp->b_flags);
-	KASSERT(BUF_REFCNT(bp) == 1, ("bdirty: bp %p not locked",bp));
+	KASSERT(BUF_ISLOCKED(bp), ("bdirty: bp %p not locked",bp));
 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
 	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
@@ -1081,7 +1081,7 @@ bundirty(struct buf *bp)
 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
 	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
-	KASSERT(BUF_REFCNT(bp) == 1, ("bundirty: bp %p not locked",bp));
+	KASSERT(BUF_ISLOCKED(bp), ("bundirty: bp %p not locked",bp));
 
 	if (bp->b_flags & B_DELWRI) {
 		bp->b_flags &= ~B_DELWRI;
@@ -1341,7 +1341,7 @@ brelse(struct buf *bp)
 		brelvp(bp);
 	}
 
-	if (BUF_REFCNT(bp) > 1) {
+	if (BUF_LOCKRECURSED(bp)) {
 		/* do not release to free list */
 		BUF_UNLOCK(bp);
 		return;
@@ -1446,7 +1446,7 @@ bqrelse(struct buf *bp)
 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
 
-	if (BUF_REFCNT(bp) > 1) {
+	if (BUF_LOCKRECURSED(bp)) {
 		/* do not release to free list */
 		BUF_UNLOCK(bp);
 		return;
@@ -2660,7 +2660,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 		bp->b_flags &= ~B_DONE;
 	}
 	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
-	KASSERT(BUF_REFCNT(bp) == 1, ("getblk: bp %p not locked",bp));
+	KASSERT(BUF_ISLOCKED(bp), ("getblk: bp %p not locked",bp));
 	KASSERT(bp->b_bufobj == bo,
 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
 	return (bp);
@@ -2681,7 +2681,7 @@ geteblk(int size)
 		continue;
 	allocbuf(bp, size);
 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
-	KASSERT(BUF_REFCNT(bp) == 1, ("geteblk: bp %p not locked",bp));
+	KASSERT(BUF_ISLOCKED(bp), ("geteblk: bp %p not locked",bp));
 	return (bp);
 }
@@ -2707,7 +2707,7 @@ allocbuf(struct buf *bp, int size)
 	int newbsize, mbsize;
 	int i;
 
-	if (BUF_REFCNT(bp) == 0)
+	if (!BUF_ISLOCKED(bp))
 		panic("allocbuf: buffer not busy");
 
 	if (bp->b_kvasize < size)
@@ -3150,8 +3150,7 @@ bufdone(struct buf *bp)
 	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 	dropobj = NULL;
 
-	KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp,
-	    BUF_REFCNT(bp)));
+	KASSERT(BUF_ISLOCKED(bp), ("biodone: bp %p not busy", bp));
 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
 
 	runningbufwakeup(bp);
@@ -3176,8 +3175,7 @@ bufdone(struct buf *bp)
 void
 bufdone_finish(struct buf *bp)
 {
-	KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp,
-	    BUF_REFCNT(bp)));
+	KASSERT(BUF_ISLOCKED(bp), ("biodone: bp %p not busy", bp));
 
 	if (!LIST_EMPTY(&bp->b_dep))
 		buf_complete(bp);
@@ -3943,7 +3941,7 @@ DB_SHOW_COMMAND(lockedbufs, lockedbufs)
 	for (i = 0; i < nbuf; i++) {
 		bp = &buf[i];
-		if (lockcount(&bp->b_lock)) {
+		if (BUF_ISLOCKED(bp)) {
 			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
 			db_printf("\n");
 		}

--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -3570,7 +3570,7 @@ vop_strategy_pre(void *ap)
 	if ((bp->b_flags & B_CLUSTER) != 0)
 		return;
 
-	if (BUF_REFCNT(bp) < 1) {
+	if (!BUF_ISLOCKED(bp)) {
 		if (vfs_badlock_print)
 			printf(
 			    "VOP_STRATEGY: bp is not locked but should be\n");

--- a/sys/nfs4client/nfs4_vnops.c
+++ b/sys/nfs4client/nfs4_vnops.c
@@ -2446,8 +2446,9 @@ nfs4_strategy(struct vop_strategy_args *ap)
 	struct ucred *cr;
 	int error = 0;
 
-	KASSERT(!(bp->b_flags & B_DONE), ("nfs4_strategy: buffer %p unexpectedly marked B_DONE", bp));
-	KASSERT(BUF_REFCNT(bp) > 0, ("nfs4_strategy: buffer %p not locked", bp));
+	KASSERT(!(bp->b_flags & B_DONE),
+	    ("nfs4_strategy: buffer %p unexpectedly marked B_DONE", bp));
+	KASSERT(BUF_ISLOCKED(bp), ("nfs4_strategy: buffer %p not locked", bp));
 
 	if (bp->b_iocmd == BIO_READ)
 		cr = bp->b_rcred;
@@ -2525,7 +2526,7 @@ nfs4_flush(struct vnode *vp, int waitfor, struct thread *td,
 		bveccount = 0;
 		VI_LOCK(vp);
 		TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
-			if (BUF_REFCNT(bp) == 0 &&
+			if (!BUF_ISLOCKED(bp) &&
 			    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
 				== (B_DELWRI | B_NEEDCOMMIT))
 				bveccount++;
@@ -2807,7 +2808,7 @@ nfs4_writebp(struct buf *bp, int force __unused, struct thread *td)
 	off_t off;
 #endif
 
-	if (BUF_REFCNT(bp) == 0)
+	if (!BUF_ISLOCKED(bp))
 		panic("bwrite: buffer is not locked???");
 
 	if (bp->b_flags & B_INVAL) {

--- a/sys/nfsclient/nfs_subs.c
+++ b/sys/nfsclient/nfs_subs.c
@@ -918,7 +918,7 @@ nfs_clearcommit(struct mount *mp)
 		}
 		MNT_IUNLOCK(mp);
 		TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
-			if (BUF_REFCNT(bp) == 0 &&
+			if (!BUF_ISLOCKED(bp) &&
 			    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
 				== (B_DELWRI | B_NEEDCOMMIT))
 				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

--- a/sys/nfsclient/nfs_vnops.c
+++ b/sys/nfsclient/nfs_vnops.c
@@ -2692,8 +2692,9 @@ nfs_strategy(struct vop_strategy_args *ap)
 	struct buf *bp = ap->a_bp;
 	struct ucred *cr;
 
-	KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
-	KASSERT(BUF_REFCNT(bp) > 0, ("nfs_strategy: buffer %p not locked", bp));
+	KASSERT(!(bp->b_flags & B_DONE),
+	    ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
+	KASSERT(BUF_ISLOCKED(bp), ("nfs_strategy: buffer %p not locked", bp));
 
 	if (bp->b_iocmd == BIO_READ)
 		cr = bp->b_rcred;
@@ -2771,7 +2772,7 @@ nfs_flush(struct vnode *vp, int waitfor, struct thread *td,
 		bveccount = 0;
 		VI_LOCK(vp);
 		TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
-			if (BUF_REFCNT(bp) == 0 &&
+			if (!BUF_ISLOCKED(bp) &&
 			    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
 				== (B_DELWRI | B_NEEDCOMMIT))
 				bveccount++;
@@ -3087,7 +3088,7 @@ nfs_writebp(struct buf *bp, int force __unused, struct thread *td)
 	off_t off;
 #endif
 
-	if (BUF_REFCNT(bp) == 0)
+	if (!BUF_ISLOCKED(bp))
 		panic("bwrite: buffer is not locked???");
 
 	if (bp->b_flags & B_INVAL) {

--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -319,12 +319,23 @@ BUF_UNLOCK(struct buf *bp)
 	splx(s);
 }
 
+/*
+ * Check if a buffer lock is recursed.
+ */
+#define	BUF_LOCKRECURSED(bp)					\
+	(lockmgr_recursed(&(bp)->b_lock))
+
+/*
+ * Check if a buffer lock is currently held.
+ */
+#define	BUF_ISLOCKED(bp)					\
+	(lockstatus(&(bp)->b_lock, curthread))
 /*
  * Free a buffer lock.
  */
 #define BUF_LOCKFREE(bp)					\
 do {								\
-	if (BUF_REFCNT(bp) > 0)					\
+	if (BUF_ISLOCKED(bp))					\
 		panic("free locked buf");			\
 	lockdestroy(&(bp)->b_lock);				\
 } while (0)
@@ -344,29 +355,6 @@ BUF_KERNPROC(struct buf *bp)
 	lockmgr_disown(&bp->b_lock);
 }
 #endif
-
-/*
- * Find out the number of references to a lock.
- */
-static __inline int BUF_REFCNT(struct buf *);
-static __inline int
-BUF_REFCNT(struct buf *bp)
-{
-	int s, ret;
-
-	/*
-	 * When the system is panicing, the lock manager grants all lock
-	 * requests whether or not the lock is available. To avoid "unlocked
-	 * buffer" panics after a crash, we just claim that all buffers
-	 * are locked when cleaning up after a system panic.
-	 */
-	if (panicstr != NULL)
-		return (1);
-	s = splbio();
-	ret = lockcount(&(bp)->b_lock);
-	splx(s);
-	return ret;
-}
 
 /*
  * Find out the number of waiters on a lock.
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -204,6 +204,8 @@ int	lockcount(struct lock *);
 int	lockwaiters(struct lock *);
 #define	lockmgr(lock, flags, mtx, td)	_lockmgr((lock), (flags), (mtx), (td), __FILE__, __LINE__)
+#define	lockmgr_recursed(lkp)					\
+	((lkp)->lk_exclusivecount > 1)
 #ifdef DDB
 int	lockmgr_chain(struct thread *td, struct thread **ownerp);
 #endif

--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -1718,7 +1718,7 @@ ffs_bufwrite(struct buf *bp)
 	oldflags = bp->b_flags;
 
-	if (BUF_REFCNT(bp) == 0)
+	if (!BUF_ISLOCKED(bp))
 		panic("bufwrite: buffer is not busy???");
 	s = splbio();
 	/*