The buffer's b_vflags field is not always properly protected by the
bufobj lock. If b_bufobj is not NULL, the bufobj lock should be held
when manipulating the flags. Failing to do so sometimes leaves
BV_BKGRDINPROG erroneously set, causing softdep's getdirtybuf() to get
stuck indefinitely in the "getbuf" sleep, waiting for a background
write that is never actually performed.

Add BO_LOCK() in the cases where it was missed.

In collaboration with:	pho
Tested by:	bz
Reviewed by:	jeff
MFC after:	1 month
commit 3beb1b723f
parent 691401eef8
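For illustration only, here is a minimal userland sketch of the rule this commit enforces: when a buffer has a bufobj attached, its flag word may only be changed with that object's lock held. The struct layouts, FLAG_BKGRDINPROG, buf_change_vflag(), and the pthread mutex are stand-ins for the kernel's struct bufobj, BV_BKGRDINPROG, and BO_LOCK()/BO_UNLOCK(); this is not the kernel code itself.

#include <pthread.h>
#include <stdio.h>

#define FLAG_BKGRDINPROG        0x01    /* stand-in for BV_BKGRDINPROG */

struct bufobj {
        pthread_mutex_t bo_mtx;         /* stand-in for the bufobj lock */
};

struct buf {
        struct bufobj   *b_bufobj;      /* may be NULL */
        unsigned int    b_vflags;       /* protected by b_bufobj's lock */
};

/* Set or clear a b_vflags bit, taking the bufobj lock when one is attached. */
static void
buf_change_vflag(struct buf *bp, unsigned int flag, int set)
{
        struct bufobj *bo = bp->b_bufobj;

        if (bo != NULL)
                pthread_mutex_lock(&bo->bo_mtx);
        if (set)
                bp->b_vflags |= flag;
        else
                bp->b_vflags &= ~flag;
        if (bo != NULL)
                pthread_mutex_unlock(&bo->bo_mtx);
}

int
main(void)
{
        struct bufobj bo = { .bo_mtx = PTHREAD_MUTEX_INITIALIZER };
        struct buf bp = { .b_bufobj = &bo, .b_vflags = 0 };

        buf_change_vflag(&bp, FLAG_BKGRDINPROG, 1);     /* background write starts */
        buf_change_vflag(&bp, FLAG_BKGRDINPROG, 0);     /* ... and completes */
        printf("b_vflags = %#x\n", bp.b_vflags);
        return (0);
}

The same conditional lock/unlock shape is what the diff below adds around the bremfree()/bremfreel() and bufcountwakeup() calls.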
@@ -398,6 +398,8 @@ bufcountwakeup(struct buf *bp)
 	KASSERT((bp->b_vflags & BV_INFREECNT) == 0,
 	    ("buf %p already counted as free", bp));
+	if (bp->b_bufobj != NULL)
+		mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
 	bp->b_vflags |= BV_INFREECNT;
 	old = atomic_fetchadd_int(&numfreebuffers, 1);
 	KASSERT(old >= 0 && old < nbuf,
@@ -714,6 +716,8 @@ bremfree(struct buf *bp)
 	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
 		KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
 		    ("buf %p not counted in numfreebuffers", bp));
+		if (bp->b_bufobj != NULL)
+			mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
 		bp->b_vflags &= ~BV_INFREECNT;
 		old = atomic_fetchadd_int(&numfreebuffers, -1);
 		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
@@ -770,6 +774,8 @@ bremfreel(struct buf *bp)
 	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
 		KASSERT((bp->b_vflags & BV_INFREECNT) != 0,
 		    ("buf %p not counted in numfreebuffers", bp));
+		if (bp->b_bufobj != NULL)
+			mtx_assert(BO_MTX(bp->b_bufobj), MA_OWNED);
 		bp->b_vflags &= ~BV_INFREECNT;
 		old = atomic_fetchadd_int(&numfreebuffers, -1);
 		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
@@ -1412,8 +1418,16 @@ brelse(struct buf *bp)
 	/* enqueue */
 	mtx_lock(&bqlock);
 	/* Handle delayed bremfree() processing. */
-	if (bp->b_flags & B_REMFREE)
+	if (bp->b_flags & B_REMFREE) {
+		struct bufobj *bo;
+
+		bo = bp->b_bufobj;
+		if (bo != NULL)
+			BO_LOCK(bo);
 		bremfreel(bp);
+		if (bo != NULL)
+			BO_UNLOCK(bo);
+	}
 	if (bp->b_qindex != QUEUE_NONE)
 		panic("brelse: free buffer onto another queue???");
 
@@ -1474,8 +1488,16 @@ brelse(struct buf *bp)
 	 * if B_INVAL is set ).
 	 */
 
-	if (!(bp->b_flags & B_DELWRI))
+	if (!(bp->b_flags & B_DELWRI)) {
+		struct bufobj *bo;
+
+		bo = bp->b_bufobj;
+		if (bo != NULL)
+			BO_LOCK(bo);
 		bufcountwakeup(bp);
+		if (bo != NULL)
+			BO_UNLOCK(bo);
+	}
 
 	/*
 	 * Something we can maybe free or reuse
@@ -1504,6 +1526,8 @@ brelse(struct buf *bp)
 void
 bqrelse(struct buf *bp)
 {
+	struct bufobj *bo;
+
 	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
 	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
@@ -1514,10 +1538,15 @@ bqrelse(struct buf *bp)
 		return;
 	}
 
+	bo = bp->b_bufobj;
 	if (bp->b_flags & B_MANAGED) {
 		if (bp->b_flags & B_REMFREE) {
 			mtx_lock(&bqlock);
+			if (bo != NULL)
+				BO_LOCK(bo);
 			bremfreel(bp);
+			if (bo != NULL)
+				BO_UNLOCK(bo);
 			mtx_unlock(&bqlock);
 		}
 		bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
@@ -1527,8 +1556,13 @@ bqrelse(struct buf *bp)
 
 	mtx_lock(&bqlock);
 	/* Handle delayed bremfree() processing. */
-	if (bp->b_flags & B_REMFREE)
+	if (bp->b_flags & B_REMFREE) {
+		if (bo != NULL)
+			BO_LOCK(bo);
 		bremfreel(bp);
+		if (bo != NULL)
+			BO_UNLOCK(bo);
+	}
 	if (bp->b_qindex != QUEUE_NONE)
 		panic("bqrelse: free buffer onto another queue???");
 	/* buffers with stale but valid contents */
@@ -1563,8 +1597,13 @@ bqrelse(struct buf *bp)
 	}
 	mtx_unlock(&bqlock);
 
-	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
+	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI)) {
+		if (bo != NULL)
+			BO_LOCK(bo);
 		bufcountwakeup(bp);
+		if (bo != NULL)
+			BO_UNLOCK(bo);
+	}
 
 	/*
 	 * Something we can maybe free or reuse.
@@ -1898,7 +1937,11 @@ getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize,
 
 	KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
 
+	if (bp->b_bufobj != NULL)
+		BO_LOCK(bp->b_bufobj);
 	bremfreel(bp);
+	if (bp->b_bufobj != NULL)
+		BO_UNLOCK(bp->b_bufobj);
 	mtx_unlock(&bqlock);
 
 	if (qindex == QUEUE_CLEAN) {
@@ -2635,7 +2678,9 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 			bp->b_flags &= ~B_CACHE;
 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
 			bp->b_flags |= B_CACHE;
+		BO_LOCK(bo);
 		bremfree(bp);
+		BO_UNLOCK(bo);
 
 		/*
 		 * check for size inconsistancies for non-VMIO case.
@@ -1260,13 +1260,17 @@ flushbuflist( struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
 		 */
 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
 		    (flags & V_SAVE)) {
+			BO_LOCK(bo);
 			bremfree(bp);
+			BO_UNLOCK(bo);
 			bp->b_flags |= B_ASYNC;
 			bwrite(bp);
 			BO_LOCK(bo);
 			return (EAGAIN);	/* XXX: why not loop ? */
 		}
+		BO_LOCK(bo);
 		bremfree(bp);
+		BO_UNLOCK(bo);
 		bp->b_flags |= (B_INVAL | B_RELBUF);
 		bp->b_flags &= ~B_ASYNC;
 		brelse(bp);
@@ -1318,7 +1322,9 @@ vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td,
 		    BO_MTX(bo)) == ENOLCK)
 			goto restart;
 
+		BO_LOCK(bo);
 		bremfree(bp);
+		BO_UNLOCK(bo);
 		bp->b_flags |= (B_INVAL | B_RELBUF);
 		bp->b_flags &= ~B_ASYNC;
 		brelse(bp);
@@ -1340,7 +1346,9 @@ vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td,
 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
 		    BO_MTX(bo)) == ENOLCK)
 			goto restart;
+		BO_LOCK(bo);
 		bremfree(bp);
+		BO_UNLOCK(bo);
 		bp->b_flags |= (B_INVAL | B_RELBUF);
 		bp->b_flags &= ~B_ASYNC;
 		brelse(bp);
@@ -1372,7 +1380,9 @@ vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td,
 		VNASSERT((bp->b_flags & B_DELWRI), vp,
 		    ("buf(%p) on dirty queue without DELWRI", bp));
 
+		BO_LOCK(bo);
 		bremfree(bp);
+		BO_UNLOCK(bo);
 		bawrite(bp);
 		BO_LOCK(bo);
 		goto restartsync;