Add b_bufobj to struct buf which eventually will eliminate the need for b_vp.

Initialize b_bufobj for all buffers.

Make incore() and gbincore() take a bufobj instead of a vnode.

Make inmem() local to vfs_bio.c.

Change a lot of VI_[UN]LOCK(bp->b_vp) to BO_[UN]LOCK(bp->b_bufobj),
and VI_MTX() to BO_MTX().

Make buf_vlist_add() take a bufobj instead of a vnode.

Eliminate other uses of bp->b_vp where bp->b_bufobj will do.

Various minor polishing: remove "register", turn panic() into KASSERT(),
use new-style function declarations, TAILQ_FOREACH_SAFE(), etc.
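For orientation, the two recurring patterns in this diff are the bufobj-based
buffer lookup and the deletion-safe walk of a bufobj's dirty list. A minimal
sketch of both, written for this summary and not part of the committed sources
(the identifiers match the hunks below):

	/* Sketch only: the new bufobj calling convention. */
	struct bufobj *bo = &vp->v_bufobj;	/* bufobj is embedded in the vnode */
	struct buf *bp, *nbp;

	BO_LOCK(bo);				/* was: VI_LOCK(vp) */
	bp = gbincore(bo, lblkno);		/* was: gbincore(vp, lblkno) */
	BO_UNLOCK(bo);				/* was: VI_UNLOCK(vp) */

	/* Deletion-safe traversal replacing the open-coded
	   "for (bp = TAILQ_FIRST(...); bp; bp = nbp)" loops. */
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		/* nbp is fetched up front, so the body may remove bp */
	}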
commit 52a089c526
parent ec23a3b685
@@ -171,7 +171,7 @@ ext2_bmaparray(vp, bn, bnp, runp, runb)
 		 */
 
 		metalbn = ap->in_lbn;
-		if ((daddr == 0 && !incore(vp, metalbn)) || metalbn == bn)
+		if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) || metalbn == bn)
 			break;
 		/*
 		 * If we get here, we've either got the block in the cache
@@ -62,6 +62,7 @@ static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");
 struct bio_ops bioops;		/* I/O operation notification */
 
 static int ibwrite(struct buf *);
+static int inmem(struct vnode * vp, daddr_t blkno);
 
 struct buf_ops buf_ops_bio = {
 	"buf_ops_bio",
@@ -803,20 +804,20 @@ ibwrite(struct buf *bp)
 	 * writing this block if it is asynchronous. Otherwise
 	 * wait for the background write to complete.
 	 */
-	VI_LOCK(bp->b_vp);
+	BO_LOCK(bp->b_bufobj);
 	if (bp->b_vflags & BV_BKGRDINPROG) {
 		if (bp->b_flags & B_ASYNC) {
-			VI_UNLOCK(bp->b_vp);
+			BO_UNLOCK(bp->b_bufobj);
 			splx(s);
 			bdwrite(bp);
 			return (0);
 		}
 		bp->b_vflags |= BV_BKGRDWAIT;
-		msleep(&bp->b_xflags, VI_MTX(bp->b_vp), PRIBIO, "bwrbg", 0);
+		msleep(&bp->b_xflags, BO_MTX(bp->b_bufobj), PRIBIO, "bwrbg", 0);
 		if (bp->b_vflags & BV_BKGRDINPROG)
 			panic("ibwrite: still writing");
 	}
-	VI_UNLOCK(bp->b_vp);
+	BO_UNLOCK(bp->b_bufobj);
 
 	/* Mark the buffer clean */
 	bundirty(bp);
@@ -833,10 +834,8 @@ ibwrite(struct buf *bp)
 	    (bp->b_flags & B_ASYNC) &&
 	    !vm_page_count_severe() &&
 	    !buf_dirty_count_severe()) {
-		if (bp->b_iodone != NULL) {
-			printf("bp->b_iodone = %p\n", bp->b_iodone);
-			panic("ibwrite: need chained iodone");
-		}
+		KASSERT(bp->b_iodone == NULL,
+		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));
 
 		/* get a new block */
 		newbp = geteblk(bp->b_bufsize);
@@ -849,10 +848,11 @@ ibwrite(struct buf *bp)
 		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
 		newbp->b_lblkno = bp->b_lblkno;
 		newbp->b_xflags |= BX_BKGRDMARKER;
-		VI_LOCK(bp->b_vp);
+		BO_LOCK(bp->b_bufobj);
 		bp->b_vflags |= BV_BKGRDINPROG;
 		bgetvp(bp->b_vp, newbp);
-		VI_UNLOCK(bp->b_vp);
+		BO_UNLOCK(bp->b_bufobj);
+		newbp->b_bufobj = &bp->b_vp->v_bufobj;
 		newbp->b_blkno = bp->b_blkno;
 		newbp->b_offset = bp->b_offset;
 		newbp->b_iodone = vfs_backgroundwritedone;
@@ -880,7 +880,7 @@ ibwrite(struct buf *bp)
 	bp->b_flags |= B_CACHE;
 	bp->b_iocmd = BIO_WRITE;
 
-	bufobj_wref(&bp->b_vp->v_bufobj);
+	bufobj_wref(bp->b_bufobj);
 	vfs_busy_pages(bp, 1);
 
 	/*
@@ -934,8 +934,8 @@ vfs_backgroundwritedone(struct buf *bp)
 	/*
 	 * Find the original buffer that we are writing.
 	 */
-	VI_LOCK(bp->b_vp);
-	if ((origbp = gbincore(bp->b_vp, bp->b_lblkno)) == NULL)
+	BO_LOCK(bp->b_bufobj);
+	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
 		panic("backgroundwritedone: lost buffer");
 
 	/*
@@ -951,7 +951,7 @@ vfs_backgroundwritedone(struct buf *bp)
 		origbp->b_vflags &= ~BV_BKGRDWAIT;
 		wakeup(&origbp->b_xflags);
 	}
-	VI_UNLOCK(bp->b_vp);
+	BO_UNLOCK(bp->b_bufobj);
 	/*
 	 * Process dependencies then return any unfinished ones.
 	 */
@@ -963,7 +963,7 @@ vfs_backgroundwritedone(struct buf *bp)
 	/*
 	 * This buffer is marked B_NOCACHE, so when it is released
 	 * by biodone, it will be tossed. We mark it with BIO_READ
-	 * to avoid biodone doing a second bufobj_wakeup.
+	 * to avoid biodone doing a second bufobj_wdrop.
 	 */
 	bp->b_flags |= B_NOCACHE;
 	bp->b_iocmd = BIO_READ;
@@ -987,11 +987,12 @@ bdwrite(struct buf *bp)
 	struct thread *td = curthread;
 	struct vnode *vp;
 	struct buf *nbp;
+	struct bufobj *bo;
 
 	GIANT_REQUIRED;
 
-	if (BUF_REFCNT(bp) == 0)
-		panic("bdwrite: buffer is not busy");
+	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
+	KASSERT(BUF_REFCNT(bp) != 0, ("bdwrite: buffer is not busy"));
 
 	if (bp->b_flags & B_INVAL) {
 		brelse(bp);
@@ -1006,38 +1007,39 @@ bdwrite(struct buf *bp)
 	 * disaster and not try to clean up after our own cleanup!
 	 */
 	vp = bp->b_vp;
-	VI_LOCK(vp);
+	bo = bp->b_bufobj;
+	BO_LOCK(bo);
 	if (td->td_pflags & TDP_COWINPROGRESS) {
 		recursiveflushes++;
-	} else if (vp != NULL && vp->v_dirtybufcnt > dirtybufthresh + 10) {
-		VI_UNLOCK(vp);
+	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
+		BO_UNLOCK(bo);
 		(void) VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td);
-		VI_LOCK(vp);
+		BO_LOCK(bo);
 		altbufferflushes++;
-	} else if (vp != NULL && vp->v_dirtybufcnt > dirtybufthresh) {
+	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
 		/*
 		 * Try to find a buffer to flush.
 		 */
-		TAILQ_FOREACH(nbp, &vp->v_dirtyblkhd, b_bobufs) {
+		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
 			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
 			    buf_countdeps(nbp, 0) ||
 			    BUF_LOCK(nbp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
 				continue;
 			if (bp == nbp)
 				panic("bdwrite: found ourselves");
-			VI_UNLOCK(vp);
+			BO_UNLOCK(bo);
 			if (nbp->b_flags & B_CLUSTEROK) {
 				vfs_bio_awrite(nbp);
 			} else {
 				bremfree(nbp);
 				bawrite(nbp);
 			}
-			VI_LOCK(vp);
+			BO_LOCK(bo);
 			dirtybufferflushes++;
 			break;
 		}
 	}
-	VI_UNLOCK(vp);
+	BO_UNLOCK(bo);
 
 	bdirty(bp);
 	/*
@@ -1110,6 +1112,7 @@ void
 bdirty(struct buf *bp)
 {
 
+	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 	KASSERT(bp->b_qindex == QUEUE_NONE,
 	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
 	bp->b_flags &= ~(B_RELBUF);
@@ -1139,6 +1142,7 @@ void
 bundirty(struct buf *bp)
 {
 
+	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 	KASSERT(bp->b_qindex == QUEUE_NONE,
 	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
 
@@ -1287,10 +1291,10 @@ brelse(struct buf *bp)
 		 * cleared if it is already pending.
 		 */
 		if (bp->b_vp) {
-			VI_LOCK(bp->b_vp);
+			BO_LOCK(bp->b_bufobj);
 			if (!(bp->b_vflags & BV_BKGRDINPROG))
 				bp->b_flags |= B_RELBUF;
-			VI_UNLOCK(bp->b_vp);
+			BO_UNLOCK(bp->b_bufobj);
 		} else
 			bp->b_flags |= B_RELBUF;
 	}
@@ -1526,9 +1530,9 @@ bqrelse(struct buf *bp)
 		 * cannot be set while we hold the buf lock, it can only be
 		 * cleared if it is already pending.
 		 */
-		VI_LOCK(bp->b_vp);
+		BO_LOCK(bp->b_bufobj);
 		if (!vm_page_count_severe() || bp->b_vflags & BV_BKGRDINPROG) {
-			VI_UNLOCK(bp->b_vp);
+			BO_UNLOCK(bp->b_bufobj);
 			bp->b_qindex = QUEUE_CLEAN;
 			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp,
 			    b_freelist);
@@ -1538,7 +1542,7 @@ bqrelse(struct buf *bp)
 			 * the buffer (most importantly: the wired pages
 			 * making up its backing store) *now*.
 			 */
-			VI_UNLOCK(bp->b_vp);
+			BO_UNLOCK(bp->b_bufobj);
 			mtx_unlock(&bqlock);
 			splx(s);
 			brelse(bp);
@@ -1635,7 +1639,7 @@ vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
 	match = 0;
 
 	/* If the buf isn't in core skip it */
-	if ((bpa = gbincore(vp, lblkno)) == NULL)
+	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
 		return (0);
 
 	/* If the buf is busy we don't want to wait for it */
@@ -1851,12 +1855,12 @@ getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
 			}
 		}
 		if (bp->b_vp) {
-			VI_LOCK(bp->b_vp);
+			BO_LOCK(bp->b_bufobj);
 			if (bp->b_vflags & BV_BKGRDINPROG) {
-				VI_UNLOCK(bp->b_vp);
+				BO_UNLOCK(bp->b_bufobj);
 				continue;
 			}
-			VI_UNLOCK(bp->b_vp);
+			BO_UNLOCK(bp->b_bufobj);
 		}
 
 		/*
@@ -1942,6 +1946,7 @@ getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
 		bp->b_magic = B_MAGIC_BIO;
 		bp->b_op = &buf_ops_bio;
 		bp->b_object = NULL;
+		bp->b_bufobj = NULL;
 
 		LIST_INIT(&bp->b_dep);
 
@@ -2168,13 +2173,13 @@ flushbufqueues(int flushdeps)
 			continue;
 		KASSERT((bp->b_flags & B_DELWRI),
 		    ("unexpected clean buffer %p", bp));
-		VI_LOCK(bp->b_vp);
+		BO_LOCK(bp->b_bufobj);
 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
-			VI_UNLOCK(bp->b_vp);
+			BO_UNLOCK(bp->b_bufobj);
 			BUF_UNLOCK(bp);
 			continue;
 		}
-		VI_UNLOCK(bp->b_vp);
+		BO_UNLOCK(bp->b_bufobj);
 		if (bp->b_flags & B_INVAL) {
 			bremfreel(bp);
 			mtx_unlock(&bqlock);
@@ -2224,14 +2229,14 @@ flushbufqueues(int flushdeps)
  * Check to see if a block is currently memory resident.
  */
 struct buf *
-incore(struct vnode * vp, daddr_t blkno)
+incore(struct bufobj *bo, daddr_t blkno)
 {
 	struct buf *bp;
 
 	int s = splbio();
-	VI_LOCK(vp);
-	bp = gbincore(vp, blkno);
-	VI_UNLOCK(vp);
+	BO_LOCK(bo);
+	bp = gbincore(bo, blkno);
+	BO_UNLOCK(bo);
 	splx(s);
 	return (bp);
 }
@@ -2242,7 +2247,7 @@ incore(struct bufobj *bo, daddr_t blkno)
  * it also hunts around in the VM system for the data.
  */
 
-int
+static int
 inmem(struct vnode * vp, daddr_t blkno)
 {
 	vm_object_t obj;
@@ -2253,7 +2258,7 @@ inmem(struct vnode * vp, daddr_t blkno)
 	GIANT_REQUIRED;
 	ASSERT_VOP_LOCKED(vp, "inmem");
 
-	if (incore(vp, blkno))
+	if (incore(&vp->v_bufobj, blkno))
 		return 1;
 	if (vp->v_mount == NULL)
 		return 0;
@@ -2420,6 +2425,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
     int flags)
 {
 	struct buf *bp;
+	struct bufobj *bo;
 	int s;
 	int error;
 	ASSERT_VOP_LOCKED(vp, "getblk");
@@ -2427,6 +2433,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 	if (size > MAXBSIZE)
 		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
 
+	bo = &vp->v_bufobj;
 	s = splbio();
 loop:
 	/*
@@ -2448,7 +2455,8 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 	}
 
 	VI_LOCK(vp);
-	if ((bp = gbincore(vp, blkno))) {
+	bp = gbincore(bo, blkno);
+	if (bp != NULL) {
 		int lockflags;
 		/*
 		 * Buffer is in-core. If the buffer is not busy, it must
@@ -2596,7 +2604,8 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
 		maxsize = imax(maxsize, bsize);
 
-		if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == NULL) {
+		bp = getnewbuf(slpflag, slptimeo, size, maxsize);
+		if (bp == NULL) {
 			if (slpflag || slptimeo) {
 				splx(s);
 				return NULL;
@@ -2619,9 +2628,9 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 		 * the splay tree implementation when dealing with duplicate
 		 * lblkno's.
 		 */
-		VI_LOCK(vp);
-		if (gbincore(vp, blkno)) {
-			VI_UNLOCK(vp);
+		BO_LOCK(bo);
+		if (gbincore(bo, blkno)) {
+			BO_UNLOCK(bo);
 			bp->b_flags |= B_INVAL;
 			brelse(bp);
 			goto loop;
@@ -2635,7 +2644,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 		bp->b_offset = offset;
 
 		bgetvp(vp, bp);
-		VI_UNLOCK(vp);
+		BO_UNLOCK(bo);
 
 		/*
 		 * set B_VMIO bit. allocbuf() the buffer bigger. Since the
@@ -2663,6 +2672,8 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 		bp->b_flags &= ~B_DONE;
 	}
 	KASSERT(BUF_REFCNT(bp) == 1, ("getblk: bp %p not locked",bp));
+	KASSERT(bp->b_bufobj == bo,
+	    ("wrong b_bufobj %p should be %p", bp->b_bufobj, bo));
 	return (bp);
 }
 
@@ -3153,8 +3164,8 @@ bufdone(struct buf *bp)
 	bp->b_flags |= B_DONE;
 	runningbufwakeup(bp);
 
-	if (bp->b_iocmd == BIO_WRITE && bp->b_vp != NULL)
-		bufobj_wdrop(&bp->b_vp->v_bufobj);
+	if (bp->b_iocmd == BIO_WRITE && bp->b_bufobj != NULL)
+		bufobj_wdrop(bp->b_bufobj);
 
 	/* call optional completion function if requested */
 	if (bp->b_iodone != NULL) {
@@ -3324,11 +3335,11 @@ vfs_unbusy_pages(struct buf *bp)
 		m = bp->b_pages[i];
 		if (m == bogus_page) {
 			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
-			if (!m) {
+			if (!m)
 				panic("vfs_unbusy_pages: page missing\n");
-			}
 			bp->b_pages[i] = m;
-			pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
+			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
+			    bp->b_pages, bp->b_npages);
 		}
 		vm_object_pip_subtract(obj, 1);
 		vm_page_io_finish(m);
@@ -3852,7 +3863,6 @@ bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
 	return (error);
 }
 
-
 #include "opt_ddb.h"
 #ifdef DDB
 #include <ddb/ddb.h>
@@ -149,7 +149,7 @@ cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
 			 * Stop if the buffer does not exist or it
 			 * is invalid (about to go away?)
 			 */
-			rbp = gbincore(vp, lblkno+i);
+			rbp = gbincore(&vp->v_bufobj, lblkno+i);
 			if (rbp == NULL || (rbp->b_flags & B_INVAL))
 				break;
 
@@ -770,7 +770,7 @@ cluster_wbuild(vp, size, start_lbn, len)
 		 * partake in the clustered write.
 		 */
 		VI_LOCK(vp);
-		if ((tbp = gbincore(vp, start_lbn)) == NULL ||
+		if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
 		    (tbp->b_vflags & BV_BKGRDINPROG)) {
 			VI_UNLOCK(vp);
 			++start_lbn;
@@ -825,6 +825,7 @@ cluster_wbuild(vp, size, start_lbn, len)
 		bp->b_bcount = 0;
 		bp->b_magic = tbp->b_magic;
 		bp->b_op = tbp->b_op;
+		bp->b_bufobj = tbp->b_bufobj;
 		bp->b_bufsize = 0;
 		bp->b_npages = 0;
 		if (tbp->b_wcred != NOCRED)
@@ -859,7 +860,7 @@ cluster_wbuild(vp, size, start_lbn, len)
 			 * can't need to be written.
 			 */
 			VI_LOCK(vp);
-			if ((tbp = gbincore(vp, start_lbn)) == NULL ||
+			if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
 			    (tbp->b_vflags & BV_BKGRDINPROG)) {
 				VI_UNLOCK(vp);
 				splx(s);
@@ -965,7 +966,7 @@ cluster_wbuild(vp, size, start_lbn, len)
 			tbp->b_flags |= B_ASYNC;
 			tbp->b_iocmd = BIO_WRITE;
 			reassignbuf(tbp);	/* put on clean list */
-			bufobj_wref(&tbp->b_vp->v_bufobj);
+			bufobj_wref(tbp->b_bufobj);
 			splx(s);
 			BUF_KERNPROC(tbp);
 			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
@@ -1247,19 +1247,18 @@ buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
 	return (root);
 }
 
-static
-void
+static void
 buf_vlist_remove(struct buf *bp)
 {
-	struct vnode *vp = bp->b_vp;
 	struct buf *root;
 	struct bufv *bv;
 
-	ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
+	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
+	ASSERT_BO_LOCKED(bp->b_bufobj);
 	if (bp->b_xflags & BX_VNDIRTY)
-		bv = &vp->v_bufobj.bo_dirty;
+		bv = &bp->b_bufobj->bo_dirty;
 	else
-		bv = &vp->v_bufobj.bo_clean;
+		bv = &bp->b_bufobj->bo_clean;
 	if (bp != bv->bv_root) {
 		root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
 		KASSERT(root == bp, ("splay lookup failed in remove"));
@@ -1282,20 +1281,24 @@ buf_vlist_remove(struct buf *bp)
  *
  * NOTE: xflags is passed as a constant, optimizing this inline function!
  */
-static
-void
-buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
+static void
+buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
 {
 	struct buf *root;
+	struct bufv *bv;
 
-	ASSERT_VI_LOCKED(vp, "buf_vlist_add");
+	ASSERT_BO_LOCKED(bo);
 	bp->b_xflags |= xflags;
-	if (xflags & BX_VNDIRTY) {
-		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
+	if (xflags & BX_VNDIRTY)
+		bv = &bo->bo_dirty;
+	else
+		bv = &bo->bo_clean;
+
+	root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
 	if (root == NULL) {
 		bp->b_left = NULL;
 		bp->b_right = NULL;
-		TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_bobufs);
+		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
 	} else if (bp->b_lblkno < root->b_lblkno ||
 	    (bp->b_lblkno == root->b_lblkno &&
 	    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
@@ -1307,35 +1310,10 @@ buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
 		bp->b_right = root->b_right;
 		bp->b_left = root;
 		root->b_right = NULL;
-			TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
-			    root, bp, b_bobufs);
-		}
-		vp->v_dirtybufcnt++;
-		vp->v_dirtyblkroot = bp;
-	} else {
-		/* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
-		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
-		if (root == NULL) {
-			bp->b_left = NULL;
-			bp->b_right = NULL;
-			TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_bobufs);
-		} else if (bp->b_lblkno < root->b_lblkno ||
-		    (bp->b_lblkno == root->b_lblkno &&
-		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
-			bp->b_left = root->b_left;
-			bp->b_right = root;
-			root->b_left = NULL;
-			TAILQ_INSERT_BEFORE(root, bp, b_bobufs);
-		} else {
-			bp->b_right = root->b_right;
-			bp->b_left = root;
-			root->b_right = NULL;
-			TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
-			    root, bp, b_bobufs);
-		}
-		vp->v_cleanbufcnt++;
-		vp->v_cleanblkroot = bp;
+		TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs);
 	}
+	bv->bv_cnt++;
+	bv->bv_root = bp;
 }
 
 /*
@@ -1351,26 +1329,26 @@ buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
  * first tree splayed.
  */
 struct buf *
-gbincore(struct vnode *vp, daddr_t lblkno)
+gbincore(struct bufobj *bo, daddr_t lblkno)
 {
 	struct buf *bp;
 
 	GIANT_REQUIRED;
 
-	ASSERT_VI_LOCKED(vp, "gbincore");
-	if ((bp = vp->v_cleanblkroot) != NULL &&
+	ASSERT_BO_LOCKED(bo);
+	if ((bp = bo->bo_clean.bv_root) != NULL &&
 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
 		return (bp);
-	if ((bp = vp->v_dirtyblkroot) != NULL &&
+	if ((bp = bo->bo_dirty.bv_root) != NULL &&
 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
 		return (bp);
-	if ((bp = vp->v_cleanblkroot) != NULL) {
-		vp->v_cleanblkroot = bp = buf_splay(lblkno, 0, bp);
+	if ((bp = bo->bo_clean.bv_root) != NULL) {
+		bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp);
 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
 			return (bp);
 	}
-	if ((bp = vp->v_dirtyblkroot) != NULL) {
-		vp->v_dirtyblkroot = bp = buf_splay(lblkno, 0, bp);
+	if ((bp = bo->bo_dirty.bv_root) != NULL) {
+		bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp);
 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
 			return (bp);
 	}
@@ -1381,9 +1359,7 @@ gbincore(struct bufobj *bo, daddr_t lblkno)
  * Associate a buffer with a vnode.
  */
 void
-bgetvp(vp, bp)
-	register struct vnode *vp;
-	register struct buf *bp;
+bgetvp(struct vnode *vp, struct buf *bp)
 {
 
 	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
@@ -1394,20 +1370,21 @@ bgetvp(vp, bp)
 	ASSERT_VI_LOCKED(vp, "bgetvp");
 	vholdl(vp);
 	bp->b_vp = vp;
+	bp->b_bufobj = &vp->v_bufobj;
 	bp->b_dev = vn_todev(vp);
 	/*
 	 * Insert onto list for new vnode.
 	 */
-	buf_vlist_add(bp, vp, BX_VNCLEAN);
+	buf_vlist_add(bp, &vp->v_bufobj, BX_VNCLEAN);
 }
 
 /*
  * Disassociate a buffer from a vnode.
  */
 void
-brelvp(bp)
-	register struct buf *bp;
+brelvp(struct buf *bp)
 {
+	struct bufobj *bo;
 	struct vnode *vp;
 
 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
@@ -1417,6 +1394,7 @@ brelvp(struct buf *bp)
 	 */
 	vp = bp->b_vp;
 	VI_LOCK(vp);
+	bo = bp->b_bufobj;
 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
 		buf_vlist_remove(bp);
 	if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
@@ -1427,7 +1405,8 @@ brelvp(struct buf *bp)
 		mtx_unlock(&sync_mtx);
 	}
 	vdropl(vp);
-	bp->b_vp = (struct vnode *) 0;
+	bp->b_vp = NULL;
+	bp->b_bufobj = NULL;
 	if (bp->b_object)
 		bp->b_object = NULL;
 	VI_UNLOCK(vp);
@@ -1715,6 +1694,7 @@ pbgetvp(vp, bp)
 	bp->b_object = vp->v_object;
 	bp->b_flags |= B_PAGING;
 	bp->b_dev = vn_todev(vp);
+	bp->b_bufobj = &vp->v_bufobj;
 }
 
 /*
@@ -1726,9 +1706,10 @@ pbrelvp(bp)
 {
 
 	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
+	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
 
 	/* XXX REMOVE ME */
-	VI_LOCK(bp->b_vp);
+	BO_LOCK(bp->b_bufobj);
 	if (TAILQ_NEXT(bp, b_bobufs) != NULL) {
 		panic(
 		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
@@ -1736,9 +1717,10 @@ pbrelvp(bp)
 		    (int)bp->b_flags
 		    );
 	}
-	VI_UNLOCK(bp->b_vp);
-	bp->b_vp = (struct vnode *) 0;
+	BO_UNLOCK(bp->b_bufobj);
+	bp->b_vp = NULL;
 	bp->b_object = NULL;
+	bp->b_bufobj = NULL;
 	bp->b_flags &= ~B_PAGING;
 }
 
@@ -1751,9 +1733,11 @@ void
 reassignbuf(struct buf *bp)
 {
 	struct vnode *vp;
+	struct bufobj *bo;
 	int delay;
 
 	vp = bp->b_vp;
+	bo = bp->b_bufobj;
 	++reassignbufcalls;
 
 	/*
@@ -1787,12 +1771,11 @@ reassignbuf(struct buf *bp)
 			}
 			vn_syncer_add_to_worklist(vp, delay);
 		}
-		buf_vlist_add(bp, vp, BX_VNDIRTY);
+		buf_vlist_add(bp, bo, BX_VNDIRTY);
 	} else {
-		buf_vlist_add(bp, vp, BX_VNCLEAN);
+		buf_vlist_add(bp, bo, BX_VNCLEAN);
 
-		if ((vp->v_iflag & VI_ONWORKLST) &&
-		    TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
+		if ((vp->v_iflag & VI_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
 			mtx_lock(&sync_mtx);
 			LIST_REMOVE(vp, v_synclist);
 			syncer_worklist_len--;
@@ -2151,8 +2134,7 @@ vhold(struct vnode *vp)
 }
 
 void
-vholdl(vp)
-	register struct vnode *vp;
+vholdl(struct vnode *vp)
 {
 
 	vp->v_holdcnt++;
@@ -2414,7 +2396,7 @@ vclean(vp, flags, td)
 	 */
 	if (flags & DOCLOSE) {
 		struct buf *bp;
-		bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
+		bp = TAILQ_FIRST(&vp->v_bufobj.bo_dirty.bv_hd);
 		if (bp != NULL)
 			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
 		if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
@@ -3129,10 +3111,7 @@ vfs_msync(struct mount *mp, int flags)
  * vp must be locked when vfs_object_create is called.
  */
 int
-vfs_object_create(vp, td, cred)
-	struct vnode *vp;
-	struct thread *td;
-	struct ucred *cred;
+vfs_object_create(struct vnode *vp, struct thread *td, struct ucred *cred)
 {
 
 	GIANT_REQUIRED;
@@ -3143,8 +3122,7 @@ vfs_object_create(struct vnode *vp, struct thread *td, struct ucred *cred)
  * Mark a vnode as free, putting it up for recycling.
 */
 void
-vfree(vp)
-	struct vnode *vp;
+vfree(struct vnode *vp)
 {
 
 	ASSERT_VI_LOCKED(vp, "vfree");
@@ -3165,8 +3143,7 @@ vfree(struct vnode *vp)
  * Opposite of vfree() - mark a vnode as in use.
 */
 void
-vbusy(vp)
-	struct vnode *vp;
+vbusy(struct vnode *vp)
 {
 
 	ASSERT_VI_LOCKED(vp, "vbusy");
@@ -2641,8 +2641,7 @@ nfs4_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct thread *td,
 	 */
 	bveccount = 0;
 	VI_LOCK(vp);
-	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
-		nbp = TAILQ_NEXT(bp, b_bobufs);
+	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
 		if (BUF_REFCNT(bp) == 0 &&
 		    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
 			== (B_DELWRI | B_NEEDCOMMIT))
@@ -2673,7 +2672,7 @@ nfs4_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct thread *td,
 		bvec = bvec_on_stack;
 		bvecsize = NFS_COMMITBVECSIZ;
 	}
-	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
+	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
 		if (bvecpos >= bvecsize)
 			break;
 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
@@ -2801,8 +2800,7 @@ nfs4_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct thread *td,
 loop:
 	s = splbio();
 	VI_LOCK(vp);
-	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
-		nbp = TAILQ_NEXT(bp, b_bobufs);
+	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
 			if (waitfor != MNT_WAIT || passone)
 				continue;
@@ -2863,7 +2861,7 @@ nfs4_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct thread *td,
 			VI_LOCK(vp);
 		}
 	}
-	if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
+	if (vp->v_bufobj.bo_dirty.bv_cnt > 0 && commit) {
 		VI_UNLOCK(vp);
 		goto loop;
 	}
@@ -2946,7 +2944,7 @@ nfs4_writebp(struct buf *bp, int force __unused, struct thread *td)
 	bp->b_ioflags &= ~BIO_ERROR;
 	bp->b_iocmd = BIO_WRITE;
 
-	bufobj_wref(&bp->b_vp->v_bufobj);
+	bufobj_wref(bp->b_bufobj);
 	curthread->td_proc->p_stats->p_ru.ru_oublock++;
 	splx(s);
 
@@ -455,7 +455,7 @@ nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
 		for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
 		    (off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
 			rabn = lbn + 1 + nra;
-			if (incore(vp, rabn) == NULL) {
+			if (incore(&vp->v_bufobj, rabn) == NULL) {
 				rabp = nfs_getcacheblk(vp, rabn, biosize, td);
 				if (!rabp) {
 					error = nfs_sigintr(nmp, NULL, td);
@@ -651,7 +651,7 @@ nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
 		    (bp->b_flags & B_INVAL) == 0 &&
 		    (np->n_direofoffset == 0 ||
 		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
-		    incore(vp, lbn + 1) == NULL) {
+		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
 			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
 			if (rabp) {
 				if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
@@ -813,8 +813,7 @@ nfs_clearcommit(struct mount *mp)
 			continue;
 		}
 		MNT_IUNLOCK(mp);
-		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
-			nbp = TAILQ_NEXT(bp, b_bobufs);
+		TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
 			if (BUF_REFCNT(bp) == 0 &&
 			    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
 				== (B_DELWRI | B_NEEDCOMMIT))
@@ -954,7 +954,8 @@ nfs_sync(struct mount *mp, int waitfor, struct ucred *cred, struct thread *td)
 	MNT_VNODE_FOREACH(vp, mp, nvp) {
 		VI_LOCK(vp);
 		MNT_IUNLOCK(mp);
-		if (VOP_ISLOCKED(vp, NULL) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
+		if (VOP_ISLOCKED(vp, NULL) ||
+		    vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
 		    waitfor == MNT_LAZY) {
 			VI_UNLOCK(vp);
 			MNT_ILOCK(mp);
@@ -2618,8 +2618,7 @@ nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct thread *td,
 	 */
 	bveccount = 0;
 	VI_LOCK(vp);
-	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
-		nbp = TAILQ_NEXT(bp, b_bobufs);
+	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
 		if (BUF_REFCNT(bp) == 0 &&
 		    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
 			== (B_DELWRI | B_NEEDCOMMIT))
@@ -2650,7 +2649,7 @@ nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct thread *td,
 		bvec = bvec_on_stack;
 		bvecsize = NFS_COMMITBVECSIZ;
 	}
-	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
+	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
 		if (bvecpos >= bvecsize)
 			break;
 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
@@ -2778,8 +2777,7 @@ nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct thread *td,
 loop:
 	s = splbio();
 	VI_LOCK(vp);
-	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
-		nbp = TAILQ_NEXT(bp, b_bobufs);
+	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
 			if (waitfor != MNT_WAIT || passone)
 				continue;
@@ -2838,7 +2836,7 @@ nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct thread *td,
 			VI_LOCK(vp);
 		}
 	}
-	if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
+	if (vp->v_bufobj.bo_dirty.bv_cnt != 0 && commit) {
 		VI_UNLOCK(vp);
 		goto loop;
 	}
@@ -2920,7 +2918,7 @@ nfs_writebp(struct buf *bp, int force __unused, struct thread *td)
 	bp->b_ioflags &= ~BIO_ERROR;
 	bp->b_iocmd = BIO_WRITE;
 
-	bufobj_wref(&bp->b_vp->v_bufobj);
+	bufobj_wref(bp->b_bufobj);
 	curthread->td_proc->p_stats->p_ru.ru_oublock++;
 	splx(s);
 
@@ -4093,7 +4093,7 @@ nfsrv_commit(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
 		 * should not be set if B_INVAL is set there could be
 		 * a race here since we haven't locked the buffer).
 		 */
-		if ((bp = gbincore(vp, lblkno)) != NULL) {
+		if ((bp = gbincore(&vp->v_bufobj, lblkno)) != NULL) {
 			if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
 			    LK_INTERLOCK, VI_MTX(vp)) == ENOLCK) {
 				VI_LOCK(vp);
@@ -44,6 +44,7 @@
 
 struct bio;
 struct buf;
+struct bufobj;
 struct mount;
 struct vnode;
 
@@ -110,6 +111,7 @@ struct buf {
 #define b_iooffset	b_io.bio_offset
 #define b_resid	b_io.bio_resid
 	struct buf_ops	*b_op;
+	struct bufobj	*b_bufobj;
 	unsigned	b_magic;
 #define B_MAGIC_BIO	0x10b10b10
 #define B_MAGIC_NFS	0x67238234
@@ -495,9 +497,8 @@ void brelse(struct buf *);
 void	bqrelse(struct buf *);
 int	vfs_bio_awrite(struct buf *);
 struct buf *	getpbuf(int *);
-struct buf *incore(struct vnode *, daddr_t);
-struct buf *gbincore(struct vnode *, daddr_t);
-int	inmem(struct vnode *, daddr_t);
+struct buf *incore(struct bufobj *, daddr_t);
+struct buf *gbincore(struct bufobj *, daddr_t);
 struct buf *getblk(struct vnode *, daddr_t, int, int, int, int);
 struct buf *geteblk(int);
 int	bufwait(struct buf *);
@@ -2571,7 +2571,7 @@ indir_trunc(freeblks, dbn, level, lbn, countp)
 	bp = getblk(freeblks->fb_devvp, dbn, (int)fs->fs_bsize, 0, 0,
 	    GB_NOCREAT);
 #else
-	bp = incore(freeblks->fb_devvp, dbn);
+	bp = incore(&freeblks->fb_devvp->v_bufobj, dbn);
 #endif
 	ACQUIRE_LOCK(&lk);
 	if (bp != NULL && (wk = LIST_FIRST(&bp->b_dep)) != NULL) {
@@ -203,7 +203,7 @@ ufs_bmaparray(vp, bn, bnp, nbp, runp, runb)
 		 */
 
 		metalbn = ap->in_lbn;
-		if ((daddr == 0 && !incore(vp, metalbn)) || metalbn == bn)
+		if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) || metalbn == bn)
 			break;
 		/*
 		 * If we get here, we've either got the block in the cache
@@ -2535,7 +2535,7 @@ static void
 swapdev_strategy(struct buf *bp, struct swdevt *sp)
 {
 	int s;
-	struct vnode *vp, *vp2;
+	struct vnode *vp2;
 
 	bp->b_dev = NULL;
 	bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
@@ -2544,9 +2544,8 @@ swapdev_strategy(struct buf *bp, struct swdevt *sp)
 	vhold(vp2);
 	s = splvm();
 	if (bp->b_iocmd == BIO_WRITE) {
-		vp = bp->b_vp;
-		if (vp)
-			bufobj_wdrop(&vp->v_bufobj);
+		if (bp->b_bufobj) /* XXX: should always be true /phk */
+			bufobj_wdrop(bp->b_bufobj);
 		bufobj_wref(&vp2->v_bufobj);
 	}
 	bp->b_vp = vp2;