Several ISP users have commented that the 1.41 version of the
nfs_bio.c code worked better than 1.44.  This commit reverts
the important parts of 1.44 back to 1.41; we will fix the problem
properly once we can get a handle on it.
John Dyson 1997-12-08 00:59:08 +00:00
parent b0624e7fc6
commit 2f29e93460
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=31617
2 changed files with 38 additions and 230 deletions
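
The hunks below restore the 1.41-style handling in nfs_write(): a new write into a cache block is merged with the block's existing dirty region only when the two ranges overlap or abut; otherwise the old dirty region is forced out with VOP_BWRITE() and the block is fetched again. What follows is a minimal, self-contained sketch of that contiguity test under simplified bookkeeping; struct simple_buf, needs_flush_first(), and the sample offsets are illustrative stand-ins, not the kernel's interfaces.

#include <stdio.h>

/* Illustrative stand-in for the two buffer fields the check uses. */
struct simple_buf {
	int dirtyoff;	/* first dirty byte within the block */
	int dirtyend;	/* one past the last dirty byte */
};

/*
 * The restored test from nfs_write(): a write of [on, on+n) can be
 * merged only if it overlaps or abuts the existing dirty region;
 * otherwise the caller must write the buffer out first and retry.
 */
static int
needs_flush_first(const struct simple_buf *bp, int on, int n)
{
	return (bp->dirtyend > 0 &&
	    (on > bp->dirtyend || on + n < bp->dirtyoff));
}

int
main(void)
{
	struct simple_buf bp = { 0, 512 };	/* bytes 0..511 dirty */

	/* Contiguous append at 512: mergeable, no flush needed. */
	printf("%d\n", needs_flush_first(&bp, 512, 100));	/* prints 0 */
	/* Disjoint write at 1024: flush the old dirty region first. */
	printf("%d\n", needs_flush_first(&bp, 1024, 100));	/* prints 1 */
	return (0);
}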

View File

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
* $Id: nfs_bio.c,v 1.43 1997/08/02 14:33:06 bde Exp $
* $Id: nfs_bio.c,v 1.44 1997/09/10 19:52:25 phk Exp $
*/
@@ -65,9 +65,6 @@
static struct buf *nfs_getcacheblk __P((struct vnode *vp, daddr_t bn, int size,
struct proc *p));
static struct buf *nfs_getwriteblk __P((struct vnode *vp, daddr_t bn,
int size, struct proc *p,
struct ucred *cred, int off, int len));
extern int nfs_numasync;
extern struct nfsstats nfsstats;
@@ -596,7 +593,7 @@ nfs_write(ap)
bufsize = np->n_size - lbn * biosize;
bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
}
bp = nfs_getwriteblk(vp, lbn, bufsize, p, cred, on, n);
bp = nfs_getcacheblk(vp, lbn, bufsize, p);
if (!bp)
return (EINTR);
if (bp->b_wcred == NOCRED) {
@@ -605,6 +602,23 @@ nfs_write(ap)
}
np->n_flag |= NMODIFIED;
if ((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend > np->n_size) {
bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
}
/*
* If the new write will leave a contiguous dirty
* area, just update the b_dirtyoff and b_dirtyend,
* otherwise force a write rpc of the old dirty area.
*/
if (bp->b_dirtyend > 0 &&
(on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
bp->b_proc = p;
if (VOP_BWRITE(bp) == EINTR)
return (EINTR);
goto again;
}
/*
* Check for valid write lease and get one as required.
* In case getblk() and/or bwrite() delayed us.
@@ -680,116 +694,6 @@ nfs_write(ap)
return (0);
}
/*
* Get a cache block for writing. The range to be written is
* (off..off+len) within the block. This routine ensures that the
* block either has no dirty region or that the given range is
* contiguous with the existing dirty region.
*/
static struct buf *
nfs_getwriteblk(vp, bn, size, p, cred, off, len)
struct vnode *vp;
daddr_t bn;
int size;
struct proc *p;
struct ucred *cred;
int off, len;
{
struct nfsnode *np = VTONFS(vp);
struct buf *bp;
int error;
again:
bp = nfs_getcacheblk(vp, bn, size, p);
if (!bp)
return (NULL);
if (bp->b_wcred == NOCRED) {
crhold(cred);
bp->b_wcred = cred;
}
if ((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend > np->n_size) {
bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
}
/*
* If the new write will leave a contiguous dirty
* area, just update the b_dirtyoff and b_dirtyend,
* otherwise try to extend the dirty region.
*/
if (bp->b_dirtyend > 0 &&
(off > bp->b_dirtyend || (off + len) < bp->b_dirtyoff)) {
struct iovec iov;
struct uio uio;
off_t boff, start, end;
boff = ((off_t)bp->b_blkno) * DEV_BSIZE;
if (off > bp->b_dirtyend) {
start = boff + bp->b_validend;
end = boff + off;
} else {
start = boff + off + len;
end = boff + bp->b_validoff;
}
/*
* It may be that the valid region in the buffer
* covers the region we want, in which case just
* extend the dirty region. Otherwise we try to
* extend the valid region.
*/
if (end > start) {
uio.uio_iov = &iov;
uio.uio_iovcnt = 1;
uio.uio_offset = start;
uio.uio_resid = end - start;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_READ;
uio.uio_procp = p;
iov.iov_base = bp->b_data + (start - boff);
iov.iov_len = end - start;
error = nfs_readrpc(vp, &uio, cred);
if (error) {
/*
* If we couldn't read, fall back to writing
* out the old dirty region.
*/
bp->b_proc = p;
if (VOP_BWRITE(bp) == EINTR)
return (NULL);
goto again;
} else {
/*
* The read worked.
*/
if (uio.uio_resid > 0) {
/*
* If there was a short read,
* just zero fill.
*/
bzero(iov.iov_base,
uio.uio_resid);
}
if (off > bp->b_dirtyend)
bp->b_validend = off;
else
bp->b_validoff = off + len;
}
}
/*
* We now have a valid region which extends up to the
* dirty region which we want.
*/
if (off > bp->b_dirtyend)
bp->b_dirtyend = off;
else
bp->b_dirtyoff = off + len;
}
return bp;
}
/*
* Get an nfs cache block.
* Allocate a new one if the block isn't currently in the cache

View File

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
* $Id: nfs_bio.c,v 1.43 1997/08/02 14:33:06 bde Exp $
* $Id: nfs_bio.c,v 1.44 1997/09/10 19:52:25 phk Exp $
*/
@@ -65,9 +65,6 @@
static struct buf *nfs_getcacheblk __P((struct vnode *vp, daddr_t bn, int size,
struct proc *p));
static struct buf *nfs_getwriteblk __P((struct vnode *vp, daddr_t bn,
int size, struct proc *p,
struct ucred *cred, int off, int len));
extern int nfs_numasync;
extern struct nfsstats nfsstats;
@@ -596,7 +593,7 @@ nfs_write(ap)
bufsize = np->n_size - lbn * biosize;
bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
}
bp = nfs_getwriteblk(vp, lbn, bufsize, p, cred, on, n);
bp = nfs_getcacheblk(vp, lbn, bufsize, p);
if (!bp)
return (EINTR);
if (bp->b_wcred == NOCRED) {
@@ -605,6 +602,23 @@ nfs_write(ap)
}
np->n_flag |= NMODIFIED;
if ((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend > np->n_size) {
bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
}
/*
* If the new write will leave a contiguous dirty
* area, just update the b_dirtyoff and b_dirtyend,
* otherwise force a write rpc of the old dirty area.
*/
if (bp->b_dirtyend > 0 &&
(on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
bp->b_proc = p;
if (VOP_BWRITE(bp) == EINTR)
return (EINTR);
goto again;
}
/*
* Check for valid write lease and get one as required.
* In case getblk() and/or bwrite() delayed us.
@@ -680,116 +694,6 @@ nfs_write(ap)
return (0);
}
/*
* Get a cache block for writing. The range to be written is
* (off..off+len) within the block. This routine ensures that the
* block either has no dirty region or that the given range is
* contiguous with the existing dirty region.
*/
static struct buf *
nfs_getwriteblk(vp, bn, size, p, cred, off, len)
struct vnode *vp;
daddr_t bn;
int size;
struct proc *p;
struct ucred *cred;
int off, len;
{
struct nfsnode *np = VTONFS(vp);
struct buf *bp;
int error;
again:
bp = nfs_getcacheblk(vp, bn, size, p);
if (!bp)
return (NULL);
if (bp->b_wcred == NOCRED) {
crhold(cred);
bp->b_wcred = cred;
}
if ((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend > np->n_size) {
bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
}
/*
* If the new write will leave a contiguous dirty
* area, just update the b_dirtyoff and b_dirtyend,
* otherwise try to extend the dirty region.
*/
if (bp->b_dirtyend > 0 &&
(off > bp->b_dirtyend || (off + len) < bp->b_dirtyoff)) {
struct iovec iov;
struct uio uio;
off_t boff, start, end;
boff = ((off_t)bp->b_blkno) * DEV_BSIZE;
if (off > bp->b_dirtyend) {
start = boff + bp->b_validend;
end = boff + off;
} else {
start = boff + off + len;
end = boff + bp->b_validoff;
}
/*
* It may be that the valid region in the buffer
* covers the region we want, in which case just
* extend the dirty region. Otherwise we try to
* extend the valid region.
*/
if (end > start) {
uio.uio_iov = &iov;
uio.uio_iovcnt = 1;
uio.uio_offset = start;
uio.uio_resid = end - start;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_READ;
uio.uio_procp = p;
iov.iov_base = bp->b_data + (start - boff);
iov.iov_len = end - start;
error = nfs_readrpc(vp, &uio, cred);
if (error) {
/*
* If we couldn't read, fall back to writing
* out the old dirty region.
*/
bp->b_proc = p;
if (VOP_BWRITE(bp) == EINTR)
return (NULL);
goto again;
} else {
/*
* The read worked.
*/
if (uio.uio_resid > 0) {
/*
* If there was a short read,
* just zero fill.
*/
bzero(iov.iov_base,
uio.uio_resid);
}
if (off > bp->b_dirtyend)
bp->b_validend = off;
else
bp->b_validoff = off + len;
}
}
/*
* We now have a valid region which extends up to the
* dirty region which we want.
*/
if (off > bp->b_dirtyend)
bp->b_dirtyend = off;
else
bp->b_dirtyoff = off + len;
}
return bp;
}
/*
* Get an nfs cache block.
* Allocate a new one if the block isn't currently in the cache
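
For reference, the nfs_getwriteblk() routine deleted above took the opposite approach: rather than flushing, it bridged a non-contiguous write by reading the gap between the buffer's valid region and the new write range (via nfs_readrpc) so the dirty regions could be merged, issuing the read only when the computed span was non-empty. The sketch below models just that gap computation under simplified assumptions; struct blk_state and gap_to_read() are hypothetical stand-ins for the b_validoff/b_validend/b_dirtyoff/b_dirtyend bookkeeping, not kernel code.

#include <stdio.h>

/* Illustrative per-block state; small offsets are bytes within the block. */
struct blk_state {
	long blkoff;		/* byte offset of the block in the file (boff) */
	int validoff, validend;	/* currently valid range */
	int dirtyoff, dirtyend;	/* currently dirty range */
};

/*
 * Compute the file-offset span [start, end) that the removed code
 * would have read to make a write of [off, off+len) contiguous with
 * the existing dirty region.  A read is only needed if end > start.
 */
static void
gap_to_read(const struct blk_state *b, int off, int len,
    long *start, long *end)
{
	if (off > b->dirtyend) {	/* new write lands after the dirty area */
		*start = b->blkoff + b->validend;
		*end = b->blkoff + off;
	} else {			/* new write lands before it */
		*start = b->blkoff + off + len;
		*end = b->blkoff + b->validoff;
	}
}

int
main(void)
{
	/* Block at file offset 8192; bytes 0..511 valid and dirty. */
	struct blk_state b = { 8192, 0, 512, 0, 512 };
	long start, end;

	gap_to_read(&b, 1024, 100, &start, &end);
	printf("read [%ld, %ld)\n", start, end);	/* read [8704, 9216) */
	return (0);
}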