Explicitly pass vnode to nfs_doio() and mountpoint to nfs_asyncio().

Poul-Henning Kamp 2004-09-07 08:56:43 +00:00
parent 066a8fea81
commit 35f134080f
5 changed files with 17 additions and 26 deletions
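In short: nfs_doio() and nfs_asyncio() no longer dig the vnode and mount point out of bp->b_vp; their callers pass them in explicitly. As a quick reference, a sketch of the prototypes before and after, plus a typical updated call site, as they appear in the hunks below:

	/* Before: both routines reached through bp->b_vp for the vnode/mount. */
	int nfs_asyncio(struct buf *, struct ucred *, struct thread *);
	int nfs_doio(struct buf *, struct ucred *, struct thread *);

	/* After: the mount point and the vnode are passed explicitly. */
	int nfs_asyncio(struct nfsmount *, struct buf *, struct ucred *, struct thread *);
	int nfs_doio(struct vnode *vp, struct buf *, struct ucred *, struct thread *);

	/* Typical caller, as in nfs_strategy() below, where the vnode is at hand: */
	if ((bp->b_flags & B_ASYNC) == 0 ||
	    nfs_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, td))
		error = nfs_doio(ap->a_vp, bp, cr, td);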

View File

@@ -2570,8 +2570,6 @@ nfs4_strategy(struct vop_strategy_args *ap)
 	struct thread *td;
 	int error = 0;
 
-	KASSERT(ap->a_vp == ap->a_bp->b_vp, ("%s(%p != %p)",
-	    __func__, ap->a_vp, ap->a_bp->b_vp));
 	KASSERT(!(bp->b_flags & B_DONE), ("nfs4_strategy: buffer %p unexpectedly marked B_DONE", bp));
 	KASSERT(BUF_REFCNT(bp) > 0, ("nfs4_strategy: buffer %p not locked", bp));
@@ -2591,8 +2589,8 @@ nfs4_strategy(struct vop_strategy_args *ap)
 	 * otherwise just do it ourselves.
 	 */
 	if ((bp->b_flags & B_ASYNC) == 0 ||
-	    nfs_asyncio(bp, NOCRED, td))
-		error = nfs_doio(bp, cr, td);
+	    nfs_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, td))
+		error = nfs_doio(ap->a_vp, bp, cr, td);
 	return (error);
 }

View File

@@ -292,8 +292,8 @@ int nfs_commit(struct vnode *vp, u_quad_t offset, int cnt,
 	    struct ucred *cred, struct thread *td);
 int	nfs_readdirrpc(struct vnode *, struct uio *, struct ucred *);
 int	nfs_nfsiodnew(void);
-int	nfs_asyncio(struct buf *, struct ucred *, struct thread *);
-int	nfs_doio(struct buf *, struct ucred *, struct thread *);
+int	nfs_asyncio(struct nfsmount *, struct buf *, struct ucred *, struct thread *);
+int	nfs_doio(struct vnode *vp, struct buf *, struct ucred *, struct thread *);
 void	nfs_up(struct nfsreq *, struct nfsmount *, struct thread *,
 	    const char *, int);
 void	nfs_down(struct nfsreq *, struct nfsmount *, struct thread *,

View File

@@ -465,7 +465,7 @@ nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
 			rabp->b_flags |= B_ASYNC;
 			rabp->b_iocmd = BIO_READ;
 			vfs_busy_pages(rabp, 0);
-			if (nfs_asyncio(rabp, cred, td)) {
+			if (nfs_asyncio(nmp, rabp, cred, td)) {
 				rabp->b_flags |= B_INVAL;
 				rabp->b_ioflags |= BIO_ERROR;
 				vfs_unbusy_pages(rabp);
@@ -531,7 +531,7 @@ again:
 		if ((bp->b_flags & B_CACHE) == 0) {
 			bp->b_iocmd = BIO_READ;
 			vfs_busy_pages(bp, 0);
-			error = nfs_doio(bp, cred, td);
+			error = nfs_doio(vp, bp, cred, td);
 			if (error) {
 				brelse(bp);
 				return (error);
@@ -560,7 +560,7 @@ again:
 		if ((bp->b_flags & B_CACHE) == 0) {
 			bp->b_iocmd = BIO_READ;
 			vfs_busy_pages(bp, 0);
-			error = nfs_doio(bp, cred, td);
+			error = nfs_doio(vp, bp, cred, td);
 			if (error) {
 				bp->b_ioflags |= BIO_ERROR;
 				brelse(bp);
@@ -586,7 +586,7 @@ again:
 		if ((bp->b_flags & B_CACHE) == 0) {
 			bp->b_iocmd = BIO_READ;
 			vfs_busy_pages(bp, 0);
-			error = nfs_doio(bp, cred, td);
+			error = nfs_doio(vp, bp, cred, td);
 			if (error) {
 				brelse(bp);
 			}
@@ -615,7 +615,7 @@ again:
 		if ((bp->b_flags & B_CACHE) == 0) {
 			bp->b_iocmd = BIO_READ;
 			vfs_busy_pages(bp, 0);
-			error = nfs_doio(bp, cred, td);
+			error = nfs_doio(vp, bp, cred, td);
 			/*
 			 * no error + B_INVAL == directory EOF,
 			 * use the block.
@@ -658,7 +658,7 @@ again:
 			rabp->b_flags |= B_ASYNC;
 			rabp->b_iocmd = BIO_READ;
 			vfs_busy_pages(rabp, 0);
-			if (nfs_asyncio(rabp, cred, td)) {
+			if (nfs_asyncio(nmp, rabp, cred, td)) {
 				rabp->b_flags |= B_INVAL;
 				rabp->b_ioflags |= BIO_ERROR;
 				vfs_unbusy_pages(rabp);
@@ -926,7 +926,7 @@ again:
 		if ((bp->b_flags & B_CACHE) == 0) {
 			bp->b_iocmd = BIO_READ;
 			vfs_busy_pages(bp, 0);
-			error = nfs_doio(bp, cred, td);
+			error = nfs_doio(vp, bp, cred, td);
 			if (error) {
 				brelse(bp);
 				break;
@@ -1165,17 +1165,14 @@ nfs_vinvalbuf(struct vnode *vp, int flags, struct ucred *cred,
  * is eventually dequeued by the async daemon, nfs_doio() *will*.
  */
 int
-nfs_asyncio(struct buf *bp, struct ucred *cred, struct thread *td)
+nfs_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
 {
-	struct nfsmount *nmp;
 	int iod;
 	int gotiod;
 	int slpflag = 0;
 	int slptimeo = 0;
 	int error, error2;
 
-	nmp = VFSTONFS(bp->b_vp->v_mount);
-
 	/*
 	 * Commits are usually short and sweet so lets save some cpu and
 	 * leave the async daemons for more important rpc's (such as reads
@@ -1298,10 +1295,9 @@ again:
  * synchronously or from an nfsiod.
  */
 int
-nfs_doio(struct buf *bp, struct ucred *cr, struct thread *td)
+nfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
 {
 	struct uio *uiop;
-	struct vnode *vp;
 	struct nfsnode *np;
 	struct nfsmount *nmp;
 	int error = 0, iomode, must_commit = 0;
@@ -1309,7 +1305,6 @@ nfs_doio(struct buf *bp, struct ucred *cr, struct thread *td)
 	struct iovec io;
 	struct proc *p = td ? td->td_proc : NULL;
 
-	vp = bp->b_vp;
 	np = VTONFS(vp);
 	nmp = VFSTONFS(vp->v_mount);
 	uiop = &uio;

View File

@@ -257,9 +257,9 @@ nfssvc_iod(void *instance)
 			wakeup(&nmp->nm_bufq);
 		}
 		if (bp->b_iocmd == BIO_READ)
-			(void) nfs_doio(bp, bp->b_rcred, NULL);
+			(void) nfs_doio(bp->b_vp, bp, bp->b_rcred, NULL);
 		else
-			(void) nfs_doio(bp, bp->b_wcred, NULL);
+			(void) nfs_doio(bp->b_vp, bp, bp->b_wcred, NULL);
 		/*
 		 * If there are more than one iod on this mount, then defect
 		 * so that the iods can be shared out fairly between the mounts

View File

@@ -2562,8 +2562,6 @@ nfs_strategy(struct vop_strategy_args *ap)
 	struct thread *td;
 	int error = 0;
 
-	KASSERT(ap->a_vp == ap->a_bp->b_vp, ("%s(%p != %p)",
-	    __func__, ap->a_vp, ap->a_bp->b_vp));
 	KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
 	KASSERT(BUF_REFCNT(bp) > 0, ("nfs_strategy: buffer %p not locked", bp));
@@ -2583,8 +2581,8 @@ nfs_strategy(struct vop_strategy_args *ap)
 	 * otherwise just do it ourselves.
 	 */
 	if ((bp->b_flags & B_ASYNC) == 0 ||
-	    nfs_asyncio(bp, NOCRED, td))
-		error = nfs_doio(bp, cr, td);
+	    nfs_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, td))
+		error = nfs_doio(ap->a_vp, bp, cr, td);
 	return (error);
 }