Replace all mtx_lock()/mtx_unlock() on n_mtx with the macros.

For a long time, some places in the NFS code have locked/unlocked the
NFS node lock with the macros NFSLOCKNODE()/NFSUNLOCKNODE() whereas
others have simply used mtx_lock()/mtx_unlock().
Since the NFS node mutex needs to change to an sx lock so it can be held when
vnode_pager_setsize() is called, replace all occurrences of mtx_lock/mtx_unlock
with the macros to simplify making the change to an sx lock in a future commit.
There is no semantic change as a result of this commit.

I am not sure if the change to an sx lock will be MFC'd soon, so I put
an MFC of 1 week on this commit so that it could be MFC'd with that commit.

Suggested by:	kib
MFC after:	1 week
This commit is contained in:
Rick Macklem 2019-09-24 01:58:54 +00:00
parent 05cba150d3
commit 5d85e12f44
7 changed files with 199 additions and 199 deletions

View File

@ -855,11 +855,11 @@ MALLOC_DECLARE(M_NEWNFSDSESSION);
#define NFSWRITERPC_SETTIME(w, n, a, v4) \
do { \
if (w) { \
mtx_lock(&((n)->n_mtx)); \
NFSLOCKNODE(n); \
(n)->n_mtime = (a)->na_mtime; \
if (v4) \
(n)->n_change = (a)->na_filerev; \
mtx_unlock(&((n)->n_mtx)); \
NFSUNLOCKNODE(n); \
} \
} while (0)

View File

@ -101,9 +101,9 @@ ncl_gbp_getblksz(struct vnode *vp, daddr_t lbn)
int biosize, bcount;
np = VTONFS(vp);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
nsize = np->n_size;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
biosize = vp->v_bufobj.bo_bsize;
bcount = biosize;
@ -144,13 +144,13 @@ ncl_getpages(struct vop_getpages_args *ap)
}
if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
printf("ncl_getpages: called on non-cacheable vnode\n");
return (VM_PAGER_ERROR);
} else
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
mtx_lock(&nmp->nm_mtx);
@ -301,12 +301,12 @@ ncl_putpages(struct vop_putpages_args *ap)
} else
mtx_unlock(&nmp->nm_mtx);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
(np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
printf("ncl_putpages: called on noncache-able vnode\n");
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
}
/*
* When putting pages, do not extend file past EOF.
@ -316,7 +316,7 @@ ncl_putpages(struct vop_putpages_args *ap)
if (count < 0)
count = 0;
}
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
for (i = 0; i < npages; i++)
rtvals[i] = VM_PAGER_ERROR;
@ -374,9 +374,9 @@ nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
* whether the cache is consistent.
*/
old_lock = ncl_excl_start(vp);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (np->n_flag & NMODIFIED) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
if (vp->v_type != VREG) {
if (vp->v_type != VDIR)
panic("nfs: bioread, not dir");
@ -390,28 +390,28 @@ nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
error = VOP_GETATTR(vp, &vattr, cred);
if (error)
goto out;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_mtime = vattr.va_mtime;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
} else {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
error = VOP_GETATTR(vp, &vattr, cred);
if (error)
goto out;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if ((np->n_flag & NSIZECHANGED)
|| (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
if (vp->v_type == VDIR)
ncl_invaldir(vp);
error = ncl_vinvalbuf(vp, V_SAVE | V_ALLOWCLEAN, td, 1);
if (error != 0)
goto out;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_mtime = vattr.va_mtime;
np->n_flag &= ~NSIZECHANGED;
}
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
out:
ncl_excl_finish(vp, old_lock);
@ -474,9 +474,9 @@ ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
do {
u_quad_t nsize;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
nsize = np->n_size;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
switch (vp->v_type) {
case VREG:
@ -883,13 +883,13 @@ ncl_write(struct vop_write_args *ap)
("ncl_write proc"));
if (vp->v_type != VREG)
return (EIO);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (np->n_flag & NWRITEERR) {
np->n_flag &= ~NWRITEERR;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
return (np->n_error);
} else
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
mtx_lock(&nmp->nm_mtx);
if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
(nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
@ -906,9 +906,9 @@ ncl_write(struct vop_write_args *ap)
* mode or if we are appending.
*/
if (ioflag & (IO_APPEND | IO_SYNC)) {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (np->n_flag & NMODIFIED) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
/*
* Require non-blocking, synchronous writes to
@ -925,13 +925,13 @@ ncl_write(struct vop_write_args *ap)
if (error != 0)
return (error);
} else
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
orig_resid = uio->uio_resid;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
orig_size = np->n_size;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
/*
* If IO_APPEND then load uio_offset. We restart here if we cannot
@ -943,9 +943,9 @@ ncl_write(struct vop_write_args *ap)
error = VOP_GETATTR(vp, &vattr, cred);
if (error)
return (error);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
uio->uio_offset = np->n_size;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
if (uio->uio_offset < 0)
@ -979,9 +979,9 @@ ncl_write(struct vop_write_args *ap)
if (!(ioflag & IO_SYNC)) {
int nflag;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
nflag = np->n_flag;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
if (nflag & NMODIFIED) {
BO_LOCK(&vp->v_bufobj);
if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
@ -1018,7 +1018,7 @@ ncl_write(struct vop_write_args *ap)
* Handle direct append and file extension cases, calculate
* unaligned buffer size.
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if ((np->n_flag & NHASBEENLOCKED) == 0 &&
(nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0)
noncontig_write = 1;
@ -1028,7 +1028,7 @@ ncl_write(struct vop_write_args *ap)
(noncontig_write != 0 &&
lbn == (np->n_size / biosize) &&
uio->uio_offset + n > np->n_size)) && n) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
/*
* Get the buffer (in its pre-append state to maintain
* B_CACHE if it was previously set). Resize the
@ -1041,11 +1041,11 @@ ncl_write(struct vop_write_args *ap)
if (bp != NULL) {
long save;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_size = uio->uio_offset + n;
np->n_flag |= NMODIFIED;
vnode_pager_setsize(vp, np->n_size);
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
save = bp->b_flags & B_CACHE;
bcount = on + n;
@ -1067,15 +1067,15 @@ ncl_write(struct vop_write_args *ap)
else
bcount = np->n_size - (off_t)lbn * biosize;
}
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
bp = nfs_getcacheblk(vp, lbn, bcount, td);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (uio->uio_offset + n > np->n_size) {
np->n_size = uio->uio_offset + n;
np->n_flag |= NMODIFIED;
vnode_pager_setsize(vp, np->n_size);
}
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
if (!bp) {
@ -1124,9 +1124,9 @@ ncl_write(struct vop_write_args *ap)
}
if (bp->b_wcred == NOCRED)
bp->b_wcred = crhold(cred);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag |= NMODIFIED;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
/*
* If dirtyend exceeds file size, chop it down. This should
@ -1369,13 +1369,13 @@ ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
* Invalidate the attribute cache, since writes to a DS
* won't update the size attribute.
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_attrstamp = 0;
} else
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (np->n_directio_asyncwr == 0)
np->n_flag &= ~NMODIFIED;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
out:
ncl_excl_finish(vp, old_lock);
return error;
@ -1517,10 +1517,10 @@ ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thr
TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
nmp->nm_bufqlen++;
if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
NFSLOCKNODE(VTONFS(bp->b_vp));
VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
VTONFS(bp->b_vp)->n_directio_asyncwr++;
mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
NFSUNLOCKNODE(VTONFS(bp->b_vp));
}
mtx_unlock(&ncl_iod_mutex);
return (0);
@ -1552,7 +1552,7 @@ ncl_doio_directwrite(struct buf *bp)
free(uiop, M_NFSDIRECTIO);
if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
struct nfsnode *np = VTONFS(bp->b_vp);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (NFSHASPNFS(VFSTONFS(vnode_mount(bp->b_vp)))) {
/*
* Invalidate the attribute cache, since writes to a DS
@ -1568,7 +1568,7 @@ ncl_doio_directwrite(struct buf *bp)
wakeup((caddr_t)&np->n_directio_asyncwr);
}
}
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
bp->b_vp = NULL;
uma_zfree(ncl_pbuf_zone, bp);
@ -1640,14 +1640,14 @@ ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
}
/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
if (p && vp->v_writecount <= -1) {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
PROC_LOCK(p);
killproc(p, "text file modification");
PROC_UNLOCK(p);
} else
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
break;
case VLNK:
@ -1706,10 +1706,10 @@ ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
/*
* Setup for actual write
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
if (bp->b_dirtyend > bp->b_dirtyoff) {
io.iov_len = uiop->uio_resid = bp->b_dirtyend
@ -1802,11 +1802,11 @@ ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
bp->b_ioflags |= BIO_ERROR;
bp->b_flags |= B_INVAL;
bp->b_error = np->n_error = error;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag |= NWRITEERR;
np->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
bp->b_dirtyoff = bp->b_dirtyend = 0;
}
@ -1839,10 +1839,10 @@ ncl_meta_setsize(struct vnode *vp, struct thread *td, u_quad_t nsize)
int biosize = vp->v_bufobj.bo_bsize;
int error = 0;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
tsize = np->n_size;
np->n_size = nsize;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
if (nsize < tsize) {
struct buf *bp;

View File

@ -219,7 +219,7 @@ ncl_releasesillyrename(struct vnode *vp, struct thread *td)
} else
sp = NULL;
if (sp != NULL) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
(void) ncl_vinvalbuf(vp, 0, td, 1);
/*
* Remove the silly file that was rename'd earlier
@ -228,7 +228,7 @@ ncl_releasesillyrename(struct vnode *vp, struct thread *td)
crfree(sp->s_cred);
TASK_INIT(&sp->s_task, 0, nfs_freesillyrename, sp);
taskqueue_enqueue(taskqueue_thread, &sp->s_task);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
}
}
@ -260,7 +260,7 @@ ncl_inactive(struct vop_inactive_args *ap)
}
np = VTONFS(vp);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
ncl_releasesillyrename(vp, ap->a_td);
/*
@ -271,7 +271,7 @@ ncl_inactive(struct vop_inactive_args *ap)
* None of the other flags are meaningful after the vnode is unused.
*/
np->n_flag &= (NMODIFIED | NDSCOMMIT);
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
return (0);
}
@ -292,9 +292,9 @@ ncl_reclaim(struct vop_reclaim_args *ap)
if (nfs_reclaim_p != NULL)
nfs_reclaim_p(ap);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
ncl_releasesillyrename(vp, ap->a_td);
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
if (NFS_ISV4(vp) && vp->v_type == VREG)
/*
@ -348,11 +348,11 @@ ncl_invalcaches(struct vnode *vp)
struct nfsnode *np = VTONFS(vp);
int i;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
np->n_accesscache[i].stamp = 0;
KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
np->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}

View File

@ -742,12 +742,12 @@ nfscl_wcc_data(struct nfsrv_descript *nd, struct vnode *vp,
if (*tl == newnfs_true) {
NFSM_DISSECT(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
if (wccflagp != NULL) {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
*wccflagp = (np->n_mtime.tv_sec ==
fxdr_unsigned(u_int32_t, *(tl + 2)) &&
np->n_mtime.tv_nsec ==
fxdr_unsigned(u_int32_t, *(tl + 3)));
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
}
error = nfscl_postop_attr(nd, nap, flagp, stuff);
@ -768,12 +768,12 @@ nfscl_wcc_data(struct nfsrv_descript *nd, struct vnode *vp,
nd->nd_flag |= ND_NOMOREDATA;
if (wccflagp != NULL &&
nfsva.na_vattr.va_mtime.tv_sec != 0) {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
*wccflagp = (np->n_mtime.tv_sec ==
nfsva.na_vattr.va_mtime.tv_sec &&
np->n_mtime.tv_nsec ==
nfsva.na_vattr.va_mtime.tv_sec);
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
}
nfsmout:

View File

@ -5952,9 +5952,9 @@ nfscl_doflayoutio(vnode_t vp, struct uio *uiop, int *iomode, int *must_commit,
error = EIO;
} else {
commit_thru_mds = 0;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag |= NDSCOMMIT;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
if (docommit != 0) {
if (error == 0)
@ -5968,9 +5968,9 @@ nfscl_doflayoutio(vnode_t vp, struct uio *uiop, int *iomode, int *must_commit,
*eofp = 1;
uiop->uio_resid = 0;
} else {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag &= ~NDSCOMMIT;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
} else if (rwflag == NFSV4OPEN_ACCESSREAD)
error = nfsrpc_readds(vp, uiop, stateidp, eofp, *dspp,
@ -6036,9 +6036,9 @@ nfscl_dofflayoutio(vnode_t vp, struct uio *uiop, int *iomode, int *must_commit,
transfer = dp->nfsdi_rsize;
else
transfer = dp->nfsdi_wsize;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag |= NDSCOMMIT;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
if (len > transfer && docommit == 0)
xfer = transfer;
else
@ -6075,9 +6075,9 @@ nfscl_dofflayoutio(vnode_t vp, struct uio *uiop, int *iomode, int *must_commit,
*eofp = 1;
uiop->uio_resid = 0;
} else {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag &= ~NDSCOMMIT;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
} else if (rwflag == NFSV4OPEN_ACCESSREAD) {
error = nfsrpc_readds(vp, uiop, stateidp, eofp, *dspp,

View File

@ -121,20 +121,20 @@ ncl_uninit(struct vfsconf *vfsp)
void
ncl_dircookie_lock(struct nfsnode *np)
{
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
while (np->n_flag & NDIRCOOKIELK)
(void) msleep(&np->n_flag, &np->n_mtx, PZERO, "nfsdirlk", 0);
np->n_flag |= NDIRCOOKIELK;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
void
ncl_dircookie_unlock(struct nfsnode *np)
{
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag &= ~NDIRCOOKIELK;
wakeup(&np->n_flag);
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
bool
@ -190,7 +190,7 @@ ncl_getattrcache(struct vnode *vp, struct vattr *vaper)
vap = &np->n_vattr.na_vattr;
nmp = VFSTONFS(vp->v_mount);
mustflush = nfscl_mustflush(vp); /* must be before mtx_lock() */
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
/* XXX n_mtime doesn't seem to be updated on a miss-and-reload */
timeo = (time_second - np->n_mtime.tv_sec) / 10;
@ -225,7 +225,7 @@ ncl_getattrcache(struct vnode *vp, struct vattr *vaper)
if ((time_second - np->n_attrstamp) >= timeo &&
(mustflush != 0 || np->n_attrstamp == 0)) {
nfsstatsv1.attrcache_misses++;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
KDTRACE_NFS_ATTRCACHE_GET_MISS(vp);
return( ENOENT);
}
@ -252,7 +252,7 @@ ncl_getattrcache(struct vnode *vp, struct vattr *vaper)
if (np->n_flag & NUPD)
vaper->va_mtime = np->n_mtim;
}
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap);
return (0);
}

View File

@ -310,7 +310,7 @@ nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td,
(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
if (!error) {
lrupos = 0;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
if (np->n_accesscache[i].uid == cred->cr_uid) {
np->n_accesscache[i].mode = rmode;
@ -326,7 +326,7 @@ nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td,
np->n_accesscache[lrupos].mode = rmode;
np->n_accesscache[lrupos].stamp = time_second;
}
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
if (retmode != NULL)
*retmode = rmode;
KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0);
@ -421,7 +421,7 @@ nfs_access(struct vop_access_args *ap)
* this request?
*/
gotahit = 0;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) {
if (time_second < (np->n_accesscache[i].stamp
@ -433,7 +433,7 @@ nfs_access(struct vop_access_args *ap)
break;
}
}
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
#ifdef KDTRACE_HOOKS
if (gotahit != 0)
KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp,
@ -465,14 +465,14 @@ nfs_access(struct vop_access_args *ap)
* After calling nfsspec_access, we should have the correct
* file size cached.
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD)
&& VTONFS(vp)->n_size > 0) {
struct iovec aiov;
struct uio auio;
char buf[1];
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
aiov.iov_base = buf;
aiov.iov_len = 1;
auio.uio_iov = &aiov;
@ -498,7 +498,7 @@ nfs_access(struct vop_access_args *ap)
else
error = EACCES;
} else
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
return (error);
}
}
@ -543,52 +543,52 @@ nfs_open(struct vop_open_args *ap)
* Now, if this Open will be doing reading, re-validate/flush the
* cache, so that Close/Open coherency is maintained.
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (np->n_flag & NMODIFIED) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
if (error == EINTR || error == EIO) {
if (NFS_ISV4(vp))
(void) nfsrpc_close(vp, 0, ap->a_td);
return (error);
}
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
if (vp->v_type == VDIR)
np->n_direofoffset = 0;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
error = VOP_GETATTR(vp, &vattr, ap->a_cred);
if (error) {
if (NFS_ISV4(vp))
(void) nfsrpc_close(vp, 0, ap->a_td);
return (error);
}
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_mtime = vattr.va_mtime;
if (NFS_ISV4(vp))
np->n_change = vattr.va_filerev;
} else {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
error = VOP_GETATTR(vp, &vattr, ap->a_cred);
if (error) {
if (NFS_ISV4(vp))
(void) nfsrpc_close(vp, 0, ap->a_td);
return (error);
}
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if ((NFS_ISV4(vp) && np->n_change != vattr.va_filerev) ||
NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
if (vp->v_type == VDIR)
np->n_direofoffset = 0;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
if (error == EINTR || error == EIO) {
if (NFS_ISV4(vp))
(void) nfsrpc_close(vp, 0, ap->a_td);
return (error);
}
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_mtime = vattr.va_mtime;
if (NFS_ISV4(vp))
np->n_change = vattr.va_filerev;
@ -601,14 +601,14 @@ nfs_open(struct vop_open_args *ap)
if (newnfs_directio_enable && (fmode & O_DIRECT) &&
(vp->v_type == VREG)) {
if (np->n_directio_opens == 0) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
if (error) {
if (NFS_ISV4(vp))
(void) nfsrpc_close(vp, 0, ap->a_td);
return (error);
}
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag |= NNONCACHE;
}
np->n_directio_opens++;
@ -630,7 +630,7 @@ nfs_open(struct vop_open_args *ap)
np->n_writecred = crhold(ap->a_cred);
} else
cred = NULL;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
if (cred != NULL)
crfree(cred);
@ -657,9 +657,9 @@ nfs_open(struct vop_open_args *ap)
/* And, finally, make sure that n_mtime is up to date. */
np = VTONFS(vp);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_mtime = np->n_vattr.na_mtime;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
return (0);
}
@ -734,9 +734,9 @@ nfs_close(struct vop_close_args *ap)
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_WUNLOCK(vp->v_object);
}
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (np->n_flag & NMODIFIED) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
if (NFS_ISV3(vp)) {
/*
* Under NFSv3 we have dirty buffers to dispose of. We
@ -770,7 +770,7 @@ nfs_close(struct vop_close_args *ap)
} else {
error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
}
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
}
/*
* Invalidate the attribute cache in all cases.
@ -795,7 +795,7 @@ nfs_close(struct vop_close_args *ap)
np->n_flag &= ~NWRITEERR;
error = np->n_error;
}
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
if (NFS_ISV4(vp)) {
@ -829,13 +829,13 @@ nfs_close(struct vop_close_args *ap)
("nfs_close: dirty unflushed (%d) directio buffers\n",
np->n_directio_asyncwr));
if (newnfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
KASSERT((np->n_directio_opens > 0),
("nfs_close: unexpectedly value (0) of n_directio_opens\n"));
np->n_directio_opens--;
if (np->n_directio_opens == 0)
np->n_flag &= ~NNONCACHE;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
if (localcred)
NFSFREECRED(cred);
@ -859,10 +859,10 @@ nfs_getattr(struct vop_getattr_args *ap)
/*
* Update local times for special files.
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (np->n_flag & (NACC | NUPD))
np->n_flag |= NCHG;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
/*
* First look in the cache.
*/
@ -976,14 +976,14 @@ nfs_setattr(struct vop_setattr_args *ap)
* V_SAVE races that might setsize a lower
* value.
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
tsize = np->n_size;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
error = ncl_meta_setsize(vp, td, vap->va_size);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (np->n_flag & NMODIFIED) {
tsize = np->n_size;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
error = ncl_vinvalbuf(vp, vap->va_size == 0 ?
0 : V_SAVE, td, 1);
if (error != 0) {
@ -996,34 +996,34 @@ nfs_setattr(struct vop_setattr_args *ap)
*/
nfscl_delegmodtime(vp);
} else
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
/*
* np->n_size has already been set to vap->va_size
* in ncl_meta_setsize(). We must set it again since
* nfs_loadattrcache() could be called through
* ncl_meta_setsize() and could modify np->n_size.
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_vattr.na_size = np->n_size = vap->va_size;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
} else {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) &&
(np->n_flag & NMODIFIED) && vp->v_type == VREG) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
if (error == EINTR || error == EIO)
return (error);
} else
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
error = nfs_setattrrpc(vp, vap, ap->a_cred, td);
if (error && vap->va_size != VNOVAL) {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_size = np->n_vattr.na_size = tsize;
vnode_pager_setsize(vp, tsize);
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
return (error);
}
@ -1040,11 +1040,11 @@ nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
struct nfsvattr nfsva;
if (NFS_ISV34(vp)) {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
np->n_accesscache[i].stamp = 0;
np->n_flag |= NDELEGMOD;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
}
error = nfsrpc_setattr(vp, vap, NULL, cred, td, &nfsva, &attrflag,
@ -1092,12 +1092,12 @@ nfs_lookup(struct vop_lookup_args *ap)
np = VTONFS(dvp);
/* For NFSv4, wait until any remove is done. */
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) {
np->n_flag |= NREMOVEWANT;
(void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0);
}
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0)
return (error);
@ -1139,10 +1139,10 @@ nfs_lookup(struct vop_lookup_args *ap)
if (!(nmp->nm_flag & NFSMNT_NOCTO) &&
(flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
!(newnp->n_flag & NMODIFIED)) {
mtx_lock(&newnp->n_mtx);
NFSLOCKNODE(newnp);
newnp->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
mtx_unlock(&newnp->n_mtx);
NFSUNLOCKNODE(newnp);
}
if (nfscl_nodeleg(newvp, 0) == 0 ||
((u_int)(ticks - ncticks) < (nmp->nm_nametimeo * hz) &&
@ -1224,14 +1224,14 @@ nfs_lookup(struct vop_lookup_args *ap)
* has changed. Don't bother adding the entry
* if the directory has already changed.
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (timespeccmp(&np->n_vattr.na_mtime,
&dnfsva.na_mtime, ==)) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
cache_enter_time(dvp, NULL, cnp,
&dnfsva.na_mtime, NULL);
} else
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
return (ENOENT);
}
@ -1319,10 +1319,10 @@ nfs_lookup(struct vop_lookup_args *ap)
* are fetched in nfs_open() since we did not
* fetch attributes from the LOOKUP reply.
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
}
if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
@ -1516,13 +1516,13 @@ nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
vap->va_gid);
}
dnp = VTONFS(dvp);
mtx_lock(&dnp->n_mtx);
NFSLOCKNODE(dnp);
dnp->n_flag |= NMODIFIED;
if (!dattrflag) {
dnp->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
}
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
return (error);
}
@ -1593,12 +1593,12 @@ nfs_create(struct vop_create_args *ap)
nmp = VFSTONFS(vnode_mount(dvp));
again:
/* For NFSv4, wait until any remove is done. */
mtx_lock(&dnp->n_mtx);
NFSLOCKNODE(dnp);
while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) {
dnp->n_flag |= NREMOVEWANT;
(void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0);
}
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
cverf = nfs_get_cverf();
error = nfsrpc_create(dvp, cnp->cn_nameptr, cnp->cn_namelen,
@ -1664,13 +1664,13 @@ nfs_create(struct vop_create_args *ap)
error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
vap->va_gid);
}
mtx_lock(&dnp->n_mtx);
NFSLOCKNODE(dnp);
dnp->n_flag |= NMODIFIED;
if (!dattrflag) {
dnp->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
}
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
return (error);
}
@ -1729,9 +1729,9 @@ nfs_remove(struct vop_remove_args *ap)
error = 0;
} else if (!np->n_sillyrename)
error = nfs_sillyrename(dvp, vp, cnp);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_attrstamp = 0;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
return (error);
}
@ -1763,29 +1763,29 @@ nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
struct nfsnode *dnp = VTONFS(dvp);
int error = 0, dattrflag;
mtx_lock(&dnp->n_mtx);
NFSLOCKNODE(dnp);
dnp->n_flag |= NREMOVEINPROG;
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
error = nfsrpc_remove(dvp, name, namelen, vp, cred, td, &dnfsva,
&dattrflag, NULL);
mtx_lock(&dnp->n_mtx);
NFSLOCKNODE(dnp);
if ((dnp->n_flag & NREMOVEWANT)) {
dnp->n_flag &= ~(NREMOVEWANT | NREMOVEINPROG);
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
wakeup((caddr_t)dnp);
} else {
dnp->n_flag &= ~NREMOVEINPROG;
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
}
if (dattrflag)
(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
mtx_lock(&dnp->n_mtx);
NFSLOCKNODE(dnp);
dnp->n_flag |= NMODIFIED;
if (!dattrflag) {
dnp->n_attrstamp = 0;
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
}
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
if (error && NFS_ISV4(dvp))
error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
return (error);
@ -1869,8 +1869,8 @@ nfs_rename(struct vop_rename_args *ap)
sizeof (struct nfsv4node) +
tdnp->n_fhp->nfh_len + tcnp->cn_namelen - 1,
M_NFSV4NODE, M_WAITOK);
mtx_lock(&tdnp->n_mtx);
mtx_lock(&fnp->n_mtx);
NFSLOCKNODE(tdnp);
NFSLOCKNODE(fnp);
if (fnp->n_v4 != NULL && fvp->v_type == VREG &&
(fnp->n_v4->n4_namelen != tcnp->cn_namelen ||
NFSBCMP(tcnp->cn_nameptr, NFS4NODENAME(fnp->n_v4),
@ -1896,8 +1896,8 @@ printf("ren replace=%s\n",nnn);
NFSBCOPY(tcnp->cn_nameptr,
NFS4NODENAME(fnp->n_v4), tcnp->cn_namelen);
}
mtx_unlock(&tdnp->n_mtx);
mtx_unlock(&fnp->n_mtx);
NFSUNLOCKNODE(tdnp);
NFSUNLOCKNODE(fnp);
if (newv4 != NULL)
free(newv4, M_NFSV4NODE);
}
@ -1954,24 +1954,24 @@ nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr,
error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp,
tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag,
&tattrflag, NULL, NULL);
mtx_lock(&fdnp->n_mtx);
NFSLOCKNODE(fdnp);
fdnp->n_flag |= NMODIFIED;
if (fattrflag != 0) {
mtx_unlock(&fdnp->n_mtx);
NFSUNLOCKNODE(fdnp);
(void) nfscl_loadattrcache(&fdvp, &fnfsva, NULL, NULL, 0, 1);
} else {
fdnp->n_attrstamp = 0;
mtx_unlock(&fdnp->n_mtx);
NFSUNLOCKNODE(fdnp);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp);
}
mtx_lock(&tdnp->n_mtx);
NFSLOCKNODE(tdnp);
tdnp->n_flag |= NMODIFIED;
if (tattrflag != 0) {
mtx_unlock(&tdnp->n_mtx);
NFSUNLOCKNODE(tdnp);
(void) nfscl_loadattrcache(&tdvp, &tnfsva, NULL, NULL, 0, 1);
} else {
tdnp->n_attrstamp = 0;
mtx_unlock(&tdnp->n_mtx);
NFSUNLOCKNODE(tdnp);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
}
if (error && NFS_ISV4(fdvp))
@ -2003,23 +2003,23 @@ nfs_link(struct vop_link_args *ap)
cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &attrflag,
&dattrflag, NULL);
tdnp = VTONFS(tdvp);
mtx_lock(&tdnp->n_mtx);
NFSLOCKNODE(tdnp);
tdnp->n_flag |= NMODIFIED;
if (dattrflag != 0) {
mtx_unlock(&tdnp->n_mtx);
NFSUNLOCKNODE(tdnp);
(void) nfscl_loadattrcache(&tdvp, &dnfsva, NULL, NULL, 0, 1);
} else {
tdnp->n_attrstamp = 0;
mtx_unlock(&tdnp->n_mtx);
NFSUNLOCKNODE(tdnp);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
}
if (attrflag)
(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
else {
np = VTONFS(vp);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_attrstamp = 0;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
}
/*
@ -2091,14 +2091,14 @@ nfs_symlink(struct vop_symlink_args *ap)
}
dnp = VTONFS(dvp);
mtx_lock(&dnp->n_mtx);
NFSLOCKNODE(dnp);
dnp->n_flag |= NMODIFIED;
if (dattrflag != 0) {
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
} else {
dnp->n_attrstamp = 0;
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
}
/*
@ -2137,14 +2137,14 @@ nfs_mkdir(struct vop_mkdir_args *ap)
vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &nfhp,
&attrflag, &dattrflag, NULL);
dnp = VTONFS(dvp);
mtx_lock(&dnp->n_mtx);
NFSLOCKNODE(dnp);
dnp->n_flag |= NMODIFIED;
if (dattrflag != 0) {
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
} else {
dnp->n_attrstamp = 0;
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
}
if (nfhp) {
@ -2208,14 +2208,14 @@ nfs_rmdir(struct vop_rmdir_args *ap)
error = nfsrpc_rmdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
cnp->cn_cred, cnp->cn_thread, &dnfsva, &dattrflag, NULL);
dnp = VTONFS(dvp);
mtx_lock(&dnp->n_mtx);
NFSLOCKNODE(dnp);
dnp->n_flag |= NMODIFIED;
if (dattrflag != 0) {
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
} else {
dnp->n_attrstamp = 0;
mtx_unlock(&dnp->n_mtx);
NFSUNLOCKNODE(dnp);
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
}
@ -2256,16 +2256,16 @@ nfs_readdir(struct vop_readdir_args *ap)
if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
(np->n_flag & NMODIFIED) == 0) {
if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) ||
!NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
NFSINCRGLOBAL(nfsstatsv1.direofcache_hits);
if (ap->a_eofflag != NULL)
*ap->a_eofflag = 1;
return (0);
} else
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
}
@ -2605,9 +2605,9 @@ ncl_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
error = nfscl_doiods(vp, &uio, NULL, NULL,
NFSV4OPEN_ACCESSWRITE, 1, cred, td);
if (error != 0) {
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag &= ~NDSCOMMIT;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
}
if (error != 0) {
@ -2982,7 +2982,7 @@ ncl_flush(struct vnode *vp, int waitfor, struct thread *td,
* Wait for all the async IO requests to drain
*/
BO_UNLOCK(bo);
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
while (np->n_directio_asyncwr > 0) {
np->n_flag |= NFSYNCWAIT;
error = newnfs_msleep(td, &np->n_directio_asyncwr,
@ -2990,13 +2990,13 @@ ncl_flush(struct vnode *vp, int waitfor, struct thread *td,
"nfsfsync", 0);
if (error) {
if (newnfs_sigintr(nmp, td)) {
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
error = EINTR;
goto done;
}
}
}
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
} else
BO_UNLOCK(bo);
if (NFSHASPNFS(nmp)) {
@ -3005,10 +3005,10 @@ ncl_flush(struct vnode *vp, int waitfor, struct thread *td,
* Invalidate the attribute cache, since writes to a DS
* won't update the size attribute.
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_attrstamp = 0;
} else
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (np->n_flag & NWRITEERR) {
error = np->n_error;
np->n_flag &= ~NWRITEERR;
@ -3016,7 +3016,7 @@ ncl_flush(struct vnode *vp, int waitfor, struct thread *td,
if (commit && bo->bo_dirty.bv_cnt == 0 &&
bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
np->n_flag &= ~NMODIFIED;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
done:
if (bvec != NULL && bvec != bvec_on_stack)
free(bvec, M_TEMP);
@ -3136,9 +3136,9 @@ nfs_advlock(struct vop_advlock_args *ap)
}
}
/* Mark that a file lock has been acquired. */
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag |= NHASBEENLOCKED;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
}
} else if (!NFS_ISV4(vp)) {
if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
@ -3157,9 +3157,9 @@ nfs_advlock(struct vop_advlock_args *ap)
error = NFSVOPLOCK(vp, LK_SHARED);
if (error == 0) {
/* Mark that a file lock has been acquired. */
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag |= NHASBEENLOCKED;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
NFSVOPUNLOCK(vp, 0);
}
}
@ -3314,10 +3314,10 @@ nfsfifo_read(struct vop_read_args *ap)
/*
* Set access flag.
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag |= NACC;
vfs_timestamp(&np->n_atim);
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
error = fifo_specops.vop_read(ap);
return error;
}
@ -3333,10 +3333,10 @@ nfsfifo_write(struct vop_write_args *ap)
/*
* Set update flag.
*/
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
np->n_flag |= NUPD;
vfs_timestamp(&np->n_mtim);
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
return(fifo_specops.vop_write(ap));
}
@ -3353,7 +3353,7 @@ nfsfifo_close(struct vop_close_args *ap)
struct vattr vattr;
struct timespec ts;
mtx_lock(&np->n_mtx);
NFSLOCKNODE(np);
if (np->n_flag & (NACC | NUPD)) {
vfs_timestamp(&ts);
if (np->n_flag & NACC)
@ -3368,12 +3368,12 @@ nfsfifo_close(struct vop_close_args *ap)
vattr.va_atime = np->n_atim;
if (np->n_flag & NUPD)
vattr.va_mtime = np->n_mtim;
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
(void)VOP_SETATTR(vp, &vattr, ap->a_cred);
goto out;
}
}
mtx_unlock(&np->n_mtx);
NFSUNLOCKNODE(np);
out:
return (fifo_specops.vop_close(ap));
}