Remove the NFS client rslock. The rslock was used to serialize writers
that want to extend the file. It was also used to serialize readers that
might want to read the last block of the file (with a writer extending
the file). Now that we support vnode locking for NFS, the rslock is
unnecessary. Writers grab the exclusive vnode lock before writing and
readers grab the shared (or in some cases the exclusive) lock.

Submitted by:	Mohan Srinivasan
Committed by:	Paul Saab on 2005-07-21 22:46:56 +00:00
Parent:	9e76f9ad3f
Commit:	865b5cc7fd
Notes (svn2git, 2020-12-20 02:59:44 +00:00):	svn path=/head/; revision=148268
3 changed files with 2 additions and 114 deletions
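For readers unfamiliar with the new locking discipline, here is a minimal,
hypothetical sketch (not part of this diff) of how an appending writer is
serialized once the rslock is gone: the exclusive vnode lock held around
VOP_WRITE() keeps other appenders from changing n_size, while readers of
the last block hold at least the shared vnode lock. It assumes the
2005-era vn_lock()/VOP_UNLOCK() interfaces that take an explicit struct
thread pointer; example_append() and its arguments are illustrative only.

/* Hypothetical caller, for illustration only -- not part of this commit. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/ucred.h>
#include <sys/uio.h>
#include <sys/vnode.h>

static int
example_append(struct vnode *vp, struct uio *uio, struct ucred *cred,
    struct thread *td)
{
	int error;

	/*
	 * Exclusive vnode lock: no other writer can extend the file or
	 * change the nfsnode's n_size while this append is in progress,
	 * which is what the rslock used to guarantee.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = VOP_WRITE(vp, uio, IO_APPEND, cred);
	VOP_UNLOCK(vp, 0, td);
	return (error);
}

The read side is analogous: nfs_bioread() now runs with at least the
shared vnode lock held by its caller, so it can no longer race an
appender over the size of the final block.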

sys/nfsclient/nfs_bio.c

@@ -474,45 +474,15 @@ nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
np->ra_expect_lbn = lbn + 1;
}
/*
* Obtain the buffer cache block. Figure out the buffer size
* when we are at EOF. If we are modifying the size of the
* buffer based on an EOF condition we need to hold
* nfs_rslock() through obtaining the buffer to prevent
* a potential writer-appender from messing with n_size.
* Otherwise we may accidently truncate the buffer and
* lose dirty data.
*
* Note that bcount is *not* DEV_BSIZE aligned.
*/
again:
/* Note that bcount is *not* DEV_BSIZE aligned. */
bcount = biosize;
if ((off_t)lbn * biosize >= np->n_size) {
bcount = 0;
} else if ((off_t)(lbn + 1) * biosize > np->n_size) {
bcount = np->n_size - (off_t)lbn * biosize;
}
if (bcount != biosize) {
switch(nfs_rslock(np, td)) {
case ENOLCK:
goto again;
/* not reached */
case EIO:
return (EIO);
case EINTR:
case ERESTART:
return(EINTR);
/* not reached */
default:
break;
}
}
bp = nfs_getcacheblk(vp, lbn, bcount, td);
if (bcount != biosize)
nfs_rsunlock(np, td);
if (!bp) {
error = nfs_sigintr(nmp, NULL, td);
return (error ? error : EINTR);
@@ -846,7 +816,6 @@ nfs_write(struct vop_write_args *ap)
daddr_t lbn;
int bcount;
int n, on, error = 0;
int haverslock = 0;
struct proc *p = td?td->td_proc:NULL;
GIANT_REQUIRED;
@@ -894,7 +863,6 @@ nfs_write(struct vop_write_args *ap)
* If IO_APPEND then load uio_offset. We restart here if we cannot
* get the append lock.
*/
restart:
if (ioflag & IO_APPEND) {
np->n_attrstamp = 0;
error = VOP_GETATTR(vp, &vattr, cred, td);
@@ -913,38 +881,6 @@ nfs_write(struct vop_write_args *ap)
if (nfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
return nfs_directio_write(vp, uio, cred, ioflag);
/*
* We need to obtain the rslock if we intend to modify np->n_size
* in order to guarentee the append point with multiple contending
* writers, to guarentee that no other appenders modify n_size
* while we are trying to obtain a truncated buffer (i.e. to avoid
* accidently truncating data written by another appender due to
* the race), and to ensure that the buffer is populated prior to
* our extending of the file. We hold rslock through the entire
* operation.
*
* Note that we do not synchronize the case where someone truncates
* the file while we are appending to it because attempting to lock
* this case may deadlock other parts of the system unexpectedly.
*/
if ((ioflag & IO_APPEND) ||
uio->uio_offset + uio->uio_resid > np->n_size) {
switch(nfs_rslock(np, td)) {
case ENOLCK:
goto restart;
/* not reached */
case EIO:
return (EIO);
case EINTR:
case ERESTART:
return(EINTR);
/* not reached */
default:
break;
}
haverslock = 1;
}
/*
* Maybe this should be above the vnode op call, but so long as
* file servers have no limits, i don't think it matters
@@ -955,8 +891,6 @@ nfs_write(struct vop_write_args *ap)
lim_cur(p, RLIMIT_FSIZE)) {
psignal(p, SIGXFSZ);
PROC_UNLOCK(p);
if (haverslock)
nfs_rsunlock(np, td);
return (EFBIG);
}
PROC_UNLOCK(p);
@@ -1012,13 +946,8 @@ nfs_write(struct vop_write_args *ap)
if (wouldcommit > nmp->nm_wcommitsize)
needrestart = 1;
}
if (needrestart) {
if (haverslock) {
nfs_rsunlock(np, td);
haverslock = 0;
}
if (needrestart)
goto flush_and_restart;
}
}
do {
@@ -1215,9 +1144,6 @@ nfs_write(struct vop_write_args *ap)
}
} while (uio->uio_resid > 0 && n > 0);
if (haverslock)
nfs_rsunlock(np, td);
return (error);
}

sys/nfsclient/nfs_node.c

@@ -107,20 +107,10 @@ nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp)
struct vnode *nvp;
int error;
u_int hash;
int rsflags;
struct nfsmount *nmp;
struct nfs_vncmp ncmp;
/*
* Calculate nfs mount point and figure out whether the rslock should
* be interruptible or not.
*/
nmp = VFSTONFS(mntp);
if (nmp->nm_flag & NFSMNT_INT)
rsflags = PCATCH;
else
rsflags = 0;
*npp = NULL;
hash = fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT);
@@ -173,7 +163,6 @@ nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp)
np->n_fhp = &np->n_fh;
bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
np->n_fhsize = fhsize;
lockinit(&np->n_rslock, PVFS | rsflags, "nfrslk", 0, 0);
*npp = np;
return (0);
@@ -240,7 +229,6 @@ nfs_reclaim(struct vop_reclaim_args *ap)
FREE((caddr_t)np->n_fhp, M_NFSBIGFH);
}
lockdestroy(&np->n_rslock);
uma_zfree(nfsnode_zone, vp->v_data);
vp->v_data = NULL;
vnode_destroy_vobject(vp);

sys/nfsclient/nfsnode.h

@@ -120,7 +120,6 @@ struct nfsnode {
short n_fhsize; /* size in bytes, of fh */
short n_flag; /* Flag for locking.. */
nfsfh_t n_fh; /* Small File Handle */
struct lock n_rslock;
struct nfs4_fctx n_rfc;
struct nfs4_fctx n_wfc;
u_char *n_name; /* leaf name, for v4 OPEN op */
@@ -168,31 +167,6 @@ extern struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
#if defined(_KERNEL)
/*
* nfs_rslock - Attempt to obtain lock on nfsnode
*
* Attempt to obtain a lock on the passed nfsnode, returning ENOLCK
* if the lock could not be obtained due to our having to sleep. This
* function is generally used to lock around code that modifies an
* NFS file's size. In order to avoid deadlocks the lock
* should not be obtained while other locks are being held.
*/
static __inline int
nfs_rslock(struct nfsnode *np, struct thread *td)
{
return(lockmgr(&np->n_rslock,
LK_EXCLUSIVE | LK_CANRECURSE | LK_SLEEPFAIL, NULL, td));
}
static __inline void
nfs_rsunlock(struct nfsnode *np, struct thread *td)
{
(void)lockmgr(&np->n_rslock, LK_RELEASE, NULL, td);
}
extern struct vop_vector nfs_fifoops;
extern struct vop_vector nfs_vnodeops;
extern struct vop_vector nfs4_vnodeops;