- LK_NOPAUSE is a nop now.

Sponsored by:   Isilon Systems, Inc.
This commit is contained in:
jeff 2005-03-31 04:37:09 +00:00
parent 902bc24bce
commit 97c40ebd49
9 changed files with 17 additions and 17 deletions

View File

@ -1910,7 +1910,7 @@ do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
* Get the page from backing store.
*/
bsize = vp->v_mount->mnt_stat.f_iosize;
vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY, td);
/*
* XXXMAC: Because we don't have fp->f_cred here,
* we pass in NOCRED. This is probably wrong, but

View File

@ -411,7 +411,7 @@ vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp,
TAILQ_INIT(&mp->mnt_nvnodelist);
mp->mnt_nvnodelistsize = 0;
mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
vfs_busy(mp, LK_NOWAIT, 0, td);
mp->mnt_op = vfsp->vfc_vfsops;
mp->mnt_vfc = vfsp;

View File

@ -352,7 +352,7 @@ vfs_busy(mp, flags, interlkp, td)
}
if (interlkp)
mtx_unlock(interlkp);
lkflags = LK_SHARED | LK_NOPAUSE | LK_INTERLOCK;
lkflags = LK_SHARED | LK_INTERLOCK;
if (lockmgr(&mp->mnt_lock, lkflags, MNT_MTX(mp), td))
panic("vfs_busy: unexpected lock failure");
return (0);
@ -831,7 +831,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
vp->v_vnlock = &vp->v_lock;
mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, 0);
/*
* Initialize bufobj.
*/

View File

@ -496,10 +496,10 @@ vn_read(fp, uio, active_cred, flags, td)
* Once this field has it's own lock we can acquire this shared.
*/
if ((flags & FOF_OFFSET) == 0) {
vn_lock(vp, LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
uio->uio_offset = fp->f_offset;
} else
vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY, td);
ioflag |= sequential_heuristic(uio, fp);
@ -811,7 +811,7 @@ debug_vn_lock(vp, flags, td, filename, line)
* lockmgr drops interlock before it will return for
* any reason. So force the code above to relock it.
*/
error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, td);
error = VOP_LOCK(vp, flags | LK_INTERLOCK, td);
flags &= ~LK_INTERLOCK;
/*
* Callers specify LK_RETRY if they wish to get dead vnodes.

View File

@ -173,7 +173,7 @@ nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp)
np->n_fhp = &np->n_fh;
bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
np->n_fhsize = fhsize;
lockinit(&np->n_rslock, PVFS | rsflags, "nfrslk", 0, LK_NOPAUSE);
lockinit(&np->n_rslock, PVFS | rsflags, "nfrslk", 0, 0);
*npp = np;
return (0);

View File

@ -119,7 +119,7 @@ ffs_rawread_sync(struct vnode *vp, struct thread *td)
if (VOP_ISLOCKED(vp, td) != LK_EXCLUSIVE) {
upgraded = 1;
/* Upgrade to exclusive lock, this might block */
VOP_LOCK(vp, LK_UPGRADE | LK_NOPAUSE, td);
VOP_LOCK(vp, LK_UPGRADE, td);
} else
upgraded = 0;

View File

@ -522,7 +522,7 @@ ffs_snapshot(mp, snapfile)
sn = malloc(sizeof *sn, M_UFSMNT, M_WAITOK | M_ZERO);
TAILQ_INIT(&sn->sn_head);
lockinit(&sn->sn_lock, PVFS, "snaplk", VLKTIMEOUT,
LK_CANRECURSE | LK_NOPAUSE);
LK_CANRECURSE);
VI_LOCK(vp);
vp->v_vnlock = &sn->sn_lock;
devvp->v_rdev->si_snapdata = sn;
@ -1846,7 +1846,7 @@ ffs_snapshot_mount(mp)
sn = malloc(sizeof *sn, M_UFSMNT, M_WAITOK | M_ZERO);
TAILQ_INIT(&sn->sn_head);
lockinit(&sn->sn_lock, PVFS, "snaplk", VLKTIMEOUT,
LK_CANRECURSE | LK_NOPAUSE);
LK_CANRECURSE);
VI_LOCK(vp);
vp->v_vnlock = &sn->sn_lock;
devvp->v_rdev->si_snapdata = sn;

View File

@ -611,7 +611,7 @@ ufs_extattr_enable(struct ufsmount *ump, int attrnamespace,
auio.uio_rw = UIO_READ;
auio.uio_td = td;
vn_lock(backing_vnode, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
vn_lock(backing_vnode, LK_SHARED | LK_RETRY, td);
error = VOP_READ(backing_vnode, &auio, IO_NODELOCKED,
ump->um_extattr.uepm_ucred);
@ -671,7 +671,7 @@ ufs_extattr_disable(struct ufsmount *ump, int attrnamespace,
LIST_REMOVE(uele, uele_entries);
vn_lock(uele->uele_backing_vnode, LK_SHARED | LK_NOPAUSE | LK_RETRY,
vn_lock(uele->uele_backing_vnode, LK_SHARED | LK_RETRY,
td);
ASSERT_VOP_LOCKED(uele->uele_backing_vnode, "ufs_extattr_disable");
VOP_UNLOCK(uele->uele_backing_vnode, 0, td);
@ -874,7 +874,7 @@ ufs_extattr_get(struct vnode *vp, int attrnamespace, const char *name,
*/
if (attribute->uele_backing_vnode != vp)
vn_lock(attribute->uele_backing_vnode, LK_SHARED |
LK_NOPAUSE | LK_RETRY, td);
LK_RETRY, td);
error = VOP_READ(attribute->uele_backing_vnode, &local_aio,
IO_NODELOCKED, ump->um_extattr.uepm_ucred);
@ -1084,7 +1084,7 @@ ufs_extattr_set(struct vnode *vp, int attrnamespace, const char *name,
*/
if (attribute->uele_backing_vnode != vp)
vn_lock(attribute->uele_backing_vnode,
LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
LK_EXCLUSIVE | LK_RETRY, td);
ioflag = IO_NODELOCKED;
if (ufs_extattr_sync)
@ -1181,7 +1181,7 @@ ufs_extattr_rm(struct vnode *vp, int attrnamespace, const char *name,
*/
if (attribute->uele_backing_vnode != vp)
vn_lock(attribute->uele_backing_vnode,
LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
LK_EXCLUSIVE | LK_RETRY, td);
error = VOP_READ(attribute->uele_backing_vnode, &local_aio,
IO_NODELOCKED, ump->um_extattr.uepm_ucred);

View File

@ -1188,7 +1188,7 @@ vnode_pager_lock(vm_object_t first_object)
VM_OBJECT_UNLOCK(object);
if (first_object != object)
VM_OBJECT_UNLOCK(first_object);
if (vget(vp, LK_CANRECURSE | LK_INTERLOCK | LK_NOPAUSE |
if (vget(vp, LK_CANRECURSE | LK_INTERLOCK |
LK_RETRY | LK_SHARED, curthread)) {
VM_OBJECT_LOCK(first_object);
if (object != first_object)