- LK_NOPAUSE is a nop now.

Sponsored by:   Isilon Systems, Inc.
Author: Jeff Roberson
Date:   2005-03-31 04:37:09 +00:00
parent ea124bf597
commit f247a5240d
9 changed files with 17 additions and 17 deletions
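
Since LK_NOPAUSE no longer does anything, callers simply drop it from the
flag word passed to lockinit(), lockmgr(), vn_lock(), VOP_LOCK() and vget();
nothing else about the locking protocol changes. As a minimal sketch (a
hypothetical caller, not code from this commit; "examplelk" and its wait
message are invented for illustration):

        /*
         * Hypothetical lockmgr user.  LK_NOPAUSE is now ignored, so an
         * initialization that used to pass it simply passes 0, and
         * acquire-time flag masks omit it.
         */
        struct lock examplelk;

        lockinit(&examplelk, PVFS, "examplk", VLKTIMEOUT, 0);
                                        /* formerly: ..., LK_NOPAUSE); */
        lockmgr(&examplelk, LK_EXCLUSIVE, NULL, td);
                                        /* formerly: LK_EXCLUSIVE | LK_NOPAUSE */
        lockmgr(&examplelk, LK_RELEASE, NULL, td);
        lockdestroy(&examplelk);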

@@ -1910,7 +1910,7 @@ retry_lookup:
          * Get the page from backing store.
          */
         bsize = vp->v_mount->mnt_stat.f_iosize;
-        vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
+        vn_lock(vp, LK_SHARED | LK_RETRY, td);
         /*
          * XXXMAC: Because we don't have fp->f_cred here,
          * we pass in NOCRED.  This is probably wrong, but

@@ -411,7 +411,7 @@ vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp,
         TAILQ_INIT(&mp->mnt_nvnodelist);
         mp->mnt_nvnodelistsize = 0;
         mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
-        lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
+        lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
         vfs_busy(mp, LK_NOWAIT, 0, td);
         mp->mnt_op = vfsp->vfc_vfsops;
         mp->mnt_vfc = vfsp;

@@ -352,7 +352,7 @@ vfs_busy(mp, flags, interlkp, td)
         }
         if (interlkp)
                 mtx_unlock(interlkp);
-        lkflags = LK_SHARED | LK_NOPAUSE | LK_INTERLOCK;
+        lkflags = LK_SHARED | LK_INTERLOCK;
         if (lockmgr(&mp->mnt_lock, lkflags, MNT_MTX(mp), td))
                 panic("vfs_busy: unexpected lock failure");
         return (0);
@@ -831,7 +831,7 @@ getnewvnode(tag, mp, vops, vpp)
          */
         vp->v_vnlock = &vp->v_lock;
         mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
-        lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
+        lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, 0);
         /*
          * Initialize bufobj.
          */

@@ -496,10 +496,10 @@ vn_read(fp, uio, active_cred, flags, td)
          * Once this field has it's own lock we can acquire this shared.
          */
         if ((flags & FOF_OFFSET) == 0) {
-                vn_lock(vp, LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
+                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
                 uio->uio_offset = fp->f_offset;
         } else
-                vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
+                vn_lock(vp, LK_SHARED | LK_RETRY, td);
         ioflag |= sequential_heuristic(uio, fp);
@@ -811,7 +811,7 @@ debug_vn_lock(vp, flags, td, filename, line)
                  * lockmgr drops interlock before it will return for
                  * any reason.  So force the code above to relock it.
                  */
-                error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, td);
+                error = VOP_LOCK(vp, flags | LK_INTERLOCK, td);
                 flags &= ~LK_INTERLOCK;
         /*
          * Callers specify LK_RETRY if they wish to get dead vnodes.

@@ -173,7 +173,7 @@ nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp)
         np->n_fhp = &np->n_fh;
         bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
         np->n_fhsize = fhsize;
-        lockinit(&np->n_rslock, PVFS | rsflags, "nfrslk", 0, LK_NOPAUSE);
+        lockinit(&np->n_rslock, PVFS | rsflags, "nfrslk", 0, 0);
         *npp = np;
         return (0);

@@ -119,7 +119,7 @@ ffs_rawread_sync(struct vnode *vp, struct thread *td)
                 if (VOP_ISLOCKED(vp, td) != LK_EXCLUSIVE) {
                         upgraded = 1;
                         /* Upgrade to exclusive lock, this might block */
-                        VOP_LOCK(vp, LK_UPGRADE | LK_NOPAUSE, td);
+                        VOP_LOCK(vp, LK_UPGRADE, td);
                 } else
                         upgraded = 0;

@@ -522,7 +522,7 @@ loop:
         sn = malloc(sizeof *sn, M_UFSMNT, M_WAITOK | M_ZERO);
         TAILQ_INIT(&sn->sn_head);
         lockinit(&sn->sn_lock, PVFS, "snaplk", VLKTIMEOUT,
-            LK_CANRECURSE | LK_NOPAUSE);
+            LK_CANRECURSE);
         VI_LOCK(vp);
         vp->v_vnlock = &sn->sn_lock;
         devvp->v_rdev->si_snapdata = sn;
@@ -1846,7 +1846,7 @@ ffs_snapshot_mount(mp)
         sn = malloc(sizeof *sn, M_UFSMNT, M_WAITOK | M_ZERO);
         TAILQ_INIT(&sn->sn_head);
         lockinit(&sn->sn_lock, PVFS, "snaplk", VLKTIMEOUT,
-            LK_CANRECURSE | LK_NOPAUSE);
+            LK_CANRECURSE);
         VI_LOCK(vp);
         vp->v_vnlock = &sn->sn_lock;
         devvp->v_rdev->si_snapdata = sn;

@@ -611,7 +611,7 @@ ufs_extattr_enable(struct ufsmount *ump, int attrnamespace,
         auio.uio_rw = UIO_READ;
         auio.uio_td = td;
-        vn_lock(backing_vnode, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
+        vn_lock(backing_vnode, LK_SHARED | LK_RETRY, td);
         error = VOP_READ(backing_vnode, &auio, IO_NODELOCKED,
             ump->um_extattr.uepm_ucred);
@@ -671,7 +671,7 @@ ufs_extattr_disable(struct ufsmount *ump, int attrnamespace,
         LIST_REMOVE(uele, uele_entries);
-        vn_lock(uele->uele_backing_vnode, LK_SHARED | LK_NOPAUSE | LK_RETRY,
+        vn_lock(uele->uele_backing_vnode, LK_SHARED | LK_RETRY,
             td);
         ASSERT_VOP_LOCKED(uele->uele_backing_vnode, "ufs_extattr_disable");
         VOP_UNLOCK(uele->uele_backing_vnode, 0, td);
@@ -874,7 +874,7 @@ ufs_extattr_get(struct vnode *vp, int attrnamespace, const char *name,
          */
         if (attribute->uele_backing_vnode != vp)
                 vn_lock(attribute->uele_backing_vnode, LK_SHARED |
-                    LK_NOPAUSE | LK_RETRY, td);
+                    LK_RETRY, td);
         error = VOP_READ(attribute->uele_backing_vnode, &local_aio,
             IO_NODELOCKED, ump->um_extattr.uepm_ucred);
@@ -1084,7 +1084,7 @@ ufs_extattr_set(struct vnode *vp, int attrnamespace, const char *name,
          */
         if (attribute->uele_backing_vnode != vp)
                 vn_lock(attribute->uele_backing_vnode,
-                    LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
+                    LK_EXCLUSIVE | LK_RETRY, td);
         ioflag = IO_NODELOCKED;
         if (ufs_extattr_sync)
@@ -1181,7 +1181,7 @@ ufs_extattr_rm(struct vnode *vp, int attrnamespace, const char *name,
          */
         if (attribute->uele_backing_vnode != vp)
                 vn_lock(attribute->uele_backing_vnode,
-                    LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
+                    LK_EXCLUSIVE | LK_RETRY, td);
         error = VOP_READ(attribute->uele_backing_vnode, &local_aio,
             IO_NODELOCKED, ump->um_extattr.uepm_ucred);

@@ -1188,7 +1188,7 @@ vnode_pager_lock(vm_object_t first_object)
                         VM_OBJECT_UNLOCK(object);
                         if (first_object != object)
                                 VM_OBJECT_UNLOCK(first_object);
-                        if (vget(vp, LK_CANRECURSE | LK_INTERLOCK | LK_NOPAUSE |
+                        if (vget(vp, LK_CANRECURSE | LK_INTERLOCK |
                             LK_RETRY | LK_SHARED, curthread)) {
                                 VM_OBJECT_LOCK(first_object);
                                 if (object != first_object)