vn_lock() is currently only used with the 'curthread' passed as argument.

Remove this argument and pass curthread directly to the underlying
VOP_LOCK1() VFS method. This change makes the code cleaner and, in
particular, removes an annoying dependency, which will help the upcoming
lockmgr() cleanup. As a result, the KPI is obviously changed.

Manpage and FreeBSD_version will be updated through further commits.

As a side note, it is worth mentioning that subsequent commits will
apply a similar cleanup to the VFS methods themselves, in particular
vop_lock1 and vop_unlock.

Tested by:	Diego Sardina <siarodx at gmail dot com>,
		Andrea Di Pasquale <whyx dot it at gmail dot com>
This commit is contained in:
Attilio Rao 2008-01-10 01:10:58 +00:00
parent 71e3b145e3
commit cb05b60a89
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=175202
118 changed files with 319 additions and 322 deletions

View File

@ -123,7 +123,7 @@ kobj_get_filesize_vnode(struct _buf *file, uint64_t *size)
struct vattr va;
int error;
vn_lock(vp, LK_SHARED | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY);
error = VOP_GETATTR(vp, &va, td->td_ucred, td);
VOP_UNLOCK(vp, 0, td);
if (error == 0)
@ -176,7 +176,7 @@ kobj_read_file_vnode(struct _buf *file, char *buf, unsigned size, unsigned off)
auio.uio_resid = size;
auio.uio_td = td;
vn_lock(vp, LK_SHARED | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY);
error = VOP_READ(vp, &auio, IO_UNIT | IO_SYNC, td->td_ucred);
VOP_UNLOCK(vp, 0, td);
return (error != 0 ? -1 : size - auio.uio_resid);

View File

@ -192,7 +192,7 @@ domount(kthread_t *td, vnode_t *vp, const char *fstype, char *fspath,
/*
* Allocate and initialize the filesystem.
*/
vn_lock(vp, LK_SHARED | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY);
mp = vfs_mount_alloc(vp, vfsp, fspath, td);
VOP_UNLOCK(vp, 0, td);
@ -238,7 +238,7 @@ domount(kthread_t *td, vnode_t *vp, const char *fstype, char *fspath,
* mnt_optnew.
*/
mp->mnt_optnew = NULL;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
/*
* Put the new filesystem on the mount list after root.
*/

View File

@ -222,7 +222,7 @@ zfs_vop_fsync(vnode_t *vp, int flag, cred_t *cr)
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
goto drop;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_FSYNC(vp, MNT_WAIT, td);
VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);

View File

@ -321,7 +321,7 @@ gfs_lookup_dot(vnode_t **vpp, vnode_t *dvp, vnode_t *pvp, const char *nm)
VN_HOLD(pvp);
*vpp = pvp;
}
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
return (0);
}

View File

@ -429,7 +429,7 @@ zfsctl_root_lookup_vop(ap)
err = zfsctl_root_lookup(dvp, nm, vpp, NULL, 0, NULL, cr);
if (err == 0 && (nm[0] != '.' || nm[1] != '\0'))
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, ap->a_cnp->cn_thread);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
return (err);
}
@ -692,7 +692,7 @@ zfsctl_snapdir_lookup(ap)
*/
goto domount;
}
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, ap->a_cnp->cn_thread);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
mutex_exit(&sdp->sd_lock);
ZFS_EXIT(zfsvfs);
return (0);
@ -732,7 +732,7 @@ zfsctl_snapdir_lookup(ap)
kmem_free(mountpoint, mountpoint_len);
/* FreeBSD: This line was moved from below to avoid a lock recursion. */
if (err == 0)
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
mutex_exit(&sdp->sd_lock);
/*

View File

@ -109,7 +109,7 @@ zfs_replay_create(zfsvfs_t *zfsvfs, lr_create_t *lr, boolean_t byteswap)
cn.cn_thread = curthread;
cn.cn_flags = SAVENAME;
vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY);
switch ((int)lr->lr_common.lrc_txtype) {
case TX_CREATE:
error = VOP_CREATE(ZTOV(dzp), &vp, &cn, &va);
@ -162,7 +162,7 @@ zfs_replay_remove(zfsvfs_t *zfsvfs, lr_remove_t *lr, boolean_t byteswap)
cn.cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
cn.cn_cred = kcred;
cn.cn_thread = curthread;
vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY);
error = VOP_LOOKUP(ZTOV(dzp), &vp, &cn);
if (error != 0) {
VOP_UNLOCK(ZTOV(dzp), 0, curthread);
@ -211,8 +211,8 @@ zfs_replay_link(zfsvfs_t *zfsvfs, lr_link_t *lr, boolean_t byteswap)
cn.cn_thread = curthread;
cn.cn_flags = SAVENAME;
vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(ZTOV(zp), LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY);
vn_lock(ZTOV(zp), LK_EXCLUSIVE | LK_RETRY);
error = VOP_LINK(ZTOV(dzp), ZTOV(zp), &cn);
VOP_UNLOCK(ZTOV(zp), 0, curthread);
VOP_UNLOCK(ZTOV(dzp), 0, curthread);
@ -255,7 +255,7 @@ zfs_replay_rename(zfsvfs_t *zfsvfs, lr_rename_t *lr, boolean_t byteswap)
scn.cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
scn.cn_cred = kcred;
scn.cn_thread = td;
vn_lock(ZTOV(sdzp), LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(ZTOV(sdzp), LK_EXCLUSIVE | LK_RETRY);
error = VOP_LOOKUP(ZTOV(sdzp), &svp, &scn);
VOP_UNLOCK(ZTOV(sdzp), 0, td);
if (error != 0)
@ -270,7 +270,7 @@ zfs_replay_rename(zfsvfs_t *zfsvfs, lr_rename_t *lr, boolean_t byteswap)
tcn.cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
tcn.cn_cred = kcred;
tcn.cn_thread = td;
vn_lock(ZTOV(tdzp), LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(ZTOV(tdzp), LK_EXCLUSIVE | LK_RETRY);
error = VOP_LOOKUP(ZTOV(tdzp), &tvp, &tcn);
if (error == EJUSTRETURN)
tvp = NULL;
@ -360,7 +360,7 @@ zfs_replay_setattr(zfsvfs_t *zfsvfs, lr_setattr_t *lr, boolean_t byteswap)
ZFS_TIME_DECODE(&va.va_mtime, lr->lr_mtime);
vp = ZTOV(zp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_SETATTR(vp, &va, kcred, curthread);
VOP_UNLOCK(vp, 0, curthread);
VN_RELE(vp);

View File

@ -663,7 +663,7 @@ zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp, kthread_t *td)
error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
if (error == 0) {
*vpp = ZTOV(rootzp);
error = vn_lock(*vpp, flags, td);
error = vn_lock(*vpp, flags);
(*vpp)->v_vflag |= VV_ROOT;
}
@ -763,7 +763,7 @@ zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp)
*vpp = NULL;
else {
*vpp = ZTOV(zp);
vn_lock(*vpp, flags, curthread);
vn_lock(*vpp, flags);
}
ZFS_EXIT(zfsvfs);
return (err);
@ -830,7 +830,7 @@ zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, vnode_t **vpp)
}
ZFS_EXIT(zfsvfs);
/* XXX: LK_RETRY? */
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
return (0);
}
@ -853,7 +853,7 @@ zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, vnode_t **vpp)
*vpp = ZTOV(zp);
/* XXX: LK_RETRY? */
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
vnode_create_vobject(*vpp, zp->z_phys->zp_size, td);
ZFS_EXIT(zfsvfs);
return (0);

View File

@ -1116,9 +1116,9 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
ltype = VOP_ISLOCKED(dvp, td);
VOP_UNLOCK(dvp, 0, td);
}
error = vn_lock(*vpp, cnp->cn_lkflags, td);
error = vn_lock(*vpp, cnp->cn_lkflags);
if (cnp->cn_flags & ISDOTDOT)
vn_lock(dvp, ltype | LK_RETRY, td);
vn_lock(dvp, ltype | LK_RETRY);
if (error != 0) {
VN_RELE(*vpp);
*vpp = NULL;
@ -1302,7 +1302,7 @@ zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
if (error == 0) {
*vpp = ZTOV(zp);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
}
if (dl)
@ -1584,7 +1584,7 @@ zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr)
zfs_log_create(zilog, tx, TX_MKDIR, dzp, zp, dirname);
dmu_tx_commit(tx);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
zfs_dirent_unlock(dl);
@ -2769,7 +2769,7 @@ zfs_symlink(vnode_t *dvp, vnode_t **vpp, char *name, vattr_t *vap, char *link, c
if (error == 0) {
zfs_log_symlink(zilog, tx, TX_SYMLINK, dzp, zp, name, link);
*vpp = ZTOV(zp);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
}
dmu_tx_commit(tx);

View File

@ -85,7 +85,7 @@ znode_pageout_func(dmu_buf_t *dbuf, void *user_ptr)
ZTOV(zp) = NULL;
vhold(vp);
mutex_exit(&zp->z_lock);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vrecycle(vp, curthread);
VOP_UNLOCK(vp, 0, curthread);
vdrop(vp);

View File

@ -933,7 +933,7 @@ linprocfs_doprocmaps(PFS_FILL_ARGS)
if (vp) {
vn_fullpath(td, vp, &name, &freename);
locked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_SHARED | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY);
VOP_GETATTR(vp, &vat, td->td_ucred, td);
ino = vat.va_fileid;
vput(vp);

View File

@ -452,7 +452,7 @@ getdents_common(struct thread *td, struct linux_getdents64_args *args,
buflen = max(LINUX_DIRBLKSIZ, nbytes);
buflen = min(buflen, MAXBSIZE);
buf = malloc(buflen, M_TEMP, M_WAITOK);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
again:
aiov.iov_base = buf;

View File

@ -325,7 +325,7 @@ linux_getcwd_common (lvp, rvp, bpp, bufp, limit, flags, td)
* uvp is either NULL, or locked and held.
*/
error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY, td);
error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0)
panic("vn_lock LK_RETRY returned error %d", error);
if (bufp)
@ -378,7 +378,7 @@ linux_getcwd_common (lvp, rvp, bpp, bufp, limit, flags, td)
goto out;
}
VREF(lvp);
error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY, td);
error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0)
panic("vn_lock LK_RETRY returned %d", error);
}

View File

@ -123,7 +123,7 @@ kobj_get_filesize_vnode(struct _buf *file, uint64_t *size)
struct vattr va;
int error;
vn_lock(vp, LK_SHARED | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY);
error = VOP_GETATTR(vp, &va, td->td_ucred, td);
VOP_UNLOCK(vp, 0, td);
if (error == 0)
@ -176,7 +176,7 @@ kobj_read_file_vnode(struct _buf *file, char *buf, unsigned size, unsigned off)
auio.uio_resid = size;
auio.uio_td = td;
vn_lock(vp, LK_SHARED | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY);
error = VOP_READ(vp, &auio, IO_UNIT | IO_SYNC, td->td_ucred);
VOP_UNLOCK(vp, 0, td);
return (error != 0 ? -1 : size - auio.uio_resid);

View File

@ -192,7 +192,7 @@ domount(kthread_t *td, vnode_t *vp, const char *fstype, char *fspath,
/*
* Allocate and initialize the filesystem.
*/
vn_lock(vp, LK_SHARED | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY);
mp = vfs_mount_alloc(vp, vfsp, fspath, td);
VOP_UNLOCK(vp, 0, td);
@ -238,7 +238,7 @@ domount(kthread_t *td, vnode_t *vp, const char *fstype, char *fspath,
* mnt_optnew.
*/
mp->mnt_optnew = NULL;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
/*
* Put the new filesystem on the mount list after root.
*/

View File

@ -222,7 +222,7 @@ zfs_vop_fsync(vnode_t *vp, int flag, cred_t *cr)
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
goto drop;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_FSYNC(vp, MNT_WAIT, td);
VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);

View File

@ -598,7 +598,7 @@ imgact_pecoff(struct image_params * imgp)
error = exec_pecoff_coff_makecmds(imgp, fp, peofs);
fail:
free(fp, M_TEMP);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
return error;
}

View File

@ -231,7 +231,7 @@ exec_svr4_imgact(imgp)
imgp->proc->p_sysent = &svr4_sysvec;
fail:
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
return (error);
}

View File

@ -270,7 +270,7 @@ fd_revoke(td, fd)
}
#ifdef MAC
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = mac_vnode_check_revoke(td->td_ucred, vp);
VOP_UNLOCK(vp, 0, td);
if (error)

View File

@ -278,7 +278,7 @@ svr4_sys_getdents64(td, uap)
buflen = max(DIRBLKSIZ, nbytes);
buflen = min(buflen, MAXBSIZE);
buf = malloc(buflen, M_TEMP, M_WAITOK);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
again:
aiov.iov_base = buf;
aiov.iov_len = buflen;
@ -447,7 +447,7 @@ svr4_sys_getdents(td, uap)
buflen = min(MAXBSIZE, uap->nbytes);
buf = malloc(buflen, M_TEMP, M_WAITOK);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
off = fp->f_offset;
again:
aiov.iov_base = buf;
@ -620,7 +620,7 @@ svr4_sys_fchroot(td, uap)
VREF(vp);
fdrop(fp, td);
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = change_dir(vp, td);
if (error)
goto fail;

View File

@ -321,7 +321,7 @@ gfs_lookup_dot(vnode_t **vpp, vnode_t *dvp, vnode_t *pvp, const char *nm)
VN_HOLD(pvp);
*vpp = pvp;
}
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
return (0);
}

View File

@ -429,7 +429,7 @@ zfsctl_root_lookup_vop(ap)
err = zfsctl_root_lookup(dvp, nm, vpp, NULL, 0, NULL, cr);
if (err == 0 && (nm[0] != '.' || nm[1] != '\0'))
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, ap->a_cnp->cn_thread);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
return (err);
}
@ -692,7 +692,7 @@ zfsctl_snapdir_lookup(ap)
*/
goto domount;
}
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, ap->a_cnp->cn_thread);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
mutex_exit(&sdp->sd_lock);
ZFS_EXIT(zfsvfs);
return (0);
@ -732,7 +732,7 @@ zfsctl_snapdir_lookup(ap)
kmem_free(mountpoint, mountpoint_len);
/* FreeBSD: This line was moved from below to avoid a lock recursion. */
if (err == 0)
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
mutex_exit(&sdp->sd_lock);
/*

View File

@ -109,7 +109,7 @@ zfs_replay_create(zfsvfs_t *zfsvfs, lr_create_t *lr, boolean_t byteswap)
cn.cn_thread = curthread;
cn.cn_flags = SAVENAME;
vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY);
switch ((int)lr->lr_common.lrc_txtype) {
case TX_CREATE:
error = VOP_CREATE(ZTOV(dzp), &vp, &cn, &va);
@ -162,7 +162,7 @@ zfs_replay_remove(zfsvfs_t *zfsvfs, lr_remove_t *lr, boolean_t byteswap)
cn.cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
cn.cn_cred = kcred;
cn.cn_thread = curthread;
vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY);
error = VOP_LOOKUP(ZTOV(dzp), &vp, &cn);
if (error != 0) {
VOP_UNLOCK(ZTOV(dzp), 0, curthread);
@ -211,8 +211,8 @@ zfs_replay_link(zfsvfs_t *zfsvfs, lr_link_t *lr, boolean_t byteswap)
cn.cn_thread = curthread;
cn.cn_flags = SAVENAME;
vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(ZTOV(zp), LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY);
vn_lock(ZTOV(zp), LK_EXCLUSIVE | LK_RETRY);
error = VOP_LINK(ZTOV(dzp), ZTOV(zp), &cn);
VOP_UNLOCK(ZTOV(zp), 0, curthread);
VOP_UNLOCK(ZTOV(dzp), 0, curthread);
@ -255,7 +255,7 @@ zfs_replay_rename(zfsvfs_t *zfsvfs, lr_rename_t *lr, boolean_t byteswap)
scn.cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
scn.cn_cred = kcred;
scn.cn_thread = td;
vn_lock(ZTOV(sdzp), LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(ZTOV(sdzp), LK_EXCLUSIVE | LK_RETRY);
error = VOP_LOOKUP(ZTOV(sdzp), &svp, &scn);
VOP_UNLOCK(ZTOV(sdzp), 0, td);
if (error != 0)
@ -270,7 +270,7 @@ zfs_replay_rename(zfsvfs_t *zfsvfs, lr_rename_t *lr, boolean_t byteswap)
tcn.cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
tcn.cn_cred = kcred;
tcn.cn_thread = td;
vn_lock(ZTOV(tdzp), LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(ZTOV(tdzp), LK_EXCLUSIVE | LK_RETRY);
error = VOP_LOOKUP(ZTOV(tdzp), &tvp, &tcn);
if (error == EJUSTRETURN)
tvp = NULL;
@ -360,7 +360,7 @@ zfs_replay_setattr(zfsvfs_t *zfsvfs, lr_setattr_t *lr, boolean_t byteswap)
ZFS_TIME_DECODE(&va.va_mtime, lr->lr_mtime);
vp = ZTOV(zp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_SETATTR(vp, &va, kcred, curthread);
VOP_UNLOCK(vp, 0, curthread);
VN_RELE(vp);

View File

@ -663,7 +663,7 @@ zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp, kthread_t *td)
error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
if (error == 0) {
*vpp = ZTOV(rootzp);
error = vn_lock(*vpp, flags, td);
error = vn_lock(*vpp, flags);
(*vpp)->v_vflag |= VV_ROOT;
}
@ -763,7 +763,7 @@ zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp)
*vpp = NULL;
else {
*vpp = ZTOV(zp);
vn_lock(*vpp, flags, curthread);
vn_lock(*vpp, flags);
}
ZFS_EXIT(zfsvfs);
return (err);
@ -830,7 +830,7 @@ zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, vnode_t **vpp)
}
ZFS_EXIT(zfsvfs);
/* XXX: LK_RETRY? */
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
return (0);
}
@ -853,7 +853,7 @@ zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, vnode_t **vpp)
*vpp = ZTOV(zp);
/* XXX: LK_RETRY? */
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
vnode_create_vobject(*vpp, zp->z_phys->zp_size, td);
ZFS_EXIT(zfsvfs);
return (0);

View File

@ -1116,9 +1116,9 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
ltype = VOP_ISLOCKED(dvp, td);
VOP_UNLOCK(dvp, 0, td);
}
error = vn_lock(*vpp, cnp->cn_lkflags, td);
error = vn_lock(*vpp, cnp->cn_lkflags);
if (cnp->cn_flags & ISDOTDOT)
vn_lock(dvp, ltype | LK_RETRY, td);
vn_lock(dvp, ltype | LK_RETRY);
if (error != 0) {
VN_RELE(*vpp);
*vpp = NULL;
@ -1302,7 +1302,7 @@ zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
if (error == 0) {
*vpp = ZTOV(zp);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
}
if (dl)
@ -1584,7 +1584,7 @@ zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr)
zfs_log_create(zilog, tx, TX_MKDIR, dzp, zp, dirname);
dmu_tx_commit(tx);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
zfs_dirent_unlock(dl);
@ -2769,7 +2769,7 @@ zfs_symlink(vnode_t *dvp, vnode_t **vpp, char *name, vattr_t *vap, char *link, c
if (error == 0) {
zfs_log_symlink(zilog, tx, TX_SYMLINK, dzp, zp, name, link);
*vpp = ZTOV(zp);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
}
dmu_tx_commit(tx);

View File

@ -85,7 +85,7 @@ znode_pageout_func(dmu_buf_t *dbuf, void *user_ptr)
ZTOV(zp) = NULL;
vhold(vp);
mutex_exit(&zp->z_lock);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vrecycle(vp, curthread);
VOP_UNLOCK(vp, 0, curthread);
vdrop(vp);

View File

@ -683,7 +683,7 @@ pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
td = curthread;
*fullpath = "unknown";
*freepath = NULL;
vn_lock(v, LK_CANRECURSE | LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(v, LK_CANRECURSE | LK_EXCLUSIVE | LK_RETRY);
vn_fullpath(td, v, fullpath, freepath);
VOP_UNLOCK(v, 0, td);
}

View File

@ -530,7 +530,7 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
if (bp->bio_cmd == BIO_FLUSH) {
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
(void) vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_FSYNC(vp, MNT_WAIT, td);
VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
@ -560,12 +560,12 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
*/
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
if (bp->bio_cmd == BIO_READ) {
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
VOP_UNLOCK(vp, 0, td);
} else {
(void) vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
sc->cred);
VOP_UNLOCK(vp, 0, td);
@ -895,7 +895,7 @@ mdsetcred(struct md_s *sc, struct ucred *cred)
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_resid = aiov.iov_len;
vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
VOP_UNLOCK(sc->vnode, 0, curthread);
free(tmpbuf, M_TEMP);
@ -947,7 +947,7 @@ mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
error = mdsetcred(sc, td->td_ucred);
if (error != 0) {
vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
nd.ni_vp->v_vflag &= ~VV_MD;
VOP_UNLOCK(nd.ni_vp, 0, td);
(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
@ -984,7 +984,7 @@ mddestroy(struct md_s *sc, struct thread *td)
mtx_destroy(&sc->queue_mtx);
if (sc->vnode != NULL) {
vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
sc->vnode->v_vflag &= ~VV_MD;
VOP_UNLOCK(sc->vnode, 0, td);
(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?

View File

@ -354,7 +354,7 @@ cd9660_lookup(ap)
LK_EXCLUSIVE, &tdp,
saved_ino != ino, ep);
brelse(bp);
vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY);
if (error)
return (error);
*vpp = tdp;

View File

@ -170,7 +170,7 @@ cd9660_mount(struct mount *mp, struct thread *td)
* or has superuser abilities
*/
accessmode = VREAD;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_ACCESS(devvp, accessmode, td->td_ucred, td);
if (error)
error = priv_check(td, PRIV_VFS_MOUNT_PERM);
@ -224,7 +224,7 @@ iso_mountfs(devvp, mp, td)
struct bufobj *bo;
char *cs_local, *cs_disk;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
DROP_GIANT();
g_topology_lock();
error = g_vfs_open(devvp, &cp, "cd9660", 0);

View File

@ -297,7 +297,7 @@ coda_root(vfsp, flags, vpp, td)
/* On Mach, this is vref. On NetBSD, VOP_LOCK */
#if 1
vref(*vpp);
vn_lock(*vpp, LK_EXCLUSIVE, td);
vn_lock(*vpp, LK_EXCLUSIVE);
#else
vget(*vpp, LK_EXCLUSIVE, td);
#endif
@ -320,7 +320,7 @@ coda_root(vfsp, flags, vpp, td)
*vpp = mi->mi_rootvp;
#if 1
vref(*vpp);
vn_lock(*vpp, LK_EXCLUSIVE, td);
vn_lock(*vpp, LK_EXCLUSIVE);
#else
vget(*vpp, LK_EXCLUSIVE, td);
#endif
@ -340,7 +340,7 @@ coda_root(vfsp, flags, vpp, td)
*vpp = mi->mi_rootvp;
#if 1
vref(*vpp);
vn_lock(*vpp, LK_EXCLUSIVE, td);
vn_lock(*vpp, LK_EXCLUSIVE);
#else
vget(*vpp, LK_EXCLUSIVE, td);
#endif

View File

@ -931,11 +931,11 @@ coda_lookup(struct vop_lookup_args *ap)
*/
if (*ap->a_vpp) {
if ((error = VOP_LOCK(*ap->a_vpp, LK_EXCLUSIVE, td))) {
vn_lock(dvp, LK_RETRY|LK_EXCLUSIVE, td);
vn_lock(dvp, LK_RETRY|LK_EXCLUSIVE);
return (error);
}
}
vn_lock(dvp, LK_RETRY|LK_EXCLUSIVE, td);
vn_lock(dvp, LK_RETRY|LK_EXCLUSIVE);
} else {
/* The parent is locked, and may be the same as the child */
if (*ap->a_vpp && (*ap->a_vpp != dvp)) {

View File

@ -259,7 +259,7 @@ devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int vp_locked)
vholdl(vp);
sx_unlock(&dm->dm_lock);
if (!vp_locked)
vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
else
VI_UNLOCK(vp);
vgone(vp);

View File

@ -247,7 +247,7 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp, stru
} else {
vp->v_type = VBAD;
}
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
mtx_lock(&devfs_de_interlock);
vp->v_data = de;
de->de_vnode = vp;
@ -372,7 +372,7 @@ devfs_close(struct vop_close_args *ap)
error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
}
dev_relthread(dev);
vn_lock(vp, vp_locked | LK_RETRY, td);
vn_lock(vp, vp_locked | LK_RETRY);
vdrop(vp);
return (error);
}
@ -593,7 +593,7 @@ devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
de = de->de_dir;
error = devfs_allocv(de, dvp->v_mount, vpp, td);
*dm_unlock = 0;
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
return (error);
}
@ -786,7 +786,7 @@ devfs_open(struct vop_open_args *ap)
error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
}
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
dev_relthread(dev);
@ -1101,7 +1101,7 @@ devfs_revoke(struct vop_revoke_args *ap)
} else
dev_unlock();
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
return (0);
}

View File

@ -149,7 +149,7 @@ fdesc_root(mp, flags, vpp, td)
*/
vp = VFSTOFDESC(mp)->f_root;
VREF(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
*vpp = vp;
return (0);
}

View File

@ -234,7 +234,7 @@ fdesc_lookup(ap)
goto bad;
VTOFDESC(fvp)->fd_fd = fd;
if (fvp != dvp)
vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY);
*vpp = fvp;
return (0);
@ -409,7 +409,7 @@ fdesc_setattr(ap)
}
vp = fp->f_vnode;
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) == 0) {
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_SETATTR(vp, ap->a_vap, ap->a_cred, ap->a_td);
VOP_UNLOCK(vp, 0, ap->a_td);
vn_finished_write(mp);

View File

@ -257,7 +257,7 @@ fifo_open(ap)
VOP_UNLOCK(vp, 0, td);
error = msleep(&fip->fi_readers, &fifo_mtx,
PDROP | PCATCH | PSOCK, "fifoor", 0);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (error) {
fip->fi_readers--;
if (fip->fi_readers == 0) {
@ -277,7 +277,7 @@ fifo_open(ap)
VOP_UNLOCK(vp, 0, td);
error = msleep(&fip->fi_writers, &fifo_mtx,
PDROP | PCATCH | PSOCK, "fifoow", 0);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (error) {
fip->fi_writers--;
if (fip->fi_writers == 0) {

View File

@ -1066,7 +1066,7 @@ hpfs_lookup(ap)
VOP_UNLOCK(dvp,0,cnp->cn_thread);
error = VFS_VGET(hpmp->hpm_mp,
dhp->h_fn.fn_parent, LK_EXCLUSIVE, ap->a_vpp);
vn_lock(dvp, LK_EXCLUSIVE|LK_RETRY, cnp->cn_thread);
vn_lock(dvp, LK_EXCLUSIVE|LK_RETRY);
if (error)
return(error);
}

View File

@ -522,7 +522,7 @@ msdosfs_lookup(ap)
if (flags & ISDOTDOT) {
VOP_UNLOCK(pdp, 0, td);
error = deget(pmp, cluster, blkoff, &tdp);
vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY);
if (error)
return (error);
*vpp = DETOV(tdp);

View File

@ -312,7 +312,7 @@ msdosfs_mount(struct mount *mp, struct thread *td)
* that user has necessary permissions on the device.
*/
devvp = pmp->pm_devvp;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_ACCESS(devvp, VREAD | VWRITE,
td->td_ucred, td);
if (error)
@ -928,7 +928,7 @@ msdosfs_sync(struct mount *mp, int waitfor, struct thread *td)
* Flush filesystem control info.
*/
if (waitfor != MNT_LAZY) {
vn_lock(pmp->pm_devvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(pmp->pm_devvp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_FSYNC(pmp->pm_devvp, waitfor, td);
if (error)
allerror = error;

View File

@ -1032,7 +1032,7 @@ msdosfs_rename(ap)
goto abortit;
}
error = vn_lock(fvp, LK_EXCLUSIVE, td);
error = vn_lock(fvp, LK_EXCLUSIVE);
if (error)
goto abortit;
dp = VTODE(fdvp);
@ -1480,7 +1480,7 @@ msdosfs_rmdir(ap)
error = detrunc(ip, (u_long)0, IO_SYNC, cnp->cn_cred, td);
cache_purge(vp);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
out:
return (error);
}

View File

@ -743,7 +743,7 @@ ntfs_vgetex(
ntfs_ntput(ip);
if (lkflags & LK_TYPE_MASK) {
error = vn_lock(vp, lkflags, td);
error = vn_lock(vp, lkflags);
if (error) {
vput(vp);
return (error);

View File

@ -664,7 +664,7 @@ ntfs_lookup(ap)
LK_EXCLUSIVE, ap->a_vpp);
ntfs_ntvattrrele(vap);
if (error) {
vn_lock(dvp,LK_EXCLUSIVE|LK_RETRY,cnp->cn_thread);
vn_lock(dvp,LK_EXCLUSIVE|LK_RETRY);
return (error);
}
} else {

View File

@ -192,7 +192,7 @@ null_insmntque_dtr(struct vnode *vp, void *xp)
vp->v_vnlock = &vp->v_lock;
FREE(xp, M_NULLFSNODE);
vp->v_op = &dead_vnodeops;
(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vgone(vp);
vput(vp);
}

View File

@ -121,7 +121,7 @@ nullfs_mount(struct mount *mp, struct thread *td)
* Re-lock vnode.
*/
if (isvnunlocked && !VOP_ISLOCKED(mp->mnt_vnodecovered, NULL))
vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY);
if (error)
return (error);
@ -250,7 +250,7 @@ nullfs_root(mp, flags, vpp, td)
if (VOP_ISLOCKED(vp, NULL))
panic("root vnode is locked.\n");
#endif
vn_lock(vp, flags | LK_RETRY, td);
vn_lock(vp, flags | LK_RETRY);
*vpp = vp;
return 0;
}

View File

@ -205,7 +205,7 @@ nwfs_allocvp(struct mount *mp, ncpfid fid, struct nw_entry_info *fap,
*vpp = vp;
nhpp = NWNOHASH(fid);
LIST_INSERT_HEAD(nhpp, np, n_hash);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
lockmgr(&nwhashlock, LK_RELEASE, NULL, td);
ASSERT_VOP_LOCKED(dvp, "nwfs_allocvp");

View File

@ -960,7 +960,7 @@ printf("dvp %d:%d:%d\n", (int)mp, (int)dvp->v_vflag & VV_ROOT, (int)flags & ISDO
if (flags & ISDOTDOT) {
VOP_UNLOCK(dvp, 0, td); /* race to get the inode */
error = nwfs_nget(mp, fid, NULL, NULL, &vp);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
if (error)
return (error);
*vpp = vp;

View File

@ -224,7 +224,7 @@ portal_root(mp, flags, vpp, td)
*/
vp = VFSTOPORTAL(mp)->pm_root;
VREF(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
*vpp = vp;
return (0);
}

View File

@ -103,7 +103,6 @@ portal_lookup(ap)
struct vnode **vpp = ap->a_vpp;
struct vnode *dvp = ap->a_dvp;
char *pname = cnp->cn_nameptr;
struct thread *td = cnp->cn_thread;
struct portalnode *pt;
int error;
struct vnode *fvp = 0;
@ -153,7 +152,7 @@ portal_lookup(ap)
pt->pt_fileid = portal_fileid++;
*vpp = fvp;
vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY);
error = insmntque(fvp, dvp->v_mount);
if (error != 0) {
*vpp = NULLVP;

View File

@ -75,7 +75,7 @@ procfs_doprocfile(PFS_FILL_ARGS)
textvp = p->p_textvp;
VI_LOCK(textvp);
vholdl(textvp);
err = vn_lock(textvp, LK_EXCLUSIVE | LK_INTERLOCK, td);
err = vn_lock(textvp, LK_EXCLUSIVE | LK_INTERLOCK);
vdrop(textvp);
if (err)
return (err);

View File

@ -182,7 +182,7 @@ procfs_doprocmap(PFS_FILL_ARGS)
VM_OBJECT_UNLOCK(obj);
if (vp != NULL) {
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vn_fullpath(td, vp, &fullpath, &freepath);
vput(vp);
VFS_UNLOCK_GIANT(vfslocked);

View File

@ -197,7 +197,7 @@ pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
(*vpp)->v_vflag |= VV_PROCDEP;
pvd->pvd_vnode = *vpp;
(*vpp)->v_vnlock->lk_flags |= LK_CANRECURSE;
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
error = insmntque(*vpp, mp);
if (error != 0) {
mtx_lock(&pfs_vncache_mutex);

View File

@ -428,13 +428,13 @@ pfs_lookup(struct vop_cachedlookup_args *va)
goto failed;
if (cnp->cn_flags & ISDOTDOT)
vn_lock(vn, LK_EXCLUSIVE|LK_RETRY, cnp->cn_thread);
vn_lock(vn, LK_EXCLUSIVE|LK_RETRY);
if (cnp->cn_flags & MAKEENTRY)
cache_enter(vn, *vpp, cnp);
PFS_RETURN (0);
failed:
if (cnp->cn_flags & ISDOTDOT)
vn_lock(vn, LK_EXCLUSIVE|LK_RETRY, cnp->cn_thread);
vn_lock(vn, LK_EXCLUSIVE|LK_RETRY);
PFS_RETURN(error);
}

View File

@ -205,10 +205,10 @@ smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
if (vp->v_type == VDIR) {
lks = LK_EXCLUSIVE;/*lockstatus(vp->v_vnlock, td);*/
if (lks == LK_SHARED)
vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
vn_lock(vp, LK_UPGRADE | LK_RETRY);
error = smbfs_readvdir(vp, uiop, cred);
if (lks == LK_SHARED)
vn_lock(vp, LK_DOWNGRADE | LK_RETRY, td);
vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
return error;
}

View File

@ -260,7 +260,7 @@ smbfs_node_alloc(struct mount *mp, struct vnode *dvp,
SMBERROR("new vnode '%s' born without parent ?\n", np->n_name);
vp->v_vnlock->lk_flags |= LK_CANRECURSE;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
smbfs_hash_lock(smp, td);
LIST_FOREACH(np2, nhpp, n_hash) {

View File

@ -1249,7 +1249,7 @@ smbfs_lookup(ap)
if (flags & ISDOTDOT) {
VOP_UNLOCK(dvp, 0, td);
error = smbfs_nget(mp, dvp, name, nmlen, NULL, &vp);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
if (error)
return error;
*vpp = vp;

View File

@ -360,7 +360,7 @@ tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
goto unlock;
MPASS(vp != NULL);
(void) vn_lock(vp, lkflag | LK_RETRY, td);
(void) vn_lock(vp, lkflag | LK_RETRY);
vp->v_data = node;
vp->v_type = node->tn_type;

View File

@ -217,7 +217,7 @@ tmpfs_mount(struct mount *mp, struct thread *td)
printf("WARNING: TMPFS is considered to be a highly experimental "
"feature in FreeBSD.\n");
vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY, td);
vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred, td);
VOP_UNLOCK(mp->mnt_vnodecovered, 0, td);
if (error)

View File

@ -104,7 +104,7 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
cnp->cn_lkflags, vpp, td);
vn_lock(dvp, ltype | LK_RETRY, td);
vn_lock(dvp, ltype | LK_RETRY);
vdrop(dvp);
} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
VREF(dvp);
@ -925,7 +925,7 @@ tmpfs_rename(struct vop_rename_args *v)
/* If we need to move the directory between entries, lock the
* source so that we can safely operate on it. */
if (tdvp != fdvp) {
error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, tcnp->cn_thread);
error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0)
goto out;
}

View File

@ -958,7 +958,7 @@ udf_lookup(struct vop_cachedlookup_args *a)
VOP_UNLOCK(dvp, 0, a->a_cnp->cn_thread);
error = udf_vget(udfmp->im_mountp, id, LK_EXCLUSIVE, &tdp);
if (flags & ISDOTDOT)
vn_lock(dvp, LK_EXCLUSIVE|LK_RETRY, a->a_cnp->cn_thread);
vn_lock(dvp, LK_EXCLUSIVE|LK_RETRY);
if (!error) {
/*
* Remember where this entry was if it's the final

View File

@ -167,7 +167,7 @@ unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
vp->v_vflag |= VV_ROOT;
if (lkflags & LK_TYPE_MASK)
vn_lock(vp, lkflags | LK_RETRY, td);
vn_lock(vp, lkflags | LK_RETRY);
*vpp = vp;
@ -386,7 +386,7 @@ unionfs_relookup(struct vnode *dvp, struct vnode **vpp,
if ((error = relookup(dvp, vpp, cn))) {
uma_zfree(namei_zone, cn->cn_pnbuf);
cn->cn_flags &= ~HASBUF;
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
} else
vrele(dvp);
@ -556,7 +556,7 @@ unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp,
panic("unionfs: no exclusive lock");
VI_UNLOCK(vp);
for (count = 1; count < lockcnt; count++)
vn_lock(uvp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
vn_lock(uvp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
}
/*

View File

@ -190,7 +190,7 @@ unionfs_domount(struct mount *mp, struct thread *td)
if (ufile == 0 && udir != 0)
ufile = udir;
vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY, td);
vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred, td);
if (!error) {
if (udir == 0)
@ -289,7 +289,7 @@ unionfs_domount(struct mount *mp, struct thread *td)
*/
if (below) {
VOP_UNLOCK(upperrootvp, 0, td);
vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY);
ump->um_lowervp = upperrootvp;
ump->um_uppervp = lowerrootvp;
} else {
@ -429,7 +429,7 @@ unionfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td)
vref(vp);
if (flags & LK_TYPE_MASK)
vn_lock(vp, flags, td);
vn_lock(vp, flags);
*vpp = vp;

View File

@ -146,7 +146,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
if (dtmpvp == udvp && ldvp != NULLVP) {
VOP_UNLOCK(udvp, 0, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
}
if (error == 0) {
@ -165,11 +165,12 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
vref(dunp->un_dvp);
if (nameiop == DELETE || nameiop == RENAME)
vn_lock(dunp->un_dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dunp->un_dvp, LK_EXCLUSIVE | LK_RETRY);
else if (cnp->cn_lkflags & LK_TYPE_MASK)
vn_lock(dunp->un_dvp, cnp->cn_lkflags | LK_RETRY, td);
vn_lock(dunp->un_dvp, cnp->cn_lkflags |
LK_RETRY);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
} else if (error == ENOENT && (cnflags & MAKEENTRY) &&
nameiop != CREATE)
cache_enter(dvp, NULLVP, cnp);
@ -278,7 +279,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
if (LK_SHARED == (cnp->cn_lkflags & LK_TYPE_MASK))
VOP_UNLOCK(vp, 0, td);
if (LK_EXCLUSIVE != VOP_ISLOCKED(vp, td)) {
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
lockflag = 1;
}
error = unionfs_mkshadowdir(MOUNTTOUNIONFSMOUNT(dvp->v_mount),
@ -294,7 +295,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
goto unionfs_lookup_out;
}
if ((cnp->cn_lkflags & LK_TYPE_MASK) == LK_SHARED)
vn_lock(vp, LK_SHARED | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY);
}
/*
* get unionfs vnode.
@ -314,7 +315,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
}
if ((nameiop == DELETE || nameiop == RENAME) &&
(cnp->cn_lkflags & LK_TYPE_MASK) == 0)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
*(ap->a_vpp) = vp;
@ -532,7 +533,7 @@ unionfs_close(struct vop_close_args *ap)
td = ap->a_td;
if (VOP_ISLOCKED(ap->a_vp, td) != LK_EXCLUSIVE) {
vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
locked = 1;
}
unionfs_get_node_status(unp, td, &unsp);
@ -876,7 +877,7 @@ unionfs_ioctl(struct vop_ioctl_args *ap)
UNIONFS_INTERNAL_DEBUG("unionfs_ioctl: enter\n");
vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, ap->a_td);
vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
unp = VTOUNIONFS(ap->a_vp);
unionfs_get_node_status(unp, ap->a_td, &unsp);
ovp = (unsp->uns_upper_opencnt ? unp->un_uppervp : unp->un_lowervp);
@ -901,7 +902,7 @@ unionfs_poll(struct vop_poll_args *ap)
struct unionfs_node_status *unsp;
struct vnode *ovp;
vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, ap->a_td);
vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
unp = VTOUNIONFS(ap->a_vp);
unionfs_get_node_status(unp, ap->a_td, &unsp);
ovp = (unsp->uns_upper_opencnt ? unp->un_uppervp : unp->un_lowervp);
@ -1107,7 +1108,7 @@ unionfs_rename(struct vop_rename_args *ap)
if (unp->un_uppervp == NULLVP) {
switch (fvp->v_type) {
case VREG:
if ((error = vn_lock(fvp, LK_EXCLUSIVE, td)) != 0)
if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
goto unionfs_rename_abort;
error = unionfs_copyfile(unp, 1, fcnp->cn_cred, td);
VOP_UNLOCK(fvp, 0, td);
@ -1115,7 +1116,7 @@ unionfs_rename(struct vop_rename_args *ap)
goto unionfs_rename_abort;
break;
case VDIR:
if ((error = vn_lock(fvp, LK_EXCLUSIVE, td)) != 0)
if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
goto unionfs_rename_abort;
error = unionfs_mkshadowdir(ump, rfdvp, unp, fcnp, td);
VOP_UNLOCK(fvp, 0, td);
@ -1169,7 +1170,7 @@ unionfs_rename(struct vop_rename_args *ap)
}
if (needrelookup != 0) {
if ((error = vn_lock(fdvp, LK_EXCLUSIVE, td)) != 0)
if ((error = vn_lock(fdvp, LK_EXCLUSIVE)) != 0)
goto unionfs_rename_abort;
error = unionfs_relookup_for_delete(fdvp, fcnp, td);
VOP_UNLOCK(fdvp, 0, td);
@ -1181,7 +1182,7 @@ unionfs_rename(struct vop_rename_args *ap)
VOP_UNLOCK(tvp, 0, td);
error = unionfs_relookup_for_rename(tdvp, tcnp, td);
if (tvp != NULLVP && tvp != tdvp)
vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0)
goto unionfs_rename_abort;
}
@ -1420,7 +1421,7 @@ unionfs_readdir(struct vop_readdir_args *ap)
/* check the open count. unionfs needs to open before readdir. */
if (VOP_ISLOCKED(ap->a_vp, td) != LK_EXCLUSIVE) {
vn_lock(ap->a_vp, LK_UPGRADE | LK_RETRY, td);
vn_lock(ap->a_vp, LK_UPGRADE | LK_RETRY);
locked = 1;
}
unionfs_get_node_status(unp, td, &unsp);
@ -1430,7 +1431,7 @@ unionfs_readdir(struct vop_readdir_args *ap)
error = EBADF;
}
if (locked == 1)
vn_lock(ap->a_vp, LK_DOWNGRADE | LK_RETRY, td);
vn_lock(ap->a_vp, LK_DOWNGRADE | LK_RETRY);
if (error != 0)
goto unionfs_readdir_exit;
@ -1752,7 +1753,7 @@ unionfs_lock(struct vop_lock1_args *ap)
if ((revlock & LK_TYPE_MASK) == LK_RELEASE)
VOP_UNLOCK(lvp, revlock, td);
else
vn_lock(lvp, revlock | LK_RETRY, td);
vn_lock(lvp, revlock | LK_RETRY);
goto unionfs_lock_abort;
}
}
@ -1872,7 +1873,7 @@ unionfs_advlock(struct vop_advlock_args *ap)
vp = ap->a_vp;
td = curthread;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
unp = VTOUNIONFS(ap->a_vp);
uvp = unp->un_uppervp;
@ -2024,12 +2025,12 @@ unionfs_openextattr(struct vop_openextattr_args *ap)
error = VOP_OPENEXTATTR(tvp, ap->a_cred, ap->a_td);
if (error == 0) {
vn_lock(vp, LK_UPGRADE | LK_RETRY, ap->a_td);
vn_lock(vp, LK_UPGRADE | LK_RETRY);
if (tvp == unp->un_uppervp)
unp->un_flag |= UNIONFS_OPENEXTU;
else
unp->un_flag |= UNIONFS_OPENEXTL;
vn_lock(vp, LK_DOWNGRADE | LK_RETRY, ap->a_td);
vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
}
return (error);
@ -2058,12 +2059,12 @@ unionfs_closeextattr(struct vop_closeextattr_args *ap)
error = VOP_CLOSEEXTATTR(tvp, ap->a_commit, ap->a_cred, ap->a_td);
if (error == 0) {
vn_lock(vp, LK_UPGRADE | LK_RETRY, ap->a_td);
vn_lock(vp, LK_UPGRADE | LK_RETRY);
if (tvp == unp->un_uppervp)
unp->un_flag &= ~UNIONFS_OPENEXTU;
else
unp->un_flag &= ~UNIONFS_OPENEXTL;
vn_lock(vp, LK_DOWNGRADE | LK_RETRY, ap->a_td);
vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
}
return (error);

View File

@ -659,7 +659,7 @@ ext2_lookup(ap)
saved_ino = dp->i_ino;
VOP_UNLOCK(pdp, 0, td); /* race to get the inode */
error = VFS_VGET(vdp->v_mount, saved_ino, LK_EXCLUSIVE, &tdp);
vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0)
return (error);
*vpp = tdp;

View File

@ -199,7 +199,7 @@ ext2_mount(mp, td)
* If upgrade to read-write by non-root, then verify
* that user has necessary permissions on the device.
*/
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_ACCESS(devvp, VREAD | VWRITE,
td->td_ucred, td);
if (error)
@ -517,7 +517,7 @@ ext2_reload(struct mount *mp, struct thread *td)
* Step 1: invalidate all cached meta-data.
*/
devvp = VFSTOEXT2(mp)->um_devvp;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
if (vinvalbuf(devvp, 0, td, 0, 0) != 0)
panic("ext2_reload: dirty1");
VOP_UNLOCK(devvp, 0, td);
@ -916,7 +916,7 @@ ext2_sync(mp, waitfor, td)
* Force stale file system control information to be flushed.
*/
if (waitfor != MNT_LAZY) {
vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
if ((error = VOP_FSYNC(ump->um_devvp, waitfor, td)) != 0)
allerror = error;
VOP_UNLOCK(ump->um_devvp, 0, td);

View File

@ -813,7 +813,7 @@ ext2_rename(ap)
goto abortit;
}
if ((error = vn_lock(fvp, LK_EXCLUSIVE, td)) != 0)
if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
goto abortit;
dp = VTOI(fdvp);
ip = VTOI(fvp);
@ -1103,7 +1103,7 @@ ext2_rename(ap)
out:
if (doingdirectory)
ip->i_flag &= ~IN_RENAME;
if (vn_lock(fvp, LK_EXCLUSIVE, td) == 0) {
if (vn_lock(fvp, LK_EXCLUSIVE) == 0) {
ip->i_nlink--;
ip->i_flag |= IN_CHANGE;
ip->i_flag &= ~IN_RENAME;
@ -1314,7 +1314,7 @@ ext2_rmdir(ap)
ip->i_nlink -= 2;
error = ext2_truncate(vp, (off_t)0, IO_SYNC, cnp->cn_cred, td);
cache_purge(ITOV(ip));
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
out:
return (error);
}

View File

@ -145,7 +145,7 @@ extern u_char *fragtbl[];
* I think I'll try a VOP_LOCK/VOP_UNLOCK on the device vnode
*/
#define DEVVP(inode) (VFSTOEXT2(ITOV(inode)->v_mount)->um_devvp)
#define lock_super(devvp) vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, curthread)
#define lock_super(devvp) vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY)
#define unlock_super(devvp) VOP_UNLOCK(devvp, 0, curthread)
/*

View File

@ -82,7 +82,7 @@ reiserfs_lookup(struct vop_cachedlookup_args *ap)
VOP_UNLOCK(pdp, 0, td);
error = reiserfs_iget(vdp->v_mount,
saved_ino, &vp, td);
vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0)
return (error);
*vpp = vp;

View File

@ -390,7 +390,7 @@ xfs_vn_allocate(xfs_mount_t *mp, xfs_inode_t *ip, struct xfs_vnode **vpp)
}
vp->v_vnlock->lk_flags |= LK_CANRECURSE;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = insmntque(vp, XVFSTOMNT(XFS_MTOVFS(mp)));
if (error != 0) {
kmem_free(vdata, sizeof(*vdata));

View File

@ -148,7 +148,7 @@ xfs_blkdev_get(
return (error);
}
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
ronly = ((XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY) != 0);
accessmode = VREAD;

View File

@ -135,7 +135,7 @@ vn_purge(struct xfs_vnode *xfs_vp)
vp = xfs_vp->v_vnode;
vn_lock(vp, LK_EXCLUSIVE, curthread);
vn_lock(vp, LK_EXCLUSIVE);
if (vp->v_holdcnt == 0)
vhold(vp);
vgone(vp);

View File

@ -1294,7 +1294,7 @@ _xfs_cachedlookup(
tvp = cvp->v_vnode;
if (nameiop == DELETE && islastcn) {
if ((error = vn_lock(tvp, LK_EXCLUSIVE, td))) {
if ((error = vn_lock(tvp, LK_EXCLUSIVE))) {
vrele(tvp);
goto err_out;
}
@ -1310,7 +1310,7 @@ _xfs_cachedlookup(
}
if (nameiop == RENAME && islastcn) {
if ((error = vn_lock(tvp, LK_EXCLUSIVE, td))) {
if ((error = vn_lock(tvp, LK_EXCLUSIVE))) {
vrele(tvp);
goto err_out;
}
@ -1323,9 +1323,9 @@ _xfs_cachedlookup(
if (flags & ISDOTDOT) {
VOP_UNLOCK(dvp, 0, td);
error = vn_lock(tvp, cnp->cn_lkflags, td);
error = vn_lock(tvp, cnp->cn_lkflags);
if (error) {
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
vrele(tvp);
goto err_out;
}
@ -1334,7 +1334,7 @@ _xfs_cachedlookup(
*vpp = tvp;
KASSERT(tvp == dvp, ("not same directory"));
} else {
if ((error = vn_lock(tvp, cnp->cn_lkflags, td))) {
if ((error = vn_lock(tvp, cnp->cn_lkflags))) {
vrele(tvp);
goto err_out;
}

View File

@ -356,7 +356,7 @@ ibcs2_getdents(td, uap)
buflen = max(DIRBLKSIZ, uap->nbytes);
buflen = min(buflen, MAXBSIZE);
buf = malloc(buflen, M_TEMP, M_WAITOK);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
again:
aiov.iov_base = buf;
aiov.iov_len = buflen;
@ -518,7 +518,7 @@ ibcs2_read(td, uap)
buflen = max(DIRBLKSIZ, uap->nbytes);
buflen = min(buflen, MAXBSIZE);
buf = malloc(buflen, M_TEMP, M_WAITOK);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
again:
aiov.iov_base = buf;
aiov.iov_len = buflen;

View File

@ -485,7 +485,7 @@ exec_coff_imgact(imgp)
DPRINTF(("%s(%d): returning successfully!\n", __FILE__, __LINE__));
fail:
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
return error;
}

View File

@ -236,7 +236,7 @@ exec_linux_imgact(struct image_params *imgp)
imgp->proc->p_sysent = &linux_sysvec;
fail:
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
return (error);
}

View File

@ -98,7 +98,6 @@ exec_aout_imgact(imgp)
struct image_params *imgp;
{
const struct exec *a_out = (const struct exec *) imgp->image_header;
struct thread *td = curthread;
struct vmspace *vmspace;
vm_map_t map;
vm_object_t object;
@ -193,14 +192,14 @@ exec_aout_imgact(imgp)
* However, in cases where the vnode lock is external, such as nullfs,
* v_usecount may become zero.
*/
VOP_UNLOCK(imgp->vp, 0, td);
VOP_UNLOCK(imgp->vp, 0, curthread);
/*
* Destroy old process VM and create a new one (with a new stack)
*/
error = exec_new_vmspace(imgp, &aout_sysvec);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
if (error)
return (error);

View File

@ -680,7 +680,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
error = exec_new_vmspace(imgp, sv);
imgp->proc->p_sysent = sv;
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
if (error)
return (error);
@ -824,7 +824,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
error = __elfN(load_file)(imgp->proc, interp, &addr,
&imgp->entry_addr, sv->sv_pagesize);
}
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0) {
uprintf("ELF interpreter %s not found\n", interp);
return (error);

View File

@ -241,7 +241,7 @@ do_aout_hdr(struct imgact_gzip * gz)
*/
error = exec_new_vmspace(gz->ip, &aout_sysvec);
vn_lock(gz->ip->vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(gz->ip->vp, LK_EXCLUSIVE | LK_RETRY);
if (error) {
gz->where = __LINE__;
return (error);

View File

@ -294,7 +294,7 @@ alq_doio(struct alq *alq)
*/
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VOP_LEASE(vp, td, alq->aq_cred, LEASE_WRITE);
/*
* XXX: VOP_WRITE error checks are ignored.

View File

@ -1183,7 +1183,7 @@ fpathconf(struct thread *td, struct fpathconf_args *uap)
if (vp != NULL) {
int vfslocked;
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_PATHCONF(vp, uap->name, td->td_retval);
VOP_UNLOCK(vp, 0, td);
VFS_UNLOCK_GIANT(vfslocked);
@ -2579,7 +2579,7 @@ sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS)
fullpath = "-";
FILEDESC_SUNLOCK(fdp);
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vn_fullpath(curthread, vp, &fullpath, &freepath);
vput(vp);
VFS_UNLOCK_GIANT(vfslocked);

View File

@ -493,7 +493,7 @@ do_execve(td, args, mac_p)
/* close files on exec */
VOP_UNLOCK(imgp->vp, 0, td);
fdcloseexec(td);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
/* Get a reference to the vnode prior to locking the proc */
VREF(ndp->ni_vp);
@ -593,7 +593,7 @@ do_execve(td, args, mac_p)
setugidsafety(td);
VOP_UNLOCK(imgp->vp, 0, td);
error = fdcheckstd(td);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0)
goto done1;
PROC_LOCK(p);
@ -749,7 +749,7 @@ do_execve(td, args, mac_p)
if (tracecred != NULL)
crfree(tracecred);
#endif
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
if (oldargs != NULL)
pargs_drop(oldargs);
if (newargs != NULL)

View File

@ -254,7 +254,7 @@ jail_attach(struct thread *td, struct jail_attach_args *uap)
sx_sunlock(&allprison_lock);
vfslocked = VFS_LOCK_GIANT(pr->pr_root->v_mount);
vn_lock(pr->pr_root, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(pr->pr_root, LK_EXCLUSIVE | LK_RETRY);
if ((error = change_dir(pr->pr_root, td)) != 0)
goto e_unlock;
#ifdef MAC

View File

@ -924,7 +924,7 @@ ktr_writerequest(struct thread *td, struct ktr_request *req)
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
error = mac_vnode_check_write(cred, NOCRED, vp);

View File

@ -1397,8 +1397,7 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
VM_OBJECT_UNLOCK(obj);
if (vp != NULL) {
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY,
curthread);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vn_fullpath(curthread, vp, &fullpath,
&freepath);
vput(vp);

View File

@ -3141,7 +3141,7 @@ coredump(struct thread *td)
vattr.va_size = 0;
if (set_core_nodump_flag)
vattr.va_flags = UF_NODUMP;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VOP_LEASE(vp, td, cred, LEASE_WRITE);
VOP_SETATTR(vp, &vattr, cred, td);
VOP_UNLOCK(vp, 0, td);

View File

@ -706,8 +706,7 @@ mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
if (vd != NULL) {
if (vget(vd->mv_vnode, 0, curthread) == 0) {
*vpp = vd->mv_vnode;
vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE,
curthread);
vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE);
return (0);
}
/* XXX if this can happen, we're in trouble */
@ -716,7 +715,7 @@ mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
error = getnewvnode("mqueue", mp, &mqfs_vnodeops, vpp);
if (error)
return (error);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
error = insmntque(*vpp, mp);
if (error != 0) {
*vpp = NULLVP;
@ -824,7 +823,7 @@ mqfs_lookupx(struct vop_cachedlookup_args *ap)
KASSERT(pd->mn_parent, ("non-root directory has no parent"));
pn = pd->mn_parent;
error = mqfs_allocv(dvp->v_mount, vpp, pn);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
return (error);
}

View File

@ -1777,7 +1777,7 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
if ((error = fgetvp_read(td, uap->fd, &vp)) != 0)
goto out;
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
obj = vp->v_object;
if (obj != NULL) {
/*
@ -2024,7 +2024,7 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
*/
bsize = vp->v_mount->mnt_stat.f_iosize;
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_SHARED | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY);
/*
* XXXMAC: Because we don't have fp->f_cred

View File

@ -91,7 +91,7 @@ vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
if (error != 0)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
error = mac_vnode_check_setacl(td->td_ucred, vp, type, &inkernacl);
if (error != 0)
@ -117,7 +117,7 @@ vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
int error;
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
error = mac_vnode_check_getacl(td->td_ucred, vp, type);
if (error != 0)
@ -146,7 +146,7 @@ vacl_delete(struct thread *td, struct vnode *vp, acl_type_t type)
if (error)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
error = mac_vnode_check_deleteacl(td->td_ucred, vp, type);
if (error)

View File

@ -764,7 +764,7 @@ aio_fsync_vnode(struct thread *td, struct vnode *vp)
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
goto drop;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (vp->v_object != NULL) {
VM_OBJECT_LOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);

View File

@ -2206,7 +2206,7 @@ flushbufqueues(int queue, int flushdeps)
BUF_UNLOCK(bp);
continue;
}
if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) == 0) {
if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
mtx_unlock(&bqlock);
CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
bp, bp->b_vp, bp->b_flags);

View File

@ -429,7 +429,7 @@ cache_lookup(dvp, vpp, cnp)
if (ltype == VOP_ISLOCKED(*vpp, td))
return (-1);
else if (ltype == LK_EXCLUSIVE)
vn_lock(*vpp, LK_UPGRADE | LK_RETRY, td);
vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
return (-1);
}
ltype = 0; /* silence gcc warning */
@ -441,7 +441,7 @@ cache_lookup(dvp, vpp, cnp)
CACHE_UNLOCK();
error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, td);
if (cnp->cn_flags & ISDOTDOT)
vn_lock(dvp, ltype | LK_RETRY, td);
vn_lock(dvp, ltype | LK_RETRY);
if ((cnp->cn_flags & ISLASTCN) && (cnp->cn_lkflags & LK_EXCLUSIVE))
ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
if (error) {

View File

@ -162,7 +162,7 @@ extattr_set_vp(struct vnode *vp, int attrnamespace, const char *attrname,
if (error)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
aiov.iov_base = data;
aiov.iov_len = nbytes;
@ -328,7 +328,7 @@ extattr_get_vp(struct vnode *vp, int attrnamespace, const char *attrname,
VFS_ASSERT_GIANT(vp->v_mount);
VOP_LEASE(vp, td, td->td_ucred, LEASE_READ);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
/*
* Slightly unusual semantics: if the user provides a NULL data
@ -509,7 +509,7 @@ extattr_delete_vp(struct vnode *vp, int attrnamespace, const char *attrname,
if (error)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
error = mac_vnode_check_deleteextattr(td->td_ucred, vp, attrnamespace,
@ -651,7 +651,7 @@ extattr_list_vp(struct vnode *vp, int attrnamespace, void *data,
VFS_ASSERT_GIANT(vp->v_mount);
VOP_LEASE(vp, td, td->td_ucred, LEASE_READ);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
auiop = NULL;
sizep = NULL;

View File

@ -408,7 +408,8 @@ lookup(struct nameidata *ndp)
cnp->cn_lkflags = LK_EXCLUSIVE;
dp = ndp->ni_startdir;
ndp->ni_startdir = NULLVP;
vn_lock(dp, compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY), td);
vn_lock(dp,
compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY));
dirloop:
/*
@ -546,7 +547,9 @@ lookup(struct nameidata *ndp)
VREF(dp);
vput(tdp);
VFS_UNLOCK_GIANT(tvfslocked);
vn_lock(dp, compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY), td);
vn_lock(dp,
compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags |
LK_RETRY));
}
}
@ -572,7 +575,7 @@ lookup(struct nameidata *ndp)
if (dp != vp_crossmp &&
VOP_ISLOCKED(dp, td) == LK_SHARED &&
(cnp->cn_flags & ISLASTCN) && (cnp->cn_flags & LOCKPARENT))
vn_lock(dp, LK_UPGRADE|LK_RETRY, td);
vn_lock(dp, LK_UPGRADE|LK_RETRY);
/*
* If we're looking up the last component and we need an exclusive
* lock, adjust our lkflags.
@ -601,7 +604,9 @@ lookup(struct nameidata *ndp)
VREF(dp);
vput(tdp);
VFS_UNLOCK_GIANT(tvfslocked);
vn_lock(dp, compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY), td);
vn_lock(dp,
compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags |
LK_RETRY));
goto unionlookup;
}
@ -678,7 +683,7 @@ lookup(struct nameidata *ndp)
ndp->ni_dvp = vp_crossmp;
error = VFS_ROOT(mp, compute_cn_lkflags(mp, cnp->cn_lkflags), &tdp, td);
vfs_unbusy(mp, td);
if (vn_lock(vp_crossmp, LK_SHARED | LK_NOWAIT, td))
if (vn_lock(vp_crossmp, LK_SHARED | LK_NOWAIT))
panic("vp_crossmp exclusively locked or reclaimed");
if (error) {
dpunlocked = 1;
@ -778,7 +783,7 @@ lookup(struct nameidata *ndp)
*/
if ((cnp->cn_flags & (ISLASTCN | LOCKSHARED | LOCKLEAF)) ==
(ISLASTCN | LOCKLEAF) && VOP_ISLOCKED(dp, td) != LK_EXCLUSIVE) {
vn_lock(dp, LK_UPGRADE | LK_RETRY, td);
vn_lock(dp, LK_UPGRADE | LK_RETRY);
}
if (vfslocked && dvfslocked)
VFS_UNLOCK_GIANT(dvfslocked); /* Only need one */
@ -825,7 +830,7 @@ relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
cnp->cn_flags &= ~ISSYMLINK;
dp = dvp;
cnp->cn_lkflags = LK_EXCLUSIVE;
vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dp, LK_EXCLUSIVE | LK_RETRY);
/*
* Search a new directory.

View File

@ -1061,7 +1061,7 @@ vfs_domount(
else
mp->mnt_kern_flag &= ~MNTK_ASYNC;
MNT_IUNLOCK(mp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
/*
* Put the new filesystem on the mount list after root.
*/
@ -1204,7 +1204,7 @@ dounmount(mp, flags, td)
mnt_gen_r = mp->mnt_gen;
VI_LOCK(coveredvp);
vholdl(coveredvp);
vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY, td);
vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
vdrop(coveredvp);
/*
* Check for mp being unmounted while waiting for the

View File

@ -1004,7 +1004,7 @@ insmntque_stddtr(struct vnode *vp, void *dtr_arg)
/* XXX non mp-safe fs may still call insmntque with vnode
unlocked */
if (!VOP_ISLOCKED(vp, td))
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vgone(vp);
vput(vp);
}
@ -1662,7 +1662,7 @@ sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
mtx_lock(&sync_mtx);
return (1);
}
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
(void) VOP_FSYNC(vp, MNT_LAZY, td);
VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
@ -2059,7 +2059,7 @@ vget(struct vnode *vp, int flags, struct thread *td)
oweinact = 1;
}
vholdl(vp);
if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) {
vdrop(vp);
return (error);
}
@ -2154,7 +2154,7 @@ vrele(struct vnode *vp)
* as VI_DOINGINACT to avoid recursion.
*/
vp->v_iflag |= VI_OWEINACT;
if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0) {
VI_LOCK(vp);
if (vp->v_usecount > 0)
vp->v_iflag &= ~VI_OWEINACT;
@ -2359,7 +2359,7 @@ vflush( struct mount *mp, int rootrefs, int flags, struct thread *td)
VI_LOCK(vp);
vholdl(vp);
MNT_IUNLOCK(mp);
error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
if (error) {
vdrop(vp);
MNT_ILOCK(mp);
@ -3869,7 +3869,7 @@ vfs_knllock(void *arg)
{
struct vnode *vp = arg;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
static void

View File

@ -364,7 +364,7 @@ kern_fstatfs(struct thread *td, int fd, struct statfs *buf)
return (error);
vp = fp->f_vnode;
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef AUDIT
AUDIT_ARG(vnode, vp, ARG_VNODE1);
#endif
@ -732,7 +732,7 @@ fchdir(td, uap)
VREF(vp);
fdrop(fp, td);
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, vp, ARG_VNODE1);
error = change_dir(vp, td);
while (!error && (mp = vp->v_mountedhere) != NULL) {
@ -1103,7 +1103,7 @@ kern_open(struct thread *td, char *path, enum uio_seg pathseg, int flags,
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
VATTR_NULL(&vat);
vat.va_size = 0;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
error = mac_vnode_check_write(td->td_ucred, fp->f_cred, vp);
if (error == 0)
@ -1462,7 +1462,7 @@ kern_link(struct thread *td, char *path, char *link, enum uio_seg segflg)
vput(nd.ni_dvp);
vrele(nd.ni_vp);
error = EEXIST;
} else if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td))
} else if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY))
== 0) {
VOP_LEASE(nd.ni_dvp, td, td->td_ucred, LEASE_WRITE);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
@ -1767,7 +1767,7 @@ lseek(td, uap)
offset += fp->f_offset;
break;
case L_XTND:
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_GETATTR(vp, &vattr, cred, td);
VOP_UNLOCK(vp, 0, td);
if (error)
@ -2398,7 +2398,7 @@ setfflags(td, vp, flags)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VATTR_NULL(&vattr);
vattr.va_flags = flags;
#ifdef MAC
@ -2500,7 +2500,7 @@ fchflags(td, uap)
return (error);
vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
#ifdef AUDIT
vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, fp->f_vnode, ARG_VNODE1);
VOP_UNLOCK(fp->f_vnode, 0, td);
#endif
@ -2526,7 +2526,7 @@ setfmode(td, vp, mode)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VATTR_NULL(&vattr);
vattr.va_mode = mode & ALLPERMS;
#ifdef MAC
@ -2640,7 +2640,7 @@ fchmod(td, uap)
return (error);
vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
#ifdef AUDIT
vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, fp->f_vnode, ARG_VNODE1);
VOP_UNLOCK(fp->f_vnode, 0, td);
#endif
@ -2667,7 +2667,7 @@ setfown(td, vp, uid, gid)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VATTR_NULL(&vattr);
vattr.va_uid = uid;
vattr.va_gid = gid;
@ -2797,7 +2797,7 @@ fchown(td, uap)
return (error);
vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
#ifdef AUDIT
vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, fp->f_vnode, ARG_VNODE1);
VOP_UNLOCK(fp->f_vnode, 0, td);
#endif
@ -2860,7 +2860,7 @@ setutimes(td, vp, ts, numtimes, nullflag)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
setbirthtime = 0;
if (numtimes < 3 && VOP_GETATTR(vp, &vattr, td->td_ucred, td) == 0 &&
timespeccmp(&ts[1], &vattr.va_birthtime, < ))
@ -3010,7 +3010,7 @@ kern_futimes(struct thread *td, int fd, struct timeval *tptr,
return (error);
vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
#ifdef AUDIT
vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, fp->f_vnode, ARG_VNODE1);
VOP_UNLOCK(fp->f_vnode, 0, td);
#endif
@ -3067,7 +3067,7 @@ kern_truncate(struct thread *td, char *path, enum uio_seg pathseg, off_t length)
}
NDFREE(&nd, NDF_ONLY_PNBUF);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (vp->v_type == VDIR)
error = EISDIR;
#ifdef MAC
@ -3165,7 +3165,7 @@ fsync(td, uap)
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
goto drop;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, vp, ARG_VNODE1);
if (vp->v_object != NULL) {
VM_OBJECT_LOCK(vp->v_object);
@ -3550,7 +3550,7 @@ ogetdirentries(td, uap)
auio.uio_segflg = UIO_USERSPACE;
auio.uio_td = td;
auio.uio_resid = uap->count;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
loff = auio.uio_offset = fp->f_offset;
#ifdef MAC
error = mac_vnode_check_readdir(td->td_ucred, vp);
@ -3692,8 +3692,8 @@ getdirentries(td, uap)
auio.uio_segflg = UIO_USERSPACE;
auio.uio_td = td;
auio.uio_resid = uap->count;
/* vn_lock(vp, LK_SHARED | LK_RETRY, td); */
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
/* vn_lock(vp, LK_SHARED | LK_RETRY); */
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, vp, ARG_VNODE1);
loff = auio.uio_offset = fp->f_offset;
#ifdef MAC
@ -4054,7 +4054,7 @@ fhopen(td, uap)
goto out;
}
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); /* XXX */
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */
#ifdef MAC
/*
* We don't yet have fp->f_cred, so use td->td_ucred, which
@ -4120,7 +4120,7 @@ fhopen(td, uap)
fdrop(fp, td);
goto out;
}
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
atomic_set_int(&fp->f_flag, FHASLOCK);
}

View File

@ -287,7 +287,7 @@ vn_close(vp, flags, file_cred, td)
VFS_ASSERT_GIANT(vp->v_mount);
vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (flags & FWRITE) {
VNASSERT(vp->v_writecount > 0, vp,
("vn_close: negative writecount"));
@ -371,14 +371,14 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
(error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
!= 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
} else {
/*
* XXX This should be LK_SHARED but I don't trust VFS
* enough to leave it like that until it has been
* reviewed further.
*/
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
}
@ -525,10 +525,10 @@ vn_read(fp, uio, active_cred, flags, td)
}
fp->f_vnread_flags |= FOFFSET_LOCKED;
mtx_unlock(mtxp);
vn_lock(vp, LK_SHARED | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY);
uio->uio_offset = fp->f_offset;
} else
vn_lock(vp, LK_SHARED | LK_RETRY, td);
vn_lock(vp, LK_SHARED | LK_RETRY);
ioflag |= sequential_heuristic(uio, fp);
@ -588,7 +588,7 @@ vn_write(fp, uio, active_cred, flags, td)
(error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
goto unlock;
VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if ((flags & FOF_OFFSET) == 0)
uio->uio_offset = fp->f_offset;
ioflag |= sequential_heuristic(uio, fp);
@ -632,7 +632,7 @@ vn_truncate(fp, length, active_cred, td)
return (error);
}
VOP_LEASE(vp, td, active_cred, LEASE_WRITE);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (vp->v_type == VDIR) {
error = EISDIR;
goto out;
@ -670,7 +670,7 @@ vn_statfile(fp, sb, active_cred, td)
int error;
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
VOP_UNLOCK(vp, 0, td);
VFS_UNLOCK_GIANT(vfslocked);
@ -805,7 +805,7 @@ vn_ioctl(fp, com, data, active_cred, td)
case VREG:
case VDIR:
if (com == FIONREAD) {
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_GETATTR(vp, &vattr, active_cred, td);
VOP_UNLOCK(vp, 0, td);
if (!error)
@ -842,7 +842,7 @@ vn_poll(fp, events, active_cred, td)
vp = fp->f_vnode;
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
#ifdef MAC
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
VOP_UNLOCK(vp, 0, td);
if (!error)
@ -858,7 +858,7 @@ vn_poll(fp, events, active_cred, td)
* acquire requested lock.
*/
int
_vn_lock(struct vnode *vp, int flags, struct thread *td, char *file, int line)
_vn_lock(struct vnode *vp, int flags, char *file, int line)
{
int error;
@ -881,7 +881,8 @@ _vn_lock(struct vnode *vp, int flags, struct thread *td, char *file, int line)
* lockmgr drops interlock before it will return for
* any reason. So force the code above to relock it.
*/
error = VOP_LOCK1(vp, flags | LK_INTERLOCK, td, file, line);
error = VOP_LOCK1(vp, flags | LK_INTERLOCK, curthread, file,
line);
flags &= ~LK_INTERLOCK;
KASSERT((flags & LK_RETRY) == 0 || error == 0,
("LK_RETRY set with incompatible flags %d\n", flags));
@ -891,7 +892,7 @@ _vn_lock(struct vnode *vp, int flags, struct thread *td, char *file, int line)
*/
if (error == 0 && vp->v_iflag & VI_DOOMED &&
(flags & LK_RETRY) == 0) {
VOP_UNLOCK(vp, 0, td);
VOP_UNLOCK(vp, 0, curthread);
error = ENOENT;
break;
}
@ -1222,7 +1223,7 @@ vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
auio.uio_resid = *buflen;
if ((ioflg & IO_NODELOCKED) == 0)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
@ -1266,7 +1267,7 @@ vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
if ((ioflg & IO_NODELOCKED) == 0) {
if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
@ -1292,7 +1293,7 @@ vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
if ((ioflg & IO_NODELOCKED) == 0) {
if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

View File

@ -1048,7 +1048,7 @@ nfs4_lookup(struct vop_lookup_args *ap)
VOP_UNLOCK(dvp, 0, td);
error = nfs_nget(dvp->v_mount, fhp, fhsize, &np, LK_EXCLUSIVE);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
if (error)
return (error);
newvp = NFSTOV(np);
@ -1709,7 +1709,7 @@ nfs4_rename(struct vop_rename_args *ap)
error = 0;
goto out;
}
if ((error = vn_lock(fvp, LK_EXCLUSIVE, fcnp->cn_thread)) != 0)
if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
goto out;
/*

View File

@ -487,9 +487,9 @@ nfs_upgrade_vnlock(struct vnode *vp, struct thread *td)
if ((old_lock = VOP_ISLOCKED(vp, td)) != LK_EXCLUSIVE) {
if (old_lock == LK_SHARED) {
/* Upgrade to exclusive lock, this might block */
vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
vn_lock(vp, LK_UPGRADE | LK_RETRY);
} else {
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
}
return old_lock;
@ -501,7 +501,7 @@ nfs_downgrade_vnlock(struct vnode *vp, struct thread *td, int old_lock)
if (old_lock != LK_EXCLUSIVE) {
if (old_lock == LK_SHARED) {
/* Downgrade from exclusive lock, this might block */
vn_lock(vp, LK_DOWNGRADE, td);
vn_lock(vp, LK_DOWNGRADE);
} else {
VOP_UNLOCK(vp, 0, td);
}

View File

@ -932,7 +932,7 @@ nfs_lookup(struct vop_lookup_args *ap)
if (flags & ISDOTDOT) {
VOP_UNLOCK(dvp, 0, td);
error = nfs_nget(dvp->v_mount, fhp, fhsize, &np, cnp->cn_lkflags);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
if (error)
return (error);
newvp = NFSTOV(np);
@ -1605,7 +1605,7 @@ nfs_rename(struct vop_rename_args *ap)
error = 0;
goto out;
}
if ((error = vn_lock(fvp, LK_EXCLUSIVE, fcnp->cn_thread)) != 0)
if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
goto out;
/*

View File

@ -1484,7 +1484,7 @@ nfsrv_writegather(struct nfsrv_descript **ndp, struct nfssvc_sock *slp,
if (vn_start_write(vp, &mntp, V_NOWAIT) != 0) {
VOP_UNLOCK(vp, 0, td);
error = vn_start_write(NULL, &mntp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
mvfslocked = VFS_LOCK_GIANT(mntp);
}
@ -1888,7 +1888,7 @@ nfsrv_create(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
nd.ni_dvp = NULL;
nd.ni_vp = NULL;
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY);
diraft_ret = VOP_GETATTR(dirp, &diraft, cred, td);
VOP_UNLOCK(dirp, 0, td);
}
@ -2090,7 +2090,7 @@ nfsrv_mknod(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
}
NDFREE(&nd, NDF_ONLY_PNBUF);
if (dirp) {
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY);
diraft_ret = VOP_GETATTR(dirp, &diraft, cred, td);
VOP_UNLOCK(dirp, 0, td);
}
@ -2207,7 +2207,7 @@ nfsrv_remove(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
nd.ni_dvp = NULL;
nd.ni_vp = NULL;
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY);
diraft_ret = VOP_GETATTR(dirp, &diraft, cred, td);
VOP_UNLOCK(dirp, 0, td);
}
@ -2418,12 +2418,12 @@ nfsrv_rename(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
tond.ni_vp = NULL;
if (fdirp) {
vn_lock(fdirp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(fdirp, LK_EXCLUSIVE | LK_RETRY);
fdiraft_ret = VOP_GETATTR(fdirp, &fdiraft, cred, td);
VOP_UNLOCK(fdirp, 0, td);
}
if (tdirp) {
vn_lock(tdirp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(tdirp, LK_EXCLUSIVE | LK_RETRY);
tdiraft_ret = VOP_GETATTR(tdirp, &tdiraft, cred, td);
VOP_UNLOCK(tdirp, 0, td);
}
@ -2558,7 +2558,7 @@ nfsrv_link(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
vp = NULL;
goto out2;
}
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
NDFREE(&nd, NDF_ONLY_PNBUF);
/* fall through */
@ -2583,7 +2583,7 @@ nfsrv_link(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
nd.ni_dvp = NULL;
nd.ni_vp = NULL;
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY);
diraft_ret = VOP_GETATTR(dirp, &diraft, cred, td);
VOP_UNLOCK(dirp, 0, td);
}
@ -2759,7 +2759,7 @@ nfsrv_symlink(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
pathcp = NULL;
}
if (dirp) {
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY);
diraft_ret = VOP_GETATTR(dirp, &diraft, cred, td);
VOP_UNLOCK(dirp, 0, td);
}
@ -2921,7 +2921,7 @@ nfsrv_mkdir(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
}
nd.ni_dvp = NULL;
nd.ni_vp = NULL;
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY);
diraft_ret = VOP_GETATTR(dirp, &diraft, cred, td);
VOP_UNLOCK(dirp, 0, td);
}
@ -3058,7 +3058,7 @@ nfsrv_rmdir(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
vput(nd.ni_vp);
nd.ni_dvp = NULL;
nd.ni_vp = NULL;
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(dirp, LK_EXCLUSIVE | LK_RETRY);
diraft_ret = VOP_GETATTR(dirp, &diraft, cred, td);
VOP_UNLOCK(dirp, 0, td);
}
@ -3234,7 +3234,7 @@ nfsrv_readdir(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
io.uio_rw = UIO_READ;
io.uio_td = NULL;
eofflag = 0;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (cookies) {
free((caddr_t)cookies, M_TEMP);
cookies = NULL;
@ -3516,7 +3516,7 @@ nfsrv_readdirplus(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
io.uio_rw = UIO_READ;
io.uio_td = NULL;
eofflag = 0;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (cookies) {
free((caddr_t)cookies, M_TEMP);
cookies = NULL;

Some files were not shown because too many files have changed in this diff Show More