vfs: remove the thread argument from vget
It was already asserted to be curthread.

Semantic patch:

@@
expression arg1, arg2, arg3;
@@

- vget(arg1, arg2, arg3)
+ vget(arg1, arg2)
parent 26268c7c31
commit 0c7391ed92
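The semantic patch above is a Coccinelle rule. The commit does not record how it was applied; as a minimal sketch, assuming the rule is saved as vget.cocci (a hypothetical file name) and the kernel sources live under sys/, a tree-wide run could look like:

    # hypothetical invocation; spatch is Coccinelle's driver program
    spatch --sp-file vget.cocci --in-place --dir sys

With --in-place, spatch rewrites each matching vget() call site directly, which is how a mechanical API change of this size is typically generated before review.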
@@ -260,7 +260,7 @@ igrab(struct inode *inode)
 {
     int error;
 
-    error = vget(inode, 0, curthread);
+    error = vget(inode, 0);
     if (error)
         return (NULL);
 
@@ -656,7 +656,7 @@ autofs_node_vn(struct autofs_node *anp, struct mount *mp, int flags,
 
     vp = anp->an_vnode;
     if (vp != NULL) {
-        error = vget(vp, flags | LK_RETRY, curthread);
+        error = vget(vp, flags | LK_RETRY);
         if (error != 0) {
             AUTOFS_WARN("vget failed with error %d", error);
             sx_xunlock(&anp->an_vnode_lock);
@@ -799,7 +799,7 @@ loop:
         /*
          * Step 4: invalidate all cached file data.
          */
-        if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
+        if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
            MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
            goto loop;
        }
@@ -1158,7 +1158,7 @@ loop:
            VI_UNLOCK(vp);
            continue;
        }
-       error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
+       error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
        if (error) {
            if (error == ENOENT) {
                MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
@@ -163,7 +163,7 @@ fdesc_root(struct mount *mp, int flags, struct vnode **vpp)
     * Return locked reference to root.
     */
    vp = VFSTOFDESC(mp)->f_root;
-   vget(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
+   vget(vp, LK_EXCLUSIVE | LK_RETRY);
    *vpp = vp;
    return (0);
 }
@@ -182,7 +182,7 @@ loop:
        vp = fd->fd_vnode;
        VI_LOCK(vp);
        mtx_unlock(&fdesc_hashmtx);
-       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td))
+       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
            goto loop;
        *vpp = vp;
        return (0);
@@ -232,7 +232,7 @@ loop:
        vp2 = fd2->fd_vnode;
        VI_LOCK(vp2);
        mtx_unlock(&fdesc_hashmtx);
-       error = vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK, td);
+       error = vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK);
        /* Someone beat us, dec use count and wait for reclaim */
        vgone(vp);
        vput(vp);
@@ -610,7 +610,7 @@ fuse_vfsop_root(struct mount *mp, int lkflags, struct vnode **vpp)
    int err = 0;
 
    if (data->vroot != NULL) {
-       err = vget(data->vroot, lkflags, curthread);
+       err = vget(data->vroot, lkflags);
        if (err == 0)
            *vpp = data->vroot;
    } else {
@@ -921,7 +921,7 @@ loop:
            VI_UNLOCK(vp);
            continue;
        }
-       error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
+       error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
        if (error) {
            if (error == ENOENT) {
                MNT_VNODE_FOREACH_ALL_ABORT(mp, nvp);
@@ -1828,7 +1828,7 @@ loop:
            VI_UNLOCK(vp);
            continue;
        }
-       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
+       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
            MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
            goto loop;
        }
@@ -281,7 +281,7 @@ nullfs_root(mp, flags, vpp)
    NULLFSDEBUG("nullfs_root(mp = %p, vp = %p)\n", mp,
        mntdata->nullm_lowerrootvp);
 
-   error = vget(mntdata->nullm_lowerrootvp, flags, curthread);
+   error = vget(mntdata->nullm_lowerrootvp, flags);
    if (error == 0) {
        error = null_nodeget(mp, mntdata->nullm_lowerrootvp, &vp);
        if (error == 0) {
@@ -136,7 +136,7 @@ retry:
        vp = pvd->pvd_vnode;
        VI_LOCK(vp);
        mtx_unlock(&pfs_vncache_mutex);
-       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
+       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0) {
            ++pfs_vncache_hits;
            *vpp = vp;
            /*
@@ -218,7 +218,7 @@ retry2:
        vp = pvd2->pvd_vnode;
        VI_LOCK(vp);
        mtx_unlock(&pfs_vncache_mutex);
-       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
+       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0) {
            ++pfs_vncache_hits;
            vgone(*vpp);
            vput(*vpp);
@@ -124,7 +124,7 @@ smbfs_node_alloc(struct mount *mp, struct vnode *dvp, const char *dirnm,
        if (dvp == NULL)
            return EINVAL;
        vp = VTOSMB(VTOSMB(dvp)->n_parent)->n_vnode;
-       error = vget(vp, LK_EXCLUSIVE, td);
+       error = vget(vp, LK_EXCLUSIVE);
        if (error == 0)
            *vpp = vp;
        return error;
@@ -329,7 +329,7 @@ smbfs_root(struct mount *mp, int flags, struct vnode **vpp)
 
    if (smp->sm_root) {
        *vpp = SMBTOV(smp->sm_root);
-       return vget(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
+       return vget(*vpp, LK_EXCLUSIVE | LK_RETRY);
    }
    scred = smbfs_malloc_scred();
    smb_makescred(scred, td, cred);
@@ -620,7 +620,7 @@ loop:
            goto loop;
        }
        TMPFS_NODE_UNLOCK(node);
-       error = vget(vp, lkflag | LK_INTERLOCK, curthread);
+       error = vget(vp, lkflag | LK_INTERLOCK);
        if (error == ENOENT) {
            TMPFS_NODE_LOCK(node);
            goto loop;
@@ -130,8 +130,7 @@ tmpfs_update_mtime(struct mount *mp, bool lazy)
         * metadata changes now.
         */
        if (!lazy || obj->generation != obj->cleangeneration) {
-           if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
-               curthread) != 0)
+           if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
                continue;
            tmpfs_check_mtime(vp);
            if (!lazy)
@@ -754,7 +754,7 @@ mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
 found:
    *vpp = vd->mv_vnode;
    sx_xunlock(&mqfs->mi_lock);
-   error = vget(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread);
+   error = vget(*vpp, LK_RETRY | LK_EXCLUSIVE);
    vdrop(*vpp);
    return (error);
 }
@@ -1348,7 +1348,7 @@ loop:
            VI_UNLOCK(vp);
            continue;
        }
-       if ((error = vget(vp, lockreq, td)) != 0) {
+       if ((error = vget(vp, lockreq)) != 0) {
            if (error == ENOENT) {
                MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
                goto loop;
@@ -2856,12 +2856,10 @@ vget_abort(struct vnode *vp, enum vgetstate vs)
 }
 
 int
-vget(struct vnode *vp, int flags, struct thread *td)
+vget(struct vnode *vp, int flags)
 {
    enum vgetstate vs;
 
-   MPASS(td == curthread);
-
    vs = vget_prep(vp);
    return (vget_finish(vp, flags, vs));
 }
@@ -4684,7 +4682,7 @@ vfs_periodic_msync_inactive(struct mount *mp, int flags)
            VI_UNLOCK(vp);
            continue;
        }
-       if (vget(vp, lkflags, td) == 0) {
+       if (vget(vp, lkflags) == 0) {
            obj = vp->v_object;
            if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) {
                VM_OBJECT_WLOCK(obj);
@@ -680,7 +680,7 @@ void vlazy(struct vnode *);
 void vdrop(struct vnode *);
 void vdropl(struct vnode *);
 int vflush(struct mount *mp, int rootrefs, int flags, struct thread *td);
-int vget(struct vnode *vp, int flags, struct thread *td);
+int vget(struct vnode *vp, int flags);
 enum vgetstate vget_prep_smr(struct vnode *vp);
 enum vgetstate vget_prep(struct vnode *vp);
 int vget_finish(struct vnode *vp, int flags, enum vgetstate vs);
@@ -3463,7 +3463,7 @@ sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
         */
        pwd = pwd_hold(td);
        dvp = pwd->pwd_cdir;
-       if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) {
+       if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
            vput(fdvp);
            pwd_drop(pwd);
            break;
@@ -13624,8 +13624,7 @@ softdep_request_cleanup_flush(mp, ump)
            VI_UNLOCK(lvp);
            continue;
        }
-       if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
-           td) != 0) {
+       if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT) != 0) {
            failed_vnode = 1;
            continue;
        }
@@ -974,7 +974,7 @@ loop:
        /*
         * Step 4: invalidate all cached file data.
         */
-       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
+       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
            MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
            goto loop;
        }
@@ -1758,8 +1758,7 @@ ffs_sync_lazy(mp)
            VI_UNLOCK(vp);
            continue;
        }
-       if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
-           td)) != 0)
+       if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0)
            continue;
 #ifdef QUOTA
        qsyncvp(vp);
@@ -1856,7 +1855,7 @@ loop:
            VI_UNLOCK(vp);
            continue;
        }
-       if ((error = vget(vp, lockreq, td)) != 0) {
+       if ((error = vget(vp, lockreq)) != 0) {
            if (error == ENOENT || error == ENOLCK) {
                MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
                goto loop;
@@ -613,7 +613,7 @@ quotaon(struct thread *td, struct mount *mp, int type, void *fname)
     */
 again:
    MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
-       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
+       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
            MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
            goto again;
        }
@@ -680,7 +680,7 @@ again:
            VI_UNLOCK(vp);
            continue;
        }
-       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
+       if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
            MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
            goto again;
        }
@@ -1064,7 +1064,6 @@ int
 qsync(struct mount *mp)
 {
    struct ufsmount *ump = VFSTOUFS(mp);
-   struct thread *td = curthread;      /* XXX */
    struct vnode *vp, *mvp;
    struct dquot *dq;
    int i, error;
@@ -1088,7 +1087,7 @@ again:
            VI_UNLOCK(vp);
            continue;
        }
-       error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td);
+       error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK);
        if (error) {
            if (error == ENOENT) {
                MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
@@ -673,7 +673,7 @@ vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
     * paging-in-progress count incremented. Otherwise, we could
     * deadlock.
     */
-   error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT, curthread);
+   error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
    if (error == 0) {
        fs->vp = vp;
        return (KERN_SUCCESS);
@@ -684,7 +684,7 @@ vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
        unlock_and_deallocate(fs);
    else
        fault_deallocate(fs);
-   error = vget(vp, locked | LK_RETRY | LK_CANRECURSE, curthread);
+   error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
    vdrop(vp);
    fs->vp = vp;
    KASSERT(error == 0, ("vm_fault: vget failed %d", error));
@@ -1284,7 +1284,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    cred = td->td_ucred;
    writex = (*maxprotp & VM_PROT_WRITE) != 0 &&
        (*flagsp & MAP_SHARED) != 0;
-   if ((error = vget(vp, LK_SHARED, td)) != 0)
+   if ((error = vget(vp, LK_SHARED)) != 0)
        return (error);
    AUDIT_ARG_VNODE1(vp);
    foff = *foffp;
@@ -1305,7 +1305,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
         * Bypass filesystems obey the mpsafety of the
         * underlying fs. Tmpfs never bypasses.
         */
-       error = vget(vp, LK_SHARED, td);
+       error = vget(vp, LK_SHARED);
        if (error != 0)
            return (error);
    }
@@ -643,7 +643,7 @@ vm_pageout_clean(vm_page_t m, int *numpagedout)
        VM_OBJECT_WUNLOCK(object);
        lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
            LK_SHARED : LK_EXCLUSIVE;
-       if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
+       if (vget(vp, lockmode | LK_TIMELOCK)) {
            vp = NULL;
            error = EDEADLK;
            goto unlock_mp;