Reimplement traverse() helper function:

1. Pass locking flags to VFS_ROOT().
2. Check v_mountedhere while the vnode is locked.
3. Always return locked vnode on success.

Change 1 fixes a problem reported by Stephen M. Rumble: after the
zfs_vfsops.c rev. 1.9 change, zfs_root() no longer locks the vnode
unconditionally, but traverse() still didn't pass the right lock type to
VFS_ROOT(). As a result, the kernel panicked when the .zfs/ directory
was accessed over NFS.
Pawel Jakub Dawidek 2007-06-04 11:31:46 +00:00
parent 7344a290dc
commit b166b92692
6 changed files with 46 additions and 40 deletions
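
Read together, the hunks below give the reimplemented helper roughly the
following shape. This is a sketch assembled from the diff: the declarations
not visible in the hunks (tvp, vfsp), the for (;;) loop framing, the blank
lines, and the tail of the introductory comment are reconstructed, so treat
it as illustrative rather than verbatim source.

int
traverse(vnode_t **cvpp, int lktype)
{
        kthread_t *td = curthread;
        vnode_t *cvp;
        vnode_t *tvp;
        vfs_t *vfsp;
        int error;

        cvp = *cvpp;
        tvp = NULL;

        /*
         * If this vnode is mounted on, then we transparently indirect
         * to the root vnode of the mounted file system, repeating until
         * the topmost file system is reached.
         */
        for (;;) {
                vfsp = vn_mountedvfs(cvp);
                if (vfsp == NULL)
                        break;
                /*
                 * tvp is NULL for *cvpp vnode, which we can't unlock.
                 */
                if (tvp != NULL)
                        vput(cvp);
                else
                        vrele(cvp);
                /*
                 * The read lock must be held across the call to VFS_ROOT() to
                 * prevent a concurrent unmount from destroying the vfs.
                 */
                error = VFS_ROOT(vfsp, lktype, &tvp, td);
                if (error != 0)
                        return (error);
                cvp = tvp;
        }
        *cvpp = cvp;
        return (0);
}

On success the vnode handed back through *cvpp is held and locked (with the
requested lktype whenever a mount point was actually crossed); if VFS_ROOT()
fails, the reference passed in has already been released.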

@@ -110,7 +110,7 @@ vfs_optionisset(const vfs_t *vfsp, const char *opt, char **argp)
 }
 
 int
-traverse(vnode_t **cvpp)
+traverse(vnode_t **cvpp, int lktype)
 {
         kthread_t *td = curthread;
         vnode_t *cvp;
@@ -119,7 +119,7 @@ traverse(vnode_t **cvpp)
         int error;
 
         cvp = *cvpp;
-        error = 0;
+        tvp = NULL;
 
         /*
          * If this vnode is mounted on, then we transparently indirect
@@ -135,22 +135,26 @@ traverse(vnode_t **cvpp)
                 vfsp = vn_mountedvfs(cvp);
                 if (vfsp == NULL)
                         break;
-                VN_RELE(cvp);
+                /*
+                 * tvp is NULL for *cvpp vnode, which we can't unlock.
+                 */
+                if (tvp != NULL)
+                        vput(cvp);
+                else
+                        vrele(cvp);
 
                 /*
                  * The read lock must be held across the call to VFS_ROOT() to
                  * prevent a concurrent unmount from destroying the vfs.
                  */
-                error = VFS_ROOT(vfsp, 0, &tvp, td);
-                if (error)
-                        break;
-                VOP_UNLOCK(tvp, 0, td);
+                error = VFS_ROOT(vfsp, lktype, &tvp, td);
+                if (error != 0)
+                        return (error);
                 cvp = tvp;
         }
         *cvpp = cvp;
-        return (error);
+        return (0);
 }
 
 int
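
The prototype change and the zfsctl_* callers below adopt the new contract.
In caller terms it looks roughly like the sketch that follows; the function
name covered_root() is hypothetical and only illustrates the hold/lock
bookkeeping, it is not part of this commit.

/*
 * Hypothetical caller of the new traverse() (illustration only):
 * resolve the root of whatever is mounted on top of 'vp' and hand
 * it back held and locked with 'lktype'.
 */
static int
covered_root(vnode_t *vp, int lktype, vnode_t **rvpp)
{
        vnode_t *cvp = vp;
        int error;

        /* Nothing mounted here, so there is no covering root to return. */
        if (vn_mountedvfs(cvp) == NULL)
                return (ENOENT);

        VN_HOLD(cvp);
        error = traverse(&cvp, lktype);
        if (error != 0)
                return (error);         /* traverse() already dropped the hold */
        *rvpp = cvp;                    /* held and locked; release with vput() later */
        return (0);
}

Before this change a caller had to re-lock the result itself (as the old
zfsctl_traverse_begin() did with vn_lock()); now the requested lock type is
simply passed through to VFS_ROOT().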

@@ -107,7 +107,7 @@ void vfs_setmntopt(vfs_t *vfsp, const char *name, const char *arg,
     int flags __unused);
 void vfs_clearmntopt(vfs_t *vfsp, const char *name);
 int vfs_optionisset(const vfs_t *vfsp, const char *opt, char **argp);
-int traverse(vnode_t **cvpp);
+int traverse(vnode_t **cvpp, int lktype);
 int domount(kthread_t *td, vnode_t *vp, const char *fstype, char *fspath,
     char *fspec, int fsflags);

@@ -924,18 +924,14 @@ zfsctl_snapshot_inactive(ap)
 }
 
 static int
-zfsctl_traverse_begin(vnode_t **vpp, kthread_t *td)
+zfsctl_traverse_begin(vnode_t **vpp, int lktype, kthread_t *td)
 {
-        int err;
 
         VN_HOLD(*vpp);
         /* Snapshot should be already mounted, but just in case. */
         if (vn_mountedvfs(*vpp) == NULL)
                 return (ENOENT);
-        err = traverse(vpp);
-        if (err == 0)
-                vn_lock(*vpp, LK_SHARED | LK_RETRY, td);
-        return (err);
+        return (traverse(vpp, lktype));
 }
 
 static void
@@ -960,7 +956,7 @@ zfsctl_snapshot_getattr(ap)
         vnode_t *vp = ap->a_vp;
         int err;
 
-        err = zfsctl_traverse_begin(&vp, ap->a_td);
+        err = zfsctl_traverse_begin(&vp, LK_SHARED | LK_RETRY, ap->a_td);
         if (err == 0)
                 err = VOP_GETATTR(vp, ap->a_vap, ap->a_cred, ap->a_td);
         zfsctl_traverse_end(vp, err);
@@ -977,7 +973,7 @@ zfsctl_snapshot_fid(ap)
         vnode_t *vp = ap->a_vp;
         int err;
 
-        err = zfsctl_traverse_begin(&vp, curthread);
+        err = zfsctl_traverse_begin(&vp, LK_SHARED | LK_RETRY, curthread);
         if (err == 0)
                 err = VOP_VPTOFH(vp, (void *)ap->a_fid);
         zfsctl_traverse_end(vp, err);
@@ -1026,7 +1022,7 @@ zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
         if (sep != NULL) {
                 VN_HOLD(vp);
-                error = traverse(&vp);
+                error = traverse(&vp, LK_SHARED | LK_RETRY);
                 if (error == 0) {
                         if (vp == sep->se_root)
                                 error = EINVAL;
@@ -1034,7 +1030,10 @@ zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
                                 *zfsvfsp = VTOZ(vp)->z_zfsvfs;
                 }
                 mutex_exit(&sdp->sd_lock);
-                VN_RELE(vp);
+                if (error == 0)
+                        VN_URELE(vp);
+                else
+                        VN_RELE(vp);
         } else {
                 error = EINVAL;
                 mutex_exit(&sdp->sd_lock);
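
The VN_URELE()/VN_RELE() split in zfsctl_lookup_objset() follows directly
from point 3 of the commit message: traverse() now returns the vnode locked
on success, so the success path must unlock as well as release. A minimal
sketch of the macro mapping this relies on, assuming the usual FreeBSD
OpenSolaris-compat definitions (the #defines below are not part of this
diff):

/*
 * Assumed FreeBSD OpenSolaris-compat mapping:
 */
#define VN_RELE(vp)     vrele(vp)       /* drop a reference, leave the lock alone */
#define VN_URELE(vp)    vput(vp)        /* unlock and drop a reference */

With that mapping, the success path (vnode returned held and locked by
traverse()) unlocks and releases in one step, while the error path only
drops the reference on a vnode that was never returned locked.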
