The existing VOP_VPTOCNP() interface has a fatal flaw that is critical for nullfs: the resulting vnode is only required to be held on return from a successful call to the vop, instead of being referenced. The nullfs VOP_INACTIVE() method reclaims the vnode, which, in combination with the VOP_VPTOCNP() interface, means that the directory vnode returned from VOP_VPTOCNP() can be reclaimed before the caller uses it, causing vn_fullpath() to fail with EBADF or the like.

Change the VOP_VPTOCNP() interface: the dvp must now be returned referenced. Convert all in-tree implementations of VOP_VPTOCNP(), which is trivial because vhold(9) and vref(9) have similar locking prerequisites. Out-of-tree filesystem implementations of VOP_VPTOCNP(), if any, should have no trouble with the fix.

Tested by:	pho
Reviewed by:	mckusick
MFC after:	3 weeks (subject to re approval)
commit f82360acf2 (parent f82ee01c1c)
@@ -1594,7 +1594,7 @@ zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
                 *ap->a_buflen -= len;
                 bcopy(sep->se_name, ap->a_buf + *ap->a_buflen, len);
                 mutex_exit(&sdp->sd_lock);
-                vhold(dvp);
+                vref(dvp);
                 *ap->a_vpp = dvp;
         }
         VN_RELE(dvp);
@@ -261,7 +261,7 @@ devfs_vptocnp(struct vop_vptocnp_args *ap)
         } else if (vp->v_type == VDIR) {
                 if (dd == dmp->dm_rootdir) {
                         *dvp = vp;
-                        vhold(*dvp);
+                        vref(*dvp);
                         goto finished;
                 }
                 i -= dd->de_dirent->d_namlen;
@@ -289,6 +289,8 @@ devfs_vptocnp(struct vop_vptocnp_args *ap)
                 mtx_unlock(&devfs_de_interlock);
                 vholdl(*dvp);
                 VI_UNLOCK(*dvp);
+                vref(*dvp);
+                vdrop(*dvp);
         } else {
                 mtx_unlock(&devfs_de_interlock);
                 error = ENOENT;
@@ -784,6 +784,7 @@ null_vptocnp(struct vop_vptocnp_args *ap)
         vhold(lvp);
         VOP_UNLOCK(vp, 0); /* vp is held by vn_vptocnp_locked that called us */
         ldvp = lvp;
+        vref(lvp);
         error = vn_vptocnp(&ldvp, cred, ap->a_buf, ap->a_buflen);
         vdrop(lvp);
         if (error != 0) {
@@ -797,19 +798,17 @@ null_vptocnp(struct vop_vptocnp_args *ap)
          */
         error = vn_lock(ldvp, LK_EXCLUSIVE);
         if (error != 0) {
+                vrele(ldvp);
                 vn_lock(vp, locked | LK_RETRY);
-                vdrop(ldvp);
                 return (ENOENT);
         }
         vref(ldvp);
-        vdrop(ldvp);
         error = null_nodeget(vp->v_mount, ldvp, dvp);
         if (error == 0) {
 #ifdef DIAGNOSTIC
                 NULLVPTOLOWERVP(*dvp);
 #endif
-                vhold(*dvp);
-                vput(*dvp);
+                VOP_UNLOCK(*dvp, 0); /* keep reference on *dvp */
         } else
                 vput(ldvp);
@@ -410,8 +410,7 @@ pfs_vptocnp(struct vop_vptocnp_args *ap)
         }

         *buflen = i;
-        vhold(*dvp);
-        vput(*dvp);
+        VOP_UNLOCK(*dvp, 0);
         vn_lock(vp, locked | LK_RETRY);
         vfs_unbusy(mp);
@@ -1068,16 +1068,8 @@ vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)

         CACHE_RLOCK();
         error = vn_vptocnp_locked(vp, cred, buf, buflen);
-        if (error == 0) {
-                /*
-                 * vn_vptocnp_locked() dropped hold acquired by
-                 * VOP_VPTOCNP immediately after locking the
-                 * cache. Since we are going to drop the cache rlock,
-                 * re-hold the result.
-                 */
-                vhold(*vp);
+        if (error == 0)
                 CACHE_RUNLOCK();
-        }
         return (error);
 }
@@ -1096,6 +1088,9 @@ vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
         if (ncp != NULL) {
                 if (*buflen < ncp->nc_nlen) {
                         CACHE_RUNLOCK();
+                        vfslocked = VFS_LOCK_GIANT((*vp)->v_mount);
+                        vrele(*vp);
+                        VFS_UNLOCK_GIANT(vfslocked);
                         numfullpathfail4++;
                         error = ENOMEM;
                         SDT_PROBE(vfs, namecache, fullpath, return, error,
@@ -1106,18 +1101,23 @@ vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
                 memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
                 SDT_PROBE(vfs, namecache, fullpath, hit, ncp->nc_dvp,
                     ncp->nc_name, vp, 0, 0);
+                dvp = *vp;
                 *vp = ncp->nc_dvp;
+                vref(*vp);
+                CACHE_RUNLOCK();
+                vfslocked = VFS_LOCK_GIANT(dvp->v_mount);
+                vrele(dvp);
+                VFS_UNLOCK_GIANT(vfslocked);
+                CACHE_RLOCK();
                 return (0);
         }
         SDT_PROBE(vfs, namecache, fullpath, miss, vp, 0, 0, 0, 0);

-        vhold(*vp);
         CACHE_RUNLOCK();
         vfslocked = VFS_LOCK_GIANT((*vp)->v_mount);
         vn_lock(*vp, LK_SHARED | LK_RETRY);
         error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
-        VOP_UNLOCK(*vp, 0);
-        vdrop(*vp);
+        vput(*vp);
         VFS_UNLOCK_GIANT(vfslocked);
         if (error) {
                 numfullpathfail2++;
@@ -1128,16 +1128,20 @@ vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,

         *vp = dvp;
         CACHE_RLOCK();
-        if ((*vp)->v_iflag & VI_DOOMED) {
+        if (dvp->v_iflag & VI_DOOMED) {
                 /* forced unmount */
                 CACHE_RUNLOCK();
-                vdrop(*vp);
+                vfslocked = VFS_LOCK_GIANT(dvp->v_mount);
+                vrele(dvp);
+                VFS_UNLOCK_GIANT(vfslocked);
                 error = ENOENT;
                 SDT_PROBE(vfs, namecache, fullpath, return, error, vp,
                     NULL, 0, 0);
                 return (error);
         }
-        vdrop(*vp);
+        /*
+         * *vp has its use count incremented still.
+         */

         return (0);
 }
@@ -1149,10 +1153,11 @@ static int
 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
     char *buf, char **retbuf, u_int buflen)
 {
-        int error, slash_prefixed;
+        int error, slash_prefixed, vfslocked;
 #ifdef KDTRACE_HOOKS
         struct vnode *startvp = vp;
 #endif
+        struct vnode *vp1;

         buflen--;
         buf[buflen] = '\0';
@@ -1161,6 +1166,7 @@ vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,

         SDT_PROBE(vfs, namecache, fullpath, entry, vp, 0, 0, 0, 0);
         numfullpathcalls++;
+        vref(vp);
         CACHE_RLOCK();
         if (vp->v_type != VDIR) {
                 error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
@@ -1168,6 +1174,9 @@ vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
                         return (error);
                 if (buflen == 0) {
                         CACHE_RUNLOCK();
+                        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+                        vrele(vp);
+                        VFS_UNLOCK_GIANT(vfslocked);
                         return (ENOMEM);
                 }
                 buf[--buflen] = '/';
@@ -1177,16 +1186,29 @@ vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
                 if (vp->v_vflag & VV_ROOT) {
                         if (vp->v_iflag & VI_DOOMED) { /* forced unmount */
                                 CACHE_RUNLOCK();
+                                vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+                                vrele(vp);
+                                VFS_UNLOCK_GIANT(vfslocked);
                                 error = ENOENT;
                                 SDT_PROBE(vfs, namecache, fullpath, return,
                                     error, vp, NULL, 0, 0);
                                 break;
                         }
-                        vp = vp->v_mount->mnt_vnodecovered;
+                        vp1 = vp->v_mount->mnt_vnodecovered;
+                        vref(vp1);
+                        CACHE_RUNLOCK();
+                        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+                        vrele(vp);
+                        VFS_UNLOCK_GIANT(vfslocked);
+                        vp = vp1;
+                        CACHE_RLOCK();
                         continue;
                 }
                 if (vp->v_type != VDIR) {
                         CACHE_RUNLOCK();
+                        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+                        vrele(vp);
+                        VFS_UNLOCK_GIANT(vfslocked);
                         numfullpathfail1++;
                         error = ENOTDIR;
                         SDT_PROBE(vfs, namecache, fullpath, return,
@@ -1198,6 +1220,9 @@ vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
                         break;
                 if (buflen == 0) {
                         CACHE_RUNLOCK();
+                        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+                        vrele(vp);
+                        VFS_UNLOCK_GIANT(vfslocked);
                         error = ENOMEM;
                         SDT_PROBE(vfs, namecache, fullpath, return, error,
                             startvp, NULL, 0, 0);
@@ -1211,6 +1236,9 @@ vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
         if (!slash_prefixed) {
                 if (buflen == 0) {
                         CACHE_RUNLOCK();
+                        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+                        vrele(vp);
+                        VFS_UNLOCK_GIANT(vfslocked);
                         numfullpathfail4++;
                         SDT_PROBE(vfs, namecache, fullpath, return, ENOMEM,
                             startvp, NULL, 0, 0);
@@ -1220,6 +1248,9 @@ vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
         }
         numfullpathfound++;
         CACHE_RUNLOCK();
+        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+        vrele(vp);
+        VFS_UNLOCK_GIANT(vfslocked);

         SDT_PROBE(vfs, namecache, fullpath, return, 0, startvp, buf + buflen,
             0, 0);
@@ -844,7 +844,7 @@ out:
         free(dirbuf, M_TEMP);
         if (!error) {
                 *buflen = i;
-                vhold(*dvp);
+                vref(*dvp);
         }
         if (covered) {
                 vput(*dvp);