vfs: unlazy before dooming the vnode

With this change, holding the listmtx lock postpones dooming of the vnode.
Use this fact to simplify iteration over the lazy list. It also allows
filters to safely access ->v_data.

Reviewed by:	kib (early version)
Differential Revision:	https://reviews.freebsd.org/D23397
Author:	mjg
Date:	2020-01-30 02:12:52 +00:00
Commit:	eaa936ec51
Parent:	eb9cc37db3
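The claim about filters can be made concrete before the diff. Below is a
hedged sketch of a lazy-list filter; the callback contract (invoked by
mnt_vnode_next_lazy() with mnt_listmtx held) comes from the code below, while
every example_* name and the int-returning signature assumed to match
mnt_lazy_cb_t are inventions for illustration. Because dooming is now
postponed while mnt_listmtx is held, the filter may dereference ->v_data
without taking the vnode interlock:

/*
 * Hypothetical lazy-list filter; all example_* names are invented.
 * Runs with mnt_listmtx held, which after this change postpones
 * dooming: ->v_data cannot be reclaimed from under us.
 */
struct example_node {
	int	n_flag;			/* EXAMPLE_DIRTY et al. */
};
#define	EXAMPLE_DIRTY	0x01

static int
example_dirty_filter(struct vnode *vp, void *cbarg __unused)
{
	struct example_node *np;

	np = vp->v_data;
	return (np != NULL && (np->n_flag & EXAMPLE_DIRTY) != 0);
}

Before this change a filter could race with reclamation tearing down
->v_data, so it could not safely look at it.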


--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1805,20 +1805,13 @@ delmntque(struct vnode *vp)
 {
 	struct mount *mp;
 
+	VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
+
 	mp = vp->v_mount;
 	if (mp == NULL)
 		return;
 	MNT_ILOCK(mp);
 	VI_LOCK(vp);
-	if (vp->v_mflag & VMP_LAZYLIST) {
-		mtx_lock(&mp->mnt_listmtx);
-		if (vp->v_mflag & VMP_LAZYLIST) {
-			vp->v_mflag &= ~VMP_LAZYLIST;
-			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
-			mp->mnt_lazyvnodelistsize--;
-		}
-		mtx_unlock(&mp->mnt_listmtx);
-	}
 	vp->v_mount = NULL;
 	VI_UNLOCK(vp);
 	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
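A call-order sketch (ours, not part of the commit) of why delmntque() may now
assert the flag is clear when reached from the reclaim path shown in the
hunks below:

/*
 * vgone(vp)
 *   vgonel(vp)
 *     vunlazy_gone(vp);		clears VMP_LAZYLIST before dooming
 *     vp->v_irflag |= VIRF_DOOMED;
 *     ...
 *     delmntque(vp);			lazy-list removal already done
 */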
@@ -3079,6 +3072,11 @@ vlazy(struct vnode *vp)
 	if ((vp->v_mflag & VMP_LAZYLIST) != 0)
 		return;
+	/*
+	 * We may get here for inactive routines after the vnode got doomed.
+	 */
+	if (VN_IS_DOOMED(vp))
+		return;
 	mp = vp->v_mount;
 	mtx_lock(&mp->mnt_listmtx);
 	if ((vp->v_mflag & VMP_LAZYLIST) == 0) {
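A minimal sketch of the kind of caller the new comment refers to, loosely
modeled on vdefer_inactive() from the same file (simplified; the real
routine does more bookkeeping and its body is not part of this diff):

static void
example_defer_inactive(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	/*
	 * The vnode may have been doomed since the decision to defer
	 * inactive processing was made; vlazy() now degrades to a no-op
	 * here instead of every such caller having to check.
	 */
	vlazy(vp);
	vdropl(vp);
}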
@@ -3089,6 +3087,30 @@ vlazy(struct vnode *vp)
 	mtx_unlock(&mp->mnt_listmtx);
 }
 
+/*
+ * This routine is only meant to be called from vgonel prior to dooming
+ * the vnode.
+ */
+static void
+vunlazy_gone(struct vnode *vp)
+{
+	struct mount *mp;
+
+	ASSERT_VOP_ELOCKED(vp, __func__);
+	ASSERT_VI_LOCKED(vp, __func__);
+	VNPASS(!VN_IS_DOOMED(vp), vp);
+
+	if (vp->v_mflag & VMP_LAZYLIST) {
+		mp = vp->v_mount;
+		mtx_lock(&mp->mnt_listmtx);
+		VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
+		vp->v_mflag &= ~VMP_LAZYLIST;
+		TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
+		mp->mnt_lazyvnodelistsize--;
+		mtx_unlock(&mp->mnt_listmtx);
+	}
+}
+
 static void
 vdefer_inactive(struct vnode *vp)
 {
@@ -3808,6 +3830,7 @@ vgonel(struct vnode *vp)
 	 */
 	if (vp->v_irflag & VIRF_DOOMED)
 		return;
+	vunlazy_gone(vp);
 	vp->v_irflag |= VIRF_DOOMED;
 
 	/*
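This one-line call is the heart of the change. A timing sketch (ours) of the
guarantee it establishes for vnodes on the lazy list:

/*
 *	iterator thread				reclaiming thread
 *	---------------				-----------------
 *	mtx_lock(&mp->mnt_listmtx);
 *	... inspect vp, vp->v_data ...		vgonel(vp)
 *						  vunlazy_gone(vp)
 *						    mtx_lock(&mp->mnt_listmtx)
 *						    (blocks)
 *	mtx_unlock(&mp->mnt_listmtx);
 *						    vp leaves the lazy list
 *						  vp->v_irflag |= VIRF_DOOMED;
 *
 * Dooming of a vnode on the lazy list cannot begin while mnt_listmtx is
 * held, which is what makes ->v_data stable for filters.
 */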
@@ -6221,8 +6244,6 @@ static bool
 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
     struct vnode *vp)
 {
-	const struct vnode *tmp;
-	bool held, ret;
 
 	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
 	    TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
@@ -6232,65 +6253,37 @@ mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
 	ASSERT_VI_UNLOCKED(vp, __func__);
 	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
 
-	ret = false;
-
 	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
 	TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);
 
 	/*
 	 * Use a hold to prevent vp from disappearing while the mount vnode
 	 * list lock is dropped and reacquired. Normally a hold would be
 	 * acquired with vhold(), but that might try to acquire the vnode
 	 * interlock, which would be a LOR with the mount vnode list lock.
 	 */
-	held = refcount_acquire_if_not_zero(&vp->v_holdcnt);
+	vholdnz(vp);
 	mtx_unlock(&mp->mnt_listmtx);
-	if (!held)
-		goto abort;
 	VI_LOCK(vp);
-	if (!refcount_release_if_not_last(&vp->v_holdcnt)) {
-		vdropl(vp);
-		goto abort;
+	if (VN_IS_DOOMED(vp)) {
+		VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
+		goto out_lost;
 	}
-	mtx_lock(&mp->mnt_listmtx);
+	VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
 
 	/*
-	 * Determine whether the vnode is still the next one after the marker,
-	 * excepting any other markers. If the vnode has not been doomed by
-	 * vgone() then the hold should have ensured that it remained on the
-	 * lazy list. If it has been doomed but is still on the lazy list,
-	 * don't abort, but rather skip over it (avoid spinning on doomed
-	 * vnodes).
+	 * Since we had a period with no locks held we may be the last
+	 * remaining user, in which case there is nothing to do.
 	 */
-	tmp = mvp;
-	do {
-		tmp = TAILQ_NEXT(tmp, v_lazylist);
-	} while (tmp != NULL && tmp->v_type == VMARKER);
-	if (tmp != vp) {
-		mtx_unlock(&mp->mnt_listmtx);
-		VI_UNLOCK(vp);
-		goto abort;
-	}
-	ret = true;
-	goto out;
-abort:
+	if (!refcount_release_if_not_last(&vp->v_holdcnt))
+		goto out_lost;
+	mtx_lock(&mp->mnt_listmtx);
+	return (true);
+out_lost:
+	vdropl(vp);
 	maybe_yield();
 	mtx_lock(&mp->mnt_listmtx);
-out:
-	if (ret)
-		ASSERT_VI_LOCKED(vp, __func__);
-	else
-		ASSERT_VI_UNLOCKED(vp, __func__);
-	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
-	return (ret);
+	return (false);
 }
 
 static struct vnode *
 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
     void *cbarg)
 {
-	struct vnode *vp, *nvp;
+	struct vnode *vp;
 
 	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
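Two primitives carry the simplified relock dance above: vholdnz() bumps the
hold count while asserting it was already non-zero (guaranteed here by the
vnode sitting on the lazy list), and refcount_release_if_not_last() drops the
temporary hold unless it is the last one. A condensed sketch, with the
doomed-vnode check elided and the function name invented:

static bool
example_relock(struct mount *mp, struct vnode *vp)
{

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	vholdnz(vp);			/* bare increment; count known > 0 */
	mtx_unlock(&mp->mnt_listmtx);	/* back off the wrong-order lock */
	VI_LOCK(vp);			/* interlock, then list lock */
	if (refcount_release_if_not_last(&vp->v_holdcnt)) {
		/* Others still hold the vnode; resume with both locks. */
		mtx_lock(&mp->mnt_listmtx);
		return (true);
	}
	/* Ours was the last hold; vdropl() performs the full drop. */
	vdropl(vp);
	mtx_lock(&mp->mnt_listmtx);
	return (false);
}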
@@ -6306,7 +6299,8 @@ restart:
 		 * long string of vnodes we don't care about and hog the list
 		 * as a result. Check for it and requeue the marker.
 		 */
-		if (VN_IS_DOOMED(vp) || !cb(vp, cbarg)) {
+		VNPASS(!VN_IS_DOOMED(vp), vp);
+		if (!cb(vp, cbarg)) {
 			if (!should_yield()) {
 				vp = TAILQ_NEXT(vp, v_lazylist);
 				continue;
@@ -6321,9 +6315,7 @@ restart:
 			goto restart;
 		}
 		/*
-		 * Try-lock because this is the wrong lock order. If that does
-		 * not succeed, drop the mount vnode list lock and try to
-		 * reacquire it and the vnode interlock in the right order.
+		 * Try-lock because this is the wrong lock order.
 		 */
 		if (!VI_TRYLOCK(vp) &&
 		    !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
@@ -6331,11 +6323,9 @@ restart:
 		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
 		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
 		    ("alien vnode on the lazy list %p %p", vp, mp));
-		if (vp->v_mount == mp && !VN_IS_DOOMED(vp))
-			break;
-		nvp = TAILQ_NEXT(vp, v_lazylist);
-		VI_UNLOCK(vp);
-		vp = nvp;
+		VNPASS(vp->v_mount == mp, vp);
+		VNPASS(!VN_IS_DOOMED(vp), vp);
+		break;
 	}
 	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
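Finally, a sketch of a consumer, using the public MNT_VNODE_FOREACH_LAZY()
macro from sys/mount.h together with the hypothetical filter from the
introduction; with this commit the loop body receives vnodes that are
interlocked and, per the new assertions, never doomed:

static void
example_scan_lazy(struct mount *mp)
{
	struct vnode *mvp, *vp;

	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, example_dirty_filter, NULL) {
		/* A real consumer would vget() and do its work here. */
		VI_UNLOCK(vp);
	}
}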