vfs: remove production kernel checks and mp == NULL support from vdrop

1. The only place in the tree which calls getnewvnode with mp == NULL does it
for vp_crossmp which will never execute this codepath. Any vnode which legally
has ->v_mount == NULL is also doomed, which once more won't execute this code.
2. Remove an assertion for v_holdcnt from production kernels. It gets taken care
of by refcount macros in debug kernels.

Any code which would want to pass NULL mp can construct a fake one instead.

Reviewed by:	kib (previous version)
Differential Revision:	https://reviews.freebsd.org/D22722
This commit is contained in:
mjg 2019-12-27 11:26:12 +00:00
parent 2c855240cc
commit af68919236

View File

@@ -3225,35 +3225,20 @@ vdrop_deactivate(struct vnode *vp)
("vdrop: freeing when we shouldn't")); ("vdrop: freeing when we shouldn't"));
if ((vp->v_iflag & VI_OWEINACT) == 0) { if ((vp->v_iflag & VI_OWEINACT) == 0) {
mp = vp->v_mount; mp = vp->v_mount;
if (mp != NULL) { mtx_lock(&mp->mnt_listmtx);
mtx_lock(&mp->mnt_listmtx); if (vp->v_iflag & VI_ACTIVE) {
if (vp->v_iflag & VI_ACTIVE) { vp->v_iflag &= ~VI_ACTIVE;
vp->v_iflag &= ~VI_ACTIVE; TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
TAILQ_REMOVE(&mp->mnt_activevnodelist, mp->mnt_activevnodelistsize--;
vp, v_actfreelist);
mp->mnt_activevnodelistsize--;
}
TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist,
vp, v_actfreelist);
mp->mnt_tmpfreevnodelistsize++;
vp->v_iflag |= VI_FREE;
vp->v_mflag |= VMP_TMPMNTFREELIST;
VI_UNLOCK(vp);
if (mp->mnt_tmpfreevnodelistsize >=
mnt_free_list_batch)
vnlru_return_batch_locked(mp);
mtx_unlock(&mp->mnt_listmtx);
} else {
VNASSERT((vp->v_iflag & VI_ACTIVE) == 0, vp,
("vdrop: active vnode not on per mount vnode list"));
mtx_lock(&vnode_free_list_mtx);
TAILQ_INSERT_TAIL(&vnode_free_list, vp,
v_actfreelist);
freevnodes++;
vp->v_iflag |= VI_FREE;
VI_UNLOCK(vp);
mtx_unlock(&vnode_free_list_mtx);
} }
TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist);
mp->mnt_tmpfreevnodelistsize++;
vp->v_iflag |= VI_FREE;
vp->v_mflag |= VMP_TMPMNTFREELIST;
VI_UNLOCK(vp);
if (mp->mnt_tmpfreevnodelistsize >= mnt_free_list_batch)
vnlru_return_batch_locked(mp);
mtx_unlock(&mp->mnt_listmtx);
} else { } else {
VI_UNLOCK(vp); VI_UNLOCK(vp);
counter_u64_add(free_owe_inact, 1); counter_u64_add(free_owe_inact, 1);
@@ -3266,10 +3251,6 @@ vdrop(struct vnode *vp)
ASSERT_VI_UNLOCKED(vp, __func__); ASSERT_VI_UNLOCKED(vp, __func__);
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
if (__predict_false((int)vp->v_holdcnt <= 0)) {
vn_printf(vp, "vdrop: holdcnt %d", vp->v_holdcnt);
panic("vdrop: wrong holdcnt");
}
if (refcount_release_if_not_last(&vp->v_holdcnt)) if (refcount_release_if_not_last(&vp->v_holdcnt))
return; return;
VI_LOCK(vp); VI_LOCK(vp);
@@ -3282,10 +3263,6 @@ vdropl(struct vnode *vp)
ASSERT_VI_LOCKED(vp, __func__); ASSERT_VI_LOCKED(vp, __func__);
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
if (__predict_false((int)vp->v_holdcnt <= 0)) {
vn_printf(vp, "vdrop: holdcnt %d", vp->v_holdcnt);
panic("vdrop: wrong holdcnt");
}
if (!refcount_release(&vp->v_holdcnt)) { if (!refcount_release(&vp->v_holdcnt)) {
VI_UNLOCK(vp); VI_UNLOCK(vp);
return; return;