From af6891923640e394110f44cc502b71f3881036fe Mon Sep 17 00:00:00 2001
From: mjg
Date: Fri, 27 Dec 2019 11:26:12 +0000
Subject: [PATCH] vfs: remove production kernel checks and mp == NULL support
 from vdrop

1. The only place in the tree which calls getnewvnode with mp == NULL does
   it for vp_crossmp, which will never execute this codepath. Any vnode
   which legally has ->v_mount == NULL is also doomed and once more won't
   execute this code.
2. Remove the v_holdcnt assertion from production kernels. It is already
   taken care of by the refcount macros in debug kernels.

Any code which would want to pass a NULL mp can construct a fake one
instead.

Reviewed by:	kib (previous version)
Differential Revision:	https://reviews.freebsd.org/D22722
---
 sys/kern/vfs_subr.c | 49 +++++++++++++------------------------------------
 1 file changed, 13 insertions(+), 36 deletions(-)

diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 2731dd034920..952de0597400 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -3225,35 +3225,20 @@ vdrop_deactivate(struct vnode *vp)
 	    ("vdrop: freeing when we shouldn't"));
 	if ((vp->v_iflag & VI_OWEINACT) == 0) {
 		mp = vp->v_mount;
-		if (mp != NULL) {
-			mtx_lock(&mp->mnt_listmtx);
-			if (vp->v_iflag & VI_ACTIVE) {
-				vp->v_iflag &= ~VI_ACTIVE;
-				TAILQ_REMOVE(&mp->mnt_activevnodelist,
-				    vp, v_actfreelist);
-				mp->mnt_activevnodelistsize--;
-			}
-			TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist,
-			    vp, v_actfreelist);
-			mp->mnt_tmpfreevnodelistsize++;
-			vp->v_iflag |= VI_FREE;
-			vp->v_mflag |= VMP_TMPMNTFREELIST;
-			VI_UNLOCK(vp);
-			if (mp->mnt_tmpfreevnodelistsize >=
-			    mnt_free_list_batch)
-				vnlru_return_batch_locked(mp);
-			mtx_unlock(&mp->mnt_listmtx);
-		} else {
-			VNASSERT((vp->v_iflag & VI_ACTIVE) == 0, vp,
-			    ("vdrop: active vnode not on per mount vnode list"));
-			mtx_lock(&vnode_free_list_mtx);
-			TAILQ_INSERT_TAIL(&vnode_free_list, vp,
-			    v_actfreelist);
-			freevnodes++;
-			vp->v_iflag |= VI_FREE;
-			VI_UNLOCK(vp);
-			mtx_unlock(&vnode_free_list_mtx);
+		mtx_lock(&mp->mnt_listmtx);
+		if (vp->v_iflag & VI_ACTIVE) {
+			vp->v_iflag &= ~VI_ACTIVE;
+			TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
+			mp->mnt_activevnodelistsize--;
 		}
+		TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist);
+		mp->mnt_tmpfreevnodelistsize++;
+		vp->v_iflag |= VI_FREE;
+		vp->v_mflag |= VMP_TMPMNTFREELIST;
+		VI_UNLOCK(vp);
+		if (mp->mnt_tmpfreevnodelistsize >= mnt_free_list_batch)
+			vnlru_return_batch_locked(mp);
+		mtx_unlock(&mp->mnt_listmtx);
 	} else {
 		VI_UNLOCK(vp);
 		counter_u64_add(free_owe_inact, 1);
@@ -3266,10 +3251,6 @@ vdrop(struct vnode *vp)
 
 	ASSERT_VI_UNLOCKED(vp, __func__);
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-	if (__predict_false((int)vp->v_holdcnt <= 0)) {
-		vn_printf(vp, "vdrop: holdcnt %d", vp->v_holdcnt);
-		panic("vdrop: wrong holdcnt");
-	}
 	if (refcount_release_if_not_last(&vp->v_holdcnt))
 		return;
 	VI_LOCK(vp);
@@ -3282,10 +3263,6 @@ vdropl(struct vnode *vp)
 
 	ASSERT_VI_LOCKED(vp, __func__);
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-	if (__predict_false((int)vp->v_holdcnt <= 0)) {
-		vn_printf(vp, "vdrop: holdcnt %d", vp->v_holdcnt);
-		panic("vdrop: wrong holdcnt");
-	}
 	if (!refcount_release(&vp->v_holdcnt)) {
 		VI_UNLOCK(vp);
 		return;
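
Note (not part of the commit): below is a minimal, standalone C sketch of the
"release the hold count unless it is the last reference" fast path that vdrop()
keeps relying on above. The helper name release_if_not_last and the C11 atomics
are illustrative stand-ins for the kernel's refcount(9) machinery operating on
vp->v_holdcnt, not the actual FreeBSD implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative stand-in for a vnode hold count.  In the kernel this is
 * vp->v_holdcnt manipulated through the refcount(9) macros; a plain C11
 * atomic is used here so the sketch runs in userspace.
 */
static _Atomic unsigned int holdcnt = 2;

/*
 * Drop one reference unless it is the last one.  Returns true when the
 * count was decremented on the fast path, false when the caller holds the
 * final reference and has to take the slow path (for vdrop() that means
 * taking the vnode interlock and deactivating the vnode).
 */
static bool
release_if_not_last(_Atomic unsigned int *cnt)
{
	unsigned int old;

	old = atomic_load(cnt);
	while (old > 1) {
		/* On failure the CAS reloads old and the loop re-checks. */
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return (true);
	}
	return (false);
}

int
main(void)
{
	/* First drop: 2 -> 1, fast path succeeds. */
	printf("fast path: %s\n", release_if_not_last(&holdcnt) ? "yes" : "no");
	/* Second drop: count is 1, caller must run the slow path. */
	printf("fast path: %s\n", release_if_not_last(&holdcnt) ? "yes" : "no");
	return (0);
}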