From 19337211f83156c151fcc51dc48edc47b4b96cd6 Mon Sep 17 00:00:00 2001
From: Mateusz Guzik
Date: Fri, 21 Aug 2020 21:24:14 +0000
Subject: [PATCH] vfs: fix freevnode accounting

Most notably add the missing decrement to vhold_smr.

Reported by:	Dan Nelson
Fixes:	r362827 ("vfs: protect vnodes with smr")
---
 sys/kern/vfs_subr.c | 49 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 35 insertions(+), 14 deletions(-)

diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index c1e0656095e7..1f9267636a55 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1293,6 +1293,28 @@ static int vnlruproc_sig;
  */
 #define VNLRU_FREEVNODES_SLOP 128
 
+static __inline void
+vn_freevnodes_inc(void)
+{
+	struct vdbatch *vd;
+
+	critical_enter();
+	vd = DPCPU_PTR(vd);
+	vd->freevnodes++;
+	critical_exit();
+}
+
+static __inline void
+vn_freevnodes_dec(void)
+{
+	struct vdbatch *vd;
+
+	critical_enter();
+	vd = DPCPU_PTR(vd);
+	vd->freevnodes--;
+	critical_exit();
+}
+
 static u_long
 vnlru_read_freevnodes(void)
 {
@@ -3195,19 +3217,14 @@ vunref(struct vnode *vp)
 void
 vhold(struct vnode *vp)
 {
-	struct vdbatch *vd;
 	int old;
 
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 	old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
 	VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
 	    ("%s: wrong hold count %d", __func__, old));
-	if (old != 0)
-		return;
-	critical_enter();
-	vd = DPCPU_PTR(vd);
-	vd->freevnodes--;
-	critical_exit();
+	if (old == 0)
+		vn_freevnodes_dec();
 }
 
 void
@@ -3268,8 +3285,11 @@ vhold_smr(struct vnode *vp)
 		}
 
 		VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
-		if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1))
+		if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
+			if (count == 0)
+				vn_freevnodes_dec();
 			return (true);
+		}
 	}
 }
 
@@ -3309,17 +3329,13 @@ vdbatch_enqueue(struct vnode *vp)
 	VNASSERT(!VN_IS_DOOMED(vp), vp,
 	    ("%s: deferring requeue of a doomed vnode", __func__));
 
-	critical_enter();
-	vd = DPCPU_PTR(vd);
-	vd->freevnodes++;
 	if (vp->v_dbatchcpu != NOCPU) {
 		VI_UNLOCK(vp);
-		critical_exit();
 		return;
 	}
 	sched_pin();
-	critical_exit();
+	vd = DPCPU_PTR(vd);
 	mtx_lock(&vd->lock);
 	MPASS(vd->index < VDBATCH_SIZE);
 	MPASS(vd->tab[vd->index] == NULL);
 	MPASS(vp->v_dbatchcpu == NOCPU);
@@ -3444,6 +3460,7 @@ vdropl(struct vnode *vp)
 		return;
 	}
 	if (!VN_IS_DOOMED(vp)) {
+		vn_freevnodes_inc();
 		vdrop_deactivate(vp);
 		/*
 		 * Also unlocks the interlock. We can't assert on it as we
@@ -3458,7 +3475,8 @@
 	 * We may be racing against vhold_smr. If they win we can just pretend
 	 * we never got this far, they will vdrop later.
 	 */
-	if (!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR)) {
+	if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) {
+		vn_freevnodes_inc();
 		VI_UNLOCK(vp);
 		/*
 		 * We lost the aforementioned race. Any subsequent access is
@@ -3466,6 +3484,9 @@
 		 */
 		return;
 	}
+	/*
+	 * Don't bump freevnodes as this one is going away.
+	 */
 	freevnode(vp);
 }
 
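
The accounting rule the patch restores can be illustrated with a small user-space
sketch (not part of the patch and not kernel code): freevnodes counts vnodes
sitting at hold count 0, so every 0->1 hold transition must decrement it and
every drop back to 0 that leaves the vnode alive must increment it. The
toy_vnode/toy_vhold/toy_vdrop names are made up for the example, the per-CPU
vd->freevnodes batching is collapsed into one global atomic, and the doomed /
freevnode() paths (which deliberately skip the increment) are left out.

#include <assert.h>
#include <stdatomic.h>

static atomic_long freevnodes;		/* stand-in for the per-CPU vd->freevnodes */

struct toy_vnode {
	atomic_int holdcnt;
};

static void
toy_vhold(struct toy_vnode *vp)
{
	int old;

	old = atomic_fetch_add(&vp->holdcnt, 1);
	assert(old >= 0);
	if (old == 0)			/* 0 -> 1: vnode leaves the "free" set */
		atomic_fetch_sub(&freevnodes, 1);
}

static void
toy_vdrop(struct toy_vnode *vp)
{
	int old;

	old = atomic_fetch_sub(&vp->holdcnt, 1);
	assert(old >= 1);
	if (old == 1)			/* 1 -> 0: vnode re-enters the "free" set */
		atomic_fetch_add(&freevnodes, 1);
}

int
main(void)
{
	struct toy_vnode vn = { .holdcnt = 0 };

	atomic_store(&freevnodes, 1);	/* vn starts out free and counted */
	toy_vhold(&vn);			/* counter drops to 0 */
	toy_vhold(&vn);			/* nested hold, counter untouched */
	toy_vdrop(&vn);			/* still held, counter untouched */
	toy_vdrop(&vn);			/* last drop, counter back to 1 */
	assert(atomic_load(&freevnodes) == 1);
	return (0);
}

In the kernel the increment side now lives in vdropl() rather than
vdbatch_enqueue(), so the path that actually frees the vnode can skip it,
which is what the added "Don't bump freevnodes" comment documents.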