vfs: clean up vputx a little

1. replace the hand-rolled macros for the operation type with an enum
2. unlock the vnode in vput itself; there is no need to branch on the
operation inside vputx. existence of VPUTX_VPUT remains significant in that
the inactive variant adds LK_NOWAIT to the locking request (a condensed
sketch of the result follows this list)
3. remove the useless v_usecount assertion. a few lines above, the code
checks whether v_usecount > 0 and returns if so; should the value ever be
negative, the refcount code would fail first
4. the "return vnode %p to the freelist" CTR line is incorrect, as vdrop
may find the vnode with holdcnt > 1. if the line should exist at all, it
should be moved to vdrop
5. no need to set error = 0 for every case; only VPUTX_VUNREF needs it
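
In outline, the reworked entry points look as follows. This is a condensed
sketch with the refcounting and inactive-processing logic elided, not the
verbatim source; the authoritative change is the diff below.

enum vputx_op { VPUTX_VRELE, VPUTX_VPUT, VPUTX_VUNREF };

static void
vputx(struct vnode *vp, enum vputx_op func)
{
	int error;

	/* ... drop the use count, return if it was not the last ... */

	/* Acquire the vnode lock for inactive processing. */
	switch (func) {
	case VPUTX_VRELE:
		/* Not locked by the caller; a blocking acquire is fine. */
		error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
		break;
	case VPUTX_VPUT:
		/* vput() already unlocked; avoid sleeping to relock. */
		error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT);
		break;
	case VPUTX_VUNREF:
		/* The only case which may otherwise leave error unset. */
		error = 0;
		if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
			error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK);
		break;
	}

	/* ... perform or skip inactive processing based on error ... */
}

void
vput(struct vnode *vp)
{

	/* Unlock up front instead of branching on func in vputx(). */
	VOP_UNLOCK(vp, 0);
	vputx(vp, VPUTX_VPUT);
}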

Reviewed by:	kib, jeff (previous version)
Differential Revision:	https://reviews.freebsd.org/D22718
Mateusz Guzik 2019-12-08 21:13:07 +00:00
parent 35e9bfc98f
commit 791a24c7ea
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=355536


@@ -2968,9 +2968,7 @@ vrefcnt(struct vnode *vp)
 	return (vp->v_usecount);
 }
 
-#define	VPUTX_VRELE	1
-#define	VPUTX_VPUT	2
-#define	VPUTX_VUNREF	3
+enum vputx_op { VPUTX_VRELE, VPUTX_VPUT, VPUTX_VUNREF };
 
 /*
  * Decrement the use and hold counts for a vnode.
@@ -2978,36 +2976,19 @@ vrefcnt(struct vnode *vp)
  * See an explanation near vget() as to why atomic operation is safe.
  */
 static void
-vputx(struct vnode *vp, int func)
+vputx(struct vnode *vp, enum vputx_op func)
 {
 	int error;
 
 	KASSERT(vp != NULL, ("vputx: null vp"));
 	if (func == VPUTX_VUNREF)
 		ASSERT_VOP_LOCKED(vp, "vunref");
-	else if (func == VPUTX_VPUT)
-		ASSERT_VOP_LOCKED(vp, "vput");
-	else
-		KASSERT(func == VPUTX_VRELE, ("vputx: wrong func"));
 	ASSERT_VI_UNLOCKED(vp, __func__);
 	VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp,
 	    ("%s: wrong ref counts", __func__));
 
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 
-	/*
-	 * It is an invariant that all VOP_* calls operate on a held vnode.
-	 * We may be only having an implicit hold stemming from our usecount,
-	 * which we are about to release. If we unlock the vnode afterwards we
-	 * open a time window where someone else dropped the last usecount and
-	 * proceeded to free the vnode before our unlock finished. For this
-	 * reason we unlock the vnode early. This is a little bit wasteful as
-	 * it may be the vnode is exclusively locked and inactive processing is
-	 * needed, in which case we are adding work.
-	 */
-	if (func == VPUTX_VPUT)
-		VOP_UNLOCK(vp, 0);
-
 	/*
 	 * We want to hold the vnode until the inactive finishes to
 	 * prevent vgone() races. We drop the use count here and the
@@ -3034,15 +3015,6 @@ vputx(struct vnode *vp, int func)
 		return;
 	}
 
-	error = 0;
-
-	if (vp->v_usecount != 0) {
-		vn_printf(vp, "vputx: usecount not zero for vnode ");
-		panic("vputx: usecount not zero");
-	}
-
-	CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp);
-
 	/*
 	 * Check if the fs wants to perform inactive processing. Note we
 	 * may be only holding the interlock, in which case it is possible
@@ -3071,6 +3043,7 @@ vputx(struct vnode *vp, int func)
 		VI_LOCK(vp);
 		break;
 	case VPUTX_VUNREF:
+		error = 0;
 		if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
 			error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK);
 			VI_LOCK(vp);
@@ -3103,11 +3076,21 @@ vrele(struct vnode *vp)
  * Release an already locked vnode.  This give the same effects as
  * unlock+vrele(), but takes less time and avoids releasing and
  * re-aquiring the lock (as vrele() acquires the lock internally.)
+ *
+ * It is an invariant that all VOP_* calls operate on a held vnode.
+ * We may be only having an implicit hold stemming from our usecount,
+ * which we are about to release. If we unlock the vnode afterwards we
+ * open a time window where someone else dropped the last usecount and
+ * proceeded to free the vnode before our unlock finished. For this
+ * reason we unlock the vnode early. This is a little bit wasteful as
+ * it may be the vnode is exclusively locked and inactive processing is
+ * needed, in which case we are adding work.
  */
 void
 vput(struct vnode *vp)
 {
 
+	VOP_UNLOCK(vp, 0);
 	vputx(vp, VPUTX_VPUT);
 }
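
To make the window described in the comment above concrete, here is the
ordering the code deliberately avoids. This is a hypothetical counterexample;
vput_unsafe is an invented name, not code from the tree.

/* Hypothetical buggy ordering -- NOT the committed code: */
void
vput_unsafe(struct vnode *vp)
{

	/*
	 * vputx() may drop what was our only (implicit, usecount-based)
	 * hold; another thread can then drop the last use and hold counts
	 * and free the vnode...
	 */
	vputx(vp, VPUTX_VPUT);
	/* ...making this unlock a use-after-free. */
	VOP_UNLOCK(vp, 0);
}

Unlocking first, as the committed vput() does, closes the window; the cost
is having to relock (hence the LK_NOWAIT attempt in vputx()) when inactive
processing turns out to be needed.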