vfs: change si_usecount management to count used vnodes

Currently si_usecount is effectively a sum of the usecounts of all associated
vnodes. This is maintained by special-casing VCHR every time a usecount is
modified. Apart from complicating the code a little, it hurts scalability,
since every usecount modification forces a read from a cacheline shared with
that counter.
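
As a rough illustration (a condensed sketch, not the verbatim kernel code; the
helper name and the inlined locking are invented for the example), the old
scheme amounts to:

/*
 * Old scheme (sketch): every usecount acquisition has to check for VCHR,
 * and device vnodes additionally dirty the shared cdev cacheline under
 * dev_lock() on every transition.
 */
static void
v_incr_usecount_old(struct vnode *vp)
{
	if (__predict_false(vp->v_type == VCHR)) {
		dev_lock();
		if (vp->v_rdev != NULL)
			vp->v_rdev->si_usecount++;	/* sum of all usecounts */
		dev_unlock();
	}
	refcount_acquire(&vp->v_usecount);
}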

There are no consumers of the feature in the ports tree. In head there are
only two: revoke and devfs_close. Both can get away with a weaker requirement
than the exact usecount, namely just the count of active vnodes. Changing the
meaning to the latter means si_usecount only has to be modified on 0<->1
usecount transitions, avoiding the check most of the time (and entirely in
something like vrefact).
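
The new meaning can be sketched as follows (illustrative only; the actual
patch spreads this logic across vget/vref/vput and their locking protocols,
and the helper name here is made up):

/*
 * New scheme (sketch): si_usecount counts vnodes with a non-zero usecount,
 * so it is only touched on a 0->1 transition here (and on the matching
 * 1->0 transition at release time).  Bumping an already-active vnode
 * never looks at the cdev.
 */
static void
vchr_ref_sketch(struct vnode *vp)
{
	if (refcount_acquire_if_not_zero(&vp->v_usecount))
		return;		/* already in use; si_usecount unchanged */
	VI_LOCK(vp);
	if (vp->v_usecount == 0 && vp->v_type == VCHR && vp->v_rdev != NULL) {
		dev_lock();
		vp->v_rdev->si_usecount++;	/* one more vnode in use */
		dev_unlock();
	}
	refcount_acquire(&vp->v_usecount);
	VI_UNLOCK(vp);
}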

Reviewed by:	kib, jeff
Tested by:	pho
Differential Revision:	https://reviews.freebsd.org/D22202
Author:	Mateusz Guzik
Date:	2019-11-20 12:05:59 +00:00
Commit:	1fccb43c39
Parent:	da248a69aa
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=354890

4 changed files with 42 additions and 96 deletions


@@ -481,7 +481,7 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
 		vp->v_rdev = dev;
 		KASSERT(vp->v_usecount == 1,
 		    ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
-		dev->si_usecount += vp->v_usecount;
+		dev->si_usecount++;
 		/* Special casing of ttys for deadfs. Probably redundant. */
 		dsw = dev->si_devsw;
 		if (dsw != NULL && (dsw->d_flags & D_TTY) != 0)
@@ -581,7 +581,7 @@ devfs_close(struct vop_close_args *ap)
 	 * if the reference count is 2 (this last descriptor
 	 * plus the session), release the reference from the session.
 	 */
-	if (td != NULL) {
+	if (vp->v_usecount == 2 && td != NULL) {
 		p = td->td_proc;
 		PROC_LOCK(p);
 		if (vp == p->p_session->s_ttyvp) {
@@ -591,7 +591,7 @@ devfs_close(struct vop_close_args *ap)
 			if (vp == p->p_session->s_ttyvp) {
 				SESS_LOCK(p->p_session);
 				VI_LOCK(vp);
-				if (count_dev(dev) == 2 &&
+				if (vp->v_usecount == 2 && vcount(vp) == 1 &&
 				    (vp->v_iflag & VI_DOOMED) == 0) {
 					p->p_session->s_ttyvp = NULL;
 					p->p_session->s_ttydp = NULL;
@@ -620,19 +620,19 @@ devfs_close(struct vop_close_args *ap)
 		return (ENXIO);
 	dflags = 0;
 	VI_LOCK(vp);
+	if (vp->v_usecount == 1 && vcount(vp) == 1)
+		dflags |= FLASTCLOSE;
 	if (vp->v_iflag & VI_DOOMED) {
 		/* Forced close. */
 		dflags |= FREVOKE | FNONBLOCK;
 	} else if (dsw->d_flags & D_TRACKCLOSE) {
 		/* Keep device updated on status. */
-	} else if (count_dev(dev) > 1) {
+	} else if ((dflags & FLASTCLOSE) == 0) {
 		VI_UNLOCK(vp);
 		dev_relthread(dev, ref);
 		return (0);
 	}
-	if (count_dev(dev) == 1)
-		dflags |= FLASTCLOSE;
-	vholdl(vp);
+	vholdnz(vp);
 	VI_UNLOCK(vp);
 	vp_locked = VOP_ISLOCKED(vp);
 	VOP_UNLOCK(vp, 0);
@@ -1425,7 +1425,7 @@ devfs_reclaim_vchr(struct vop_reclaim_args *ap)
 	dev = vp->v_rdev;
 	vp->v_rdev = NULL;
 	if (dev != NULL)
-		dev->si_usecount -= vp->v_usecount;
+		dev->si_usecount -= (vp->v_usecount > 0);
 	dev_unlock();
 	VI_UNLOCK(vp);
 	if (dev != NULL)


@@ -2714,26 +2714,11 @@ _vget_prep(struct vnode *vp, bool interlock)
 {
 	enum vgetstate vs;
 
-	if (__predict_true(vp->v_type != VCHR)) {
-		if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-			vs = VGET_USECOUNT;
-		} else {
-			_vhold(vp, interlock);
-			vs = VGET_HOLDCNT;
-		}
+	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
+		vs = VGET_USECOUNT;
 	} else {
-		if (!interlock)
-			VI_LOCK(vp);
-		if (vp->v_usecount == 0) {
-			vholdl(vp);
-			vs = VGET_HOLDCNT;
-		} else {
-			v_incr_devcount(vp);
-			refcount_acquire(&vp->v_usecount);
-			vs = VGET_USECOUNT;
-		}
-		if (!interlock)
-			VI_UNLOCK(vp);
+		_vhold(vp, interlock);
+		vs = VGET_HOLDCNT;
 	}
 	return (vs);
 }
@@ -2796,8 +2781,7 @@ vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
 	 * the vnode around. Otherwise someone else lended their hold count and
 	 * we have to drop ours.
 	 */
-	if (vp->v_type != VCHR &&
-	    refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
 #ifdef INVARIANTS
 		int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1;
 		VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__));
@@ -2823,24 +2807,19 @@ vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
 	 * See the previous section. By the time we get here we may find
 	 * ourselves in the same spot.
 	 */
-	if (vp->v_type != VCHR) {
-		if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
 #ifdef INVARIANTS
-			int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1;
-			VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__));
+		int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1;
+		VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__));
 #else
-			refcount_release(&vp->v_holdcnt);
+		refcount_release(&vp->v_holdcnt);
 #endif
-			VNODE_REFCOUNT_FENCE_ACQ();
-			VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
-			    ("%s: vnode with usecount and VI_OWEINACT set",
-			    __func__));
-			VI_UNLOCK(vp);
-			return (0);
-		}
-	} else {
-		if (vp->v_usecount > 0)
-			refcount_release(&vp->v_holdcnt);
+		VNODE_REFCOUNT_FENCE_ACQ();
+		VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
+		    ("%s: vnode with usecount and VI_OWEINACT set",
+		    __func__));
+		VI_UNLOCK(vp);
+		return (0);
 	}
 	if ((vp->v_iflag & VI_OWEINACT) == 0) {
 		oweinact = 0;
@@ -2868,8 +2847,7 @@ vref(struct vnode *vp)
 
 	ASSERT_VI_UNLOCKED(vp, __func__);
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-	if (vp->v_type != VCHR &&
-	    refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
 		VNODE_REFCOUNT_FENCE_ACQ();
 		VNASSERT(vp->v_holdcnt > 0, vp,
 		    ("%s: active vnode not held", __func__));
@@ -2888,8 +2866,7 @@ vrefl(struct vnode *vp)
 
 	ASSERT_VI_LOCKED(vp, __func__);
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-	if (vp->v_type != VCHR &&
-	    refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
 		VNODE_REFCOUNT_FENCE_ACQ();
 		VNASSERT(vp->v_holdcnt > 0, vp,
 		    ("%s: active vnode not held", __func__));
@@ -2897,8 +2874,7 @@ vrefl(struct vnode *vp)
 		    ("%s: vnode with usecount and VI_OWEINACT set", __func__));
 		return;
 	}
-	if (vp->v_usecount == 0)
-		vholdl(vp);
+	vholdl(vp);
 	if ((vp->v_iflag & VI_OWEINACT) != 0) {
 		vp->v_iflag &= ~VI_OWEINACT;
 		VNODE_REFCOUNT_FENCE_REL();
@@ -2912,12 +2888,6 @@ vrefact(struct vnode *vp)
 {
 
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-	if (__predict_false(vp->v_type == VCHR)) {
-		VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp,
-		    ("%s: wrong ref counts", __func__));
-		vref(vp);
-		return;
-	}
 #ifdef INVARIANTS
 	int old = atomic_fetchadd_int(&vp->v_usecount, 1);
 	VNASSERT(old > 0, vp, ("%s: wrong use count", __func__));
@@ -2986,31 +2956,22 @@ vputx(struct vnode *vp, int func)
 	 * We want to hold the vnode until the inactive finishes to
 	 * prevent vgone() races. We drop the use count here and the
 	 * hold count below when we're done.
+	 *
+	 * If we release the last usecount we take ownership of the hold
+	 * count which provides liveness of the vnode, in which case we
+	 * have to vdrop.
 	 */
-	if (vp->v_type != VCHR) {
-		/*
-		 * If we release the last usecount we take ownership of the hold
-		 * count which provides liveness of the vnode, in which case we
-		 * have to vdrop.
-		 */
-		if (!refcount_release(&vp->v_usecount))
-			return;
-		VI_LOCK(vp);
-		/*
-		 * By the time we got here someone else might have transitioned
-		 * the count back to > 0.
-		 */
-		if (vp->v_usecount > 0) {
-			vdropl(vp);
-			return;
-		}
-	} else {
-		VI_LOCK(vp);
-		v_decr_devcount(vp);
-		if (!refcount_release(&vp->v_usecount)) {
-			VI_UNLOCK(vp);
-			return;
-		}
+	if (!refcount_release(&vp->v_usecount))
+		return;
+	VI_LOCK(vp);
+	v_decr_devcount(vp);
+	/*
+	 * By the time we got here someone else might have transitioned
+	 * the count back to > 0.
+	 */
+	if (vp->v_usecount > 0) {
+		vdropl(vp);
+		return;
 	}
 	if (vp->v_iflag & VI_DOINGINACT) {
 		vdropl(vp);
@@ -3739,20 +3700,6 @@ vcount(struct vnode *vp)
 	return (count);
 }
 
-/*
- * Same as above, but using the struct cdev *as argument
- */
-int
-count_dev(struct cdev *dev)
-{
-	int count;
-
-	dev_lock();
-	count = dev->si_usecount;
-	dev_unlock();
-	return(count);
-}
-
 /*
  * Print out a description of a vnode.
  */


@@ -4148,7 +4148,7 @@ sys_revoke(struct thread *td, struct revoke_args *uap)
 		if (error != 0)
 			goto out;
 	}
-	if (vcount(vp) > 1)
+	if (vp->v_usecount > 1 || vcount(vp) > 1)
 		VOP_REVOKE(vp, REVOKEALL);
 out:
 	vput(vp);


@@ -256,7 +256,6 @@ void make_dev_args_init_impl(struct make_dev_args *_args, size_t _sz);
 #define make_dev_args_init(a) \
 	make_dev_args_init_impl((a), sizeof(struct make_dev_args))
 
-int count_dev(struct cdev *_dev);
 void delist_dev(struct cdev *_dev);
 void destroy_dev(struct cdev *_dev);
 int destroy_dev_sched(struct cdev *dev);