devfs: rework si_usecount to track opens
This removes a lot of special casing from the VFS layer.

Reviewed by:	kib (previous version)
Tested by:	pho (previous version)
Differential Revision:	https://reviews.freebsd.org/D25612
commit 3b44443626
parent 8bc30d2afd
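The change is easier to follow with a small self-contained model of the new accounting (illustrative only; the model_* names below are invented for this sketch and are not part of the commit). The point is that si_usecount on the cdev and the new de_usecount on the devfs dirent both count opens, so "is this the last close?" can be answered from the device's own counter rather than from VCHR-specific vnode reference tricks:

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for struct cdev and struct devfs_dirent. */
struct model_cdev   { long si_usecount; };	/* opens of the device, across aliases */
struct model_dirent { int  de_usecount; };	/* opens made through this directory entry */

static void
model_open(struct model_cdev *dev, struct model_dirent *de)
{
	/* Mirrors devfs_usecount_add(): an open bumps both counters. */
	dev->si_usecount++;
	de->de_usecount++;
}

static int
model_close(struct model_cdev *dev, struct model_dirent *de)
{
	/* The last close is decided from the device counter... */
	int last = (dev->si_usecount == 1);

	/* ...then both counters are dropped, as devfs_usecount_subl() does. */
	assert(de->de_usecount > 0 && dev->si_usecount >= de->de_usecount);
	dev->si_usecount--;
	de->de_usecount--;
	return (last);
}

int
main(void)
{
	struct model_cdev dev = { 0 };
	struct model_dirent de = { 0 };

	model_open(&dev, &de);
	model_open(&dev, &de);
	printf("last close? %d\n", model_close(&dev, &de));	/* 0 */
	printf("last close? %d\n", model_close(&dev, &de));	/* 1 */
	return (0);
}

In the real code the two counters are only ever updated with devfs_de_interlock and the vnode interlock held, as the devfs_usecount_add()/devfs_usecount_subl() hunks below show.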
@@ -153,6 +153,7 @@ struct devfs_dirent {
 	struct timespec de_ctime;
 	struct vnode *de_vnode;
 	char *de_symlink;
+	int de_usecount;
 };
 
 struct devfs_mount {
@@ -203,6 +204,10 @@ struct devfs_dirent *devfs_vmkdir(struct devfs_mount *, char *, int,
 struct devfs_dirent *devfs_find(struct devfs_dirent *, const char *, int,
 	int);
 
+void devfs_ctty_ref(struct vnode *);
+void devfs_ctty_unref(struct vnode *);
+int devfs_usecount(struct vnode *);
+
 #endif /* _KERNEL */
 
 #endif /* !_FS_DEVFS_DEVFS_H_ */
@@ -222,6 +222,115 @@ devfs_clear_cdevpriv(void)
 	devfs_fpdrop(fp);
 }
 
+static void
+devfs_usecount_add(struct vnode *vp)
+{
+	struct devfs_dirent *de;
+	struct cdev *dev;
+
+	mtx_lock(&devfs_de_interlock);
+	VI_LOCK(vp);
+	VNPASS(vp->v_type == VCHR || vp->v_type == VBAD, vp);
+	if (VN_IS_DOOMED(vp)) {
+		goto out_unlock;
+	}
+
+	de = vp->v_data;
+	dev = vp->v_rdev;
+	MPASS(de != NULL);
+	MPASS(dev != NULL);
+	dev->si_usecount++;
+	de->de_usecount++;
+out_unlock:
+	VI_UNLOCK(vp);
+	mtx_unlock(&devfs_de_interlock);
+}
+
+static void
+devfs_usecount_subl(struct vnode *vp)
+{
+	struct devfs_dirent *de;
+	struct cdev *dev;
+
+	mtx_assert(&devfs_de_interlock, MA_OWNED);
+	ASSERT_VI_LOCKED(vp, __func__);
+	VNPASS(vp->v_type == VCHR || vp->v_type == VBAD, vp);
+
+	de = vp->v_data;
+	dev = vp->v_rdev;
+	if (de == NULL)
+		return;
+	if (dev == NULL) {
+		MPASS(de->de_usecount == 0);
+		return;
+	}
+	if (dev->si_usecount < de->de_usecount)
+		panic("%s: si_usecount underflow for dev %p "
+		    "(has %ld, dirent has %d)\n",
+		    __func__, dev, dev->si_usecount, de->de_usecount);
+	if (VN_IS_DOOMED(vp)) {
+		dev->si_usecount -= de->de_usecount;
+		de->de_usecount = 0;
+	} else {
+		if (de->de_usecount == 0)
+			panic("%s: de_usecount underflow for dev %p\n",
+			    __func__, dev);
+		dev->si_usecount--;
+		de->de_usecount--;
+	}
+}
+
+static void
+devfs_usecount_sub(struct vnode *vp)
+{
+
+	mtx_lock(&devfs_de_interlock);
+	VI_LOCK(vp);
+	devfs_usecount_subl(vp);
+	VI_UNLOCK(vp);
+	mtx_unlock(&devfs_de_interlock);
+}
+
+static int
+devfs_usecountl(struct vnode *vp)
+{
+
+	VNPASS(vp->v_type == VCHR, vp);
+	mtx_assert(&devfs_de_interlock, MA_OWNED);
+	ASSERT_VI_LOCKED(vp, __func__);
+	return (vp->v_rdev->si_usecount);
+}
+
+int
+devfs_usecount(struct vnode *vp)
+{
+	int count;
+
+	VNPASS(vp->v_type == VCHR, vp);
+	mtx_lock(&devfs_de_interlock);
+	VI_LOCK(vp);
+	count = devfs_usecountl(vp);
+	VI_UNLOCK(vp);
+	mtx_unlock(&devfs_de_interlock);
+	return (count);
+}
+
+void
+devfs_ctty_ref(struct vnode *vp)
+{
+
+	vrefact(vp);
+	devfs_usecount_add(vp);
+}
+
+void
+devfs_ctty_unref(struct vnode *vp)
+{
+
+	devfs_usecount_sub(vp);
+	vrele(vp);
+}
+
 /*
  * On success devfs_populate_vp() returns with dmp->dm_lock held.
  */
@@ -487,7 +596,6 @@ loop:
 	/* XXX: v_rdev should be protect by vnode lock */
 	vp->v_rdev = dev;
 	VNPASS(vp->v_usecount == 1, vp);
-	dev->si_usecount++;
 	/* Special casing of ttys for deadfs. Probably redundant. */
 	dsw = dev->si_devsw;
 	if (dsw != NULL && (dsw->d_flags & D_TTY) != 0)
@@ -569,6 +677,7 @@ devfs_close(struct vop_close_args *ap)
 	struct proc *p;
 	struct cdev *dev = vp->v_rdev;
 	struct cdevsw *dsw;
+	struct devfs_dirent *de = vp->v_data;
 	int dflags, error, ref, vp_locked;
 
 	/*
@@ -587,7 +696,7 @@ devfs_close(struct vop_close_args *ap)
 	 * if the reference count is 2 (this last descriptor
 	 * plus the session), release the reference from the session.
 	 */
-	if (vp->v_usecount == 2 && td != NULL) {
+	if (de->de_usecount == 2 && td != NULL) {
 		p = td->td_proc;
 		PROC_LOCK(p);
 		if (vp == p->p_session->s_ttyvp) {
@@ -596,19 +705,20 @@ devfs_close(struct vop_close_args *ap)
 			sx_xlock(&proctree_lock);
 			if (vp == p->p_session->s_ttyvp) {
 				SESS_LOCK(p->p_session);
+				mtx_lock(&devfs_de_interlock);
 				VI_LOCK(vp);
-				if (vp->v_usecount == 2 && vcount(vp) == 1 &&
-				    !VN_IS_DOOMED(vp)) {
+				if (devfs_usecountl(vp) == 2 && !VN_IS_DOOMED(vp)) {
 					p->p_session->s_ttyvp = NULL;
 					p->p_session->s_ttydp = NULL;
 					oldvp = vp;
 				}
 				VI_UNLOCK(vp);
+				mtx_unlock(&devfs_de_interlock);
 				SESS_UNLOCK(p->p_session);
 			}
 			sx_xunlock(&proctree_lock);
 			if (oldvp != NULL)
-				vrele(oldvp);
+				devfs_ctty_unref(oldvp);
 		} else
 			PROC_UNLOCK(p);
 	}
@@ -625,9 +735,12 @@ devfs_close(struct vop_close_args *ap)
 	if (dsw == NULL)
 		return (ENXIO);
 	dflags = 0;
+	mtx_lock(&devfs_de_interlock);
 	VI_LOCK(vp);
-	if (vp->v_usecount == 1 && vcount(vp) == 1)
+	if (devfs_usecountl(vp) == 1)
 		dflags |= FLASTCLOSE;
+	devfs_usecount_subl(vp);
+	mtx_unlock(&devfs_de_interlock);
 	if (VN_IS_DOOMED(vp)) {
 		/* Forced close. */
 		dflags |= FREVOKE | FNONBLOCK;
@@ -850,7 +963,7 @@ devfs_ioctl(struct vop_ioctl_args *ap)
 			return (0);
 		}
 
-		vrefact(vp);
+		devfs_ctty_ref(vp);
 		SESS_LOCK(sess);
 		vpold = sess->s_ttyvp;
 		sess->s_ttyvp = vp;
@@ -1159,6 +1272,9 @@ devfs_open(struct vop_open_args *ap)
 		return (ENXIO);
 	}
 
+	if (vp->v_type == VCHR)
+		devfs_usecount_add(vp);
+
 	vlocked = VOP_ISLOCKED(vp);
 	VOP_UNLOCK(vp);
 
@@ -1178,6 +1294,9 @@ devfs_open(struct vop_open_args *ap)
 	td->td_fpop = fpop;
 
 	vn_lock(vp, vlocked | LK_RETRY);
+	if (error != 0 && vp->v_type == VCHR)
+		devfs_usecount_sub(vp);
+
 	dev_relthread(dev, ref);
 	if (error != 0) {
 		if (error == ERESTART)
@@ -1406,19 +1525,28 @@ devfs_readlink(struct vop_readlink_args *ap)
 	return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
 }
 
+static void
+devfs_reclaiml(struct vnode *vp)
+{
+	struct devfs_dirent *de;
+
+	mtx_assert(&devfs_de_interlock, MA_OWNED);
+	de = vp->v_data;
+	if (de != NULL) {
+		MPASS(de->de_usecount == 0);
+		de->de_vnode = NULL;
+		vp->v_data = NULL;
+	}
+}
+
 static int
 devfs_reclaim(struct vop_reclaim_args *ap)
 {
 	struct vnode *vp;
-	struct devfs_dirent *de;
 
 	vp = ap->a_vp;
 	mtx_lock(&devfs_de_interlock);
-	de = vp->v_data;
-	if (de != NULL) {
-		de->de_vnode = NULL;
-		vp->v_data = NULL;
-	}
+	devfs_reclaiml(vp);
 	mtx_unlock(&devfs_de_interlock);
 	return (0);
 }
@@ -1432,14 +1560,14 @@ devfs_reclaim_vchr(struct vop_reclaim_args *ap)
 	vp = ap->a_vp;
 	MPASS(vp->v_type == VCHR);
 
-	devfs_reclaim(ap);
-
+	mtx_lock(&devfs_de_interlock);
 	VI_LOCK(vp);
+	devfs_usecount_subl(vp);
+	devfs_reclaiml(vp);
+	mtx_unlock(&devfs_de_interlock);
 	dev_lock();
 	dev = vp->v_rdev;
 	vp->v_rdev = NULL;
-	if (dev != NULL)
-		dev->si_usecount -= (vp->v_usecount > 0);
 	dev_unlock();
 	VI_UNLOCK(vp);
 	if (dev != NULL)
@@ -88,6 +88,8 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_page.h>
 #include <vm/uma.h>
 
+#include <fs/devfs/devfs.h>
+
 #ifdef COMPAT_FREEBSD32
 #include <compat/freebsd32/freebsd32.h>
 #include <compat/freebsd32/freebsd32_util.h>
@@ -858,7 +860,7 @@ killjobc(void)
 			VOP_REVOKE(ttyvp, REVOKEALL);
 			VOP_UNLOCK(ttyvp);
 		}
-		vrele(ttyvp);
+		devfs_ctty_unref(ttyvp);
 		sx_xlock(&proctree_lock);
 	}
 }
@@ -67,6 +67,8 @@ __FBSDID("$FreeBSD$");
 #include <sys/ucred.h>
 #include <sys/vnode.h>
 
+#include <fs/devfs/devfs.h>
+
 #include <machine/stdarg.h>
 
 static MALLOC_DEFINE(M_TTY, "tty", "tty device");
@@ -1256,7 +1258,7 @@ tty_drop_ctty(struct tty *tp, struct proc *p)
 	 * is either changed or released.
 	 */
 	if (vp != NULL)
-		vrele(vp);
+		devfs_ctty_unref(vp);
 	return (0);
 }
 
@@ -108,8 +108,6 @@ static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
 static void	syncer_shutdown(void *arg, int howto);
 static int	vtryrecycle(struct vnode *vp);
 static void	v_init_counters(struct vnode *);
-static void	v_incr_devcount(struct vnode *);
-static void	v_decr_devcount(struct vnode *);
 static void	vgonel(struct vnode *);
 static void	vfs_knllock(void *arg);
 static void	vfs_knlunlock(void *arg);
@@ -2793,59 +2791,6 @@ v_init_counters(struct vnode *vp)
 	refcount_init(&vp->v_usecount, 1);
 }
 
-/*
- * Increment si_usecount of the associated device, if any.
- */
-static void
-v_incr_devcount(struct vnode *vp)
-{
-
-	ASSERT_VI_LOCKED(vp, __FUNCTION__);
-	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
-		dev_lock();
-		vp->v_rdev->si_usecount++;
-		dev_unlock();
-	}
-}
-
-/*
- * Decrement si_usecount of the associated device, if any.
- *
- * The caller is required to hold the interlock when transitioning a VCHR use
- * count to zero. This prevents a race with devfs_reclaim_vchr() that would
- * leak a si_usecount reference. The vnode lock will also prevent this race
- * if it is held while dropping the last ref.
- *
- * The race is:
- *
- * CPU1				CPU2
- *				devfs_reclaim_vchr
- * make v_usecount == 0
- *				  VI_LOCK
- *				  sees v_usecount == 0, no updates
- *				  vp->v_rdev = NULL;
- *				  ...
- *				  VI_UNLOCK
- * VI_LOCK
- * v_decr_devcount
- *   sees v_rdev == NULL, no updates
- *
- * In this scenario si_devcount decrement is not performed.
- */
-static void
-v_decr_devcount(struct vnode *vp)
-{
-
-	ASSERT_VOP_LOCKED(vp, __func__);
-	ASSERT_VI_LOCKED(vp, __FUNCTION__);
-	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
-		dev_lock();
-		VNPASS(vp->v_rdev->si_usecount > 0, vp);
-		vp->v_rdev->si_usecount--;
-		dev_unlock();
-	}
-}
-
 /*
  * Grab a particular vnode from the free list, increment its
  * reference count and lock it. VIRF_DOOMED is set if the vnode
@@ -2921,41 +2866,6 @@ vget(struct vnode *vp, int flags, struct thread *td)
 	return (vget_finish(vp, flags, vs));
 }
 
-static void __noinline
-vget_finish_vchr(struct vnode *vp)
-{
-
-	VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR)"));
-
-	/*
-	 * See the comment in vget_finish before usecount bump.
-	 */
-	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-#ifdef INVARIANTS
-		int old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
-		VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old));
-#else
-		refcount_release(&vp->v_holdcnt);
-#endif
-		return;
-	}
-
-	VI_LOCK(vp);
-	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-#ifdef INVARIANTS
-		int old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
-		VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old));
-#else
-		refcount_release(&vp->v_holdcnt);
-#endif
-		VI_UNLOCK(vp);
-		return;
-	}
-	v_incr_devcount(vp);
-	refcount_acquire(&vp->v_usecount);
-	VI_UNLOCK(vp);
-}
-
 int
 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
 {
@@ -2993,11 +2903,6 @@ vget_finish_ref(struct vnode *vp, enum vgetstate vs)
 	if (vs == VGET_USECOUNT)
 		return;
 
-	if (__predict_false(vp->v_type == VCHR)) {
-		vget_finish_vchr(vp);
-		return;
-	}
-
 	/*
 	 * We hold the vnode. If the usecount is 0 it will be utilized to keep
 	 * the vnode around. Otherwise someone else lended their hold count and
@@ -3019,61 +2924,12 @@ vget_finish_ref(struct vnode *vp, enum vgetstate vs)
  * Increase the reference (use) and hold count of a vnode.
  * This will also remove the vnode from the free list if it is presently free.
  */
-static void __noinline
-vref_vchr(struct vnode *vp, bool interlock)
-{
-
-	/*
-	 * See the comment in vget_finish before usecount bump.
-	 */
-	if (!interlock) {
-		if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-			VNODE_REFCOUNT_FENCE_ACQ();
-			VNASSERT(vp->v_holdcnt > 0, vp,
-			    ("%s: active vnode not held", __func__));
-			return;
-		}
-		VI_LOCK(vp);
-		/*
-		 * By the time we get here the vnode might have been doomed, at
-		 * which point the 0->1 use count transition is no longer
-		 * protected by the interlock. Since it can't bounce back to
-		 * VCHR and requires vref semantics, punt it back
-		 */
-		if (__predict_false(vp->v_type == VBAD)) {
-			VI_UNLOCK(vp);
-			vref(vp);
-			return;
-		}
-	}
-	VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR)"));
-	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-		VNODE_REFCOUNT_FENCE_ACQ();
-		VNASSERT(vp->v_holdcnt > 0, vp,
-		    ("%s: active vnode not held", __func__));
-		if (!interlock)
-			VI_UNLOCK(vp);
-		return;
-	}
-	vhold(vp);
-	v_incr_devcount(vp);
-	refcount_acquire(&vp->v_usecount);
-	if (!interlock)
-		VI_UNLOCK(vp);
-	return;
-}
-
 void
 vref(struct vnode *vp)
 {
 	int old;
 
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-	if (__predict_false(vp->v_type == VCHR)) {
-		vref_vchr(vp, false);
-		return;
-	}
-
 	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
 		VNODE_REFCOUNT_FENCE_ACQ();
 		VNASSERT(vp->v_holdcnt > 0, vp,
@@ -3102,10 +2958,6 @@ vrefl(struct vnode *vp)
 
 	ASSERT_VI_LOCKED(vp, __func__);
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-	if (__predict_false(vp->v_type == VCHR)) {
-		vref_vchr(vp, true);
-		return;
-	}
 	vref(vp);
 }
 
@@ -3246,9 +3098,6 @@ enum vput_op { VRELE, VPUT, VUNREF };
  * By releasing the last usecount we take ownership of the hold count which
  * provides liveness of the vnode, meaning we have to vdrop.
  *
- * If the vnode is of type VCHR we may need to decrement si_usecount, see
- * v_decr_devcount for details.
- *
  * For all vnodes we may need to perform inactive processing. It requires an
  * exclusive lock on the vnode, while it is legal to call here with only a
  * shared lock (or no locks). If locking the vnode in an expected manner fails,
@@ -3269,8 +3118,6 @@ vput_final(struct vnode *vp, enum vput_op func)
 	VNPASS(vp->v_holdcnt > 0, vp);
 
 	VI_LOCK(vp);
-	if (__predict_false(vp->v_type == VCHR && func != VRELE))
-		v_decr_devcount(vp);
 
 	/*
 	 * By the time we got here someone else might have transitioned
@@ -3358,28 +3205,9 @@ out:
  * Releasing the last use count requires additional processing, see vput_final
  * above for details.
  *
- * Note that releasing use count without the vnode lock requires special casing
- * for VCHR, see v_decr_devcount for details.
- *
  * Comment above each variant denotes lock state on entry and exit.
  */
 
-static void __noinline
-vrele_vchr(struct vnode *vp)
-{
-
-	if (refcount_release_if_not_last(&vp->v_usecount))
-		return;
-	VI_LOCK(vp);
-	if (!refcount_release(&vp->v_usecount)) {
-		VI_UNLOCK(vp);
-		return;
-	}
-	v_decr_devcount(vp);
-	VI_UNLOCK(vp);
-	vput_final(vp, VRELE);
-}
-
 /*
  * in: any
  * out: same as passed in
@@ -3389,10 +3217,6 @@ vrele(struct vnode *vp)
 {
 
 	ASSERT_VI_UNLOCKED(vp, __func__);
-	if (__predict_false(vp->v_type == VCHR)) {
-		vrele_vchr(vp);
-		return;
-	}
 	if (!refcount_release(&vp->v_usecount))
 		return;
 	vput_final(vp, VRELE);
@@ -4143,20 +3967,6 @@ vgonel(struct vnode *vp)
 	vp->v_type = VBAD;
 }
 
-/*
- * Calculate the total number of references to a special device.
- */
-int
-vcount(struct vnode *vp)
-{
-	int count;
-
-	dev_lock();
-	count = vp->v_rdev->si_usecount;
-	dev_unlock();
-	return (count);
-}
-
 /*
  * Print out a description of a vnode.
  */
@@ -87,6 +87,8 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_page.h>
 #include <vm/uma.h>
 
+#include <fs/devfs/devfs.h>
+
 #include <ufs/ufs/quota.h>
 
 MALLOC_DEFINE(M_FADVISE, "fadvise", "posix_fadvise(2) information");
@@ -4213,7 +4215,7 @@ sys_revoke(struct thread *td, struct revoke_args *uap)
 		if (error != 0)
 			goto out;
 	}
-	if (vp->v_usecount > 1 || vcount(vp) > 1)
+	if (devfs_usecount(vp) > 0)
 		VOP_REVOKE(vp, REVOKEALL);
 out:
 	vput(vp);
@@ -676,7 +676,6 @@ int vaccess_acl_posix1e(enum vtype type, uid_t file_uid,
 	    gid_t file_gid, struct acl *acl, accmode_t accmode,
 	    struct ucred *cred);
 void	vattr_null(struct vattr *vap);
-int	vcount(struct vnode *vp);
 void	vlazy(struct vnode *);
 void	vdrop(struct vnode *);
 void	vdropl(struct vnode *);
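Taken together with the vref()/vrele()/vput_final() hunks above, the consumer-visible effect is that generic VFS code no longer carries VCHR-only reference bookkeeping. The sys_revoke() hunk is the clearest example (the lines below are copied from the diff; the explanation of the threshold change is an interpretation, not part of the commit message):

	/* before: infer busyness from vnode refcounts plus vcount() */
	if (vp->v_usecount > 1 || vcount(vp) > 1)
		VOP_REVOKE(vp, REVOKEALL);

	/* after: ask devfs how many opens the device still has */
	if (devfs_usecount(vp) > 0)
		VOP_REVOKE(vp, REVOKEALL);

The threshold drops from "> 1" to "> 0" because the old counters included the revoking process's own vnode reference from the path lookup, while the new counter only reflects opens of the device.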