vfs: introduce v_irflag and make v_type smaller

The current vnode layout is not SMP-friendly: frequently read data
avoidably shares cachelines with very frequently modified fields. In
particular, v_iflag, which is inspected for VI_DOOMED, sits in the same
cacheline as v_usecount. Instead, make the flag available in the same
cacheline as v_op, v_data and v_type, all of which are read constantly.

v_type occupies 4 bytes even though the data easily fits in 1. Shrinking
it frees up 3 bytes, 2 of which are used here to introduce a new
frequently-read flag field, v_irflag, with a new value: VIRF_DOOMED.

Reviewed by:	kib, jeff
Differential Revision:	https://reviews.freebsd.org/D22715
Mateusz Guzik 2019-12-08 21:30:04 +00:00
parent 791a24c7ea
commit abd80ddb94
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=355537
46 changed files with 143 additions and 132 deletions
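
To make the layout change concrete, here is a minimal sketch of the affected part of struct vnode and the new flag, abridged from the sys/vnode.h hunk further down (surrounding fields and comments are omitted):

struct vnode {
	/* ... */
	enum vtype v_type:8;		/* vnode type, now 1 byte instead of 4 */
	short v_irflag;			/* frequently read flags */
	struct vop_vector *v_op;	/* vnode operations vector */
	void *v_data;			/* private data for fs */
	/* ... v_usecount and the other hot, write-mostly fields follow
	 * further down the struct, away from these read-mostly fields ... */
};

#define VIRF_DOOMED	0x0001	/* This vnode is being recycled */
#define VN_IS_DOOMED(vp) ((vp)->v_irflag & VIRF_DOOMED)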

View File

@@ -26,7 +26,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd November 12, 2010
+.Dd December 8, 2019
.Dt VGONE 9
.Os
.Sh NAME
@@ -47,7 +47,7 @@ the removal from its mount point vnode list.
If the vnode has a
.Va v_usecount
of zero, and its
-.Dv VI_DOOMED
+.Dv VIRF_DOOMED
flag is not set, it is moved to the head of the free list
as in most cases the vnode
is about to be reused, or its file system is being unmounted.

View File

@@ -843,7 +843,7 @@ ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname)
val = vattr.va_bytes / be_lun->cbe_lun.blocksize;
}
if (strcmp(attrname, "blocksavail") == 0 &&
-(be_lun->vn->v_iflag & VI_DOOMED) == 0) {
+!VN_IS_DOOMED(be_lun->vn)) {
error = VFS_STATFS(be_lun->vn->v_mount, &statfs);
if (error == 0)
val = statfs.f_bavail * statfs.f_bsize /

View File

@@ -605,7 +605,7 @@ zfsctl_relock_dot(vnode_t *dvp, int ltype)
vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);
/* Relock for the "." case may left us with reclaimed vnode. */
-if ((dvp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(dvp)) {
vrele(dvp);
return (SET_ERROR(ENOENT));
}

View File

@@ -1413,7 +1413,7 @@ zfs_lookup_lock(vnode_t *dvp, vnode_t *vp, const char *name, int lkflags)
* Relock for the "." case could leave us with
* reclaimed vnode.
*/
-if (dvp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(dvp)) {
vrele(dvp);
return (SET_ERROR(ENOENT));
}
@@ -5913,7 +5913,7 @@ zfs_vptocnp(struct vop_vptocnp_args *ap)
vput(covered_vp);
}
vn_lock(vp, ltype | LK_RETRY);
-if ((vp->v_iflag & VI_DOOMED) != 0)
+if (VN_IS_DOOMED(vp))
error = SET_ERROR(ENOENT);
return (error);
}
@@ -5936,7 +5936,7 @@ zfs_lock(ap)
if (err == 0 && (ap->a_flags & LK_NOWAIT) == 0) {
vp = ap->a_vp;
zp = vp->v_data;
-if (vp->v_mount != NULL && (vp->v_iflag & VI_DOOMED) == 0 &&
+if (vp->v_mount != NULL && !VN_IS_DOOMED(vp) &&
zp != NULL && (zp->z_pflags & ZFS_XATTR) == 0)
VERIFY(!RRM_LOCK_HELD(&zp->z_zfsvfs->z_teardown_lock));
}

View File

@@ -1216,8 +1216,7 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
locked = VOP_ISLOCKED(vp);
VI_LOCK(vp);
-if ((vp->v_iflag & VI_DOOMED) != 0 &&
-locked != LK_EXCLUSIVE) {
+if (VN_IS_DOOMED(vp) && locked != LK_EXCLUSIVE) {
/*
* The vnode is doomed and this thread doesn't
* hold the exclusive lock on it, so the vnode

View File

@@ -258,7 +258,7 @@ open_file(struct beri_vtblk_softc *sc, struct thread *td)
if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
-if (nd.ni_vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(nd.ni_vp)) {
return (1);
}
}

View File

@@ -1453,7 +1453,7 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
goto bad;
if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
-if (nd.ni_vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(nd.ni_vp)) {
/* Forced unmount. */
error = EBADF;
goto bad;

View File

@@ -2627,7 +2627,7 @@ xbb_open_file(struct xbb_softc *xbb)
*/
if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) {
vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY);
-if (xbb->vn->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(xbb->vn)) {
error = EBADF;
xenbus_dev_fatal(xbb->dev, error,
"error locking file %s",

View File

@@ -169,8 +169,8 @@ autofs_trigger_vn(struct vnode *vp, const char *path, int pathlen,
sx_xunlock(&autofs_softc->sc_lock);
vn_lock(vp, lock_flags | LK_RETRY);
vunref(vp);
-if ((vp->v_iflag & VI_DOOMED) != 0) {
-AUTOFS_DEBUG("VI_DOOMED");
+if (VN_IS_DOOMED(vp)) {
+AUTOFS_DEBUG("VIRF_DOOMED");
return (ENOENT);
}
@@ -661,7 +661,7 @@ autofs_node_vn(struct autofs_node *anp, struct mount *mp, int flags,
sx_xunlock(&anp->an_vnode_lock);
return (error);
}
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
/*
* We got forcibly unmounted.
*/

View File

@@ -260,7 +260,7 @@ cd9660_ioctl(ap)
vp = ap->a_vp;
vn_lock(vp, LK_SHARED | LK_RETRY);
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
VOP_UNLOCK(vp, 0);
return (EBADF);
}

View File

@@ -252,7 +252,7 @@ devfs_populate_vp(struct vnode *vp)
devfs_unmount_final(dmp);
return (ERESTART);
}
-if ((vp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(vp)) {
sx_xunlock(&dmp->dm_lock);
return (ERESTART);
}
@@ -441,7 +441,7 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
vput(vp);
return (ENOENT);
}
-else if ((vp->v_iflag & VI_DOOMED) != 0) {
+else if (VN_IS_DOOMED(vp)) {
mtx_lock(&devfs_de_interlock);
if (de->de_vnode == vp) {
de->de_vnode = NULL;
@@ -592,7 +592,7 @@ devfs_close(struct vop_close_args *ap)
SESS_LOCK(p->p_session);
VI_LOCK(vp);
if (vp->v_usecount == 2 && vcount(vp) == 1 &&
-(vp->v_iflag & VI_DOOMED) == 0) {
+!VN_IS_DOOMED(vp)) {
p->p_session->s_ttyvp = NULL;
p->p_session->s_ttydp = NULL;
oldvp = vp;
@@ -622,7 +622,7 @@ devfs_close(struct vop_close_args *ap)
VI_LOCK(vp);
if (vp->v_usecount == 1 && vcount(vp) == 1)
dflags |= FLASTCLOSE;
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
/* Forced close. */
dflags |= FREVOKE | FNONBLOCK;
} else if (dsw->d_flags & D_TRACKCLOSE) {
@@ -1562,7 +1562,7 @@ devfs_rioctl(struct vop_ioctl_args *ap)
vp = ap->a_vp;
vn_lock(vp, LK_SHARED | LK_RETRY);
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
VOP_UNLOCK(vp, 0);
return (EBADF);
}

View File

@@ -665,7 +665,7 @@ ext2_lookup_ino(struct vnode *vdp, struct vnode **vpp, struct componentname *cnp
pdp = vdp;
if (flags & ISDOTDOT) {
error = vn_vget_ino(pdp, ino, cnp->cn_lkflags, &tdp);
-if (pdp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(pdp)) {
if (error == 0)
vput(tdp);
error = ENOENT;

View File

@@ -347,7 +347,7 @@ fdesc_lookup(struct vop_lookup_args *ap)
vn_lock(dvp, LK_RETRY | LK_EXCLUSIVE);
vdrop(dvp);
fvp = dvp;
-if ((dvp->v_iflag & VI_DOOMED) != 0)
+if (VN_IS_DOOMED(dvp))
error = ENOENT;
} else {
/*

View File

@@ -1116,7 +1116,7 @@ fuse_io_invalbuf(struct vnode *vp, struct thread *td)
struct fuse_vnode_data *fvdat = VTOFUD(vp);
int error = 0;
-if (vp->v_iflag & VI_DOOMED)
+if (VN_IS_DOOMED(vp))
return 0;
ASSERT_VOP_ELOCKED(vp, "fuse_io_invalbuf");

View File

@@ -149,13 +149,13 @@ nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp,
* get called on this vnode between when NFSVOPLOCK() drops
* the VI_LOCK() and vget() acquires it again, so that it
* hasn't yet had v_usecount incremented. If this were to
-* happen, the VI_DOOMED flag would be set, so check for
+* happen, the VIRF_DOOMED flag would be set, so check for
* that here. Since we now have the v_usecount incremented,
-* we should be ok until we vrele() it, if the VI_DOOMED
+* we should be ok until we vrele() it, if the VIRF_DOOMED
* flag isn't set now.
*/
VI_LOCK(nvp);
-if ((nvp->v_iflag & VI_DOOMED)) {
+if (VN_IS_DOOMED(nvp)) {
VI_UNLOCK(nvp);
vrele(nvp);
error = ENOENT;
@@ -350,7 +350,7 @@ nfscl_ngetreopen(struct mount *mntp, u_int8_t *fhp, int fhsize,
vfs_hash_ref(mntp, hash, td, &nvp, newnfs_vncmpf, nfhp);
if (nvp == NULL) {
error = ENOENT;
-} else if ((nvp->v_iflag & VI_DOOMED) != 0) {
+} else if (VN_IS_DOOMED(nvp)) {
error = ENOENT;
vrele(nvp);
} else {

View File

@@ -1235,7 +1235,7 @@ nfs_lookup(struct vop_lookup_args *ap)
vrele(newvp);
*vpp = NULLVP;
} else if (error == ENOENT) {
-if (dvp->v_iflag & VI_DOOMED)
+if (VN_IS_DOOMED(dvp))
return (ENOENT);
/*
* We only accept a negative hit in the cache if the
@@ -1340,7 +1340,7 @@ nfs_lookup(struct vop_lookup_args *ap)
error = vfs_busy(mp, 0);
NFSVOPLOCK(dvp, ltype | LK_RETRY);
vfs_rel(mp);
-if (error == 0 && (dvp->v_iflag & VI_DOOMED)) {
+if (error == 0 && VN_IS_DOOMED(dvp)) {
vfs_unbusy(mp);
error = ENOENT;
}
@@ -1355,7 +1355,7 @@ nfs_lookup(struct vop_lookup_args *ap)
vfs_unbusy(mp);
if (newvp != dvp)
NFSVOPLOCK(dvp, ltype | LK_RETRY);
-if (dvp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(dvp)) {
if (error == 0) {
if (newvp == dvp)
vrele(newvp);
@@ -3139,7 +3139,7 @@ nfs_advlock(struct vop_advlock_args *ap)
else
cred = td->td_ucred;
NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
error = EBADF;
goto out;
}
@@ -3169,7 +3169,7 @@ nfs_advlock(struct vop_advlock_args *ap)
if (error)
return (EINTR);
NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
error = EBADF;
goto out;
}

View File

@@ -1457,7 +1457,7 @@ nfsvno_link(struct nameidata *ndp, struct vnode *vp, struct ucred *cred,
}
if (!error) {
NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
-if ((vp->v_iflag & VI_DOOMED) == 0)
+if (!VN_IS_DOOMED(vp))
error = VOP_LINK(ndp->ni_dvp, vp, &ndp->ni_cnd);
else
error = EPERM;
@@ -1738,7 +1738,7 @@ nfsvno_open(struct nfsrv_descript *nd, struct nameidata *ndp,
* Updates the file rev and sets the mtime and ctime
* to the current clock time, returning the va_filerev and va_Xtime
* values.
-* Return ESTALE to indicate the vnode is VI_DOOMED.
+* Return ESTALE to indicate the vnode is VIRF_DOOMED.
*/
int
nfsvno_updfilerev(struct vnode *vp, struct nfsvattr *nvap,
@@ -1750,7 +1750,7 @@ nfsvno_updfilerev(struct vnode *vp, struct nfsvattr *nvap,
vfs_timestamp(&va.va_mtime);
if (NFSVOPISLOCKED(vp) != LK_EXCLUSIVE) {
NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
-if ((vp->v_iflag & VI_DOOMED) != 0)
+if (VN_IS_DOOMED(vp))
return (ESTALE);
}
(void) VOP_SETATTR(vp, &va, nd->nd_cred);

View File

@@ -3024,7 +3024,7 @@ nfsrvd_open(struct nfsrv_descript *nd, __unused int isdgram,
}
vp = dp;
NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
-if ((vp->v_iflag & VI_DOOMED) == 0)
+if (!VN_IS_DOOMED(vp))
nd->nd_repstat = nfsrv_opencheck(clientid, &stateid,
stp, vp, nd, p, nd->nd_repstat);
else

View File

@@ -2159,7 +2159,7 @@ nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp,
NFSUNLOCKSTATE();
NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
vnode_unlocked = 0;
-if ((vp->v_iflag & VI_DOOMED) != 0)
+if (VN_IS_DOOMED(vp))
ret = NFSERR_SERVERFAULT;
NFSLOCKSTATE();
}
@@ -2257,7 +2257,7 @@ nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp,
NFSUNLOCKSTATE();
NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
vnode_unlocked = 0;
-if ((vp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(vp)) {
error = NFSERR_SERVERFAULT;
goto out;
}
@@ -2379,7 +2379,7 @@ nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp,
}
if (vnode_unlocked != 0) {
NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
-if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0)
+if (error == 0 && VN_IS_DOOMED(vp))
error = NFSERR_SERVERFAULT;
}
if (other_lop)
@@ -5133,7 +5133,7 @@ nfsrv_checkstable(struct nfsclient *clp)
* Return 0 to indicate the conflict can't be revoked and 1 to indicate
* the revocation worked and the conflicting client is "bye, bye", so it
* can be tried again.
-* Return 2 to indicate that the vnode is VI_DOOMED after NFSVOPLOCK().
+* Return 2 to indicate that the vnode is VIRF_DOOMED after NFSVOPLOCK().
* Unlocks State before a non-zero value is returned.
*/
static int
@@ -5164,7 +5164,7 @@ nfsrv_clientconflict(struct nfsclient *clp, int *haslockp, vnode_t vp,
*haslockp = 1;
if (vp != NULL) {
NFSVOPLOCK(vp, lktype | LK_RETRY);
-if ((vp->v_iflag & VI_DOOMED) != 0)
+if (VN_IS_DOOMED(vp))
return (2);
}
return (1);
@@ -5339,7 +5339,7 @@ nfsrv_delegconflict(struct nfsstate *stp, int *haslockp, NFSPROC_T *p,
*haslockp = 1;
if (vp != NULL) {
NFSVOPLOCK(vp, lktype | LK_RETRY);
-if ((vp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(vp)) {
*haslockp = 0;
NFSLOCKV4ROOTMUTEX();
nfsv4_unlock(&nfsv4rootfs_lock, 1);
@@ -8313,7 +8313,7 @@ nfsrv_copymr(vnode_t vp, vnode_t fvp, vnode_t dvp, struct nfsdevice *ds,
* changed until the copy is complete.
*/
NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
-if (ret == 0 && (vp->v_iflag & VI_DOOMED) != 0) {
+if (ret == 0 && VN_IS_DOOMED(vp)) {
NFSD_DEBUG(4, "nfsrv_copymr: lk_exclusive doomed\n");
ret = ESTALE;
}

View File

@@ -225,7 +225,7 @@ null_nodeget(mp, lowervp, vpp)
*/
if (VOP_ISLOCKED(lowervp) != LK_EXCLUSIVE) {
vn_lock(lowervp, LK_UPGRADE | LK_RETRY);
-if ((lowervp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(lowervp)) {
vput(lowervp);
return (ENOENT);
}

View File

@@ -442,7 +442,7 @@ nullfs_unlink_lowervp(struct mount *mp, struct vnode *lowervp)
* extra unlock before allowing the final vdrop() to
* free the vnode.
*/
-KASSERT((vp->v_iflag & VI_DOOMED) != 0,
+KASSERT(VN_IS_DOOMED(vp),
("not reclaimed nullfs vnode %p", vp));
VOP_UNLOCK(vp, 0);
} else {
@@ -453,7 +453,7 @@ nullfs_unlink_lowervp(struct mount *mp, struct vnode *lowervp)
* relevant for future reclamations.
*/
ASSERT_VOP_ELOCKED(vp, "unlink_lowervp");
-KASSERT((vp->v_iflag & VI_DOOMED) == 0,
+KASSERT(!VN_IS_DOOMED(vp),
("reclaimed nullfs vnode %p", vp));
xp->null_flags &= ~NULLV_NOUNLOCK;
}

View File

@@ -396,7 +396,7 @@ null_lookup(struct vop_lookup_args *ap)
* doomed state and return error.
*/
if ((error == 0 || error == EJUSTRETURN) &&
-(dvp->v_iflag & VI_DOOMED) != 0) {
+VN_IS_DOOMED(dvp)) {
error = ENOENT;
if (lvp != NULL)
vput(lvp);

View File

@@ -290,7 +290,7 @@ pfs_ioctl(struct vop_ioctl_args *va)
vn = va->a_vp;
vn_lock(vn, LK_SHARED | LK_RETRY);
-if (vn->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vn)) {
VOP_UNLOCK(vn, 0);
return (EBADF);
}
@@ -512,7 +512,7 @@ pfs_lookup(struct vop_cachedlookup_args *va)
vfs_rel(mp);
if (error != 0)
PFS_RETURN(ENOENT);
-if (vn->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vn)) {
vfs_unbusy(mp);
PFS_RETURN(ENOENT);
}
@@ -581,13 +581,13 @@ pfs_lookup(struct vop_cachedlookup_args *va)
if (cnp->cn_flags & ISDOTDOT) {
vfs_unbusy(mp);
vn_lock(vn, LK_EXCLUSIVE | LK_RETRY);
-if (vn->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vn)) {
vput(*vpp);
*vpp = NULL;
PFS_RETURN(ENOENT);
}
}
-if (cnp->cn_flags & MAKEENTRY && !(vn->v_iflag & VI_DOOMED))
+if (cnp->cn_flags & MAKEENTRY && !VN_IS_DOOMED(vn))
cache_enter(vn, *vpp, cnp);
PFS_RETURN (0);
failed:

View File

@@ -637,7 +637,7 @@ smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
struct smbnode *np = VTOSMB(vp);
int error = 0;
-if (vp->v_iflag & VI_DOOMED)
+if (VN_IS_DOOMED(vp))
return 0;
while (np->n_flag & NFLUSHINPROG) {

View File

@@ -1345,7 +1345,7 @@ smbfs_lookup(ap)
error = ENOENT;
goto out;
}
-if ((dvp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(dvp)) {
vfs_unbusy(mp);
error = ENOENT;
goto out;
@@ -1355,7 +1355,7 @@ smbfs_lookup(ap)
error = smbfs_nget(mp, dvp, name, nmlen, NULL, &vp);
vfs_unbusy(mp);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
-if ((dvp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(dvp)) {
if (error == 0)
vput(vp);
error = ENOENT;

View File

@@ -598,15 +598,15 @@ tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
VI_LOCK(vp);
if ((node->tn_type == VDIR && node->tn_dir.tn_parent == NULL) ||
-((vp->v_iflag & VI_DOOMED) != 0 &&
-(lkflag & LK_NOWAIT) != 0)) {
+(VN_IS_DOOMED(vp) &&
+(lkflag & LK_NOWAIT) != 0)) {
VI_UNLOCK(vp);
TMPFS_NODE_UNLOCK(node);
error = ENOENT;
vp = NULL;
goto out;
}
-if ((vp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(vp)) {
VI_UNLOCK(vp);
node->tn_vpstate |= TMPFS_VNODE_WRECLAIM;
while ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0) {

View File

@@ -1578,7 +1578,7 @@ tmpfs_vptocnp(struct vop_vptocnp_args *ap)
tmpfs_free_node(tm, tnp);
return (0);
}
-if ((vp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(vp)) {
tmpfs_free_node(tm, tnp);
return (ENOENT);
}

View File

@@ -127,7 +127,8 @@ unionfs_get_cached_vnode(struct vnode *uvp, struct vnode *lvp,
VI_LOCK_FLAGS(vp, MTX_DUPOK);
VI_UNLOCK(dvp);
vp->v_iflag &= ~VI_OWEINACT;
-if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) {
+if (VN_IS_DOOMED(vp) ||
+((vp->v_iflag & VI_DOINGINACT) != 0)) {
VI_UNLOCK(vp);
vp = NULLVP;
} else
@@ -163,7 +164,8 @@ unionfs_ins_cached_vnode(struct unionfs_node *uncp,
vp = UNIONFSTOV(unp);
VI_LOCK_FLAGS(vp, MTX_DUPOK);
vp->v_iflag &= ~VI_OWEINACT;
-if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) {
+if (VN_IS_DOOMED(vp) ||
+((vp->v_iflag & VI_DOINGINACT) != 0)) {
LIST_INSERT_HEAD(hd, uncp, un_hash);
VI_UNLOCK(vp);
vp = NULLVP;

View File

@@ -592,7 +592,7 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
* the vnode interlock.
*/
VI_LOCK(vp);
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
VI_UNLOCK(vp);
lf_free_lock(lock);
return (ENOENT);
@@ -622,7 +622,7 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
* trying to allocate memory.
*/
VI_LOCK(vp);
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
VI_UNLOCK(vp);
sx_xlock(&lf_lock_states_lock);
LIST_REMOVE(ls, ls_link);
@@ -655,10 +655,10 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
/*
* Recheck the doomed vnode after state->ls_lock is
* locked. lf_purgelocks() requires that no new threads add
-* pending locks when vnode is marked by VI_DOOMED flag.
+* pending locks when vnode is marked by VIRF_DOOMED flag.
*/
VI_LOCK(vp);
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
state->ls_threads--;
wakeup(state);
VI_UNLOCK(vp);
@@ -771,12 +771,12 @@ lf_purgelocks(struct vnode *vp, struct lockf **statep)
/*
* For this to work correctly, the caller must ensure that no
* other threads enter the locking system for this vnode,
-* e.g. by checking VI_DOOMED. We wake up any threads that are
+* e.g. by checking VIRF_DOOMED. We wake up any threads that are
* sleeping waiting for locks on this vnode and then free all
* the remaining locks.
*/
VI_LOCK(vp);
-KASSERT(vp->v_iflag & VI_DOOMED,
+KASSERT(VN_IS_DOOMED(vp),
("lf_purgelocks: vp %p has not vgone yet", vp));
state = *statep;
if (state == NULL) {

View File

@@ -518,7 +518,7 @@ kern_reroot(void)
VOP_UNLOCK(vp, 0);
return (ENOENT);
}
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
VOP_UNLOCK(vp, 0);
vfs_unbusy(mp);
return (ENOENT);

View File

@@ -1250,9 +1250,9 @@ tty_drop_ctty(struct tty *tp, struct proc *p)
* If we did have a vnode, release our reference. Ordinarily we manage
* these at the devfs layer, but we can't necessarily know that we were
* invoked on the vnode referenced in the session (i.e. the vnode we
-* hold a reference to). We explicitly don't check VBAD/VI_DOOMED here
+* hold a reference to). We explicitly don't check VBAD/VIRF_DOOMED here
* to avoid a vnode leak -- in circumstances elsewhere where we'd hit a
-* VI_DOOMED vnode, release has been deferred until the controlling TTY
+* VIRF_DOOMED vnode, release has been deferred until the controlling TTY
* is either changed or released.
*/
if (vp != NULL)

View File

@@ -1158,7 +1158,7 @@ cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cn
if (ltype != VOP_ISLOCKED(*vpp)) {
if (ltype == LK_EXCLUSIVE) {
vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
-if ((*vpp)->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED((*vpp))) {
/* forced unmount */
vrele(*vpp);
*vpp = NULL;
@@ -1401,7 +1401,7 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
error = vget_finish(*vpp, cnp->cn_lkflags, vs);
if (cnp->cn_flags & ISDOTDOT) {
vn_lock(dvp, ltype | LK_RETRY);
-if (dvp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(dvp)) {
if (error == 0)
vput(*vpp);
*vpp = NULL;
@@ -1706,9 +1706,9 @@ cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
u_long lnumcache;
CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
-VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
+VNASSERT(vp == NULL || !VN_IS_DOOMED(vp), vp,
("cache_enter: Adding a doomed vnode"));
-VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
+VNASSERT(dvp == NULL || !VN_IS_DOOMED(dvp), dvp,
("cache_enter: Doomed vnode used as src"));
#ifdef DEBUG_CACHE
@@ -2365,7 +2365,7 @@ vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
}
*vp = dvp;
-if (dvp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(dvp)) {
/* forced unmount */
vrele(dvp);
error = ENOENT;
@@ -2429,7 +2429,7 @@ vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
* mnt_vnodecovered can be NULL only for the
* case of unmount.
*/
-if ((vp->v_iflag & VI_DOOMED) != 0 ||
+if (VN_IS_DOOMED(vp) ||
(vp1 = vp->v_mount->mnt_vnodecovered) == NULL ||
vp1->v_mountedhere != vp->v_mount) {
vput(vp);

View File

@@ -594,7 +594,7 @@ vop_stdgetwritemount(ap)
* Note that having a reference does not prevent forced unmount from
* setting ->v_mount to NULL after the lock gets released. This is of
* no consequence for typical consumers (most notably vn_start_write)
-* since in this case the vnode is VI_DOOMED. Unmount might have
+* since in this case the vnode is VIRF_DOOMED. Unmount might have
* progressed far enough that its completion is only delayed by the
* reference obtained here. The consumer only needs to concern itself
* with releasing it.
@@ -1019,7 +1019,7 @@ vop_stdadvise(struct vop_advise_args *ap)
case POSIX_FADV_DONTNEED:
error = 0;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
VOP_UNLOCK(vp, 0);
break;
}

View File

@@ -867,7 +867,7 @@ lookup(struct nameidata *ndp)
}
if ((dp->v_vflag & VV_ROOT) == 0)
break;
-if (dp->v_iflag & VI_DOOMED) { /* forced unmount */
+if (VN_IS_DOOMED(dp)) { /* forced unmount */
error = ENOENT;
goto bad;
}
@@ -911,7 +911,7 @@ lookup(struct nameidata *ndp)
if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN) &&
dp != vp_crossmp && VOP_ISLOCKED(dp) == LK_SHARED)
vn_lock(dp, LK_UPGRADE|LK_RETRY);
-if ((dp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(dp)) {
error = ENOENT;
goto bad;
}
@@ -1028,7 +1028,7 @@ lookup(struct nameidata *ndp)
((cnp->cn_flags & FOLLOW) || (cnp->cn_flags & TRAILINGSLASH) ||
*ndp->ni_next == '/')) {
cnp->cn_flags |= ISSYMLINK;
-if (dp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(dp)) {
/*
* We can't know whether the directory was mounted with
* NOSYMFOLLOW, so we can't follow safely.
@@ -1135,7 +1135,7 @@ lookup(struct nameidata *ndp)
if (needs_exclusive_leaf(dp->v_mount, cnp->cn_flags) &&
VOP_ISLOCKED(dp) != LK_EXCLUSIVE) {
vn_lock(dp, LK_UPGRADE | LK_RETRY);
-if (dp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(dp)) {
error = ENOENT;
goto bad2;
}

View File

@@ -137,7 +137,7 @@ static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
/*
* Number of vnodes in existence. Increased whenever getnewvnode()
-* allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode.
+* allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
*/
static unsigned long numvnodes;
@@ -367,7 +367,7 @@ sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
goto out;
vp = nd.ni_vp;
-if ((vp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(vp)) {
/*
* This vnode is being recycled. Return != 0 to let the caller
* know that the sysctl had no effect. Return EAGAIN because a
@@ -1033,7 +1033,7 @@ vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger)
if (vp->v_usecount ||
(!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
((vp->v_iflag & VI_FREE) != 0) ||
-(vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
+VN_IS_DOOMED(vp) || (vp->v_object != NULL &&
vp->v_object->resident_page_count > trigger)) {
VI_UNLOCK(vp);
goto next_iter;
@@ -1049,7 +1049,7 @@ vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger)
* v_usecount may have been bumped after VOP_LOCK() dropped
* the vnode interlock and before it was locked again.
*
-* It is not necessary to recheck VI_DOOMED because it can
+* It is not necessary to recheck VIRF_DOOMED because it can
* only be set by another thread that holds both the vnode
* lock and vnode interlock. If another thread has the
* vnode lock before we get to VOP_LOCK() and obtains the
@@ -1066,8 +1066,8 @@ vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger)
vdropl(vp);
goto next_iter_mntunlocked;
}
-KASSERT((vp->v_iflag & VI_DOOMED) == 0,
-("VI_DOOMED unexpectedly detected in vlrureclaim()"));
+KASSERT(!VN_IS_DOOMED(vp),
+("VIRF_DOOMED unexpectedly detected in vlrureclaim()"));
counter_u64_add(recycles_count, 1);
vgonel(vp);
VOP_UNLOCK(vp, 0);
@@ -1436,7 +1436,7 @@ vtryrecycle(struct vnode *vp)
__func__, vp);
return (EBUSY);
}
-if ((vp->v_iflag & VI_DOOMED) == 0) {
+if (!VN_IS_DOOMED(vp)) {
counter_u64_add(recycles_count, 1);
vgonel(vp);
}
@@ -1715,6 +1715,7 @@ freevnode(struct vnode *vp)
vp->v_rdev = NULL;
vp->v_fifoinfo = NULL;
vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
+vp->v_irflag = 0;
vp->v_iflag = 0;
vp->v_vflag = 0;
bo->bo_flag = 0;
@@ -2752,7 +2753,7 @@ v_decr_devcount(struct vnode *vp)
/*
* Grab a particular vnode from the free list, increment its
-* reference count and lock it. VI_DOOMED is set if the vnode
+* reference count and lock it. VIRF_DOOMED is set if the vnode
* is being destroyed. Only callers who specify LK_RETRY will
* see doomed vnodes. If inactive processing was delayed in
* vput try to do it here.
@@ -3022,7 +3023,7 @@ vputx(struct vnode *vp, enum vputx_op func)
* Since vgone performs inactive on its own there is nothing to do
* here but to drop our hold count.
*/
-if (__predict_false(vp->v_iflag & VI_DOOMED) ||
+if (__predict_false(VN_IS_DOOMED(vp)) ||
VOP_NEED_INACTIVE(vp) == 0) {
vdropl(vp);
return;
@@ -3182,7 +3183,7 @@ vholdnz(struct vnode *vp)
/*
* Drop the hold count of the vnode. If this is the last reference to
* the vnode we place it on the free list unless it has been vgone'd
-* (marked VI_DOOMED) in which case we will free it.
+* (marked VIRF_DOOMED) in which case we will free it.
*
* Because the vnode vm object keeps a hold reference on the vnode if
* there is at least one resident non-cached page, the vnode cannot
@@ -3211,7 +3212,7 @@ _vdrop(struct vnode *vp, bool locked)
VI_UNLOCK(vp);
return;
}
-if ((vp->v_iflag & VI_DOOMED) == 0) {
+if (!VN_IS_DOOMED(vp)) {
/*
* Mark a vnode as free: remove it from its active list
* and put it up for recycling on the freelist.
@@ -3572,9 +3573,9 @@ vgonel(struct vnode *vp)
/*
* Don't vgonel if we're already doomed.
*/
-if (vp->v_iflag & VI_DOOMED)
+if (vp->v_irflag & VIRF_DOOMED)
return;
-vp->v_iflag |= VI_DOOMED;
+vp->v_irflag |= VIRF_DOOMED;
/*
* Check to see if the vnode is in use. If so, we have to call
@@ -3723,6 +3724,13 @@ vn_printf(struct vnode *vp, const char *fmt, ...)
}
buf[0] = '\0';
buf[1] = '\0';
+if (vp->v_irflag & VIRF_DOOMED)
+strlcat(buf, "|VIRF_DOOMED", sizeof(buf));
+flags = vp->v_irflag & ~(VIRF_DOOMED);
+if (flags != 0) {
+snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags);
+strlcat(buf, buf2, sizeof(buf));
+}
if (vp->v_vflag & VV_ROOT)
strlcat(buf, "|VV_ROOT", sizeof(buf));
if (vp->v_vflag & VV_ISTTY)
@@ -3762,8 +3770,6 @@ vn_printf(struct vnode *vp, const char *fmt, ...)
strlcat(buf, "|VI_TEXT_REF", sizeof(buf));
if (vp->v_iflag & VI_MOUNT)
strlcat(buf, "|VI_MOUNT", sizeof(buf));
-if (vp->v_iflag & VI_DOOMED)
-strlcat(buf, "|VI_DOOMED", sizeof(buf));
if (vp->v_iflag & VI_FREE)
strlcat(buf, "|VI_FREE", sizeof(buf));
if (vp->v_iflag & VI_ACTIVE)
@@ -3772,8 +3778,8 @@ vn_printf(struct vnode *vp, const char *fmt, ...)
strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
if (vp->v_iflag & VI_OWEINACT)
strlcat(buf, "|VI_OWEINACT", sizeof(buf));
-flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOOMED | VI_FREE |
-VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT);
+flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_FREE | VI_ACTIVE |
+VI_DOINGINACT | VI_OWEINACT);
if (flags != 0) {
snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
strlcat(buf, buf2, sizeof(buf));
@@ -5198,7 +5204,7 @@ vop_close_post(void *ap, int rc)
struct vop_close_args *a = ap;
if (!rc && (a->a_cred != NOCRED || /* filter out revokes */
-(a->a_vp->v_iflag & VI_DOOMED) == 0)) {
+!VN_IS_DOOMED(a->a_vp))) {
VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ?
NOTE_CLOSE_WRITE : NOTE_CLOSE);
}
@@ -5667,7 +5673,7 @@ vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
MNT_ILOCK(mp);
vp = mp->mnt_rootvnode;
if (vp != NULL) {
-if ((vp->v_iflag & VI_DOOMED) == 0) {
+if (!VN_IS_DOOMED(vp)) {
vrefact(vp);
MNT_IUNLOCK(mp);
error = vn_lock(vp, flags);
@@ -5707,7 +5713,7 @@ vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
mp->mnt_rootvnode = *vpp;
} else {
if (mp->mnt_rootvnode != *vpp) {
-if ((mp->mnt_rootvnode->v_iflag & VI_DOOMED) == 0) {
+if (!VN_IS_DOOMED(mp->mnt_rootvnode)) {
panic("%s: mismatch between vnode returned "
" by VFS_CACHEDROOT and the one cached "
" (%p != %p)",
@@ -5729,7 +5735,7 @@ vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
if (!vfs_op_thread_enter(mp))
return (vfs_cache_root_fallback(mp, flags, vpp));
vp = (struct vnode *)atomic_load_ptr(&mp->mnt_rootvnode);
-if (vp == NULL || (vp->v_iflag & VI_DOOMED)) {
+if (vp == NULL || VN_IS_DOOMED(vp)) {
vfs_op_thread_exit(mp);
return (vfs_cache_root_fallback(mp, flags, vpp));
}
@@ -5787,11 +5793,11 @@ __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
-/* Allow a racy peek at VI_DOOMED to save a lock acquisition. */
-if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0)
+/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
+if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
continue;
VI_LOCK(vp);
-if ((vp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(vp)) {
VI_UNLOCK(vp);
continue;
}
@@ -5821,11 +5827,11 @@ __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
(*mvp)->v_type = VMARKER;
TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
-/* Allow a racy peek at VI_DOOMED to save a lock acquisition. */
-if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0)
+/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
+if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
continue;
VI_LOCK(vp);
-if ((vp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(vp)) {
VI_UNLOCK(vp);
continue;
}
@@ -5982,7 +5988,7 @@ mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
("alien vnode on the active list %p %p", vp, mp));
-if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0)
+if (vp->v_mount == mp && !VN_IS_DOOMED(vp))
break;
nvp = TAILQ_NEXT(vp, v_actfreelist);
VI_UNLOCK(vp);

View File

@@ -1820,7 +1820,7 @@ kern_funlinkat(struct thread *td, int dfd, const char *path, int fd,
sb.st_ino != oldinum) {
error = EIDRM; /* Identifier removed */
} else if (fp != NULL && fp->f_vnode != vp) {
-if ((fp->f_vnode->v_iflag & VI_DOOMED) != 0)
+if (VN_IS_DOOMED(fp->f_vnode))
error = EBADF;
else
error = EDEADLK;
@@ -3779,7 +3779,7 @@ kern_frmdirat(struct thread *td, int dfd, const char *path, int fd,
}
if (fp != NULL && fp->f_vnode != vp) {
-if ((fp->f_vnode->v_iflag & VI_DOOMED) != 0)
+if (VN_IS_DOOMED(fp->f_vnode))
error = EBADF;
else
error = EDEADLK;

View File

@@ -328,7 +328,7 @@ vn_open_vnode_advlock(struct vnode *vp, int fmode, struct file *fp)
fp->f_flag |= FHASLOCK;
vn_lock(vp, lock_flags | LK_RETRY);
-if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0)
+if (error == 0 && VN_IS_DOOMED(vp))
error = ENOENT;
return (error);
}
@@ -1579,7 +1579,7 @@ _vn_lock(struct vnode *vp, int flags, char *file, int line)
("vn_lock: error %d incompatible with flags %#x", error, flags));
if ((flags & LK_RETRY) == 0) {
-if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0) {
+if (error == 0 && VN_IS_DOOMED(vp)) {
VOP_UNLOCK(vp, 0);
error = ENOENT;
}
@@ -2132,7 +2132,7 @@ vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg,
vfs_rel(mp);
if (error != 0)
return (ENOENT);
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
vfs_unbusy(mp);
return (ENOENT);
}
@@ -2142,7 +2142,7 @@ vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg,
vfs_unbusy(mp);
if (error != 0 || *rvp != vp)
vn_lock(vp, ltype | LK_RETRY);
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
if (error == 0) {
if (*rvp == vp)
vunref(vp);

View File

@@ -60,7 +60,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1300062 /* Master, propagated to newvers */
+#define __FreeBSD_version 1300063 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,

View File

@@ -103,7 +103,8 @@ struct vnode {
* Fields which define the identity of the vnode. These fields are
* owned by the filesystem (XXX: and vgone() ?)
*/
-enum vtype v_type; /* u vnode type */
+enum vtype v_type:8; /* u vnode type */
+short v_irflag; /* i frequently read flags */
struct vop_vector *v_op; /* u vnode operations vector */
void *v_data; /* u private data for fs */
@@ -231,12 +232,13 @@ struct xvnode {
* VI flags are protected by interlock and live in v_iflag
* VV flags are protected by the vnode lock and live in v_vflag
*
-* VI_DOOMED is doubly protected by the interlock and vnode lock. Both
+* VIRF_DOOMED is doubly protected by the interlock and vnode lock. Both
* are required for writing but the status may be checked with either.
*/
+#define VIRF_DOOMED 0x0001 /* This vnode is being recycled */
#define VI_TEXT_REF 0x0001 /* Text ref grabbed use ref */
#define VI_MOUNT 0x0020 /* Mount in progress */
-#define VI_DOOMED 0x0080 /* This vnode is being recycled */
#define VI_FREE 0x0100 /* This vnode is on the freelist */
#define VI_ACTIVE 0x0200 /* This vnode is on the active list */
#define VI_DOINGINACT 0x0800 /* VOP_INACTIVE is in progress */
@@ -889,6 +891,8 @@ do { \
#define VOP_UNSET_TEXT_CHECKED(vp) VOP_UNSET_TEXT((vp))
#endif
+#define VN_IS_DOOMED(vp) ((vp)->v_irflag & VIRF_DOOMED)
void vput(struct vnode *vp);
void vrele(struct vnode *vp);
void vref(struct vnode *vp);

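With VN_IS_DOOMED() in place, the conversion repeated throughout this commit follows a single pattern: after (re)acquiring the vnode lock, reclamation is tested through the macro instead of by reading v_iflag. A representative sketch, mirroring the hunks above (the exact errno returned varies by caller):

vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (VN_IS_DOOMED(vp)) {
	/* The vnode was reclaimed, e.g. by a forced unmount. */
	VOP_UNLOCK(vp, 0);
	return (ENOENT);
}
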
View File

@@ -124,7 +124,7 @@ ffs_update(vp, waitfor)
*
* Hold a reference to the vnode to protect against
* ffs_snapgone(). Since we hold a reference, it can only
-* get reclaimed (VI_DOOMED flag) in a forcible downgrade
+* get reclaimed (VIRF_DOOMED flag) in a forcible downgrade
* or unmount. For an unmount, the entire filesystem will be
* gone, so we cannot attempt to touch anything associated
* with it while the vnode is unlocked; all we can do is
@@ -137,7 +137,7 @@ ffs_update(vp, waitfor)
pause("ffsupd", 1);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vrele(vp);
-if ((vp->v_iflag & VI_DOOMED) != 0)
+if (VN_IS_DOOMED(vp))
return (ENOENT);
goto loop;
}

View File

@@ -131,7 +131,7 @@ ffs_rawread_sync(struct vnode *vp)
VI_LOCK(vp);
/* Check if vnode was reclaimed while unlocked. */
-if ((vp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(vp)) {
VI_UNLOCK(vp);
if (upgraded != 0)
VOP_LOCK(vp, LK_DOWNGRADE);

View File

@@ -12511,7 +12511,7 @@ softdep_fsync(vp)
* not now, but then the user was not asking to have it
* written, so we are not breaking any promises.
*/
-if (vp->v_iflag & VI_DOOMED)
+if (VN_IS_DOOMED(vp))
break;
/*
* We prevent deadlock by always fetching inodes from the
@@ -12532,7 +12532,7 @@ softdep_fsync(vp)
error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE,
&pvp, FFSV_FORCEINSMQ);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
-if (vp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vp)) {
if (error == 0)
vput(pvp);
error = ENOENT;

View File

@@ -102,7 +102,7 @@ ufs_inactive(ap)
loop:
if (vn_start_secondary_write(vp, &mp, V_NOWAIT) != 0) {
/* Cannot delete file while file system is suspended */
-if ((vp->v_iflag & VI_DOOMED) != 0) {
+if (VN_IS_DOOMED(vp)) {
/* Cannot return before file is deleted */
(void) vn_start_secondary_write(vp, &mp,
V_WAIT);

View File

@@ -728,7 +728,7 @@ ufs_lookup_ino(struct vnode *vdp, struct vnode **vpp, struct componentname *cnp,
* Relock for the "." case may left us with
* reclaimed vnode.
*/
-if (vdp->v_iflag & VI_DOOMED) {
+if (VN_IS_DOOMED(vdp)) {
vrele(vdp);
return (ENOENT);
}

View File

@@ -2917,7 +2917,7 @@ swapongeom(struct vnode *vp)
int error;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
-if (vp->v_type != VCHR || (vp->v_iflag & VI_DOOMED) != 0) {
+if (vp->v_type != VCHR || VN_IS_DOOMED(vp)) {
error = ENOENT;
} else {
g_topology_lock();

View File

@@ -374,7 +374,7 @@ vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
* If no vp or vp is doomed or marked transparent to VM, we do not
* have the page.
*/
-if (vp == NULL || vp->v_iflag & VI_DOOMED)
+if (vp == NULL || VN_IS_DOOMED(vp))
return FALSE;
/*
* If the offset is beyond end of file we do
@@ -553,7 +553,7 @@ vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
if (address < 0)
return -1;
-if (vp->v_iflag & VI_DOOMED)
+if (VN_IS_DOOMED(vp))
return -1;
bsize = vp->v_mount->mnt_stat.f_iosize;
@@ -591,7 +591,7 @@ vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
error = 0;
vp = object->handle;
-if (vp->v_iflag & VI_DOOMED)
+if (VN_IS_DOOMED(vp))
return VM_PAGER_BAD;
bsize = vp->v_mount->mnt_stat.f_iosize;
@@ -815,7 +815,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
("%s does not support devices", __func__));
-if (vp->v_iflag & VI_DOOMED)
+if (VN_IS_DOOMED(vp))
return (VM_PAGER_BAD);
object = vp->v_object;