- Replace v_flag with v_iflag and v_vflag

- v_vflag is protected by the vnode lock and is used when synchronization
  with VOP calls is needed.
- v_iflag is protected by interlock and is used for dealing with vnode
  management issues.  These flags include X/O LOCK, FREE, DOOMED, etc.
- All accesses to v_iflag and v_vflag have either been locked or marked with
  mp_fixme's.
- Many ASSERT_VOP_LOCKED calls have been added where the locking was not
  clear.
- Many functions in vfs_subr.c were restructured to provide for stronger
  locking.

Idea stolen from:	BSD/OS
This commit is contained in:
jeff 2002-08-04 10:29:36 +00:00
parent a96dd1fe9f
commit da7d3beee0
88 changed files with 619 additions and 413 deletions

View File

@ -188,15 +188,16 @@ exec_osf1_imgact(struct image_params *imgp)
imgp->interpreted = 0;
imgp->proc->p_sysent = &osf1_sysvec;
mp_fixme("Unlocked writecount and v_vflag access.");
if ((eap->tsize != 0 || eap->dsize != 0) &&
imgp->vp->v_writecount != 0) {
#ifdef DIAGNOSTIC
if (imgp->vp->v_flag & VTEXT)
panic("exec: a VTEXT vnode has writecount != 0\n");
if (imgp->vp->v_vflag & VV_TEXT)
panic("exec: a VV_TEXT vnode has writecount != 0\n");
#endif
return ETXTBSY;
}
imgp->vp->v_flag |= VTEXT;
imgp->vp->v_vflag |= VV_TEXT;
/* set up text segment */
if ((error = vm_mmap(&vmspace->vm_map, &taddr, tsize,

View File

@ -613,7 +613,8 @@ coda_nc_flush(dcstat)
}
vrele(CTOV(cncp->dcp));
if (CTOV(cncp->cp)->v_flag & VTEXT) {
ASSERT_VOP_LOCKED(CTOV(cncp->cp), "coda_nc_flush");
if (CTOV(cncp->cp)->v_vflag & VV_TEXT) {
if (coda_vmflush(cncp->cp))
CODADEBUG(CODA_FLUSH,
myprintf(("coda_nc_flush: (%lx.%lx.%lx) busy\n", cncp->cp->c_fid.Volume, cncp->cp->c_fid.Vnode, cncp->cp->c_fid.Unique)); )

View File

@ -413,7 +413,8 @@ int handleDownCall(opcode, out)
vref(CTOV(cp));
cp->c_flags &= ~C_VATTR;
if (CTOV(cp)->v_flag & VTEXT)
ASSERT_VOP_LOCKED(CTOV(cp), "coda HandleDownCall");
if (CTOV(cp)->v_vflag & VV_TEXT)
error = coda_vmflush(cp);
CODADEBUG(CODA_ZAPFILE, myprintf((
"zapfile: fid = (%lx.%lx.%lx), refcnt = %d, error = %d\n",
@ -470,8 +471,9 @@ int handleDownCall(opcode, out)
}
cp->c_flags &= ~C_VATTR;
coda_nc_zapfid(&out->coda_purgefid.CodaFid, IS_DOWNCALL);
ASSERT_VOP_LOCKED(CTOV(cp), "coda HandleDownCall");
if (!(ODD(out->coda_purgefid.CodaFid.Vnode))
&& (CTOV(cp)->v_flag & VTEXT)) {
&& (CTOV(cp)->v_vflag & VV_TEXT)) {
error = coda_vmflush(cp);
}

View File

@ -192,7 +192,7 @@ coda_mount(vfsp, path, data, ndp, td)
rootfid.Unique = 0;
cp = make_coda_node(&rootfid, vfsp, VDIR);
rootvp = CTOV(cp);
rootvp->v_flag |= VROOT;
rootvp->v_vflag |= VV_ROOT;
ctlfid.Volume = CTL_VOL;
ctlfid.Vnode = CTL_VNO;
@ -257,7 +257,8 @@ coda_unmount(vfsp, mntflags, td)
vrele(mi->mi_rootvp);
active = coda_kill(vfsp, NOT_DOWNCALL);
mi->mi_rootvp->v_flag &= ~VROOT;
ASSERT_VOP_LOCKED(mi->mi_rootvp, "coda_unmount");
mi->mi_rootvp->v_vflag &= ~VV_ROOT;
error = vflush(mi->mi_vfsp, 0, FORCECLOSE);
printf("coda_unmount: active = %d, vflush active %d\n", active, error);
error = 0;

View File

@ -351,7 +351,7 @@ linux_getcwd_common (lvp, rvp, bpp, bufp, limit, flags, td)
/*
* step up if we're a covered vnode..
*/
while (lvp->v_flag & VROOT) {
while (lvp->v_vflag & VV_ROOT) {
struct vnode *tvp;
if (lvp == rvp)

View File

@ -381,8 +381,9 @@ linux_uselib(struct thread *td, struct linux_uselib_args *args)
goto cleanup;
}
mp_fixme("Unlocked vflags access.");
/* prevent more writers */
vp->v_flag |= VTEXT;
vp->v_vflag |= VV_TEXT;
/*
* Check if file_offset page aligned. Currently we cannot handle

View File

@ -533,7 +533,8 @@ exec_pecoff_coff_prep_zmagic(struct image_params * imgp,
imgp->auxarg_size = sizeof(struct pecoff_args);
imgp->interpreted = 0;
imgp->vp->v_flag |= VTEXT;
mp_fixme("Unlocked vflag access.");
imgp->vp->v_vflag |= VV_TEXT;
if (sh != NULL)
free(sh, M_TEMP);
return 0;

View File

@ -216,7 +216,8 @@ exec_svr4_imgact(imgp)
}
/* Indicate that this file should not be modified */
imgp->vp->v_flag |= VTEXT;
mp_fixme("Unlocked vflag access.");
imgp->vp->v_vflag |= VV_TEXT;
}
/* Fill in process VM information */
vmspace->vm_tsize = round_page(a_out->a_text) >> PAGE_SHIFT;

View File

@ -869,7 +869,7 @@ cd9660_vget_internal(mp, ino, flags, vpp, relocated, isodir)
}
if (ip->iso_extent == imp->root_extent)
vp->v_flag |= VROOT;
vp->v_vflag |= VV_ROOT;
/*
* XXX need generation number?

View File

@ -613,7 +613,8 @@ coda_nc_flush(dcstat)
}
vrele(CTOV(cncp->dcp));
if (CTOV(cncp->cp)->v_flag & VTEXT) {
ASSERT_VOP_LOCKED(CTOV(cncp->cp), "coda_nc_flush");
if (CTOV(cncp->cp)->v_vflag & VV_TEXT) {
if (coda_vmflush(cncp->cp))
CODADEBUG(CODA_FLUSH,
myprintf(("coda_nc_flush: (%lx.%lx.%lx) busy\n", cncp->cp->c_fid.Volume, cncp->cp->c_fid.Vnode, cncp->cp->c_fid.Unique)); )

View File

@ -413,7 +413,8 @@ int handleDownCall(opcode, out)
vref(CTOV(cp));
cp->c_flags &= ~C_VATTR;
if (CTOV(cp)->v_flag & VTEXT)
ASSERT_VOP_LOCKED(CTOV(cp), "coda HandleDownCall");
if (CTOV(cp)->v_vflag & VV_TEXT)
error = coda_vmflush(cp);
CODADEBUG(CODA_ZAPFILE, myprintf((
"zapfile: fid = (%lx.%lx.%lx), refcnt = %d, error = %d\n",
@ -470,8 +471,9 @@ int handleDownCall(opcode, out)
}
cp->c_flags &= ~C_VATTR;
coda_nc_zapfid(&out->coda_purgefid.CodaFid, IS_DOWNCALL);
ASSERT_VOP_LOCKED(CTOV(cp), "coda HandleDownCall");
if (!(ODD(out->coda_purgefid.CodaFid.Vnode))
&& (CTOV(cp)->v_flag & VTEXT)) {
&& (CTOV(cp)->v_vflag & VV_TEXT)) {
error = coda_vmflush(cp);
}

View File

@ -192,7 +192,7 @@ coda_mount(vfsp, path, data, ndp, td)
rootfid.Unique = 0;
cp = make_coda_node(&rootfid, vfsp, VDIR);
rootvp = CTOV(cp);
rootvp->v_flag |= VROOT;
rootvp->v_vflag |= VV_ROOT;
ctlfid.Volume = CTL_VOL;
ctlfid.Vnode = CTL_VNO;
@ -257,7 +257,8 @@ coda_unmount(vfsp, mntflags, td)
vrele(mi->mi_rootvp);
active = coda_kill(vfsp, NOT_DOWNCALL);
mi->mi_rootvp->v_flag &= ~VROOT;
ASSERT_VOP_LOCKED(mi->mi_rootvp, "coda_unmount");
mi->mi_rootvp->v_vflag &= ~VV_ROOT;
error = vflush(mi->mi_vfsp, 0, FORCECLOSE);
printf("coda_unmount: active = %d, vflush active %d\n", active, error);
error = 0;

View File

@ -145,7 +145,7 @@ dead_read(ap)
/*
* Return EOF for tty devices, EIO for others
*/
if ((ap->a_vp->v_flag & VISTTY) == 0)
if ((ap->a_vp->v_vflag & VV_ISTTY) == 0)
return (EIO);
return (0);
}
@ -262,11 +262,13 @@ chkvnlock(vp)
{
int locked = 0;
while (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
(void) tsleep((caddr_t)vp, PINOD, "ckvnlk", 0);
VI_LOCK(vp);
while (vp->v_iflag & VI_XLOCK) {
vp->v_iflag |= VI_XWANT;
(void) msleep((caddr_t)vp, VI_MTX(vp), PINOD, "ckvnlk", 0);
locked = 1;
}
VI_UNLOCK(vp);
return (locked);
}

View File

@ -157,7 +157,7 @@ devfs_root(mp, vpp)
error = devfs_allocv(dmp->dm_rootdir, mp, &vp, td);
if (error)
return (error);
vp->v_flag |= VROOT;
vp->v_vflag |= VV_ROOT;
*vpp = vp;
return (0);
}

View File

@ -313,7 +313,7 @@ devfs_lookupx(ap)
if (dvp->v_type != VDIR)
return (ENOTDIR);
if ((flags & ISDOTDOT) && (dvp->v_flag & VROOT))
if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
return (EIO);
error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);

View File

@ -91,7 +91,7 @@ fdesc_mount(mp, ndp, td)
MALLOC(fmp, struct fdescmount *, sizeof(struct fdescmount),
M_FDESCMNT, M_WAITOK); /* XXX */
rvp->v_type = VDIR;
rvp->v_flag |= VROOT;
rvp->v_vflag |= VV_ROOT;
fmp->f_root = rvp;
/* XXX -- don't mark as local to work around fts() problems */
/*mp->mnt_flag |= MNT_LOCAL;*/

View File

@ -508,7 +508,7 @@ hpfs_vget(
vp->v_data = hp;
if (ino == (ino_t)hpmp->hpm_su.su_rootfno)
vp->v_flag |= VROOT;
vp->v_vflag |= VV_ROOT;
mtx_init(&hp->h_interlock, "hpfsnode interlock", NULL, MTX_DEF);

View File

@ -107,10 +107,13 @@ hpfs_fsync(ap)
(void) bwrite(bp);
goto loop;
}
VI_LOCK(vp);
while (vp->v_numoutput) {
vp->v_flag |= VBWAIT;
(void) tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "hpfsn", 0);
vp->v_iflag |= VI_BWAIT;
msleep((caddr_t)&vp->v_numoutput, VI_MTX(vp), PRIBIO + 1,
"hpfsn", 0);
}
VI_UNLOCK(vp);
#ifdef DIAGNOSTIC
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
vprint("hpfs_fsync: dirty", vp);

View File

@ -300,7 +300,7 @@ deget(pmp, dirclust, diroffset, depp)
* exists), and then use the time and date from that entry
* as the time and date for the root denode.
*/
nvp->v_flag |= VROOT; /* should be further down XXX */
nvp->v_vflag |= VV_ROOT; /* should be further down XXX */
ldep->de_Attributes = ATTR_DIRECTORY;
ldep->de_LowerCase = 0;
@ -442,7 +442,7 @@ detrunc(dep, length, flags, cred, td)
* recognize the root directory at this point in a file or
* directory's life.
*/
if ((DETOV(dep)->v_flag & VROOT) && !FAT32(pmp)) {
if ((DETOV(dep)->v_vflag & VV_ROOT) && !FAT32(pmp)) {
printf("detrunc(): can't truncate root directory, clust %ld, offset %ld\n",
dep->de_dirclust, dep->de_diroffset);
return (EINVAL);
@ -575,7 +575,7 @@ deextend(dep, length, cred)
/*
* The root of a DOS filesystem cannot be extended.
*/
if ((DETOV(dep)->v_flag & VROOT) && !FAT32(pmp))
if ((DETOV(dep)->v_vflag & VV_ROOT) && !FAT32(pmp))
return (EINVAL);
/*

View File

@ -136,7 +136,7 @@ msdosfs_lookup(ap)
* they won't find it. DOS filesystems don't have them in the root
* directory. So, we fake it. deget() is in on this scam too.
*/
if ((vdp->v_flag & VROOT) && cnp->cn_nameptr[0] == '.' &&
if ((vdp->v_vflag & VV_ROOT) && cnp->cn_nameptr[0] == '.' &&
(cnp->cn_namelen == 1 ||
(cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.'))) {
isadir = ATTR_DIRECTORY;

View File

@ -632,8 +632,9 @@ msdosfs_unmount(mp, mntflags, td)
struct vnode *vp = pmp->pm_devvp;
printf("msdosfs_umount(): just before calling VOP_CLOSE()\n");
printf("flag %08lx, usecount %d, writecount %d, holdcnt %ld\n",
vp->v_flag, vp->v_usecount, vp->v_writecount, vp->v_holdcnt);
printf("iflag %08lx, usecount %d, writecount %d, holdcnt %ld\n",
vp->v_iflag, vp->v_usecount, vp->v_writecount,
vp->v_holdcnt);
printf("id %lu, mount %p, op %p\n",
vp->v_id, vp->v_mount, vp->v_op);
printf("freef %p, freeb %p, mount %p\n",

View File

@ -815,6 +815,7 @@ msdosfs_fsync(ap)
*/
loop:
s = splbio();
VI_LOCK(vp);
for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
@ -823,13 +824,17 @@ msdosfs_fsync(ap)
panic("msdosfs_fsync: not dirty");
bremfree(bp);
splx(s);
VI_UNLOCK(vp);
/* XXX Could do bawrite */
(void) bwrite(bp);
goto loop;
}
while (vp->v_numoutput) {
vp->v_flag |= VBWAIT;
(void) tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "msdosfsn", 0);
vp->v_iflag |= VI_BWAIT;
(void) msleep((caddr_t)&vp->v_numoutput, VI_MTX(vp),
PRIBIO + 1, "msdosfsn", 0);
}
VI_UNLOCK(vp);
#ifdef DIAGNOSTIC
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
vprint("msdosfs_fsync: dirty", vp);

View File

@ -369,7 +369,7 @@ ntfs_mountfs(devvp, mp, argsp, td)
&(ntmp->ntm_sysvn[pi[i]]));
if(error)
goto out1;
ntmp->ntm_sysvn[pi[i]]->v_flag |= VSYSTEM;
ntmp->ntm_sysvn[pi[i]]->v_vflag |= VV_SYSTEM;
VREF(ntmp->ntm_sysvn[pi[i]]);
vput(ntmp->ntm_sysvn[pi[i]]);
}
@ -746,7 +746,7 @@ ntfs_vgetex(
vp->v_type = f_type;
if (ino == NTFS_ROOTINO)
vp->v_flag |= VROOT;
vp->v_vflag |= VV_ROOT;
ntfs_ntput(ip);

View File

@ -187,8 +187,9 @@ nullfs_mount(mp, ndp, td)
* Keep a held reference to the root vnode.
* It is vrele'd in nullfs_unmount.
*/
mp_fixme("Unlocked vflag access");
nullm_rootvp = vp;
nullm_rootvp->v_flag |= VROOT;
nullm_rootvp->v_vflag |= VV_ROOT;
xmp->nullm_rootvp = nullm_rootvp;
if (NULLVPTOLOWERVP(nullm_rootvp)->v_mount->mnt_flag & MNT_LOCAL)
mp->mnt_flag |= MNT_LOCAL;

View File

@ -794,7 +794,7 @@ null_createvobject(ap)
error = VOP_CREATEVOBJECT(lowervp, ap->a_cred, ap->a_td);
if (error)
return (error);
vp->v_flag |= VOBJBUF;
vp->v_vflag |= VV_OBJBUF;
return (0);
}
@ -809,7 +809,7 @@ null_destroyvobject(ap)
{
struct vnode *vp = ap->a_vp;
vp->v_flag &= ~VOBJBUF;
vp->v_vflag &= ~VV_OBJBUF;
return (0);
}

View File

@ -599,9 +599,13 @@ nwfs_vinvalbuf(vp, flags, cred, td, intrflg)
/* struct nwmount *nmp = VTONWFS(vp);*/
int error = 0, slpflag, slptimeo;
if (vp->v_flag & VXLOCK) {
VI_LOCK(vp);
if (vp->v_iflag & VI_XLOCK) {
VI_UNLOCK(vp);
return (0);
}
VI_UNLOCK(vp);
if (intrflg) {
slpflag = PCATCH;
slptimeo = 2 * hz;

View File

@ -204,7 +204,8 @@ nwfs_allocvp(struct mount *mp, ncpfid fid, struct nw_entry_info *fap,
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
lockmgr(&nwhashlock, LK_RELEASE, NULL, td);
if (vp->v_type == VDIR && dvp && (dvp->v_flag & VROOT) == 0) {
ASSERT_VOP_LOCKED(dvp, "nwfs_allocvp");
if (vp->v_type == VDIR && dvp && (dvp->v_vflag & VV_ROOT) == 0) {
np->n_flag |= NREFPARENT;
vref(dvp);
}

View File

@ -340,7 +340,7 @@ nwfs_root(struct mount *mp, struct vnode **vpp) {
error = nwfs_nget(mp, nmp->n_rootent, &fattr, NULL, &vp);
if (error)
return (error);
vp->v_flag |= VROOT;
vp->v_vflag |= VV_ROOT;
np = VTONW(vp);
if (nmp->m.root_path[0] == 0)
np->n_flag |= NVOLUME;

View File

@ -289,7 +289,7 @@ nwfs_getattr(ap)
int error;
u_int32_t oldsize;
NCPVNDEBUG("%lx:%d: '%s' %d\n", (long)vp, nmp->n_volume, np->n_name, (vp->v_flag & VROOT) != 0);
NCPVNDEBUG("%lx:%d: '%s' %d\n", (long)vp, nmp->n_volume, np->n_name, (vp->v_vflag & VV_ROOT) != 0);
error = nwfs_attr_cachelookup(vp, va);
if (!error) return 0;
NCPVNDEBUG("not in cache\n");
@ -859,7 +859,7 @@ nwfs_lookup(ap)
if (dvp->v_type != VDIR)
return (ENOTDIR);
if ((flags & ISDOTDOT) && (dvp->v_flag & VROOT)) {
if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT)) {
printf("nwfs_lookup: invalid '..'\n");
return EIO;
}
@ -877,7 +877,7 @@ nwfs_lookup(ap)
nmp = VFSTONWFS(mp);
dnp = VTONW(dvp);
/*
printf("dvp %d:%d:%d\n", (int)mp, (int)dvp->v_flag & VROOT, (int)flags & ISDOTDOT);
printf("dvp %d:%d:%d\n", (int)mp, (int)dvp->v_vflag & VV_ROOT, (int)flags & ISDOTDOT);
*/
error = ncp_pathcheck(cnp->cn_nameptr, cnp->cn_namelen, &nmp->m.nls,
(nameiop == CREATE || nameiop == RENAME) && (nmp->m.nls.opt & NWHP_NOSTRICT) == 0);

View File

@ -128,7 +128,7 @@ portal_mount(mp, path, data, ndp, td)
rvp->v_data = pn;
rvp->v_type = VDIR;
rvp->v_flag |= VROOT;
rvp->v_vflag |= VV_ROOT;
VTOPORTAL(rvp)->pt_arg = 0;
VTOPORTAL(rvp)->pt_size = 0;
VTOPORTAL(rvp)->pt_fileid = PORTAL_ROOTFILEID;

View File

@ -229,7 +229,7 @@ portal_open(ap)
/*
* Nothing to do when opening the root node.
*/
if (vp->v_flag & VROOT)
if (vp->v_vflag & VV_ROOT)
return (0);
/*
@ -462,7 +462,7 @@ portal_getattr(ap)
/* vap->va_qbytes = 0; */
vap->va_bytes = 0;
/* vap->va_qsize = 0; */
if (vp->v_flag & VROOT) {
if (vp->v_vflag & VV_ROOT) {
vap->va_type = VDIR;
vap->va_mode = S_IRUSR|S_IWUSR|S_IXUSR|
S_IRGRP|S_IWGRP|S_IXGRP|
@ -493,7 +493,7 @@ portal_setattr(ap)
/*
* Can't mess with the root vnode
*/
if (ap->a_vp->v_flag & VROOT)
if (ap->a_vp->v_vflag & VV_ROOT)
return (EACCES);
if (ap->a_vap->va_flags != VNOVAL)

View File

@ -143,7 +143,7 @@ pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
(*vpp)->v_data = pvd;
switch (pn->pn_type) {
case pfstype_root:
(*vpp)->v_flag = VROOT;
(*vpp)->v_vflag = VV_ROOT;
#if 0
printf("root vnode allocated\n");
#endif

View File

@ -671,8 +671,13 @@ smbfs_vinvalbuf(vp, flags, cred, td, intrflg)
struct smbnode *np = VTOSMB(vp);
int error = 0, slpflag, slptimeo;
if (vp->v_flag & VXLOCK)
VI_LOCK(vp);
if (vp->v_iflag & VI_XLOCK) {
VI_UNLOCK(vp);
return 0;
}
VI_UNLOCK(vp);
if (intrflg) {
slpflag = PCATCH;
slptimeo = 2 * hz;

View File

@ -235,8 +235,9 @@ smbfs_node_alloc(struct mount *mp, struct vnode *dvp,
np->n_ino = fap->fa_ino;
if (dvp) {
ASSERT_VOP_LOCKED(dvp, "smbfs_node_alloc");
np->n_parent = dnp;
if (/*vp->v_type == VDIR &&*/ (dvp->v_flag & VROOT) == 0) {
if (/*vp->v_type == VDIR &&*/ (dvp->v_vflag & VV_ROOT) == 0) {
vref(dvp);
np->n_flag |= NREFPARENT;
}

View File

@ -293,7 +293,8 @@ smbfs_root(struct mount *mp, struct vnode **vpp)
error = smbfs_nget(mp, NULL, "TheRooT", 7, &fattr, &vp);
if (error)
return error;
vp->v_flag |= VROOT;
ASSERT_VOP_LOCKED(vp, "smbfs_root");
vp->v_vflag |= VV_ROOT;
np = VTOSMB(vp);
smp->sm_root = np;
*vpp = vp;

View File

@ -293,7 +293,7 @@ smbfs_close(ap)
int error, dolock;
VI_LOCK(vp);
dolock = (vp->v_flag & VXLOCK) == 0;
dolock = (vp->v_iflag & VI_XLOCK) == 0;
if (dolock)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, td);
else
@ -324,7 +324,7 @@ smbfs_getattr(ap)
u_int32_t oldsize;
int error;
SMBVDEBUG("%lx: '%s' %d\n", (long)vp, np->n_name, (vp->v_flag & VROOT) != 0);
SMBVDEBUG("%lx: '%s' %d\n", (long)vp, np->n_name, (vp->v_vflag & VV_ROOT) != 0);
error = smbfs_attr_cachelookup(vp, va);
if (!error)
return 0;
@ -1135,7 +1135,7 @@ smbfs_lookup(ap)
cnp->cn_flags &= ~PDIRUNLOCK;
if (dvp->v_type != VDIR)
return ENOTDIR;
if ((flags & ISDOTDOT) && (dvp->v_flag & VROOT)) {
if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT)) {
SMBFSERR("invalid '..'\n");
return EIO;
}

View File

@ -195,7 +195,7 @@ spec_open(ap)
/* XXX: Special casing of ttys for deadfs. Probably redundant. */
if (dsw->d_flags & D_TTY)
vp->v_flag |= VISTTY;
vp->v_vflag |= VV_ISTTY;
VOP_UNLOCK(vp, 0, td);
error = (*dsw->d_open)(dev, ap->a_mode, S_IFCHR, td);
@ -401,7 +401,7 @@ spec_fsync(ap)
continue;
if ((bp->b_flags & B_DELWRI) == 0)
panic("spec_fsync: not dirty");
if ((vp->v_flag & VOBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
if ((vp->v_vflag & VV_OBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
BUF_UNLOCK(bp);
vfs_bio_awrite(bp);
splx(s);
@ -420,11 +420,13 @@ spec_fsync(ap)
* retry if dirty blocks still exist.
*/
if (ap->a_waitfor == MNT_WAIT) {
VI_LOCK(vp);
while (vp->v_numoutput) {
vp->v_flag |= VBWAIT;
(void)tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1,
"spfsyn", 0);
vp->v_iflag |= VI_BWAIT;
msleep((caddr_t)&vp->v_numoutput, VI_MTX(vp),
PRIBIO + 1, "spfsyn", 0);
}
VI_UNLOCK(vp);
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
if (--maxretry != 0) {
splx(s);
@ -462,7 +464,9 @@ spec_strategy(ap)
bp->b_flags &= ~B_VALIDSUSPWRT;
if (LIST_FIRST(&bp->b_dep) != NULL)
buf_start(bp);
if ((vp->v_flag & VCOPYONWRITE) && vp->v_rdev->si_copyonwrite &&
mp_fixme("This should require the vnode lock.");
if ((vp->v_vflag & VV_COPYONWRITE) &&
vp->v_rdev->si_copyonwrite &&
(error = (*vp->v_rdev->si_copyonwrite)(vp, bp)) != 0 &&
error != EOPNOTSUPP) {
bp->b_io.bio_error = error;
@ -580,6 +584,7 @@ spec_close(ap)
struct thread *td = ap->a_td;
dev_t dev = vp->v_rdev;
mp_fixme("Use of v_iflags bogusly locked.");
/*
* Hack: a tty device that is a controlling terminal
* has a reference from the session structure.
@ -589,9 +594,15 @@ spec_close(ap)
* if the reference count is 2 (this last descriptor
* plus the session), release the reference from the session.
*/
/*
* This needs to be rewritten to take the vp interlock into
* consideration.
*/
oldvp = NULL;
sx_xlock(&proctree_lock);
if (vcount(vp) == 2 && td && (vp->v_flag & VXLOCK) == 0 &&
if (vcount(vp) == 2 && td && (vp->v_iflag & VI_XLOCK) == 0 &&
vp == td->td_proc->p_session->s_ttyvp) {
SESS_LOCK(td->td_proc->p_session);
td->td_proc->p_session->s_ttyvp = NULL;
@ -610,7 +621,7 @@ spec_close(ap)
* sum of the reference counts on all the aliased
* vnodes descends to one, we are on last close.
*/
if (vp->v_flag & VXLOCK) {
if (vp->v_iflag & VI_XLOCK) {
/* Forced close. */
} else if (devsw(dev)->d_flags & D_TRACKCLOSE) {
/* Keep device updated on status. */

View File

@ -497,7 +497,7 @@ udf_root(struct mount *mp, struct vnode **vpp)
return error;
vp = *vpp;
vp->v_flag |= VROOT;
vp->v_vflag |= VV_ROOT;
udfmp->root_vp = vp;
return (0);

View File

@ -223,8 +223,9 @@ umapfs_mount(mp, path, data, ndp, td)
* Keep a held reference to the root vnode.
* It is vrele'd in umapfs_unmount.
*/
ASSERT_VOP_LOCKED(vp, "umapfs_mount");
umapm_rootvp = vp;
umapm_rootvp->v_flag |= VROOT;
umapm_rootvp->v_vflag |= VV_ROOT;
amp->umapm_rootvp = umapm_rootvp;
if (UMAPVPTOLOWERVP(umapm_rootvp)->v_mount->mnt_flag & MNT_LOCAL)
mp->mnt_flag |= MNT_LOCAL;

View File

@ -373,7 +373,7 @@ union_allocvp(vpp, mp, dvp, upperdvp, cnp, uppervp, lowervp, docache)
if (lowervp != NULLVP)
VREF(lowervp);
}
vflag = VROOT;
vflag = VV_ROOT;
}
loop:
@ -563,7 +563,8 @@ union_allocvp(vpp, mp, dvp, upperdvp, cnp, uppervp, lowervp, docache)
MALLOC((*vpp)->v_data, void *, sizeof(struct union_node),
M_TEMP, M_WAITOK);
(*vpp)->v_flag |= vflag;
ASSERT_VOP_LOCKED(*vpp, "union_allocvp");
(*vpp)->v_vflag |= vflag;
if (uppervp)
(*vpp)->v_type = uppervp->v_type;
else

View File

@ -190,7 +190,7 @@ union_lookup1(udvp, pdvp, vpp, cnp)
* hierarchy.
*/
if (cnp->cn_flags & ISDOTDOT) {
while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
while ((dvp != udvp) && (dvp->v_vflag & VV_ROOT)) {
/*
* Don't do the NOCROSSMOUNT check
* at this level. By definition,
@ -1625,7 +1625,7 @@ union_getwritemount(ap)
if (uvp == NULL) {
VI_LOCK(vp);
if (vp->v_flag & VFREE) {
if (vp->v_iflag & VI_FREE) {
VI_UNLOCK(vp);
return (EOPNOTSUPP);
}
@ -1788,7 +1788,7 @@ union_createvobject(ap)
{
struct vnode *vp = ap->a_vp;
vp->v_flag |= VOBJBUF;
vp->v_vflag |= VV_OBJBUF;
return (0);
}
@ -1803,7 +1803,7 @@ union_destroyvobject(ap)
{
struct vnode *vp = ap->a_vp;
vp->v_flag &= ~VOBJBUF;
vp->v_vflag &= ~VV_OBJBUF;
return (0);
}

View File

@ -718,10 +718,13 @@ ext2_fsync(ap)
goto loop;
}
if (ap->a_waitfor == MNT_WAIT) {
VI_LOCK(vp);
while (vp->v_numoutput) {
vp->v_flag |= VBWAIT;
tsleep(&vp->v_numoutput, PRIBIO + 1, "e2fsyn", 0);
vp->v_iflag |= VI_BWAIT;
msleep(&vp->v_numoutput, VI_MTX(vp),
PRIBIO + 1, "e2fsyn", 0);
}
VI_UNLOCK(vp);
#if DIAGNOSTIC
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
vprint("ext2_fsync: dirty", vp);
@ -1861,7 +1864,7 @@ ext2_vinit(mntp, specops, fifoops, vpp)
}
if (ip->i_number == ROOTINO)
vp->v_flag |= VROOT;
vp->v_vflag |= VV_ROOT;
/*
* Initialize modrev times
*/

View File

@ -718,10 +718,13 @@ ext2_fsync(ap)
goto loop;
}
if (ap->a_waitfor == MNT_WAIT) {
VI_LOCK(vp);
while (vp->v_numoutput) {
vp->v_flag |= VBWAIT;
tsleep(&vp->v_numoutput, PRIBIO + 1, "e2fsyn", 0);
vp->v_iflag |= VI_BWAIT;
msleep(&vp->v_numoutput, VI_MTX(vp),
PRIBIO + 1, "e2fsyn", 0);
}
VI_UNLOCK(vp);
#if DIAGNOSTIC
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
vprint("ext2_fsync: dirty", vp);
@ -1861,7 +1864,7 @@ ext2_vinit(mntp, specops, fifoops, vpp)
}
if (ip->i_number == ROOTINO)
vp->v_flag |= VROOT;
vp->v_vflag |= VV_ROOT;
/*
* Initialize modrev times
*/

View File

@ -473,7 +473,8 @@ exec_coff_imgact(imgp)
DPRINTF(("%s(%d): returning successfully!\n", __FILE__, __LINE__));
/* Indicate that this file should not be modified */
imgp->vp->v_flag |= VTEXT;
mp_fixme("Unlocked v_flag access");
imgp->vp->v_vflag |= VV_TEXT;
return 0;
}

View File

@ -215,7 +215,8 @@ exec_linux_imgact(imgp)
}
/* Indicate that this file should not be modified */
imgp->vp->v_flag |= VTEXT;
mp_fixme("Unlocked v_flag access");
imgp->vp->v_vflag |= VV_TEXT;
}
/* Fill in process VM information */
vmspace->vm_tsize = round_page(a_out->a_text) >> PAGE_SHIFT;

View File

@ -869,7 +869,7 @@ cd9660_vget_internal(mp, ino, flags, vpp, relocated, isodir)
}
if (ip->iso_extent == imp->root_extent)
vp->v_flag |= VROOT;
vp->v_vflag |= VV_ROOT;
/*
* XXX need generation number?

View File

@ -240,7 +240,8 @@ exec_aout_imgact(imgp)
imgp->proc->p_sysent = &aout_sysvec;
/* Indicate that this file should not be modified */
imgp->vp->v_flag |= VTEXT;
mp_fixme("Unlocked vflag access.");
imgp->vp->v_vflag |= VV_TEXT;
return (0);
}

View File

@ -500,10 +500,11 @@ __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
error = exec_map_first_page(imgp);
/*
* Also make certain that the interpreter stays the same, so set
* its VTEXT flag, too.
* its VV_TEXT flag, too.
*/
if (error == 0)
nd->ni_vp->v_flag |= VTEXT;
nd->ni_vp->v_vflag |= VV_TEXT;
VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
vm_object_reference(imgp->object);
@ -628,10 +629,11 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
* VTEXT now since from here on out, there are places we can have
* a context switch. Better safe than sorry; I really don't want
* the file to change while it's being loaded.
*
* XXX We can't really set this flag safely without the vnode lock.
*/
mtx_lock(&imgp->vp->v_interlock);
imgp->vp->v_flag |= VTEXT;
mtx_unlock(&imgp->vp->v_interlock);
mp_fixme("This needs the vnode lock to be safe.");
imgp->vp->v_vflag |= VV_TEXT;
if ((error = exec_extract_strings(imgp)) != 0)
goto fail;

View File

@ -978,8 +978,9 @@ mac_update_vnode_from_mount(struct vnode *vp, struct mount *mp)
MAC_PERFORM(update_vnode_from_mount, vp, &vp->v_label, mp,
&mp->mnt_fslabel);
ASSERT_VOP_LOCKED(vp, "mac_update_vnode_from_mount");
if (mac_cache_fslabel_in_vnode)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
}
/*
@ -1031,7 +1032,7 @@ vop_stdrefreshlabel_ea(struct vop_refreshlabel_args *ap)
if (error == 0)
error = mac_update_vnode_from_externalized(vp, &extmac);
if (error == 0)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
else {
struct vattr va;
@ -1084,7 +1085,7 @@ vn_refreshlabel(struct vnode *vp, struct ucred *cred)
return (EBADF);
}
if (vp->v_flag & VCACHEDLABEL) {
if (vp->v_vflag & VV_CACHEDLABEL) {
mac_vnode_label_cache_hits++;
return (0);
} else
@ -1124,6 +1125,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
struct mac extmac;
int error;
ASSERT_VOP_LOCKED(tvp, "vop_stdcreatevnode_ea");
if ((dvp->v_mount->mnt_flag & MNT_MULTILABEL) == 0) {
mac_update_vnode_from_mount(tvp, tvp->v_mount);
} else {
@ -1156,7 +1158,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
FREEBSD_MAC_EXTATTR_NAMESPACE, FREEBSD_MAC_EXTATTR_NAME,
sizeof(extmac), (char *)&extmac, curthread);
if (error == 0)
tvp->v_flag |= VCACHEDLABEL;
tvp->v_vflag |= VV_CACHEDLABEL;
else {
#if 0
/*
@ -2771,7 +2773,7 @@ vop_stdsetlabel_ea(struct vop_setlabel_args *ap)
mac_relabel_vnode(ap->a_cred, vp, intlabel);
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
return (0);
}

View File

@ -2107,7 +2107,7 @@ inmem(struct vnode * vp, daddr_t blkno)
return 1;
if (vp->v_mount == NULL)
return 0;
if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_flag & VOBJBUF) == 0)
if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_vflag & VV_OBJBUF) == 0)
return 0;
size = PAGE_SIZE;
@ -2408,7 +2408,8 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
bsize = size;
offset = blkno * bsize;
vmio = (VOP_GETVOBJECT(vp, NULL) == 0) && (vp->v_flag & VOBJBUF);
vmio = (VOP_GETVOBJECT(vp, NULL) == 0) &&
(vp->v_vflag & VV_OBJBUF);
maxsize = vmio ? size + (offset & PAGE_MASK) : size;
maxsize = imax(maxsize, bsize);
@ -2912,11 +2913,12 @@ bufdone(struct buf *bp)
obj = bp->b_object;
#if defined(VFS_BIO_DEBUG)
mp_fixme("usecount and vflag accessed without locks.");
if (vp->v_usecount == 0) {
panic("biodone: zero vnode ref count");
}
if ((vp->v_flag & VOBJBUF) == 0) {
if ((vp->v_vflag & VV_OBJBUF) == 0) {
panic("biodone: vnode is not setup for merged cache");
}
#endif

View File

@ -726,8 +726,9 @@ __getcwd(td, uap)
fdp = td->td_proc->p_fd;
slash_prefixed = 0;
FILEDESC_LOCK(fdp);
mp_fixme("No vnode locking done!");
for (vp = fdp->fd_cdir; vp != fdp->fd_rdir && vp != rootvnode;) {
if (vp->v_flag & VROOT) {
if (vp->v_vflag & VV_ROOT) {
if (vp->v_mount == NULL) { /* forced unmount */
FILEDESC_UNLOCK(fdp);
free(buf, M_TEMP);
@ -827,6 +828,7 @@ vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
return (ENODEV);
if (vn == NULL)
return (EINVAL);
ASSERT_VOP_LOCKED(vn, "vn_fullpath");
buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
bp = buf + MAXPATHLEN - 1;
*bp = '\0';
@ -834,7 +836,7 @@ vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
slash_prefixed = 0;
FILEDESC_LOCK(fdp);
for (vp = vn; vp != fdp->fd_rdir && vp != rootvnode;) {
if (vp->v_flag & VROOT) {
if (vp->v_vflag & VV_ROOT) {
if (vp->v_mount == NULL) { /* forced unmount */
FILEDESC_UNLOCK(fdp);
free(buf, M_TEMP);

View File

@ -595,7 +595,7 @@ vop_stdcreatevobject(ap)
}
KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
vp->v_flag |= VOBJBUF;
vp->v_vflag |= VV_OBJBUF;
retn:
return (error);

View File

@ -1155,7 +1155,8 @@ unlink(td, uap)
*
* XXX: can this only be a VDIR case?
*/
if (vp->v_flag & VROOT)
mp_fixme("Accessing vflags w/o the vn lock.");
if (vp->v_vflag & VV_ROOT)
error = EBUSY;
}
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
@ -2778,7 +2779,7 @@ rmdir(td, uap)
/*
* The root of a mounted filesystem cannot be deleted.
*/
if (vp->v_flag & VROOT) {
if (vp->v_vflag & VV_ROOT) {
error = EBUSY;
goto out;
}
@ -2939,7 +2940,8 @@ ogetdirentries(td, uap)
return (error);
}
}
if ((vp->v_flag & VROOT) &&
mp_fixme("Accessing vflags w/o vn lock.");
if ((vp->v_vflag & VV_ROOT) &&
(vp->v_mount->mnt_flag & MNT_UNION)) {
struct vnode *tvp = vp;
vp = vp->v_mount->mnt_vnodecovered;
@ -3030,7 +3032,8 @@ getdirentries(td, uap)
return (error);
}
}
if ((vp->v_flag & VROOT) &&
mp_fixme("Accessing vflag without vn lock.");
if ((vp->v_vflag & VV_ROOT) &&
(vp->v_mount->mnt_flag & MNT_UNION)) {
struct vnode *tvp = vp;
vp = vp->v_mount->mnt_vnodecovered;

View File

@ -451,7 +451,7 @@ lookup(ndp)
VREF(dp);
goto nextname;
}
if ((dp->v_flag & VROOT) == 0 ||
if ((dp->v_vflag & VV_ROOT) == 0 ||
(cnp->cn_flags & NOCROSSMOUNT))
break;
if (dp->v_mount == NULL) { /* forced unmount */
@ -485,7 +485,7 @@ lookup(ndp)
printf("not found\n");
#endif
if ((error == ENOENT) &&
(dp->v_flag & VROOT) && (dp->v_mount != NULL) &&
(dp->v_vflag & VV_ROOT) && (dp->v_mount != NULL) &&
(dp->v_mount->mnt_flag & MNT_UNION)) {
tdp = dp;
dp = dp->v_mount->mnt_vnodecovered;

View File

@ -505,7 +505,7 @@ vfs_nmount(td, fsflags, fsoptions)
NDFREE(&nd, NDF_ONLY_PNBUF);
vp = nd.ni_vp;
if (fsflags & MNT_UPDATE) {
if ((vp->v_flag & VROOT) == 0) {
if ((vp->v_vflag & VV_ROOT) == 0) {
vput(vp);
error = EINVAL;
goto bad;
@ -539,16 +539,17 @@ vfs_nmount(td, fsflags, fsoptions)
error = EBUSY;
goto bad;
}
mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VMOUNT) != 0 || vp->v_mountedhere != NULL) {
mtx_unlock(&vp->v_interlock);
VI_LOCK(vp);
if ((vp->v_iflag & VI_MOUNT) != 0 ||
vp->v_mountedhere != NULL) {
VI_UNLOCK(vp);
vfs_unbusy(mp, td);
vput(vp);
error = EBUSY;
goto bad;
}
vp->v_flag |= VMOUNT;
mtx_unlock(&vp->v_interlock);
vp->v_iflag |= VI_MOUNT;
VI_UNLOCK(vp);
mp->mnt_flag |= fsflags &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
VOP_UNLOCK(vp, 0, td);
@ -616,16 +617,16 @@ vfs_nmount(td, fsflags, fsoptions)
goto bad;
}
}
mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VMOUNT) != 0 ||
VI_LOCK(vp);
if ((vp->v_iflag & VI_MOUNT) != 0 ||
vp->v_mountedhere != NULL) {
mtx_unlock(&vp->v_interlock);
VI_UNLOCK(vp);
vput(vp);
error = EBUSY;
goto bad;
}
vp->v_flag |= VMOUNT;
mtx_unlock(&vp->v_interlock);
vp->v_iflag |= VI_MOUNT;
VI_UNLOCK(vp);
/*
* Allocate and initialize the filesystem.
@ -660,9 +661,9 @@ vfs_nmount(td, fsflags, fsoptions)
if (mp->mnt_op->vfs_mount != NULL) {
printf("%s doesn't support the new mount syscall\n",
mp->mnt_vfc->vfc_name);
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
mtx_unlock(&vp->v_interlock);
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
VI_UNLOCK(vp);
if (mp->mnt_flag & MNT_UPDATE)
vfs_unbusy(mp, td);
else {
@ -722,9 +723,9 @@ vfs_nmount(td, fsflags, fsoptions)
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, td);
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
mtx_unlock(&vp->v_interlock);
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
VI_UNLOCK(vp);
vrele(vp);
return (error);
}
@ -736,10 +737,10 @@ vfs_nmount(td, fsflags, fsoptions)
if (!error) {
struct vnode *newdp;
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
vp->v_mountedhere = mp;
mtx_unlock(&vp->v_interlock);
VI_UNLOCK(vp);
mtx_lock(&mountlist_mtx);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
mtx_unlock(&mountlist_mtx);
@ -756,9 +757,9 @@ vfs_nmount(td, fsflags, fsoptions)
goto bad;
}
} else {
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
mtx_unlock(&vp->v_interlock);
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
VI_UNLOCK(vp);
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, td);
#ifdef MAC
@ -880,7 +881,7 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
NDFREE(&nd, NDF_ONLY_PNBUF);
vp = nd.ni_vp;
if (fsflags & MNT_UPDATE) {
if ((vp->v_flag & VROOT) == 0) {
if ((vp->v_vflag & VV_ROOT) == 0) {
vput(vp);
return (EINVAL);
}
@ -911,15 +912,16 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
vput(vp);
return (EBUSY);
}
mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VMOUNT) != 0 || vp->v_mountedhere != NULL) {
mtx_unlock(&vp->v_interlock);
VI_LOCK(vp);
if ((vp->v_iflag & VI_MOUNT) != 0 ||
vp->v_mountedhere != NULL) {
VI_UNLOCK(vp);
vfs_unbusy(mp, td);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
mtx_unlock(&vp->v_interlock);
vp->v_iflag |= VI_MOUNT;
VI_UNLOCK(vp);
mp->mnt_flag |= fsflags &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
VOP_UNLOCK(vp, 0, td);
@ -983,15 +985,15 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
return (ENODEV);
}
}
mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VMOUNT) != 0 ||
VI_LOCK(vp);
if ((vp->v_iflag & VI_MOUNT) != 0 ||
vp->v_mountedhere != NULL) {
mtx_unlock(&vp->v_interlock);
VI_UNLOCK(vp);
vput(vp);
return (EBUSY);
}
vp->v_flag |= VMOUNT;
mtx_unlock(&vp->v_interlock);
vp->v_iflag |= VI_MOUNT;
VI_UNLOCK(vp);
/*
* Allocate and initialize the filesystem.
@ -1024,9 +1026,9 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
if (mp->mnt_op->vfs_mount == NULL) {
printf("%s doesn't support the old mount syscall\n",
mp->mnt_vfc->vfc_name);
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
mtx_unlock(&vp->v_interlock);
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
VI_UNLOCK(vp);
if (mp->mnt_flag & MNT_UPDATE)
vfs_unbusy(mp, td);
else {
@ -1075,9 +1077,9 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, td);
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
mtx_unlock(&vp->v_interlock);
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
VI_UNLOCK(vp);
vrele(vp);
return (error);
}
@ -1089,10 +1091,11 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
if (!error) {
struct vnode *newdp;
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
mp_fixme("Does interlock protect mounted here or not?");
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
vp->v_mountedhere = mp;
mtx_unlock(&vp->v_interlock);
VI_UNLOCK(vp);
mtx_lock(&mountlist_mtx);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
mtx_unlock(&mountlist_mtx);
@ -1107,9 +1110,9 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
if ((error = VFS_START(mp, 0, td)) != 0)
vrele(vp);
} else {
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
mtx_unlock(&vp->v_interlock);
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
VI_UNLOCK(vp);
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, td);
#ifdef MAC
@ -1226,7 +1229,7 @@ unmount(td, uap)
/*
* Must be the root of the filesystem
*/
if ((vp->v_flag & VROOT) == 0) {
if ((vp->v_vflag & VV_ROOT) == 0) {
vput(vp);
return (EINVAL);
}

View File

@ -572,18 +572,16 @@ vlrureclaim(struct mount *mp, int count)
if (vp->v_type != VNON &&
vp->v_type != VBAD &&
VMIGHTFREE(vp) && /* critical path opt */
(vp->v_object == NULL || vp->v_object->resident_page_count < trigger) &&
mtx_trylock(&vp->v_interlock)
) {
VI_TRYLOCK(vp)) {
if (VMIGHTFREE(vp) && /* critical path opt */
(vp->v_object == NULL ||
vp->v_object->resident_page_count < trigger)) {
mtx_unlock(&mntvnode_mtx);
if (VMIGHTFREE(vp)) {
vgonel(vp, curthread);
done++;
} else {
mtx_unlock(&vp->v_interlock);
}
mtx_lock(&mntvnode_mtx);
} else
VI_UNLOCK(vp);
}
--count;
}
@ -771,8 +769,9 @@ getnewvnode(tag, mp, vops, vpp)
}
}
if (vp) {
vp->v_flag |= VDOOMED;
vp->v_flag &= ~VFREE;
mp_fixme("Unlocked v_iflags access.\n");
vp->v_iflag |= VI_DOOMED;
vp->v_iflag &= ~VI_FREE;
freevnodes--;
mtx_unlock(&vnode_free_list_mtx);
cache_purge(vp);
@ -806,7 +805,8 @@ getnewvnode(tag, mp, vops, vpp)
#ifdef MAC
mac_destroy_vnode(vp);
#endif
vp->v_flag = 0;
vp->v_iflag = 0;
vp->v_vflag = 0;
vp->v_lastw = 0;
vp->v_lasta = 0;
vp->v_cstart = 0;
@ -893,13 +893,15 @@ vwakeup(bp)
bp->b_flags &= ~B_WRITEINPROG;
if ((vp = bp->b_vp)) {
VI_LOCK(vp);
vp->v_numoutput--;
if (vp->v_numoutput < 0)
panic("vwakeup: neg numoutput");
if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
vp->v_flag &= ~VBWAIT;
if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
vp->v_iflag &= ~VI_BWAIT;
wakeup(&vp->v_numoutput);
}
VI_UNLOCK(vp);
}
}
@ -923,24 +925,33 @@ vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
if (flags & V_SAVE) {
s = splbio();
VI_LOCK(vp);
while (vp->v_numoutput) {
vp->v_flag |= VBWAIT;
error = tsleep(&vp->v_numoutput,
vp->v_iflag |= VI_BWAIT;
error = msleep(&vp->v_numoutput, VI_MTX(vp),
slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
if (error) {
VI_UNLOCK(vp);
splx(s);
return (error);
}
}
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
splx(s);
VI_UNLOCK(vp);
if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
return (error);
/*
* XXX We could save a lock/unlock if this was only
* enabled under INVARIANTS
*/
VI_LOCK(vp);
s = splbio();
if (vp->v_numoutput > 0 ||
!TAILQ_EMPTY(&vp->v_dirtyblkhd))
panic("vinvalbuf: dirty bufs");
}
VI_UNLOCK(vp);
splx(s);
}
s = splbio();
@ -969,28 +980,30 @@ vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
* have write I/O in-progress but if there is a VM object then the
* VM object can also have read-I/O in-progress.
*/
VI_LOCK(vp);
do {
while (vp->v_numoutput > 0) {
vp->v_flag |= VBWAIT;
tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
vp->v_iflag |= VI_BWAIT;
msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
}
VI_UNLOCK(vp);
if (VOP_GETVOBJECT(vp, &object) == 0) {
while (object->paging_in_progress)
vm_object_pip_sleep(object, "vnvlbx");
}
VI_LOCK(vp);
} while (vp->v_numoutput > 0);
VI_UNLOCK(vp);
splx(s);
/*
* Destroy the copy in the VM cache, too.
*/
mtx_lock(&vp->v_interlock);
if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
mtx_unlock(&vp->v_interlock);
if ((flags & (V_ALT | V_NORMAL)) == 0 &&
(!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
@ -1157,11 +1170,12 @@ vtruncbuf(vp, cred, td, length, blksize)
}
}
VI_LOCK(vp);
while (vp->v_numoutput > 0) {
vp->v_flag |= VBWAIT;
tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0);
vp->v_iflag |= VI_BWAIT;
msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
}
VI_UNLOCK(vp);
splx(s);
vnode_pager_setsize(vp, length);
@ -1407,10 +1421,12 @@ brelvp(bp)
s = splbio();
if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
buf_vlist_remove(bp);
if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
vp->v_flag &= ~VONWORKLST;
VI_LOCK(vp);
if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
vp->v_iflag &= ~VI_ONWORKLST;
LIST_REMOVE(vp, v_synclist);
}
VI_UNLOCK(vp);
splx(s);
bp->b_vp = (struct vnode *) 0;
vdrop(vp);
@ -1427,17 +1443,19 @@ vn_syncer_add_to_worklist(struct vnode *vp, int delay)
int s, slot;
s = splbio();
mtx_assert(VI_MTX(vp), MA_OWNED);
if (vp->v_flag & VONWORKLST) {
if (vp->v_iflag & VI_ONWORKLST)
LIST_REMOVE(vp, v_synclist);
}
else
vp->v_iflag |= VI_ONWORKLST;
if (delay > syncer_maxdelay - 2)
delay = syncer_maxdelay - 2;
slot = (syncer_delayno + delay) & syncer_mask;
LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
vp->v_flag |= VONWORKLST;
splx(s);
}
@ -1509,7 +1527,9 @@ sched_sync(void)
* position and then add us back in at a later
* position.
*/
VI_LOCK(vp);
vn_syncer_add_to_worklist(vp, syncdelay);
VI_UNLOCK(vp);
}
splx(s);
}
@ -1653,7 +1673,8 @@ reassignbuf(bp, newvp)
* of clean buffers.
*/
if (bp->b_flags & B_DELWRI) {
if ((newvp->v_flag & VONWORKLST) == 0) {
VI_LOCK(newvp);
if ((newvp->v_iflag & VI_ONWORKLST) == 0) {
switch (newvp->v_type) {
case VDIR:
delay = dirdelay;
@ -1669,15 +1690,18 @@ reassignbuf(bp, newvp)
}
vn_syncer_add_to_worklist(newvp, delay);
}
VI_UNLOCK(newvp);
buf_vlist_add(bp, newvp, BX_VNDIRTY);
} else {
buf_vlist_add(bp, newvp, BX_VNCLEAN);
if ((newvp->v_flag & VONWORKLST) &&
VI_LOCK(newvp);
if ((newvp->v_iflag & VI_ONWORKLST) &&
TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
newvp->v_flag &= ~VONWORKLST;
newvp->v_iflag &= ~VI_ONWORKLST;
LIST_REMOVE(newvp, v_synclist);
}
VI_UNLOCK(newvp);
}
if (bp->b_vp != newvp) {
bp->b_vp = newvp;
@ -1811,19 +1835,19 @@ vget(vp, flags, td)
* If the vnode is in the process of being cleaned out for
* another use, we wait for the cleaning to finish and then
* return failure. Cleaning is determined by checking that
* the VXLOCK flag is set.
* the VI_XLOCK flag is set.
*/
if ((flags & LK_INTERLOCK) == 0)
mtx_lock(&vp->v_interlock);
if (vp->v_flag & VXLOCK) {
VI_LOCK(vp);
if (vp->v_iflag & VI_XLOCK) {
if (vp->v_vxproc == curthread) {
#if 0
/* this can now occur in normal operation */
log(LOG_INFO, "VXLOCK interlock avoided\n");
#endif
} else {
vp->v_flag |= VXWANT;
msleep(vp, &vp->v_interlock, PINOD | PDROP, "vget", 0);
vp->v_iflag |= VI_XWANT;
msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
return (ENOENT);
}
}
@ -1842,17 +1866,17 @@ vget(vp, flags, td)
* before sleeping so that multiple processes do
* not try to recycle it.
*/
mtx_lock(&vp->v_interlock);
VI_LOCK(vp);
vp->v_usecount--;
if (VSHOULDFREE(vp))
vfree(vp);
else
vlruvp(vp);
mtx_unlock(&vp->v_interlock);
VI_UNLOCK(vp);
}
return (error);
}
mtx_unlock(&vp->v_interlock);
VI_UNLOCK(vp);
return (0);
}
@ -1879,7 +1903,7 @@ vrele(vp)
KASSERT(vp != NULL, ("vrele: null vp"));
mtx_lock(&vp->v_interlock);
VI_LOCK(vp);
/* Skip this v_writecount check if we're going to panic below. */
KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
@ -1888,7 +1912,7 @@ vrele(vp)
if (vp->v_usecount > 1) {
vp->v_usecount--;
mtx_unlock(&vp->v_interlock);
VI_UNLOCK(vp);
return;
}
@ -1903,15 +1927,17 @@ vrele(vp)
*/
if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0)
VOP_INACTIVE(vp, td);
VI_LOCK(vp);
if (VSHOULDFREE(vp))
vfree(vp);
else
vlruvp(vp);
VI_UNLOCK(vp);
} else {
#ifdef DIAGNOSTIC
vprint("vrele: negative ref count", vp);
mtx_unlock(&vp->v_interlock);
VI_UNLOCK(vp);
#endif
panic("vrele: negative ref cnt");
}
@ -1949,12 +1975,14 @@ vput(vp)
* If we are doing a vput, the node is already locked,
* so we just need to release the vnode mutex.
*/
mtx_unlock(&vp->v_interlock);
VI_UNLOCK(vp);
VOP_INACTIVE(vp, td);
VI_LOCK(vp);
if (VSHOULDFREE(vp))
vfree(vp);
else
vlruvp(vp);
VI_UNLOCK(vp);
} else {
#ifdef DIAGNOSTIC
@ -1975,8 +2003,10 @@ vhold(vp)
s = splbio();
vp->v_holdcnt++;
VI_LOCK(vp);
if (VSHOULDBUSY(vp))
vbusy(vp);
VI_UNLOCK(vp);
splx(s);
}
@ -1994,10 +2024,12 @@ vdrop(vp)
if (vp->v_holdcnt <= 0)
panic("vdrop: holdcnt");
vp->v_holdcnt--;
VI_LOCK(vp);
if (VSHOULDFREE(vp))
vfree(vp);
else
vlruvp(vp);
VI_UNLOCK(vp);
splx(s);
}
@ -2012,7 +2044,7 @@ vdrop(vp)
* If WRITECLOSE is set, only flush out regular file vnodes open for
* writing.
*
* SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
* SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
*
* `rootrefs' specifies the base reference count for the root vnode
* of this filesystem. The root vnode is considered busy if its
@ -2061,12 +2093,12 @@ vflush(mp, rootrefs, flags)
nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
mtx_lock(&vp->v_interlock);
VI_LOCK(vp);
/*
* Skip over a vnodes marked VSYSTEM.
* Skip over a vnodes marked VV_SYSTEM.
*/
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
mtx_unlock(&vp->v_interlock);
if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
VI_UNLOCK(vp);
mtx_lock(&mntvnode_mtx);
continue;
}
@ -2075,6 +2107,7 @@ vflush(mp, rootrefs, flags)
* files (even if open only for reading) and regular file
* vnodes open for writing.
*/
mp_fixme("Getattr called with interlock held!");
if ((flags & WRITECLOSE) &&
(vp->v_type == VNON ||
(VOP_GETATTR(vp, &vattr, td->td_ucred, td) == 0 &&
@ -2105,6 +2138,7 @@ vflush(mp, rootrefs, flags)
vgonel(vp, td);
} else {
vclean(vp, 0, td);
VI_UNLOCK(vp);
vp->v_op = spec_vnodeop_p;
insmntque(vp, (struct mount *) 0);
}
@ -2115,7 +2149,7 @@ vflush(mp, rootrefs, flags)
if (busyprt)
vprint("vflush: busy vnode", vp);
#endif
mtx_unlock(&vp->v_interlock);
VI_UNLOCK(vp);
mtx_lock(&mntvnode_mtx);
busy++;
}
@ -2173,6 +2207,7 @@ vclean(vp, flags, td)
{
int active;
mtx_assert(VI_MTX(vp), MA_OWNED);
/*
* Check to see if the vnode is in use. If so we have to reference it
* before we clean it out so that its count cannot fall to zero and
@ -2185,9 +2220,9 @@ vclean(vp, flags, td)
* Prevent the vnode from being recycled or brought into use while we
* clean it out.
*/
if (vp->v_flag & VXLOCK)
if (vp->v_iflag & VI_XLOCK)
panic("vclean: deadlock");
vp->v_flag |= VXLOCK;
vp->v_iflag |= VI_XLOCK;
vp->v_vxproc = curthread;
/*
* Even if the count is zero, the VOP_INACTIVE routine may still
@ -2241,7 +2276,7 @@ vclean(vp, flags, td)
* Inline copy of vrele() since VOP_INACTIVE
* has already been called.
*/
mtx_lock(&vp->v_interlock);
VI_LOCK(vp);
if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
if (vp->v_usecount < 0 || vp->v_writecount != 0) {
@ -2251,13 +2286,14 @@ vclean(vp, flags, td)
#endif
vfree(vp);
}
mtx_unlock(&vp->v_interlock);
VI_UNLOCK(vp);
}
cache_purge(vp);
vp->v_vnlock = NULL;
lockdestroy(&vp->v_lock);
VI_LOCK(vp);
if (VSHOULDFREE(vp))
vfree(vp);
@ -2268,10 +2304,10 @@ vclean(vp, flags, td)
if (vp->v_pollinfo != NULL)
vn_pollgone(vp);
vp->v_tag = VT_NON;
vp->v_flag &= ~VXLOCK;
vp->v_iflag &= ~VI_XLOCK;
vp->v_vxproc = NULL;
if (vp->v_flag & VXWANT) {
vp->v_flag &= ~VXWANT;
if (vp->v_iflag & VI_XWANT) {
vp->v_iflag &= ~VI_XWANT;
wakeup(vp);
}
}
@ -2293,16 +2329,19 @@ vop_revoke(ap)
KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
vp = ap->a_vp;
VI_LOCK(vp);
/*
* If a vgone (or vclean) is already in progress,
* wait until it is done and return.
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
msleep(vp, &vp->v_interlock, PINOD | PDROP,
if (vp->v_iflag & VI_XLOCK) {
vp->v_iflag |= VI_XWANT;
msleep(vp, VI_MTX(vp), PINOD | PDROP,
"vop_revokeall", 0);
VI_UNLOCK(vp);
return (0);
}
VI_UNLOCK(vp);
dev = vp->v_rdev;
for (;;) {
mtx_lock(&spechash_mtx);
@ -2348,7 +2387,7 @@ vgone(vp)
{
struct thread *td = curthread; /* XXX */
mtx_lock(&vp->v_interlock);
VI_LOCK(vp);
vgonel(vp, td);
}
@ -2366,9 +2405,11 @@ vgonel(vp, td)
* If a vgone (or vclean) is already in progress,
* wait until it is done and return.
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
msleep(vp, &vp->v_interlock, PINOD | PDROP, "vgone", 0);
mtx_assert(VI_MTX(vp), MA_OWNED);
if (vp->v_iflag & VI_XLOCK) {
vp->v_iflag |= VI_XWANT;
VI_UNLOCK(vp);
tsleep(vp, PINOD | PDROP, "vgone", 0);
return;
}
@ -2376,7 +2417,7 @@ vgonel(vp, td)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, td);
mtx_lock(&vp->v_interlock);
VI_UNLOCK(vp);
/*
* Delete from old mount point vnode list, if on one.
@ -2405,21 +2446,23 @@ vgonel(vp, td)
* incremented first, vgone would (incorrectly) try to
* close the previous instance of the underlying object.
*/
if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
VI_LOCK(vp);
if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
s = splbio();
mtx_lock(&vnode_free_list_mtx);
if (vp->v_flag & VFREE)
if (vp->v_iflag & VI_FREE) {
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
else
} else {
vp->v_iflag |= VI_FREE;
freevnodes++;
vp->v_flag |= VFREE;
}
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
mtx_unlock(&vnode_free_list_mtx);
splx(s);
}
vp->v_type = VBAD;
mtx_unlock(&vp->v_interlock);
VI_UNLOCK(vp);
}
/*
@ -2499,24 +2542,24 @@ vprint(label, vp)
typename[vp->v_type], vp->v_usecount, vp->v_writecount,
vp->v_holdcnt);
buf[0] = '\0';
if (vp->v_flag & VROOT)
strcat(buf, "|VROOT");
if (vp->v_flag & VTEXT)
strcat(buf, "|VTEXT");
if (vp->v_flag & VSYSTEM)
strcat(buf, "|VSYSTEM");
if (vp->v_flag & VXLOCK)
strcat(buf, "|VXLOCK");
if (vp->v_flag & VXWANT)
strcat(buf, "|VXWANT");
if (vp->v_flag & VBWAIT)
strcat(buf, "|VBWAIT");
if (vp->v_flag & VDOOMED)
strcat(buf, "|VDOOMED");
if (vp->v_flag & VFREE)
strcat(buf, "|VFREE");
if (vp->v_flag & VOBJBUF)
strcat(buf, "|VOBJBUF");
if (vp->v_vflag & VV_ROOT)
strcat(buf, "|VV_ROOT");
if (vp->v_vflag & VV_TEXT)
strcat(buf, "|VV_TEXT");
if (vp->v_vflag & VV_SYSTEM)
strcat(buf, "|VV_SYSTEM");
if (vp->v_iflag & VI_XLOCK)
strcat(buf, "|VI_XLOCK");
if (vp->v_iflag & VI_XWANT)
strcat(buf, "|VI_XWANT");
if (vp->v_iflag & VI_BWAIT)
strcat(buf, "|VI_BWAIT");
if (vp->v_iflag & VI_DOOMED)
strcat(buf, "|VI_DOOMED");
if (vp->v_iflag & VI_FREE)
strcat(buf, "|VI_FREE");
if (vp->v_vflag & VV_OBJBUF)
strcat(buf, "|VV_OBJBUF");
if (buf[0] != '\0')
printf(" flags (%s)", &buf[1]);
if (vp->v_data == NULL) {
@ -2673,7 +2716,6 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
xvn[n].xv_size = sizeof *xvn;
xvn[n].xv_vnode = vp;
#define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
XV_COPY(flag);
XV_COPY(usecount);
XV_COPY(writecount);
XV_COPY(holdcnt);
@ -2682,6 +2724,8 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
XV_COPY(numoutput);
XV_COPY(type);
#undef XV_COPY
xvn[n].xv_flag = vp->v_vflag;
switch (vp->v_type) {
case VREG:
case VDIR:
@ -2801,13 +2845,14 @@ vfs_msync(struct mount *mp, int flags)
}
nvp = TAILQ_NEXT(vp, v_nmntvnodes);
if (vp->v_flag & VXLOCK) /* XXX: what if MNT_WAIT? */
mp_fixme("What locks do we need here?");
if (vp->v_iflag & VI_XLOCK) /* XXX: what if MNT_WAIT? */
continue;
if (vp->v_flag & VNOSYNC) /* unlinked, skip it */
if (vp->v_vflag & VV_NOSYNC) /* unlinked, skip it */
continue;
if ((vp->v_flag & VOBJDIRTY) &&
if ((vp->v_iflag & VI_OBJDIRTY) &&
(flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
mtx_unlock(&mntvnode_mtx);
if (!vget(vp,
@ -2857,18 +2902,19 @@ vfree(vp)
{
int s;
mtx_assert(VI_MTX(vp), MA_OWNED);
s = splbio();
mtx_lock(&vnode_free_list_mtx);
KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
if (vp->v_flag & VAGE) {
KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
if (vp->v_iflag & VI_AGE) {
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
} else {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
}
freevnodes++;
mtx_unlock(&vnode_free_list_mtx);
vp->v_flag &= ~VAGE;
vp->v_flag |= VFREE;
vp->v_iflag &= ~VI_AGE;
vp->v_iflag |= VI_FREE;
splx(s);
}
@ -2882,12 +2928,13 @@ vbusy(vp)
int s;
s = splbio();
mtx_assert(VI_MTX(vp), MA_OWNED);
mtx_lock(&vnode_free_list_mtx);
KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
mtx_unlock(&vnode_free_list_mtx);
vp->v_flag &= ~(VFREE|VAGE);
vp->v_iflag &= ~(VI_FREE|VI_AGE);
splx(s);
}
@ -3044,7 +3091,9 @@ vfs_allocate_syncvnode(mp)
}
next = start;
}
VI_LOCK(vp);
vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
VI_UNLOCK(vp);
mp->mnt_syncer = vp;
return (0);
}
@ -3075,7 +3124,9 @@ sync_fsync(ap)
/*
* Move ourselves to the back of the sync list.
*/
VI_LOCK(syncvp);
vn_syncer_add_to_worklist(syncvp, syncdelay);
VI_UNLOCK(syncvp);
/*
* Walk the list of vnodes pushing all that are dirty and
@ -3133,10 +3184,12 @@ sync_reclaim(ap)
s = splbio();
vp->v_mount->mnt_syncer = NULL;
if (vp->v_flag & VONWORKLST) {
VI_LOCK(vp);
if (vp->v_iflag & VI_ONWORKLST) {
LIST_REMOVE(vp, v_synclist);
vp->v_flag &= ~VONWORKLST;
vp->v_iflag &= ~VI_ONWORKLST;
}
VI_UNLOCK(vp);
splx(s);
return (0);

View File

@ -1155,7 +1155,8 @@ unlink(td, uap)
*
* XXX: can this only be a VDIR case?
*/
if (vp->v_flag & VROOT)
mp_fixme("Accessing vflags w/o the vn lock.");
if (vp->v_vflag & VV_ROOT)
error = EBUSY;
}
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
@ -2778,7 +2779,7 @@ rmdir(td, uap)
/*
* The root of a mounted filesystem cannot be deleted.
*/
if (vp->v_flag & VROOT) {
if (vp->v_vflag & VV_ROOT) {
error = EBUSY;
goto out;
}
@ -2939,7 +2940,8 @@ ogetdirentries(td, uap)
return (error);
}
}
if ((vp->v_flag & VROOT) &&
mp_fixme("Accessing vflags w/o vn lock.");
if ((vp->v_vflag & VV_ROOT) &&
(vp->v_mount->mnt_flag & MNT_UNION)) {
struct vnode *tvp = vp;
vp = vp->v_mount->mnt_vnodecovered;
@ -3030,7 +3032,8 @@ getdirentries(td, uap)
return (error);
}
}
if ((vp->v_flag & VROOT) &&
mp_fixme("Accessing vflag without vn lock.");
if ((vp->v_vflag & VV_ROOT) &&
(vp->v_mount->mnt_flag & MNT_UNION)) {
struct vnode *tvp = vp;
vp = vp->v_mount->mnt_vnodecovered;

View File

@ -279,13 +279,15 @@ vn_writechk(vp)
register struct vnode *vp;
{
ASSERT_VOP_LOCKED(vp, "vn_writechk");
/*
* If there's shared text associated with
* the vnode, try to free it up once. If
* we fail, we can't allow writing.
*/
if (vp->v_flag & VTEXT)
if (vp->v_vflag & VV_TEXT)
return (ETXTBSY);
return (0);
}
@ -818,10 +820,10 @@ debug_vn_lock(vp, flags, td, filename, line)
do {
if ((flags & LK_INTERLOCK) == 0)
mtx_lock(&vp->v_interlock);
if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curthread) {
vp->v_flag |= VXWANT;
msleep(vp, &vp->v_interlock, PINOD | PDROP,
VI_LOCK(vp);
if ((vp->v_iflag & VI_XLOCK) && vp->v_vxproc != curthread) {
vp->v_iflag |= VI_XWANT;
msleep(vp, VI_MTX(vp), PINOD | PDROP,
"vn_lock", 0);
error = ENOENT;
} else {

View File

@ -1063,9 +1063,13 @@ nfs_vinvalbuf(struct vnode *vp, int flags, struct ucred *cred,
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
int error = 0, slpflag, slptimeo;
if (vp->v_flag & VXLOCK) {
VI_LOCK(vp);
if (vp->v_iflag & VI_XLOCK) {
/* XXX Should we wait here? */
VI_UNLOCK(vp);
return (0);
}
VI_UNLOCK(vp);
if ((nmp->nm_flag & NFSMNT_INT) == 0)
intrflg = 0;
@ -1340,7 +1344,8 @@ nfs_doio(struct buf *bp, struct ucred *cr, struct thread *td)
uiop->uio_resid = 0;
}
}
if (p && (vp->v_flag & VTEXT) &&
mp_fixme("Accessing VV_TEXT without a lock.");
if (p && (vp->v_vflag & VV_TEXT) &&
(np->n_mtime != np->n_vattr.va_mtime.tv_sec)) {
uprintf("Process killed due to text file modification\n");
PROC_LOCK(p);

View File

@ -505,7 +505,8 @@ nfs_mountroot(struct mount *mp, struct thread *td)
* hack it to a regular file.
*/
vp->v_type = VREG;
vp->v_flag = 0;
vp->v_vflag = 0;
vp->v_iflag = 0;
VREF(vp);
swaponvp(td, vp, NODEV, nd->swap_nblks);
}
@ -951,7 +952,7 @@ nfs_root(struct mount *mp, struct vnode **vpp)
vp = NFSTOV(np);
if (vp->v_type == VNON)
vp->v_type = VDIR;
vp->v_flag = VROOT;
vp->v_vflag |= VV_ROOT;
*vpp = vp;
return (0);
}

View File

@ -2807,9 +2807,10 @@ nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct thread *td,
goto again;
}
if (waitfor == MNT_WAIT) {
VI_LOCK(vp);
while (vp->v_numoutput) {
vp->v_flag |= VBWAIT;
error = tsleep((caddr_t)&vp->v_numoutput,
vp->v_iflag |= VI_BWAIT;
error = msleep((caddr_t)&vp->v_numoutput, VI_MTX(vp),
slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
if (error) {
if (nfs_sigintr(nmp, NULL, td)) {
@ -2822,6 +2823,7 @@ nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct thread *td,
}
}
}
VI_UNLOCK(vp);
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
goto loop;
}

View File

@ -2103,7 +2103,7 @@ nfsrv_remove(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
/*
* The root of a mounted filesystem cannot be deleted.
*/
if (nd.ni_vp->v_flag & VROOT) {
if (nd.ni_vp->v_vflag & VV_ROOT) {
error = EBUSY;
goto out;
}
@ -2891,7 +2891,7 @@ nfsrv_rmdir(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
/*
* The root of a mounted filesystem cannot be deleted.
*/
if (vp->v_flag & VROOT)
if (vp->v_vflag & VV_ROOT)
error = EBUSY;
out:
/*
@ -4052,7 +4052,7 @@ nfsrv_access(struct vnode *vp, int flags, struct ucred *cred, int rdonly,
* If there's shared text associated with
* the inode, we can't allow writing.
*/
if (vp->v_flag & VTEXT)
if (vp->v_vflag & VV_TEXT)
return (ETXTBSY);
}
error = VOP_GETATTR(vp, &vattr, cred, td);

View File

@ -374,7 +374,7 @@ set_object_lattr(lomac_object_t *obj, lattr_t lattr) {
vp = obj->lo_object.vnode;
KASSERT(!VISLOMAC(vp), ("is a LOMACFS vnode"));
VI_LOCK(vp);
vp->v_flag = (vp->v_flag & ~(UV_LEVEL_MASK | UV_ATTR_MASK)) |
vp->v_iflag = (vp->v_iflag & ~(UV_LEVEL_MASK | UV_ATTR_MASK)) |
level2uvnodebits(lattr.level) |
attr2uvnodebits(lattr.flags);
VI_UNLOCK(vp);
@ -453,8 +453,8 @@ get_object_lattr(const lomac_object_t *obj, lattr_t *lattr) {
vp = obj->lo_object.vnode;
KASSERT(!VISLOMAC(vp), ("is a LOMACFS vnode"));
VI_LOCK(vp);
lattr->level = uvnodebits2level(vp->v_flag);
lattr->flags = uvnodebits2attr(vp->v_flag);
lattr->level = uvnodebits2level(vp->v_iflag);
lattr->flags = uvnodebits2attr(vp->v_iflag);
VI_UNLOCK(vp);
break;
case LO_TYPE_VM_OBJECT:

View File

@ -400,7 +400,7 @@ unmount(td, uap)
/*
* Must be the root of the filesystem
*/
if ((vp->v_flag & VROOT) == 0) {
if ((vp->v_vflag & VV_ROOT) == 0) {
vput(vp);
return (EINVAL);
}
@ -542,7 +542,8 @@ lomac_getcwd(
struct dirent *dp;
int direof;
if (vp->v_flag & VROOT) {
ASSERT_VOP_LOCKED(vp, "lomac_getcwd");
if (vp->v_vflag & VV_ROOT) {
if (vp->v_mount == NULL) /* forced unmount */
return (EBADF);
dvp = vp->v_mount->mnt_vnodecovered;

View File

@ -112,7 +112,7 @@ lomacfs_node_alloc(struct mount *mp, struct componentname *cnp,
if (error)
panic("lomacfs_node_alloc: can't lock new vnode\n");
if (cnp == NULL)
vp->v_flag |= VROOT;
vp->v_vflag |= VV_ROOT;
else if (cnp->cn_flags & MAKEENTRY)
cache_enter(dvp, vp, cnp);

View File

@ -121,11 +121,10 @@ lomacfs_unmount(struct mount *mp, int mntflags, struct thread *td) {
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
if (VFSTOLOMAC(mp)->lm_flags & LM_TOOKROOT) {
mtx_lock(&crootvp->v_interlock);
crootvp->v_flag |= VROOT;
mtx_unlock(&crootvp->v_interlock);
}
ASSERT_VOP_LOCKED(crootvp, "lomacfs_unmount");
if (VFSTOLOMAC(mp)->lm_flags & LM_TOOKROOT)
crootvp->v_vflag |= VV_ROOT;
error = vflush(mp, 1, flags); /* have an extra root ref */
if (error)
@ -167,9 +166,10 @@ lomacfs_root(struct mount *mp, struct vnode **vpp) {
* that the mounted-on directory isn't a root vnode if I
* want things like __getcwd() to just fail and not crash.
*/
mp_fixme("This code needs the vn lock, not interlock.");
mtx_lock(&crootvp->v_interlock);
if (crootvp->v_flag & VROOT && crootvp == rootvnode) {
crootvp->v_flag &= ~VROOT;
if (crootvp->v_vflag & VV_ROOT && crootvp == rootvnode) {
crootvp->v_vflag &= ~VV_ROOT;
VFSTOLOMAC(mp)->lm_flags |= LM_TOOKROOT;
}
mtx_unlock(&crootvp->v_interlock);

View File

@ -868,7 +868,7 @@ lomacfs_createvobject(
error = VOP_CREATEVOBJECT(lowervp, ap->a_cred, ap->a_td);
if (error)
return (error);
vp->v_flag |= VOBJBUF;
vp->v_vflag |= VV_OBJBUF;
return (error);
}
@ -884,7 +884,7 @@ lomacfs_destroyvobject(
) {
struct vnode *vp = ap->a_vp;
vp->v_flag &= ~VOBJBUF;
vp->v_vflag &= ~VV_OBJBUF;
return (0);
}

View File

@ -978,8 +978,9 @@ mac_update_vnode_from_mount(struct vnode *vp, struct mount *mp)
MAC_PERFORM(update_vnode_from_mount, vp, &vp->v_label, mp,
&mp->mnt_fslabel);
ASSERT_VOP_LOCKED(vp, "mac_update_vnode_from_mount");
if (mac_cache_fslabel_in_vnode)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
}
/*
@ -1031,7 +1032,7 @@ vop_stdrefreshlabel_ea(struct vop_refreshlabel_args *ap)
if (error == 0)
error = mac_update_vnode_from_externalized(vp, &extmac);
if (error == 0)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
else {
struct vattr va;
@ -1084,7 +1085,7 @@ vn_refreshlabel(struct vnode *vp, struct ucred *cred)
return (EBADF);
}
if (vp->v_flag & VCACHEDLABEL) {
if (vp->v_vflag & VV_CACHEDLABEL) {
mac_vnode_label_cache_hits++;
return (0);
} else
@ -1124,6 +1125,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
struct mac extmac;
int error;
ASSERT_VOP_LOCKED(tvp, "vop_stdcreatevnode_ea");
if ((dvp->v_mount->mnt_flag & MNT_MULTILABEL) == 0) {
mac_update_vnode_from_mount(tvp, tvp->v_mount);
} else {
@ -1156,7 +1158,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
FREEBSD_MAC_EXTATTR_NAMESPACE, FREEBSD_MAC_EXTATTR_NAME,
sizeof(extmac), (char *)&extmac, curthread);
if (error == 0)
tvp->v_flag |= VCACHEDLABEL;
tvp->v_vflag |= VV_CACHEDLABEL;
else {
#if 0
/*
@ -2771,7 +2773,7 @@ vop_stdsetlabel_ea(struct vop_setlabel_args *ap)
mac_relabel_vnode(ap->a_cred, vp, intlabel);
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
return (0);
}

View File

@ -978,8 +978,9 @@ mac_update_vnode_from_mount(struct vnode *vp, struct mount *mp)
MAC_PERFORM(update_vnode_from_mount, vp, &vp->v_label, mp,
&mp->mnt_fslabel);
ASSERT_VOP_LOCKED(vp, "mac_update_vnode_from_mount");
if (mac_cache_fslabel_in_vnode)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
}
/*
@ -1031,7 +1032,7 @@ vop_stdrefreshlabel_ea(struct vop_refreshlabel_args *ap)
if (error == 0)
error = mac_update_vnode_from_externalized(vp, &extmac);
if (error == 0)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
else {
struct vattr va;
@ -1084,7 +1085,7 @@ vn_refreshlabel(struct vnode *vp, struct ucred *cred)
return (EBADF);
}
if (vp->v_flag & VCACHEDLABEL) {
if (vp->v_vflag & VV_CACHEDLABEL) {
mac_vnode_label_cache_hits++;
return (0);
} else
@ -1124,6 +1125,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
struct mac extmac;
int error;
ASSERT_VOP_LOCKED(tvp, "vop_stdcreatevnode_ea");
if ((dvp->v_mount->mnt_flag & MNT_MULTILABEL) == 0) {
mac_update_vnode_from_mount(tvp, tvp->v_mount);
} else {
@ -1156,7 +1158,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
FREEBSD_MAC_EXTATTR_NAMESPACE, FREEBSD_MAC_EXTATTR_NAME,
sizeof(extmac), (char *)&extmac, curthread);
if (error == 0)
tvp->v_flag |= VCACHEDLABEL;
tvp->v_vflag |= VV_CACHEDLABEL;
else {
#if 0
/*
@ -2771,7 +2773,7 @@ vop_stdsetlabel_ea(struct vop_setlabel_args *ap)
mac_relabel_vnode(ap->a_cred, vp, intlabel);
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
return (0);
}

View File

@ -978,8 +978,9 @@ mac_update_vnode_from_mount(struct vnode *vp, struct mount *mp)
MAC_PERFORM(update_vnode_from_mount, vp, &vp->v_label, mp,
&mp->mnt_fslabel);
ASSERT_VOP_LOCKED(vp, "mac_update_vnode_from_mount");
if (mac_cache_fslabel_in_vnode)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
}
/*
@ -1031,7 +1032,7 @@ vop_stdrefreshlabel_ea(struct vop_refreshlabel_args *ap)
if (error == 0)
error = mac_update_vnode_from_externalized(vp, &extmac);
if (error == 0)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
else {
struct vattr va;
@ -1084,7 +1085,7 @@ vn_refreshlabel(struct vnode *vp, struct ucred *cred)
return (EBADF);
}
if (vp->v_flag & VCACHEDLABEL) {
if (vp->v_vflag & VV_CACHEDLABEL) {
mac_vnode_label_cache_hits++;
return (0);
} else
@ -1124,6 +1125,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
struct mac extmac;
int error;
ASSERT_VOP_LOCKED(tvp, "vop_stdcreatevnode_ea");
if ((dvp->v_mount->mnt_flag & MNT_MULTILABEL) == 0) {
mac_update_vnode_from_mount(tvp, tvp->v_mount);
} else {
@ -1156,7 +1158,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
FREEBSD_MAC_EXTATTR_NAMESPACE, FREEBSD_MAC_EXTATTR_NAME,
sizeof(extmac), (char *)&extmac, curthread);
if (error == 0)
tvp->v_flag |= VCACHEDLABEL;
tvp->v_vflag |= VV_CACHEDLABEL;
else {
#if 0
/*
@ -2771,7 +2773,7 @@ vop_stdsetlabel_ea(struct vop_setlabel_args *ap)
mac_relabel_vnode(ap->a_cred, vp, intlabel);
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
return (0);
}

View File

@ -978,8 +978,9 @@ mac_update_vnode_from_mount(struct vnode *vp, struct mount *mp)
MAC_PERFORM(update_vnode_from_mount, vp, &vp->v_label, mp,
&mp->mnt_fslabel);
ASSERT_VOP_LOCKED(vp, "mac_update_vnode_from_mount");
if (mac_cache_fslabel_in_vnode)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
}
/*
@ -1031,7 +1032,7 @@ vop_stdrefreshlabel_ea(struct vop_refreshlabel_args *ap)
if (error == 0)
error = mac_update_vnode_from_externalized(vp, &extmac);
if (error == 0)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
else {
struct vattr va;
@ -1084,7 +1085,7 @@ vn_refreshlabel(struct vnode *vp, struct ucred *cred)
return (EBADF);
}
if (vp->v_flag & VCACHEDLABEL) {
if (vp->v_vflag & VV_CACHEDLABEL) {
mac_vnode_label_cache_hits++;
return (0);
} else
@ -1124,6 +1125,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
struct mac extmac;
int error;
ASSERT_VOP_LOCKED(tvp, "vop_stdcreatevnode_ea");
if ((dvp->v_mount->mnt_flag & MNT_MULTILABEL) == 0) {
mac_update_vnode_from_mount(tvp, tvp->v_mount);
} else {
@ -1156,7 +1158,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
FREEBSD_MAC_EXTATTR_NAMESPACE, FREEBSD_MAC_EXTATTR_NAME,
sizeof(extmac), (char *)&extmac, curthread);
if (error == 0)
tvp->v_flag |= VCACHEDLABEL;
tvp->v_vflag |= VV_CACHEDLABEL;
else {
#if 0
/*
@ -2771,7 +2773,7 @@ vop_stdsetlabel_ea(struct vop_setlabel_args *ap)
mac_relabel_vnode(ap->a_cred, vp, intlabel);
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
return (0);
}

View File

@ -978,8 +978,9 @@ mac_update_vnode_from_mount(struct vnode *vp, struct mount *mp)
MAC_PERFORM(update_vnode_from_mount, vp, &vp->v_label, mp,
&mp->mnt_fslabel);
ASSERT_VOP_LOCKED(vp, "mac_update_vnode_from_mount");
if (mac_cache_fslabel_in_vnode)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
}
/*
@ -1031,7 +1032,7 @@ vop_stdrefreshlabel_ea(struct vop_refreshlabel_args *ap)
if (error == 0)
error = mac_update_vnode_from_externalized(vp, &extmac);
if (error == 0)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
else {
struct vattr va;
@ -1084,7 +1085,7 @@ vn_refreshlabel(struct vnode *vp, struct ucred *cred)
return (EBADF);
}
if (vp->v_flag & VCACHEDLABEL) {
if (vp->v_vflag & VV_CACHEDLABEL) {
mac_vnode_label_cache_hits++;
return (0);
} else
@ -1124,6 +1125,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
struct mac extmac;
int error;
ASSERT_VOP_LOCKED(tvp, "vop_stdcreatevnode_ea");
if ((dvp->v_mount->mnt_flag & MNT_MULTILABEL) == 0) {
mac_update_vnode_from_mount(tvp, tvp->v_mount);
} else {
@ -1156,7 +1158,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
FREEBSD_MAC_EXTATTR_NAMESPACE, FREEBSD_MAC_EXTATTR_NAME,
sizeof(extmac), (char *)&extmac, curthread);
if (error == 0)
tvp->v_flag |= VCACHEDLABEL;
tvp->v_vflag |= VV_CACHEDLABEL;
else {
#if 0
/*
@ -2771,7 +2773,7 @@ vop_stdsetlabel_ea(struct vop_setlabel_args *ap)
mac_relabel_vnode(ap->a_cred, vp, intlabel);
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
return (0);
}

View File

@ -978,8 +978,9 @@ mac_update_vnode_from_mount(struct vnode *vp, struct mount *mp)
MAC_PERFORM(update_vnode_from_mount, vp, &vp->v_label, mp,
&mp->mnt_fslabel);
ASSERT_VOP_LOCKED(vp, "mac_update_vnode_from_mount");
if (mac_cache_fslabel_in_vnode)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
}
/*
@ -1031,7 +1032,7 @@ vop_stdrefreshlabel_ea(struct vop_refreshlabel_args *ap)
if (error == 0)
error = mac_update_vnode_from_externalized(vp, &extmac);
if (error == 0)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
else {
struct vattr va;
@ -1084,7 +1085,7 @@ vn_refreshlabel(struct vnode *vp, struct ucred *cred)
return (EBADF);
}
if (vp->v_flag & VCACHEDLABEL) {
if (vp->v_vflag & VV_CACHEDLABEL) {
mac_vnode_label_cache_hits++;
return (0);
} else
@ -1124,6 +1125,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
struct mac extmac;
int error;
ASSERT_VOP_LOCKED(tvp, "vop_stdcreatevnode_ea");
if ((dvp->v_mount->mnt_flag & MNT_MULTILABEL) == 0) {
mac_update_vnode_from_mount(tvp, tvp->v_mount);
} else {
@ -1156,7 +1158,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
FREEBSD_MAC_EXTATTR_NAMESPACE, FREEBSD_MAC_EXTATTR_NAME,
sizeof(extmac), (char *)&extmac, curthread);
if (error == 0)
tvp->v_flag |= VCACHEDLABEL;
tvp->v_vflag |= VV_CACHEDLABEL;
else {
#if 0
/*
@ -2771,7 +2773,7 @@ vop_stdsetlabel_ea(struct vop_setlabel_args *ap)
mac_relabel_vnode(ap->a_cred, vp, intlabel);
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
return (0);
}

View File

@ -978,8 +978,9 @@ mac_update_vnode_from_mount(struct vnode *vp, struct mount *mp)
MAC_PERFORM(update_vnode_from_mount, vp, &vp->v_label, mp,
&mp->mnt_fslabel);
ASSERT_VOP_LOCKED(vp, "mac_update_vnode_from_mount");
if (mac_cache_fslabel_in_vnode)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
}
/*
@ -1031,7 +1032,7 @@ vop_stdrefreshlabel_ea(struct vop_refreshlabel_args *ap)
if (error == 0)
error = mac_update_vnode_from_externalized(vp, &extmac);
if (error == 0)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
else {
struct vattr va;
@ -1084,7 +1085,7 @@ vn_refreshlabel(struct vnode *vp, struct ucred *cred)
return (EBADF);
}
if (vp->v_flag & VCACHEDLABEL) {
if (vp->v_vflag & VV_CACHEDLABEL) {
mac_vnode_label_cache_hits++;
return (0);
} else
@ -1124,6 +1125,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
struct mac extmac;
int error;
ASSERT_VOP_LOCKED(tvp, "vop_stdcreatevnode_ea");
if ((dvp->v_mount->mnt_flag & MNT_MULTILABEL) == 0) {
mac_update_vnode_from_mount(tvp, tvp->v_mount);
} else {
@ -1156,7 +1158,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
FREEBSD_MAC_EXTATTR_NAMESPACE, FREEBSD_MAC_EXTATTR_NAME,
sizeof(extmac), (char *)&extmac, curthread);
if (error == 0)
tvp->v_flag |= VCACHEDLABEL;
tvp->v_vflag |= VV_CACHEDLABEL;
else {
#if 0
/*
@ -2771,7 +2773,7 @@ vop_stdsetlabel_ea(struct vop_setlabel_args *ap)
mac_relabel_vnode(ap->a_cred, vp, intlabel);
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
return (0);
}

View File

@ -978,8 +978,9 @@ mac_update_vnode_from_mount(struct vnode *vp, struct mount *mp)
MAC_PERFORM(update_vnode_from_mount, vp, &vp->v_label, mp,
&mp->mnt_fslabel);
ASSERT_VOP_LOCKED(vp, "mac_update_vnode_from_mount");
if (mac_cache_fslabel_in_vnode)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
}
/*
@ -1031,7 +1032,7 @@ vop_stdrefreshlabel_ea(struct vop_refreshlabel_args *ap)
if (error == 0)
error = mac_update_vnode_from_externalized(vp, &extmac);
if (error == 0)
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
else {
struct vattr va;
@ -1084,7 +1085,7 @@ vn_refreshlabel(struct vnode *vp, struct ucred *cred)
return (EBADF);
}
if (vp->v_flag & VCACHEDLABEL) {
if (vp->v_vflag & VV_CACHEDLABEL) {
mac_vnode_label_cache_hits++;
return (0);
} else
@ -1124,6 +1125,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
struct mac extmac;
int error;
ASSERT_VOP_LOCKED(tvp, "vop_stdcreatevnode_ea");
if ((dvp->v_mount->mnt_flag & MNT_MULTILABEL) == 0) {
mac_update_vnode_from_mount(tvp, tvp->v_mount);
} else {
@ -1156,7 +1158,7 @@ vop_stdcreatevnode_ea(struct vnode *dvp, struct vnode *tvp, struct ucred *cred)
FREEBSD_MAC_EXTATTR_NAMESPACE, FREEBSD_MAC_EXTATTR_NAME,
sizeof(extmac), (char *)&extmac, curthread);
if (error == 0)
tvp->v_flag |= VCACHEDLABEL;
tvp->v_vflag |= VV_CACHEDLABEL;
else {
#if 0
/*
@ -2771,7 +2773,7 @@ vop_stdsetlabel_ea(struct vop_setlabel_args *ap)
mac_relabel_vnode(ap->a_cred, vp, intlabel);
vp->v_flag |= VCACHEDLABEL;
vp->v_vflag |= VV_CACHEDLABEL;
return (0);
}

View File

@ -95,14 +95,20 @@ struct vpollinfo {
* Reading or writing any of these items requires holding the appropriate lock.
* v_freelist is locked by the global vnode_free_list mutex.
* v_mntvnodes is locked by the global mntvnodes mutex.
* v_flag, v_usecount, v_holdcount and v_writecount are
* v_iflag, v_usecount, v_holdcount and v_writecount are
* locked by the v_interlock mutex.
* v_pollinfo is locked by the lock contained inside it.
* V vnode lock
* I inter lock
*/
struct vnode {
u_long v_flag; /* vnode flags (see below) */
int v_usecount; /* reference count of users */
int v_writecount; /* reference count of writers */
struct mtx v_interlock; /* lock on usecount and flag */
u_long v_iflag; /* I vnode flags (see below) */
int v_usecount; /* I ref count of users */
int v_writecount; /* I ref count of writers */
long v_numoutput; /* I writes in progress */
struct thread *v_vxproc; /* I thread owning VXLOCK */
u_long v_vflag; /* V vnode flags */
int v_holdcnt; /* page & buffer references */
u_long v_id; /* capability identifier */
struct mount *v_mount; /* ptr to vfs we are in */
@ -114,7 +120,6 @@ struct vnode {
struct buflists v_dirtyblkhd; /* SORTED dirty blocklist */
struct buf *v_dirtyblkroot; /* dirty buf splay tree root */
LIST_ENTRY(vnode) v_synclist; /* vnodes with dirty buffers */
long v_numoutput; /* num of writes in progress */
enum vtype v_type; /* vnode type */
union {
struct mount *vu_mountedhere;/* ptr to mounted vfs (VDIR) */
@ -130,7 +135,6 @@ struct vnode {
daddr_t v_lasta; /* last allocation (cluster) */
int v_clen; /* length of current cluster */
struct vm_object *v_object; /* Place to store VM object */
struct mtx v_interlock; /* lock on usecount and flag */
struct lock v_lock; /* used if fs don't have one */
struct lock *v_vnlock; /* pointer to vnode lock */
enum vtagtype v_tag; /* type of underlying data */
@ -140,7 +144,6 @@ struct vnode {
struct vnode *v_dd; /* .. vnode */
u_long v_ddid; /* .. capability identifier */
struct vpollinfo *v_pollinfo;
struct thread *v_vxproc; /* thread owning VXLOCK */
struct label v_label; /* MAC label for vnode */
#ifdef DEBUG_LOCKS
const char *filename; /* Source file doing locking */
@ -161,7 +164,7 @@ struct vnode {
struct xvnode {
size_t xv_size; /* sizeof(struct xvnode) */
void *xv_vnode; /* address of real vnode */
u_long xv_flag; /* vnode flags */
u_long xv_flag; /* vnode vflags */
int xv_usecount; /* reference count of users */
int xv_writecount; /* reference count of writers */
int xv_holdcnt; /* page & buffer references */
@ -200,27 +203,33 @@ struct xvnode {
/*
* Vnode flags.
* VI flags are protected by interlock and live in v_iflag
* VV flags are protected by the vnode lock and live in v_vflag
*/
#define VROOT 0x00001 /* root of its filesystem */
#define VTEXT 0x00002 /* vnode is a pure text prototype */
#define VSYSTEM 0x00004 /* vnode being used by kernel */
#define VISTTY 0x00008 /* vnode represents a tty */
#define VXLOCK 0x00100 /* vnode is locked to change underlying type */
#define VXWANT 0x00200 /* thread is waiting for vnode */
#define VBWAIT 0x00400 /* waiting for output to complete */
#define VNOSYNC 0x01000 /* unlinked, stop syncing */
/* open for business 0x01000 */
#define VOBJBUF 0x02000 /* Allocate buffers in VM object */
#define VCOPYONWRITE 0x04000 /* vnode is doing copy-on-write */
#define VAGE 0x08000 /* Insert vnode at head of free list */
#define VOLOCK 0x10000 /* vnode is locked waiting for an object */
#define VOWANT 0x20000 /* a thread is waiting for VOLOCK */
#define VDOOMED 0x40000 /* This vnode is being recycled */
#define VFREE 0x80000 /* This vnode is on the freelist */
#define VCACHEDLABEL 0x100000 /* Vnode has valid cached MAC label */
#define VONWORKLST 0x200000 /* On syncer work-list */
#define VMOUNT 0x400000 /* Mount in progress */
#define VOBJDIRTY 0x800000 /* object might be dirty */
#define VI_XLOCK 0x0001 /* vnode is locked to change vtype */
#define VI_XWANT 0x0002 /* thread is waiting for vnode */
#define VI_BWAIT 0x0004 /* waiting for output to complete */
#define VI_OLOCK 0x0008 /* vnode is locked waiting for an object */
#define VI_OWANT 0x0010 /* a thread is waiting for VOLOCK */
#define VI_MOUNT 0x0020 /* Mount in progress */
#define VI_AGE 0x0040 /* Insert vnode at head of free list */
#define VI_DOOMED 0x0080 /* This vnode is being recycled */
#define VI_FREE 0x0100 /* This vnode is on the freelist */
#define VI_OBJDIRTY 0x0400 /* object might be dirty */
/*
* XXX VI_ONWORKLST could be replaced with a check for NULL list elements
* in v_synclist.
*/
#define VI_ONWORKLST 0x0200 /* On syncer work-list */
#define VV_ROOT 0x0001 /* root of its filesystem */
#define VV_ISTTY 0x0002 /* vnode represents a tty */
#define VV_NOSYNC 0x0004 /* unlinked, stop syncing */
#define VV_OBJBUF 0x0008 /* Allocate buffers in VM object */
#define VV_CACHEDLABEL 0x0010 /* Vnode has valid cached MAC label */
#define VV_TEXT 0x0020 /* vnode is a pure text prototype */
#define VV_COPYONWRITE 0x0040 /* vnode is doing copy-on-write */
#define VV_SYSTEM 0x0080 /* vnode being used by kernel */
/*
* Vnode attributes. A field value of VNOVAL represents a field whose value
@ -361,23 +370,27 @@ extern int vfs_ioopt;
extern void (*lease_updatetime)(int deltat);
/* Requires interlock */
#define VSHOULDFREE(vp) \
(!((vp)->v_flag & (VFREE|VDOOMED)) && \
(!((vp)->v_iflag & (VI_FREE|VI_DOOMED)) && \
!(vp)->v_holdcnt && !(vp)->v_usecount && \
(!(vp)->v_object || \
!((vp)->v_object->ref_count || (vp)->v_object->resident_page_count)))
/* Requires interlock */
#define VMIGHTFREE(vp) \
(!((vp)->v_flag & (VFREE|VDOOMED|VXLOCK)) && \
(!((vp)->v_iflag & (VI_FREE|VI_DOOMED|VI_XLOCK)) && \
LIST_EMPTY(&(vp)->v_cache_src) && !(vp)->v_usecount)
/* Requires interlock */
#define VSHOULDBUSY(vp) \
(((vp)->v_flag & VFREE) && \
(((vp)->v_iflag & VI_FREE) && \
((vp)->v_holdcnt || (vp)->v_usecount))
#define VI_LOCK(vp) mtx_lock(&(vp)->v_interlock)
#define VI_TRYLOCK(vp) mtx_trylock(&(vp)->v_interlock)
#define VI_UNLOCK(vp) mtx_unlock(&(vp)->v_interlock)
#define VI_MTX(vp) (&(vp)->v_interlock)
#endif /* _KERNEL */

View File

@ -928,7 +928,8 @@ ffs_dirpref(pip)
/*
* Force allocation in another cg if creating a first level dir.
*/
if (ITOV(pip)->v_flag & VROOT) {
ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
if (ITOV(pip)->v_vflag & VV_ROOT) {
prefcg = arc4random() % fs->fs_ncg;
mincg = prefcg;
minndir = fs->fs_ipg;
@ -1697,7 +1698,8 @@ ffs_blkfree(fs, devvp, bno, size, inum)
/* devvp is a normal disk device */
dev = devvp->v_rdev;
cgblkno = fsbtodb(fs, cgtod(fs, cg));
if ((devvp->v_flag & VCOPYONWRITE) &&
ASSERT_VOP_LOCKED(devvp, "ffs_blkfree");
if ((devvp->v_vflag & VV_COPYONWRITE) &&
ffs_snapblkfree(fs, devvp, bno, size, inum))
return;
VOP_FREEBLKS(devvp, fsbtodb(fs, bno), size);

View File

@ -456,9 +456,12 @@ ffs_snapshot(mp, snapfile)
panic("ffs_snapshot: %d already on list", ip->i_number);
snaphead = &ip->i_devvp->v_rdev->si_snapshots;
TAILQ_INSERT_TAIL(snaphead, ip, i_nextsnap);
ASSERT_VOP_LOCKED(ip->i_devvp, "ffs_snapshot devvp");
ip->i_devvp->v_rdev->si_copyonwrite = ffs_copyonwrite;
ip->i_devvp->v_flag |= VCOPYONWRITE;
vp->v_flag |= VSYSTEM;
ip->i_devvp->v_vflag |= VV_COPYONWRITE;
ASSERT_VOP_LOCKED(vp, "ffs_snapshot vp");
vp->v_vflag |= VV_SYSTEM;
out1:
/*
* Resume operation on filesystem.
@ -1225,9 +1228,10 @@ ffs_snapremove(vp)
devvp = ip->i_devvp;
TAILQ_REMOVE(&devvp->v_rdev->si_snapshots, ip, i_nextsnap);
ip->i_nextsnap.tqe_prev = 0;
ASSERT_VOP_LOCKED(devvp, "ffs_snapremove devvp");
if (TAILQ_FIRST(&devvp->v_rdev->si_snapshots) == 0) {
devvp->v_rdev->si_copyonwrite = 0;
devvp->v_flag &= ~VCOPYONWRITE;
devvp->v_vflag &= ~VV_COPYONWRITE;
}
}
/*
@ -1537,9 +1541,10 @@ ffs_snapshot_mount(mp)
ip->i_number);
else
TAILQ_INSERT_TAIL(snaphead, ip, i_nextsnap);
vp->v_flag |= VSYSTEM;
vp->v_vflag |= VV_SYSTEM;
ump->um_devvp->v_rdev->si_copyonwrite = ffs_copyonwrite;
ump->um_devvp->v_flag |= VCOPYONWRITE;
ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_snapshot_mount");
ump->um_devvp->v_vflag |= VV_COPYONWRITE;
VOP_UNLOCK(vp, 0, td);
}
}
@ -1561,8 +1566,9 @@ ffs_snapshot_unmount(mp)
if (xp->i_effnlink > 0)
vrele(ITOV(xp));
}
ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_snapshot_unmount");
ump->um_devvp->v_rdev->si_copyonwrite = 0;
ump->um_devvp->v_flag &= ~VCOPYONWRITE;
ump->um_devvp->v_vflag &= ~VV_COPYONWRITE;
}
/*

View File

@ -291,7 +291,7 @@ softdep_panic(msg)
}
#endif /* DEBUG */
static int interlocked_sleep(struct lockit *, int, void *, int,
static int interlocked_sleep(struct lockit *, int, void *, struct mtx *, int,
const char *, int);
/*
@ -306,10 +306,11 @@ static int interlocked_sleep(struct lockit *, int, void *, int,
#define LOCKBUF 2
static int
interlocked_sleep(lk, op, ident, flags, wmesg, timo)
interlocked_sleep(lk, op, ident, mtx, flags, wmesg, timo)
struct lockit *lk;
int op;
void *ident;
struct mtx *mtx;
int flags;
const char *wmesg;
int timo;
@ -325,7 +326,7 @@ interlocked_sleep(lk, op, ident, flags, wmesg, timo)
# endif /* DEBUG */
switch (op) {
case SLEEP:
retval = tsleep(ident, flags, wmesg, timo);
retval = msleep(ident, mtx, flags, wmesg, timo);
break;
case LOCKBUF:
retval = BUF_LOCK((struct buf *)ident, flags);
@ -386,7 +387,8 @@ sema_get(semap, interlock)
if (semap->value++ > 0) {
if (interlock != NULL) {
interlocked_sleep(interlock, SLEEP, (caddr_t)semap,
semap->prio, semap->name, semap->timo);
NULL, semap->prio, semap->name,
semap->timo);
FREE_LOCK(interlock);
} else {
tsleep((caddr_t)semap, semap->prio, semap->name,
@ -4778,8 +4780,13 @@ softdep_fsync(vp)
* not now, but then the user was not asking to have it
* written, so we are not breaking any promises.
*/
if (vp->v_flag & VXLOCK)
mp_fixme("This operation is not atomic wrt the rest of the code");
VI_LOCK(vp);
if (vp->v_iflag & VI_XLOCK) {
VI_UNLOCK(vp);
break;
} else
VI_UNLOCK(vp);
/*
* We prevent deadlock by always fetching inodes from the
* root, moving down the directory tree. Thus, when fetching
@ -5502,7 +5509,7 @@ request_cleanup(resource, islocked)
proc_waiting += 1;
if (handle.callout == NULL)
handle = timeout(pause_timer, 0, tickdelay > 2 ? tickdelay : 2);
interlocked_sleep(&lk, SLEEP, (caddr_t)&proc_waiting, PPAUSE,
interlocked_sleep(&lk, SLEEP, (caddr_t)&proc_waiting, NULL, PPAUSE,
"softupdate", 0);
proc_waiting -= 1;
if (islocked == 0)
@ -5760,13 +5767,13 @@ getdirtybuf(bpp, waitfor)
if (waitfor != MNT_WAIT)
return (0);
bp->b_xflags |= BX_BKGRDWAIT;
interlocked_sleep(&lk, SLEEP, &bp->b_xflags, PRIBIO,
"getbuf", 0);
interlocked_sleep(&lk, SLEEP, &bp->b_xflags, NULL,
PRIBIO, "getbuf", 0);
continue;
}
if (waitfor != MNT_WAIT)
return (0);
error = interlocked_sleep(&lk, LOCKBUF, bp,
error = interlocked_sleep(&lk, LOCKBUF, bp, NULL,
LK_EXCLUSIVE | LK_SLEEPFAIL, 0, 0);
if (error != ENOLCK) {
FREE_LOCK(&lk);
@ -5793,11 +5800,13 @@ drain_output(vp, islocked)
if (!islocked)
ACQUIRE_LOCK(&lk);
VI_LOCK(vp);
while (vp->v_numoutput) {
vp->v_flag |= VBWAIT;
vp->v_iflag |= VI_BWAIT;
interlocked_sleep(&lk, SLEEP, (caddr_t)&vp->v_numoutput,
PRIBIO + 1, "drainvp", 0);
VI_MTX(vp), PRIBIO + 1, "drainvp", 0);
}
VI_UNLOCK(vp);
if (!islocked)
FREE_LOCK(&lk);
}

View File

@ -1001,7 +1001,8 @@ ffs_flushfiles(mp, flags, td)
*/
}
#endif
if (ump->um_devvp->v_flag & VCOPYONWRITE) {
ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles");
if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
return (error);
ffs_snapshot_unmount(mp);

View File

@ -234,11 +234,13 @@ ffs_fsync(ap)
}
if (wait) {
VI_LOCK(vp);
while (vp->v_numoutput) {
vp->v_flag |= VBWAIT;
(void) tsleep((caddr_t)&vp->v_numoutput,
vp->v_iflag |= VI_BWAIT;
msleep((caddr_t)&vp->v_numoutput, VI_MTX(vp),
PRIBIO + 4, "ffsfsn", 0);
}
VI_UNLOCK(vp);
/*
* Ensure that any filesystem metatdata associated

View File

@ -659,7 +659,8 @@ ufs_extattr_enable(struct ufsmount *ump, int attrnamespace,
goto free_exit;
}
backing_vnode->v_flag |= VSYSTEM;
ASSERT_VOP_LOCKED(backing_vnode, "ufs_extattr_enable");
backing_vnode->v_vflag |= VV_SYSTEM;
LIST_INSERT_HEAD(&ump->um_extattr.uepm_list, attribute,
uele_entries);
@ -689,7 +690,8 @@ ufs_extattr_disable(struct ufsmount *ump, int attrnamespace,
LIST_REMOVE(uele, uele_entries);
uele->uele_backing_vnode->v_flag &= ~VSYSTEM;
ASSERT_VOP_LOCKED(uele->uele_backing_vnode, "ufs_extattr_disable");
uele->uele_backing_vnode->v_vflag &= ~VV_SYSTEM;
error = vn_close(uele->uele_backing_vnode, FREAD|FWRITE,
td->td_ucred, td);

View File

@ -417,7 +417,8 @@ quotaon(td, mp, type, fname)
quotaoff(td, mp, type);
ump->um_qflags[type] |= QTF_OPENING;
mp->mnt_flag |= MNT_QUOTA;
vp->v_flag |= VSYSTEM;
ASSERT_VOP_LOCKED(vp, "quotaon");
vp->v_vflag |= VV_SYSTEM;
*vpp = vp;
/*
* Save the credential of the process that turned on quotas.
@ -523,7 +524,8 @@ quotaoff(td, mp, type)
}
mtx_unlock(&mntvnode_mtx);
dqflush(qvp);
qvp->v_flag &= ~VSYSTEM;
ASSERT_VOP_LOCKED(qvp, "quotaoff");
qvp->v_vflag &= ~VV_SYSTEM;
error = vn_close(qvp, FREAD|FWRITE, td->td_ucred, td);
ump->um_quotas[type] = NULLVP;
crfree(ump->um_cred[type]);

View File

@ -793,7 +793,7 @@ ufs_remove(ap)
}
error = ufs_dirremove(dvp, ip, ap->a_cnp->cn_flags, 0);
if (ip->i_nlink <= 0)
vp->v_flag |= VNOSYNC;
vp->v_vflag |= VV_NOSYNC;
VN_KNOTE(vp, NOTE_DELETE);
VN_KNOTE(dvp, NOTE_WRITE);
out:
@ -2289,8 +2289,9 @@ ufs_vinit(mntp, specops, fifoops, vpp)
break;
}
ASSERT_VOP_LOCKED(vp, "ufs_vinit");
if (ip->i_number == ROOTINO)
vp->v_flag |= VROOT;
vp->v_vflag |= VV_ROOT;
/*
* Initialize modrev times
*/

View File

@ -415,7 +415,8 @@ vm_object_vndeallocate(vm_object_t object)
object->ref_count--;
if (object->ref_count == 0) {
vp->v_flag &= ~VTEXT;
mp_fixme("Unlocked vflag access.");
vp->v_vflag &= ~VV_TEXT;
#ifdef ENABLE_VFS_IOOPT
vm_object_clear_flag(object, OBJ_OPT);
#endif
@ -760,11 +761,10 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
if (object->type == OBJT_VNODE &&
(vp = (struct vnode *)object->handle) != NULL) {
if (vp->v_flag & VOBJDIRTY) {
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VOBJDIRTY;
mtx_unlock(&vp->v_interlock);
}
VI_LOCK(vp);
if (vp->v_iflag & VI_OBJDIRTY)
vp->v_iflag &= ~VI_OBJDIRTY;
VI_UNLOCK(vp);
}
}
@ -1900,11 +1900,10 @@ vm_object_set_writeable_dirty(vm_object_t object)
vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
if (object->type == OBJT_VNODE &&
(vp = (struct vnode *)object->handle) != NULL) {
if ((vp->v_flag & VOBJDIRTY) == 0) {
mtx_lock(&vp->v_interlock);
vp->v_flag |= VOBJDIRTY;
mtx_unlock(&vp->v_interlock);
}
VI_LOCK(vp);
if ((vp->v_iflag & VI_OBJDIRTY) == 0)
vp->v_iflag |= VI_OBJDIRTY;
VI_UNLOCK(vp);
}
}

View File

@ -1174,8 +1174,12 @@ vm_page_free_toq(vm_page_t m)
) {
struct vnode *vp = (struct vnode *)object->handle;
if (vp && VSHOULDFREE(vp))
if (vp) {
VI_LOCK(vp);
if (VSHOULDFREE(vp))
vfree(vp);
VI_UNLOCK(vp);
}
}
/*

View File

@ -142,11 +142,13 @@ swapdev_strategy(ap)
if (bp->b_iocmd == BIO_WRITE) {
vp = bp->b_vp;
if (vp) {
VI_LOCK(vp);
vp->v_numoutput--;
if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
vp->v_flag &= ~VBWAIT;
if ((vp->v_iflag & VI_BWAIT) && vp->v_numoutput <= 0) {
vp->v_iflag &= ~VI_BWAIT;
wakeup(&vp->v_numoutput);
}
VI_UNLOCK(vp);
}
sp->sw_vp->v_numoutput++;
}

View File

@ -125,11 +125,13 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
* Prevent race condition when allocating the object. This
* can happen with NFS vnodes since the nfsnode isn't locked.
*/
while (vp->v_flag & VOLOCK) {
vp->v_flag |= VOWANT;
tsleep(vp, PVM, "vnpobj", 0);
VI_LOCK(vp);
while (vp->v_iflag & VI_OLOCK) {
vp->v_iflag |= VI_OWANT;
msleep(vp, VI_MTX(vp), PVM, "vnpobj", 0);
}
vp->v_flag |= VOLOCK;
vp->v_iflag |= VI_OLOCK;
VI_UNLOCK(vp);
/*
* If the object is being terminated, wait for it to
@ -156,12 +158,14 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
} else {
object->ref_count++;
}
VI_LOCK(vp);
vp->v_usecount++;
vp->v_flag &= ~VOLOCK;
if (vp->v_flag & VOWANT) {
vp->v_flag &= ~VOWANT;
vp->v_iflag &= ~VI_OLOCK;
if (vp->v_iflag & VI_OWANT) {
vp->v_iflag &= ~VI_OWANT;
wakeup(vp);
}
VI_UNLOCK(vp);
mtx_unlock(&Giant);
return (object);
}
@ -180,8 +184,9 @@ vnode_pager_dealloc(object)
object->handle = NULL;
object->type = OBJT_DEAD;
ASSERT_VOP_LOCKED(vp, "vnode_pager_dealloc");
vp->v_object = NULL;
vp->v_flag &= ~(VTEXT | VOBJBUF);
vp->v_vflag &= ~(VV_TEXT | VV_OBJBUF);
}
static boolean_t
@ -204,9 +209,12 @@ vnode_pager_haspage(object, pindex, before, after)
* If no vp or vp is doomed or marked transparent to VM, we do not
* have the page.
*/
if ((vp == NULL) || (vp->v_flag & VDOOMED))
if (vp == NULL)
return FALSE;
mp_fixme("Unlocked iflags access");
if (vp->v_iflag & VI_DOOMED)
return FALSE;
/*
* If filesystem no longer mounted or offset beyond end of file we do
* not have the page.