Add marker vnodes to ensure that all vnodes associated with the mount point
are iterated over when using MNT_VNODE_FOREACH.

Reviewed by:	truckman
This commit is contained in:
Tor Egge 2006-01-09 20:42:19 +00:00
parent 61927b8299
commit 82be0a5a24
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=154152
14 changed files with 197 additions and 66 deletions

View File

@ -108,6 +108,7 @@ cd9660_setattr(ap)
case VFIFO:
case VNON:
case VBAD:
case VMARKER:
return (0);
}
}

View File

@ -501,7 +501,7 @@ static int compute_sb_data(devvp, es, fs)
static int
ext2_reload(struct mount *mp, struct thread *td)
{
struct vnode *vp, *nvp, *devvp;
struct vnode *vp, *mvp, *devvp;
struct inode *ip;
struct buf *bp;
struct ext2_super_block * es;
@ -545,7 +545,7 @@ ext2_reload(struct mount *mp, struct thread *td)
loop:
MNT_ILOCK(mp);
MNT_VNODE_FOREACH(vp, mp, nvp) {
MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp);
if (vp->v_iflag & VI_DOOMED) {
VI_UNLOCK(vp);
@ -556,6 +556,7 @@ ext2_reload(struct mount *mp, struct thread *td)
* Step 4: invalidate all cached file data.
*/
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
MNT_VNODE_FOREACH_ABORT(mp, mvp);
goto loop;
}
if (vinvalbuf(vp, 0, td, 0, 0))
@ -570,6 +571,7 @@ ext2_reload(struct mount *mp, struct thread *td)
if (error) {
VOP_UNLOCK(vp, 0, td);
vrele(vp);
MNT_VNODE_FOREACH_ABORT(mp, mvp);
return (error);
}
ext2_ei2i((struct ext2_inode *) ((char *)bp->b_data +
@ -855,7 +857,7 @@ ext2_sync(mp, waitfor, td)
int waitfor;
struct thread *td;
{
struct vnode *nvp, *vp;
struct vnode *mvp, *vp;
struct inode *ip;
struct ext2mount *ump = VFSTOEXT2(mp);
struct ext2_sb_info *fs;
@ -871,7 +873,7 @@ ext2_sync(mp, waitfor, td)
*/
MNT_ILOCK(mp);
loop:
MNT_VNODE_FOREACH(vp, mp, nvp) {
MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp);
if (vp->v_type == VNON || (vp->v_iflag & VI_DOOMED)) {
VI_UNLOCK(vp);
@ -890,8 +892,10 @@ ext2_sync(mp, waitfor, td)
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
if (error) {
MNT_ILOCK(mp);
if (error == ENOENT)
if (error == ENOENT) {
MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
goto loop;
}
continue;
}
if ((error = VOP_FSYNC(vp, waitfor, td)) != 0)

View File

@ -108,6 +108,7 @@ cd9660_setattr(ap)
case VFIFO:
case VNON:
case VBAD:
case VMARKER:
return (0);
}
}

View File

@ -545,7 +545,7 @@ vfs_stdsync(mp, waitfor, td)
int waitfor;
struct thread *td;
{
struct vnode *vp, *nvp;
struct vnode *vp, *mvp;
int error, lockreq, allerror = 0;
lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
@ -556,7 +556,7 @@ vfs_stdsync(mp, waitfor, td)
*/
MNT_ILOCK(mp);
loop:
MNT_VNODE_FOREACH(vp, mp, nvp) {
MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp);
if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
@ -567,8 +567,10 @@ vfs_stdsync(mp, waitfor, td)
if ((error = vget(vp, lockreq, td)) != 0) {
MNT_ILOCK(mp);
if (error == ENOENT)
if (error == ENOENT) {
MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
goto loop;
}
continue;
}
error = VOP_FSYNC(vp, waitfor, td);

View File

@ -90,6 +90,7 @@ SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0,
"Unprivileged users may mount and unmount file systems");
MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure");
MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
/* List of mounted filesystems. */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
@ -445,6 +446,28 @@ static void
vfs_mount_destroy(struct mount *mp, struct thread *td)
{
MNT_ILOCK(mp);
if (mp->mnt_holdcnt != 0) {
printf("Waiting for mount point to be unheld\n");
while (mp->mnt_holdcnt != 0) {
mp->mnt_holdcntwaiters++;
msleep(&mp->mnt_holdcnt, MNT_MTX(mp),
PZERO, "mntdestroy", 0);
mp->mnt_holdcntwaiters--;
}
printf("mount point unheld\n");
}
if (mp->mnt_writeopcount > 0) {
printf("Waiting for mount point write ops\n");
while (mp->mnt_writeopcount > 0) {
mp->mnt_kern_flag |= MNTK_SUSPEND;
msleep(&mp->mnt_writeopcount,
MNT_MTX(mp),
PZERO, "mntdestroy2", 0);
}
printf("mount point write ops completed\n");
}
MNT_IUNLOCK(mp);
mp->mnt_vfc->vfc_refcount--;
if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
panic("unmount: dangling vnode");
@ -453,6 +476,12 @@ vfs_mount_destroy(struct mount *mp, struct thread *td)
MNT_ILOCK(mp);
if (mp->mnt_kern_flag & MNTK_MWAIT)
wakeup(mp);
if (mp->mnt_writeopcount != 0)
panic("vfs_mount_destroy: nonzero writeopcount");
if (mp->mnt_nvnodelistsize != 0)
panic("vfs_mount_destroy: nonzero nvnodelistsize");
mp->mnt_writeopcount = -1000;
mp->mnt_nvnodelistsize = -1000;
MNT_IUNLOCK(mp);
mtx_destroy(&mp->mnt_mtx);
#ifdef MAC
@ -1658,27 +1687,96 @@ vfs_copyopt(opts, name, dest, len)
*/
/*
 * Advance a MNT_VNODE_FOREACH iteration: return the vnode that follows
 * the caller's marker *mvp on mp's vnode list, skipping any other
 * iterators' markers, and re-insert the marker just after the returned
 * vnode.  Returns NULL (and frees the marker) at end of list.
 *
 * NOTE(review): this span appears to be a unified diff with the +/-
 * markers stripped -- the old (nvp-based restart) lines and the new
 * (mvp marker-based) lines are interleaved below, so the text is not
 * valid C as it stands; confirm against the actual committed file.
 */
struct vnode *
__mnt_vnode_next(struct vnode **nvp, struct mount *mp)
__mnt_vnode_next(struct vnode **mvp, struct mount *mp)
{
struct vnode *vp;
/* Iteration runs under the mount interlock. */
mtx_assert(&mp->mnt_mtx, MA_OWNED);
mtx_assert(MNT_MTX(mp), MA_OWNED);
vp = *nvp;
KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
/* Step past our marker, skipping any other markers on the list. */
vp = TAILQ_NEXT(*mvp, v_nmntvnodes);
while (vp != NULL && vp->v_type == VMARKER)
vp = TAILQ_NEXT(vp, v_nmntvnodes);
/* Check if we are done */
if (vp == NULL)
if (vp == NULL) {
/* End of list: tear down the marker and release the hold. */
__mnt_vnode_markerfree(mvp, mp);
return (NULL);
/* If our next vnode is no longer ours, start over */
if (vp->v_mount != mp)
vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
/* Save pointer to next vnode in list */
if (vp != NULL)
*nvp = TAILQ_NEXT(vp, v_nmntvnodes);
else
*nvp = NULL;
}
/* Move the marker to sit immediately after the returned vnode. */
TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
return (vp);
}
/*
 * Begin a MNT_VNODE_FOREACH iteration over mp's vnode list.  Allocates
 * a marker vnode, inserts it after the first real (non-marker) vnode,
 * and returns that vnode.  Returns NULL with *mvp cleared when the list
 * contains no real vnodes.  Called with the mount interlock held; the
 * interlock is dropped around the M_WAITOK allocation.
 */
struct vnode *
__mnt_vnode_first(struct vnode **mvp, struct mount *mp)
{
struct vnode *vp;
mtx_assert(MNT_MTX(mp), MA_OWNED);
/* First pass: is there any non-marker vnode at all? */
vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
while (vp != NULL && vp->v_type == VMARKER)
vp = TAILQ_NEXT(vp, v_nmntvnodes);
/* Check if we are done */
if (vp == NULL) {
*mvp = NULL;
return (NULL);
}
/*
 * Hold the mount point so it cannot be destroyed while the
 * interlock is dropped for the sleeping allocation below.
 */
mp->mnt_holdcnt++;
MNT_IUNLOCK(mp);
*mvp = (struct vnode *) malloc(sizeof(struct vnode),
M_VNODE_MARKER,
M_WAITOK | M_ZERO);
MNT_ILOCK(mp);
(*mvp)->v_type = VMARKER;
/*
 * The list may have changed while the interlock was dropped;
 * rescan from the head for the first real vnode.
 */
vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
while (vp != NULL && vp->v_type == VMARKER)
vp = TAILQ_NEXT(vp, v_nmntvnodes);
/* Check if we are done */
if (vp == NULL) {
/* List emptied during the allocation: undo and bail out. */
MNT_IUNLOCK(mp);
free(*mvp, M_VNODE_MARKER);
MNT_ILOCK(mp);
*mvp = NULL;
mp->mnt_holdcnt--;
/* Wake vfs_mount_destroy() if it waits on the hold count. */
if (mp->mnt_holdcnt == 0 && mp->mnt_holdcntwaiters != 0)
wakeup(&mp->mnt_holdcnt);
return (NULL);
}
mp->mnt_markercnt++;
(*mvp)->v_mount = mp;
/* Park the marker right after the vnode being returned. */
TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
return (vp);
}
/*
 * Terminate a MNT_VNODE_FOREACH iteration: unlink and free the marker
 * vnode *mvp and drop the mount hold taken by __mnt_vnode_first().
 * Safe to call with *mvp == NULL (iteration already ran to completion).
 * Called with the mount interlock held; it is briefly dropped around
 * free().
 */
void
__mnt_vnode_markerfree(struct vnode **mvp, struct mount *mp)
{
if (*mvp == NULL)
return;
mtx_assert(MNT_MTX(mp), MA_OWNED);
KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
/* The marker is unlinked, so the interlock may drop across free(). */
MNT_IUNLOCK(mp);
free(*mvp, M_VNODE_MARKER);
MNT_ILOCK(mp);
*mvp = NULL;
mp->mnt_markercnt--;
mp->mnt_holdcnt--;
/* Wake vfs_mount_destroy() if it is waiting for the hold count. */
if (mp->mnt_holdcnt == 0 && mp->mnt_holdcntwaiters != 0)
wakeup(&mp->mnt_holdcnt);
}
int
__vfs_statfs(struct mount *mp, struct statfs *sbp, struct thread *td)
{

View File

@ -134,9 +134,9 @@ enum vtype iftovt_tab[16] = {
VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
int vttoif_tab[10] = {
0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
S_IFSOCK, S_IFIFO, S_IFMT,
S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};
/*
@ -566,7 +566,12 @@ vlrureclaim(struct mount *mp)
vn_start_write(NULL, &mp, V_WAIT);
MNT_ILOCK(mp);
count = mp->mnt_nvnodelistsize / 10 + 1;
while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
while (count != 0) {
vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
while (vp != NULL && vp->v_type == VMARKER)
vp = TAILQ_NEXT(vp, v_nmntvnodes);
if (vp == NULL)
break;
TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
--count;
@ -968,6 +973,8 @@ insmntque(struct vnode *vp, struct mount *mp)
VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
MNT_ILOCK(vp->v_mount);
TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
("neg mount point vnode list size"));
mp->mnt_nvnodelistsize++;
MNT_IUNLOCK(vp->v_mount);
}
@ -2241,7 +2248,7 @@ vflush(mp, rootrefs, flags, td)
int flags;
struct thread *td;
{
struct vnode *vp, *nvp, *rootvp = NULL;
struct vnode *vp, *mvp, *rootvp = NULL;
struct vattr vattr;
int busy = 0, error;
@ -2260,7 +2267,7 @@ vflush(mp, rootrefs, flags, td)
}
MNT_ILOCK(mp);
loop:
MNT_VNODE_FOREACH(vp, mp, nvp) {
MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp);
vholdl(vp);
@ -2269,6 +2276,7 @@ vflush(mp, rootrefs, flags, td)
if (error) {
vdrop(vp);
MNT_ILOCK(mp);
MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
goto loop;
}
/*
@ -2489,7 +2497,8 @@ count_dev(dev)
* Print out a description of a vnode.
*/
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
"VMARKER"};
void
vn_printf(struct vnode *vp, const char *fmt, ...)
@ -2818,20 +2827,11 @@ vfs_unmountall()
void
vfs_msync(struct mount *mp, int flags)
{
struct vnode *vp, *nvp;
struct vnode *vp, *mvp;
struct vm_object *obj;
int tries;
tries = 5;
MNT_ILOCK(mp);
loop:
TAILQ_FOREACH_SAFE(vp, &mp->mnt_nvnodelist, v_nmntvnodes, nvp) {
if (vp->v_mount != mp) {
if (--tries > 0)
goto loop;
break;
}
MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp);
if ((vp->v_iflag & VI_OBJDIRTY) &&
(flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
@ -2856,11 +2856,6 @@ vfs_msync(struct mount *mp, int flags)
vput(vp);
}
MNT_ILOCK(mp);
if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
if (--tries > 0)
goto loop;
break;
}
} else
VI_UNLOCK(vp);
}

View File

@ -733,7 +733,7 @@ nfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td)
static int
nfs_sync(struct mount *mp, int waitfor, struct thread *td)
{
struct vnode *vp, *nvp;
struct vnode *vp, *mvp;
int error, allerror = 0;
/*
@ -741,7 +741,7 @@ nfs_sync(struct mount *mp, int waitfor, struct thread *td)
*/
MNT_ILOCK(mp);
loop:
MNT_VNODE_FOREACH(vp, mp, nvp) {
MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp);
MNT_IUNLOCK(mp);
if (VOP_ISLOCKED(vp, NULL) ||
@ -753,6 +753,7 @@ nfs_sync(struct mount *mp, int waitfor, struct thread *td)
}
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
MNT_ILOCK(mp);
MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
goto loop;
}
error = VOP_FSYNC(vp, waitfor, td);

View File

@ -981,7 +981,7 @@ nfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td)
static int
nfs_sync(struct mount *mp, int waitfor, struct thread *td)
{
struct vnode *vp, *nvp;
struct vnode *vp, *mvp;
int error, allerror = 0;
/*
@ -989,7 +989,7 @@ nfs_sync(struct mount *mp, int waitfor, struct thread *td)
*/
MNT_ILOCK(mp);
loop:
MNT_VNODE_FOREACH(vp, mp, nvp) {
MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp);
MNT_IUNLOCK(mp);
if (VOP_ISLOCKED(vp, NULL) ||
@ -1001,6 +1001,7 @@ nfs_sync(struct mount *mp, int waitfor, struct thread *td)
}
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
MNT_ILOCK(mp);
MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
goto loop;
}
error = VOP_FSYNC(vp, waitfor, td);

View File

@ -160,14 +160,28 @@ struct mount {
struct label *mnt_fslabel; /* MAC label for the fs */
int mnt_nvnodelistsize; /* # of vnodes on this mount */
u_int mnt_hashseed; /* Random seed for vfs_hash */
int mnt_markercnt; /* marker vnodes in use */
int mnt_holdcnt; /* hold count */
int mnt_holdcntwaiters; /* waits on hold count */
};
struct vnode *__mnt_vnode_next(struct vnode **nvp, struct mount *mp);
struct vnode *__mnt_vnode_next(struct vnode **mvp, struct mount *mp);
struct vnode *__mnt_vnode_first(struct vnode **mvp, struct mount *mp);
void __mnt_vnode_markerfree(struct vnode **mvp, struct mount *mp);
#define MNT_VNODE_FOREACH(vp, mp, vp2) \
for ((vp2) = TAILQ_FIRST(&(mp)->mnt_nvnodelist); \
(vp = __mnt_vnode_next(&(vp2), (mp))) != NULL;)
#define MNT_VNODE_FOREACH(vp, mp, mvp) \
for (vp = __mnt_vnode_first(&(mvp), (mp)); \
(vp) != NULL; vp = __mnt_vnode_next(&(mvp), (mp)))
#define MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp) \
__mnt_vnode_markerfree(&(mvp), (mp))
#define MNT_VNODE_FOREACH_ABORT(mp, mvp) \
do { \
MNT_ILOCK(mp); \
MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp); \
MNT_IUNLOCK(mp); \
} while (0)
#define MNT_ILOCK(mp) mtx_lock(&(mp)->mnt_mtx)
#define MNT_IUNLOCK(mp) mtx_unlock(&(mp)->mnt_mtx)

View File

@ -59,7 +59,8 @@
/*
* Vnode types. VNON means no type.
*/
enum vtype { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO, VBAD };
enum vtype { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO, VBAD,
VMARKER };
/*
* Each underlying filesystem allocates its own private area and hangs

View File

@ -203,7 +203,7 @@ ffs_snapshot(mp, snapfile)
struct nameidata nd;
struct mount *wrtmp;
struct vattr vat;
struct vnode *vp, *xvp, *nvp, *devvp;
struct vnode *vp, *xvp, *mvp, *devvp;
struct uio auio;
struct iovec aiov;
struct snapdata *sn;
@ -485,7 +485,7 @@ ffs_snapshot(mp, snapfile)
MNT_ILOCK(mp);
mp->mnt_kern_flag &= ~MNTK_SUSPENDED;
loop:
MNT_VNODE_FOREACH(xvp, mp, nvp) {
MNT_VNODE_FOREACH(xvp, mp, mvp) {
VI_LOCK(xvp);
MNT_IUNLOCK(mp);
if ((xvp->v_iflag & VI_DOOMED) ||
@ -506,6 +506,7 @@ ffs_snapshot(mp, snapfile)
}
if (vn_lock(xvp, LK_EXCLUSIVE | LK_INTERLOCK, td) != 0) {
MNT_ILOCK(mp);
MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
goto loop;
}
if (snapdebug)
@ -553,6 +554,7 @@ ffs_snapshot(mp, snapfile)
free(copy_fs->fs_csp, M_UFSMNT);
bawrite(sbp);
sbp = NULL;
MNT_VNODE_FOREACH_ABORT(mp, mvp);
goto out1;
}
MNT_ILOCK(mp);

View File

@ -445,7 +445,7 @@ ffs_cmount(struct mntarg *ma, void *data, int flags, struct thread *td)
static int
ffs_reload(struct mount *mp, struct thread *td)
{
struct vnode *vp, *nvp, *devvp;
struct vnode *vp, *mvp, *devvp;
struct inode *ip;
void *space;
struct buf *bp;
@ -536,7 +536,7 @@ ffs_reload(struct mount *mp, struct thread *td)
loop:
MNT_ILOCK(mp);
MNT_VNODE_FOREACH(vp, mp, nvp) {
MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp);
if (vp->v_iflag & VI_DOOMED) {
VI_UNLOCK(vp);
@ -547,6 +547,7 @@ ffs_reload(struct mount *mp, struct thread *td)
* Step 4: invalidate all cached file data.
*/
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
MNT_VNODE_FOREACH_ABORT(mp, mvp);
goto loop;
}
if (vinvalbuf(vp, 0, td, 0, 0))
@ -561,6 +562,7 @@ ffs_reload(struct mount *mp, struct thread *td)
if (error) {
VOP_UNLOCK(vp, 0, td);
vrele(vp);
MNT_VNODE_FOREACH_ABORT(mp, mvp);
return (error);
}
ffs_load_inode(bp, ip, fs, ip->i_number);
@ -1106,7 +1108,7 @@ ffs_sync(mp, waitfor, td)
int waitfor;
struct thread *td;
{
struct vnode *nvp, *vp, *devvp;
struct vnode *mvp, *vp, *devvp;
struct inode *ip;
struct ufsmount *ump = VFSTOUFS(mp);
struct fs *fs;
@ -1130,7 +1132,7 @@ ffs_sync(mp, waitfor, td)
lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
MNT_ILOCK(mp);
loop:
MNT_VNODE_FOREACH(vp, mp, nvp) {
MNT_VNODE_FOREACH(vp, mp, mvp) {
/*
* Depend on the mntvnode_slock to keep things stable enough
* for a quick test. Since there might be hundreds of
@ -1152,8 +1154,10 @@ ffs_sync(mp, waitfor, td)
MNT_IUNLOCK(mp);
if ((error = vget(vp, lockreq, td)) != 0) {
MNT_ILOCK(mp);
if (error == ENOENT || error == ENOLCK)
if (error == ENOENT || error == ENOLCK) {
MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
goto loop;
}
continue;
}
if ((error = ffs_syncvnode(vp, waitfor)) != 0)

View File

@ -403,7 +403,7 @@ quotaon(td, mp, type, fname)
{
struct ufsmount *ump = VFSTOUFS(mp);
struct vnode *vp, **vpp;
struct vnode *nvp;
struct vnode *mvp;
struct dquot *dq;
int error, flags;
struct nameidata nd;
@ -453,11 +453,12 @@ quotaon(td, mp, type, fname)
*/
MNT_ILOCK(mp);
again:
MNT_VNODE_FOREACH(vp, mp, nvp) {
MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp);
MNT_IUNLOCK(mp);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
MNT_ILOCK(mp);
MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
goto again;
}
if (vp->v_type == VNON || vp->v_writecount == 0) {
@ -470,8 +471,10 @@ quotaon(td, mp, type, fname)
VOP_UNLOCK(vp, 0, td);
vrele(vp);
MNT_ILOCK(mp);
if (error)
if (error) {
MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
break;
}
}
MNT_IUNLOCK(mp);
ump->um_qflags[type] &= ~QTF_OPENING;
@ -490,7 +493,7 @@ quotaoff(td, mp, type)
int type;
{
struct vnode *vp;
struct vnode *qvp, *nvp;
struct vnode *qvp, *mvp;
struct ufsmount *ump = VFSTOUFS(mp);
struct dquot *dq;
struct inode *ip;
@ -509,7 +512,7 @@ quotaoff(td, mp, type)
*/
MNT_ILOCK(mp);
again:
MNT_VNODE_FOREACH(vp, mp, nvp) {
MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp);
MNT_IUNLOCK(mp);
if (vp->v_type == VNON) {
@ -519,6 +522,7 @@ quotaoff(td, mp, type)
}
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
MNT_ILOCK(mp);
MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
goto again;
}
ip = VTOI(vp);
@ -717,7 +721,7 @@ qsync(mp)
{
struct ufsmount *ump = VFSTOUFS(mp);
struct thread *td = curthread; /* XXX */
struct vnode *vp, *nvp;
struct vnode *vp, *mvp;
struct dquot *dq;
int i, error;
@ -736,7 +740,7 @@ qsync(mp)
*/
MNT_ILOCK(mp);
again:
MNT_VNODE_FOREACH(vp, mp, nvp) {
MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp);
MNT_IUNLOCK(mp);
if (vp->v_type == VNON) {
@ -747,8 +751,10 @@ qsync(mp)
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
if (error) {
MNT_ILOCK(mp);
if (error == ENOENT)
if (error == ENOENT) {
MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
goto again;
}
continue;
}
for (i = 0; i < MAXQUOTAS; i++) {

View File

@ -692,6 +692,7 @@ nfs_filestat(struct vnode *vp, struct filestat *fsp)
break;
case VNON:
case VBAD:
case VMARKER:
return 0;
};
fsp->mode = mode;