Replace the MNT_VNODE_FOREACH interface with MNT_VNODE_FOREACH_ALL.

The primary changes are that the user of the interface no longer
needs to manage the mount-mutex locking and that the vnode that
is returned has its mutex (the vnode interlock) held, thus avoiding
the need to check whether it is DOOMED or in some other possible
end-of-life scenario.
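
For filesystems being converted, the change is mechanical. A minimal
before-and-after sketch (hypothetical loop body; vp, mp, and mvp stand
for the same variables used in the diffs below):

	/* Old interface: caller manages the mount mutex and the
	 * end-of-life checks itself. */
	MNT_ILOCK(mp);
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		VI_LOCK(vp);
		if (vp->v_iflag & VI_DOOMED) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);
		/* ... work on vp with its interlock held ... */
		VI_UNLOCK(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);

	/* New interface: the iterator manages the mount mutex and skips
	 * DOOMED vnodes; each vp is returned with its interlock held. */
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/* ... work on vp with its interlock held ... */
		VI_UNLOCK(vp);
	}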

To minimize compatibility issues for third-party developers, the
old MNT_VNODE_FOREACH interface will remain available so that this
change can be MFC'ed to 9. Following the MFC to 9, MNT_VNODE_FOREACH
will be removed in head.

The reason for this update is to prepare for the addition of the
MNT_VNODE_FOREACH_ACTIVE interface, which will loop over just the
active vnodes associated with a mount point (typically less than
1% of that mount point's vnodes).

Reviewed by: kib
Tested by:   Peter Holm
MFC after:   2 weeks
Author: Kirk McKusick
Date:   2012-04-17 16:28:22 +00:00
commit 71469bb38f
parent 9e21ef395a
Notes (svn2git, 2020-12-20): svn path=/head/; revision=234386
15 changed files with 178 additions and 212 deletions


@@ -365,13 +365,7 @@ coda_checkunmounting(struct mount *mp)
 	struct cnode *cp;
 	int count = 0, bad = 0;
 
-	MNT_ILOCK(mp);
-	MNT_VNODE_FOREACH(vp, mp, nvp) {
-		VI_LOCK(vp);
-		if (vp->v_iflag & VI_DOOMED) {
-			VI_UNLOCK(vp);
-			continue;
-		}
+	MNT_VNODE_FOREACH_ALL(vp, mp, nvp) {
 		cp = VTOC(vp);
 		count++;
 		if (!(cp->c_flags & C_UNMOUNTING)) {
@@ -381,7 +375,6 @@ coda_checkunmounting(struct mount *mp)
 		}
 		VI_UNLOCK(vp);
 	}
-	MNT_IUNLOCK(mp);
 }
 
 void


@@ -480,19 +480,12 @@ ext2_reload(struct mount *mp, struct thread *td)
 	}
 
loop:
-	MNT_ILOCK(mp);
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		VI_LOCK(vp);
-		if (vp->v_iflag & VI_DOOMED) {
-			VI_UNLOCK(vp);
-			continue;
-		}
-		MNT_IUNLOCK(mp);
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		/*
 		 * Step 4: invalidate all cached file data.
 		 */
 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
-			MNT_VNODE_FOREACH_ABORT(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			goto loop;
 		}
 		if (vinvalbuf(vp, 0, 0, 0))
@@ -507,7 +500,7 @@ ext2_reload(struct mount *mp, struct thread *td)
 		if (error) {
 			VOP_UNLOCK(vp, 0);
 			vrele(vp);
-			MNT_VNODE_FOREACH_ABORT(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			return (error);
 		}
 		ext2_ei2i((struct ext2fs_dinode *) ((char *)bp->b_data +
@@ -515,9 +508,7 @@ ext2_reload(struct mount *mp, struct thread *td)
 		brelse(bp);
 		VOP_UNLOCK(vp, 0);
 		vrele(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 	return (0);
 }
@@ -841,27 +832,24 @@ ext2_sync(struct mount *mp, int waitfor)
 	 */
-	MNT_ILOCK(mp);
loop:
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		VI_LOCK(vp);
-		if (vp->v_type == VNON || (vp->v_iflag & VI_DOOMED)) {
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
+		if (vp->v_type == VNON) {
 			VI_UNLOCK(vp);
 			continue;
 		}
-		MNT_IUNLOCK(mp);
 		ip = VTOI(vp);
 		if ((ip->i_flag &
 		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
 		    (vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
 		    waitfor == MNT_LAZY)) {
 			VI_UNLOCK(vp);
-			MNT_ILOCK(mp);
 			continue;
 		}
 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
 		if (error) {
-			MNT_ILOCK(mp);
 			if (error == ENOENT) {
-				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
+				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 				goto loop;
 			}
 			continue;
@@ -870,9 +858,7 @@ ext2_sync(struct mount *mp, int waitfor)
 			allerror = error;
 		VOP_UNLOCK(vp, 0);
 		vrele(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 	/*
 	 * Force stale file system control information to be flushed.


@@ -923,27 +923,22 @@ msdosfs_sync(struct mount *mp, int waitfor)
 	/*
 	 * Write back each (modified) denode.
 	 */
-	MNT_ILOCK(mp);
loop:
-	MNT_VNODE_FOREACH(vp, mp, nvp) {
-		VI_LOCK(vp);
-		if (vp->v_type == VNON || (vp->v_iflag & VI_DOOMED)) {
+	MNT_VNODE_FOREACH_ALL(vp, mp, nvp) {
+		if (vp->v_type == VNON) {
 			VI_UNLOCK(vp);
 			continue;
 		}
-		MNT_IUNLOCK(mp);
 		dep = VTODE(vp);
 		if ((dep->de_flag &
 		    (DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
 		    (vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
 		    waitfor == MNT_LAZY)) {
 			VI_UNLOCK(vp);
-			MNT_ILOCK(mp);
 			continue;
 		}
 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
 		if (error) {
-			MNT_ILOCK(mp);
 			if (error == ENOENT)
 				goto loop;
 			continue;
@@ -953,9 +948,7 @@ msdosfs_sync(struct mount *mp, int waitfor)
 			allerror = error;
 		VOP_UNLOCK(vp, 0);
 		vrele(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 	/*
 	 * Flush filesystem control info.


@@ -367,17 +367,10 @@ ncl_clearcommit(struct mount *mp)
 	struct buf *bp, *nbp;
 	struct bufobj *bo;
 
-	MNT_ILOCK(mp);
-	MNT_VNODE_FOREACH(vp, mp, nvp) {
+	MNT_VNODE_FOREACH_ALL(vp, mp, nvp) {
 		bo = &vp->v_bufobj;
-		VI_LOCK(vp);
-		if (vp->v_iflag & VI_DOOMED) {
-			VI_UNLOCK(vp);
-			continue;
-		}
 		vholdl(vp);
 		VI_UNLOCK(vp);
-		MNT_IUNLOCK(mp);
 		BO_LOCK(bo);
 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 			if (!BUF_ISLOCKED(bp) &&
@@ -387,9 +380,7 @@ ncl_clearcommit(struct mount *mp)
 		}
 		BO_UNLOCK(bo);
 		vdrop(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 }
 
 /*


@@ -1508,24 +1508,21 @@ nfs_sync(struct mount *mp, int waitfor)
 		MNT_IUNLOCK(mp);
 		return (EBADF);
 	}
+	MNT_IUNLOCK(mp);
 
 	/*
 	 * Force stale buffer cache information to be flushed.
 	 */
loop:
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		VI_LOCK(vp);
-		MNT_IUNLOCK(mp);
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		/* XXX Racy bv_cnt check. */
 		if (NFSVOPISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
 		    waitfor == MNT_LAZY) {
 			VI_UNLOCK(vp);
-			MNT_ILOCK(mp);
 			continue;
 		}
 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
-			MNT_ILOCK(mp);
-			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			goto loop;
 		}
 		error = VOP_FSYNC(vp, waitfor, td);
@@ -1533,10 +1530,7 @@ nfs_sync(struct mount *mp, int waitfor)
 			allerror = error;
 		NFSVOPUNLOCK(vp, 0);
 		vrele(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 	return (allerror);
 }


@@ -1114,18 +1114,15 @@ vfs_stdsync(mp, waitfor)
 	/*
 	 * Force stale buffer cache information to be flushed.
 	 */
-	MNT_ILOCK(mp);
loop:
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		/* bv_cnt is an acceptable race here. */
-		if (vp->v_bufobj.bo_dirty.bv_cnt == 0)
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
+		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
+			VI_UNLOCK(vp);
 			continue;
-		VI_LOCK(vp);
-		MNT_IUNLOCK(mp);
+		}
 		if ((error = vget(vp, lockreq, td)) != 0) {
-			MNT_ILOCK(mp);
 			if (error == ENOENT) {
-				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
+				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 				goto loop;
 			}
 			continue;
@@ -1134,9 +1131,7 @@ vfs_stdsync(mp, waitfor)
 		if (error)
 			allerror = error;
 		vput(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 	return (allerror);
 }


@@ -81,7 +81,6 @@ SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0,
     "Unprivileged users may mount and unmount file systems");
 
 MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure");
-static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
 static uma_zone_t mount_zone;
 
 /* List of mounted filesystems. */
@@ -1720,10 +1719,14 @@ vfs_copyopt(opts, name, dest, len)
 }
 
 /*
- * This is a helper function for filesystems to traverse their
- * vnodes. See MNT_VNODE_FOREACH() in sys/mount.h
+ * These are helper functions for filesystems to traverse all
+ * their vnodes. See MNT_VNODE_FOREACH() in sys/mount.h.
+ *
+ * This interface has been deprecated in favor of MNT_VNODE_FOREACH_ALL.
 */
+MALLOC_DECLARE(M_VNODE_MARKER);
+
 struct vnode *
 __mnt_vnode_next(struct vnode **mvp, struct mount *mp)
 {
@@ -1812,7 +1815,6 @@ __mnt_vnode_markerfree(struct vnode **mvp, struct mount *mp)
 	MNT_REL(mp);
 }
 
-
 int
 __vfs_statfs(struct mount *mp, struct statfs *sbp)
 {


@@ -2467,17 +2467,13 @@ vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
 		}
 		vput(rootvp);
 	}
-	MNT_ILOCK(mp);
loop:
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		VI_LOCK(vp);
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		vholdl(vp);
-		MNT_IUNLOCK(mp);
 		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
 		if (error) {
 			vdrop(vp);
-			MNT_ILOCK(mp);
-			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			goto loop;
 		}
 		/*
@@ -2486,7 +2482,6 @@ vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
 			VOP_UNLOCK(vp, 0);
 			vdrop(vp);
-			MNT_ILOCK(mp);
 			continue;
 		}
 		/*
@@ -2504,7 +2499,7 @@ vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
 			if (error != 0) {
 				VOP_UNLOCK(vp, 0);
 				vdrop(vp);
-				MNT_VNODE_FOREACH_ABORT(mp, mvp);
+				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 				return (error);
 			}
 			error = VOP_GETATTR(vp, &vattr, td->td_ucred);
@@ -2515,7 +2510,6 @@ vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
 				VOP_UNLOCK(vp, 0);
 				vdropl(vp);
-				MNT_ILOCK(mp);
 				continue;
 			}
 		} else
@@ -2540,9 +2534,7 @@ vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
 		}
 		VOP_UNLOCK(vp, 0);
 		vdropl(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
 		/*
 		 * If just the root vnode is busy, and if its refcount
@@ -3279,19 +3271,15 @@ vfs_msync(struct mount *mp, int flags)
 	struct vm_object *obj;
 
 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
-	MNT_ILOCK(mp);
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		VI_LOCK(vp);
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		obj = vp->v_object;
 		if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 &&
 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
-			MNT_IUNLOCK(mp);
 			if (!vget(vp,
 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
 			    curthread)) {
 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
 					vput(vp);
-					MNT_ILOCK(mp);
 					continue;
 				}
@@ -3305,11 +3293,9 @@ vfs_msync(struct mount *mp, int flags)
 				}
 				vput(vp);
 			}
-			MNT_ILOCK(mp);
 		} else
 			VI_UNLOCK(vp);
 	}
-	MNT_IUNLOCK(mp);
 }
 
 /*
@@ -4504,3 +4490,90 @@ vfs_unixify_accmode(accmode_t *accmode)
 	return (0);
 }
 
+/*
+ * These are helper functions for filesystems to traverse all
+ * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
+ *
+ * This interface replaces MNT_VNODE_FOREACH.
+ */
+
+MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
+
+struct vnode *
+__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
+{
+	struct vnode *vp;
+
+	if (should_yield())
+		kern_yield(PRI_UNCHANGED);
+	MNT_ILOCK(mp);
+	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
+	vp = TAILQ_NEXT(*mvp, v_nmntvnodes);
+	while (vp != NULL && (vp->v_type == VMARKER ||
+	    (vp->v_iflag & VI_DOOMED) != 0))
+		vp = TAILQ_NEXT(vp, v_nmntvnodes);
+
+	/* Check if we are done */
+	if (vp == NULL) {
+		__mnt_vnode_markerfree_all(mvp, mp);
+		/* MNT_IUNLOCK(mp); -- done in above function */
+		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
+		return (NULL);
+	}
+	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
+	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
+	VI_LOCK(vp);
+	MNT_IUNLOCK(mp);
+	return (vp);
+}
+
+struct vnode *
+__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
+{
+	struct vnode *vp;
+
+	*mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
+	MNT_ILOCK(mp);
+	MNT_REF(mp);
+	(*mvp)->v_type = VMARKER;
+
+	vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
+	while (vp != NULL && (vp->v_type == VMARKER ||
+	    (vp->v_iflag & VI_DOOMED) != 0))
+		vp = TAILQ_NEXT(vp, v_nmntvnodes);
+
+	/* Check if we are done */
+	if (vp == NULL) {
+		MNT_REL(mp);
+		MNT_IUNLOCK(mp);
+		free(*mvp, M_VNODE_MARKER);
+		*mvp = NULL;
+		return (NULL);
+	}
+	(*mvp)->v_mount = mp;
+	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
+	VI_LOCK(vp);
+	MNT_IUNLOCK(mp);
+	return (vp);
+}
+
+void
+__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
+{
+
+	if (*mvp == NULL) {
+		MNT_IUNLOCK(mp);
+		return;
+	}
+
+	mtx_assert(MNT_MTX(mp), MA_OWNED);
+
+	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
+	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
+	MNT_REL(mp);
+	MNT_IUNLOCK(mp);
+	free(*mvp, M_VNODE_MARKER);
+	*mvp = NULL;
+}


@@ -866,16 +866,10 @@ nfs_clearcommit(struct mount *mp)
 	struct bufobj *bo;
 
-	MNT_ILOCK(mp);
-	MNT_VNODE_FOREACH(vp, mp, nvp) {
+	MNT_VNODE_FOREACH_ALL(vp, mp, nvp) {
 		bo = &vp->v_bufobj;
-		VI_LOCK(vp);
-		if (vp->v_iflag & VI_DOOMED) {
-			VI_UNLOCK(vp);
-			continue;
-		}
 		vholdl(vp);
 		VI_UNLOCK(vp);
-		MNT_IUNLOCK(mp);
 		BO_LOCK(bo);
 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 			if (!BUF_ISLOCKED(bp) &&
@@ -885,9 +879,7 @@ nfs_clearcommit(struct mount *mp)
 		}
 		BO_UNLOCK(bo);
 		vdrop(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 }
 
 /*


@@ -1457,19 +1457,15 @@ nfs_sync(struct mount *mp, int waitfor)
 	 * Force stale buffer cache information to be flushed.
 	 */
loop:
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		VI_LOCK(vp);
-		MNT_IUNLOCK(mp);
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		/* XXX Racy bv_cnt check. */
 		if (VOP_ISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
 		    waitfor == MNT_LAZY) {
 			VI_UNLOCK(vp);
-			MNT_ILOCK(mp);
 			continue;
 		}
 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
-			MNT_ILOCK(mp);
-			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			goto loop;
 		}
 		error = VOP_FSYNC(vp, waitfor, td);
@@ -1477,10 +1473,7 @@ nfs_sync(struct mount *mp, int waitfor)
 			allerror = error;
 		VOP_UNLOCK(vp, 0);
 		vrele(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 	return (allerror);
 }


@@ -187,6 +187,30 @@ struct mount {
 	struct lock	mnt_explock;	/* vfs_export walkers lock */
 };
 
+/*
+ * Definitions for MNT_VNODE_FOREACH_ALL.
+ */
+struct vnode *__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp);
+struct vnode *__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp);
+void __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp);
+
+#define MNT_VNODE_FOREACH_ALL(vp, mp, mvp) \
+	for (vp = __mnt_vnode_first_all(&(mvp), (mp)); \
+	    (vp) != NULL; vp = __mnt_vnode_next_all(&(mvp), (mp)))
+
+#define MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp)				\
+	do {								\
+		MNT_ILOCK(mp);						\
+		__mnt_vnode_markerfree_all(&(mvp), (mp));		\
+		/* MNT_IUNLOCK(mp); -- done in above function */	\
+		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);			\
+	} while (0)
+
 /*
  * Definitions for MNT_VNODE_FOREACH.
+ *
+ * This interface has been deprecated in favor of MNT_VNODE_FOREACH_ALL.
  */
 struct vnode *__mnt_vnode_next(struct vnode **mvp, struct mount *mp);
 struct vnode *__mnt_vnode_first(struct vnode **mvp, struct mount *mp);
 void __mnt_vnode_markerfree(struct vnode **mvp, struct mount *mp);
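
(For reference, the restart idiom used throughout this change: when a
traversal must be started over, e.g. after a failed vget(), the marker
is released with MNT_VNODE_FOREACH_ALL_ABORT. A minimal sketch distilled
from the conversions below, not itself part of the diff:

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		/* ... vp is locked and referenced here ... */
		vput(vp);
	}
)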


@@ -522,17 +522,14 @@ ffs_snapshot(mp, snapfile)
 	    FSMAXSNAP + 1 /* superblock */ + 1 /* last block */ + 1 /* size */;
 	MNT_ILOCK(mp);
 	mp->mnt_kern_flag &= ~MNTK_SUSPENDED;
+	MNT_IUNLOCK(mp);
loop:
-	MNT_VNODE_FOREACH(xvp, mp, mvp) {
-		VI_LOCK(xvp);
-		MNT_IUNLOCK(mp);
-		if ((xvp->v_iflag & VI_DOOMED) ||
-		    (xvp->v_usecount == 0 &&
+	MNT_VNODE_FOREACH_ALL(xvp, mp, mvp) {
+		if ((xvp->v_usecount == 0 &&
 		     (xvp->v_iflag & (VI_OWEINACT | VI_DOINGINACT)) == 0) ||
 		    xvp->v_type == VNON ||
 		    IS_SNAPSHOT(VTOI(xvp))) {
 			VI_UNLOCK(xvp);
-			MNT_ILOCK(mp);
 			continue;
 		}
@@ -541,13 +538,11 @@ ffs_snapshot(mp, snapfile)
 		 */
 		if (xvp == nd.ni_dvp) {
 			VI_UNLOCK(xvp);
-			MNT_ILOCK(mp);
 			continue;
 		}
 		vholdl(xvp);
 		if (vn_lock(xvp, LK_EXCLUSIVE | LK_INTERLOCK) != 0) {
-			MNT_ILOCK(mp);
-			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			vdrop(xvp);
 			goto loop;
 		}
@@ -557,7 +552,6 @@ ffs_snapshot(mp, snapfile)
 			VI_UNLOCK(xvp);
 			VOP_UNLOCK(xvp, 0);
 			vdrop(xvp);
-			MNT_ILOCK(mp);
 			continue;
 		}
 		VI_UNLOCK(xvp);
@@ -567,14 +561,12 @@ ffs_snapshot(mp, snapfile)
 		    vat.va_nlink > 0) {
 			VOP_UNLOCK(xvp, 0);
 			vdrop(xvp);
-			MNT_ILOCK(mp);
 			continue;
 		}
 		xp = VTOI(xvp);
 		if (ffs_checkfreefile(copy_fs, vp, xp->i_number)) {
 			VOP_UNLOCK(xvp, 0);
 			vdrop(xvp);
-			MNT_ILOCK(mp);
 			continue;
 		}
 		/*
@@ -610,12 +602,10 @@ ffs_snapshot(mp, snapfile)
 			free(copy_fs->fs_csp, M_UFSMNT);
 			free(copy_fs, M_UFSMNT);
 			copy_fs = NULL;
-			MNT_VNODE_FOREACH_ABORT(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			goto out1;
 		}
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 	/*
 	 * Erase the journal file from the snapshot.
 	 */
@@ -2532,31 +2522,26 @@ process_deferred_inactive(struct mount *mp)
 	td = curthread;
 	(void) vn_start_secondary_write(NULL, &mp, V_WAIT);
-	MNT_ILOCK(mp);
loop:
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		VI_LOCK(vp);
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		/*
 		 * IN_LAZYACCESS is checked here without holding any
 		 * vnode lock, but this flag is set only while holding
 		 * vnode interlock.
 		 */
-		if (vp->v_type == VNON || (vp->v_iflag & VI_DOOMED) != 0 ||
+		if (vp->v_type == VNON ||
 		    ((VTOI(vp)->i_flag & IN_LAZYACCESS) == 0 &&
-		    ((vp->v_iflag & VI_OWEINACT) == 0 ||
-		    vp->v_usecount > 0))) {
+		    ((vp->v_iflag & VI_OWEINACT) == 0 || vp->v_usecount > 0))) {
 			VI_UNLOCK(vp);
 			continue;
 		}
-		MNT_IUNLOCK(mp);
 		vholdl(vp);
 		error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
 		if (error != 0) {
 			vdrop(vp);
-			MNT_ILOCK(mp);
 			if (error == ENOENT)
 				continue;	/* vnode recycled */
-			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			goto loop;
 		}
 		ip = VTOI(vp);
@@ -2569,7 +2554,6 @@ process_deferred_inactive(struct mount *mp)
 			VI_UNLOCK(vp);
 			VOP_UNLOCK(vp, 0);
 			vdrop(vp);
-			MNT_ILOCK(mp);
 			continue;
 		}
 		vinactive(vp, td);
@@ -2578,9 +2562,7 @@ process_deferred_inactive(struct mount *mp)
 		VI_UNLOCK(vp);
 		VOP_UNLOCK(vp, 0);
 		vdrop(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 	vn_finished_secondary_write(mp);
 }


@@ -12642,29 +12642,21 @@ softdep_request_cleanup(fs, vp, cred, resource)
 	    fs->fs_cstotal.cs_nbfree <= needed) ||
 	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
 	    fs->fs_cstotal.cs_nifree <= needed)) {
-		MNT_ILOCK(mp);
-		MNT_VNODE_FOREACH(lvp, mp, mvp) {
-			VI_LOCK(lvp);
+		MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
 			if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) {
 				VI_UNLOCK(lvp);
 				continue;
 			}
-			MNT_IUNLOCK(mp);
 			if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
-			    curthread)) {
-				MNT_ILOCK(mp);
+			    curthread))
 				continue;
-			}
 			if (lvp->v_vflag & VV_NOSYNC) {	/* unlinked */
 				vput(lvp);
-				MNT_ILOCK(mp);
 				continue;
 			}
 			(void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
 			vput(lvp);
-			MNT_ILOCK(mp);
 		}
-		MNT_IUNLOCK(mp);
 		lvp = ump->um_devvp;
 		if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
 			VOP_FSYNC(lvp, MNT_NOWAIT, curthread);

@@ -705,19 +705,12 @@ ffs_reload(struct mount *mp, struct thread *td)
 	}
 
loop:
-	MNT_ILOCK(mp);
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		VI_LOCK(vp);
-		if (vp->v_iflag & VI_DOOMED) {
-			VI_UNLOCK(vp);
-			continue;
-		}
-		MNT_IUNLOCK(mp);
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		/*
 		 * Step 4: invalidate all cached file data.
 		 */
 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
-			MNT_VNODE_FOREACH_ABORT(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			goto loop;
 		}
 		if (vinvalbuf(vp, 0, 0, 0))
@@ -732,7 +725,7 @@ ffs_reload(struct mount *mp, struct thread *td)
 		if (error) {
 			VOP_UNLOCK(vp, 0);
 			vrele(vp);
-			MNT_VNODE_FOREACH_ABORT(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			return (error);
 		}
 		ffs_load_inode(bp, ip, fs, ip->i_number);
@@ -740,9 +733,7 @@ ffs_reload(struct mount *mp, struct thread *td)
 		brelse(bp);
 		VOP_UNLOCK(vp, 0);
 		vrele(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 	return (0);
 }
@@ -1441,10 +1432,8 @@ ffs_sync_lazy(mp)
 	td = curthread;
 	if ((mp->mnt_flag & MNT_NOATIME) != 0)
 		goto qupdate;
-	MNT_ILOCK(mp);
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		VI_LOCK(vp);
-		if (vp->v_iflag & VI_DOOMED || vp->v_type == VNON) {
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
+		if (vp->v_type == VNON) {
 			VI_UNLOCK(vp);
 			continue;
 		}
@@ -1462,19 +1451,14 @@ ffs_sync_lazy(mp)
 			VI_UNLOCK(vp);
 			continue;
 		}
-		MNT_IUNLOCK(mp);
 		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
-		    td)) != 0) {
-			MNT_ILOCK(mp);
+		    td)) != 0)
 			continue;
-		}
 		error = ffs_update(vp, 0);
 		if (error != 0)
 			allerror = error;
 		vput(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 
qupdate:
#ifdef QUOTA
@@ -1538,41 +1522,37 @@ ffs_sync(mp, waitfor)
 		lockreq = LK_EXCLUSIVE;
 	}
 	lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
-	MNT_ILOCK(mp);
loop:
 	/* Grab snapshot of secondary write counts */
+	MNT_ILOCK(mp);
 	secondary_writes = mp->mnt_secondary_writes;
 	secondary_accwrites = mp->mnt_secondary_accwrites;
+	MNT_IUNLOCK(mp);
 
 	/* Grab snapshot of softdep dependency counts */
-	MNT_IUNLOCK(mp);
 	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
-	MNT_ILOCK(mp);
 
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		/*
 		 * Depend on the vnode interlock to keep things stable enough
 		 * for a quick test.  Since there might be hundreds of
 		 * thousands of vnodes, we cannot afford even a subroutine
 		 * call unless there's a good chance that we have work to do.
 		 */
-		VI_LOCK(vp);
-		if (vp->v_iflag & VI_DOOMED) {
+		if (vp->v_type == VNON) {
 			VI_UNLOCK(vp);
 			continue;
 		}
 		ip = VTOI(vp);
-		if (vp->v_type == VNON || ((ip->i_flag &
+		if ((ip->i_flag &
 		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
-		    vp->v_bufobj.bo_dirty.bv_cnt == 0)) {
+		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
 			VI_UNLOCK(vp);
 			continue;
 		}
-		MNT_IUNLOCK(mp);
 		if ((error = vget(vp, lockreq, td)) != 0) {
-			MNT_ILOCK(mp);
 			if (error == ENOENT || error == ENOLCK) {
-				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
+				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 				goto loop;
 			}
 			continue;
@@ -1580,9 +1560,7 @@ ffs_sync(mp, waitfor)
 		if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0)
 			allerror = error;
 		vput(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 	/*
 	 * Force stale filesystem control information to be flushed.
 	 */
@@ -1590,10 +1568,8 @@ ffs_sync(mp, waitfor)
 		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
 			allerror = error;
 		/* Flushed work items may create new vnodes to clean */
-		if (allerror == 0 && count) {
-			MNT_ILOCK(mp);
+		if (allerror == 0 && count)
 			goto loop;
-		}
 	}
#ifdef QUOTA
 	qsync(mp);
@@ -1608,18 +1584,18 @@ ffs_sync(mp, waitfor)
 		if ((error = VOP_FSYNC(devvp, waitfor, td)) != 0)
 			allerror = error;
 		VOP_UNLOCK(devvp, 0);
-		if (allerror == 0 && waitfor == MNT_WAIT) {
-			MNT_ILOCK(mp);
+		if (allerror == 0 && waitfor == MNT_WAIT)
 			goto loop;
-		}
 	} else if (suspend != 0) {
 		if (softdep_check_suspend(mp,
 		    devvp,
 		    softdep_deps,
 		    softdep_accdeps,
 		    secondary_writes,
-		    secondary_accwrites) != 0)
+		    secondary_accwrites) != 0) {
+			MNT_IUNLOCK(mp);
 			goto loop;	/* More work needed */
+		}
 		mtx_assert(MNT_MTX(mp), MA_OWNED);
 		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
 		MNT_IUNLOCK(mp);

@@ -598,32 +598,25 @@ quotaon(struct thread *td, struct mount *mp, int type, void *fname)
 	 * adding references to quota file being opened.
 	 * NB: only need to add dquot's for inodes being modified.
 	 */
-	MNT_ILOCK(mp);
again:
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		VI_LOCK(vp);
-		MNT_IUNLOCK(mp);
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
-			MNT_ILOCK(mp);
-			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			goto again;
 		}
 		if (vp->v_type == VNON || vp->v_writecount == 0) {
 			VOP_UNLOCK(vp, 0);
 			vrele(vp);
-			MNT_ILOCK(mp);
 			continue;
 		}
 		error = getinoquota(VTOI(vp));
 		VOP_UNLOCK(vp, 0);
 		vrele(vp);
-		MNT_ILOCK(mp);
 		if (error) {
-			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			break;
 		}
 	}
-	MNT_IUNLOCK(mp);
 
 	if (error)
 		quotaoff_inchange(td, mp, type);
@@ -669,19 +662,14 @@ quotaoff1(struct thread *td, struct mount *mp, int type)
 	 * Search vnodes associated with this mount point,
 	 * deleting any references to quota file being closed.
 	 */
-	MNT_ILOCK(mp);
again:
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		VI_LOCK(vp);
-		MNT_IUNLOCK(mp);
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		if (vp->v_type == VNON) {
 			VI_UNLOCK(vp);
-			MNT_ILOCK(mp);
 			continue;
 		}
 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
-			MNT_ILOCK(mp);
-			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
+			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 			goto again;
 		}
 		ip = VTOI(vp);
@@ -690,9 +678,7 @@ quotaoff1(struct thread *td, struct mount *mp, int type)
 			dqrele(vp, dq);
 		VOP_UNLOCK(vp, 0);
 		vrele(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 
 	dqflush(qvp);
 	/* Clear um_quotas before closing the quota vnode to prevent
@@ -1057,20 +1043,16 @@ qsync(struct mount *mp)
 	 * Search vnodes associated with this mount point,
 	 * synchronizing any modified dquot structures.
 	 */
-	MNT_ILOCK(mp);
again:
-	MNT_VNODE_FOREACH(vp, mp, mvp) {
-		VI_LOCK(vp);
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		if (vp->v_type == VNON) {
 			VI_UNLOCK(vp);
 			continue;
 		}
-		MNT_IUNLOCK(mp);
 		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td);
 		if (error) {
-			MNT_ILOCK(mp);
 			if (error == ENOENT) {
-				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
+				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 				goto again;
 			}
 			continue;
@@ -1081,9 +1063,7 @@ qsync(struct mount *mp)
 			dqsync(vp, dq);
 		}
 		vput(vp);
-		MNT_ILOCK(mp);
 	}
-	MNT_IUNLOCK(mp);
 	return (0);
 }