Remove mntvnode_mtx and replace it with a per-mountpoint mutex.

Introduce two new macros MNT_ILOCK(mp)/MNT_IUNLOCK(mp) to
operate on this mutex transparently.

Eventually the new mutex will protect more fields in
struct mount, not only the vnode list.
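
Every file converted below follows the same traversal idiom. As a
minimal sketch (assuming a mount point mp; the elided body stands in
for per-filesystem work that may sleep):

	struct vnode *vp, *nvp;

	MNT_ILOCK(mp);
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
		VI_LOCK(vp);
		/* Drop the mount interlock around anything that may sleep. */
		MNT_IUNLOCK(mp);
		/* ... per-filesystem work on vp ... */
		VI_UNLOCK(vp);
		/* Retake the interlock before continuing the walk. */
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);

The callers below additionally restart the scan (the goto loop cases)
when the list may have changed while the interlock was dropped.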

Discussed with: jeff
Alexander Kabaev 2003-11-05 04:30:08 +00:00
parent 5c8bb90bf7
commit ca430f2e92
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=122091
15 changed files with 124 additions and 120 deletions

View File

@@ -307,12 +307,12 @@ coda_unmounting(whoIam)
void
coda_checkunmounting(mp)
struct mount *mp;
{
register struct vnode *vp, *nvp;
struct cnode *cp;
int count = 0, bad = 0;
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
nvp = TAILQ_NEXT(vp, v_nmntvnodes);
if (vp->v_mount != mp)
@@ -331,7 +331,7 @@ coda_checkunmounting(mp)
}
VI_UNLOCK(vp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
}
void

View File

@@ -307,12 +307,12 @@ coda_unmounting(whoIam)
void
coda_checkunmounting(mp)
struct mount *mp;
{
register struct vnode *vp, *nvp;
struct cnode *cp;
int count = 0, bad = 0;
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
nvp = TAILQ_NEXT(vp, v_nmntvnodes);
if (vp->v_mount != mp)
@@ -331,7 +331,7 @@ coda_checkunmounting(mp)
}
VI_UNLOCK(vp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
}
void

View File

@@ -778,7 +778,7 @@ msdosfs_sync(mp, waitfor, cred, td)
/*
* Write back each (modified) denode.
*/
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
/*
@@ -794,19 +794,19 @@ msdosfs_sync(mp, waitfor, cred, td)
VI_UNLOCK(vp);
continue;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
dep = VTODE(vp);
if (vp->v_type == VNON ||
((dep->de_flag &
(DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
VI_UNLOCK(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
if (error) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
if (error == ENOENT)
goto loop;
continue;
@@ -816,9 +816,9 @@ msdosfs_sync(mp, waitfor, cred, td)
allerror = error;
VOP_UNLOCK(vp, 0, td);
vrele(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
/*
* Flush filesystem control info.

View File

@@ -569,10 +569,10 @@ ext2_reload(mountp, cred, td)
brelse(bp);
loop:
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
for (vp = TAILQ_FIRST(&mountp->mnt_nvnodelist); vp != NULL; vp = nvp) {
if (vp->v_mount != mountp) {
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
goto loop;
}
nvp = TAILQ_NEXT(vp, v_nmntvnodes);
@@ -581,7 +581,7 @@ ext2_reload(mountp, cred, td)
VI_UNLOCK(vp);
continue;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
/*
* Step 4: invalidate all inactive vnodes.
*/
@@ -614,9 +614,9 @@ ext2_reload(mountp, cred, td)
brelse(bp);
VOP_UNLOCK(vp, 0, td);
vrele(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
return (0);
}
@@ -901,7 +901,7 @@ ext2_sync(mp, waitfor, cred, td)
/*
* Write back each (modified) inode.
*/
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
/*
@@ -916,19 +916,19 @@ ext2_sync(mp, waitfor, cred, td)
VI_UNLOCK(vp);
continue;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
ip = VTOI(vp);
if (vp->v_type == VNON ||
((ip->i_flag &
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
VI_UNLOCK(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
if (error) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
if (error == ENOENT)
goto loop;
continue;
@@ -937,9 +937,9 @@ ext2_sync(mp, waitfor, cred, td)
allerror = error;
VOP_UNLOCK(vp, 0, td);
vrele(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
/*
* Force stale file system control information to be flushed.
*/

View File

@@ -569,10 +569,10 @@ ext2_reload(mountp, cred, td)
brelse(bp);
loop:
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
for (vp = TAILQ_FIRST(&mountp->mnt_nvnodelist); vp != NULL; vp = nvp) {
if (vp->v_mount != mountp) {
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
goto loop;
}
nvp = TAILQ_NEXT(vp, v_nmntvnodes);
@@ -581,7 +581,7 @@ ext2_reload(mountp, cred, td)
VI_UNLOCK(vp);
continue;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
/*
* Step 4: invalidate all inactive vnodes.
*/
@@ -614,9 +614,9 @@ ext2_reload(mountp, cred, td)
brelse(bp);
VOP_UNLOCK(vp, 0, td);
vrele(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
return (0);
}
@@ -901,7 +901,7 @@ ext2_sync(mp, waitfor, cred, td)
/*
* Write back each (modified) inode.
*/
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
/*
@@ -916,19 +916,19 @@ ext2_sync(mp, waitfor, cred, td)
VI_UNLOCK(vp);
continue;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
ip = VTOI(vp);
if (vp->v_type == VNON ||
((ip->i_flag &
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
VI_UNLOCK(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
if (error) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
if (error == ENOENT)
goto loop;
continue;
@@ -937,9 +937,9 @@ ext2_sync(mp, waitfor, cred, td)
allerror = error;
VOP_UNLOCK(vp, 0, td);
vrele(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
/*
* Force stale file system control information to be flushed.
*/

View File

@@ -879,7 +879,7 @@ vfs_stdsync(mp, waitfor, cred, td)
/*
* Force stale buffer cache information to be flushed.
*/
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
/*
@@ -896,10 +896,10 @@ vfs_stdsync(mp, waitfor, cred, td)
VI_UNLOCK(vp);
continue;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
if ((error = vget(vp, lockreq, td)) != 0) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
if (error == ENOENT)
goto loop;
continue;
@@ -910,9 +910,9 @@ vfs_stdsync(mp, waitfor, cred, td)
VOP_UNLOCK(vp, 0, td);
vrele(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
return (allerror);
}

View File

@@ -116,9 +116,6 @@ struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
/* For any iteration/modification of mountlist */
struct mtx mountlist_mtx;
/* For any iteration/modification of mnt_vnodelist */
struct mtx mntvnode_mtx;
/*
* The vnode of the system's root (/ in the filesystem, without chroot
* active.)
@@ -662,6 +659,7 @@ vfs_nmount(td, fsflags, fsoptions)
TAILQ_INIT(&mp->mnt_nvnodelist);
TAILQ_INIT(&mp->mnt_reservedvnlist);
mp->mnt_nvnodelistsize = 0;
mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, td);
mp->mnt_op = vfsp->vfc_vfsops;
@@ -1029,6 +1027,7 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
TAILQ_INIT(&mp->mnt_nvnodelist);
TAILQ_INIT(&mp->mnt_reservedvnlist);
mp->mnt_nvnodelistsize = 0;
mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, td);
mp->mnt_op = vfsp->vfc_vfsops;
@@ -1371,6 +1370,7 @@ dounmount(mp, flags, td)
panic("unmount: dangling vnode");
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, td);
lockdestroy(&mp->mnt_lock);
mtx_destroy(&mp->mnt_mtx);
if (coveredvp != NULL)
vrele(coveredvp);
if (mp->mnt_kern_flag & MNTK_MWAIT)
@@ -1408,6 +1408,7 @@ vfs_rootmountalloc(fstypename, devname, mpp)
if (vfsp == NULL)
return (ENODEV);
mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, td);
TAILQ_INIT(&mp->mnt_nvnodelist);

View File

@@ -476,7 +476,6 @@ vntblinit(void *dummy __unused)
(5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
minvnodes = desiredvnodes / 4;
mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF);
mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
TAILQ_INIT(&vnode_free_list);
@@ -710,7 +709,7 @@ vlrureclaim(struct mount *mp)
trigger = cnt.v_page_count * 2 / usevnodes;
done = 0;
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
count = mp->mnt_nvnodelistsize / 10 + 1;
while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
@@ -722,16 +721,16 @@ vlrureclaim(struct mount *mp)
if (VMIGHTFREE(vp) && /* critical path opt */
(vp->v_object == NULL ||
vp->v_object->resident_page_count < trigger)) {
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
vgonel(vp, curthread);
done++;
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
} else
VI_UNLOCK(vp);
}
--count;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
return done;
}
@@ -1051,24 +1050,26 @@ insmntque(vp, mp)
register struct mount *mp;
{
mtx_lock(&mntvnode_mtx);
/*
* Delete from old mount point vnode list, if on one.
*/
if (vp->v_mount != NULL) {
MNT_ILOCK(vp->v_mount);
KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
("bad mount point vnode list size"));
TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
vp->v_mount->mnt_nvnodelistsize--;
MNT_IUNLOCK(vp->v_mount);
}
/*
* Insert into list of vnodes for the new mount point, if available.
*/
if ((vp->v_mount = mp) != NULL) {
MNT_ILOCK(vp->v_mount);
TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
mp->mnt_nvnodelistsize++;
MNT_IUNLOCK(vp->v_mount);
}
mtx_unlock(&mntvnode_mtx);
}
/*
@@ -2368,7 +2369,7 @@ vflush(mp, rootrefs, flags)
vput(rootvp);
}
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
/*
@@ -2380,10 +2381,10 @@ vflush(mp, rootrefs, flags)
nvp = TAILQ_NEXT(vp, v_nmntvnodes);
VI_LOCK(vp);
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
if (error) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
goto loop;
}
/*
@@ -2391,7 +2392,7 @@ vflush(mp, rootrefs, flags)
*/
if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
VOP_UNLOCK(vp, 0, td);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
/*
@@ -2407,7 +2408,7 @@ vflush(mp, rootrefs, flags)
(error == 0 && vattr.va_nlink > 0)) &&
(vp->v_writecount == 0 || vp->v_type != VREG)) {
VOP_UNLOCK(vp, LK_INTERLOCK, td);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
} else
@@ -2421,7 +2422,7 @@ vflush(mp, rootrefs, flags)
*/
if (vp->v_usecount == 0) {
vgonel(vp, td);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
@@ -2435,7 +2436,7 @@ vflush(mp, rootrefs, flags)
vgonel(vp, td);
else
vgonechrl(vp, td);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
#ifdef DIAGNOSTIC
@@ -2443,10 +2444,10 @@ vflush(mp, rootrefs, flags)
vprint("vflush: busy vnode", vp);
#endif
VI_UNLOCK(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
busy++;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
/*
* If just the root vnode is busy, and if its refcount
@@ -2481,10 +2482,10 @@ vlruvp(struct vnode *vp)
struct mount *mp;
if ((mp = vp->v_mount) != NULL) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
}
#endif
}
@@ -3073,7 +3074,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
continue;
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
if (n == len)
break;
@@ -3122,7 +3123,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
vrele(vp);
++n;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
mtx_lock(&mountlist_mtx);
vfs_unbusy(mp, td);
if (n == len)
@@ -3201,7 +3202,7 @@ vfs_msync(struct mount *mp, int flags)
GIANT_REQUIRED;
tries = 5;
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
if (vp->v_mount != mp) {
@@ -3219,13 +3220,13 @@ vfs_msync(struct mount *mp, int flags)
if ((vp->v_iflag & VI_OBJDIRTY) &&
(flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
if (!vget(vp,
LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
curthread)) {
if (vp->v_vflag & VV_NOSYNC) { /* unlinked */
vput(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
@@ -3238,7 +3239,7 @@ vfs_msync(struct mount *mp, int flags)
}
vput(vp);
}
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
if (--tries > 0)
goto loop;
@@ -3247,7 +3248,7 @@ vfs_msync(struct mount *mp, int flags)
} else
VI_UNLOCK(vp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
}
/*

View File

@@ -788,7 +788,7 @@ nfs_clearcommit(struct mount *mp)
GIANT_REQUIRED;
s = splbio();
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
if (vp->v_mount != mp) /* Paranoia */
@@ -799,7 +799,7 @@ nfs_clearcommit(struct mount *mp)
VI_UNLOCK(vp);
continue;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
nbp = TAILQ_NEXT(bp, b_vnbufs);
if (BUF_REFCNT(bp) == 0 &&
@@ -808,9 +808,9 @@ nfs_clearcommit(struct mount *mp)
bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
}
VI_UNLOCK(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
splx(s);
}

View File

@@ -931,7 +931,7 @@ nfs_sync(struct mount *mp, int waitfor, struct ucred *cred, struct thread *td)
/*
* Force stale buffer cache information to be flushed.
*/
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
vp != NULL;
@@ -944,15 +944,15 @@ nfs_sync(struct mount *mp, int waitfor, struct ucred *cred, struct thread *td)
goto loop;
vnp = TAILQ_NEXT(vp, v_nmntvnodes);
VI_LOCK(vp);
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
if (VOP_ISLOCKED(vp, NULL) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
waitfor == MNT_LAZY) {
VI_UNLOCK(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
goto loop;
}
error = VOP_FSYNC(vp, cred, waitfor, td);
@@ -960,8 +960,9 @@ nfs_sync(struct mount *mp, int waitfor, struct ucred *cred, struct thread *td)
allerror = error;
VOP_UNLOCK(vp, 0, td);
vrele(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
return (allerror);
}

View File

@@ -132,6 +132,7 @@ struct mount {
struct vnodelst mnt_nvnodelist; /* list of vnodes this mount */
struct vnodelst mnt_reservedvnlist; /* (future) dirty vnode list */
struct lock mnt_lock; /* mount structure lock */
struct mtx mnt_mtx; /* mount structure interlock */
int mnt_writeopcount; /* write syscalls in progress */
int mnt_flag; /* flags shared with user */
struct vfsoptlist *mnt_opt; /* current mount options */
@@ -148,6 +149,11 @@ struct mount {
struct label mnt_fslabel; /* MAC label for the fs */
int mnt_nvnodelistsize; /* # of vnodes on this mount */
};
#define MNT_ILOCK(mp) mtx_lock(&(mp)->mnt_mtx)
#define MNT_IUNLOCK(mp) mtx_unlock(&(mp)->mnt_mtx)
#endif /* _KERNEL */
/*

View File

@@ -464,11 +464,6 @@ struct vnodeop_desc {
*/
extern struct vnodeop_desc *vnodeop_descs[];
/*
* Interlock for scanning list of vnodes attached to a mountpoint
*/
extern struct mtx mntvnode_mtx;
#define VOPARG_OFFSETOF(s_type, field) __offsetof(s_type, field)
#define VOPARG_OFFSETTO(s_type, s_offset, struct_p) \
((s_type)(((char*)(struct_p)) + (s_offset)))

View File

@@ -405,7 +405,7 @@ ffs_snapshot(mp, snapfile)
snaplistsize = fs->fs_ncg + howmany(fs->fs_cssize, fs->fs_bsize) +
FSMAXSNAP + 1 /* superblock */ + 1 /* last block */ + 1 /* size */;
mp->mnt_kern_flag &= ~MNTK_SUSPENDED;
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
loop:
for (xvp = TAILQ_FIRST(&mp->mnt_nvnodelist); xvp; xvp = nvp) {
/*
@@ -416,30 +416,30 @@ ffs_snapshot(mp, snapfile)
goto loop;
nvp = TAILQ_NEXT(xvp, v_nmntvnodes);
VI_LOCK(xvp);
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
if ((xvp->v_iflag & VI_XLOCK) ||
xvp->v_usecount == 0 || xvp->v_type == VNON ||
(VTOI(xvp)->i_flags & SF_SNAPSHOT)) {
VI_UNLOCK(xvp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
if (snapdebug)
vprint("ffs_snapshot: busy vnode", xvp);
if (vn_lock(xvp, LK_EXCLUSIVE | LK_INTERLOCK, td) != 0) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
goto loop;
}
if (VOP_GETATTR(xvp, &vat, td->td_ucred, td) == 0 &&
vat.va_nlink > 0) {
VOP_UNLOCK(xvp, 0, td);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
xp = VTOI(xvp);
if (ffs_checkfreefile(copy_fs, vp, xp->i_number)) {
VOP_UNLOCK(xvp, 0, td);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
/*
@@ -475,9 +475,9 @@ ffs_snapshot(mp, snapfile)
sbp = NULL;
goto out1;
}
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
/*
* If there already exist snapshots on this filesystem, grab a
* reference to their shared lock. If this is the first snapshot

View File

@@ -495,10 +495,10 @@ ffs_reload(mp, cred, td)
}
loop:
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
if (vp->v_mount != mp) {
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
goto loop;
}
nvp = TAILQ_NEXT(vp, v_nmntvnodes);
@@ -507,7 +507,7 @@ ffs_reload(mp, cred, td)
VI_UNLOCK(vp);
continue;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
/*
* Step 4: invalidate all inactive vnodes.
*/
@@ -540,9 +540,9 @@ ffs_reload(mp, cred, td)
brelse(bp);
VOP_UNLOCK(vp, 0, td);
vrele(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
return (0);
}
@@ -1130,7 +1130,7 @@ ffs_sync(mp, waitfor, cred, td)
lockreq = LK_EXCLUSIVE;
}
lockreq |= LK_INTERLOCK;
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
/*
@@ -1159,9 +1159,9 @@ ffs_sync(mp, waitfor, cred, td)
VI_UNLOCK(vp);
continue;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
if ((error = vget(vp, lockreq, td)) != 0) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
if (error == ENOENT)
goto loop;
continue;
@@ -1170,11 +1170,11 @@ ffs_sync(mp, waitfor, cred, td)
allerror = error;
VOP_UNLOCK(vp, 0, td);
vrele(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp)
goto loop;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
/*
* Force stale filesystem control information to be flushed.
*/
@@ -1183,7 +1183,7 @@ ffs_sync(mp, waitfor, cred, td)
allerror = error;
/* Flushed work items may create new vnodes to clean */
if (allerror == 0 && count) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
goto loop;
}
}
@@ -1199,7 +1199,7 @@ ffs_sync(mp, waitfor, cred, td)
allerror = error;
VOP_UNLOCK(devvp, 0, td);
if (allerror == 0 && waitfor == MNT_WAIT) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
goto loop;
}
} else

View File

@@ -455,34 +455,34 @@ quotaon(td, mp, type, fname)
* adding references to quota file being opened.
* NB: only need to add dquot's for inodes being modified.
*/
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
again:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
if (vp->v_mount != mp)
goto again;
nextvp = TAILQ_NEXT(vp, v_nmntvnodes);
VI_LOCK(vp);
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
goto again;
}
if (vp->v_type == VNON || vp->v_writecount == 0) {
VOP_UNLOCK(vp, 0, td);
vrele(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
error = getinoquota(VTOI(vp));
VOP_UNLOCK(vp, 0, td);
vrele(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
if (error)
break;
if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
goto again;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
ump->um_qflags[type] &= ~QTF_OPENING;
if (error)
quotaoff(td, mp, type);
@@ -516,7 +516,7 @@ quotaoff(td, mp, type)
* Search vnodes associated with this mount point,
* deleting any references to quota file being closed.
*/
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
again:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
if (vp->v_mount != mp)
@@ -524,14 +524,14 @@ quotaoff(td, mp, type)
nextvp = TAILQ_NEXT(vp, v_nmntvnodes);
VI_LOCK(vp);
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
if (vp->v_type == VNON) {
VI_UNLOCK(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
goto again;
}
ip = VTOI(vp);
@@ -540,11 +540,11 @@ quotaoff(td, mp, type)
dqrele(vp, dq);
VOP_UNLOCK(vp, 0, td);
vrele(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
goto again;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
dqflush(qvp);
ASSERT_VOP_LOCKED(qvp, "quotaoff");
qvp->v_vflag &= ~VV_SYSTEM;
@@ -749,22 +749,22 @@ qsync(mp)
* Search vnodes associated with this mount point,
* synchronizing any modified dquot structures.
*/
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
again:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
if (vp->v_mount != mp)
goto again;
nextvp = TAILQ_NEXT(vp, v_nmntvnodes);
VI_LOCK(vp);
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
if (vp->v_type == VNON) {
VI_UNLOCK(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
continue;
}
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
if (error) {
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
if (error == ENOENT)
goto again;
continue;
@@ -775,11 +775,11 @@ qsync(mp)
dqsync(vp, dq);
}
vput(vp);
mtx_lock(&mntvnode_mtx);
MNT_ILOCK(mp);
if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
goto again;
}
mtx_unlock(&mntvnode_mtx);
MNT_IUNLOCK(mp);
return (0);
}