Change the vnode list under the mount point from a LIST to a TAILQ
in preparation for an implementation of limiting code for kern.maxvnodes.

MFC after:	3 days

commit c72ccd014d (parent 2716f76b14)
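
For context: a queue(3) LIST head stores only a pointer to the first
element, so O(1) insertion is possible only at the head and the list can
be walked only forward. A TAILQ head also tracks the tail, which buys O(1)
insertion at the tail and reverse traversal, properties that vnode-recycling
code such as the planned kern.maxvnodes limiter can exploit. A minimal,
self-contained userland sketch of the TAILQ macros (the names here are
illustrative, not part of this commit):

	#include <sys/queue.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for struct vnode. */
	struct node {
		int id;
		TAILQ_ENTRY(node) link;		/* like v_nmntvnodes */
	};
	TAILQ_HEAD(nodelist, node);		/* like struct vnodelst */

	int
	main(void)
	{
		struct nodelist head = TAILQ_HEAD_INITIALIZER(head);
		struct node *n;
		int i;

		for (i = 0; i < 4; i++) {
			n = malloc(sizeof(*n));
			n->id = i;
			/* O(1) tail insert; a LIST head has no tail pointer. */
			TAILQ_INSERT_TAIL(&head, n, link);
		}
		/* Forward walk, as in the loops converted below. */
		TAILQ_FOREACH(n, &head, link)
			printf("node %d\n", n->id);
		/* Reverse walk, which a LIST cannot do at all. */
		TAILQ_FOREACH_REVERSE(n, &head, nodelist, link)
			printf("node %d (reverse)\n", n->id);
		while ((n = TAILQ_FIRST(&head)) != NULL) {
			TAILQ_REMOVE(&head, n, link);
			free(n);
		}
		return (0);
	}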
@@ -312,10 +312,10 @@ coda_checkunmounting(mp)
 	struct cnode *cp;
 	int count = 0, bad = 0;
 loop:
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
 		if (vp->v_mount != mp)
 			goto loop;
-		nvp = LIST_NEXT(vp, v_mntvnodes);
+		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 		cp = VTOC(vp);
 		count++;
 		if (!(cp->c_flags & C_UNMOUNTING)) {
@@ -176,7 +176,7 @@ msdosfs_mountroot()
 	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
 	mp->mnt_op = &msdosfs_vfsops;
 	mp->mnt_flag = 0;
-	LIST_INIT(&mp->mnt_vnodelist);
+	TAILQ_INIT(&mp->mnt_nvnodelist);
 
 	args.flags = 0;
 	args.uid = 0;
@@ -859,14 +859,14 @@ msdosfs_sync(mp, waitfor, cred, td)
 	 */
 	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
 		/*
 		 * If the vnode that we are about to sync is no longer
 		 * associated with this mount point, start over.
 		 */
 		if (vp->v_mount != mp)
 			goto loop;
-		nvp = LIST_NEXT(vp, v_mntvnodes);
+		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 
 		mtx_unlock(&mntvnode_mtx);
 		mtx_lock(&vp->v_interlock);
@@ -479,7 +479,7 @@ nwfs_sync(mp, waitfor, cred, td)
 	 */
 	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist);
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
 	     vp != NULL;
 	     vp = nvp) {
 		/*
@@ -488,7 +488,7 @@ loop:
 		 */
 		if (vp->v_mount != mp)
 			goto loop;
-		nvp = LIST_NEXT(vp, v_mntvnodes);
+		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 		mtx_unlock(&mntvnode_mtx);
 		mtx_lock(&vp->v_interlock);
 		if (VOP_ISLOCKED(vp, NULL) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
@@ -434,9 +434,9 @@ smbfs_sync(mp, waitfor, cred, p)
 	 * Force stale buffer cache information to be flushed.
 	 */
 loop:
-	for (vp = mp->mnt_vnodelist.lh_first;
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
 	     vp != NULL;
-	     vp = vp->v_mntvnodes.le_next) {
+	     vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
 		/*
 		 * If the vnode that we are about to sync is no longer
 		 * associated with this mount point, start over.
@@ -333,7 +333,7 @@ union_unmount(mp, mntflags, td)
 	/* count #vnodes held on mount list */
 	mtx_lock(&mntvnode_mtx);
 	n = 0;
-	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
+	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
 		n++;
 	mtx_unlock(&mntvnode_mtx);
 
@@ -141,6 +141,7 @@ ext2_mountroot()
 	}
 	mp = bsd_malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
 	bzero((char *)mp, (u_long)sizeof(struct mount));
+	TAILQ_INIT(&mp->mnt_nvnodelist);
 	mp->mnt_op = &ext2fs_vfsops;
 	mp->mnt_flag = MNT_RDONLY;
 	if (error = ext2_mountfs(rootvp, mp, td)) {
@@ -568,12 +569,12 @@ ext2_reload(mountp, cred, td)
 
 loop:
 	mtx_lock(&mntvnode_mtx);
-	for (vp = LIST_FIRST(&mountp->mnt_vnodelist); vp != NULL; vp = nvp) {
+	for (vp = TAILQ_FIRST(&mountp->mnt_nvnodelist); vp != NULL; vp = nvp) {
 		if (vp->v_mount != mountp) {
 			mtx_unlock(&mntvnode_mtx);
 			goto loop;
 		}
-		nvp = LIST_NEXT(vp, v_mntvnodes);
+		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 		mtx_unlock(&mntvnode_mtx);
 		/*
 		 * Step 4: invalidate all inactive vnodes.
@@ -926,14 +927,14 @@ ext2_sync(mp, waitfor, cred, td)
 	 */
 	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
 		/*
 		 * If the vnode that we are about to sync is no longer
 		 * associated with this mount point, start over.
 		 */
 		if (vp->v_mount != mp)
 			goto loop;
-		nvp = LIST_NEXT(vp, v_mntvnodes);
+		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 		mtx_unlock(&mntvnode_mtx);
 		mtx_lock(&vp->v_interlock);
 		ip = VTOI(vp);
@@ -319,6 +319,7 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
 	 * Allocate and initialize the filesystem.
 	 */
 	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
+	TAILQ_INIT(&mp->mnt_nvnodelist);
 	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
 	(void)vfs_busy(mp, LK_NOWAIT, 0, td);
 	mp->mnt_op = vfsp->vfc_vfsops;
@@ -591,7 +592,7 @@ dounmount(mp, flags, td)
 	if ((coveredvp = mp->mnt_vnodecovered) != NULL)
 		coveredvp->v_mountedhere = NULL;
 	mp->mnt_vfc->vfc_refcount--;
-	if (!LIST_EMPTY(&mp->mnt_vnodelist))
+	if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
 		panic("unmount: dangling vnode");
 	lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, td);
 	lockdestroy(&mp->mnt_lock);
@@ -345,7 +345,7 @@ vfs_rootmountalloc(fstypename, devname, mpp)
 	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
 	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
 	(void)vfs_busy(mp, LK_NOWAIT, 0, td);
-	LIST_INIT(&mp->mnt_vnodelist);
+	TAILQ_INIT(&mp->mnt_nvnodelist);
 	mp->mnt_vfc = vfsp;
 	mp->mnt_op = vfsp->vfc_vfsops;
 	mp->mnt_flag = MNT_RDONLY;
@@ -700,7 +700,7 @@ insmntque(vp, mp)
 	 * Delete from old mount point vnode list, if on one.
 	 */
 	if (vp->v_mount != NULL)
-		LIST_REMOVE(vp, v_mntvnodes);
+		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
 	/*
 	 * Insert into list of vnodes for the new mount point, if available.
 	 */
@@ -708,7 +708,7 @@ insmntque(vp, mp)
 		mtx_unlock(&mntvnode_mtx);
 		return;
 	}
-	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
+	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
 	mtx_unlock(&mntvnode_mtx);
 }
 
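
Note that the insmntque() change above is behavioral as well as cosmetic:
LIST_INSERT_HEAD becomes TAILQ_INSERT_TAIL, so vnodes now sit on the
per-mount list in attachment order, oldest first. One plausible consumer,
sketched purely as an assumption about the forthcoming kern.maxvnodes work
and not taken from this commit (vnode_is_recyclable() is an invented
helper):

	/*
	 * Hypothetical sketch: scan the per-mount vnode list oldest-first
	 * and count up to 'limit' recycling candidates.
	 */
	static int
	count_recyclable(struct mount *mp, int limit)
	{
		struct vnode *vp;
		int found = 0;

		mtx_lock(&mntvnode_mtx);
		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
			if (limit-- <= 0)
				break;
			if (vnode_is_recyclable(vp))	/* invented helper */
				found++;
		}
		mtx_unlock(&mntvnode_mtx);
		return (found);
	}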
@@ -1723,14 +1723,14 @@ vflush(mp, rootrefs, flags)
 	}
 	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
 		/*
 		 * Make sure this vnode wasn't reclaimed in getnewvnode().
 		 * Start over if it has (it won't be on the list anymore).
 		 */
 		if (vp->v_mount != mp)
 			goto loop;
-		nvp = LIST_NEXT(vp, v_mntvnodes);
+		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 
 		mtx_unlock(&mntvnode_mtx);
 		mtx_lock(&vp->v_interlock);
@@ -2191,7 +2191,7 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
 			continue;
 		}
 		mtx_lock(&mntvnode_mtx);
-		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
+		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
 			if (VOP_ISLOCKED(vp, NULL))
 				vprint((char *)0, vp);
 		}
@@ -2313,7 +2313,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
 		}
 		mtx_lock(&mntvnode_mtx);
 again:
-		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
+		for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
 		     vp != NULL;
 		     vp = nvp) {
 			/*
@@ -2323,7 +2323,7 @@ again:
 			 */
 			if (vp->v_mount != mp)
 				goto again;
-			nvp = LIST_NEXT(vp, v_mntvnodes);
+			nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 			mtx_unlock(&mntvnode_mtx);
 			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
 			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
@@ -2402,7 +2402,8 @@ vfs_unmountall()
  * the mount point must be locked.
  */
 void
-vfs_msync(struct mount *mp, int flags) {
+vfs_msync(struct mount *mp, int flags)
+{
 	struct vnode *vp, *nvp;
 	struct vm_object *obj;
 	int anyio, tries;
@@ -2413,9 +2414,9 @@ vfs_msync(struct mount *mp, int flags) {
 loop:
 	anyio = 0;
 	mtx_lock(&mntvnode_mtx);
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
 
-		nvp = LIST_NEXT(vp, v_mntvnodes);
+		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 
 		if (vp->v_mount != mp) {
 			mtx_unlock(&mntvnode_mtx);
@@ -788,10 +788,10 @@ nfs_clearcommit(struct mount *mp)
 	s = splbio();
 	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
 		if (vp->v_mount != mp)	/* Paranoia */
 			goto loop;
-		nvp = LIST_NEXT(vp, v_mntvnodes);
+		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
 			nbp = TAILQ_NEXT(bp, b_vnbufs);
 			if (BUF_REFCNT(bp) == 0 &&
@@ -968,7 +968,7 @@ nfs_sync(struct mount *mp, int waitfor, struct ucred *cred, struct thread *td)
 	 */
 	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist);
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
 	     vp != NULL;
 	     vp = vnp) {
 		/*
@@ -977,7 +977,7 @@ loop:
 		 */
 		if (vp->v_mount != mp)
 			goto loop;
-		vnp = LIST_NEXT(vp, v_mntvnodes);
+		vnp = TAILQ_NEXT(vp, v_nmntvnodes);
 		mtx_unlock(&mntvnode_mtx);
 		mtx_lock(&vp->v_interlock);
 		if (VOP_ISLOCKED(vp, NULL) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
@@ -110,7 +110,7 @@ struct statfs {
  * array of operations and an instance record.  The file systems are
  * put on a doubly linked list.
  */
-LIST_HEAD(vnodelst, vnode);
+TAILQ_HEAD(vnodelst, vnode);
 
 struct mount {
 	TAILQ_ENTRY(mount) mnt_list;		/* mount list */
@@ -118,7 +118,7 @@ struct mount {
 	struct vfsconf	*mnt_vfc;		/* configuration info */
 	struct vnode	*mnt_vnodecovered;	/* vnode we mounted on */
 	struct vnode	*mnt_syncer;		/* syncer vnode */
-	struct vnodelst	mnt_vnodelist;		/* list of vnodes this mount */
+	struct vnodelst	mnt_nvnodelist;		/* list of vnodes this mount */
 	struct lock	mnt_lock;		/* mount structure lock */
 	int		mnt_writeopcount;	/* write syscalls in progress */
 	int		mnt_flag;		/* flags shared with user */
@@ -99,7 +99,7 @@ struct vnode {
 	struct	mount *v_mount;			/* ptr to vfs we are in */
 	vop_t	**v_op;				/* vnode operations vector */
 	TAILQ_ENTRY(vnode) v_freelist;		/* vnode freelist */
-	LIST_ENTRY(vnode) v_mntvnodes;		/* vnodes for mount point */
+	TAILQ_ENTRY(vnode) v_nmntvnodes;	/* vnodes for mount point */
 	struct	buflists v_cleanblkhd;		/* clean blocklist head */
 	struct	buflists v_dirtyblkhd;		/* dirty blocklist head */
 	LIST_ENTRY(vnode) v_synclist;		/* vnodes with dirty buffers */
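
The two header hunks above must change in lockstep because the head and
entry macros expand to matching pointer pairs. Simplified expansions, shown
here for reference only:

	#define LIST_HEAD(name, type)					\
	struct name {							\
		struct type *lh_first;	/* first element */		\
	}

	#define TAILQ_HEAD(name, type)					\
	struct name {							\
		struct type *tqh_first;	/* first element */		\
		struct type **tqh_last;	/* addr of last next element */	\
	}

	#define TAILQ_ENTRY(type)					\
	struct {							\
		struct type *tqe_next;	/* next element */		\
		struct type **tqe_prev;	/* addr of previous next elem */\
	}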
@@ -476,12 +476,12 @@ ffs_reload(mp, cred, td)
 
 loop:
 	mtx_lock(&mntvnode_mtx);
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
 		if (vp->v_mount != mp) {
 			mtx_unlock(&mntvnode_mtx);
 			goto loop;
 		}
-		nvp = LIST_NEXT(vp, v_mntvnodes);
+		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 		mtx_unlock(&mntvnode_mtx);
 		/*
 		 * Step 4: invalidate all inactive vnodes.
@@ -1008,14 +1008,14 @@ ffs_sync(mp, waitfor, cred, td)
 	}
 	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
 		/*
 		 * If the vnode that we are about to sync is no longer
 		 * associated with this mount point, start over.
 		 */
 		if (vp->v_mount != mp)
 			goto loop;
-		nvp = LIST_NEXT(vp, v_mntvnodes);
+		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 
 		mtx_unlock(&mntvnode_mtx);
 		mtx_lock(&vp->v_interlock);
@@ -439,10 +439,10 @@ quotaon(td, mp, type, fname)
 	 */
 	mtx_lock(&mntvnode_mtx);
 again:
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
 		if (vp->v_mount != mp)
 			goto again;
-		nextvp = LIST_NEXT(vp, v_mntvnodes);
+		nextvp = TAILQ_NEXT(vp, v_nmntvnodes);
 
 		mtx_unlock(&mntvnode_mtx);
 		mtx_lock(&vp->v_interlock);
@@ -460,7 +460,7 @@ again:
 		mtx_lock(&mntvnode_mtx);
 		if (error)
 			break;
-		if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
+		if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
 			goto again;
 	}
 	mtx_unlock(&mntvnode_mtx);
@@ -495,10 +495,10 @@ quotaoff(td, mp, type)
 	 */
 	mtx_lock(&mntvnode_mtx);
 again:
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
 		if (vp->v_mount != mp)
 			goto again;
-		nextvp = LIST_NEXT(vp, v_mntvnodes);
+		nextvp = TAILQ_NEXT(vp, v_nmntvnodes);
 
 		mtx_unlock(&mntvnode_mtx);
 		mtx_lock(&vp->v_interlock);
@@ -517,7 +517,7 @@ again:
 		dqrele(vp, dq);
 		vput(vp);
 		mtx_lock(&mntvnode_mtx);
-		if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
+		if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
 			goto again;
 	}
 	mtx_unlock(&mntvnode_mtx);
@@ -694,10 +694,10 @@ qsync(mp)
 	 */
 	mtx_lock(&mntvnode_mtx);
 again:
-	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
+	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
 		if (vp->v_mount != mp)
 			goto again;
-		nextvp = LIST_NEXT(vp, v_mntvnodes);
+		nextvp = TAILQ_NEXT(vp, v_nmntvnodes);
 		mtx_unlock(&mntvnode_mtx);
 		mtx_lock(&vp->v_interlock);
 		if (vp->v_type == VNON) {
@@ -719,7 +719,7 @@ again:
 		}
 		vput(vp);
 		mtx_lock(&mntvnode_mtx);
-		if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
+		if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
 			goto again;
 	}
 	mtx_unlock(&mntvnode_mtx);
@@ -693,10 +693,10 @@ kinfo_vnodes(avnodes)
 	for (num = 0, mp = TAILQ_FIRST(&mountlist); ; mp = mp_next) {
 		KGET2(mp, &mount, sizeof(mount), "mount entry");
 		mp_next = TAILQ_NEXT(&mount, mnt_list);
-		for (vp = LIST_FIRST(&mount.mnt_vnodelist);
+		for (vp = TAILQ_FIRST(&mount.mnt_nvnodelist);
 		    vp != NULL; vp = vp_next) {
 			KGET2(vp, &vnode, sizeof(vnode), "vnode");
-			vp_next = LIST_NEXT(&vnode, v_mntvnodes);
+			vp_next = TAILQ_NEXT(&vnode, v_nmntvnodes);
 			if ((bp + VPTRSZ + VNODESZ) > evbuf)
 				/* XXX - should realloc */
 				errx(1, "no more room for vnodes");