- Protect the mnt_vnode list with the mntvnode lock.

- Use queue(9) macros (see the sketch after the change summary below).
John Baldwin 2001-06-28 04:10:07 +00:00
parent 48ce3c0e43
commit bc2327c310
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=78911
6 changed files with 48 additions and 18 deletions
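
The same two hunks recur below because the commit touches several copies of these functions. To illustrate the queue(9) half of the change first: direct pokes at the lh_first and le_next list fields give way to the LIST_FIRST() and LIST_NEXT() macros from <sys/queue.h>. The userland sketch below (a toy struct vnode, not the kernel's) shows that traversal style; it is an illustration added for this write-up, not part of the commit.

/*
 * Userland sketch of the queue(9) LIST macros adopted below.  The
 * struct vnode here is a toy stand-in for the kernel's, kept only to
 * mirror the field names that appear in the diffs.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct vnode {
	int			v_id;		/* stand-in payload */
	LIST_ENTRY(vnode)	v_mntvnodes;	/* list linkage */
};
LIST_HEAD(vnodelst, vnode);

int
main(void)
{
	struct vnodelst mnt_vnodelist = LIST_HEAD_INITIALIZER(mnt_vnodelist);
	struct vnode *vp, *nvp;
	int i;

	for (i = 0; i < 3; i++) {
		vp = malloc(sizeof(*vp));
		vp->v_id = i;
		LIST_INSERT_HEAD(&mnt_vnodelist, vp, v_mntvnodes);
	}
	/*
	 * Old style: vp = mnt_vnodelist.lh_first, then
	 * vp = vp->v_mntvnodes.le_next.  New style: the macros hide the
	 * field layout, and caching the next pointer first keeps the
	 * walk safe if the current element is unlinked.
	 */
	for (vp = LIST_FIRST(&mnt_vnodelist); vp != NULL; vp = nvp) {
		nvp = LIST_NEXT(vp, v_mntvnodes);
		printf("vnode %d\n", vp->v_id);
		free(vp);
	}
	return (0);
}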

View File

@@ -2141,11 +2141,12 @@ nfs_clearcommit(mp)
 	s = splbio();
 	mtx_assert(&Giant, MA_OWNED);
 	mtx_assert(&vm_mtx, MA_NOTOWNED);
+	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
+	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
 		if (vp->v_mount != mp)	/* Paranoia */
 			goto loop;
-		nvp = vp->v_mntvnodes.le_next;
+		nvp = LIST_NEXT(vp, v_mntvnodes);
 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
 			nbp = TAILQ_NEXT(bp, b_vnbufs);
 			if (BUF_REFCNT(bp) == 0 &&
@@ -2154,6 +2155,7 @@ nfs_clearcommit(mp)
 			bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
 		}
 	}
+	mtx_unlock(&mntvnode_mtx);
 	splx(s);
 }
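
For the locking half of the change: nfs_clearcommit() can hold mntvnode_mtx across the whole walk because nothing in its loop body sleeps. A compilable userland sketch of that shape, with a pthread mutex standing in for the kernel mutex and toy types replacing the real ones (walk_mount() is a made-up helper, not a kernel API):

#include <sys/queue.h>
#include <pthread.h>

struct mount;
struct vnode {
	struct mount		*v_mount;	/* owning mount, as in the diff */
	LIST_ENTRY(vnode)	 v_mntvnodes;
};
LIST_HEAD(vnodelst, vnode);
struct mount {
	struct vnodelst	mnt_vnodelist;
};

static pthread_mutex_t mntvnode_mtx = PTHREAD_MUTEX_INITIALIZER;

/*
 * Hold the list lock for the entire traversal; if an entry turns out
 * to belong to another mount, restart from the head, exactly as the
 * "Paranoia" check in nfs_clearcommit() does.
 */
static void
walk_mount(struct mount *mp, void (*fn)(struct vnode *))
{
	struct vnode *vp, *nvp;

	pthread_mutex_lock(&mntvnode_mtx);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		if (vp->v_mount != mp)		/* Paranoia */
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);
		fn(vp);				/* must not sleep */
	}
	pthread_mutex_unlock(&mntvnode_mtx);
}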

View File

@@ -2141,11 +2141,12 @@ nfs_clearcommit(mp)
 	s = splbio();
 	mtx_assert(&Giant, MA_OWNED);
 	mtx_assert(&vm_mtx, MA_NOTOWNED);
+	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
+	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
 		if (vp->v_mount != mp)	/* Paranoia */
 			goto loop;
-		nvp = vp->v_mntvnodes.le_next;
+		nvp = LIST_NEXT(vp, v_mntvnodes);
 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
 			nbp = TAILQ_NEXT(bp, b_vnbufs);
 			if (BUF_REFCNT(bp) == 0 &&
@@ -2154,6 +2155,7 @@ nfs_clearcommit(mp)
 			bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
 		}
 	}
+	mtx_unlock(&mntvnode_mtx);
 	splx(s);
 }

View File

@@ -1031,32 +1031,43 @@ nfs_sync(mp, waitfor, cred, p)
 	struct ucred *cred;
 	struct proc *p;
 {
-	register struct vnode *vp;
+	struct vnode *vp, *vnp;
 	int error, allerror = 0;
 
 	/*
 	 * Force stale buffer cache information to be flushed.
 	 */
+	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = mp->mnt_vnodelist.lh_first;
+	for (vp = LIST_FIRST(&mp->mnt_vnodelist);
 	     vp != NULL;
-	     vp = vp->v_mntvnodes.le_next) {
+	     vp = vnp) {
 		/*
 		 * If the vnode that we are about to sync is no longer
 		 * associated with this mount point, start over.
 		 */
 		if (vp->v_mount != mp)
 			goto loop;
+		vnp = LIST_NEXT(vp, v_mntvnodes);
+		mtx_unlock(&mntvnode_mtx);
+		mtx_lock(&vp->v_interlock);
 		if (VOP_ISLOCKED(vp, NULL) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
-		    waitfor == MNT_LAZY)
+		    waitfor == MNT_LAZY) {
+			mtx_unlock(&vp->v_interlock);
+			mtx_lock(&mntvnode_mtx);
 			continue;
-		if (vget(vp, LK_EXCLUSIVE, p))
+		}
+		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
+			mtx_lock(&mntvnode_mtx);
 			goto loop;
+		}
 		error = VOP_FSYNC(vp, cred, waitfor, p);
 		if (error)
 			allerror = error;
 		vput(vp);
+		mtx_lock(&mntvnode_mtx);
 	}
+	mtx_unlock(&mntvnode_mtx);
 	return (allerror);
 }
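
nfs_sync() cannot hold the list lock the same way, because vget() and VOP_FSYNC() may sleep: the hunk above caches the next pointer, drops mntvnode_mtx around the blocking work, and retakes it before advancing, while LK_INTERLOCK hands the already-held vnode interlock to vget() atomically. A userland sketch of just that lock-juggling shape (sync_one() and the types are stand-ins, not kernel APIs):

#include <sys/queue.h>
#include <pthread.h>

struct mount;
struct vnode {
	struct mount		*v_mount;
	LIST_ENTRY(vnode)	 v_mntvnodes;
};
LIST_HEAD(vnodelst, vnode);
struct mount {
	struct vnodelst	mnt_vnodelist;
};

static pthread_mutex_t mntvnode_mtx = PTHREAD_MUTEX_INITIALIZER;

static int
sync_all(struct mount *mp, int (*sync_one)(struct vnode *))
{
	struct vnode *vp, *vnp;
	int error, allerror = 0;

	pthread_mutex_lock(&mntvnode_mtx);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = vnp) {
		if (vp->v_mount != mp)		/* list changed; start over */
			goto loop;
		vnp = LIST_NEXT(vp, v_mntvnodes);	/* cache before unlocking */
		pthread_mutex_unlock(&mntvnode_mtx);
		error = sync_one(vp);		/* may block; list lock dropped */
		if (error)
			allerror = error;
		pthread_mutex_lock(&mntvnode_mtx);	/* retake before advancing */
	}
	pthread_mutex_unlock(&mntvnode_mtx);
	return (allerror);
}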

View File

@@ -2141,11 +2141,12 @@ nfs_clearcommit(mp)
 	s = splbio();
 	mtx_assert(&Giant, MA_OWNED);
 	mtx_assert(&vm_mtx, MA_NOTOWNED);
+	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
+	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
 		if (vp->v_mount != mp)	/* Paranoia */
 			goto loop;
-		nvp = vp->v_mntvnodes.le_next;
+		nvp = LIST_NEXT(vp, v_mntvnodes);
 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
 			nbp = TAILQ_NEXT(bp, b_vnbufs);
 			if (BUF_REFCNT(bp) == 0 &&
@@ -2154,6 +2155,7 @@ nfs_clearcommit(mp)
 			bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
 		}
 	}
+	mtx_unlock(&mntvnode_mtx);
 	splx(s);
 }

View File

@@ -1031,32 +1031,43 @@ nfs_sync(mp, waitfor, cred, p)
 	struct ucred *cred;
 	struct proc *p;
 {
-	register struct vnode *vp;
+	struct vnode *vp, *vnp;
 	int error, allerror = 0;
 
 	/*
 	 * Force stale buffer cache information to be flushed.
 	 */
+	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = mp->mnt_vnodelist.lh_first;
+	for (vp = LIST_FIRST(&mp->mnt_vnodelist);
 	     vp != NULL;
-	     vp = vp->v_mntvnodes.le_next) {
+	     vp = vnp) {
 		/*
 		 * If the vnode that we are about to sync is no longer
 		 * associated with this mount point, start over.
 		 */
 		if (vp->v_mount != mp)
 			goto loop;
+		vnp = LIST_NEXT(vp, v_mntvnodes);
+		mtx_unlock(&mntvnode_mtx);
+		mtx_lock(&vp->v_interlock);
 		if (VOP_ISLOCKED(vp, NULL) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
-		    waitfor == MNT_LAZY)
+		    waitfor == MNT_LAZY) {
+			mtx_unlock(&vp->v_interlock);
+			mtx_lock(&mntvnode_mtx);
 			continue;
-		if (vget(vp, LK_EXCLUSIVE, p))
+		}
+		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
+			mtx_lock(&mntvnode_mtx);
 			goto loop;
+		}
 		error = VOP_FSYNC(vp, cred, waitfor, p);
 		if (error)
 			allerror = error;
 		vput(vp);
+		mtx_lock(&mntvnode_mtx);
 	}
+	mtx_unlock(&mntvnode_mtx);
 	return (allerror);
 }

View File

@@ -2141,11 +2141,12 @@ nfs_clearcommit(mp)
 	s = splbio();
 	mtx_assert(&Giant, MA_OWNED);
 	mtx_assert(&vm_mtx, MA_NOTOWNED);
+	mtx_lock(&mntvnode_mtx);
 loop:
-	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
+	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
 		if (vp->v_mount != mp)	/* Paranoia */
 			goto loop;
-		nvp = vp->v_mntvnodes.le_next;
+		nvp = LIST_NEXT(vp, v_mntvnodes);
 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
 			nbp = TAILQ_NEXT(bp, b_vnbufs);
 			if (BUF_REFCNT(bp) == 0 &&
@@ -2154,6 +2155,7 @@ nfs_clearcommit(mp)
 			bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
 		}
 	}
+	mtx_unlock(&mntvnode_mtx);
 	splx(s);
 }