Within ufs, the ffs_sync and ffs_fsync functions did not always check
for and/or report I/O errors. The result is that a VFS_SYNC or
VOP_FSYNC called with MNT_WAIT could loop infinitely on ufs in the
presence of a hard error writing a disk sector or in a filesystem-full
condition. This patch ensures that I/O errors are always checked and
returned, and that every call to VFS_SYNC or VOP_FSYNC with MNT_WAIT
set checks for and takes appropriate action when an error is returned.

Sponsored by:   DARPA & NAI Labs.
mckusick 2002-10-25 00:20:37 +00:00
parent 776a2129fe
commit 6b1611bd94
6 changed files with 37 additions and 15 deletions
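For reference, the caller-side pattern that the last sentence of the message
describes is sketched below. This is an illustration only and not part of the
diff; example_remount_flush() is a hypothetical caller, but the four-argument
VFS_SYNC(mp, waitfor, cred, td) form and the MNT_WAIT semantics match the code
being patched.

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/ucred.h>

/*
 * Illustrative sketch only: a hypothetical caller that flushes a
 * filesystem with MNT_WAIT and propagates any error instead of
 * assuming the sync succeeded (as the pre-patch callers did).
 */
static int
example_remount_flush(struct mount *mp, struct ucred *cred, struct thread *td)
{
        int error;

        /* MNT_WAIT: do not return until the writes have completed. */
        if ((error = VFS_SYNC(mp, MNT_WAIT, cred, td)) != 0)
                return (error); /* e.g. a hard write error or ENOSPC */
        return (0);
}

In the diff below, ffs_mount, vfs_write_suspend, and sync_fsync are changed to
follow this pattern, and spec_fsync and ffs_sync are changed to report the
errors those callers now check.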


@@ -421,8 +421,8 @@ spec_fsync(ap)
         struct vnode *vp = ap->a_vp;
         struct buf *bp;
         struct buf *nbp;
-        int s;
-        int maxretry = 10000; /* large, arbitrarily chosen */
+        int s, error = 0;
+        int maxretry = 100; /* large, arbitrarily chosen */
         if (!vn_isdisk(vp, NULL))
                 return (0);
@@ -435,6 +435,7 @@ spec_fsync(ap)
         s = splbio();
         TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
                 bp->b_flags &= ~B_SCANNED;
+                bp->b_error = 0;
         }
         splx(s);
@@ -481,16 +482,25 @@ spec_fsync(ap)
                         PRIBIO + 1, "spfsyn", 0);
         }
         if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
-                if (--maxretry != 0) {
+                /*
+                 * If we are unable to write any of these buffers
+                 * then we fail now rather than trying endlessly
+                 * to write them out.
+                 */
+                TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs)
+                        if ((error = bp->b_error) == 0)
+                                continue;
+                if (error == 0 && --maxretry >= 0) {
                         splx(s);
                         goto loop1;
                 }
                 vprint("spec_fsync: giving up on dirty", vp);
+                error = EAGAIN;
         }
         VI_UNLOCK(vp);
         splx(s);
-        return (0);
+        return (error);
 }
 /*


@@ -3425,7 +3425,7 @@ sync_fsync(ap)
         struct vnode *syncvp = ap->a_vp;
         struct mount *mp = syncvp->v_mount;
         struct thread *td = ap->a_td;
-        int asyncflag;
+        int error, asyncflag;
         /*
          * We only need to do something if this is a lazy evaluation.
@@ -3456,12 +3456,12 @@ sync_fsync(ap)
         asyncflag = mp->mnt_flag & MNT_ASYNC;
         mp->mnt_flag &= ~MNT_ASYNC;
         vfs_msync(mp, MNT_NOWAIT);
-        VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
+        error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
         if (asyncflag)
                 mp->mnt_flag |= MNT_ASYNC;
         vn_finished_write(mp);
         vfs_unbusy(mp, td);
-        return (0);
+        return (error);
 }
 /*


@@ -1005,19 +1005,24 @@ vn_finished_write(mp)
 /*
  * Request a filesystem to suspend write operations.
  */
-void
+int
 vfs_write_suspend(mp)
         struct mount *mp;
 {
         struct thread *td = curthread;
+        int error;
         if (mp->mnt_kern_flag & MNTK_SUSPEND)
-                return;
+                return (0);
         mp->mnt_kern_flag |= MNTK_SUSPEND;
         if (mp->mnt_writeopcount > 0)
                 (void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
-        VFS_SYNC(mp, MNT_WAIT, td->td_ucred, td);
+        if ((error = VFS_SYNC(mp, MNT_WAIT, td->td_ucred, td)) != 0) {
+                vfs_write_resume(mp);
+                return (error);
+        }
         mp->mnt_kern_flag |= MNTK_SUSPENDED;
+        return (0);
 }
 /*


@@ -681,7 +681,7 @@ int vfs_object_create(struct vnode *vp, struct thread *td,
             struct ucred *cred);
 void    vfs_timestamp(struct timespec *);
 void    vfs_write_resume(struct mount *mp);
-void    vfs_write_suspend(struct mount *mp);
+int     vfs_write_suspend(struct mount *mp);
 int     vop_stdbmap(struct vop_bmap_args *);
 int     vop_stdgetwritemount(struct vop_getwritemount_args *);
 int     vop_stdgetpages(struct vop_getpages_args *);


@@ -307,7 +307,10 @@ ffs_snapshot(mp, snapfile)
          */
         for (;;) {
                 vn_finished_write(wrtmp);
-                vfs_write_suspend(vp->v_mount);
+                if ((error = vfs_write_suspend(vp->v_mount)) != 0) {
+                        vn_start_write(NULL, &wrtmp, V_WAIT);
+                        goto out;
+                }
                 if (mp->mnt_kern_flag & MNTK_SUSPENDED)
                         break;
                 vn_start_write(NULL, &wrtmp, V_WAIT);


@@ -184,7 +184,11 @@ ffs_mount(mp, path, data, ndp, td)
                 /*
                  * Flush any dirty data.
                  */
-                VFS_SYNC(mp, MNT_WAIT, td->td_proc->p_ucred, td);
+                if ((error = VFS_SYNC(mp, MNT_WAIT,
+                    td->td_proc->p_ucred, td)) != 0) {
+                        vn_finished_write(mp);
+                        return (error);
+                }
                 /*
                  * Check for and optionally get rid of files open
                  * for writing.
@@ -1156,7 +1160,7 @@ ffs_sync(mp, waitfor, cred, td)
         if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
                 allerror = error;
         /* Flushed work items may create new vnodes to clean */
-        if (count) {
+        if (allerror == 0 && count) {
                 mtx_lock(&mntvnode_mtx);
                 goto loop;
         }
@@ -1172,7 +1176,7 @@ ffs_sync(mp, waitfor, cred, td)
         if ((error = VOP_FSYNC(devvp, cred, waitfor, td)) != 0)
                 allerror = error;
         VOP_UNLOCK(devvp, 0, td);
-        if (waitfor == MNT_WAIT) {
+        if (allerror == 0 && waitfor == MNT_WAIT) {
                 mtx_lock(&mntvnode_mtx);
                 goto loop;
         }