This patch reestablishes the spec_fsync() guarantee that synchronous fsyncs, which typically occur during unmounting, will drain all dirty buffers even if it takes multiple passes to do so. The guarantee was broken by the last patch, which solved a problem caused by -current disabling interrupts while holding Giant (resulting in an infinite spin loop while waiting for I/O to complete).

-stable has neither patch, but it has a similar bug in the original spec_fsync() code that is triggered by a bug in the softupdates umount code; a fix for that will be committed to -current as soon as Kirk stamps it, and then both solutions will be MFC'd to -stable. -stable currently suffers from a combination of the softupdates bug and a small window of opportunity in the original spec_fsync() code. It also suffers from the spin-loop bug, but since interrupts are enabled there, the spin resolves itself in a few milliseconds.
commit 11fb1bf637
parent d3da30dfac
@@ -347,10 +347,12 @@ spec_fsync(ap)
         struct buf *bp;
         struct buf *nbp;
         int s;
+        int maxretry = 10000;   /* large, arbitrarily chosen */
 
         if (!vn_isdisk(vp, NULL))
                 return (0);
 
+loop1:
         /*
          * MARK/SCAN initialization to avoid infinite loops
          */
@@ -364,7 +366,7 @@ spec_fsync(ap)
         /*
          * Flush all dirty buffers associated with a block device.
          */
-loop:
+loop2:
         s = splbio();
         for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
                 nbp = TAILQ_NEXT(bp, b_vnbufs);
@@ -384,20 +386,27 @@ spec_fsync(ap)
                         splx(s);
                         bawrite(bp);
                 }
-                goto loop;
+                goto loop2;
         }
 
+        /*
+         * If synchronous the caller expects us to completely resolve all
+         * dirty buffers in the system.  Wait for in-progress I/O to
+         * complete (which could include background bitmap writes), then
+         * retry if dirty blocks still exist.
+         */
         if (ap->a_waitfor == MNT_WAIT) {
                 while (vp->v_numoutput) {
                         vp->v_flag |= VBWAIT;
                         (void) tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "spfsyn", 0);
                 }
-#ifdef DIAGNOSTIC
                 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
-                        vprint("spec_fsync: dirty", vp);
-                        splx(s);
-                        goto loop;
+                        if (--maxretry != 0) {
+                                splx(s);
+                                goto loop1;
+                        }
+                        vprint("spec_fsync: giving up on dirty", vp);
                 }
-#endif
         }
         splx(s);
         return (0);
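For context, the resulting control flow of spec_fsync() after this change can be summarized as below. This is a minimal sketch, not the committed code: the buffer-locking and MARK/SCAN details are elided, and the helper name spec_fsync_sketch() with a flattened argument list is illustrative only (the real VOP takes a struct vop_fsync_args *ap).

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/*
 * Sketch of the retry structure: flush dirty buffers (loop2), and for
 * synchronous requests wait for outstanding I/O and restart the whole
 * pass (loop1) until the dirty list drains or maxretry passes elapse.
 */
static int
spec_fsync_sketch(struct vnode *vp, int waitfor)
{
        struct buf *bp;
        struct buf *nbp;
        int s;
        int maxretry = 10000;   /* large, arbitrarily chosen */

        if (!vn_isdisk(vp, NULL))
                return (0);

loop1:
        /* MARK/SCAN initialization to avoid infinite loops (elided). */

loop2:
        /* Flush all dirty buffers associated with the block device. */
        s = splbio();
        for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
                nbp = TAILQ_NEXT(bp, b_vnbufs);
                /* (buffer locking / skip logic elided) */
                splx(s);
                bawrite(bp);            /* start an async write of bp */
                goto loop2;             /* list may have changed; rescan */
        }

        /*
         * Synchronous case: wait for in-progress I/O (which may include
         * background bitmap writes), then retry from loop1 while dirty
         * buffers remain, giving up after maxretry passes.
         */
        if (waitfor == MNT_WAIT) {
                while (vp->v_numoutput) {
                        vp->v_flag |= VBWAIT;
                        (void) tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1,
                            "spfsyn", 0);
                }
                if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
                        if (--maxretry != 0) {
                                splx(s);
                                goto loop1;
                        }
                        vprint("spec_fsync: giving up on dirty", vp);
                }
        }
        splx(s);
        return (0);
}

The key difference from the previous code is that the retry is no longer confined to a DIAGNOSTIC build and is bounded: dirty buffers left behind after a pass trigger another full pass from loop1, up to maxretry times, before the function gives up and reports the vnode.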