complete_rqe: Remove a race condition in RAID-4 and RAID-5 where a
request could be deallocated before the top half had finished
issuing it.  The problem seems only to happen with IDE drives
and vn devices, but theoretically it could happen with any
drive.  This is the most important part of a possible series
of fixes designed to remove race conditions without locking
out interrupts for longer than absolutely necessary.

Reported-by:	       sos
Fix-supplied-by:       dillon
commit	bd131be7d1
parent	2eb1626e61
Author:	Greg Lehey
Date:	2000-04-06 03:03:31 +00:00
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
	svn path=/head/; revision=59060

@@ -124,7 +124,6 @@ complete_rqe(struct buf *bp)
         if (PLEX[rqe->rqg->plexno].volno >= 0)
             VOL[PLEX[rqe->rqg->plexno].volno].bytes_written += bp->b_bcount;
     }
-    rqg->active--; /* one less request active */
     if (rqg->flags & XFR_RECOVERY_READ) { /* recovery read, */
         int *sdata; /* source */
         int *data; /* and group data */
@@ -155,8 +154,9 @@ complete_rqe(struct buf *bp)
             bcopy(src, dst, length); /* move it */
         }
     } else if ((rqg->flags & (XFR_NORMAL_WRITE | XFR_DEGRADED_WRITE)) /* RAID 4/5 group write operation */
-        &&(rqg->active == 0)) /* and we've finished phase 1 */
+        &&(rqg->active == 1)) /* and this is the last rq of phase 1 */
         complete_raid5_write(rqe);
+    rqg->active--; /* one less request active */
     if (rqg->active == 0) { /* request group finished, */
         rq->active--; /* one less */
         if (rqg->lock) { /* got a lock? */
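
For readers unfamiliar with the vinum completion path, the sketch below
illustrates the ordering change the two hunks make: the decrement of the
group's active count now happens after the "last request of phase 1"
test (which accordingly becomes active == 1 instead of active == 0), so
the phase-2 write and the final teardown are driven by the same single
decrement, and the group can no longer look finished, and be
deallocated, while the top half is still issuing requests.  This is a
minimal standalone sketch under those assumptions, not vinum code:
reqgroup, phase2_write, finish_group, and request_done are hypothetical
stand-ins for struct rqgroup, complete_raid5_write(), and the teardown
logic at the end of complete_rqe().

#include <stdio.h>

/* Hypothetical, simplified request group; the real structure carries
 * far more state. */
struct reqgroup {
    int active;       /* requests still outstanding in this group */
    int phase1_write; /* nonzero: RAID-4/5 write still in phase 1 */
};

/* Stand-in for complete_raid5_write(): issue the parity write. */
static void
phase2_write(struct reqgroup *rqg)
{
    rqg->phase1_write = 0; /* group moves to phase 2 */
    rqg->active++;         /* the parity write is now outstanding */
    printf("issuing phase-2 (parity) write\n");
}

/* Stand-in for the final teardown that deallocates the request. */
static void
finish_group(struct reqgroup *rqg)
{
    (void)rqg;
    printf("group finished, request may now be freed\n");
}

/* Bottom-half completion for one request in the group. */
static void
request_done(struct reqgroup *rqg)
{
    /*
     * The fix moves the decrement of the active count to after the
     * phase-1 test: the last completing request is detected with
     * "active == 1" before the decrement, so the phase-2 write and
     * the teardown check both key off the one decrement below.
     */
    if (rqg->phase1_write && rqg->active == 1)
        phase2_write(rqg);

    rqg->active--;          /* one less request active */
    if (rqg->active == 0)
        finish_group(rqg);
}

int
main(void)
{
    struct reqgroup rqg = { 3, 1 }; /* three phase-1 requests outstanding */

    request_done(&rqg); /* 2 still active */
    request_done(&rqg); /* 1 still active */
    request_done(&rqg); /* last of phase 1: issue phase 2, 1 active */
    request_done(&rqg); /* phase-2 write done: group finished */
    return 0;
}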