Move B_ERROR flag to b_ioflags and call it BIO_ERROR.

(Much of this done by script)

Move B_ORDERED flag to b_ioflags and call it BIO_ORDERED.

Move b_pblkno and b_iodone_chain to struct bio while we transition; they
will be obsoleted once bio structs chain/stack.

Add bio_queue field for struct bio aware disksort.

Address a lot of stylistic issues brought up by bde.
This commit is contained in:
Poul-Henning Kamp 2000-04-02 15:24:56 +00:00
parent 4c9805fafa
commit c244d2de43
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=58934
86 changed files with 413 additions and 365 deletions

View File

@ -1967,7 +1967,7 @@ bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
return(1);
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
#endif
return(-1);

View File

@ -2409,7 +2409,7 @@ bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
return(1);
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
return(-1);
}

View File

@ -378,7 +378,7 @@ cdoninvalidate(struct cam_periph *periph)
bufq_remove(&softc->buf_queue, q_bp);
q_bp->b_resid = q_bp->b_bcount;
q_bp->b_error = ENXIO;
q_bp->b_flags |= B_ERROR;
q_bp->b_ioflags |= BIO_ERROR;
biodone(q_bp);
}
splx(s);
@ -1381,7 +1381,7 @@ cdstrategy(struct buf *bp)
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
/*
* Correctly set the buf to indicate a completed xfer
*/
@ -1429,7 +1429,7 @@ cdstart(struct cam_periph *periph, union ccb *start_ccb)
scsi_read_write(&start_ccb->csio,
/*retries*/4,
/* cbfcnp */ cddone,
(bp->b_flags & B_ORDERED) != 0 ?
(bp->b_ioflags & BIO_ORDERED) != 0 ?
MSG_ORDERED_Q_TAG :
MSG_SIMPLE_Q_TAG,
/* read */bp->b_iocmd == BIO_READ,
@ -1551,13 +1551,13 @@ cddone(struct cam_periph *periph, union ccb *done_ccb)
bufq_remove(&softc->buf_queue, q_bp);
q_bp->b_resid = q_bp->b_bcount;
q_bp->b_error = EIO;
q_bp->b_flags |= B_ERROR;
q_bp->b_ioflags |= BIO_ERROR;
biodone(q_bp);
}
splx(s);
bp->b_resid = bp->b_bcount;
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
cam_release_devq(done_ccb->ccb_h.path,
/*relsim_flags*/0,
/*reduction*/0,
@ -1569,7 +1569,7 @@ cddone(struct cam_periph *periph, union ccb *done_ccb)
bp->b_error = 0;
if (bp->b_resid != 0) {
/* Short transfer ??? */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
}

View File

@ -534,7 +534,7 @@ dastrategy(struct buf *bp)
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
/*
* Correctly set the buf to indicate a completed xfer
@ -806,7 +806,7 @@ daoninvalidate(struct cam_periph *periph)
bufq_remove(&softc->buf_queue, q_bp);
q_bp->b_resid = q_bp->b_bcount;
q_bp->b_error = ENXIO;
q_bp->b_flags |= B_ERROR;
q_bp->b_ioflags |= BIO_ERROR;
biodone(q_bp);
}
splx(s);
@ -1044,7 +1044,7 @@ dastart(struct cam_periph *periph, union ccb *start_ccb)
devstat_start_transaction(&softc->device_stats);
if ((bp->b_flags & B_ORDERED) != 0
if ((bp->b_ioflags & BIO_ORDERED) != 0
|| (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
softc->flags &= ~DA_FLAG_NEED_OTAG;
softc->ordered_tag_count++;
@ -1188,19 +1188,19 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
bufq_remove(&softc->buf_queue, q_bp);
q_bp->b_resid = q_bp->b_bcount;
q_bp->b_error = EIO;
q_bp->b_flags |= B_ERROR;
q_bp->b_ioflags |= BIO_ERROR;
biodone(q_bp);
}
splx(s);
bp->b_error = error;
bp->b_resid = bp->b_bcount;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
} else {
bp->b_resid = csio->resid;
bp->b_error = 0;
if (bp->b_resid != 0) {
/* Short transfer ??? */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
}
if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
@ -1212,7 +1212,7 @@ dadone(struct cam_periph *periph, union ccb *done_ccb)
} else {
bp->b_resid = csio->resid;
if (csio->resid > 0)
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
/*

View File

@ -214,7 +214,7 @@ passoninvalidate(struct cam_periph *periph)
bufq_remove(&softc->buf_queue, q_bp);
q_bp->b_resid = q_bp->b_bcount;
q_bp->b_error = ENXIO;
q_bp->b_flags |= B_ERROR;
q_bp->b_ioflags |= BIO_ERROR;
biodone(q_bp);
}
splx(s);
@ -512,7 +512,7 @@ passstrategy(struct buf *bp)
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
/*
* Correctly set the buf to indicate a completed xfer
@ -562,7 +562,7 @@ passstart(struct cam_periph *periph, union ccb *start_ccb)
* hang.
*/
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_resid = bp->b_bcount;
biodone(bp);
bp = bufq_first(&softc->buf_queue);
@ -616,7 +616,7 @@ passdone(struct cam_periph *periph, union ccb *done_ccb)
* the abort process
*/
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
if ((done_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)

View File

@ -260,7 +260,7 @@ ptstrategy(struct buf *bp)
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
/*
* Correctly set the buf to indicate a completed xfer
@ -416,7 +416,7 @@ ptoninvalidate(struct cam_periph *periph)
bufq_remove(&softc->buf_queue, q_bp);
q_bp->b_resid = q_bp->b_bcount;
q_bp->b_error = ENXIO;
q_bp->b_flags |= B_ERROR;
q_bp->b_ioflags |= BIO_ERROR;
biodone(q_bp);
}
@ -630,19 +630,19 @@ ptdone(struct cam_periph *periph, union ccb *done_ccb)
bufq_remove(&softc->buf_queue, q_bp);
q_bp->b_resid = q_bp->b_bcount;
q_bp->b_error = EIO;
q_bp->b_flags |= B_ERROR;
q_bp->b_ioflags |= BIO_ERROR;
biodone(q_bp);
}
splx(s);
bp->b_error = error;
bp->b_resid = bp->b_bcount;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
} else {
bp->b_resid = csio->resid;
bp->b_error = 0;
if (bp->b_resid != 0) {
/* Short transfer ??? */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
}
if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
@ -654,7 +654,7 @@ ptdone(struct cam_periph *periph, union ccb *done_ccb)
} else {
bp->b_resid = csio->resid;
if (bp->b_resid != 0)
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
/*

View File

@ -721,7 +721,7 @@ sastrategy(struct buf *bp)
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
done:
/*
@ -1256,7 +1256,7 @@ saoninvalidate(struct cam_periph *periph)
bufq_remove(&softc->buf_queue, q_bp);
q_bp->b_resid = q_bp->b_bcount;
q_bp->b_error = ENXIO;
q_bp->b_flags |= B_ERROR;
q_bp->b_ioflags |= BIO_ERROR;
biodone(q_bp);
}
softc->queue_count = 0;
@ -1489,7 +1489,7 @@ sastart(struct cam_periph *periph, union ccb *start_ccb)
softc->queue_count--;
bufq_remove(&softc->buf_queue, bp);
bp->b_resid = bp->b_bcount;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
if ((softc->flags & SA_FLAG_EOM_PENDING) != 0) {
if (bp->b_iocmd == BIO_WRITE)
bp->b_error = ENOSPC;
@ -1643,7 +1643,7 @@ sadone(struct cam_periph *periph, union ccb *done_ccb)
bufq_remove(&softc->buf_queue, q_bp);
q_bp->b_resid = q_bp->b_bcount;
q_bp->b_error = EIO;
q_bp->b_flags |= B_ERROR;
q_bp->b_ioflags |= BIO_ERROR;
biodone(q_bp);
}
splx(s);
@ -1651,7 +1651,7 @@ sadone(struct cam_periph *periph, union ccb *done_ccb)
if (error != 0) {
bp->b_resid = bp->b_bcount;
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
/*
* In the error case, position is updated in saerror.
*/
@ -1659,7 +1659,7 @@ sadone(struct cam_periph *periph, union ccb *done_ccb)
bp->b_resid = csio->resid;
bp->b_error = 0;
if (csio->resid != 0) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
if (bp->b_iocmd == BIO_WRITE) {
softc->flags |= SA_FLAG_TAPE_WRITTEN;

View File

@ -1185,7 +1185,7 @@ targstrategy(struct buf *bp)
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
/*
* Correctly set the buf to indicate a completed xfer
@ -1702,7 +1702,7 @@ targdone(struct cam_periph *periph, union ccb *done_ccb)
|| error != 0) {
if (bp->b_resid != 0)
/* Short transfer */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
("Completing a buffer\n"));
@ -1747,7 +1747,7 @@ targdone(struct cam_periph *periph, union ccb *done_ccb)
targrunqueue(periph, softc);
} else {
if (desc->bp != NULL) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = ENXIO;
biodone(bp);
}
@ -1817,13 +1817,13 @@ targfireexception(struct cam_periph *periph, struct targ_softc *softc)
while ((bp = bufq_first(&softc->snd_buf_queue)) != NULL) {
bufq_remove(&softc->snd_buf_queue, bp);
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
}
while ((bp = bufq_first(&softc->rcv_buf_queue)) != NULL) {
bufq_remove(&softc->snd_buf_queue, bp);
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
}
@ -2159,7 +2159,7 @@ abort_pending_transactions(struct cam_periph *periph, u_int initiator_id,
CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
("Aborting ATIO\n"));
if (desc->bp != NULL) {
desc->bp->b_flags |= B_ERROR;
desc->bp->b_ioflags |= BIO_ERROR;
if (softc->state != TARG_STATE_TEARDOWN)
desc->bp->b_error = errno;
else

View File

@ -242,7 +242,7 @@ flastrategy(struct buf *bp)
}
if (error) {
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
} else {
bp->b_resid = 0;
}

View File

@ -4667,7 +4667,7 @@ softdep_deallocate_dependencies(bp)
struct buf *bp;
{
if ((bp->b_flags & B_ERROR) == 0)
if ((bp->b_ioflags & BIO_ERROR) == 0)
panic("softdep_deallocate_dependencies: dangling deps");
softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error);
panic("softdep_deallocate_dependencies: unrecovered I/O error");

View File

@ -976,7 +976,7 @@ amr_completeio(struct amr_command *ac)
if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? */
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
switch(ac->ac_status) {
/* XXX need more information on I/O error reasons */

View File

@ -202,7 +202,7 @@ amrd_strategy(struct buf *bp)
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
done:
/*
@ -221,7 +221,7 @@ amrd_intr(void *data)
debug("called");
if (bp->b_flags & B_ERROR) {
if (bp->b_ioflags & BIO_ERROR) {
bp->b_error = EIO;
debug("i/o error\n");
} else {

View File

@ -539,7 +539,7 @@ ad_interrupt(struct ad_request *request)
/* finish up transfer */
if (request->flags & ADR_F_ERROR) {
request->bp->b_error = EIO;
request->bp->b_flags |= B_ERROR;
request->bp->b_ioflags |= BIO_ERROR;
}
else {
request->bytecount -= request->currentsize;
@ -599,7 +599,7 @@ ad_timeout(struct ad_request *request)
else {
/* retries all used up, return error */
request->bp->b_error = EIO;
request->bp->b_flags |= B_ERROR;
request->bp->b_ioflags |= BIO_ERROR;
devstat_end_transaction_buf(&adp->stats, request->bp);
biodone(request->bp);
free(request, M_AD);

View File

@ -1139,7 +1139,7 @@ acd_start(struct atapi_softc *atp)
/* reject all queued entries if media changed */
if (cdp->atp->flags & ATAPI_F_MEDIA_CHANGED) {
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
}
@ -1193,7 +1193,7 @@ acd_done(struct atapi_request *request)
if (request->error) {
bp->b_error = request->error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
else {
bp->b_resid = bp->b_bcount - request->donecount;

View File

@ -309,7 +309,7 @@ afd_start(struct atapi_softc *atp)
/* should reject all queued entries if media have changed. */
if (fdp->atp->flags & ATAPI_F_MEDIA_CHANGED) {
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
}
@ -364,7 +364,7 @@ afd_partial_done(struct atapi_request *request)
if (request->error) {
bp->b_error = request->error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
bp->b_resid += request->bytecount;
return 0;
@ -376,9 +376,9 @@ afd_done(struct atapi_request *request)
struct buf *bp = request->driver;
struct afd_softc *fdp = request->device->driver;
if (request->error || (bp->b_flags & B_ERROR)) {
if (request->error || (bp->b_ioflags & BIO_ERROR)) {
bp->b_error = request->error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
else
bp->b_resid += (bp->b_bcount - request->donecount);

View File

@ -423,7 +423,7 @@ aststrategy(struct buf *bp)
}
if (!(bp->b_iocmd == BIO_READ) && stp->flags & F_WRITEPROTECT) {
bp->b_error = EPERM;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
}
@ -433,7 +433,7 @@ aststrategy(struct buf *bp)
printf("ast%d: bad request, must be multiple of %d\n",
stp->lun, stp->blksize);
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
}
@ -493,7 +493,7 @@ ast_done(struct atapi_request *request)
if (request->error) {
bp->b_error = request->error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
else {
if (!(bp->b_iocmd == BIO_READ))

View File

@ -779,7 +779,7 @@ ccdstrategy(bp)
#endif
if ((cs->sc_flags & CCDF_INITED) == 0) {
bp->b_error = ENXIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto done;
}
@ -813,7 +813,8 @@ ccdstrategy(bp)
bp->b_resid = bp->b_bcount;
if (pbn != cs->sc_size) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR | B_INVAL;
bp->b_flags |= B_INVAL;
bp->b_ioflags |= BIO_ERROR;
}
goto done;
}
@ -1108,7 +1109,7 @@ ccdintr(cs, bp)
/*
* Request is done for better or worse, wakeup the top half.
*/
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
bp->b_resid = bp->b_bcount;
devstat_end_transaction_buf(&cs->device_stats, bp);
biodone(bp);
@ -1148,7 +1149,7 @@ ccdiodone(ibp)
* succeed.
*/
if (cbp->cb_buf.b_flags & B_ERROR) {
if (cbp->cb_buf.b_ioflags & BIO_ERROR) {
const char *msg = "";
if ((ccd_softc[unit].sc_cflags & CCDF_MIRROR) &&
@ -1166,7 +1167,7 @@ ccdiodone(ibp)
cs->sc_pick = 1 - cs->sc_pick;
cs->sc_blk[cs->sc_pick] = bp->b_blkno;
} else {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = cbp->cb_buf.b_error ?
cbp->cb_buf.b_error : EIO;
}
@ -1204,7 +1205,7 @@ ccdiodone(ibp)
* occured with this one.
*/
if ((cbp->cb_pflags & CCDPF_MIRROR_DONE) == 0) {
if (cbp->cb_buf.b_flags & B_ERROR) {
if (cbp->cb_buf.b_ioflags & BIO_ERROR) {
cbp->cb_mirror->cb_pflags |=
CCDPF_MIRROR_DONE;
BUF_STRATEGY(&cbp->cb_mirror->cb_buf);

View File

@ -1464,7 +1464,7 @@ fdstrategy(struct buf *bp)
fdc = fd->fdc;
if (fd->type == NO_TYPE) {
bp->b_error = ENXIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
};
@ -1475,12 +1475,12 @@ fdstrategy(struct buf *bp)
"fd%d: fdstrat: bad request blkno = %lu, bcount = %ld\n",
fdu, (u_long)bp->b_blkno, bp->b_bcount);
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
if ((bp->b_bcount % fdblk) != 0) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
}
@ -1494,7 +1494,7 @@ fdstrategy(struct buf *bp)
* multiplication below from overflowing.
*/
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
blknum = (unsigned) bp->b_blkno * DEV_BSIZE/fdblk;
@ -1508,7 +1508,7 @@ fdstrategy(struct buf *bp)
goto bad; /* not actually bad but EOF */
} else {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
}
@ -2185,7 +2185,7 @@ retrier(struct fdc_data *fdc)
else
printf(" (No status)\n");
}
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = EIO;
bp->b_resid += bp->b_bcount - fdc->fd->skip;
fdc->bp = NULL;
@ -2263,7 +2263,7 @@ fdformat(dev, finfo, p)
device_unbusy(fd->dev);
biodone(bp);
}
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
rv = bp->b_error;
/*
* allow the process to be swapped

View File

@ -519,7 +519,7 @@ ida_done(struct ida_softc *ida, struct ida_qcb *qcb)
wakeup(qcb);
} else {
if (error)
qcb->buf->b_flags |= B_ERROR;
qcb->buf->b_ioflags |= BIO_ERROR;
id_intr(qcb->buf);
}

View File

@ -182,7 +182,7 @@ idstrategy(struct buf *bp)
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
done:
/*
@ -198,7 +198,7 @@ id_intr(struct buf *bp)
{
struct id_softc *drv = (struct id_softc *)bp->b_driver1;
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
bp->b_error = EIO;
else
bp->b_resid = 0;

View File

@ -402,7 +402,7 @@ mcdstrategy(struct buf *bp)
unit, (long)bp->b_blkno, bp->b_bcount);
printf("mcd: mcdstratregy failure");
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
@ -448,7 +448,7 @@ MCD_TRACE("strategy: drive not valid\n");
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
done:
bp->b_resid = bp->b_bcount;
biodone(bp);
@ -1184,7 +1184,7 @@ mcd_doread(int state, struct mcd_mbx *mbxin)
}
harderr:
/* invalidate the buffer */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_resid = bp->b_bcount;
biodone(bp);

View File

@ -1722,7 +1722,7 @@ mlx_completeio(struct mlx_command *mc)
if (mc->mc_status != MLX_STATUS_OK) { /* could be more verbose here? */
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
switch(mc->mc_status) {
case MLX_STATUS_RDWROFFLINE: /* system drive has gone offline */

View File

@ -193,7 +193,7 @@ mlxd_strategy(struct buf *bp)
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
done:
/*
@ -212,7 +212,7 @@ mlxd_intr(void *data)
debug_called(1);
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
bp->b_error = EIO;
else
bp->b_resid = 0;

View File

@ -325,7 +325,7 @@ scdstrategy(struct buf *bp)
printf("scd%d: strategy failure: blkno = %ld, bcount = %ld\n",
unit, (long)bp->b_blkno, bp->b_bcount);
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
@ -367,7 +367,7 @@ scdstrategy(struct buf *bp)
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
done:
bp->b_resid = bp->b_bcount;
biodone(bp);
@ -1043,7 +1043,7 @@ scd_doread(int state, struct scd_mbx *mbxin)
harderr:
/* invalidate the buffer */
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_resid = bp->b_bcount;
biodone(bp);

View File

@ -83,7 +83,7 @@ complete_rqe(struct buf *bp)
if ((drive->active == (DRIVE_MAXACTIVE - 1)) /* we were at the drive limit */
||(vinum_conf.active == VINUM_MAXACTIVE)) /* or the global limit */
wakeup(&launch_requests); /* let another one at it */
if ((bp->b_flags & B_ERROR) != 0) { /* transfer in error */
if ((bp->b_ioflags & BIO_ERROR) != 0) { /* transfer in error */
if (bp->b_error != 0) /* did it return a number? */
rq->error = bp->b_error; /* yes, put it in. */
else if (rq->error == 0) /* no: do we have one already? */
@ -174,7 +174,7 @@ complete_rqe(struct buf *bp)
if (rq->error) { /* did we have an error? */
if (rq->isplex) { /* plex operation, */
ubp->b_flags |= B_ERROR; /* yes, propagate to user */
ubp->b_ioflags |= BIO_ERROR; /* yes, propagate to user */
ubp->b_error = rq->error;
} else /* try to recover */
queue_daemon_request(daemonrq_ioerror, (union daemoninfo) rq); /* let the daemon complete */
@ -216,8 +216,8 @@ sdio_done(struct buf *bp)
struct sdbuf *sbp;
sbp = (struct sdbuf *) bp;
if (sbp->b.b_flags & B_ERROR) { /* had an error */
sbp->bp->b_flags |= B_ERROR; /* propagate upwards */
if (sbp->b.b_ioflags & BIO_ERROR) { /* had an error */
sbp->bp->b_ioflags |= BIO_ERROR; /* propagate upwards */
sbp->bp->b_error = sbp->b.b_error;
}
#ifdef VINUMDEBUG

View File

@ -308,7 +308,7 @@ driveio(struct drive *drive, char *buf, size_t length, off_t offset, int flag)
error = biowait(bp);
bp->b_data = bp->b_saveaddr;
bp->b_flags |= B_INVAL | B_AGE;
bp->b_flags &= ~B_ERROR;
bp->b_ioflags &= ~BIO_ERROR;
brelse(bp);
if (error)
break;
@ -767,7 +767,7 @@ write_volume_label(int volno)
DEV_STRATEGY(bp, 0);
error = biowait(bp);
bp->b_flags |= B_INVAL | B_AGE;
bp->b_flags &= ~B_ERROR;
bp->b_ioflags &= ~BIO_ERROR;
brelse(bp);
return error;
}

View File

@ -467,7 +467,7 @@ bre5(struct request *rq,
/* Part C: build the requests */
rqg = allocrqg(rq, m.rqcount); /* get a request group */
if (rqg == NULL) { /* malloc failed */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = ENOMEM;
biodone(bp);
return REQUEST_ENOMEM;

View File

@ -135,7 +135,7 @@ vinumstrategy(struct buf *bp)
case VINUM_DRIVE_TYPE:
default:
bp->b_error = EIO; /* I/O error */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
@ -144,7 +144,7 @@ vinumstrategy(struct buf *bp)
vol = &VOL[volno];
if (vol->state != volume_up) { /* can't access this volume */
bp->b_error = EIO; /* I/O error */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
}
@ -192,14 +192,14 @@ vinumstart(struct buf *bp, int reviveok)
if ((bp->b_bcount % DEV_BSIZE) != 0) { /* bad length */
bp->b_error = EINVAL; /* invalid size */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return -1;
}
rq = (struct request *) Malloc(sizeof(struct request)); /* allocate a request struct */
if (rq == NULL) { /* can't do it */
bp->b_error = ENOMEM; /* can't get memory */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return -1;
}
@ -257,7 +257,7 @@ vinumstart(struct buf *bp, int reviveok)
||(bp->b_flags & B_DONE)) { /* XXX shouldn't get this without bad status */
if (status == REQUEST_DOWN) { /* not enough subdisks */
bp->b_error = EIO; /* I/O error */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
biodone(bp);
freerq(rq);
@ -286,7 +286,7 @@ vinumstart(struct buf *bp, int reviveok)
||(bp->b_flags & B_DONE)) { /* XXX shouldn't get this without bad status */
if (status == REQUEST_DOWN) { /* not enough subdisks */
bp->b_error = EIO; /* I/O error */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
if ((bp->b_flags & B_DONE) == 0)
biodone(bp);
@ -480,7 +480,7 @@ bre(struct request *rq,
if (*diskaddr < (sd->plexoffset + sd->sectors)) { /* the request starts in this subdisk */
rqg = allocrqg(rq, 1); /* space for the request */
if (rqg == NULL) { /* malloc failed */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = ENOMEM;
biodone(bp);
return REQUEST_ENOMEM;
@ -519,7 +519,7 @@ bre(struct request *rq,
*diskaddr += rqe->datalen; /* bump the address */
if (build_rq_buffer(rqe, plex)) { /* build the buffer */
deallocrqg(rqg);
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = ENOMEM;
biodone(bp);
return REQUEST_ENOMEM; /* can't do it */
@ -564,7 +564,7 @@ bre(struct request *rq,
sd = &SD[plex->sdnos[sdno]]; /* the subdisk in question */
rqg = allocrqg(rq, 1); /* space for the request */
if (rqg == NULL) { /* malloc failed */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = ENOMEM;
biodone(bp);
return REQUEST_ENOMEM;
@ -629,7 +629,7 @@ bre(struct request *rq,
}
if (build_rq_buffer(rqe, plex)) { /* build the buffer */
deallocrqg(rqg);
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = ENOMEM;
biodone(bp);
return REQUEST_ENOMEM; /* can't do it */
@ -791,7 +791,8 @@ build_rq_buffer(struct rqelement *rqe, struct plex *plex)
/* Initialize the buf struct */
/* copy these flags from user bp */
bp->b_flags = ubp->b_flags & (B_ORDERED | B_NOCACHE | B_ASYNC);
bp->b_flags = ubp->b_flags & (B_NOCACHE | B_ASYNC);
bp->b_ioflags = ubp->b_ioflags & BIO_ORDERED;
bp->b_iocmd = ubp->b_iocmd;
BUF_LOCKINIT(bp); /* get a lock for the buffer */
BUF_LOCK(bp, LK_EXCLUSIVE); /* and lock it */
@ -853,7 +854,7 @@ abortrequest(struct request *rq, int error)
{
struct buf *bp = rq->bp; /* user buffer */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = error;
freerq(rq); /* free everything we're doing */
biodone(bp);
@ -896,7 +897,7 @@ sdio(struct buf *bp)
else if (bp->b_iocmd == BIO_WRITE) /* writing, */
set_sd_state(sd->sdno, sd_stale, setstate_force);
}
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = EIO;
biodone(bp);
return;
@ -906,7 +907,7 @@ sdio(struct buf *bp)
* to get the I/O performed.
*/
if (sd->state < sd_empty) { /* nothing to talk to, */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = EIO;
biodone(bp);
return;
@ -914,7 +915,7 @@ sdio(struct buf *bp)
/* Get a buffer */
sbp = (struct sdbuf *) Malloc(sizeof(struct sdbuf));
if (sbp == NULL) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = ENOMEM;
biodone(bp);
return;
@ -996,7 +997,7 @@ vinum_bounds_check(struct buf *bp, struct volume *vol)
&& (bp->b_iocmd == BIO_WRITE) /* and it's a write */
&& (!vol->flags & (VF_WLABEL | VF_LABELLING))) { /* and we're not allowed to write the label */
bp->b_error = EROFS; /* read-only */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
return -1;
}
if (size == 0) /* no transfer specified, */
@ -1013,7 +1014,7 @@ vinum_bounds_check(struct buf *bp, struct volume *vol)
size = maxsize - bp->b_blkno;
if (size <= 0) { /* nothing to transfer */
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
return -1;
}
bp->b_bcount = size << DEV_BSHIFT;

View File

@ -176,13 +176,13 @@ revive_block(int sdno)
biowait(bp);
}
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
error = bp->b_error;
else
/* Now write to the subdisk */
{
bp->b_dev = VINUM_SD(sdno); /* create the device number */
bp->b_flags = B_ORDERED; /* and make this an ordered write */
bp->b_ioflags = BIO_ORDERED; /* and make this an ordered write */
bp->b_iocmd = BIO_WRITE; /* and make this an ordered write */
BUF_LOCKINIT(bp); /* get a lock for the buffer */
BUF_LOCK(bp, LK_EXCLUSIVE); /* and lock it */
@ -190,7 +190,7 @@ revive_block(int sdno)
bp->b_blkno = sd->revived; /* write it to here */
sdio(bp); /* perform the I/O */
biowait(bp);
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
error = bp->b_error;
else {
sd->revived += bp->b_bcount >> DEV_BSHIFT; /* moved this much further down */
@ -225,7 +225,7 @@ revive_block(int sdno)
}
if (bp->b_qindex == 0) { /* not on a queue, */
bp->b_flags |= B_INVAL;
bp->b_flags &= ~B_ERROR;
bp->b_ioflags &= ~BIO_ERROR;
brelse(bp); /* is this kosher? */
}
return error;
@ -294,7 +294,7 @@ parityops(struct vinum_ioctl_msg *data, enum parityop op)
* the parity buffer header, which we have kept.
* Decide what to do with it.
*/
if ((pbp->b_flags & B_ERROR) == 0) { /* no error */
if ((pbp->b_ioflags & BIO_ERROR) == 0) { /* no error */
if (op == checkparity) {
int *parity_buf;
int isize;
@ -326,10 +326,10 @@ parityops(struct vinum_ioctl_msg *data, enum parityop op)
reply->error = 0;
}
}
if (pbp->b_flags & B_ERROR)
if (pbp->b_ioflags & BIO_ERROR)
reply->error = pbp->b_error;
pbp->b_flags |= B_INVAL;
pbp->b_flags &= ~B_ERROR;
pbp->b_ioflags &= ~BIO_ERROR;
brelse(pbp);
}
@ -445,7 +445,7 @@ parityrebuild(struct plex *plex,
for (sdno = 0; sdno < plex->subdisks; sdno++) { /* for each subdisk */
if ((sdno != psd) || check) {
biowait(bpp[sdno]);
if (bpp[sdno]->b_flags & B_ERROR) /* can't read, */
if (bpp[sdno]->b_ioflags & BIO_ERROR) /* can't read, */
error = bpp[sdno]->b_error;
}
}
@ -475,7 +475,7 @@ parityrebuild(struct plex *plex,
/* release our resources */
Free(bpp);
if (error) {
pbp->b_flags |= B_ERROR;
pbp->b_ioflags |= BIO_ERROR;
pbp->b_error = error;
}
return pbp;
@ -541,11 +541,11 @@ initsd(int sdno, int verify)
BUF_LOCK(bp, LK_EXCLUSIVE); /* and lock it */
sdio(bp); /* perform the I/O */
biowait(bp);
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
error = bp->b_error;
if (bp->b_qindex == 0) { /* not on a queue, */
bp->b_flags |= B_INVAL;
bp->b_flags &= ~B_ERROR;
bp->b_ioflags &= ~BIO_ERROR;
brelse(bp); /* is this kosher? */
}
if ((error == 0) && verify) { /* check that it got there */
@ -569,7 +569,7 @@ initsd(int sdno, int verify)
* XXX Bug fix code. This is hopefully no
* longer needed (21 February 2000).
*/
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
error = bp->b_error;
else if ((*bp->b_data != 0) /* first word spammed */
||(bcmp(bp->b_data, &bp->b_data[1], bp->b_bcount - 1))) { /* or one of the others */
@ -581,7 +581,7 @@ initsd(int sdno, int verify)
verified = 1;
if (bp->b_qindex == 0) { /* not on a queue, */
bp->b_flags |= B_INVAL;
bp->b_flags &= ~B_ERROR;
bp->b_ioflags &= ~BIO_ERROR;
brelse(bp); /* is this kosher? */
}
}

View File

@ -294,7 +294,7 @@ vnstrategy(struct buf *bp)
if ((vn->sc_flags & VNF_INITED) == 0) {
bp->b_error = ENXIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
}
@ -303,6 +303,7 @@ vnstrategy(struct buf *bp)
IFOPT(vn, VN_LABELS) {
if (vn->sc_slices != NULL && dscheck(bp, vn->sc_slices) <= 0) {
/* XXX: Normal B_ERROR processing, instead ? */
bp->b_flags |= B_INVAL;
biodone(bp);
return;
@ -318,7 +319,8 @@ vnstrategy(struct buf *bp)
if (bp->b_bcount % vn->sc_secsize != 0 ||
bp->b_blkno % (vn->sc_secsize / DEV_BSIZE) != 0) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR | B_INVAL;
bp->b_flags |= B_INVAL;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
}
@ -333,7 +335,8 @@ vnstrategy(struct buf *bp)
if (pbn < 0 || pbn >= vn->sc_size) {
if (pbn != vn->sc_size) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR | B_INVAL;
bp->b_flags |= B_INVAL;
bp->b_ioflags |= BIO_ERROR;
}
biodone(bp);
return;
@ -358,7 +361,7 @@ vnstrategy(struct buf *bp)
/*
* VNODE I/O
*
* If an error occurs, we set B_ERROR but we do not set
* If an error occurs, we set BIO_ERROR but we do not set
* B_INVAL because (for a write anyway), the buffer is
* still valid.
*/
@ -390,7 +393,7 @@ vnstrategy(struct buf *bp)
if (error) {
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
biodone(bp);
} else if (vn->sc_object) {
@ -410,7 +413,7 @@ vnstrategy(struct buf *bp)
vm_pager_strategy(vn->sc_object, bp);
}
} else {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = EINVAL;
biodone(bp);
}

View File

@ -758,7 +758,7 @@ cd9660_strategy(ap)
if ((error =
VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL))) {
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return (error);
}

View File

@ -746,7 +746,7 @@ hpfs_strategy(ap)
if (error) {
printf("hpfs_strategy: VOP_BMAP FAILED %d\n", error);
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return (error);
}

View File

@ -1838,7 +1838,7 @@ msdosfs_strategy(ap)
error = pcbmap(dep, bp->b_lblkno, &bp->b_blkno, 0, 0);
if (error) {
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return (error);
}

View File

@ -364,7 +364,7 @@ ntfs_strategy(ap)
if (error) {
printf("ntfs_strategy: ntfs_readattr failed\n");
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
bzero(bp->b_data + toread, bp->b_bcount - toread);
@ -376,7 +376,7 @@ ntfs_strategy(ap)
if (ntfs_cntob(bp->b_blkno) + bp->b_bcount >= fp->f_size) {
printf("ntfs_strategy: CAN'T EXTEND FILE\n");
bp->b_error = error = EFBIG;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
} else {
towrite = min(bp->b_bcount,
fp->f_size-ntfs_cntob(bp->b_blkno));
@ -390,7 +390,7 @@ ntfs_strategy(ap)
if (error) {
printf("ntfs_strategy: ntfs_writeattr fail\n");
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
}
}

View File

@ -311,7 +311,7 @@ nwfs_doio(bp, cr, p)
break;
};
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = error;
}
} else { /* write */
@ -330,7 +330,7 @@ nwfs_doio(bp, cr, p)
/*
* For an interrupted write, the buffer is still valid
* and the write hasn't been pushed to the server yet,
* so we can't set B_ERROR and report the interruption
* so we can't set BIO_ERROR and report the interruption
* by setting B_EINTR. For the B_ASYNC case, B_EINTR
* is not relevant, so the rpc attempt is essentially
* a noop. For the case of a V3 write rpc not being
@ -357,7 +357,7 @@ nwfs_doio(bp, cr, p)
splx(s);
} else {
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error /*= np->n_error */= error;
/* np->n_flag |= NWRITEERR;*/
}

View File

@ -687,7 +687,7 @@ spec_getpages(ap)
splx(s);
if ((bp->b_flags & B_ERROR) != 0) {
if ((bp->b_ioflags & BIO_ERROR) != 0) {
if (bp->b_error)
error = bp->b_error;
else

View File

@ -779,7 +779,7 @@ ccdstrategy(bp)
#endif
if ((cs->sc_flags & CCDF_INITED) == 0) {
bp->b_error = ENXIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto done;
}
@ -813,7 +813,8 @@ ccdstrategy(bp)
bp->b_resid = bp->b_bcount;
if (pbn != cs->sc_size) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR | B_INVAL;
bp->b_flags |= B_INVAL;
bp->b_ioflags |= BIO_ERROR;
}
goto done;
}
@ -1108,7 +1109,7 @@ ccdintr(cs, bp)
/*
* Request is done for better or worse, wakeup the top half.
*/
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
bp->b_resid = bp->b_bcount;
devstat_end_transaction_buf(&cs->device_stats, bp);
biodone(bp);
@ -1148,7 +1149,7 @@ ccdiodone(ibp)
* succeed.
*/
if (cbp->cb_buf.b_flags & B_ERROR) {
if (cbp->cb_buf.b_ioflags & BIO_ERROR) {
const char *msg = "";
if ((ccd_softc[unit].sc_cflags & CCDF_MIRROR) &&
@ -1166,7 +1167,7 @@ ccdiodone(ibp)
cs->sc_pick = 1 - cs->sc_pick;
cs->sc_blk[cs->sc_pick] = bp->b_blkno;
} else {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = cbp->cb_buf.b_error ?
cbp->cb_buf.b_error : EIO;
}
@ -1204,7 +1205,7 @@ ccdiodone(ibp)
* occured with this one.
*/
if ((cbp->cb_pflags & CCDPF_MIRROR_DONE) == 0) {
if (cbp->cb_buf.b_flags & B_ERROR) {
if (cbp->cb_buf.b_ioflags & BIO_ERROR) {
cbp->cb_mirror->cb_pflags |=
CCDPF_MIRROR_DONE;
BUF_STRATEGY(&cbp->cb_mirror->cb_buf);

View File

@ -193,7 +193,8 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp, runb)
#endif
bp->b_blkno = blkptrtodb(ump, daddr);
bp->b_iocmd = BIO_READ;
bp->b_flags &= ~(B_INVAL|B_ERROR);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
vfs_busy_pages(bp, 0);
BUF_STRATEGY(bp);
curproc->p_stats->p_ru.ru_inblock++; /* XXX */

View File

@ -193,7 +193,8 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp, runb)
#endif
bp->b_blkno = blkptrtodb(ump, daddr);
bp->b_iocmd = BIO_READ;
bp->b_flags &= ~(B_INVAL|B_ERROR);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
vfs_busy_pages(bp, 0);
BUF_STRATEGY(bp);
curproc->p_stats->p_ru.ru_inblock++; /* XXX */

View File

@ -2409,7 +2409,7 @@ bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
return(1);
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
return(-1);
}

View File

@ -313,7 +313,7 @@ bp_done(struct buf *bp, int err)
if (err || bp->b_resid)
{
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
biodone(bp);

View File

@ -905,7 +905,7 @@ void matcdstrategy(struct buf *bp)
splx(s); /*Return priorities to normal*/
return; /*All done*/
bad: bp->b_flags |= B_ERROR; /*Request bad in some way*/
bad: bp->b_ioflags |= BIO_ERROR; /*Request bad in some way*/
done: bp->b_resid = bp->b_bcount; /*Show amount of data un read*/
biodone(bp); /*Signal we have done all we plan to*/
return;
@ -2035,7 +2035,7 @@ static void matcd_blockread(int state)
<14> has been removed by the user. In both cases there is no retry
<14> for this call. We will invalidate the label in both cases.
*/
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_resid = bp->b_bcount;
biodone(bp);
unlockbus(ldrive>>2, ldrive);

View File

@ -402,7 +402,7 @@ mcdstrategy(struct buf *bp)
unit, (long)bp->b_blkno, bp->b_bcount);
printf("mcd: mcdstratregy failure");
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
@ -448,7 +448,7 @@ MCD_TRACE("strategy: drive not valid\n");
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
done:
bp->b_resid = bp->b_bcount;
biodone(bp);
@ -1184,7 +1184,7 @@ mcd_doread(int state, struct mcd_mbx *mbxin)
}
harderr:
/* invalidate the buffer */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_resid = bp->b_bcount;
biodone(bp);

View File

@ -325,7 +325,7 @@ scdstrategy(struct buf *bp)
printf("scd%d: strategy failure: blkno = %ld, bcount = %ld\n",
unit, (long)bp->b_blkno, bp->b_bcount);
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
@ -367,7 +367,7 @@ scdstrategy(struct buf *bp)
return;
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
done:
bp->b_resid = bp->b_bcount;
biodone(bp);
@ -1043,7 +1043,7 @@ scd_doread(int state, struct scd_mbx *mbxin)
harderr:
/* invalidate the buffer */
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_resid = bp->b_bcount;
biodone(bp);

View File

@ -567,7 +567,7 @@ wdstrategy(register struct buf *bp)
|| bp->b_bcount % DEV_BSIZE != 0) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto done;
}
@ -1001,7 +1001,7 @@ wdintr(void *unitnum)
} else {
wderror(bp, du, "hard error");
bp->b_error = EIO;
bp->b_flags |= B_ERROR; /* flag the error */
bp->b_ioflags |= BIO_ERROR; /* flag the error */
}
} else if (du->dk_status & WDCS_ECCCOR)
wderror(bp, du, "soft ecc");
@ -1010,7 +1010,7 @@ wdintr(void *unitnum)
/*
* If this was a successful read operation, fetch the data.
*/
if ((bp->b_iocmd == BIO_READ && !(bp->b_flags & B_ERROR))
if ((bp->b_iocmd == BIO_READ && !(bp->b_ioflags & BIO_ERROR))
&& !((du->dk_flags & (DKFL_DMA|DKFL_SINGLE)) == DKFL_DMA)
&& wdtab[unit].b_active) {
u_int chk, dummy, multisize;
@ -1052,7 +1052,7 @@ wdintr(void *unitnum)
}
/* final cleanup on DMA */
if (((bp->b_flags & B_ERROR) == 0)
if (((bp->b_ioflags & BIO_ERROR) == 0)
&& ((du->dk_flags & (DKFL_DMA|DKFL_SINGLE)) == DKFL_DMA)
&& wdtab[unit].b_active) {
int iosize;
@ -1065,7 +1065,7 @@ wdintr(void *unitnum)
outt:
if (wdtab[unit].b_active) {
if ((bp->b_flags & B_ERROR) == 0) {
if ((bp->b_ioflags & BIO_ERROR) == 0) {
du->dk_skip += du->dk_currentiosize;/* add to successful sectors */
if (wdtab[unit].b_errcnt)
wderror(bp, du, "soft error");
@ -1315,7 +1315,7 @@ wdcontrol(register struct buf *bp)
if (++wdtab[ctrlr].b_errcnt < RETRIES)
goto tryagainrecal;
bp->b_error = ENXIO; /* XXX needs translation */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
return (2);
}
wdtab[ctrlr].b_errcnt = 0;

View File

@ -575,7 +575,7 @@ wtstrategy (struct buf *bp)
if (t->flags & TPEXCEP) {
errxit: bp->b_error = EIO;
err2xit: bp->b_flags |= B_ERROR;
err2xit: bp->b_ioflags |= BIO_ERROR;
}
xit: biodone (bp);
return;

View File

@ -1464,7 +1464,7 @@ fdstrategy(struct buf *bp)
fdc = fd->fdc;
if (fd->type == NO_TYPE) {
bp->b_error = ENXIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
};
@ -1475,12 +1475,12 @@ fdstrategy(struct buf *bp)
"fd%d: fdstrat: bad request blkno = %lu, bcount = %ld\n",
fdu, (u_long)bp->b_blkno, bp->b_bcount);
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
if ((bp->b_bcount % fdblk) != 0) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
}
@ -1494,7 +1494,7 @@ fdstrategy(struct buf *bp)
* multiplication below from overflowing.
*/
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
blknum = (unsigned) bp->b_blkno * DEV_BSIZE/fdblk;
@ -1508,7 +1508,7 @@ fdstrategy(struct buf *bp)
goto bad; /* not actually bad but EOF */
} else {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
}
@ -2185,7 +2185,7 @@ retrier(struct fdc_data *fdc)
else
printf(" (No status)\n");
}
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = EIO;
bp->b_resid += bp->b_bcount - fdc->fd->skip;
fdc->bp = NULL;
@ -2263,7 +2263,7 @@ fdformat(dev, finfo, p)
device_unbusy(fd->dev);
biodone(bp);
}
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
rv = bp->b_error;
/*
* allow the process to be swapped

View File

@ -758,7 +758,7 @@ cd9660_strategy(ap)
if ((error =
VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL))) {
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return (error);
}

View File

@ -118,13 +118,13 @@ physio(dev_t dev, struct uio *uio, int ioflag)
if (uio->uio_segflg == UIO_USERSPACE)
vunmapbuf(bp);
iolen = bp->b_bcount - bp->b_resid;
if (iolen == 0 && !(bp->b_flags & B_ERROR))
if (iolen == 0 && !(bp->b_ioflags & BIO_ERROR))
goto doerror; /* EOF */
uio->uio_iov[i].iov_len -= iolen;
uio->uio_iov[i].iov_base += iolen;
uio->uio_resid -= iolen;
uio->uio_offset += iolen;
if( bp->b_flags & B_ERROR) {
if( bp->b_ioflags & BIO_ERROR) {
error = bp->b_error;
goto doerror;
}

View File

@ -241,7 +241,7 @@ devstat_end_transaction_buf(struct devstat *ds, struct buf *bp)
flg = DEVSTAT_WRITE;
devstat_end_transaction(ds, bp->b_bcount - bp->b_resid,
(bp->b_flags & B_ORDERED) ?
(bp->b_ioflags & BIO_ORDERED) ?
DEVSTAT_TAG_ORDERED : DEVSTAT_TAG_SIMPLE, flg);
}

View File

@ -191,7 +191,7 @@ diskstrategy(struct buf *bp)
if (!dp) {
bp->b_error = ENXIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
}

View File

@ -77,7 +77,7 @@ bufqdisksort(bufq, bp)
* ordered transaction, then it's easy.
*/
if ((bq = bufq_first(bufq)) == NULL
|| (bp->b_flags & B_ORDERED) != 0) {
|| (bp->b_ioflags & BIO_ORDERED) != 0) {
bufq_insert_tail(bufq, bp);
return;
} else if (bufq->insert_point != NULL) {

View File

@ -307,7 +307,7 @@ if (labelsect != 0) Debugger("labelsect != 0 in dscheck()");
bad:
bp->b_resid = bp->b_bcount;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
return (-1);
}
@ -540,7 +540,7 @@ dsiodone(bp)
bp->b_iodone = ic->ic_prev_iodone;
bp->b_iodone_chain = ic->ic_prev_iodone_chain;
if (!(bp->b_iocmd == BIO_READ)
|| (!(bp->b_flags & B_ERROR) && bp->b_error == 0)) {
|| (!(bp->b_ioflags & BIO_ERROR) && bp->b_error == 0)) {
msg = fixlabel((char *)NULL, ic->ic_args[1].ia_ptr,
(struct disklabel *)
(bp->b_data + ic->ic_args[0].ia_long),

View File

@ -1043,7 +1043,7 @@ aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
* but is returned using the aio_error mechanism. In this case,
* aio_suspend will return immediately.
*/
if (bp->b_error || (bp->b_flags & B_ERROR)) {
if (bp->b_error || (bp->b_ioflags & BIO_ERROR)) {
struct aiocb *job = aiocbe->uuaiocb;
aiocbe->uaiocb._aiocb_private.status = 0;
@ -1110,7 +1110,7 @@ aio_fphysio(struct proc *p, struct aiocblist *iocb, int flgwait)
error = 0;
/* Check for an error. */
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
error = bp->b_error;
relpbuf(bp, NULL);
@ -2143,7 +2143,7 @@ aio_physwakeup(struct buf *bp)
aiocbe->uaiocb._aiocb_private.error = 0;
aiocbe->jobflags |= AIOCBLIST_DONE;
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
aiocbe->uaiocb._aiocb_private.error = bp->b_error;
lj = aiocbe->lio;

View File

@ -445,7 +445,7 @@ bremfree(struct buf * bp)
/*
* Get a buffer with the specified data. Look in the cache first. We
* must clear B_ERROR and B_INVAL prior to initiating I/O. If B_CACHE
* must clear BIO_ERROR and B_INVAL prior to initiating I/O. If B_CACHE
* is set, the buffer is valid and we do not have to do anything ( see
* getblk() ).
*/
@ -464,7 +464,8 @@ bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
curproc->p_stats->p_ru.ru_inblock++;
KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
bp->b_iocmd = BIO_READ;
bp->b_flags &= ~(B_ERROR | B_INVAL);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
if (bp->b_rcred == NOCRED) {
if (cred != NOCRED)
crhold(cred);
@ -479,7 +480,7 @@ bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
/*
* Operates like bread, but also starts asynchronous I/O on
* read-ahead blocks. We must clear B_ERROR and B_INVAL prior
* read-ahead blocks. We must clear BIO_ERROR and B_INVAL prior
* to initiating I/O . If B_CACHE is set, the buffer is valid
* and we do not have to do anything.
*/
@ -499,7 +500,8 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
if (curproc != NULL)
curproc->p_stats->p_ru.ru_inblock++;
bp->b_iocmd = BIO_READ;
bp->b_flags &= ~(B_ERROR | B_INVAL);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
if (bp->b_rcred == NOCRED) {
if (cred != NOCRED)
crhold(cred);
@ -519,7 +521,8 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
if (curproc != NULL)
curproc->p_stats->p_ru.ru_inblock++;
rabp->b_flags |= B_ASYNC;
rabp->b_flags &= ~(B_ERROR | B_INVAL);
rabp->b_flags &= ~B_INVAL;
rabp->b_ioflags &= ~BIO_ERROR;
rabp->b_iocmd = BIO_READ;
if (rabp->b_rcred == NOCRED) {
if (cred != NOCRED)
@ -629,7 +632,8 @@ bwrite(struct buf * bp)
bp = newbp;
}
bp->b_flags &= ~(B_DONE | B_ERROR);
bp->b_flags &= ~B_DONE;
bp->b_ioflags &= ~BIO_ERROR;
bp->b_flags |= B_WRITEINPROG | B_CACHE;
bp->b_iocmd = BIO_WRITE;
@ -862,7 +866,8 @@ bawrite(struct buf * bp)
int
bowrite(struct buf * bp)
{
bp->b_flags |= B_ORDERED | B_ASYNC;
bp->b_ioflags |= BIO_ORDERED;
bp->b_flags |= B_ASYNC;
return (BUF_WRITE(bp));
}
@ -911,20 +916,22 @@ brelse(struct buf * bp)
s = splbio();
if (bp->b_flags & B_LOCKED)
bp->b_flags &= ~B_ERROR;
bp->b_ioflags &= ~BIO_ERROR;
if (bp->b_iocmd == BIO_WRITE &&
(bp->b_flags & (B_ERROR | B_INVAL)) == B_ERROR) {
(bp->b_ioflags & BIO_ERROR) &&
!(bp->b_flags & B_INVAL)) {
/*
* Failed write, redirty. Must clear B_ERROR to prevent
* Failed write, redirty. Must clear BIO_ERROR to prevent
* pages from being scrapped. If B_INVAL is set then
* this case is not run and the next case is run to
* destroy the buffer. B_INVAL can occur if the buffer
* is outside the range supported by the underlying device.
*/
bp->b_flags &= ~B_ERROR;
bp->b_ioflags &= ~BIO_ERROR;
bdirty(bp);
} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
(bp->b_ioflags & BIO_ERROR) ||
bp->b_iocmd == BIO_DELETE || (bp->b_bufsize <= 0)) {
/*
* Either a failed I/O or we were asked to free or not
@ -965,8 +972,8 @@ brelse(struct buf * bp)
* B_INVAL, the struct buf is invalidated but the VM object is kept
* around ( i.e. so it is trivial to reconstitute the buffer later ).
*
* If B_ERROR or B_NOCACHE is set, pages in the VM object will be
* invalidated. B_ERROR cannot be set for a failed write unless the
* If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
* invalidated. BIO_ERROR cannot be set for a failed write unless the
* buffer is also B_INVAL because it hits the re-dirtying code above.
*
* Normally we can do this whether a buffer is B_DELWRI or not. If
@ -1034,7 +1041,7 @@ brelse(struct buf * bp)
pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
}
}
if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
if ((bp->b_flags & B_NOCACHE) || (bp->b_ioflags & BIO_ERROR)) {
int poffset = foff & PAGE_MASK;
int presid = resid > (PAGE_SIZE - poffset) ?
(PAGE_SIZE - poffset) : resid;
@ -1086,7 +1093,7 @@ brelse(struct buf * bp)
LIST_INSERT_HEAD(&invalhash, bp, b_hash);
bp->b_dev = NODEV;
/* buffers with junk contents */
} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) || (bp->b_ioflags & BIO_ERROR)) {
bp->b_flags |= B_INVAL;
bp->b_xflags &= ~BX_BKGRDWRITE;
if (bp->b_xflags & BX_BKGRDINPROG)
@ -1155,7 +1162,8 @@ brelse(struct buf * bp)
/* unlock */
BUF_UNLOCK(bp);
bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
bp->b_ioflags &= ~BIO_ORDERED;
splx(s);
}
@ -1187,7 +1195,7 @@ bqrelse(struct buf * bp)
return;
}
if (bp->b_flags & B_LOCKED) {
bp->b_flags &= ~B_ERROR;
bp->b_ioflags &= ~BIO_ERROR;
bp->b_qindex = QUEUE_LOCKED;
TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
/* buffers with stale but valid contents */
@ -1214,7 +1222,8 @@ bqrelse(struct buf * bp)
/* unlock */
BUF_UNLOCK(bp);
bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
bp->b_ioflags &= ~BIO_ORDERED;
splx(s);
}
@ -1571,6 +1580,7 @@ getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
allocbuf(bp, 0);
bp->b_flags = 0;
bp->b_ioflags = 0;
bp->b_xflags = 0;
bp->b_dev = NODEV;
bp->b_vp = NULL;
@ -2037,7 +2047,7 @@ vfs_setdirty(struct buf *bp)
* the caller should set B_CACHE ( as an optimization ), else the caller
* should issue the I/O and biodone() will set B_CACHE if the I/O was
* a write attempt or if it was a successfull read. If the caller
* intends to issue a READ, the caller must clear B_INVAL and B_ERROR
* intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
* prior to issuing the READ. biodone() will *not* clear B_INVAL.
*/
struct buf *
@ -2590,7 +2600,7 @@ biowait(register struct buf * bp)
bp->b_flags &= ~B_EINTR;
return (EINTR);
}
if (bp->b_flags & B_ERROR) {
if (bp->b_ioflags & BIO_ERROR) {
return (bp->b_error ? bp->b_error : EIO);
} else {
return (0);
@ -2695,7 +2705,8 @@ biodone(register struct buf * bp)
*/
iosize = bp->b_bcount - bp->b_resid;
if (bp->b_iocmd == BIO_READ &&
!(bp->b_flags & (B_INVAL|B_NOCACHE|B_ERROR))) {
!(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
!(bp->b_ioflags & BIO_ERROR)) {
bp->b_flags |= B_CACHE;
}
@ -2776,7 +2787,7 @@ biodone(register struct buf * bp)
*/
if (bp->b_flags & B_ASYNC) {
if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
brelse(bp);
else
bqrelse(bp);
@ -2865,7 +2876,7 @@ vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
* inconsistant.
*
* Since I/O has not been initiated yet, certain buffer flags
* such as B_ERROR or B_INVAL may be in an inconsistant state
* such as BIO_ERROR or B_INVAL may be in an inconsistant state
* and should be ignored.
*/
void
@ -3006,7 +3017,7 @@ vfs_bio_set_validclean(struct buf *bp, int base, int size)
* vfs_bio_clrbuf:
*
* clear a buffer. This routine essentially fakes an I/O, so we need
* to clear B_ERROR and B_INVAL.
* to clear BIO_ERROR and B_INVAL.
*
* Note that while we only theoretically need to clear through b_bcount,
* we go ahead and clear through b_bufsize.
@ -3017,7 +3028,8 @@ vfs_bio_clrbuf(struct buf *bp) {
int i, mask = 0;
caddr_t sa, ea;
if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
bp->b_flags &= ~(B_INVAL|B_ERROR);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
(bp->b_offset & PAGE_MASK) == 0) {
mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;

View File

@ -247,7 +247,8 @@ cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
#endif
if ((bp->b_flags & B_CLUSTER) == 0)
vfs_busy_pages(bp, 0);
bp->b_flags &= ~(B_ERROR|B_INVAL);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
BUF_KERNPROC(bp);
error = VOP_STRATEGY(vp, bp);
@ -282,7 +283,8 @@ cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
if ((rbp->b_flags & B_CLUSTER) == 0)
vfs_busy_pages(rbp, 0);
rbp->b_flags &= ~(B_ERROR|B_INVAL);
rbp->b_flags &= ~B_INVAL;
rbp->b_ioflags &= ~BIO_ERROR;
if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
BUF_KERNPROC(rbp);
(void) VOP_STRATEGY(vp, rbp);
@ -468,7 +470,7 @@ cluster_callback(bp)
/*
* Must propogate errors to all the components.
*/
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
error = bp->b_error;
pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
@ -480,11 +482,12 @@ cluster_callback(bp)
tbp; tbp = nbp) {
nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
if (error) {
tbp->b_flags |= B_ERROR;
tbp->b_ioflags |= BIO_ERROR;
tbp->b_error = error;
} else {
tbp->b_dirtyoff = tbp->b_dirtyend = 0;
tbp->b_flags &= ~(B_ERROR|B_INVAL);
tbp->b_flags &= ~B_INVAL;
tbp->b_ioflags &= ~BIO_ERROR;
}
biodone(tbp);
}
@ -837,7 +840,8 @@ cluster_wbuild(vp, size, start_lbn, len)
s = splbio();
bundirty(tbp);
tbp->b_flags &= ~(B_DONE | B_ERROR);
tbp->b_flags &= ~B_DONE;
tbp->b_ioflags &= ~BIO_ERROR;
tbp->b_flags |= B_ASYNC;
tbp->b_iocmd = BIO_WRITE;
reassignbuf(tbp, tbp->b_vp); /* put on clean list */

View File

@ -152,11 +152,11 @@ vop_panic(struct vop_generic_args *ap)
*
* Strategy routine for VFS devices that have none.
*
* B_ERROR and B_INVAL must be cleared prior to calling any strategy
* BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
* routine. Typically this is done for a BIO_READ strategy call.
* Typically B_INVAL is assumed to already be clear prior to a write
* and should not be cleared manually unless you just made the buffer
* invalid. B_ERROR should be cleared either way.
* invalid. BIO_ERROR should be cleared either way.
*/
static int
@ -165,7 +165,7 @@ vop_nostrategy (struct vop_strategy_args *ap)
printf("No strategy for buffer at %p\n", ap->a_bp);
vprint("", ap->a_vp);
vprint("", ap->a_bp->b_vp);
ap->a_bp->b_flags |= B_ERROR;
ap->a_bp->b_ioflags |= BIO_ERROR;
ap->a_bp->b_error = EOPNOTSUPP;
biodone(ap->a_bp);
return (EOPNOTSUPP);

View File

@ -1864,7 +1864,7 @@ devfs_getpages(struct vop_getpages_args *ap)
splx(s);
if ((bp->b_flags & B_ERROR) != 0) {
if ((bp->b_ioflags & BIO_ERROR) != 0) {
if (bp->b_error)
error = bp->b_error;
else

View File

@ -687,7 +687,7 @@ spec_getpages(ap)
splx(s);
if ((bp->b_flags & B_ERROR) != 0) {
if ((bp->b_ioflags & BIO_ERROR) != 0) {
if (bp->b_error)
error = bp->b_error;
else

View File

@ -1838,7 +1838,7 @@ msdosfs_strategy(ap)
error = pcbmap(dep, bp->b_lblkno, &bp->b_blkno, 0, 0);
if (error) {
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return (error);
}

View File

@ -469,7 +469,8 @@ nfs_bioread(vp, uio, ioflag, cred)
rabp->b_iocmd = BIO_READ;
vfs_busy_pages(rabp, 0);
if (nfs_asyncio(rabp, cred, p)) {
rabp->b_flags |= B_INVAL|B_ERROR;
rabp->b_flags |= B_INVAL;
rabp->b_ioflags |= BIO_ERROR;
vfs_unbusy_pages(rabp);
brelse(rabp);
break;
@ -558,7 +559,7 @@ nfs_bioread(vp, uio, ioflag, cred)
vfs_busy_pages(bp, 0);
error = nfs_doio(bp, cred, p);
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
brelse(bp);
return (error);
}
@ -653,7 +654,8 @@ nfs_bioread(vp, uio, ioflag, cred)
rabp->b_iocmd = BIO_READ;
vfs_busy_pages(rabp, 0);
if (nfs_asyncio(rabp, cred, p)) {
rabp->b_flags |= B_INVAL|B_ERROR;
rabp->b_flags |= B_INVAL;
rabp->b_ioflags |= BIO_ERROR;
vfs_unbusy_pages(rabp);
brelse(rabp);
}
@ -934,7 +936,8 @@ nfs_write(ap)
if (on == 0 && n == bcount) {
bp->b_flags |= B_CACHE;
bp->b_flags &= ~(B_ERROR | B_INVAL);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
}
if ((bp->b_flags & B_CACHE) == 0) {
@ -1034,7 +1037,7 @@ nfs_write(ap)
bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
brelse(bp);
break;
}
@ -1204,7 +1207,7 @@ nfs_vinvalbuf(vp, flags, cred, p, intrflg)
* This is mainly to avoid queueing async I/O requests when the nfsiods
* are all hung on a dead server.
*
* Note: nfs_asyncio() does not clear (B_ERROR|B_INVAL) but when the bp
* Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
* is eventually dequeued by the async daemon, nfs_doio() *will*.
*/
int
@ -1366,11 +1369,12 @@ nfs_doio(bp, cr, p)
uiop->uio_procp = p;
/*
* clear B_ERROR and B_INVAL state prior to initiating the I/O. We
* clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We
* do this here so we do not have to do it in all the code that
* calls us.
*/
bp->b_flags &= ~(B_ERROR | B_INVAL);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));
@ -1398,7 +1402,7 @@ nfs_doio(bp, cr, p)
error = nfs_writerpc(vp, uiop, cr, &iomode, &com);
}
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = error;
}
} else if (bp->b_iocmd == BIO_READ) {
@ -1466,7 +1470,7 @@ nfs_doio(bp, cr, p)
break;
};
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = error;
}
} else {
@ -1545,7 +1549,7 @@ nfs_doio(bp, cr, p)
/*
* For an interrupted write, the buffer is still valid
* and the write hasn't been pushed to the server yet,
* so we can't set B_ERROR and report the interruption
* so we can't set BIO_ERROR and report the interruption
* by setting B_EINTR. For the B_ASYNC case, B_EINTR
* is not relevant, so the rpc attempt is essentially
* a noop. For the case of a V3 write rpc not being
@ -1575,7 +1579,7 @@ nfs_doio(bp, cr, p)
splx(s);
} else {
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = np->n_error = error;
np->n_flag |= NWRITEERR;
}

View File

@ -2939,7 +2939,8 @@ nfs_flush(vp, cred, waitfor, p, commit)
vp->v_numoutput++;
bp->b_flags |= B_ASYNC;
bundirty(bp);
bp->b_flags &= ~(B_DONE|B_ERROR);
bp->b_flags &= ~B_DONE;
bp->b_ioflags &= ~BIO_ERROR;
bp->b_dirtyoff = bp->b_dirtyend = 0;
splx(s);
biodone(bp);
@ -3116,7 +3117,8 @@ nfs_writebp(bp, force, procp)
s = splbio();
bundirty(bp);
bp->b_flags &= ~(B_DONE|B_ERROR);
bp->b_flags &= ~B_DONE;
bp->b_ioflags &= ~BIO_ERROR;
bp->b_iocmd = BIO_WRITE;
bp->b_vp->v_numoutput++;

View File

@ -469,7 +469,8 @@ nfs_bioread(vp, uio, ioflag, cred)
rabp->b_iocmd = BIO_READ;
vfs_busy_pages(rabp, 0);
if (nfs_asyncio(rabp, cred, p)) {
rabp->b_flags |= B_INVAL|B_ERROR;
rabp->b_flags |= B_INVAL;
rabp->b_ioflags |= BIO_ERROR;
vfs_unbusy_pages(rabp);
brelse(rabp);
break;
@ -558,7 +559,7 @@ nfs_bioread(vp, uio, ioflag, cred)
vfs_busy_pages(bp, 0);
error = nfs_doio(bp, cred, p);
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
brelse(bp);
return (error);
}
@ -653,7 +654,8 @@ nfs_bioread(vp, uio, ioflag, cred)
rabp->b_iocmd = BIO_READ;
vfs_busy_pages(rabp, 0);
if (nfs_asyncio(rabp, cred, p)) {
rabp->b_flags |= B_INVAL|B_ERROR;
rabp->b_flags |= B_INVAL;
rabp->b_ioflags |= BIO_ERROR;
vfs_unbusy_pages(rabp);
brelse(rabp);
}
@ -934,7 +936,8 @@ nfs_write(ap)
if (on == 0 && n == bcount) {
bp->b_flags |= B_CACHE;
bp->b_flags &= ~(B_ERROR | B_INVAL);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
}
if ((bp->b_flags & B_CACHE) == 0) {
@ -1034,7 +1037,7 @@ nfs_write(ap)
bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
brelse(bp);
break;
}
@ -1204,7 +1207,7 @@ nfs_vinvalbuf(vp, flags, cred, p, intrflg)
* This is mainly to avoid queueing async I/O requests when the nfsiods
* are all hung on a dead server.
*
* Note: nfs_asyncio() does not clear (B_ERROR|B_INVAL) but when the bp
* Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
* is eventually dequeued by the async daemon, nfs_doio() *will*.
*/
int
@ -1366,11 +1369,12 @@ nfs_doio(bp, cr, p)
uiop->uio_procp = p;
/*
* clear B_ERROR and B_INVAL state prior to initiating the I/O. We
* clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We
* do this here so we do not have to do it in all the code that
* calls us.
*/
bp->b_flags &= ~(B_ERROR | B_INVAL);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));
@ -1398,7 +1402,7 @@ nfs_doio(bp, cr, p)
error = nfs_writerpc(vp, uiop, cr, &iomode, &com);
}
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = error;
}
} else if (bp->b_iocmd == BIO_READ) {
@ -1466,7 +1470,7 @@ nfs_doio(bp, cr, p)
break;
};
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = error;
}
} else {
@ -1545,7 +1549,7 @@ nfs_doio(bp, cr, p)
/*
* For an interrupted write, the buffer is still valid
* and the write hasn't been pushed to the server yet,
* so we can't set B_ERROR and report the interruption
* so we can't set BIO_ERROR and report the interruption
* by setting B_EINTR. For the B_ASYNC case, B_EINTR
* is not relevant, so the rpc attempt is essentially
* a noop. For the case of a V3 write rpc not being
@ -1575,7 +1579,7 @@ nfs_doio(bp, cr, p)
splx(s);
} else {
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = np->n_error = error;
np->n_flag |= NWRITEERR;
}

View File

@ -2939,7 +2939,8 @@ nfs_flush(vp, cred, waitfor, p, commit)
vp->v_numoutput++;
bp->b_flags |= B_ASYNC;
bundirty(bp);
bp->b_flags &= ~(B_DONE|B_ERROR);
bp->b_flags &= ~B_DONE;
bp->b_ioflags &= ~BIO_ERROR;
bp->b_dirtyoff = bp->b_dirtyend = 0;
splx(s);
biodone(bp);
@ -3116,7 +3117,8 @@ nfs_writebp(bp, force, procp)
s = splbio();
bundirty(bp);
bp->b_flags &= ~(B_DONE|B_ERROR);
bp->b_flags &= ~B_DONE;
bp->b_ioflags &= ~BIO_ERROR;
bp->b_iocmd = BIO_WRITE;
bp->b_vp->v_numoutput++;

View File

@ -364,7 +364,7 @@ ntfs_strategy(ap)
if (error) {
printf("ntfs_strategy: ntfs_readattr failed\n");
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
bzero(bp->b_data + toread, bp->b_bcount - toread);
@ -376,7 +376,7 @@ ntfs_strategy(ap)
if (ntfs_cntob(bp->b_blkno) + bp->b_bcount >= fp->f_size) {
printf("ntfs_strategy: CAN'T EXTEND FILE\n");
bp->b_error = error = EFBIG;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
} else {
towrite = min(bp->b_bcount,
fp->f_size-ntfs_cntob(bp->b_blkno));
@ -390,7 +390,7 @@ ntfs_strategy(ap)
if (error) {
printf("ntfs_strategy: ntfs_writeattr fail\n");
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
}
}

View File

@ -311,7 +311,7 @@ nwfs_doio(bp, cr, p)
break;
};
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = error;
}
} else { /* write */
@ -330,7 +330,7 @@ nwfs_doio(bp, cr, p)
/*
* For an interrupted write, the buffer is still valid
* and the write hasn't been pushed to the server yet,
* so we can't set B_ERROR and report the interruption
* so we can't set BIO_ERROR and report the interruption
* by setting B_EINTR. For the B_ASYNC case, B_EINTR
* is not relevant, so the rpc attempt is essentially
* a noop. For the case of a V3 write rpc not being
@ -357,7 +357,7 @@ nwfs_doio(bp, cr, p)
splx(s);
} else {
if (error) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error /*= np->n_error */= error;
/* np->n_flag |= NWRITEERR;*/
}

View File

@ -1791,7 +1791,7 @@ fdstrategy(struct buf *bp)
fdc = fd->fdc;
if (fd->type == NO_TYPE) {
bp->b_error = ENXIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
};
@ -1802,12 +1802,12 @@ fdstrategy(struct buf *bp)
"fd%d: fdstrat: bad request blkno = %lu, bcount = %ld\n",
fdu, (u_long)bp->b_blkno, bp->b_bcount);
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
if ((bp->b_bcount % fdblk) != 0) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
}
@ -1821,7 +1821,7 @@ fdstrategy(struct buf *bp)
* multiplication below from overflowing.
*/
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
blknum = (unsigned) bp->b_blkno * DEV_BSIZE/fdblk;
@ -1835,7 +1835,7 @@ fdstrategy(struct buf *bp)
goto bad; /* not actually bad but EOF */
} else {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
}
@ -2611,7 +2611,7 @@ retrier(struct fdc_data *fdc)
else
printf(" (No status)\n");
}
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = EIO;
bp->b_resid += bp->b_bcount - fdc->fd->skip;
fdc->bp = NULL;
@ -2689,7 +2689,7 @@ fdformat(dev, finfo, p)
device_unbusy(fd->dev);
biodone(bp);
}
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
rv = bp->b_error;
/*
* allow the process to be swapped

View File

@ -2705,7 +2705,7 @@ bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
return(1);
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
return(-1);
}

View File

@ -1791,7 +1791,7 @@ fdstrategy(struct buf *bp)
fdc = fd->fdc;
if (fd->type == NO_TYPE) {
bp->b_error = ENXIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
};
@ -1802,12 +1802,12 @@ fdstrategy(struct buf *bp)
"fd%d: fdstrat: bad request blkno = %lu, bcount = %ld\n",
fdu, (u_long)bp->b_blkno, bp->b_bcount);
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
if ((bp->b_bcount % fdblk) != 0) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
}
@ -1821,7 +1821,7 @@ fdstrategy(struct buf *bp)
* multiplication below from overflowing.
*/
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
blknum = (unsigned) bp->b_blkno * DEV_BSIZE/fdblk;
@ -1835,7 +1835,7 @@ fdstrategy(struct buf *bp)
goto bad; /* not actually bad but EOF */
} else {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto bad;
}
}
@ -2611,7 +2611,7 @@ retrier(struct fdc_data *fdc)
else
printf(" (No status)\n");
}
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = EIO;
bp->b_resid += bp->b_bcount - fdc->fd->skip;
fdc->bp = NULL;
@ -2689,7 +2689,7 @@ fdformat(dev, finfo, p)
device_unbusy(fd->dev);
biodone(bp);
}
if (bp->b_flags & B_ERROR)
if (bp->b_ioflags & BIO_ERROR)
rv = bp->b_error;
/*
* allow the process to be swapped

View File

@ -2705,7 +2705,7 @@ bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
return(1);
bad:
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
return(-1);
}

View File

@ -661,7 +661,7 @@ wdstrategy(register struct buf *bp)
|| bp->b_bcount % DEV_BSIZE != 0) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
goto done;
}
@ -1124,7 +1124,7 @@ wdintr(void *unitnum)
} else {
wderror(bp, du, "hard error");
bp->b_error = EIO;
bp->b_flags |= B_ERROR; /* flag the error */
bp->b_ioflags |= BIO_ERROR; /* flag the error */
}
} else if (du->dk_status & WDCS_ECCCOR)
wderror(bp, du, "soft ecc");
@ -1133,7 +1133,7 @@ wdintr(void *unitnum)
/*
* If this was a successful read operation, fetch the data.
*/
if (bp->b_iocmd == BIO_READ && !(bp->b_flags & B_ERROR)
if (bp->b_iocmd == BIO_READ && !(bp->b_ioflags & BIO_ERROR)
&& !((du->dk_flags & (DKFL_DMA|DKFL_SINGLE)) == DKFL_DMA)
&& wdtab[unit].b_active) {
u_int chk, dummy, multisize;
@ -1175,7 +1175,7 @@ wdintr(void *unitnum)
}
/* final cleanup on DMA */
if (((bp->b_flags & B_ERROR) == 0)
if (((bp->b_ioflags & BIO_ERROR) == 0)
&& ((du->dk_flags & (DKFL_DMA|DKFL_SINGLE)) == DKFL_DMA)
&& wdtab[unit].b_active) {
int iosize;
@ -1188,7 +1188,7 @@ wdintr(void *unitnum)
outt:
if (wdtab[unit].b_active) {
if ((bp->b_flags & B_ERROR) == 0) {
if ((bp->b_ioflags & BIO_ERROR) == 0) {
du->dk_skip += du->dk_currentiosize;/* add to successful sectors */
if (wdtab[unit].b_errcnt)
wderror(bp, du, "soft error");
@ -1448,7 +1448,7 @@ wdcontrol(register struct buf *bp)
if (++wdtab[ctrlr].b_errcnt < RETRIES)
goto tryagainrecal;
bp->b_error = ENXIO; /* XXX needs translation */
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
return (2);
}
wdtab[ctrlr].b_errcnt = 0;

View File

@ -443,7 +443,7 @@ acdstrategy(struct buf *bp)
/* allow write only on CD-R/RW media */ /* all for now SOS */
if ((bp->b_iocmd == BIO_WRITE) && !(writeable_media)) {
bp->b_error = EROFS;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
}
@ -480,7 +480,7 @@ acd_start(struct acd *cdp)
/* Should reject all queued entries if media have changed. */
if (cdp->flags & F_MEDIA_CHANGED) {
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
}
@ -492,7 +492,7 @@ acd_start(struct acd *cdp)
if ((cdp->flags & F_TRACK_PREP) == 0) {
printf("wcd%d: sequence error\n", cdp->lun);
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
} else {
@ -539,7 +539,7 @@ acd_done(struct acd *cdp, struct buf *bp, int resid, struct atapires result)
if (result.code) {
atapi_error(cdp->ata, cdp->unit, result);
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
} else {
bp->b_resid = resid;
if (bp->b_iocmd == BIO_WRITE)

View File

@ -533,7 +533,7 @@ static void wfd_done (struct wfd *t, struct buf *bp, int resid,
if (result.code) {
wfd_error (t, result);
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
} else
(int)bp->b_driver2 += resid;
/*

View File

@ -397,7 +397,7 @@ wststrategy(struct buf *bp)
if (bp->b_bcount % t->blksize) {
printf("wst%d: bad request, must be multiple of %d\n", lun, t->blksize);
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return;
}
@ -477,7 +477,7 @@ wst_done(struct wst *t, struct buf *bp, int resid,
printf("wst_done: ");
wst_error(t, result);
bp->b_error = EIO;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
}
else
bp->b_resid = resid;

View File

@ -80,22 +80,26 @@ struct iodone_chain {
};
/*
* The bio structure descripes an I/O operation in the kernel.
* The bio structure describes an I/O operation in the kernel.
*/
struct bio {
u_int bio_cmd; /* BIO_READ, BIO_WRITE, BIO_DELETE */
dev_t bio_dev; /* Device to do I/O on */
u_int bio_cmd; /* I/O operation. */
dev_t bio_dev; /* Device to do I/O on. */
daddr_t bio_blkno; /* Underlying physical block number. */
u_int bio_flags; /* BIO_ORDERED, BIO_ERROR */
struct buf *__bio_buf; /* Parent buffer */
int bio_error; /* Errno for BIO_ERROR */
long bio_resid; /* Remaining I/0 in bytes */
u_int bio_flags; /* BIO_ flags. */
struct buf *_bio_buf; /* Parent buffer. */
int bio_error; /* Errno for BIO_ERROR. */
long bio_resid; /* Remaining I/O in bytes. */
void (*bio_done) __P((struct buf *));
void *bio_driver1; /* for private use by the driver */
void *bio_driver2; /* for private use by the driver */
void *bio_caller1; /* for private use by the caller */
void *bio_caller2; /* for private use by the caller */
void *bio_driver1; /* Private use by the callee. */
void *bio_driver2; /* Private use by the callee. */
void *bio_caller1; /* Private use by the caller. */
void *bio_caller2; /* Private use by the caller. */
TAILQ_ENTRY(bio) bio_queue; /* Disksort queue. */
/* XXX: these go away when bio chaining is introduced */
daddr_t bio_pblkno; /* physical block number */
struct iodone_chain *bio_done_chain;
};
/*
@ -115,20 +119,21 @@ struct bio {
* completes, b_resid is usually 0 indicating 100% success.
*/
struct buf {
struct bio b_bio; /* I/O request
* XXX: Must be first element for now
*/
#define b_iocmd b_bio.bio_cmd
#define b_ioflags b_bio.bio_flags
#define b_iodone b_bio.bio_done
#define b_error b_bio.bio_error
#define b_resid b_bio.bio_resid
#define b_blkno b_bio.bio_blkno
#define b_driver1 b_bio.bio_driver1
#define b_driver2 b_bio.bio_driver2
#define b_caller1 b_bio.bio_caller1
#define b_caller2 b_bio.bio_caller2
#define b_dev b_bio.bio_dev
/* XXX: b_io must be the first element of struct buf for now /phk */
struct bio b_io; /* "Builtin" I/O request. */
#define b_blkno b_io.bio_blkno
#define b_caller1 b_io.bio_caller1
#define b_caller2 b_io.bio_caller2
#define b_dev b_io.bio_dev
#define b_driver1 b_io.bio_driver1
#define b_driver2 b_io.bio_driver2
#define b_error b_io.bio_error
#define b_iocmd b_io.bio_cmd
#define b_iodone b_io.bio_done
#define b_iodone_chain b_io.bio_done_chain
#define b_ioflags b_io.bio_flags
#define b_pblkno b_io.bio_pblkno
#define b_resid b_io.bio_resid
LIST_ENTRY(buf) b_hash; /* Hash chain. */
TAILQ_ENTRY(buf) b_vnbufs; /* Buffer's associated vnode. */
TAILQ_ENTRY(buf) b_freelist; /* Free list position if not active. */
@ -144,14 +149,11 @@ struct buf {
int b_kvasize; /* size of kva for buffer */
daddr_t b_lblkno; /* Logical block number. */
off_t b_offset; /* Offset into file */
/* Function to call upon completion. */
struct iodone_chain *b_iodone_chain;
struct vnode *b_vp; /* Device vnode. */
int b_dirtyoff; /* Offset in buffer of dirty region. */
int b_dirtyend; /* Offset of end of dirty region. */
struct ucred *b_rcred; /* Read credentials reference. */
struct ucred *b_wcred; /* Write credentials reference. */
daddr_t b_pblkno; /* physical block number */
void *b_saveaddr; /* Original b_addr for physio. */
union pager_info {
void *pg_spc;
@ -192,7 +194,7 @@ struct buf {
* clear MUST be committed to disk by getblk() so
* B_DELWRI can also be cleared. See the comments for
* getblk() in kern/vfs_bio.c. If B_CACHE is clear,
* the caller is expected to clear B_ERROR|B_INVAL,
* the caller is expected to clear BIO_ERROR and B_INVAL,
* set BIO_READ, and initiate an I/O.
*
* The 'entire buffer' is defined to be the range from
@ -219,6 +221,9 @@ struct buf {
#define BIO_WRITE 2
#define BIO_DELETE 4
#define BIO_ERROR 0x00000001
#define BIO_ORDERED 0x00000002
#define B_AGE 0x00000001 /* Move to age queue when I/O done. */
#define B_NEEDCOMMIT 0x00000002 /* Append-write in progress. */
#define B_ASYNC 0x00000004 /* Start I/O, do not wait. */
@ -417,7 +422,7 @@ bufq_init(struct buf_queue_head *head)
static __inline void
bufq_insert_tail(struct buf_queue_head *head, struct buf *bp)
{
if ((bp->b_flags & B_ORDERED) != 0) {
if ((bp->b_ioflags & BIO_ORDERED) != 0) {
head->insert_point = bp;
head->switch_point = NULL;
}

View File

@ -80,22 +80,26 @@ struct iodone_chain {
};
/*
* The bio structure descripes an I/O operation in the kernel.
* The bio structure describes an I/O operation in the kernel.
*/
struct bio {
u_int bio_cmd; /* BIO_READ, BIO_WRITE, BIO_DELETE */
dev_t bio_dev; /* Device to do I/O on */
u_int bio_cmd; /* I/O operation. */
dev_t bio_dev; /* Device to do I/O on. */
daddr_t bio_blkno; /* Underlying physical block number. */
u_int bio_flags; /* BIO_ORDERED, BIO_ERROR */
struct buf *__bio_buf; /* Parent buffer */
int bio_error; /* Errno for BIO_ERROR */
long bio_resid; /* Remaining I/0 in bytes */
u_int bio_flags; /* BIO_ flags. */
struct buf *_bio_buf; /* Parent buffer. */
int bio_error; /* Errno for BIO_ERROR. */
long bio_resid; /* Remaining I/O in bytes. */
void (*bio_done) __P((struct buf *));
void *bio_driver1; /* for private use by the driver */
void *bio_driver2; /* for private use by the driver */
void *bio_caller1; /* for private use by the caller */
void *bio_caller2; /* for private use by the caller */
void *bio_driver1; /* Private use by the callee. */
void *bio_driver2; /* Private use by the callee. */
void *bio_caller1; /* Private use by the caller. */
void *bio_caller2; /* Private use by the caller. */
TAILQ_ENTRY(bio) bio_queue; /* Disksort queue. */
/* XXX: these go away when bio chaining is introduced */
daddr_t bio_pblkno; /* physical block number */
struct iodone_chain *bio_done_chain;
};
/*
@ -115,20 +119,21 @@ struct bio {
* completes, b_resid is usually 0 indicating 100% success.
*/
struct buf {
struct bio b_bio; /* I/O request
* XXX: Must be first element for now
*/
#define b_iocmd b_bio.bio_cmd
#define b_ioflags b_bio.bio_flags
#define b_iodone b_bio.bio_done
#define b_error b_bio.bio_error
#define b_resid b_bio.bio_resid
#define b_blkno b_bio.bio_blkno
#define b_driver1 b_bio.bio_driver1
#define b_driver2 b_bio.bio_driver2
#define b_caller1 b_bio.bio_caller1
#define b_caller2 b_bio.bio_caller2
#define b_dev b_bio.bio_dev
/* XXX: b_io must be the first element of struct buf for now /phk */
struct bio b_io; /* "Builtin" I/O request. */
#define b_blkno b_io.bio_blkno
#define b_caller1 b_io.bio_caller1
#define b_caller2 b_io.bio_caller2
#define b_dev b_io.bio_dev
#define b_driver1 b_io.bio_driver1
#define b_driver2 b_io.bio_driver2
#define b_error b_io.bio_error
#define b_iocmd b_io.bio_cmd
#define b_iodone b_io.bio_done
#define b_iodone_chain b_io.bio_done_chain
#define b_ioflags b_io.bio_flags
#define b_pblkno b_io.bio_pblkno
#define b_resid b_io.bio_resid
LIST_ENTRY(buf) b_hash; /* Hash chain. */
TAILQ_ENTRY(buf) b_vnbufs; /* Buffer's associated vnode. */
TAILQ_ENTRY(buf) b_freelist; /* Free list position if not active. */
@ -144,14 +149,11 @@ struct buf {
int b_kvasize; /* size of kva for buffer */
daddr_t b_lblkno; /* Logical block number. */
off_t b_offset; /* Offset into file */
/* Function to call upon completion. */
struct iodone_chain *b_iodone_chain;
struct vnode *b_vp; /* Device vnode. */
int b_dirtyoff; /* Offset in buffer of dirty region. */
int b_dirtyend; /* Offset of end of dirty region. */
struct ucred *b_rcred; /* Read credentials reference. */
struct ucred *b_wcred; /* Write credentials reference. */
daddr_t b_pblkno; /* physical block number */
void *b_saveaddr; /* Original b_addr for physio. */
union pager_info {
void *pg_spc;
@ -192,7 +194,7 @@ struct buf {
* clear MUST be committed to disk by getblk() so
* B_DELWRI can also be cleared. See the comments for
* getblk() in kern/vfs_bio.c. If B_CACHE is clear,
* the caller is expected to clear B_ERROR|B_INVAL,
* the caller is expected to clear BIO_ERROR and B_INVAL,
* set BIO_READ, and initiate an I/O.
*
* The 'entire buffer' is defined to be the range from
@ -219,6 +221,9 @@ struct buf {
#define BIO_WRITE 2
#define BIO_DELETE 4
#define BIO_ERROR 0x00000001
#define BIO_ORDERED 0x00000002
#define B_AGE 0x00000001 /* Move to age queue when I/O done. */
#define B_NEEDCOMMIT 0x00000002 /* Append-write in progress. */
#define B_ASYNC 0x00000004 /* Start I/O, do not wait. */
@ -417,7 +422,7 @@ bufq_init(struct buf_queue_head *head)
static __inline void
bufq_insert_tail(struct buf_queue_head *head, struct buf *bp)
{
if ((bp->b_flags & B_ORDERED) != 0) {
if ((bp->b_ioflags & BIO_ORDERED) != 0) {
head->insert_point = bp;
head->switch_point = NULL;
}

View File

@ -448,7 +448,8 @@ ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
if ((bp->b_flags & B_CACHE) == 0) {
curproc->p_stats->p_ru.ru_inblock++; /* pay for read */
bp->b_iocmd = BIO_READ;
bp->b_flags &= ~(B_ERROR|B_INVAL);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
if (bp->b_bcount > bp->b_bufsize)
panic("ffs_indirtrunc: bad buffer size");
bp->b_blkno = dbn;

View File

@ -4667,7 +4667,7 @@ softdep_deallocate_dependencies(bp)
struct buf *bp;
{
if ((bp->b_flags & B_ERROR) == 0)
if ((bp->b_ioflags & BIO_ERROR) == 0)
panic("softdep_deallocate_dependencies: dangling deps");
softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error);
panic("softdep_deallocate_dependencies: unrecovered I/O error");

View File

@ -276,7 +276,7 @@ mfs_doio(bp, mfsp)
bp->b_error = copyout(bp->b_data, base, bp->b_bcount);
}
if (bp->b_error)
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
}

View File

@ -193,7 +193,8 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp, runb)
#endif
bp->b_blkno = blkptrtodb(ump, daddr);
bp->b_iocmd = BIO_READ;
bp->b_flags &= ~(B_INVAL|B_ERROR);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
vfs_busy_pages(bp, 0);
BUF_STRATEGY(bp);
curproc->p_stats->p_ru.ru_inblock++; /* XXX */

View File

@ -77,7 +77,7 @@ bufqdisksort(bufq, bp)
* ordered transaction, then it's easy.
*/
if ((bq = bufq_first(bufq)) == NULL
|| (bp->b_flags & B_ORDERED) != 0) {
|| (bp->b_ioflags & BIO_ORDERED) != 0) {
bufq_insert_tail(bufq, bp);
return;
} else if (bufq->insert_point != NULL) {

View File

@ -1764,7 +1764,7 @@ ufs_strategy(ap)
error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
if (error) {
bp->b_error = error;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return (error);
}

View File

@ -824,7 +824,8 @@ swap_pager_strategy(vm_object_t object, struct buf *bp)
if (bp->b_bcount & PAGE_MASK) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR | B_INVAL;
bp->b_ioflags |= BIO_ERROR;
bp->b_flags |= B_INVAL;
biodone(bp);
printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, not page bounded\n", bp, bp->b_vp, (int)bp->b_pblkno, (int)bp->b_bcount);
return;
@ -835,7 +836,7 @@ swap_pager_strategy(vm_object_t object, struct buf *bp)
*/
bp->b_error = 0;
bp->b_flags &= ~B_ERROR;
bp->b_ioflags &= ~BIO_ERROR;
bp->b_resid = bp->b_bcount;
start = bp->b_pblkno;
@ -877,7 +878,7 @@ swap_pager_strategy(vm_object_t object, struct buf *bp)
blk = swp_pager_getswapspace(1);
if (blk == SWAPBLK_NONE) {
bp->b_error = ENOMEM;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
break;
}
swp_pager_meta_build(object, start, blk);
@ -1478,7 +1479,7 @@ swp_pager_async_iodone(bp)
* report error
*/
if (bp->b_flags & B_ERROR) {
if (bp->b_ioflags & BIO_ERROR) {
printf(
"swap_pager: I/O error - %s failed; blkno %ld,"
"size %ld, error %d\n",
@ -1517,7 +1518,7 @@ swp_pager_async_iodone(bp)
vm_page_flag_clear(m, PG_SWAPINPROG);
if (bp->b_flags & B_ERROR) {
if (bp->b_ioflags & BIO_ERROR) {
/*
* If an error occurs I'd love to throw the swapblk
* away without freeing it back to swapspace, so it

View File

@ -267,7 +267,7 @@ vm_pager_strategy(vm_object_t object, struct buf *bp)
if (pagertab[object->type]->pgo_strategy) {
(*pagertab[object->type]->pgo_strategy)(object, bp);
} else {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = ENXIO;
biodone(bp);
}
@ -348,6 +348,7 @@ initpbuf(struct buf *bp)
bp->b_kvasize = MAXPHYS;
bp->b_xflags = 0;
bp->b_flags = 0;
bp->b_ioflags = 0;
bp->b_iodone = NULL;
bp->b_error = 0;
BUF_LOCK(bp, LK_EXCLUSIVE);
@ -496,11 +497,11 @@ vm_pager_chain_iodone(struct buf *nbp)
struct buf *bp;
if ((bp = nbp->b_chain.parent) != NULL) {
if (nbp->b_flags & B_ERROR) {
bp->b_flags |= B_ERROR;
if (nbp->b_ioflags & BIO_ERROR) {
bp->b_ioflags |= BIO_ERROR;
bp->b_error = nbp->b_error;
} else if (nbp->b_resid != 0) {
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
bp->b_error = EINVAL;
} else {
bp->b_resid -= nbp->b_bcount;
@ -513,8 +514,8 @@ vm_pager_chain_iodone(struct buf *nbp)
}
if (!bp->b_chain.count && (bp->b_flags & B_AUTOCHAINDONE)) {
bp->b_flags &= ~B_AUTOCHAINDONE;
if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
bp->b_flags |= B_ERROR;
if (bp->b_resid != 0 && !(bp->b_ioflags & BIO_ERROR)) {
bp->b_ioflags |= BIO_ERROR;
bp->b_error = EINVAL;
}
biodone(bp);
@ -531,9 +532,6 @@ vm_pager_chain_iodone(struct buf *nbp)
* Obtain a physical buffer and chain it to its parent buffer. When
* I/O completes, the parent buffer will be B_SIGNAL'd. Errors are
* automatically propagated to the parent
*
* Since these are brand new buffers, we do not have to clear B_INVAL
* and B_ERROR because they are already clear.
*/
struct buf *
@ -547,7 +545,8 @@ getchainbuf(struct buf *bp, struct vnode *vp, int flags)
if (bp->b_chain.count > 4)
waitchainbuf(bp, 4, 0);
nbp->b_flags = (bp->b_flags & B_ORDERED) | flags;
nbp->b_ioflags = bp->b_ioflags & BIO_ORDERED;
nbp->b_flags = flags;
nbp->b_rcred = nbp->b_wcred = proc0.p_ucred;
nbp->b_iodone = vm_pager_chain_iodone;
@ -584,8 +583,8 @@ waitchainbuf(struct buf *bp, int count, int done)
tsleep(bp, PRIBIO + 4, "bpchain", 0);
}
if (done) {
if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
bp->b_flags |= B_ERROR;
if (bp->b_resid != 0 && !(bp->b_ioflags & BIO_ERROR)) {
bp->b_ioflags |= BIO_ERROR;
bp->b_error = EINVAL;
}
biodone(bp);

View File

@ -104,7 +104,7 @@ swapdev_strategy(ap)
off = bp->b_blkno % dmmax;
if (off + sz > dmmax) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return 0;
}
@ -118,14 +118,14 @@ swapdev_strategy(ap)
sp = &swdevt[index];
if (bp->b_blkno + sz > sp->sw_nblks) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return 0;
}
bp->b_dev = sp->sw_device;
if (sp->sw_vp == NULL) {
bp->b_error = ENODEV;
bp->b_flags |= B_ERROR;
bp->b_ioflags |= BIO_ERROR;
biodone(bp);
return 0;
}

View File

@ -434,7 +434,7 @@ vnode_pager_input_smlfs(object, m)
tsleep(bp, PVM, "vnsrd", 0);
}
splx(s);
if ((bp->b_flags & B_ERROR) != 0)
if ((bp->b_ioflags & BIO_ERROR) != 0)
error = EIO;
/*
@ -755,7 +755,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
tsleep(bp, PVM, "vnread", 0);
}
splx(s);
if ((bp->b_flags & B_ERROR) != 0)
if ((bp->b_ioflags & BIO_ERROR) != 0)
error = EIO;
if (!error) {