Minor clean up to xbd_queue_cb:

* nsegs must be at most BLKIF_MAX_SEGMENTS_PER_REQUEST (since we specify
  that limit to bus_dma_tag_create), so KASSERT that rather than silently
  adjusting the request.
* block_segs is now a synonym for nsegs, so garbage collect that variable.
* nsegs is never read during or after the while loop, so remove the dead
  decrement from the loop.

These were all left behind from the pre-r284296 support for a "segment
block" extension.
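The KASSERT in the first bullet is safe because the mappings handed to
xbd_queue_cb come from a busdma tag whose nsegments argument is set to
BLKIF_MAX_SEGMENTS_PER_REQUEST, so the load callback can never be invoked
with more segments than that. Below is a minimal sketch of such a tag
setup; the softc field names (xbd_io_tag, xbd_io_lock) and the alignment,
boundary, and size values are illustrative, not the exact ones used in
blkfront.c.

	/*
	 * Sketch only: the nsegments argument of bus_dma_tag_create()
	 * caps how many bus_dma_segment_t entries any bus_dmamap_load*()
	 * callback (such as xbd_queue_cb) will ever receive.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->xbd_dev),		/* parent */
	    512, PAGE_SIZE,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,				/* lowaddr */
	    BUS_SPACE_MAXADDR,				/* highaddr */
	    NULL, NULL,					/* filter, filterarg */
	    BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE,	/* maxsize */
	    BLKIF_MAX_SEGMENTS_PER_REQUEST,		/* nsegments */
	    PAGE_SIZE,					/* maxsegsize */
	    BUS_DMA_ALLOCNOW,				/* flags */
	    busdma_lock_mutex, &sc->xbd_io_lock,	/* lockfunc, lockarg */
	    &sc->xbd_io_tag);				/* dmat */
	if (error != 0)
		return (error);

With the segment count bounded at tag-creation time, clamping nsegs again
with MIN() inside the callback is redundant, which is why the assertion
plus a direct "last_block_sg = sg + nsegs" suffices.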
commit 87997fdb5d
parent 200a3fb372
@@ -168,7 +168,6 @@ xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 	uint64_t fsect, lsect;
 	int ref;
 	int op;
-	int block_segs;
 
 	cm = arg;
 	sc = cm->cm_sc;
@@ -180,6 +179,9 @@ xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 		return;
 	}
 
+	KASSERT(nsegs <= BLKIF_MAX_SEGMENTS_PER_REQUEST,
+	    ("Too many segments in a blkfront I/O"));
+
 	/* Fill out a communications ring structure. */
 	ring_req = RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
 	sc->xbd_ring.req_prod_pvt++;
@@ -190,9 +192,8 @@ xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 	ring_req->nr_segments = nsegs;
 	cm->cm_nseg = nsegs;
 
-	block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_REQUEST);
 	sg = ring_req->seg;
-	last_block_sg = sg + block_segs;
+	last_block_sg = sg + nsegs;
 	sg_ref = cm->cm_sg_refs;
 
 	while (sg < last_block_sg) {
@@ -227,7 +228,6 @@ xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 		sg++;
 		sg_ref++;
 		segs++;
-		nsegs--;
 	}
 
 	if (cm->cm_operation == BLKIF_OP_READ)