x86/bounce: try to always completely fill bounce pages

Current code doesn't make use of the full page when bouncing, because the
size is only expanded to a multiple of the alignment. Instead, try to
always create segments of PAGE_SIZE when using bounce pages.

This allows us to remove the special-casing done for
BUS_DMA_KEEP_PG_OFFSET, since the requirement is only that the offsets
into contiguous segments stay aligned, and this is now done by default.

Sponsored by:		Citrix Systems R&D
Reviewed by:		hps, kib
Differential Revision:	https://reviews.freebsd.org/D4119
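
For context, the sizing change at the heart of this commit can be
summarized as follows (a minimal before/after sketch paraphrasing the
hunks below, using the variable names from the load paths in the diff):

	/* Before: only round the bounced segment up to the tag's alignment. */
	sgsize = roundup2(sgsize, dmat->common.alignment);
	sgsize = MIN(sgsize, max_sgsize);

	/* After: size the bounced segment to fill a whole bounce page. */
	sgsize = MIN(PAGE_SIZE, max_sgsize);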
commit a7285da666
parent c8cfbc066f
Author: Roger Pau Monné
Date:   2015-12-15 10:07:03 +00:00
--- a/sys/x86/x86/busdma_bounce.c
+++ b/sys/x86/x86/busdma_bounce.c

@@ -476,8 +476,7 @@ _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
 		while (buflen != 0) {
 			sgsize = MIN(buflen, dmat->common.maxsegsz);
 			if (bus_dma_run_filter(&dmat->common, curaddr)) {
-				sgsize = MIN(sgsize,
-				    PAGE_SIZE - (curaddr & PAGE_MASK));
+				sgsize = MIN(PAGE_SIZE, sgsize);
 				map->pagesneeded++;
 			}
 			curaddr += sgsize;
@@ -517,8 +516,7 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
 			else
 				paddr = pmap_extract(pmap, vaddr);
 			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
-				sg_len = roundup2(sg_len,
-				    dmat->common.alignment);
+				sg_len = PAGE_SIZE;
 				map->pagesneeded++;
 			}
 			vaddr += sg_len;
@@ -554,9 +552,7 @@ _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
 			max_sgsize = MIN(buflen, dmat->common.maxsegsz);
 			sg_len = MIN(sg_len, max_sgsize);
 			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
-				sg_len = roundup2(sg_len,
-				    dmat->common.alignment);
-				sg_len = MIN(sg_len, max_sgsize);
+				sg_len = MIN(PAGE_SIZE, max_sgsize);
 				KASSERT((sg_len & (dmat->common.alignment - 1))
 				    == 0, ("Segment size is not aligned"));
 				map->pagesneeded++;
@@ -652,7 +648,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
     int *segp)
 {
 	bus_size_t sgsize;
-	bus_addr_t curaddr;
+	bus_addr_t curaddr, nextaddr;
 	int error;
 
 	if (map == NULL)
@@ -676,9 +672,12 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
 		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
-			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
-			curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
-			    sgsize);
+			nextaddr = 0;
+			sgsize = MIN(PAGE_SIZE, sgsize);
+			if ((curaddr & PAGE_MASK) + sgsize > PAGE_SIZE)
+				nextaddr = roundup2(curaddr, PAGE_SIZE);
+			curaddr = add_bounce_page(dmat, map, 0, curaddr,
+			    nextaddr, sgsize);
 		}
 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
 		    segp);
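
The interesting new logic is the page-crossing check: a full-page bounce
segment that starts at a non-zero page offset necessarily spills into a
second physical page, and nextaddr records where that second page begins.
A standalone sketch of the arithmetic (not FreeBSD code; the address
below is made up):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE	UINT64_C(4096)
	#define PAGE_MASK	(PAGE_SIZE - 1)
	/* Same power-of-two round-up that the kernel's roundup2() performs. */
	#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

	int
	main(void)
	{
		uint64_t curaddr = UINT64_C(0x12345f00);	/* made-up address */
		uint64_t sgsize = PAGE_SIZE;	/* full-page bounce segment */
		uint64_t nextaddr = 0;

		/* Mirrors the check added above: does the segment cross a page? */
		if ((curaddr & PAGE_MASK) + sgsize > PAGE_SIZE)
			nextaddr = roundup2(curaddr, PAGE_SIZE);

		/* Prints 0x12345000 and 0x12346000: two source pages are used. */
		printf("segment spans pages %#jx and %#jx\n",
		    (uintmax_t)(curaddr & ~PAGE_MASK), (uintmax_t)nextaddr);
		return (0);
	}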
@@ -744,8 +743,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
-			sgsize = roundup2(sgsize, dmat->common.alignment);
-			sgsize = MIN(sgsize, max_sgsize);
+			sgsize = MIN(PAGE_SIZE, max_sgsize);
 			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
 			    sgsize);
 		} else {
@@ -774,17 +772,6 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
 	int error, page_index;
 	bus_size_t sgsize, max_sgsize;
 
-	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
-		/*
-		 * If we have to keep the offset of each page this function
-		 * is not suitable, switch back to bus_dmamap_load_ma_triv
-		 * which is going to do the right thing in this case.
-		 */
-		error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
-		    flags, segs, segp);
-		return (error);
-	}
-
 	if (map == NULL)
 		map = &nobounce_dmamap;
@@ -811,10 +798,7 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
 		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, paddr)) {
-			sgsize = roundup2(sgsize, dmat->common.alignment);
-			sgsize = MIN(sgsize, max_sgsize);
-			KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
-			    ("Segment size is not aligned"));
+			sgsize = MIN(PAGE_SIZE, max_sgsize);
 			/*
 			 * Check if two pages of the user provided buffer
 			 * are used.
@@ -1175,13 +1159,6 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
 	bz->active_bpages++;
 	mtx_unlock(&bounce_lock);
 
-	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
-		/* Page offset needs to be preserved. */
-		bpage->vaddr |= addr1 & PAGE_MASK;
-		bpage->busaddr |= addr1 & PAGE_MASK;
-		KASSERT(addr2 == 0,
-		    ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
-	}
 	bpage->datavaddr = vaddr;
 	bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
 	KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
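
With the KEEP_PG_OFFSET special case gone, a single bounce page may now
shadow a source segment that spans two physical pages: addr1 carries the
offset into the first page, and addr2 (when non-zero) is the page-aligned
start of the second, as the KASSERT above checks. A hypothetical sketch
of the resulting copy-in, assuming both source pages are mapped
(bounce_copyin_split, page0, page1 and off are illustrative names, not
the FreeBSD implementation):

	#include <stddef.h>
	#include <string.h>

	#define PAGE_SIZE 4096

	/*
	 * Hypothetical sketch, not the kernel code: fill a contiguous
	 * bounce buffer from a segment that starts 'off' bytes into its
	 * first source page and continues at the start of a second one.
	 */
	void
	bounce_copyin_split(char *bounce, const char *page0, const char *page1,
	    size_t off, size_t len)
	{
		size_t first = len < PAGE_SIZE - off ? len : PAGE_SIZE - off;

		memcpy(bounce, page0 + off, first);	/* tail of first page */
		if (len > first)			/* head of second page */
			memcpy(bounce + first, page1, len - first);
	}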
@@ -1201,15 +1178,6 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
 	bz = dmat->bounce_zone;
 	bpage->datavaddr = 0;
 	bpage->datacount = 0;
-	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
-		/*
-		 * Reset the bounce page to start at offset 0. Other uses
-		 * of this bounce page may need to store a full page of
-		 * data and/or assume it starts on a page boundary.
-		 */
-		bpage->vaddr &= ~PAGE_MASK;
-		bpage->busaddr &= ~PAGE_MASK;
-	}
 	mtx_lock(&bounce_lock);
 	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);