Use pmap_quick* functions in arm64 busdma to make bounce buffer synchronization more flexible and avoid borrowing UVAs for userspace buffers. This is mostly equivalent to r286785 and r286787 for x86.

Differential Revision:	https://reviews.freebsd.org/D3870
Jason A. Harmening 2015-10-21 19:44:20 +00:00
parent 2bd58a9fa5
commit d394b026f6

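The heart of the change is the copy path in bounce_bus_dmamap_sync(): when the client buffer was loaded from a user pmap it has no kernel virtual address, so instead of copying through a borrowed user VA, the client's physical page is mapped through a CPU-private quick mapping for just the duration of the bcopy(). Below is a condensed sketch of that pattern using the bounce_page fields introduced by this diff; the helper name bounce_copy_prewrite() is illustrative only and is not part of the commit.

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

/*
 * Sketch only: copy one client page into its bounce buffer for a
 * BUS_DMASYNC_PREWRITE, borrowing a CPU-private quick mapping when the
 * client data has no KVA (e.g. a userspace buffer).  struct bounce_page
 * is the file-local structure shown in the first hunk of the diff;
 * bounce_copy_prewrite() is a hypothetical helper, not commit code.
 */
static void
bounce_copy_prewrite(struct bounce_page *bpage)
{
	vm_offset_t datavaddr, tempvaddr;

	tempvaddr = 0;
	datavaddr = bpage->datavaddr;
	if (datavaddr == 0) {
		/* No usable KVA: temporarily map the client's page. */
		tempvaddr = pmap_quick_enter_page(bpage->datapage);
		datavaddr = tempvaddr | bpage->dataoffs;
	}
	bcopy((void *)datavaddr, (void *)bpage->vaddr, bpage->datacount);
	if (tempvaddr != 0)
		pmap_quick_remove_page(tempvaddr);
}

The quick mapping is intended to be short-lived and CPU-private, which is why the diff tears each mapping down immediately after its bcopy() rather than batching the copies across bounce pages.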

@@ -78,7 +78,8 @@ struct bounce_page {
 	vm_offset_t	vaddr;		/* kva of bounce buffer */
 	bus_addr_t	busaddr;	/* Physical address */
 	vm_offset_t	datavaddr;	/* kva of client data */
-	bus_addr_t	dataaddr;	/* client physical address */
+	vm_page_t	datapage;	/* physical page of client data */
+	vm_offset_t	dataoffs;	/* page offset of client data */
 	bus_size_t	datacount;	/* client data count */
 	STAILQ_ENTRY(bounce_page) links;
 };
@@ -478,7 +479,8 @@ _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
 	while (buflen != 0) {
 		sgsize = MIN(buflen, dmat->common.maxsegsz);
 		if (bus_dma_run_filter(&dmat->common, curaddr)) {
-			sgsize = MIN(sgsize, PAGE_SIZE);
+			sgsize = MIN(sgsize,
+			    PAGE_SIZE - (curaddr & PAGE_MASK));
 			map->pagesneeded++;
 		}
 		curaddr += sgsize;
@@ -632,7 +634,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
 		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
-			sgsize = MIN(sgsize, PAGE_SIZE);
+			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
 			curaddr = add_bounce_page(dmat, map, 0, curaddr,
 			    sgsize);
 		}
@@ -661,7 +663,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 {
 	bus_size_t sgsize, max_sgsize;
 	bus_addr_t curaddr;
-	vm_offset_t vaddr;
+	vm_offset_t kvaddr, vaddr;
 	int error;
 
 	if (map == NULL)
@@ -684,22 +686,25 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 		/*
 		 * Get the physical address for this segment.
 		 */
-		if (pmap == kernel_pmap)
+		if (pmap == kernel_pmap) {
 			curaddr = pmap_kextract(vaddr);
-		else
+			kvaddr = vaddr;
+		} else {
 			curaddr = pmap_extract(pmap, vaddr);
+			kvaddr = 0;
+		}
 
 		/*
 		 * Compute the segment size, and adjust counts.
 		 */
 		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
-		sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
+		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
 		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
 			sgsize = roundup2(sgsize, dmat->common.alignment);
 			sgsize = MIN(sgsize, max_sgsize);
-			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
+			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
 			    sgsize);
 		} else {
 			sgsize = MIN(sgsize, max_sgsize);
@@ -760,6 +765,10 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
     bus_dmasync_op_t op)
 {
 	struct bounce_page *bpage;
+	vm_offset_t datavaddr, tempvaddr;
+
+	if ((bpage = STAILQ_FIRST(&map->bpages)) == NULL)
+		return;
 
 	/*
 	 * XXX ARM64TODO:
@@ -768,27 +777,24 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 	 * added to this function.
 	 */
-	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
 	/*
 	 * Handle data bouncing. We might also
 	 * want to add support for invalidating
 	 * the caches on broken hardware
 	 */
 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
-		    "performing bounce", __func__, dmat,
-		    dmat->common.flags, op);
+	    "performing bounce", __func__, dmat, dmat->common.flags, op);
 
 	if ((op & BUS_DMASYNC_PREWRITE) != 0) {
 		while (bpage != NULL) {
-				if (bpage->datavaddr != 0) {
-					bcopy((void *)bpage->datavaddr,
-					    (void *)bpage->vaddr,
-					    bpage->datacount);
-				} else {
-					physcopyout(bpage->dataaddr,
-					    (void *)bpage->vaddr,
-					    bpage->datacount);
-				}
+			tempvaddr = 0;
+			datavaddr = bpage->datavaddr;
+			if (datavaddr == 0) {
+				tempvaddr =
+				    pmap_quick_enter_page(bpage->datapage);
+				datavaddr = tempvaddr | bpage->dataoffs;
+			}
+			bcopy((void *)datavaddr,
+			    (void *)bpage->vaddr, bpage->datacount);
+			if (tempvaddr != 0)
+				pmap_quick_remove_page(tempvaddr);
 			bpage = STAILQ_NEXT(bpage, links);
 		}
 		dmat->bounce_zone->total_bounced++;
@@ -796,21 +802,24 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
 		while (bpage != NULL) {
-				if (bpage->datavaddr != 0) {
-					bcopy((void *)bpage->vaddr,
-					    (void *)bpage->datavaddr,
-					    bpage->datacount);
-				} else {
-					physcopyin((void *)bpage->vaddr,
-					    bpage->dataaddr,
-					    bpage->datacount);
-				}
+			tempvaddr = 0;
+			datavaddr = bpage->datavaddr;
+			if (datavaddr == 0) {
+				tempvaddr =
+				    pmap_quick_enter_page(bpage->datapage);
+				datavaddr = tempvaddr | bpage->dataoffs;
+			}
+			bcopy((void *)bpage->vaddr,
+			    (void *)datavaddr, bpage->datacount);
+			if (tempvaddr != 0)
+				pmap_quick_remove_page(tempvaddr);
 			bpage = STAILQ_NEXT(bpage, links);
 		}
 		dmat->bounce_zone->total_bounced++;
 	}
-	}
 }
 
 static void
 init_bounce_pages(void *dummy __unused)
@@ -1000,7 +1009,9 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
 		bpage->busaddr |= addr & PAGE_MASK;
 	}
 	bpage->datavaddr = vaddr;
-	bpage->dataaddr = addr;
+	/* PHYS_TO_VM_PAGE() will truncate unaligned addresses. */
+	bpage->datapage = PHYS_TO_VM_PAGE(addr);
+	bpage->dataoffs = addr & PAGE_MASK;
 	bpage->datacount = size;
 	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
 	return (bpage->busaddr);