ARM: Fix dma_dcache_sync() for early allocated memory.

Drivers can request DMA to buffers that are not in memory represented
in the vm page arrays. Because of this, store KVA of already mapped
buffer to synclist and use it in dma_dcache_sync().

Reviewed by:	jah
Approved by:	kib (mentor)
Differential Revision: https://reviews.freebsd.org/D4120
Author: Michal Meloun
Date:   2015-11-18 16:07:01 +00:00
Commit: bb12cf0137 (parent: cc2ccfbe20)
@@ -117,8 +117,8 @@ struct bounce_page {
struct sync_list {
vm_offset_t vaddr; /* kva of client data */
bus_addr_t paddr; /* physical address */
vm_page_t pages; /* starting page of client data */
vm_offset_t dataoffs; /* page offset of client data */
bus_size_t datacount; /* client data count */
};
@@ -1076,17 +1076,19 @@ _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
sgsize);
} else {
if (map->sync_count > 0)
sl_end = VM_PAGE_TO_PHYS(sl->pages) +
sl->dataoffs + sl->datacount;
sl_end = sl->paddr + sl->datacount;
if (map->sync_count == 0 || curaddr != sl_end) {
if (++map->sync_count > dmat->nsegments)
break;
sl++;
sl->vaddr = 0;
sl->paddr = curaddr;
sl->datacount = sgsize;
sl->pages = PHYS_TO_VM_PAGE(curaddr);
sl->dataoffs = curaddr & PAGE_MASK;
KASSERT(sl->pages != NULL,
("%s: page at PA:0x%08lx is not in "
"vm_page_array", __func__, curaddr));
} else
sl->datacount += sgsize;
}
@@ -1188,8 +1190,7 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
sgsize);
} else {
if (map->sync_count > 0) {
sl_pend = VM_PAGE_TO_PHYS(sl->pages) +
sl->dataoffs + sl->datacount;
sl_pend = sl->paddr + sl->datacount;
sl_vend = sl->vaddr + sl->datacount;
}
@@ -1201,9 +1202,17 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
goto cleanup;
sl++;
sl->vaddr = kvaddr;
sl->paddr = curaddr;
if (kvaddr != 0) {
sl->pages = NULL;
} else {
sl->pages = PHYS_TO_VM_PAGE(curaddr);
KASSERT(sl->pages != NULL,
("%s: page at PA:0x%08lx is not "
"in vm_page_array", __func__,
curaddr));
}
sl->datacount = sgsize;
sl->pages = PHYS_TO_VM_PAGE(curaddr);
sl->dataoffs = curaddr & PAGE_MASK;
} else
sl->datacount += sgsize;
}
@@ -1299,10 +1308,10 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
vm_offset_t va, tempva;
bus_size_t size;
offset = sl->dataoffs;
offset = sl->paddr & PAGE_MASK;
m = sl->pages;
size = sl->datacount;
pa = VM_PAGE_TO_PHYS(m) | offset;
pa = sl->paddr;
for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) {
tempva = 0;
@@ -1310,13 +1319,13 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
len = min(PAGE_SIZE - offset, size);
tempva = pmap_quick_enter_page(m);
va = tempva | offset;
KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset),
("unexpected vm_page_t phys: 0x%08x != 0x%08x",
VM_PAGE_TO_PHYS(m) | offset, pa));
} else {
len = sl->datacount;
va = sl->vaddr;
}
KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset),
("unexpected vm_page_t phys: 0x%08x != 0x%08x",
VM_PAGE_TO_PHYS(m) | offset, pa));
switch (op) {
case BUS_DMASYNC_PREWRITE: