Remove code left over from the armv4 days. On armv4, cache maintenance
operations always had to be aligned and sized to cache lines.  On armv6
and later, cache maintenance operates on a cache line if any part of
the line is referenced in the operation, so we don't need extra code to
align the edges of the sync range.
Ian Lepore 2015-08-20 19:39:15 +00:00
parent a715badd2e
commit e178d0d43e
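
For context, here is a minimal standalone sketch (C, not FreeBSD source) of the cache-line rounding that the removed armv4-era code performed before each invalidate.  The helper name and the hard-coded 32-byte line size are illustrative assumptions; arm_dcache_align and arm_dcache_align_mask correspond to the identifiers visible in the diff below.

#include <stddef.h>
#include <stdint.h>

/* Illustrative line size for the sketch; the kernel determines the real value at runtime. */
#define ARM_DCACHE_ALIGN	32u
#define ARM_DCACHE_ALIGN_MASK	(ARM_DCACHE_ALIGN - 1u)

/* Round [vaddr, vaddr + len) outward so it covers whole cache lines. */
static void
round_to_cachelines(uintptr_t vaddr, size_t len,
    uintptr_t *startv, size_t *rlen)
{
	*startv = vaddr & ~(uintptr_t)ARM_DCACHE_ALIGN_MASK;	/* round start down to a line boundary */
	if (*startv != vaddr)
		len += vaddr & ARM_DCACHE_ALIGN_MASK;		/* add the head fragment */
	if (len & ARM_DCACHE_ALIGN_MASK)			/* round the length up to a full line */
		len = (len - (len & ARM_DCACHE_ALIGN_MASK)) + ARM_DCACHE_ALIGN;
	*rlen = len;
}

On armv6 and later the invalidate primitives already act on every line the range touches, so the new code in the diff simply passes bpage->vaddr, bpage->busaddr, and bpage->datacount straight through.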

@@ -1381,22 +1381,11 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 	 */
 	if (op & BUS_DMASYNC_POSTREAD) {
 		while (bpage != NULL) {
-			vm_offset_t startv;
-			vm_paddr_t startp;
-			int len;
-
-			startv = bpage->vaddr &~ arm_dcache_align_mask;
-			startp = bpage->busaddr &~ arm_dcache_align_mask;
-			len = bpage->datacount;
-
-			if (startv != bpage->vaddr)
-				len += bpage->vaddr & arm_dcache_align_mask;
-			if (len & arm_dcache_align_mask)
-				len = (len -
-				    (len & arm_dcache_align_mask)) +
-				    arm_dcache_align;
-			l2cache_inv_range(startv, startp, len);
-			cpu_dcache_inv_range(startv, len);
+			l2cache_inv_range((vm_offset_t)bpage->vaddr,
+			    (vm_offset_t)bpage->busaddr,
+			    bpage->datacount);
+			cpu_dcache_inv_range((vm_offset_t)bpage->vaddr,
+			    bpage->datacount);
 			if (bpage->datavaddr != 0)
 				bcopy((void *)bpage->vaddr,
 				    (void *)bpage->datavaddr,