If our buffer is not aligned to the cache line size, write back and invalidate
the first and last cache lines in PREREAD, and only invalidate the cache lines in POSTREAD, instead of writing back and invalidating in POSTREAD, which could let stale cached data overwrite what has just been transferred by DMA.
parent 866fcf84ba
commit 4cd3385ee3
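As a rough, standalone illustration of the alignment arithmetic the change below relies on (the 32-byte line size and the buffer address and length are assumed values for the demo; the kernel takes them from arm_dcache_align and arm_dcache_align_mask):

/*
 * Illustration only, not part of the commit: an unaligned buffer shares
 * its first and last cache lines with neighbouring data, so those two
 * lines are written back and invalidated in PREREAD, while POSTREAD
 * only needs a plain invalidate of the whole range.
 */
#include <stdio.h>

int
main(void)
{
        unsigned long align = 32;       /* assumed cache line size */
        unsigned long mask = align - 1; /* stands in for arm_dcache_align_mask */
        unsigned long buf = 0x1010;     /* made-up buffer start */
        unsigned long len = 100;        /* made-up buffer length */

        if (buf & mask)         /* buffer starts in the middle of a line */
                printf("PREREAD wbinv first line: 0x%lx-0x%lx\n",
                    buf & ~mask, (buf & ~mask) + align - 1);
        if ((buf + len) & mask) /* buffer ends in the middle of a line */
                printf("PREREAD wbinv last line:  0x%lx-0x%lx\n",
                    (buf + len) & ~mask, ((buf + len) & ~mask) + align - 1);
        printf("POSTREAD inv whole buffer: 0x%lx-0x%lx\n",
            buf, buf + len - 1);
        return (0);
}

The lines wholly inside the buffer carry nothing worth preserving, so invalidating them is enough; only the two partial lines can hold unrelated dirty data that must be written back before the device starts writing.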
@@ -806,13 +806,16 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
 
 	if (op & BUS_DMASYNC_PREWRITE)
 		cpu_dcache_wb_range((vm_offset_t)buf, len);
-	if (op & BUS_DMASYNC_POSTREAD) {
-		if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
-			cpu_dcache_inv_range((vm_offset_t)buf, len);
-		else
-			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
-	}
+	if (op & BUS_DMASYNC_PREREAD) {
+		if ((vm_offset_t)buf & arm_dcache_align_mask)
+			cpu_dcache_wbinv_range((vm_offset_t)buf &
+			    ~arm_dcache_align_mask, arm_dcache_align);
+		if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
+			cpu_dcache_wbinv_range(((vm_offset_t)buf + len) &
+			    ~arm_dcache_align_mask, arm_dcache_align);
+	}
+	if (op & BUS_DMASYNC_POSTREAD)
+		cpu_dcache_inv_range((vm_offset_t)buf, len);
 }
 
 void
@@ -823,7 +826,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 	int resid;
 	struct iovec *iov;
 
-	if (!(op & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)))
+	if (op == BUS_DMASYNC_POSTWRITE)
 		return;
 	if (map->flags & DMAMAP_COHERENT)
 		return;
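The second hunk widens the early-return test in _bus_dmamap_sync(): the old check skipped any op that included neither PREWRITE nor POSTREAD, so a plain PREREAD would never have reached the new code, while the new check only skips POSTWRITE. A small standalone comparison of the two predicates (the BUS_DMASYNC_* values below are stand-in constants for the demo, not the kernel's definitions):

/*
 * Compare the old and new early-return tests from _bus_dmamap_sync().
 * Flag values are stand-ins; only the two predicates mirror the diff.
 */
#include <stdio.h>

#define BUS_DMASYNC_PREREAD	0x01
#define BUS_DMASYNC_POSTREAD	0x02
#define BUS_DMASYNC_PREWRITE	0x04
#define BUS_DMASYNC_POSTWRITE	0x08

int
main(void)
{
        int ops[] = { BUS_DMASYNC_PREREAD, BUS_DMASYNC_POSTREAD,
            BUS_DMASYNC_PREWRITE, BUS_DMASYNC_POSTWRITE };
        const char *names[] = { "PREREAD", "POSTREAD", "PREWRITE",
            "POSTWRITE" };

        for (int i = 0; i < 4; i++) {
                /* old test: skip unless the op includes PREWRITE or POSTREAD */
                int old_skip = !(ops[i] & (BUS_DMASYNC_PREWRITE |
                    BUS_DMASYNC_POSTREAD));
                /* new test: skip only a pure POSTWRITE */
                int new_skip = (ops[i] == BUS_DMASYNC_POSTWRITE);
                printf("%-9s old: %-7s new: %s\n", names[i],
                    old_skip ? "skipped" : "synced",
                    new_skip ? "skipped" : "synced");
        }
        return (0);
}

Running it shows that PREREAD was skipped by the old test and is synced by the new one, which is exactly what the PREREAD handling added above requires.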