Provide a proper armv7 implementation of icache_sync_all rather than
using armv7_idcache_wbinv_all, because wbinv_all doesn't broadcast the
operation to other cores.  In elf_cpu_load_file() use icache_sync_all()
and explain why it's needed (and why other sync operations aren't).

As part of doing this, all callers of cpu_icache_sync_all() were
inspected to ensure they weren't relying on the old side effect of
doing a wbinv_all along with the icache work.
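
[Editor's note: for context on how those callers reach the new routine, here
is a minimal sketch of the cpufuncs indirection, simplified from the arm
cpufunc.h conventions; the real struct has many more members:]

    struct cpu_functions {
            void    (*cf_icache_sync_all)(void);    /* slot changed in this commit */
            /* ... many more cache/TLB/MMU operations ... */
    };

    extern struct cpu_functions cpufuncs;   /* set to cortexa_cpufuncs on Cortex-A */

    #define cpu_icache_sync_all()   cpufuncs.cf_icache_sync_all()

So every cpu_icache_sync_all() call site in the kernel dispatches through the
table entry swapped in the first hunk below.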
commit 91c650065a
parent 1c6c63fc6a
Author: Ian Lepore
Date: 2014-04-27 00:46:01 +00:00
4 changed files with 22 additions and 4 deletions

sys/arm/arm/cpufunc.c

@@ -769,7 +769,7 @@ struct cpu_functions cortexa_cpufuncs = {
 
 	/* Cache operations */
 
-	armv7_idcache_wbinv_all,	/* icache_sync_all */
+	armv7_icache_sync_all,		/* icache_sync_all */
 	armv7_icache_sync_range,	/* icache_sync_range */
 
 	armv7_dcache_wbinv_all,		/* dcache_wbinv_all */

sys/arm/arm/cpufunc_asm_armv7.S

@@ -250,6 +250,13 @@ ENTRY(armv7_idcache_wbinv_range)
 	RET
 END(armv7_idcache_wbinv_range)
 
+ENTRY_NP(armv7_icache_sync_all)
+	mcr	p15, 0, r0, c7, c1, 0	/* Invalidate all I cache to PoU Inner Shareable */
+	isb				/* instruction synchronization barrier */
+	dsb				/* data synchronization barrier */
+	RET
+END(armv7_icache_sync_all)
+
 ENTRY_NP(armv7_icache_sync_range)
 	ldr	ip, .Larmv7_line_size
 .Larmv7_sync_next:
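
[Editor's note: the key encoding is c7, c1, 0 (ICIALLUIS), which invalidates
all instruction caches to the Point of Unification across the Inner Shareable
domain, i.e. the operation is broadcast to the other cores; the non-broadcast
variant, ICIALLU, is c7, c5, 0 and affects only the local core, which is why
the old wbinv_all path was insufficient on SMP. A rough C rendering of the new
routine, as a hypothetical helper using GCC-style inline assembly for an ARMv7
target (not part of the commit):]

    #include <stdint.h>

    static inline void
    armv7_icache_sync_all_sketch(void)
    {
            uint32_t zero = 0;

            __asm __volatile(
                "mcr p15, 0, %0, c7, c1, 0\n\t" /* ICIALLUIS: broadcast I-cache invalidate */
                "isb\n\t"                       /* flush this core's pipeline */
                "dsb\n\t"                       /* wait for the maintenance to complete */
                : : "r" (zero) : "memory");
    }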

sys/arm/arm/elf_machdep.c

@@ -220,9 +220,19 @@ int
 elf_cpu_load_file(linker_file_t lf __unused)
 {
 
-	cpu_idcache_wbinv_all();
-	cpu_l2cache_wbinv_all();
-	cpu_tlb_flushID();
+	/*
+	 * The pmap code does not do an icache sync upon establishing executable
+	 * mappings in the kernel pmap.  It's an optimization based on the fact
+	 * that kernel memory allocations always have EXECUTABLE protection even
+	 * when the memory isn't going to hold executable code.  The only time
+	 * kernel memory holding instructions does need a sync is after loading
+	 * a kernel module, and that's when this function gets called.  Normal
+	 * data cache maintenance has already been done by the IO code, and TLB
+	 * maintenance has been done by the pmap code, so all we have to do here
+	 * is invalidate the instruction cache (which also invalidates the
+	 * branch predictor cache on platforms that have one).
+	 */
+	cpu_icache_sync_all();
 	return (0);
 }
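
[Editor's note: to make the division of labor in that comment concrete, here
is a hedged sketch of everything that must happen before freshly written
instructions may run, as a hypothetical kernel-context helper assuming the
arm cpufunc API; in elf_cpu_load_file() the first two steps are already
covered by the IO path and the pmap code, leaving only the icache sync:]

    static void
    copy_and_execute(void *dst, const void *src, size_t len)
    {
            void (*entry)(void);

            /* Step 1: get the instructions into memory; they land in the
             * D-cache, so write them back to the Point of Unification. */
            memcpy(dst, src, len);
            cpu_dcache_wb_range((vm_offset_t)dst, len);

            /* Step 2: TLB maintenance would be needed here if the mapping
             * had just been established or its permissions changed. */

            /* Step 3: discard stale I-cache and branch-predictor lines on
             * all cores (the broadcast routine added by this commit). */
            cpu_icache_sync_all();

            entry = (void (*)(void))dst;
            entry();        /* now safe to execute the new text */
    }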

sys/arm/include/cpufunc.h

@@ -411,6 +411,7 @@ void armv6_idcache_wbinv_range (vm_offset_t, vm_size_t);
 void	armv7_setttb		(u_int);
 void	armv7_tlb_flushID	(void);
 void	armv7_tlb_flushID_SE	(u_int);
+void	armv7_icache_sync_all	(void);
 void	armv7_icache_sync_range	(vm_offset_t, vm_size_t);
 void	armv7_idcache_wbinv_range (vm_offset_t, vm_size_t);
 void	armv7_idcache_inv_all	(void);