From c1381f07f61a66979f1569995f37f2a0413c0413 Mon Sep 17 00:00:00 2001
From: Andrew Turner <andrew@FreeBSD.org>
Date: Fri, 17 Dec 2021 09:33:57 +0000
Subject: [PATCH] Don't sync the I/D caches when they are coherent

In the arm64 loader we need to synchronise the I and D caches. On some
newer CPUs the I and D caches are coherent, so we don't need to perform
these operations.

While here, remove the arguments to cpu_inval_icache as they are
unneeded.

Reported by:	cperciva
Tested by:	cperciva
Sponsored by:	Innovate UK
---
 stand/arm64/libarm64/cache.c       | 67 +++++++++++++++++++-----------
 stand/arm64/libarm64/cache.h       |  2 +-
 stand/efi/loader/arch/arm64/exec.c |  2 +-
 3 files changed, 45 insertions(+), 26 deletions(-)

diff --git a/stand/arm64/libarm64/cache.c b/stand/arm64/libarm64/cache.c
index 25766ef564dd..ff52572399ac 100644
--- a/stand/arm64/libarm64/cache.c
+++ b/stand/arm64/libarm64/cache.c
@@ -32,20 +32,29 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 
 #include <machine/armreg.h>
+#include <machine/cpufunc.h>
 
 #include <stand.h>
 
 #include "cache.h"
 
-static unsigned int
-get_dcache_line_size(void)
+static bool
+get_cache_dic(uint64_t ctr)
 {
-	uint64_t ctr;
-	unsigned int dcl_size;
+	return (CTR_DIC_VAL(ctr) != 0);
+}
 
-	/* Accessible from all security levels */
-	ctr = READ_SPECIALREG(ctr_el0);
+static bool
+get_cache_idc(uint64_t ctr)
+{
+	return (CTR_IDC_VAL(ctr) != 0);
+}
+
+static unsigned int
+get_dcache_line_size(uint64_t ctr)
+{
+	unsigned int dcl_size;
 
 	/*
 	 * Relevant field [19:16] is LOG2
@@ -60,36 +69,46 @@
 void
 cpu_flush_dcache(const void *ptr, size_t len)
 {
-
-	uint64_t cl_size;
+	uint64_t cl_size, ctr;
 	vm_offset_t addr, end;
 
-	cl_size = get_dcache_line_size();
+	/* Accessible from all security levels */
+	ctr = READ_SPECIALREG(ctr_el0);
 
-	/* Calculate end address to clean */
-	end = (vm_offset_t)ptr + (vm_offset_t)len;
-	/* Align start address to cache line */
-	addr = (vm_offset_t)ptr;
-	addr = rounddown2(addr, cl_size);
+	if (get_cache_idc(ctr)) {
+		dsb(ishst);
+	} else {
+		cl_size = get_dcache_line_size(ctr);
 
-	for (; addr < end; addr += cl_size)
-		__asm __volatile("dc civac, %0" : : "r" (addr) : "memory");
-	/* Full system DSB */
-	__asm __volatile("dsb sy" : : : "memory");
+		/* Calculate end address to clean */
+		end = (vm_offset_t)ptr + (vm_offset_t)len;
+		/* Align start address to cache line */
+		addr = (vm_offset_t)ptr;
+		addr = rounddown2(addr, cl_size);
+
+		for (; addr < end; addr += cl_size)
+			__asm __volatile("dc civac, %0" : : "r" (addr) :
+			    "memory");
+		/* Full system DSB */
+		dsb(ish);
+	}
 }
 
 void
-cpu_inval_icache(const void *ptr, size_t len)
+cpu_inval_icache(void)
 {
+	uint64_t ctr;
 
-	/* NULL ptr or 0 len means all */
-	if (ptr == NULL || len == 0) {
+	/* Accessible from all security levels */
+	ctr = READ_SPECIALREG(ctr_el0);
+
+	if (get_cache_dic(ctr)) {
+		isb();
+	} else {
 		__asm __volatile(
 		    "ic	ialluis	\n"
 		    "dsb	ish	\n"
+		    "isb	\n"
 		    : : : "memory");
-		return;
 	}
-
-	/* TODO: Other cache ranges if necessary */
 }
diff --git a/stand/arm64/libarm64/cache.h b/stand/arm64/libarm64/cache.h
index 89b094b19c18..5e560c4d578d 100644
--- a/stand/arm64/libarm64/cache.h
+++ b/stand/arm64/libarm64/cache.h
@@ -33,6 +33,6 @@
 
 /* cache.c */
 void cpu_flush_dcache(const void *, size_t);
-void cpu_inval_icache(const void *, size_t);
+void cpu_inval_icache(void);
 
 #endif /* _CACHE_H_ */
diff --git a/stand/efi/loader/arch/arm64/exec.c b/stand/efi/loader/arch/arm64/exec.c
index 7783d46cd8e1..6cf4a4fd8e4d 100644
--- a/stand/efi/loader/arch/arm64/exec.c
+++ b/stand/efi/loader/arch/arm64/exec.c
@@ -128,7 +128,7 @@ elf64_exec(struct preloaded_file *fp)
 	clean_size = (vm_offset_t)efi_translate(kernendp) - clean_addr;
 
 	cpu_flush_dcache((void *)clean_addr, clean_size);
-	cpu_inval_icache(NULL, 0);
+	cpu_inval_icache();
 
 	(*entry)(modulep);
 	panic("exec returned");
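
Note, not part of the patch: a minimal standalone sketch of the CTR_EL0
checks the change relies on. The DIC (bit 29) and IDC (bit 28) field
positions are from the Arm ARM; the helper and macro names below are
local to this example rather than the FreeBSD <machine/armreg.h>
definitions, and it assumes an aarch64 target where EL0 access to
CTR_EL0 is not trapped.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local names; FreeBSD spells these CTR_DIC_VAL()/CTR_IDC_VAL(). */
#define	CTR_DIC_SHIFT	29	/* IC invalidation to PoU not required */
#define	CTR_IDC_SHIFT	28	/* DC clean to PoU not required */

static uint64_t
read_ctr_el0(void)
{
	uint64_t ctr;

	/* CTR_EL0 is readable from EL0 unless trapped via SCTLR_EL1.UCT. */
	__asm__ __volatile__("mrs %0, ctr_el0" : "=r" (ctr));
	return (ctr);
}

int
main(void)
{
	uint64_t ctr = read_ctr_el0();
	bool dic = (ctr >> CTR_DIC_SHIFT) & 1;
	bool idc = (ctr >> CTR_IDC_SHIFT) & 1;

	/*
	 * When IDC is set, cpu_flush_dcache() only needs dsb(ishst)
	 * instead of the dc civac loop; when DIC is set,
	 * cpu_inval_icache() only needs an isb instead of ic ialluis.
	 */
	printf("DIC=%d IDC=%d\n", (int)dic, (int)idc);
	return (0);
}

This mirrors the structure of the patched cpu_flush_dcache() and
cpu_inval_icache(): test the coherency bit first, and fall back to the
explicit cache maintenance only when it is clear.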