Remove unused functions on armv6. Some of the cache handling code is
still used in the elf trampoline, so add a macro to handle this.

Sponsored by:	ABT Systems Ltd
This commit is contained in:
andrew 2016-10-03 16:10:38 +00:00
parent 841d22cadc
commit 29875679ba
6 changed files with 9 additions and 442 deletions

View File

@ -37,62 +37,6 @@
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
/*
* TLB functions
*/
/*
* arm11_tlb_flushID_SE: flush a single I-TLB and D-TLB entry.
* In: r0 = virtual address identifying the entry to flush.
* Drains the write buffer before returning.
*/
ENTRY(arm11_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
END(arm11_tlb_flushID_SE)
/*
* Context switch.
*
* These are the CPU-specific parts of the context switcher cpu_switch()
* These functions actually perform the TTB reload.
*
* NOTE: Special calling convention
* r1, r4-r13 must be preserved
*/
/*
* arm11_context_switch: load a new translation table base.
* In: r0 = new TTB value. The entire I+D TLB is flushed afterwards.
*/
ENTRY(arm11_context_switch)
/*
* We can assume that the caches will only contain kernel addresses
* at this point. So no need to flush them again.
*/
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */
mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */
/* Paranoia -- make sure the pipeline is empty. */
nop
nop
nop
RET
END(arm11_context_switch)
/*
* TLB functions
*/
/*
* arm11_tlb_flushID: flush the entire I+D TLB, then drain the
* write buffer. No arguments; r0 is only used as a scratch source
* for the (ignored) mcr operand.
*/
ENTRY(arm11_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
END(arm11_tlb_flushID)
/*
* arm11_tlb_flushD: flush the entire D-TLB, then drain the write buffer.
*/
ENTRY(arm11_tlb_flushD)
mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
END(arm11_tlb_flushD)
/*
* arm11_tlb_flushD_SE: flush a single D-TLB entry.
* In: r0 = virtual address identifying the entry to flush.
*/
ENTRY(arm11_tlb_flushD_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
END(arm11_tlb_flushD_SE)
/*
* Other functions
*/

View File

@ -64,55 +64,6 @@ __FBSDID("$FreeBSD$");
.cpu arm1176jz-s
#if 0
/* Simple form: a single invalidate-entire-I-cache operation. */
#define Invalidate_I_cache(Rtmp1, Rtmp2) \
mcr p15, 0, Rtmp1, c7, c5, 0 /* Invalidate Entire I cache */
#else
/*
* Workaround for
*
* Erratum 411920 in ARM1136 (fixed in r1p4)
* Erratum 415045 in ARM1176 (fixed in r0p5?)
*
* - value of arg 'reg' Should Be Zero
*/
/*
* Invalidate_I_cache(Rtmp1, Rtmp2): invalidate the entire I-cache.
* Rtmp1 and Rtmp2 are scratch registers. Interrupts and aborts are
* masked around the repeated invalidates, per the errata above, and
* the saved CPSR is restored afterwards.
*/
#define Invalidate_I_cache(Rtmp1, Rtmp2) \
mov Rtmp1, #0; /* SBZ */ \
mrs Rtmp2, cpsr; \
cpsid ifa; \
mcr p15, 0, Rtmp1, c7, c5, 0; /* Nuke Whole Icache */ \
mcr p15, 0, Rtmp1, c7, c5, 0; /* Nuke Whole Icache */ \
mcr p15, 0, Rtmp1, c7, c5, 0; /* Nuke Whole Icache */ \
mcr p15, 0, Rtmp1, c7, c5, 0; /* Nuke Whole Icache */ \
msr cpsr_cx, Rtmp2; \
nop; \
nop; \
nop; \
nop; \
nop; \
nop; \
nop; \
nop; \
nop; \
nop; \
nop;
#endif
#if 1
/*
* Flush_D_cache(reg): clean and invalidate the entire D-cache, then
* issue a Data Synchronization Barrier. reg is a scratch register
* (its mcr operand Should Be Zero).
*/
#define Flush_D_cache(reg) \
mov reg, #0; /* SBZ */ \
mcr p15, 0, reg, c7, c14, 0;/* Clean and Invalidate Entire Data Cache */ \
mcr p15, 0, reg, c7, c10, 4;/* Data Synchronization Barrier */
#else
/* Alternate form: loop until the Cache Dirty Status Register reads clean. */
#define Flush_D_cache(reg) \
1: mov reg, #0; /* SBZ */ \
mcr p15, 0, reg, c7, c14, 0;/* Clean and Invalidate Entire Data Cache */ \
mrc p15, 0, reg, C7, C10, 6;/* Read Cache Dirty Status Register */ \
ands reg, reg, #01; /* Check if it is clean */ \
bne 1b; /* loop if not */ \
mcr p15, 0, reg, c7, c10, 4;/* Data Synchronization Barrier */
#endif
ENTRY(arm11x6_setttb)
mov r1, #0
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
@ -121,71 +72,6 @@ ENTRY(arm11x6_setttb)
RET
END(arm11x6_setttb)
/*
* arm11x6_idcache_wbinv_all: write back and invalidate the entire
* D-cache, then invalidate the entire I-cache, using the errata-safe
* macros above. Clobbers r0, r1.
*/
ENTRY_NP(arm11x6_idcache_wbinv_all)
Flush_D_cache(r0)
Invalidate_I_cache(r0, r1)
RET
END(arm11x6_idcache_wbinv_all)
/*
* arm11x6_dcache_wbinv_all: write back and invalidate the entire
* D-cache. Clobbers r0.
*/
ENTRY_NP(arm11x6_dcache_wbinv_all)
Flush_D_cache(r0)
RET
END(arm11x6_dcache_wbinv_all)
/*
* arm11x6_icache_sync_range: synchronize the I-cache with the D-cache
* over a VA range, using the errata workaround sequence noted below.
* In: r0 = start VA, r1 = length (converted to an inclusive end VA).
* Clobbers r2, r3. Interrupts are masked during the invalidate.
*/
ENTRY_NP(arm11x6_icache_sync_range)
add r1, r1, r0
sub r1, r1, #1
/* Erratum ARM1136 371025, workaround #2 */
/* Erratum ARM1176 371367 */
mrs r2, cpsr /* save the CPSR */
cpsid ifa /* disable interrupts (irq,fiq,abort) */
mov r3, #0
mcr p15, 0, r3, c13, c0, 0 /* write FCSE (uTLB invalidate) */
mcr p15, 0, r3, c7, c5, 4 /* flush prefetch buffer */
add r3, pc, #0x24 /* NOTE(review): offset tied to the code layout below -- verify if edited */
mcr p15, 0, r3, c7, c13, 1 /* prefetch I-cache line */
mcrr p15, 0, r1, r0, c5 /* invalidate I-cache range */
msr cpsr_cx, r2 /* local_irq_restore */
nop
nop
nop
nop
nop
nop
nop
mcrr p15, 0, r1, r0, c12 /* clean and invalidate D cache range */ /* XXXNH */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(arm11x6_icache_sync_range)
/*
* arm11x6_idcache_wbinv_range: write back and invalidate the D-cache
* and invalidate the I-cache over a VA range (errata-safe sequence).
* In: r0 = start VA, r1 = length (converted to an inclusive end VA).
* Clobbers r2, r3. Interrupts are masked during the invalidate.
*/
ENTRY_NP(arm11x6_idcache_wbinv_range)
add r1, r1, r0
sub r1, r1, #1
/* Erratum ARM1136 371025, workaround #2 */
/* Erratum ARM1176 371367 */
mrs r2, cpsr /* save the CPSR */
cpsid ifa /* disable interrupts (irq,fiq,abort) */
mov r3, #0
mcr p15, 0, r3, c13, c0, 0 /* write FCSE (uTLB invalidate) */
mcr p15, 0, r3, c7, c5, 4 /* flush prefetch buffer */
add r3, pc, #0x24 /* NOTE(review): offset tied to the code layout below -- verify if edited */
mcr p15, 0, r3, c7, c13, 1 /* prefetch I-cache line */
mcrr p15, 0, r1, r0, c5 /* invalidate I-cache range */
msr cpsr_cx, r2 /* local_irq_restore */
nop
nop
nop
nop
nop
nop
nop
mcrr p15, 0, r1, r0, c14 /* clean and invalidate D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(arm11x6_idcache_wbinv_range)
/*
* Preload the cache before issuing the WFI by conditionally disabling the
* mcr instructions the first time around the loop. Ensure the function is
@ -208,4 +94,3 @@ ENTRY_NP(arm11x6_sleep)
bne 1b
RET
END(arm11x6_sleep)

View File

@ -42,57 +42,11 @@
.arch armv6
/*
* Functions to set the MMU Translation Table Base register
*
* We need to clean and flush the cache as it uses virtual
* addresses that are about to change.
*/
/*
* armv6_setttb: load a new translation table base.
* In: r0 = new TTB value. The write buffer is drained first and the
* I+D TLBs are invalidated afterwards.
* NOTE(review): despite the comment above, no cache clean/flush is
* performed in this body -- confirm callers handle that themselves.
*/
ENTRY(armv6_setttb)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
END(armv6_setttb)
/*
* Cache operations.
*/
/* LINTSTUB: void armv6_dcache_wb_range(vaddr_t, vsize_t); */
/*
* armv6_dcache_wb_range: write back (clean) the D-cache over a VA range.
* In: r0 = start VA, r1 = length. The mcrr range operation takes an
* inclusive end address, hence end = r0 + r1 - 1.
*/
ENTRY(armv6_dcache_wb_range)
add r1, r1, r0
sub r1, r1, #1
mcrr p15, 0, r1, r0, c12 /* clean D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(armv6_dcache_wb_range)
/* LINTSTUB: void armv6_dcache_wbinv_range(vaddr_t, vsize_t); */
/*
* armv6_dcache_wbinv_range: write back and invalidate the D-cache over
* a VA range. In: r0 = start VA, r1 = length (inclusive end computed).
*/
ENTRY(armv6_dcache_wbinv_range)
add r1, r1, r0
sub r1, r1, #1
mcrr p15, 0, r1, r0, c14 /* clean and invalidate D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(armv6_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
* must use wb-inv of the entire cache.
*
* LINTSTUB: void armv6_dcache_inv_range(vaddr_t, vsize_t);
*/
/*
* armv6_dcache_inv_range: invalidate (without writeback) the D-cache
* over a VA range. In: r0 = start VA, r1 = length.
*/
ENTRY(armv6_dcache_inv_range)
add r1, r1, r0
sub r1, r1, #1
mcrr p15, 0, r1, r0, c6 /* invalidate D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(armv6_dcache_inv_range)
#ifdef ELF_TRAMPOLINE
/* LINTSTUB: void armv6_idcache_wbinv_all(void); */
ENTRY_NP(armv6_idcache_wbinv_all)
/*
@ -107,10 +61,4 @@ ENTRY_NP(armv6_idcache_wbinv_all)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(armv6_idcache_wbinv_all)
/*
* armv6_idcache_inv_all: invalidate the entire I-cache and D-cache.
* No writeback is performed, so dirty D-cache data is discarded.
*/
ENTRY(armv6_idcache_inv_all)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 /* invalidate all I+D cache */
RET
END(armv6_idcache_inv_all)
#endif /* ELF_TRAMPOLINE */

View File

@ -86,35 +86,7 @@ ENTRY(armv7_setttb)
RET
END(armv7_setttb)
/*
* armv7_tlb_flushID: invalidate the entire unified TLB and the branch
* predictor; inner-shareable (broadcast) forms are used on SMP.
* Barriers order the invalidate against surrounding accesses.
*/
ENTRY(armv7_tlb_flushID)
dsb
#ifdef SMP
mcr CP15_TLBIALLIS
mcr CP15_BPIALLIS
#else
mcr CP15_TLBIALL
mcr CP15_BPIALL
#endif
dsb
isb
mov pc, lr
END(armv7_tlb_flushID)
/*
* armv7_tlb_flushID_SE: invalidate the TLB entry for a single page and
* the branch predictor. In: r0 = VA within the page; the offset bits
* are masked off via .Lpage_mask (defined elsewhere in this file).
*/
ENTRY(armv7_tlb_flushID_SE)
ldr r1, .Lpage_mask
bic r0, r0, r1 /* page-align the VA */
#ifdef SMP
mcr CP15_TLBIMVAAIS(r0)
mcr CP15_BPIALLIS
#else
mcr CP15_TLBIMVA(r0)
mcr CP15_BPIALL
#endif
dsb
isb
mov pc, lr
END(armv7_tlb_flushID_SE)
#ifdef ELF_TRAMPOLINE
/* Based on algorithm from ARM Architecture Reference Manual */
ENTRY(armv7_dcache_wbinv_all)
stmdb sp!, {r4, r5, r6, r7, r8, r9}
@ -181,94 +153,7 @@ ENTRY(armv7_idcache_wbinv_all)
ldmia sp!, {lr}
RET
END(armv7_idcache_wbinv_all)
/*
* armv7_dcache_wb_range: write back (clean) the D-cache over a VA range.
* In: r0 = start VA, r1 = length in bytes. Clobbers r2, r3, ip.
* The range is rounded out to cache-line boundaries using the line size
* stored at .Larmv7_dcache_line_size (defined elsewhere in this file).
*/
ENTRY(armv7_dcache_wb_range)
ldr ip, .Larmv7_dcache_line_size
ldr ip, [ip]
sub r3, ip, #1 /* line mask */
and r2, r0, r3 /* offset of VA within its line */
add r1, r1, r2 /* grow length to cover partial line */
bic r0, r0, r3 /* align VA down to line boundary */
.Larmv7_wb_next:
mcr CP15_DCCMVAC(r0)
add r0, r0, ip
subs r1, r1, ip
bhi .Larmv7_wb_next
dsb /* data synchronization barrier */
RET
END(armv7_dcache_wb_range)
/*
* armv7_dcache_wbinv_range: write back and invalidate the D-cache over
* a VA range. In: r0 = start VA, r1 = length. Clobbers r2, r3, ip.
* The range is rounded out to cache-line boundaries.
*/
ENTRY(armv7_dcache_wbinv_range)
ldr ip, .Larmv7_dcache_line_size
ldr ip, [ip]
sub r3, ip, #1 /* line mask */
and r2, r0, r3 /* offset of VA within its line */
add r1, r1, r2 /* grow length to cover partial line */
bic r0, r0, r3 /* align VA down to line boundary */
.Larmv7_wbinv_next:
mcr CP15_DCCIMVAC(r0)
add r0, r0, ip
subs r1, r1, ip
bhi .Larmv7_wbinv_next
dsb /* data synchronization barrier */
RET
END(armv7_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
* must use wb-inv of the entire cache.
*/
/*
* armv7_dcache_inv_range: invalidate (without writeback) the D-cache
* over a VA range. In: r0 = start VA, r1 = length. Clobbers r2, r3, ip.
* The range is rounded out to cache-line boundaries.
*/
ENTRY(armv7_dcache_inv_range)
ldr ip, .Larmv7_dcache_line_size
ldr ip, [ip]
sub r3, ip, #1 /* line mask */
and r2, r0, r3 /* offset of VA within its line */
add r1, r1, r2 /* grow length to cover partial line */
bic r0, r0, r3 /* align VA down to line boundary */
.Larmv7_inv_next:
mcr CP15_DCIMVAC(r0)
add r0, r0, ip
subs r1, r1, ip
bhi .Larmv7_inv_next
dsb /* data synchronization barrier */
RET
END(armv7_dcache_inv_range)
/*
* armv7_idcache_wbinv_range: write back and invalidate the D-cache and
* invalidate the I-cache over a VA range. In: r0 = start VA,
* r1 = length. Clobbers r2, r3, ip. Uses the I-cache line size at
* .Larmv7_idcache_line_size (defined elsewhere in this file).
*/
ENTRY(armv7_idcache_wbinv_range)
ldr ip, .Larmv7_idcache_line_size
ldr ip, [ip]
sub r3, ip, #1 /* line mask */
and r2, r0, r3 /* offset of VA within its line */
add r1, r1, r2 /* grow length to cover partial line */
bic r0, r0, r3 /* align VA down to line boundary */
.Larmv7_id_wbinv_next:
mcr CP15_ICIMVAU(r0)
mcr CP15_DCCIMVAC(r0)
add r0, r0, ip
subs r1, r1, ip
bhi .Larmv7_id_wbinv_next
dsb /* data synchronization barrier */
isb /* instruction synchronization barrier */
RET
END(armv7_idcache_wbinv_range)
/*
* armv7_icache_sync_range: make the I-cache coherent with the D-cache
* over a VA range (clean D line, then invalidate I line, per line).
* In: r0 = start VA, r1 = length. Clobbers r2, r3, ip.
* NOTE(review): unlike the dcache routines above, r0 is not aligned
* down here -- confirm callers pass a line-aligned start address.
*/
ENTRY_NP(armv7_icache_sync_range)
ldr ip, .Larmv7_icache_line_size
ldr ip, [ip]
sub r3, ip, #1 /* Address need not be aligned, but */
and r2, r0, r3 /* round length up if op spans line */
add r1, r1, r2 /* boundary: len += addr & linemask; */
.Larmv7_sync_next:
mcr CP15_DCCMVAC(r0)
mcr CP15_ICIMVAU(r0)
add r0, r0, ip
subs r1, r1, ip
bhi .Larmv7_sync_next
dsb /* data synchronization barrier */
isb /* instruction synchronization barrier */
RET
END(armv7_icache_sync_range)
#endif
ENTRY(armv7_cpu_sleep)
dsb /* data synchronization barrier */
@ -276,22 +161,6 @@ ENTRY(armv7_cpu_sleep)
RET
END(armv7_cpu_sleep)
/*
* armv7_context_switch: switch address spaces.
* In: r0 = new translation table base; the PT_ATTR memory-attribute
* bits are OR'ed in before loading TTBR0. The entire TLB is then
* invalidated (inner-shareable broadcast on SMP).
*/
ENTRY(armv7_context_switch)
dsb
orr r0, r0, #PT_ATTR /* merge table-walk attributes into TTBR0 value */
mcr CP15_TTBR0(r0)
isb
#ifdef SMP
mcr CP15_TLBIALLIS
#else
mcr CP15_TLBIALL
#endif
dsb
isb
RET
END(armv7_context_switch)
ENTRY(armv7_drain_writebuf)
dsb
RET
@ -303,56 +172,3 @@ ENTRY(armv7_sev)
nop
RET
END(armv7_sev)
/*
* armv7_auxctrl: read-modify-write the Auxiliary Control Register.
* In: r0 = mask of bits to clear, r1 = mask of bits to XOR (toggle).
* Out: r0 = previous ACTLR value. The register is only written back
* when the computed value differs from the current one.
*/
ENTRY(armv7_auxctrl)
mrc CP15_ACTLR(r2)
bic r3, r2, r0 /* Clear bits */
eor r3, r3, r1 /* XOR bits */
teq r2, r3
mcrne CP15_ACTLR(r3) /* write back only if changed */
mov r0, r2 /* return the old value */
RET
END(armv7_auxctrl)
/*
* Invalidate all I+D+branch cache. Used by startup code, which counts
* on the fact that only r0-r3,ip are modified and no stack space is used.
*/
/*
* NOTE(review): only cache level 1 is selected via CSSELR below; deeper
* cache levels, if present, are not invalidated here -- confirm that is
* the intent for the startup path.
*/
ENTRY(armv7_idcache_inv_all)
mov r0, #0
mcr CP15_CSSELR(r0) @ set cache level to L1
mrc CP15_CCSIDR(r0)
ubfx r2, r0, #13, #15 @ get num sets - 1 from CCSIDR
ubfx r3, r0, #3, #10 @ get numways - 1 from CCSIDR
clz r1, r3 @ number of bits to MSB of way
lsl r3, r3, r1 @ shift into position
mov ip, #1 @
lsl ip, ip, r1 @ ip now contains the way decr
ubfx r0, r0, #0, #3 @ get linesize from CCSIDR
add r0, r0, #4 @ apply bias
lsl r2, r2, r0 @ shift sets by log2(linesize)
add r3, r3, r2 @ merge numsets - 1 with numways - 1
sub ip, ip, r2 @ subtract numsets - 1 from way decr
mov r1, #1
lsl r1, r1, r0 @ r1 now contains the set decr
mov r2, ip @ r2 now contains set way decr
/* r3 = ways/sets, r2 = way decr, r1 = set decr, r0 and ip are free */
1: mcr CP15_DCISW(r3) @ invalidate line
movs r0, r3 @ get current way/set
beq 2f @ at 0 means we are done.
movs r0, r0, lsl #10 @ clear way bits leaving only set bits
subne r3, r3, r1 @ non-zero?, decrement set #
subeq r3, r3, r2 @ zero?, decrement way # and restore set count
b 1b
2: dsb @ wait for stores to finish
mov r0, #0 @ and ...
mcr CP15_ICIALLU @ invalidate instruction+branch cache
isb @ instruction sync barrier
bx lr @ return
END(armv7_idcache_inv_all)

View File

@ -280,22 +280,11 @@ void armv6_idcache_wbinv_all (void);
#endif
#if defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
void armv7_setttb (u_int);
void armv7_tlb_flushID (void);
void armv7_tlb_flushID_SE (u_int);
void armv7_icache_sync_range (vm_offset_t, vm_size_t);
void armv7_idcache_wbinv_range (vm_offset_t, vm_size_t);
void armv7_idcache_inv_all (void);
void armv7_dcache_wbinv_all (void);
void armv7_idcache_wbinv_all (void);
void armv7_dcache_wbinv_range (vm_offset_t, vm_size_t);
void armv7_dcache_inv_range (vm_offset_t, vm_size_t);
void armv7_dcache_wb_range (vm_offset_t, vm_size_t);
void armv7_cpu_sleep (int);
void armv7_setup (void);
void armv7_context_switch (void);
void armv7_drain_writebuf (void);
void armv7_sev (void);
u_int armv7_auxctrl (u_int, u_int);
void armadaxp_idcache_wbinv_all (void);
@ -307,26 +296,9 @@ void pj4bv7_setup (void);
#endif
#if defined(CPU_ARM1176)
void arm11_tlb_flushID (void);
void arm11_tlb_flushID_SE (u_int);
void arm11_tlb_flushD (void);
void arm11_tlb_flushD_SE (u_int va);
void arm11_context_switch (void);
void arm11_drain_writebuf (void);
void armv6_dcache_wbinv_range (vm_offset_t, vm_size_t);
void armv6_dcache_inv_range (vm_offset_t, vm_size_t);
void armv6_dcache_wb_range (vm_offset_t, vm_size_t);
void armv6_idcache_inv_all (void);
void arm11x6_setttb (u_int);
void arm11x6_idcache_wbinv_all (void);
void arm11x6_dcache_wbinv_all (void);
void arm11x6_icache_sync_range (vm_offset_t, vm_size_t);
void arm11x6_idcache_wbinv_range (vm_offset_t, vm_size_t);
void arm11x6_setup (void);
void arm11x6_sleep (int); /* no ref. for errata */
#endif

View File

@ -92,6 +92,7 @@ ${KERNEL_KO}.tramp: ${KERNEL_KO} $S/$M/$M/inckern.S $S/$M/$M/elf_trampoline.c
echo "#define KERNSIZE $$st_size" >>opt_kernname.h
${CC} -O -nostdlib -I. -I$S \
-Xlinker -T -Xlinker ldscript.$M.tramp \
-DELF_TRAMPOLINE \
tmphack.S \
$S/$M/$M/elf_trampoline.c \
$S/$M/$M/inckern.S \
@ -99,6 +100,7 @@ ${KERNEL_KO}.tramp: ${KERNEL_KO} $S/$M/$M/inckern.S $S/$M/$M/elf_trampoline.c
-o ${KERNEL_KO}.tramp
${CC} -O -nostdlib -I. -I$S \
-Xlinker -T -Xlinker ldscript.$M.tramp.noheader \
-DELF_TRAMPOLINE \
tmphack.S \
$S/$M/$M/elf_trampoline.c \
$S/$M/$M/inckern.S \
@ -114,12 +116,12 @@ ${KERNEL_KO}.tramp: ${KERNEL_KO} $S/$M/$M/inckern.S $S/$M/$M/elf_trampoline.c
eval $$(stat -s ${KERNEL_KO}.tmp.gz) && \
echo "#define KERNCOMPSIZE $$st_size" >>opt_kernname.h
${CC} -O2 -ffreestanding -I. -I$S -c \
-DKZIP \
-DKZIP -DELF_TRAMPOLINE \
$S/kern/inflate.c \
-o inflate-tramp.o
${CC} -O -nostdlib -I. -I$S \
-Xlinker -T -Xlinker ldscript.$M.tramp \
-DKZIP \
-DKZIP -DELF_TRAMPOLINE \
tmphack.S \
$S/$M/$M/elf_trampoline.c \
inflate-tramp.o \
@ -128,7 +130,7 @@ ${KERNEL_KO}.tramp: ${KERNEL_KO} $S/$M/$M/inckern.S $S/$M/$M/elf_trampoline.c
-o ${KERNEL_KO}.gz.tramp
${CC} -O -nostdlib -I. -I$S \
-Xlinker -T -Xlinker ldscript.$M.tramp.noheader \
-DKZIP \
-DKZIP -DELF_TRAMPOLINE \
tmphack.S \
$S/$M/$M/elf_trampoline.c \
inflate-tramp.o \