Remove old CPU_ values from the arm cpufunc code; support for these CPU types (ARM9, FA526, XScale PXA2x0/81342) has already been removed from the tree, so this code was dead.

This commit is contained in:
Andrew Turner 2018-07-28 12:00:32 +00:00
parent 96690e0dc4
commit 0faf121391
3 changed files with 3 additions and 710 deletions

View File

@ -61,10 +61,6 @@ __FBSDID("$FreeBSD$");
#include <machine/cpufunc.h>
#if defined(CPU_XSCALE_81342)
#include <arm/xscale/i8134x/i81342reg.h>
#endif
/* PRIMARY CACHE VARIABLES */
int arm_picache_size;
int arm_picache_line_size;
@ -84,57 +80,6 @@ u_int arm_cache_level;
u_int arm_cache_type[14];
u_int arm_cache_loc;
#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
/* CPU functions */
cpufunc_nullop, /* cpwait */
/* MMU functions */
cpufunc_control, /* control */
arm9_setttb, /* Setttb */
/* TLB functions */
armv4_tlb_flushID, /* tlb_flushID */
arm9_tlb_flushID_SE, /* tlb_flushID_SE */
armv4_tlb_flushD, /* tlb_flushD */
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
/* Cache operations */
arm9_icache_sync_range, /* icache_sync_range */
arm9_dcache_wbinv_all, /* dcache_wbinv_all */
arm9_dcache_wbinv_range, /* dcache_wbinv_range */
arm9_dcache_inv_range, /* dcache_inv_range */
arm9_dcache_wb_range, /* dcache_wb_range */
armv4_idcache_inv_all, /* idcache_inv_all */
arm9_idcache_wbinv_all, /* idcache_wbinv_all */
arm9_idcache_wbinv_range, /* idcache_wbinv_range */
cpufunc_nullop, /* l2cache_wbinv_all */
(void *)cpufunc_nullop, /* l2cache_wbinv_range */
(void *)cpufunc_nullop, /* l2cache_inv_range */
(void *)cpufunc_nullop, /* l2cache_wb_range */
(void *)cpufunc_nullop, /* l2cache_drain_writebuf */
/* Other functions */
armv4_drain_writebuf, /* drain_writebuf */
(void *)cpufunc_nullop, /* sleep */
/* Soft functions */
arm9_context_switch, /* context_switch */
arm9_setup /* cpu setup */
};
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
struct cpu_functions armv5_ec_cpufuncs = {
/* CPU functions */
@ -254,160 +199,6 @@ struct cpu_functions pj4bv7_cpufuncs = {
};
#endif /* CPU_MV_PJ4B */
#if defined(CPU_XSCALE_PXA2X0)
struct cpu_functions xscale_cpufuncs = {
/* CPU functions */
xscale_cpwait, /* cpwait */
/* MMU functions */
xscale_control, /* control */
xscale_setttb, /* setttb */
/* TLB functions */
armv4_tlb_flushID, /* tlb_flushID */
xscale_tlb_flushID_SE, /* tlb_flushID_SE */
armv4_tlb_flushD, /* tlb_flushD */
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
/* Cache operations */
xscale_cache_syncI_rng, /* icache_sync_range */
xscale_cache_purgeD, /* dcache_wbinv_all */
xscale_cache_purgeD_rng, /* dcache_wbinv_range */
xscale_cache_flushD_rng, /* dcache_inv_range */
xscale_cache_cleanD_rng, /* dcache_wb_range */
xscale_cache_flushID, /* idcache_inv_all */
xscale_cache_purgeID, /* idcache_wbinv_all */
xscale_cache_purgeID_rng, /* idcache_wbinv_range */
cpufunc_nullop, /* l2cache_wbinv_all */
(void *)cpufunc_nullop, /* l2cache_wbinv_range */
(void *)cpufunc_nullop, /* l2cache_inv_range */
(void *)cpufunc_nullop, /* l2cache_wb_range */
(void *)cpufunc_nullop, /* l2cache_drain_writebuf */
/* Other functions */
armv4_drain_writebuf, /* drain_writebuf */
xscale_cpu_sleep, /* sleep */
/* Soft functions */
xscale_context_switch, /* context_switch */
xscale_setup /* cpu setup */
};
#endif
/* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_81342
struct cpu_functions xscalec3_cpufuncs = {
/* CPU functions */
xscale_cpwait, /* cpwait */
/* MMU functions */
xscale_control, /* control */
xscalec3_setttb, /* setttb */
/* TLB functions */
armv4_tlb_flushID, /* tlb_flushID */
xscale_tlb_flushID_SE, /* tlb_flushID_SE */
armv4_tlb_flushD, /* tlb_flushD */
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
/* Cache operations */
xscalec3_cache_syncI_rng, /* icache_sync_range */
xscalec3_cache_purgeD, /* dcache_wbinv_all */
xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
xscale_cache_flushD_rng, /* dcache_inv_range */
xscalec3_cache_cleanD_rng, /* dcache_wb_range */
xscale_cache_flushID, /* idcache_inv_all */
xscalec3_cache_purgeID, /* idcache_wbinv_all */
xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
xscalec3_l2cache_purge, /* l2cache_wbinv_all */
xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
(void *)cpufunc_nullop, /* l2cache_drain_writebuf */
/* Other functions */
armv4_drain_writebuf, /* drain_writebuf */
xscale_cpu_sleep, /* sleep */
/* Soft functions */
xscalec3_context_switch, /* context_switch */
xscale_setup /* cpu setup */
};
#endif /* CPU_XSCALE_81342 */
#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
/* CPU functions */
cpufunc_nullop, /* cpwait */
/* MMU functions */
cpufunc_control, /* control */
fa526_setttb, /* setttb */
/* TLB functions */
armv4_tlb_flushID, /* tlb_flushID */
fa526_tlb_flushID_SE, /* tlb_flushID_SE */
armv4_tlb_flushD, /* tlb_flushD */
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
/* Cache operations */
fa526_icache_sync_range, /* icache_sync_range */
fa526_dcache_wbinv_all, /* dcache_wbinv_all */
fa526_dcache_wbinv_range, /* dcache_wbinv_range */
fa526_dcache_inv_range, /* dcache_inv_range */
fa526_dcache_wb_range, /* dcache_wb_range */
armv4_idcache_inv_all, /* idcache_inv_all */
fa526_idcache_wbinv_all, /* idcache_wbinv_all */
fa526_idcache_wbinv_range, /* idcache_wbinv_range */
cpufunc_nullop, /* l2cache_wbinv_all */
(void *)cpufunc_nullop, /* l2cache_wbinv_range */
(void *)cpufunc_nullop, /* l2cache_inv_range */
(void *)cpufunc_nullop, /* l2cache_wb_range */
(void *)cpufunc_nullop, /* l2cache_drain_writebuf */
/* Other functions */
armv4_drain_writebuf, /* drain_writebuf */
fa526_cpu_sleep, /* sleep */
/* Soft functions */
fa526_context_switch, /* context_switch */
fa526_setup /* cpu setup */
};
#endif /* CPU_FA526 */
#if defined(CPU_ARM1176)
struct cpu_functions arm1176_cpufuncs = {
@ -459,12 +250,9 @@ u_int cputype;
u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore-v4.s */
#endif
#if defined(CPU_ARM9) || \
defined (CPU_ARM9E) || \
#if defined (CPU_ARM9E) || \
defined(CPU_ARM1176) || \
defined(CPU_XSCALE_PXA2X0) || \
defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \
defined(CPU_XSCALE_81342) || \
defined(CPU_MV_PJ4B) || \
defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/* Global cache line sizes, use 32 as default */
@ -601,22 +389,6 @@ set_cpufuncs(void)
cputype = cpu_ident();
cputype &= CPU_ID_CPU_MASK;
#ifdef CPU_ARM9
if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
(cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
(cputype & 0x0000f000) == 0x00009000) {
cpufuncs = arm9_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
get_cachetype_cp15();
arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
pmap_pte_init_generic();
goto out;
}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
cputype == CPU_ID_MV88FR571_41) {
@ -686,40 +458,6 @@ set_cpufuncs(void)
}
#endif /* CPU_MV_PJ4B */
#if defined(CPU_FA526)
if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
cpufuncs = fa526_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
get_cachetype_cp15();
pmap_pte_init_generic();
goto out;
}
#endif /* CPU_FA526 */
#if defined(CPU_XSCALE_81342)
if (cputype == CPU_ID_81342) {
cpufuncs = xscalec3_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
get_cachetype_cp15();
pmap_pte_init_xscale();
goto out;
}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
/* ignore core revision to test PXA2xx CPUs */
if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
(cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
(cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
cpufuncs = xscale_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
get_cachetype_cp15();
pmap_pte_init_xscale();
goto out;
}
#endif /* CPU_XSCALE_PXA2X0 */
/*
* Bzzzz. And the answer was ...
*/
@ -734,44 +472,6 @@ set_cpufuncs(void)
* CPU Setup code
*/
#ifdef CPU_ARM9
void
arm9_setup(void)
{
int cpuctrl, cpuctrlmask;
cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
| CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
CPU_CONTROL_ROUNDROBIN;
cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
| CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
| CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
| CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
| CPU_CONTROL_ROUNDROBIN;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
#ifdef __ARMEB__
cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
if (vector_page == ARM_VECTORS_HIGH)
cpuctrl |= CPU_CONTROL_VECRELOC;
/* Clear out the cache */
cpu_idcache_wbinv_all();
/* Set the control register (SCTLR) */
cpu_control(cpuctrlmask, cpuctrl);
}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
void
arm10_setup(void)
@ -894,109 +594,3 @@ cortexa_setup(void)
cpu_scc_setup_ccnt();
}
#endif /* CPU_CORTEXA || CPU_KRAIT */
#if defined(CPU_FA526)
void
fa526_setup(void)
{
int cpuctrl, cpuctrlmask;
cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
| CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
| CPU_CONTROL_BPRD_ENABLE;
cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
| CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
| CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
| CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
| CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
#ifdef __ARMEB__
cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
if (vector_page == ARM_VECTORS_HIGH)
cpuctrl |= CPU_CONTROL_VECRELOC;
/* Clear out the cache */
cpu_idcache_wbinv_all();
/* Set the control register */
cpu_control(0xffffffff, cpuctrl);
}
#endif /* CPU_FA526 */
#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342)
void
xscale_setup(void)
{
uint32_t auxctl;
int cpuctrl, cpuctrlmask;
/*
* The XScale Write Buffer is always enabled. Our option
* is to enable/disable coalescing. Note that bits 6:3
* must always be enabled.
*/
cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
| CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
| CPU_CONTROL_BPRD_ENABLE;
cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
| CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
| CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
| CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
| CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
CPU_CONTROL_L2_ENABLE;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
#ifdef __ARMEB__
cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
if (vector_page == ARM_VECTORS_HIGH)
cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif
/* Clear out the cache */
cpu_idcache_wbinv_all();
/*
* Set the control register. Note that bits 6:3 must always
* be set to 1.
*/
/* cpu_control(cpuctrlmask, cpuctrl);*/
cpu_control(0xffffffff, cpuctrl);
/* Make sure write coalescing is turned on */
__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
auxctl |= XSCALE_AUXCTL_K;
#else
auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
auxctl |= XSCALE_AUXCTL_LLR;
auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
: : "r" (auxctl));
}
#endif /* CPU_XSCALE_PXA2X0 */

View File

@ -34,23 +34,6 @@
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
/*
* Functions to set the MMU Translation Table Base register
*
* We need to clean and flush the cache as it uses virtual
* addresses that are about to change.
*/
ENTRY(arm9_setttb)
stmfd sp!, {r0, lr}
bl _C_LABEL(arm9_idcache_wbinv_all)
ldmfd sp!, {r0, lr}
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
mov pc, lr
END(arm9_setttb)
/*
* TLB functions
*/
@ -60,160 +43,6 @@ ENTRY(arm9_tlb_flushID_SE)
mov pc, lr
END(arm9_tlb_flushID_SE)
/*
* Cache operations. For the entire cache we use the set/index
* operations.
*/
s_max .req r0
i_max .req r1
s_inc .req r2
i_inc .req r3
ENTRY_NP(arm9_icache_sync_range)
ldr ip, .Larm9_line_size
cmp r1, #0x4000
bcs .Larm9_icache_sync_all
ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
.Larm9_sync_next:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bhi .Larm9_sync_next
mov pc, lr
.Larm9_icache_sync_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache cleaning code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
/* Fall through to clean Dcache. */
.Larm9_dcache_wb:
ldr ip, .Larm9_cache_data
ldmia ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set:
orr ip, s_max, i_max
.Lnext_index:
mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
subs ip, ip, i_inc
bhs .Lnext_index /* Next index */
subs s_max, s_max, s_inc
bhs .Lnext_set /* Next set */
mov pc, lr
END(arm9_icache_sync_range)
.Larm9_line_size:
.word _C_LABEL(arm_pdcache_line_size)
ENTRY(arm9_dcache_wb_range)
ldr ip, .Larm9_line_size
cmp r1, #0x4000
bcs .Larm9_dcache_wb
ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
.Larm9_wb_next:
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bhi .Larm9_wb_next
mov pc, lr
END(arm9_dcache_wb_range)
ENTRY(arm9_dcache_wbinv_range)
ldr ip, .Larm9_line_size
cmp r1, #0x4000
bcs .Larm9_dcache_wbinv_all
ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
.Larm9_wbinv_next:
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bhi .Larm9_wbinv_next
mov pc, lr
END(arm9_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
* must use wb-inv of the entire cache.
*/
ENTRY(arm9_dcache_inv_range)
ldr ip, .Larm9_line_size
cmp r1, #0x4000
bcs .Larm9_dcache_wbinv_all
ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
.Larm9_inv_next:
mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bhi .Larm9_inv_next
mov pc, lr
END(arm9_dcache_inv_range)
ENTRY(arm9_idcache_wbinv_range)
ldr ip, .Larm9_line_size
cmp r1, #0x4000
bcs .Larm9_idcache_wbinv_all
ldr ip, [ip]
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
.Larm9_id_wbinv_next:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bhi .Larm9_id_wbinv_next
mov pc, lr
END(arm9_idcache_wbinv_range)
ENTRY_NP(arm9_idcache_wbinv_all)
.Larm9_idcache_wbinv_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache purging code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
/* Fall through */
EENTRY(arm9_dcache_wbinv_all)
.Larm9_dcache_wbinv_all:
ldr ip, .Larm9_cache_data
ldmia ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set_inv:
orr ip, s_max, i_max
.Lnext_index_inv:
mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
subs ip, ip, i_inc
bhs .Lnext_index_inv /* Next index */
subs s_max, s_max, s_inc
bhs .Lnext_set_inv /* Next set */
mov pc, lr
EEND(arm9_dcache_wbinv_all)
END(arm9_idcache_wbinv_all)
.Larm9_cache_data:
.word _C_LABEL(arm9_dcache_sets_max)
/*
* Context switch.
*
@ -238,24 +67,3 @@ ENTRY(arm9_context_switch)
nop
mov pc, lr
END(arm9_context_switch)
.bss
/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x) _DATA_OBJECT(_C_LABEL(x))
/*
* Parameters for the cache cleaning code. Note that the order of these
* four variables is assumed in the code above. Hence the reason for
* declaring them in the assembler file.
*/
.align 2
C_OBJECT(arm9_dcache_sets_max)
.space 4
C_OBJECT(arm9_dcache_index_max)
.space 4
C_OBJECT(arm9_dcache_sets_inc)
.space 4
C_OBJECT(arm9_dcache_index_inc)
.space 4

View File

@ -216,49 +216,10 @@ u_int cpu_faultaddress (void);
u_int cpu_get_control (void);
u_int cpu_pfr (int);
#if defined(CPU_FA526)
void fa526_setup (void);
void fa526_setttb (u_int ttb);
void fa526_context_switch (void);
void fa526_cpu_sleep (int);
void fa526_tlb_flushID_SE (u_int);
void fa526_icache_sync_range(vm_offset_t start, vm_size_t end);
void fa526_dcache_wbinv_all (void);
void fa526_dcache_wbinv_range(vm_offset_t start, vm_size_t end);
void fa526_dcache_inv_range (vm_offset_t start, vm_size_t end);
void fa526_dcache_wb_range (vm_offset_t start, vm_size_t end);
void fa526_idcache_wbinv_all(void);
void fa526_idcache_wbinv_range(vm_offset_t start, vm_size_t end);
#endif
#if defined(CPU_ARM9) || defined(CPU_ARM9E)
void arm9_setttb (u_int);
#if defined(CPU_ARM9E)
void arm9_tlb_flushID_SE (u_int va);
void arm9_context_switch (void);
#endif
#if defined(CPU_ARM9)
void arm9_icache_sync_range (vm_offset_t, vm_size_t);
void arm9_dcache_wbinv_all (void);
void arm9_dcache_wbinv_range (vm_offset_t, vm_size_t);
void arm9_dcache_inv_range (vm_offset_t, vm_size_t);
void arm9_dcache_wb_range (vm_offset_t, vm_size_t);
void arm9_idcache_wbinv_all (void);
void arm9_idcache_wbinv_range (vm_offset_t, vm_size_t);
void arm9_setup (void);
extern unsigned arm9_dcache_sets_max;
extern unsigned arm9_dcache_sets_inc;
extern unsigned arm9_dcache_index_max;
extern unsigned arm9_dcache_index_inc;
#endif
#if defined(CPU_ARM9E)
void arm10_setup (void);
u_int sheeva_control_ext (u_int, u_int);
@ -303,11 +264,6 @@ void armv5_ec_dcache_wb_range(vm_offset_t, vm_size_t);
void armv5_ec_idcache_wbinv_all(void);
void armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t);
#endif
#if defined(CPU_ARM9) || defined(CPU_ARM9E) || \
defined(CPU_FA526) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342)
void armv4_tlb_flushID (void);
void armv4_tlb_flushD (void);
@ -317,71 +273,6 @@ void armv4_drain_writebuf (void);
void armv4_idcache_inv_all (void);
#endif
#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342)
void xscale_cpwait (void);
void xscale_cpu_sleep (int mode);
u_int xscale_control (u_int clear, u_int bic);
void xscale_setttb (u_int ttb);
void xscale_tlb_flushID_SE (u_int va);
void xscale_cache_flushID (void);
void xscale_cache_flushI (void);
void xscale_cache_flushD (void);
void xscale_cache_flushD_SE (u_int entry);
void xscale_cache_cleanID (void);
void xscale_cache_cleanD (void);
void xscale_cache_cleanD_E (u_int entry);
void xscale_cache_clean_minidata (void);
void xscale_cache_purgeID (void);
void xscale_cache_purgeID_E (u_int entry);
void xscale_cache_purgeD (void);
void xscale_cache_purgeD_E (u_int entry);
void xscale_cache_syncI (void);
void xscale_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
void xscale_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
void xscale_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
void xscale_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
void xscale_cache_syncI_rng (vm_offset_t start, vm_size_t end);
void xscale_cache_flushD_rng (vm_offset_t start, vm_size_t end);
void xscale_context_switch (void);
void xscale_setup (void);
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_81342
void xscalec3_l2cache_purge (void);
void xscalec3_cache_purgeID (void);
void xscalec3_cache_purgeD (void);
void xscalec3_cache_cleanID (void);
void xscalec3_cache_cleanD (void);
void xscalec3_cache_syncI (void);
void xscalec3_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
void xscalec3_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
void xscalec3_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
void xscalec3_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
void xscalec3_cache_syncI_rng (vm_offset_t start, vm_size_t end);
void xscalec3_l2cache_flush_rng (vm_offset_t, vm_size_t);
void xscalec3_l2cache_clean_rng (vm_offset_t start, vm_size_t end);
void xscalec3_l2cache_purge_rng (vm_offset_t start, vm_size_t end);
void xscalec3_setttb (u_int ttb);
void xscalec3_context_switch (void);
#endif /* CPU_XSCALE_81342 */
/*
* Macros for manipulating CPU interrupts
*/