Merge support from p4 (from NetBSD) for the arm9e, arm10, and arm11 cores. Not yet connected to the build, but reduces diffs to the p4 repo.

Obtained from: NetBSD
Warner Losh 2007-10-18 05:33:06 +00:00
parent dfb7d4cdef
commit 63b2597849
7 changed files with 834 additions and 22 deletions


@@ -296,6 +296,64 @@ struct cpu_functions arm9_cpufuncs = {
};
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
/* CPU functions */
cpufunc_id, /* id */
cpufunc_nullop, /* cpwait */
/* MMU functions */
cpufunc_control, /* control */
cpufunc_domains, /* Domain */
armv5_ec_setttb, /* Setttb */
cpufunc_faultstatus, /* Faultstatus */
cpufunc_faultaddress, /* Faultaddress */
/* TLB functions */
armv4_tlb_flushID, /* tlb_flushID */
arm10_tlb_flushID_SE, /* tlb_flushID_SE */
armv4_tlb_flushI, /* tlb_flushI */
arm10_tlb_flushI_SE, /* tlb_flushI_SE */
armv4_tlb_flushD, /* tlb_flushD */
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
/* Cache operations */
armv5_ec_icache_sync_all, /* icache_sync_all */
armv5_ec_icache_sync_range, /* icache_sync_range */
armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
/*XXX*/ armv5_ec_dcache_wbinv_range, /* dcache_inv_range */
armv5_ec_dcache_wb_range, /* dcache_wb_range */
armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
/* Other functions */
cpufunc_nullop, /* flush_prefetchbuf */
armv4_drain_writebuf, /* drain_writebuf */
cpufunc_nullop, /* flush_brnchtgt_C */
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
(void *)cpufunc_nullop, /* sleep */
/* Soft functions */
cpufunc_null_fixup, /* dataabt_fixup */
cpufunc_null_fixup, /* prefetchabt_fixup */
arm10_context_switch, /* context_switch */
arm10_setup /* cpu setup */
};
#endif /* CPU_ARM9E || CPU_ARM10 */
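For context, nothing calls these tables directly: set_cpufuncs() copies the matching instance into the global cpufuncs, and the rest of the kernel dispatches through wrapper macros. A minimal sketch of that indirection, assuming the cf_-prefixed field names used by cpufunc.h (illustrative, not part of this diff):

extern struct cpu_functions cpufuncs;

/* Each cpu_*() wrapper calls through the table selected at boot. */
#define	cpu_setttb(t)		cpufuncs.cf_setttb(t)
#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()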
#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
/* CPU functions */
@@ -869,6 +927,16 @@ set_cpufuncs()
goto out;
}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
if (cputype == CPU_ID_ARM926EJS ||
cputype == CPU_ID_ARM1026EJS) {
cpufuncs = armv5_ec_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
get_cachetype_cp15();
pmap_pte_init_generic();
return 0;
}
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
if (/* cputype == CPU_ID_ARM1020T || */
cputype == CPU_ID_ARM1020E) {
@@ -1434,10 +1502,12 @@ late_abort_fixup(arg)
*/
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
defined(CPU_ARM9E) || \
defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
defined(CPU_ARM10) || defined(CPU_ARM11)
#define IGN 0
#define OR 1
@@ -1679,7 +1749,7 @@ arm9_setup(args)
}
#endif /* CPU_ARM9 */
#ifdef CPU_ARM10
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_option arm10_options[] = {
{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
@@ -1722,7 +1792,7 @@ arm10_setup(args)
cpu_idcache_wbinv_all();
/* Now really make sure they are clean. */
asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
/* Set the control register */
ctrl = cpuctrl;
@@ -1731,7 +1801,57 @@ arm10_setup(args)
/* And again. */
cpu_idcache_wbinv_all();
}
#endif /* CPU_ARM10 */
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM11
struct cpu_option arm11_options[] = {
{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
{ "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
{ "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
{ "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
{ NULL, IGN, IGN, 0 }
};
void
arm11_setup(args)
char *args;
{
int cpuctrl, cpuctrlmask;
cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
/* | CPU_CONTROL_BPRD_ENABLE */;
cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
| CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
| CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
#ifdef __ARMEB__
cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
/* Clear out the cache */
cpu_idcache_wbinv_all();
/* Now really make sure they are clean. */
__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
/* Set the control register */
curcpu()->ci_ctrl = cpuctrl;
cpu_control(0xffffffff, cpuctrl);
/* And again. */
cpu_idcache_wbinv_all();
}
#endif /* CPU_ARM11 */
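The option tables above encode a small boot-argument scheme: each entry names a boolean argument, and its truth value selects either co_trueop or co_falseop, which then ORs the mask into the control word or BICs (clears) it out. A condensed sketch of how parse_cpu_options() applies one entry; apply_cpu_option() is a hypothetical helper and the boot-argument lookup is left abstract:

struct cpu_option {
	char	*co_name;	/* boot argument, e.g. "cpu.nocache" */
	int	co_falseop;	/* op to use when the argument is false */
	int	co_trueop;	/* op to use when the argument is true */
	int	co_value;	/* CPU_CONTROL_* mask to apply */
};

static u_int
apply_cpu_option(u_int cpuctrl, const struct cpu_option *co, int value)
{
	int op = value ? co->co_trueop : co->co_falseop;

	if (op == OR)
		cpuctrl |= co->co_value;
	else if (op == BIC)
		cpuctrl &= ~co->co_value;
	return (cpuctrl);	/* IGN falls through, leaving cpuctrl alone */
}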
#ifdef CPU_SA110
struct cpu_option sa110_options[] = {


@@ -0,0 +1,124 @@
/* $NetBSD: cpufunc_asm_arm11.S,v 1.2 2005/12/11 12:16:41 christos Exp $ */
/*
* Copyright (c) 2002, 2005 ARM Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARM11 assembly functions for CPU / MMU / TLB specific operations
*
* XXX We make no attempt at present to take advantage of the v6 memory
* architecture or physically tagged cache.
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
/*
* Functions to set the MMU Translation Table Base register
*
* We need to clean and flush the cache as it uses virtual
* addresses that are about to change.
*/
ENTRY(arm11_setttb)
stmfd sp!, {r0, lr}
bl _C_LABEL(armv5_idcache_wbinv_all)
ldmfd sp!, {r0, lr}
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
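Read at the C level, the routine writes back and invalidates both caches while the old mapping is still live, then installs the new table and flushes the now-stale TLBs. A pseudocode rendering, where everything but armv5_idcache_wbinv_all() is a stand-in name for the MCR it annotates:

	armv5_idcache_wbinv_all();	/* caches are virtually indexed */
	write_ttb(new_ttb);		/* c2, c0, 0: load new TTB */
	invalidate_id_tlbs();		/* c8, c7, 0: I+D TLBs are stale */
	drain_write_buffer();		/* c7, c10, 4 */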
/*
* TLB functions
*/
ENTRY(arm11_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
ENTRY(arm11_tlb_flushI_SE)
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
/*
* Context switch.
*
* These are the CPU-specific parts of the context switcher cpu_switch().
* These functions actually perform the TTB reload.
*
* NOTE: Special calling convention
* r1, r4-r13 must be preserved
*/
ENTRY(arm11_context_switch)
/*
* We can assume that the caches will only contain kernel addresses
* at this point. So no need to flush them again.
*/
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */
mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */
/* Paranoia -- make sure the pipeline is empty. */
nop
nop
nop
RET
/*
* TLB functions
*/
ENTRY(arm11_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
ENTRY(arm11_tlb_flushI)
mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
ENTRY(arm11_tlb_flushD)
mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
ENTRY(arm11_tlb_flushD_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
/*
* Other functions
*/
ENTRY(arm11_drain_writebuf)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr


@@ -0,0 +1,238 @@
/* $NetBSD: cpufunc_asm_armv5.S,v 1.3 2007/01/06 00:50:54 christos Exp $ */
/*
* Copyright (c) 2002, 2005 ARM Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARMv5 assembly functions for manipulating caches.
* These routines can be used by any core that supports the set/index
* operations.
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
/*
* Functions to set the MMU Translation Table Base register
*
* We need to clean and flush the cache as it uses virtual
* addresses that are about to change.
*/
ENTRY(armv5_setttb)
stmfd sp!, {r0, lr}
bl _C_LABEL(armv5_idcache_wbinv_all)
ldmfd sp!, {r0, lr}
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
/*
* Cache operations. For the entire cache we use the set/index
* operations.
*/
s_max .req r0
i_max .req r1
s_inc .req r2
i_inc .req r3
ENTRY_NP(armv5_icache_sync_range)
ldr ip, .Larmv5_line_size
cmp r1, #0x4000
bcs .Larmv5_icache_sync_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
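The five-instruction preamble here (repeated in every range operation in this file) rounds the start address down to a cache-line boundary and grows the length by the bytes that rounding exposed, so the loop still covers every line the original [va, va+len) touched. The same logic in C, with clean_line() a stand-in for the per-line MCRs:

	u_int mask = line_size - 1;	/* line_size is what ip holds */

	len = (len - 1) + (va & mask);	/* don't overrun; absorb the skew */
	va &= ~mask;			/* align start down to a line */
	do {
		clean_line(va);		/* one cache line per iteration */
		va += line_size;
		len -= line_size;
	} while ((int)len >= 0);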
ENTRY_NP(armv5_icache_sync_all)
.Larmv5_icache_sync_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache cleaning code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
/* Fall through to clean Dcache. */
.Larmv5_dcache_wb:
ldr ip, .Larmv5_cache_data
ldmia ip, {s_max, i_max, s_inc, i_inc}
1:
orr ip, s_max, i_max
2:
mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
sub ip, ip, i_inc
tst ip, i_max /* Index 0 is last one */
bne 2b /* Next index */
mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
subs s_max, s_max, s_inc
bpl 1b /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
.Larmv5_line_size:
.word _C_LABEL(arm_pdcache_line_size)
ENTRY(armv5_dcache_wb_range)
ldr ip, .Larmv5_line_size
cmp r1, #0x4000
bcs .Larmv5_dcache_wb
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(armv5_dcache_wbinv_range)
ldr ip, .Larmv5_line_size
cmp r1, #0x4000
bcs .Larmv5_dcache_wbinv_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
/*
* Note, we must not invalidate everything. If the range is too big we
* must use wb-inv of the entire cache.
*/
ENTRY(armv5_dcache_inv_range)
ldr ip, .Larmv5_line_size
cmp r1, #0x4000
bcs .Larmv5_dcache_wbinv_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(armv5_idcache_wbinv_range)
ldr ip, .Larmv5_line_size
cmp r1, #0x4000
bcs .Larmv5_idcache_wbinv_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY_NP(armv5_idcache_wbinv_all)
.Larmv5_idcache_wbinv_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache purging code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
/* Fall through to purge Dcache. */
ENTRY(armv5_dcache_wbinv_all)
.Larmv5_dcache_wbinv_all:
ldr ip, .Larmv5_cache_data
ldmia ip, {s_max, i_max, s_inc, i_inc}
1:
orr ip, s_max, i_max
2:
mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
sub ip, ip, i_inc
tst ip, i_max /* Index 0 is last one */
bne 2b /* Next index */
mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
subs s_max, s_max, s_inc
bpl 1b /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
.Larmv5_cache_data:
.word _C_LABEL(armv5_dcache_sets_max)
.bss
/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x) _DATA_OBJECT(_C_LABEL(x))
/*
* Parameters for the cache cleaning code. Note that the order of these
* four variables is assumed in the code above. Hence the reason for
* declaring them in the assembler file.
*/
.align 0
C_OBJECT(armv5_dcache_sets_max)
.space 4
C_OBJECT(armv5_dcache_index_max)
.space 4
C_OBJECT(armv5_dcache_sets_inc)
.space 4
C_OBJECT(armv5_dcache_index_inc)
.space 4
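These four variables feed the set/index walk in armv5_icache_sync_all and armv5_dcache_wbinv_all: the maxima and increments are stored pre-shifted into their CP15 operand positions, so a single OR composes each set/index operand and plain subtraction steps through both dimensions. The loop, rendered in C with clean_line_si() standing in for the MCR:

	do {
		u_int op = s_max | i_max;
		do {
			clean_line_si(op);	/* one line, by set/index */
			op -= i_inc;
		} while (op & i_max);		/* index 0 is the last one */
		clean_line_si(op);		/* ...and is done here */
		s_max -= s_inc;
	} while ((int)s_max >= 0);		/* next set */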


@@ -0,0 +1,206 @@
/* $NetBSD: cpufunc_asm_armv5_ec.S,v 1.1 2007/01/06 00:50:54 christos Exp $ */
/*
* Copyright (c) 2002, 2005 ARM Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARMv5 assembly functions for manipulating caches.
* These routines can be used by any core that supports both the set/index
* operations and the test and clean operations for efficiently cleaning the
* entire DCache. If a core does not have the test and clean operations, but
* does have the set/index operations, use the routines in cpufunc_asm_armv5.S.
* This source was derived from that file.
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
/*
* Functions to set the MMU Translation Table Base register
*
* We need to clean and flush the cache as it uses virtual
* addresses that are about to change.
*/
ENTRY(armv5_ec_setttb)
/*
* Some other ARM ports save registers on the stack, call the
* idcache_wbinv_all function and then restore the registers from the
* stack before setting the TTB. I observed that this caused a
* problem when the old and new translation table entries' buffering
* bits were different. If I saved the registers in other registers
* or invalidated the caches when I returned from idcache_wbinv_all,
* it worked fine. If not, I ended up executing at an invalid PC.
* For armv5_ec_setttb, the idcache_wbinv_all is simple enough, I just
* do it directly and entirely avoid the problem.
*/
mcr p15, 0, r0, c7, c5, 0 /* Invalidate ICache */
1: mrc p15, 0, r15, c7, c14, 3 /* Test, clean and invalidate DCache */
bne 1b /* More to do? */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
/*
* Cache operations. For the entire cache we use the enhanced cache
* operations.
*/
ENTRY_NP(armv5_ec_icache_sync_range)
ldr ip, .Larmv5_ec_line_size
cmp r1, #0x4000
bcs .Larmv5_ec_icache_sync_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY_NP(armv5_ec_icache_sync_all)
.Larmv5_ec_icache_sync_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache cleaning code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
/* Fall through to clean Dcache. */
.Larmv5_ec_dcache_wb:
1:
mrc p15, 0, r15, c7, c10, 3 /* Test and clean (don't invalidate) */
bne 1b /* More to do? */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
.Larmv5_ec_line_size:
.word _C_LABEL(arm_pdcache_line_size)
ENTRY(armv5_ec_dcache_wb_range)
ldr ip, .Larmv5_ec_line_size
cmp r1, #0x4000
bcs .Larmv5_ec_dcache_wb
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(armv5_ec_dcache_wbinv_range)
ldr ip, .Larmv5_ec_line_size
cmp r1, #0x4000
bcs .Larmv5_ec_dcache_wbinv_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
/*
* Note, we must not invalidate everything. If the range is too big we
* must use wb-inv of the entire cache.
*/
ENTRY(armv5_ec_dcache_inv_range)
ldr ip, .Larmv5_ec_line_size
cmp r1, #0x4000
bcs .Larmv5_ec_dcache_wbinv_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(armv5_ec_idcache_wbinv_range)
ldr ip, .Larmv5_ec_line_size
cmp r1, #0x4000
bcs .Larmv5_ec_idcache_wbinv_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY_NP(armv5_ec_idcache_wbinv_all)
.Larmv5_ec_idcache_wbinv_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache purging code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Invalidate ICache */
/* Fall through to purge Dcache. */
ENTRY(armv5_ec_dcache_wbinv_all)
.Larmv5_ec_dcache_wbinv_all:
1: mrc p15, 0, r15, c7, c14, 3 /* Test, clean and invalidate DCache */
bne 1b /* More to do? */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
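The "test and clean" MRCs used throughout this file (c7, c10, 3 and c7, c14, 3, with r15 as the destination so the condition flags receive the result) are what replace the whole set/index walk of cpufunc_asm_armv5.S: each execution cleans (or cleans and invalidates) one dirty line and reports whether dirty lines remain. The loops therefore reduce to the following sketch, where the helper name is illustrative:

	/* MRC p15, 0, r15, c7, c14, 3 per iteration */
	while (test_clean_invalidate_one_dcache_line())
		continue;		/* flags said: more dirty lines */
	drain_write_buffer();		/* MCR p15, 0, r0, c7, c10, 4 */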


@@ -70,9 +70,12 @@ enum cpu_class {
CPU_CLASS_ARM8,
CPU_CLASS_ARM9TDMI,
CPU_CLASS_ARM9ES,
CPU_CLASS_ARM9EJS,
CPU_CLASS_ARM10E,
CPU_CLASS_ARM10EJ,
CPU_CLASS_SA1,
CPU_CLASS_XSCALE
CPU_CLASS_XSCALE,
CPU_CLASS_ARM11J
};
static const char * const generic_steppings[16] = {
@@ -119,6 +122,13 @@ static const char * const xscale_steppings[16] = {
"rev 12", "rev 13", "rev 14", "rev 15",
};
static const char * const i80219_steppings[16] = {
"step A-0", "rev 1", "rev 2", "rev 3",
"rev 4", "rev 5", "rev 6", "rev 7",
"rev 8", "rev 9", "rev 10", "rev 11",
"rev 12", "rev 13", "rev 14", "rev 15",
};
static const char * const i80321_steppings[16] = {
"step A-0", "step B-0", "rev 2", "rev 3",
"rev 4", "rev 5", "rev 6", "rev 7",
@@ -133,6 +143,7 @@ static const char * const i81342_steppings[16] = {
"rev 12", "rev 13", "rev 14", "rev 15",
};
/* Steppings for PXA2[15]0 */
static const char * const pxa2x0_steppings[16] = {
"step A-0", "step A-1", "step B-0", "step B-1",
"step B-2", "step C-0", "rev 6", "rev 7",
@@ -140,6 +151,24 @@ static const char * const pxa2x0_steppings[16] = {
"rev 12", "rev 13", "rev 14", "rev 15",
};
/* Steppings for PXA255/26x.
* rev 5: PXA26x B0, rev 6: PXA255 A0
*/
static const char * const pxa255_steppings[16] = {
"rev 0", "rev 1", "rev 2", "step A-0",
"rev 4", "step B-0", "step A-0", "rev 7",
"rev 8", "rev 9", "rev 10", "rev 11",
"rev 12", "rev 13", "rev 14", "rev 15",
};
/* Stepping for PXA27x */
static const char * const pxa27x_steppings[16] = {
"step A-0", "step A-1", "step B-0", "step B-1",
"step C-0", "rev 5", "rev 6", "rev 7",
"rev 8", "rev 9", "rev 10", "rev 11",
"rev 12", "rev 13", "rev 14", "rev 15",
};
static const char * const ixp425_steppings[16] = {
"step 0 (A0)", "rev 1 (ARMv5TE)", "rev 2", "rev 3",
"rev 4", "rev 5", "rev 6", "rev 7",
@@ -198,6 +227,8 @@ const struct cpuidtab cpuids[] = {
generic_steppings },
{ CPU_ID_ARM922T, CPU_CLASS_ARM9TDMI, "ARM922T",
generic_steppings },
{ CPU_ID_ARM926EJS, CPU_CLASS_ARM9EJS, "ARM926EJ-S",
generic_steppings },
{ CPU_ID_ARM940T, CPU_CLASS_ARM9TDMI, "ARM940T",
generic_steppings },
{ CPU_ID_ARM946ES, CPU_CLASS_ARM9ES, "ARM946E-S",
@@ -213,6 +244,8 @@ const struct cpuidtab cpuids[] = {
generic_steppings },
{ CPU_ID_ARM1022ES, CPU_CLASS_ARM10E, "ARM1022E-S",
generic_steppings },
{ CPU_ID_ARM1026EJS, CPU_CLASS_ARM10EJ, "ARM1026EJ-S",
generic_steppings },
{ CPU_ID_SA110, CPU_CLASS_SA1, "SA-110",
sa110_steppings },
@@ -240,11 +273,12 @@ const struct cpuidtab cpuids[] = {
i81342_steppings },
{ CPU_ID_80219_400, CPU_CLASS_XSCALE, "i80219 400MHz",
xscale_steppings },
i80219_steppings },
{ CPU_ID_80219_600, CPU_CLASS_XSCALE, "i80219 600MHz",
xscale_steppings },
i80219_steppings },
{ CPU_ID_PXA27X, CPU_CLASS_XSCALE, "PXA27x",
pxa27x_steppings },
{ CPU_ID_PXA250A, CPU_CLASS_XSCALE, "PXA250",
pxa2x0_steppings },
{ CPU_ID_PXA210A, CPU_CLASS_XSCALE, "PXA210",
@@ -253,8 +287,8 @@ const struct cpuidtab cpuids[] = {
pxa2x0_steppings },
{ CPU_ID_PXA210B, CPU_CLASS_XSCALE, "PXA210",
pxa2x0_steppings },
{ CPU_ID_PXA250C, CPU_CLASS_XSCALE, "PXA250",
pxa2x0_steppings },
{ CPU_ID_PXA250C, CPU_CLASS_XSCALE, "PXA255",
pxa255_steppings },
{ CPU_ID_PXA210C, CPU_CLASS_XSCALE, "PXA210",
pxa2x0_steppings },
@@ -265,6 +299,11 @@ const struct cpuidtab cpuids[] = {
{ CPU_ID_IXP425_266, CPU_CLASS_XSCALE, "IXP425 266MHz",
ixp425_steppings },
{ CPU_ID_ARM1136JS, CPU_CLASS_ARM11J, "ARM1136J-S",
generic_steppings },
{ CPU_ID_ARM1136JSR1, CPU_CLASS_ARM11J, "ARM1136J-S R1",
generic_steppings },
{ 0, CPU_CLASS_NONE, NULL, NULL }
};
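For reference, identify_arm_cpu() matches the CPU ID register against this table with the low revision bits masked off; those bits instead index the entry's steppings array. A sketch of the match loop, assuming the CPU_ID_CPU_MASK/CPU_ID_REVISION_MASK names from armreg.h and the obvious field names:

	for (i = 0; cpuids[i].cpuid != 0; i++)
		if (cpuids[i].cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			cpu_class = cpuids[i].cpu_class;
			printf("CPU: %s %s (%s core)\n", cpuids[i].cpu_name,
			    cpuids[i].cpu_steppings[cpuid &
			    CPU_ID_REVISION_MASK],
			    cpu_classes[cpu_class].class_name);
			break;
		}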
@@ -283,10 +322,13 @@ const struct cpu_classtab cpu_classes[] = {
{ "ARM7TDMI", "CPU_ARM7TDMI" }, /* CPU_CLASS_ARM7TDMI */
{ "ARM8", "CPU_ARM8" }, /* CPU_CLASS_ARM8 */
{ "ARM9TDMI", "CPU_ARM9TDMI" }, /* CPU_CLASS_ARM9TDMI */
{ "ARM9E-S", NULL }, /* CPU_CLASS_ARM9ES */
{ "ARM9E-S", "CPU_ARM9E" }, /* CPU_CLASS_ARM9ES */
{ "ARM9EJ-S", "CPU_ARM9E" }, /* CPU_CLASS_ARM9EJS */
{ "ARM10E", "CPU_ARM10" }, /* CPU_CLASS_ARM10E */
{ "ARM10EJ", "CPU_ARM10" }, /* CPU_CLASS_ARM10EJ */
{ "SA-1", "CPU_SA110" }, /* CPU_CLASS_SA1 */
{ "XScale", "CPU_XSCALE_..." }, /* CPU_CLASS_XSCALE */
{ "ARM11J", "CPU_ARM11" }, /* CPU_CLASS_ARM11J */
};
/*
@@ -310,7 +352,7 @@ static const char * const wtnames[] = {
"**unknown 11**",
"**unknown 12**",
"**unknown 13**",
"**unknown 14**",
"write-back-locking-C",
"**unknown 15**",
};
@@ -363,9 +405,13 @@ identify_arm_cpu(void)
printf(" IDC enabled");
break;
case CPU_CLASS_ARM9TDMI:
case CPU_CLASS_ARM9ES:
case CPU_CLASS_ARM9EJS:
case CPU_CLASS_ARM10E:
case CPU_CLASS_ARM10EJ:
case CPU_CLASS_SA1:
case CPU_CLASS_XSCALE:
case CPU_CLASS_ARM11J:
if ((ctrl & CPU_CONTROL_DC_ENABLE) == 0)
printf(" DC disabled");
else


@@ -50,12 +50,22 @@
/*
* Step 1: Count the number of CPU types configured into the kernel.
*/
#define CPU_NTYPES 2
#define CPU_NTYPES (defined(CPU_ARM7TDMI) + \
defined(CPU_ARM8) + defined(CPU_ARM9) + \
defined(CPU_ARM9E) + \
defined(CPU_ARM10) + \
defined(CPU_ARM11) + \
defined(CPU_SA110) + defined(CPU_SA1100) + \
defined(CPU_SA1110) + \
defined(CPU_IXP12X0) + \
defined(CPU_XSCALE_80200) + \
defined(CPU_XSCALE_80321) + \
defined(__CPU_XSCALE_PXA2XX) + \
defined(CPU_XSCALE_IXP425))
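One caveat on the construction: defined(X) is only meaningful inside an #if/#elif expression, where it evaluates to 1 or 0, so the sum above only works when CPU_NTYPES is itself expanded in such an expression (strictly, defined() produced by macro expansion is undefined behavior, though GCC accepts it). An illustrative, hypothetical use:

#if CPU_NTYPES == 1
/* Exactly one core configured: the cpufuncs indirection could be
   resolved statically. */
#endif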
/*
* Step 2: Determine which ARM architecture versions are configured.
*/
#if (defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
defined(CPU_IXP12X0) || defined(CPU_XSCALE_IXP425))
@@ -64,19 +74,34 @@
#define ARM_ARCH_4 0
#endif
#if (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
defined(CPU_XSCALE_PXA2X0)) || defined(CPU_ARM10)
#if (defined(CPU_ARM9E) || defined(CPU_ARM10) || \
defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
defined(CPU_XSCALE_PXA2X0))
#define ARM_ARCH_5 1
#else
#define ARM_ARCH_5 0
#endif
#define ARM_NARCH (ARM_ARCH_4 + ARM_ARCH_5)
#if defined(CPU_ARM11)
#define ARM_ARCH_6 1
#else
#define ARM_ARCH_6 0
#endif
#define ARM_NARCH (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6)
#if ARM_NARCH == 0 && !defined(KLD_MODULE) && defined(_KERNEL)
#error ARM_NARCH is 0
#endif
#if ARM_ARCH_5 || ARM_ARCH_6
/*
* We could support Thumb code on v4T, but the lack of clean interworking
* makes that hard.
*/
#define THUMB_CODE
#endif
/*
* Step 3: Define which MMU classes are configured:
*
@@ -99,7 +124,8 @@
#endif
#if (defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM10))
defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM9E) || \
defined(CPU_ARM10) || defined(CPU_ARM11))
#define ARM_MMU_GENERIC 1
#else
#define ARM_MMU_GENERIC 0


@@ -351,7 +351,7 @@ extern unsigned arm9_dcache_index_max;
extern unsigned arm9_dcache_index_inc;
#endif
#ifdef CPU_ARM10
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
void arm10_setttb (u_int);
void arm10_tlb_flushID_SE (u_int);
@@ -378,8 +378,60 @@ extern unsigned arm10_dcache_index_inc;
extern unsigned arm10_dcache_index_inc;
#endif
#if defined(CPU_ARM9) || defined(CPU_ARM10) || defined(CPU_SA110) || \
defined(CPU_SA1100) || defined(CPU_SA1110) || \
#ifdef CPU_ARM11
void arm11_setttb (u_int);
void arm11_tlb_flushID_SE (u_int);
void arm11_tlb_flushI_SE (u_int);
void arm11_context_switch (void);
void arm11_setup (char *string);
void arm11_tlb_flushID (void);
void arm11_tlb_flushI (void);
void arm11_tlb_flushD (void);
void arm11_tlb_flushD_SE (u_int va);
void arm11_drain_writebuf (void);
#endif
#if defined(CPU_ARM9E) || defined (CPU_ARM10)
void armv5_ec_setttb(u_int);
void armv5_ec_icache_sync_all(void);
void armv5_ec_icache_sync_range(vm_offset_t, vm_size_t);
void armv5_ec_dcache_wbinv_all(void);
void armv5_ec_dcache_wbinv_range(vm_offset_t, vm_size_t);
void armv5_ec_dcache_inv_range(vm_offset_t, vm_size_t);
void armv5_ec_dcache_wb_range(vm_offset_t, vm_size_t);
void armv5_ec_idcache_wbinv_all(void);
void armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t);
#endif
#if defined (CPU_ARM10) || defined (CPU_ARM11)
void armv5_setttb(u_int);
void armv5_icache_sync_all(void);
void armv5_icache_sync_range(vm_offset_t, vm_size_t);
void armv5_dcache_wbinv_all(void);
void armv5_dcache_wbinv_range(vm_offset_t, vm_size_t);
void armv5_dcache_inv_range(vm_offset_t, vm_size_t);
void armv5_dcache_wb_range(vm_offset_t, vm_size_t);
void armv5_idcache_wbinv_all(void);
void armv5_idcache_wbinv_range(vm_offset_t, vm_size_t);
extern unsigned armv5_dcache_sets_max;
extern unsigned armv5_dcache_sets_inc;
extern unsigned armv5_dcache_index_max;
extern unsigned armv5_dcache_index_inc;
#endif
#if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) || \
defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)