Remove remaining fragments of code for older, already-discontinued ARM versions.

This commit is contained in:
Michal Meloun 2020-11-29 15:24:00 +00:00
parent 0879a64283
commit 13a3f95057
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=368154
9 changed files with 0 additions and 1017 deletions

View File

@@ -80,9 +80,6 @@ u_int arm_cache_level;
u_int arm_cache_type[14];
u_int arm_cache_loc;
#if defined(CPU_ARM9E)
static void arm10_setup(void);
#endif
#ifdef CPU_MV_PJ4B
static void pj4bv7_setup(void);
#endif
@@ -93,107 +90,6 @@ static void arm11x6_setup(void);
static void cortexa_setup(void);
#endif
#if defined(CPU_ARM9E)
struct cpu_functions armv5_ec_cpufuncs = {
/* CPU functions */
cpufunc_nullop, /* cpwait */
/* MMU functions */
cpufunc_control, /* control */
armv5_ec_setttb, /* Setttb */
/* TLB functions */
armv4_tlb_flushID, /* tlb_flushID */
arm9_tlb_flushID_SE, /* tlb_flushID_SE */
armv4_tlb_flushD, /* tlb_flushD */
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
/* Cache operations */
armv5_ec_icache_sync_range, /* icache_sync_range */
armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
armv5_ec_dcache_inv_range, /* dcache_inv_range */
armv5_ec_dcache_wb_range, /* dcache_wb_range */
armv4_idcache_inv_all, /* idcache_inv_all */
armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
cpufunc_nullop, /* l2cache_wbinv_all */
(void *)cpufunc_nullop, /* l2cache_wbinv_range */
(void *)cpufunc_nullop, /* l2cache_inv_range */
(void *)cpufunc_nullop, /* l2cache_wb_range */
(void *)cpufunc_nullop, /* l2cache_drain_writebuf */
/* Other functions */
armv4_drain_writebuf, /* drain_writebuf */
(void *)cpufunc_nullop, /* sleep */
/* Soft functions */
arm9_context_switch, /* context_switch */
arm10_setup /* cpu setup */
};
struct cpu_functions sheeva_cpufuncs = {
/* CPU functions */
cpufunc_nullop, /* cpwait */
/* MMU functions */
cpufunc_control, /* control */
sheeva_setttb, /* Setttb */
/* TLB functions */
armv4_tlb_flushID, /* tlb_flushID */
arm9_tlb_flushID_SE, /* tlb_flushID_SE */
armv4_tlb_flushD, /* tlb_flushD */
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
/* Cache operations */
armv5_ec_icache_sync_range, /* icache_sync_range */
armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
sheeva_dcache_inv_range, /* dcache_inv_range */
sheeva_dcache_wb_range, /* dcache_wb_range */
armv4_idcache_inv_all, /* idcache_inv_all */
armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
sheeva_l2cache_inv_range, /* l2cache_inv_range */
sheeva_l2cache_wb_range, /* l2cache_wb_range */
(void *)cpufunc_nullop, /* l2cache_drain_writebuf */
/* Other functions */
armv4_drain_writebuf, /* drain_writebuf */
sheeva_cpu_sleep, /* sleep */
/* Soft functions */
arm9_context_switch, /* context_switch */
arm10_setup /* cpu setup */
};
#endif /* CPU_ARM9E */
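Both tables above plug into the kernel's CPU-function dispatch mechanism: set_cpufuncs() copies the table matching the detected core into the global cpufuncs, and the rest of the kernel calls through it. A minimal sketch of that indirection, assuming cf_-prefixed member names like those declared in sys/arm/include/cpufunc.h (the member list here is abbreviated, not the full struct):

	/* Abbreviated sketch of the dispatch table and its wrappers. */
	struct cpu_functions {
		void	(*cf_setttb)(u_int ttb);	/* switch translation table */
		void	(*cf_tlb_flushID)(void);	/* flush I+D TLBs */
		void	(*cf_dcache_wbinv_all)(void);	/* write back + invalidate D$ */
		/* ... one pointer per operation commented above ... */
	};

	extern struct cpu_functions cpufuncs;	/* filled in by set_cpufuncs() */

	#define	cpu_setttb(t)		cpufuncs.cf_setttb(t)
	#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()

Selecting armv5_ec_cpufuncs versus sheeva_cpufuncs thus swaps every cache and TLB primitive in a single assignment.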
#ifdef CPU_MV_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
/* Cache operations */
@@ -257,11 +153,6 @@ struct cpu_functions cortexa_cpufuncs = {
struct cpu_functions cpufuncs;
u_int cputype;
#if defined (CPU_ARM9E) || \
defined(CPU_ARM1176) || \
defined(CPU_MV_PJ4B) || \
defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static void get_cachetype_cp15(void);
/* Additional cache information local to this file. Log2 of some of the
@@ -371,7 +262,6 @@ get_cachetype_cp15(void)
arm_dcache_align_mask = arm_dcache_align - 1;
}
}
#endif /* ARM9 || XSCALE */
/*
* Cannot panic here as we may not have a console yet ...
@@ -383,38 +273,6 @@ set_cpufuncs(void)
cputype = cp15_midr_get();
cputype &= CPU_ID_CPU_MASK;
#if defined(CPU_ARM9E)
if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
cputype == CPU_ID_MV88FR571_41) {
uint32_t sheeva_ctrl;
sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
MV_L2_ENABLE);
/*
* Workaround for Marvell MV78100 CPU: Cache prefetch
* mechanism may affect the cache coherency validity,
* so it needs to be disabled.
*
* Refer to errata document MV-S501058-00C.pdf (p. 3.1
* L2 Prefetching Mechanism) for details.
*/
if (cputype == CPU_ID_MV88FR571_VD ||
cputype == CPU_ID_MV88FR571_41)
sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
cpufuncs = sheeva_cpufuncs;
get_cachetype_cp15();
pmap_pte_init_generic();
goto out;
} else if (cputype == CPU_ID_ARM926EJS) {
cpufuncs = armv5_ec_cpufuncs;
get_cachetype_cp15();
pmap_pte_init_generic();
goto out;
}
#endif /* CPU_ARM9E */
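The removed branch above keyed the table choice off the Main ID Register. Condensed into straight C, the detection flow for the ARM926EJ-S case looks like this (a sketch restating the deleted code, with cp15_midr_get() and the CPU_ID_* constants taken from the surrounding source):

	cputype = cp15_midr_get();	/* read MIDR via cp15 */
	cputype &= CPU_ID_CPU_MASK;	/* keep only the part-identifying fields */
	if (cputype == CPU_ID_ARM926EJS) {
		cpufuncs = armv5_ec_cpufuncs;	/* generic ARMv5 enhanced-cache ops */
		get_cachetype_cp15();		/* size caches from the cp15 cache-type reg */
		pmap_pte_init_generic();	/* matching page-table attribute setup */
	}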
#if defined(CPU_ARM1176)
if (cputype == CPU_ID_ARM1176JZS) {
cpufuncs = arm1176_cpufuncs;
@@ -466,43 +324,6 @@ set_cpufuncs(void)
* CPU Setup code
*/
#if defined(CPU_ARM9E)
static void
arm10_setup(void)
{
int cpuctrl, cpuctrlmask;
cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
| CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
| CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
| CPU_CONTROL_BPRD_ENABLE
| CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
/* Clear out the cache */
cpu_idcache_wbinv_all();
/* Now really make sure they are clean. */
__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
if (vector_page == ARM_VECTORS_HIGH)
cpuctrl |= CPU_CONTROL_VECRELOC;
/* Set the control register */
cpu_control(0xffffffff, cpuctrl);
/* And again. */
cpu_idcache_wbinv_all();
}
#endif /* CPU_ARM9E || CPU_ARM10 */
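arm10_setup() ends with cpu_control(0xffffffff, cpuctrl), a read-modify-write of the cp15 control register: the first argument selects which bits may change, the second supplies their new values. A plausible C rendering of cpufunc_control() under that assumption (the cp15_sctlr_get()/cp15_sctlr_set() accessors are stand-ins for the raw mrc/mcr, not quoted from this diff):

	u_int
	cpufunc_control(u_int clear, u_int set)
	{
		u_int oldctrl, newctrl;

		oldctrl = cp15_sctlr_get();		/* current control register */
		newctrl = (oldctrl & ~clear) | set;	/* rewrite only the masked bits */
		if (newctrl != oldctrl)
			cp15_sctlr_set(newctrl);
		return (oldctrl);	/* old value, for callers that restore it */
	}

With clear = 0xffffffff, as above, every bit is rewritten, so cpuctrl fully defines the new register.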
#if defined(CPU_ARM1176) \
|| defined(CPU_MV_PJ4B) \

View File

@@ -1,69 +0,0 @@
/* $NetBSD: cpufunc_asm_arm9.S,v 1.3 2004/01/26 15:54:16 rearnsha Exp $ */
/*
* Copyright (c) 2001, 2004 ARM Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARM9 assembly functions for CPU / MMU / TLB specific operations
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
/*
* TLB functions
*/
ENTRY(arm9_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mov pc, lr
END(arm9_tlb_flushID_SE)
/*
* Context switch.
*
* These are the CPU-specific parts of the context switcher cpu_switch()
* These functions actually perform the TTB reload.
*
* NOTE: Special calling convention
* r1, r4-r13 must be preserved
*/
ENTRY(arm9_context_switch)
/*
* We can assume that the caches will only contain kernel addresses
* at this point. So no need to flush them again.
*/
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */
mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */
/* Paranoia -- make sure the pipeline is empty. */
nop
nop
nop
mov pc, lr
END(arm9_context_switch)

View File

@@ -1,74 +0,0 @@
/* $NetBSD: cpufunc_asm_armv4.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
/*-
* Copyright (c) 2001 ARM Limited
* Copyright (c) 1997,1998 Mark Brinicombe.
* Copyright (c) 1997 Causality Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Causality Limited.
* 4. The name of Causality Limited may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARMv4 assembly functions for CPU / MMU / TLB specific operations
*
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
/*
* TLB functions
*/
ENTRY(armv4_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
RET
END(armv4_tlb_flushID)
ENTRY(armv4_tlb_flushD)
mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */
RET
END(armv4_tlb_flushD)
ENTRY(armv4_tlb_flushD_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
RET
END(armv4_tlb_flushD_SE)
/*
* Other functions
*/
ENTRY(armv4_drain_writebuf)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
END(armv4_drain_writebuf)
ENTRY(armv4_idcache_inv_all)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 /* invalidate all I+D cache */
RET
END(armv4_idcache_inv_all)
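These ARMv4 primitives were reached through the cpu_functions table rather than called directly. Typical consumption in the pmap layer looked roughly like the following sketch (the cpu_* wrappers are assumed to expand as in the earlier dispatch-table sketch, and the PTE store is hypothetical):

	*pte = newpte;			/* hypothetical: rewrite a page-table entry */
	cpu_tlb_flushID_SE(va);		/* arm9_tlb_flushID_SE: drop the stale entry for va */
	cpu_drain_writebuf();		/* armv4_drain_writebuf: make the update visible */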

View File

@@ -1,217 +0,0 @@
/* $NetBSD: cpufunc_asm_armv5_ec.S,v 1.1 2007/01/06 00:50:54 christos Exp $ */
/*
* Copyright (c) 2002, 2005 ARM Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* ARMv5 assembly functions for manipulating caches.
* These routines can be used by any core that supports both the set/index
* operations and the test and clean operations for efficiently cleaning the
* entire DCache. If a core does not have the test and clean operations, but
* does have the set/index operations, use the routines in cpufunc_asm_armv5.S.
* This source was derived from that file.
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
#ifndef ELF_TRAMPOLINE
/*
* Functions to set the MMU Translation Table Base register
*
* We need to clean and flush the cache as it uses virtual
* addresses that are about to change.
*/
ENTRY(armv5_ec_setttb)
/*
* Some other ARM ports save registers on the stack, call the
* idcache_wbinv_all function and then restore the registers from the
* stack before setting the TTB. I observed that this caused a
* problem when the old and new translation table entries' buffering
* bits were different. If I saved the registers in other registers
* or invalidated the caches when I returned from idcache_wbinv_all,
* it worked fine. If not, I ended up executing at an invalid PC.
* For armv5_ec_setttb, the idcache_wbinv_all is simple enough, I just
* do it directly and entirely avoid the problem.
*/
mcr p15, 0, r0, c7, c5, 0 /* Invalidate ICache */
1: mrc p15, 0, APSR_nzcv, c7, c14, 3 /* Test, clean and invalidate DCache */
bne 1b /* More to do? */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
END(armv5_ec_setttb)
/*
* Cache operations. For the entire cache we use the enhanced cache
* operations.
*/
ENTRY_NP(armv5_ec_icache_sync_range)
ldr ip, .Larmv5_ec_line_size
cmp r1, #0x4000
bcs .Larmv5_ec_icache_sync_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
.Larmv5_ec_icache_sync_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache cleaning code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
/* Fall through to clean Dcache. */
.Larmv5_ec_dcache_wb:
1:
mrc p15, 0, APSR_nzcv, c7, c10, 3 /* Test and clean (don't invalidate) */
bne 1b /* More to do? */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(armv5_ec_icache_sync_range)
.Larmv5_ec_line_size:
.word _C_LABEL(arm_pdcache_line_size)
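Every range operation below opens with the same sub/and/add/bic sequence: round the start address down to a cache-line boundary and widen the length so the loop still covers the final byte. In C, assuming line holds arm_pdcache_line_size (a power of two) and len > 0, the prologue and loop amount to:

	vm_offset_t mask = line - 1;
	vm_offset_t skew = va & mask;	/* distance back to the line boundary */

	len += skew - 1;		/* "sub r1, r1, #1" plus "add r1, r1, r2" */
	va &= ~mask;			/* "bic r0, r0, r3": align the start */
	for (;;) {
		dcache_clean_line(va);	/* hypothetical per-line op (mcr ... c7, c10, 1) */
		va += line;
		if (len < line)		/* mirrors "subs r1, r1, ip; bpl 1b" */
			break;
		len -= line;
	}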
ENTRY(armv5_ec_dcache_wb_range)
ldr ip, .Larmv5_ec_line_size
cmp r1, #0x4000
bcs .Larmv5_ec_dcache_wb
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(armv5_ec_dcache_wb_range)
ENTRY(armv5_ec_dcache_wbinv_range)
ldr ip, .Larmv5_ec_line_size
cmp r1, #0x4000
bcs .Larmv5_ec_dcache_wbinv_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(armv5_ec_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
* must use wb-inv of the entire cache.
*/
ENTRY(armv5_ec_dcache_inv_range)
ldr ip, .Larmv5_ec_line_size
cmp r1, #0x4000
bcs .Larmv5_ec_dcache_wbinv_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(armv5_ec_dcache_inv_range)
ENTRY(armv5_ec_idcache_wbinv_range)
ldr ip, .Larmv5_ec_line_size
cmp r1, #0x4000
bcs .Larmv5_ec_idcache_wbinv_all
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(armv5_ec_idcache_wbinv_range)
#endif /* !ELF_TRAMPOLINE */
ENTRY_NP(armv5_ec_idcache_wbinv_all)
.Larmv5_ec_idcache_wbinv_all:
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache purging code.
*/
mcr p15, 0, r0, c7, c5, 0 /* Invalidate ICache */
/* Fall through to purge Dcache. */
END(armv5_ec_idcache_wbinv_all)
#ifndef ELF_TRAMPOLINE
ENTRY(armv5_ec_dcache_wbinv_all)
.Larmv5_ec_dcache_wbinv_all:
1: mrc p15, 0, APSR_nzcv, c7, c14, 3 /* Test, clean and invalidate DCache */
bne 1b /* More to do? */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(armv5_ec_dcache_wbinv_all)
#endif

View File

@@ -1,424 +0,0 @@
/*-
* Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
* All rights reserved.
*
* Developed by Semihalf.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of MARVELL nor the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
#include <machine/armreg.h>
#include <machine/param.h>
#ifndef ELF_TRAMPOLINE
.Lsheeva_cache_line_size:
.word _C_LABEL(arm_pdcache_line_size)
.Lsheeva_asm_page_mask:
.word _C_LABEL(PAGE_MASK)
ENTRY(sheeva_setttb)
/* Disable irqs */
mrs r2, cpsr
orr r3, r2, #PSR_I | PSR_F
msr cpsr_c, r3
mov r1, #0
mcr p15, 0, r1, c7, c5, 0 /* Invalidate ICache */
1: mrc p15, 0, APSR_nzcv, c7, c14, 3 /* Test, clean and invalidate DCache */
bne 1b /* More to do? */
mcr p15, 1, r1, c15, c9, 0 /* Clean L2 */
mcr p15, 1, r1, c15, c11, 0 /* Invalidate L2 */
/* Reenable irqs */
msr cpsr_c, r2
mcr p15, 0, r1, c7, c10, 4 /* drain the write buffer */
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
END(sheeva_setttb)
ENTRY(sheeva_dcache_wbinv_range)
str lr, [sp, #-4]!
mrs lr, cpsr
/* Start with cache line aligned address */
ldr ip, .Lsheeva_cache_line_size
ldr ip, [ip]
sub ip, ip, #1
and r2, r0, ip
add r1, r1, r2
add r1, r1, ip
bics r1, r1, ip
bics r0, r0, ip
ldr ip, .Lsheeva_asm_page_mask
and r2, r0, ip
rsb r2, r2, #PAGE_SIZE
cmp r1, r2
movcc ip, r1
movcs ip, r2
1:
add r3, r0, ip
sub r2, r3, #1
/* Disable irqs */
orr r3, lr, #PSR_I | PSR_F
msr cpsr_c, r3
mcr p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */
mcr p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */
/* Enable irqs */
msr cpsr_c, lr
add r0, r0, ip
sub r1, r1, ip
cmp r1, #PAGE_SIZE
movcc ip, r1
movcs ip, #PAGE_SIZE
cmp r1, #0
bne 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
END(sheeva_dcache_wbinv_range)
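All of the sheeva range functions share the shape visible above: align the range to cache lines, then walk it in chunks of at most PAGE_SIZE (the first chunk is additionally capped at the next page boundary, via the PAGE_MASK arithmetic), disabling interrupts only around each start/end "zone" cp15 pair so interrupt latency stays bounded. A schematic C rendering, where marvell_wbinv_zone() is a hypothetical stand-in for the paired mcr p15, 5, ... c15 instructions:

	register_t s;
	vm_size_t chunk;

	while (len > 0) {
		/* Real code also caps the first chunk at the page boundary after va. */
		chunk = (len < PAGE_SIZE) ? len : PAGE_SIZE;
		s = intr_disable();	/* zone start/end must be programmed atomically */
		marvell_wbinv_zone(va, va + chunk - 1);	/* inclusive end address */
		intr_restore(s);
		va += chunk;
		len -= chunk;
	}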
ENTRY(sheeva_idcache_wbinv_range)
str lr, [sp, #-4]!
mrs lr, cpsr
/* Start with cache line aligned address */
ldr ip, .Lsheeva_cache_line_size
ldr ip, [ip]
sub ip, ip, #1
and r2, r0, ip
add r1, r1, r2
add r1, r1, ip
bics r1, r1, ip
bics r0, r0, ip
ldr ip, .Lsheeva_asm_page_mask
and r2, r0, ip
rsb r2, r2, #PAGE_SIZE
cmp r1, r2
movcc ip, r1
movcs ip, r2
1:
add r3, r0, ip
sub r2, r3, #1
/* Disable irqs */
orr r3, lr, #PSR_I | PSR_F
msr cpsr_c, r3
mcr p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */
mcr p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */
/* Enable irqs */
msr cpsr_c, lr
/* Invalidate and clean icache line by line */
ldr r3, .Lsheeva_cache_line_size
ldr r3, [r3]
2:
mcr p15, 0, r0, c7, c5, 1
add r0, r0, r3
cmp r2, r0
bhi 2b
add r0, r2, #1
sub r1, r1, ip
cmp r1, #PAGE_SIZE
movcc ip, r1
movcs ip, #PAGE_SIZE
cmp r1, #0
bne 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
END(sheeva_idcache_wbinv_range)
ENTRY(sheeva_dcache_inv_range)
str lr, [sp, #-4]!
mrs lr, cpsr
/* Start with cache line aligned address */
ldr ip, .Lsheeva_cache_line_size
ldr ip, [ip]
sub ip, ip, #1
and r2, r0, ip
add r1, r1, r2
add r1, r1, ip
bics r1, r1, ip
bics r0, r0, ip
ldr ip, .Lsheeva_asm_page_mask
and r2, r0, ip
rsb r2, r2, #PAGE_SIZE
cmp r1, r2
movcc ip, r1
movcs ip, r2
1:
add r3, r0, ip
sub r2, r3, #1
/* Disable irqs */
orr r3, lr, #PSR_I | PSR_F
msr cpsr_c, r3
mcr p15, 5, r0, c15, c14, 0 /* Inv zone start address */
mcr p15, 5, r2, c15, c14, 1 /* Inv zone end address */
/* Enable irqs */
msr cpsr_c, lr
add r0, r0, ip
sub r1, r1, ip
cmp r1, #PAGE_SIZE
movcc ip, r1
movcs ip, #PAGE_SIZE
cmp r1, #0
bne 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
END(sheeva_dcache_inv_range)
ENTRY(sheeva_dcache_wb_range)
str lr, [sp, #-4]!
mrs lr, cpsr
/* Start with cache line aligned address */
ldr ip, .Lsheeva_cache_line_size
ldr ip, [ip]
sub ip, ip, #1
and r2, r0, ip
add r1, r1, r2
add r1, r1, ip
bics r1, r1, ip
bics r0, r0, ip
ldr ip, .Lsheeva_asm_page_mask
and r2, r0, ip
rsb r2, r2, #PAGE_SIZE
cmp r1, r2
movcc ip, r1
movcs ip, r2
1:
add r3, r0, ip
sub r2, r3, #1
/* Disable irqs */
orr r3, lr, #PSR_I | PSR_F
msr cpsr_c, r3
mcr p15, 5, r0, c15, c13, 0 /* Clean zone start address */
mcr p15, 5, r2, c15, c13, 1 /* Clean zone end address */
/* Enable irqs */
msr cpsr_c, lr
add r0, r0, ip
sub r1, r1, ip
cmp r1, #PAGE_SIZE
movcc ip, r1
movcs ip, #PAGE_SIZE
cmp r1, #0
bne 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
END(sheeva_dcache_wb_range)
ENTRY(sheeva_l2cache_wbinv_range)
str lr, [sp, #-4]!
mrs lr, cpsr
/* Start with cache line aligned address */
ldr ip, .Lsheeva_cache_line_size
ldr ip, [ip]
sub ip, ip, #1
and r2, r0, ip
add r1, r1, r2
add r1, r1, ip
bics r1, r1, ip
bics r0, r0, ip
ldr ip, .Lsheeva_asm_page_mask
and r2, r0, ip
rsb r2, r2, #PAGE_SIZE
cmp r1, r2
movcc ip, r1
movcs ip, r2
1:
add r3, r0, ip
sub r2, r3, #1
/* Disable irqs */
orr r3, lr, #PSR_I | PSR_F
msr cpsr_c, r3
mcr p15, 1, r0, c15, c9, 4 /* Clean L2 zone start address */
mcr p15, 1, r2, c15, c9, 5 /* Clean L2 zone end address */
mcr p15, 1, r0, c15, c11, 4 /* Inv L2 zone start address */
mcr p15, 1, r2, c15, c11, 5 /* Inv L2 zone end address */
/* Enable irqs */
msr cpsr_c, lr
add r0, r0, ip
sub r1, r1, ip
cmp r1, #PAGE_SIZE
movcc ip, r1
movcs ip, #PAGE_SIZE
cmp r1, #0
bne 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
END(sheeva_l2cache_wbinv_range)
ENTRY(sheeva_l2cache_inv_range)
str lr, [sp, #-4]!
mrs lr, cpsr
/* Start with cache line aligned address */
ldr ip, .Lsheeva_cache_line_size
ldr ip, [ip]
sub ip, ip, #1
and r2, r0, ip
add r1, r1, r2
add r1, r1, ip
bics r1, r1, ip
bics r0, r0, ip
ldr ip, .Lsheeva_asm_page_mask
and r2, r0, ip
rsb r2, r2, #PAGE_SIZE
cmp r1, r2
movcc ip, r1
movcs ip, r2
1:
add r3, r0, ip
sub r2, r3, #1
/* Disable irqs */
orr r3, lr, #PSR_I | PSR_F
msr cpsr_c, r3
mcr p15, 1, r0, c15, c11, 4 /* Inv L2 zone start address */
mcr p15, 1, r2, c15, c11, 5 /* Inv L2 zone end address */
/* Enable irqs */
msr cpsr_c, lr
add r0, r0, ip
sub r1, r1, ip
cmp r1, #PAGE_SIZE
movcc ip, r1
movcs ip, #PAGE_SIZE
cmp r1, #0
bne 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
END(sheeva_l2cache_inv_range)
ENTRY(sheeva_l2cache_wb_range)
str lr, [sp, #-4]!
mrs lr, cpsr
/* Start with cache line aligned address */
ldr ip, .Lsheeva_cache_line_size
ldr ip, [ip]
sub ip, ip, #1
and r2, r0, ip
add r1, r1, r2
add r1, r1, ip
bics r1, r1, ip
bics r0, r0, ip
ldr ip, .Lsheeva_asm_page_mask
and r2, r0, ip
rsb r2, r2, #PAGE_SIZE
cmp r1, r2
movcc ip, r1
movcs ip, r2
1:
add r3, r0, ip
sub r2, r3, #1
/* Disable irqs */
orr r3, lr, #PSR_I | PSR_F
msr cpsr_c, r3
mcr p15, 1, r0, c15, c9, 4 /* Clean L2 zone start address */
mcr p15, 1, r2, c15, c9, 5 /* Clean L2 zone end address */
/* Enable irqs */
msr cpsr_c, lr
add r0, r0, ip
sub r1, r1, ip
cmp r1, #PAGE_SIZE
movcc ip, r1
movcs ip, #PAGE_SIZE
cmp r1, #0
bne 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
END(sheeva_l2cache_wb_range)
#endif /* !ELF_TRAMPOLINE */
ENTRY(sheeva_l2cache_wbinv_all)
/* Disable irqs */
mrs r1, cpsr
orr r2, r1, #PSR_I | PSR_F
msr cpsr_c, r2
mov r0, #0
mcr p15, 1, r0, c15, c9, 0 /* Clean L2 */
mcr p15, 1, r0, c15, c11, 0 /* Invalidate L2 */
msr cpsr_c, r1 /* Reenable irqs */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(sheeva_l2cache_wbinv_all)
#ifndef ELF_TRAMPOLINE
/* This function modifies the register value as follows:
*
* arg1 arg2 EFFECT (bit value saved into register)
* 0 0 not changed
* 0 1 negated
* 1 0 cleared
* 1 1 set
*/
ENTRY(sheeva_control_ext)
mrc p15, 1, r3, c15, c1, 0 /* Read the control register */
bic r2, r3, r0 /* Clear bits */
eor r2, r2, r1 /* XOR bits */
teq r2, r3 /* Only write if there is a change */
mcrne p15, 1, r2, c15, c1, 0 /* Write new control register */
mov r0, r3 /* Return old value */
RET
END(sheeva_control_ext)
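The bic/eor pair above is a two-instruction encoding of the truth table in the comment; the same transformation in plain C (hypothetical helper name, useful for checking the table):

	u_int
	sheeva_ctrl_apply(u_int old, u_int arg1, u_int arg2)
	{
		/* arg1 picks the bits to rewrite; arg2 supplies or negates them. */
		return ((old & ~arg1) ^ arg2);	/* bic then eor, as in the asm */
	}

For arg1 = 0 the eor either keeps (arg2 = 0) or negates (arg2 = 1) a bit; for arg1 = 1 the bit is first cleared, so the eor forces it to arg2.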
ENTRY(sheeva_cpu_sleep)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */
mcr p15, 0, r0, c7, c0, 4 /* Wait for interrupt */
mov pc, lr
END(sheeva_cpu_sleep)
#endif /* !ELF_TRAMPOLINE */

View File

@@ -93,23 +93,6 @@ void cpufunc_nullop (void);
u_int cpufunc_control (u_int clear, u_int bic);
void cpu_domains (u_int domains);
#if defined(CPU_ARM9E)
void arm9_tlb_flushID_SE (u_int va);
void arm9_context_switch (void);
u_int sheeva_control_ext (u_int, u_int);
void sheeva_cpu_sleep (int);
void sheeva_setttb (u_int);
void sheeva_dcache_wbinv_range (vm_offset_t, vm_size_t);
void sheeva_dcache_inv_range (vm_offset_t, vm_size_t);
void sheeva_dcache_wb_range (vm_offset_t, vm_size_t);
void sheeva_idcache_wbinv_range (vm_offset_t, vm_size_t);
void sheeva_l2cache_wbinv_range (vm_offset_t, vm_size_t);
void sheeva_l2cache_inv_range (vm_offset_t, vm_size_t);
void sheeva_l2cache_wb_range (vm_offset_t, vm_size_t);
void sheeva_l2cache_wbinv_all (void);
#endif
#if defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
void armv7_cpu_sleep (int);
@@ -122,26 +105,6 @@ void pj4b_config (void);
void arm11x6_sleep (int); /* no ref. for errata */
#endif
#if defined(CPU_ARM9E)
void armv5_ec_setttb(u_int);
void armv5_ec_icache_sync_range(vm_offset_t, vm_size_t);
void armv5_ec_dcache_wbinv_all(void);
void armv5_ec_dcache_wbinv_range(vm_offset_t, vm_size_t);
void armv5_ec_dcache_inv_range(vm_offset_t, vm_size_t);
void armv5_ec_dcache_wb_range(vm_offset_t, vm_size_t);
void armv5_ec_idcache_wbinv_all(void);
void armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t);
void armv4_tlb_flushID (void);
void armv4_tlb_flushD (void);
void armv4_tlb_flushD_SE (u_int va);
void armv4_drain_writebuf (void);
void armv4_idcache_inv_all (void);
#endif
/*
* Macros for manipulating CPU interrupts

View File

@@ -54,14 +54,8 @@ extern int _min_bzero_size;
enum cpu_class {
CPU_CLASS_NONE,
CPU_CLASS_ARM9TDMI,
CPU_CLASS_ARM9ES,
CPU_CLASS_ARM9EJS,
CPU_CLASS_ARM10E,
CPU_CLASS_ARM10EJ,
CPU_CLASS_CORTEXA,
CPU_CLASS_KRAIT,
CPU_CLASS_XSCALE,
CPU_CLASS_ARM11J,
CPU_CLASS_MARVELL
};

View File

@@ -1,7 +0,0 @@
# $FreeBSD$
files "../mv/files.mv"
cpu CPU_ARM9E
machine arm
makeoptions CONF_CFLAGS="-march=armv5te"
options FREEBSD_BOOT_LOADER

View File

@@ -11,13 +11,9 @@ arm/arm/busdma_machdep.c standard
arm/arm/copystr.S standard
arm/arm/cpufunc.c standard
arm/arm/cpufunc_asm.S standard
arm/arm/cpufunc_asm_arm9.S optional cpu_arm9e
arm/arm/cpufunc_asm_arm11x6.S optional cpu_arm1176
arm/arm/cpufunc_asm_armv4.S optional cpu_arm9e
arm/arm/cpufunc_asm_armv5_ec.S optional cpu_arm9e
arm/arm/cpufunc_asm_armv7.S optional cpu_cortexa | cpu_krait | cpu_mv_pj4b
arm/arm/cpufunc_asm_pj4b.S optional cpu_mv_pj4b
arm/arm/cpufunc_asm_sheeva.S optional cpu_arm9e
arm/arm/cpuinfo.c standard
arm/arm/cpu_asm-v6.S standard
arm/arm/db_disasm.c optional ddb