Remove the now unused CPU_ARM9 and CPU_FA526 options. These are for
ARMv4 CPUs that are no longer supported.
Andrew Turner 2018-07-28 11:00:45 +00:00
parent 560a8e6ccc
commit e57edd3580
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=336813
7 changed files with 5 additions and 1127 deletions

View File: sys/arm/arm/cpufunc_asm_fa526.S

@@ -1,207 +0,0 @@
/* $NetBSD: cpufunc_asm_fa526.S,v 1.3 2008/10/15 16:56:49 matt Exp $*/
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <matt@3am-software.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
#ifdef CPU_FA526
#define CACHELINE_SIZE 16
#else
#define CACHELINE_SIZE 32
#endif
ENTRY(fa526_setttb)
mov r1, #0
mcr p15, 0, r1, c7, c14, 0 /* clean and invalidate D$ */
mcr p15, 0, r1, c7, c5, 0 /* invalidate I$ */
mcr p15, 0, r1, c7, c5, 6 /* invalidate BTB */
mcr p15, 0, r1, c7, c10, 4 /* drain write and fill buffer */
mcr p15, 0, r0, c2, c0, 0 /* Write the TTB */
/* If we have updated the TTB we must flush the TLB */
mcr p15, 0, r1, c8, c7, 0 /* invalidate I+D TLB */
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
mov pc, lr
END(fa526_setttb)
/*
* TLB functions
*/
ENTRY(fa526_tlb_flushID_SE)
mcr p15, 0, r0, c8, c7, 1 /* flush Utlb single entry */
mov pc, lr
END(fa526_tlb_flushID_SE)
ENTRY(fa526_cpu_sleep)
mov r0, #0
/* nop
nop*/
mcr p15, 0, r0, c7, c0, 4 /* Wait for interrupt*/
mov pc, lr
END(fa526_cpu_sleep)
/*
* Cache functions
*/
ENTRY(fa526_idcache_wbinv_all)
mov r0, #0
mcr p15, 0, r0, c7, c14, 0 /* clean and invalidate D$ */
mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
END(fa526_idcache_wbinv_all)
ENTRY(fa526_dcache_wbinv_all)
mov r0, #0
mcr p15, 0, r0, c7, c14, 0 /* clean and invalidate D$ */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
END(fa526_dcache_wbinv_all)
/*
* Soft functions
*/
ENTRY(fa526_dcache_wbinv_range)
cmp r1, #0x4000
bhs _C_LABEL(fa526_dcache_wbinv_all)
and r2, r0, #(CACHELINE_SIZE - 1)
add r1, r1, r2
bic r0, r0, #(CACHELINE_SIZE - 1)
1: mcr p15, 0, r0, c7, c14, 1 /* clean and invalidate D$ entry */
add r0, r0, #CACHELINE_SIZE
subs r1, r1, #CACHELINE_SIZE
bhi 1b
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
END(fa526_dcache_wbinv_range)
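All of the range-based routines in this file share one pattern: fall back to the whole-cache operation for ranges of 16 KB (0x4000) or more, otherwise extend the length by the start address's offset within its cache line, round the start down to a line boundary, and issue one CP15 op per line. A C sketch of that pattern, with hypothetical helper names standing in for the mcr instructions:

#include <stddef.h>
#include <stdint.h>

#define CACHELINE_SIZE 16u	/* FA526 line size, per the #ifdef above */

/* Hypothetical stand-ins for the CP15 ops in the assembly. */
void clean_inv_dcache_line(uintptr_t va);	/* mcr p15, 0, va, c7, c14, 1 */
void dcache_wbinv_all(void);

/* C model of fa526_dcache_wbinv_range(va, len). */
void
dcache_wbinv_range_model(uintptr_t va, size_t len)
{
	if (len >= 0x4000) {			/* cmp r1, #0x4000; bhs */
		dcache_wbinv_all();
		return;
	}
	len += va & (CACHELINE_SIZE - 1);	/* and r2, r0, #...; add r1, r1, r2 */
	va &= ~(uintptr_t)(CACHELINE_SIZE - 1);	/* bic r0, r0, #... */
	for (size_t off = 0; off < len; off += CACHELINE_SIZE)
		clean_inv_dcache_line(va + off);	/* the subs/bhi loop */
	/* the assembly then drains the write buffer (c7, c10, 4) */
}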
ENTRY(fa526_dcache_wb_range)
cmp r1, #0x4000
bls 1f
mov r0, #0
mcr p15, 0, r0, c7, c10, 0 /* clean entire D$ */
b 3f
1: and r2, r0, #(CACHELINE_SIZE - 1)
add r1, r1, r2
bic r0, r0, #(CACHELINE_SIZE - 1)
2: mcr p15, 0, r0, c7, c10, 1 /* clean D$ entry */
add r0, r0, #CACHELINE_SIZE
subs r1, r1, #CACHELINE_SIZE
bhi 2b
3: mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
END(fa526_dcache_wb_range)
ENTRY(fa526_dcache_inv_range)
and r2, r0, #(CACHELINE_SIZE - 1)
add r1, r1, r2
bic r0, r0, #(CACHELINE_SIZE - 1)
1: mcr p15, 0, r0, c7, c6, 1 /* invalidate D$ single entry */
add r0, r0, #CACHELINE_SIZE
subs r1, r1, #CACHELINE_SIZE
bhi 1b
mov pc, lr
END(fa526_dcache_inv_range)
ENTRY(fa526_idcache_wbinv_range)
cmp r1, #0x4000
bhs _C_LABEL(fa526_idcache_wbinv_all)
and r2, r0, #(CACHELINE_SIZE - 1)
add r1, r1, r2
bic r0, r0, #(CACHELINE_SIZE - 1)
1: mcr p15, 0, r0, c7, c14, 1 /* clean and invalidate D$ entry */
mcr p15, 0, r0, c7, c5, 1 /* invalidate I$ entry */
add r0, r0, #CACHELINE_SIZE
subs r1, r1, #CACHELINE_SIZE
bhi 1b
2: mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
END(fa526_idcache_wbinv_range)
ENTRY(fa526_icache_sync_range)
cmp r1, #0x4000
bhs .Lfa526_icache_sync_all
and r2, r0, #(CACHELINE_SIZE - 1)
add r1, r1, r2
bic r0, r0, #(CACHELINE_SIZE - 1)
1: mcr p15, 0, r0, c7, c10, 1 /* clean D$ entry */
mcr p15, 0, r0, c7, c5, 1 /* invalidate I$ entry */
add r0, r0, #CACHELINE_SIZE
subs r1, r1, #CACHELINE_SIZE
bhi 1b
2: mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
.Lfa526_icache_sync_all:
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ */
mov pc, lr
END(fa526_icache_sync_range)
ENTRY(fa526_context_switch)
/*
* CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
* Thus the data cache will contain only kernel data and the
* instruction cache will contain only kernel code, and all
* kernel mappings are shared by all processes.
*/
mcr p15, 0, r0, c2, c0, 0 /* Write the TTB */
/* If we have updated the TTB we must flush the TLB */
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
mov pc, lr
END(fa526_context_switch)
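For context on why the whole file can go: none of these symbols was called directly; they were reached through FreeBSD/arm's cpu_functions dispatch table, selected at boot when the CPU probe matched an FA526. A minimal sketch of that wiring, with struct and field names approximated from memory rather than quoted from cpufunc.h:

#include <stddef.h>
#include <stdint.h>

/* Entry points implemented by the assembly above. */
extern void fa526_setttb(uint32_t ttb);
extern void fa526_tlb_flushID_SE(uint32_t va);
extern void fa526_dcache_wbinv_all(void);
extern void fa526_dcache_wbinv_range(uintptr_t va, size_t len);
extern void fa526_context_switch(void);
extern void fa526_cpu_sleep(int mode);

/* Approximation of the per-CPU function table (not verbatim FreeBSD). */
struct cpu_functions {
	void (*cf_setttb)(uint32_t);
	void (*cf_tlb_flushID_SE)(uint32_t);
	void (*cf_dcache_wbinv_all)(void);
	void (*cf_dcache_wbinv_range)(uintptr_t, size_t);
	void (*cf_context_switch)(void);
	void (*cf_sleep)(int);
};

static const struct cpu_functions fa526_cpufuncs = {
	.cf_setttb		= fa526_setttb,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_context_switch	= fa526_context_switch,
	.cf_sleep		= fa526_cpu_sleep,
};

With CPU_FA526 gone from options.arm (last file below), nothing can select such a table, so both it and this assembly are dead code.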

View File: sys/arm/arm/cpufunc_asm_xscale.S

@@ -1,509 +0,0 @@
/* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */
/*-
* Copyright (c) 2001, 2002 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
/*-
* Copyright (c) 2001 Matt Thomas.
* Copyright (c) 1997,1998 Mark Brinicombe.
* Copyright (c) 1997 Causality Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Causality Limited.
* 4. The name of Causality Limited may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* XScale assembly functions for CPU / MMU / TLB specific operations
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
#include <machine/armreg.h>
/*
* Size of the XScale core D-cache.
*/
#define DCACHE_SIZE 0x00008000
/*
* CPWAIT -- Canonical method to wait for CP15 update.
* From: Intel 80200 manual, section 2.3.3.
*
* NOTE: Clobbers the specified temp reg.
*/
#define CPWAIT_BRANCH \
sub pc, pc, #4
#define CPWAIT(tmp) \
mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
mov tmp, tmp /* wait for it to complete */ ;\
CPWAIT_BRANCH /* branch to next insn */
#define CPWAIT_AND_RETURN_SHIFTER lsr #32
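/*
 * Note on the shifter trick: an immediate "lsr #32" shifts all 32 bits
 * out, so the shifter operand is always zero and the
 * "sub pc, lr, tmp, lsr #32" below is just a return to lr -- but one
 * whose operand depends on tmp, stalling the return until the
 * preceding CP15 read has completed.
 */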
#define CPWAIT_AND_RETURN(tmp) \
mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
/* Wait for it to complete and branch to the return address */ \
sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER
ENTRY(xscale_cpwait)
CPWAIT_AND_RETURN(r0)
END(xscale_cpwait)
/*
* We need a separate cpu_control() entry point, since we have to
* invalidate the Branch Target Buffer in the event the BPRD bit
* changes in the control register.
*/
ENTRY(xscale_control)
mrc CP15_SCTLR(r3) /* Read the control register */
bic r2, r3, r0 /* Clear bits */
eor r2, r2, r1 /* XOR bits */
teq r2, r3 /* Only write if there was a change */
mcrne p15, 0, r0, c7, c5, 6 /* Invalidate the BTB */
mcrne CP15_SCTLR(r2) /* Write new control register */
mov r0, r3 /* Return old value */
CPWAIT_AND_RETURN(r1)
END(xscale_control)
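The bic/eor pair gives control-register updates clear-then-toggle semantics, and the teq skips the (BTB-invalidating) write when nothing would change. The same logic in C, with a plain variable standing in for the CP15 control register:

#include <stdint.h>

static uint32_t sctlr;	/* stand-in for the CP15 control register */

/* C model of xscale_control(clear, toggle); returns the old value. */
uint32_t
xscale_control_model(uint32_t clear, uint32_t toggle)
{
	uint32_t oval = sctlr;				/* mrc CP15_SCTLR(r3) */
	uint32_t nval = (oval & ~clear) ^ toggle;	/* bic; eor */

	if (nval != oval) {				/* teq r2, r3 */
		/* mcrne ... c7, c5, 6: invalidate the BTB first */
		sctlr = nval;				/* mcrne CP15_SCTLR(r2) */
	}
	return (oval);					/* mov r0, r3 */
}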
/*
* Functions to set the MMU Translation Table Base register
*
* We need to clean and flush the cache as it uses virtual
* addresses that are about to change.
*/
ENTRY(xscale_setttb)
#ifdef CACHE_CLEAN_BLOCK_INTR
mrs r3, cpsr
orr r1, r3, #(PSR_I | PSR_F)
msr cpsr_fsxc, r1
#endif
stmfd sp!, {r0-r3, lr}
bl _C_LABEL(xscale_cache_cleanID)
mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
CPWAIT(r0)
ldmfd sp!, {r0-r3, lr}
/* Write the TTB */
mcr p15, 0, r0, c2, c0, 0
/* If we have updated the TTB we must flush the TLB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
/* The cleanID above means we only need to flush the I cache here */
mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
CPWAIT(r0)
#ifdef CACHE_CLEAN_BLOCK_INTR
msr cpsr_fsxc, r3
#endif
RET
END(xscale_setttb)
/*
* TLB functions
*
*/
ENTRY(xscale_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
CPWAIT_AND_RETURN(r0)
END(xscale_tlb_flushID_SE)
/*
* Cache functions
*/
ENTRY(xscale_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
CPWAIT_AND_RETURN(r0)
END(xscale_cache_flushID)
ENTRY(xscale_cache_flushI)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
CPWAIT_AND_RETURN(r0)
END(xscale_cache_flushI)
ENTRY(xscale_cache_flushD)
mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
CPWAIT_AND_RETURN(r0)
END(xscale_cache_flushD)
ENTRY(xscale_cache_flushI_SE)
mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
CPWAIT_AND_RETURN(r0)
END(xscale_cache_flushI_SE)
ENTRY(xscale_cache_flushD_SE)
/*
* Errata (rev < 2): Must clean-dcache-line to an address
* before invalidate-dcache-line to an address, or dirty
* bits will not be cleared in the dcache array.
*/
mcr p15, 0, r0, c7, c10, 1
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
CPWAIT_AND_RETURN(r0)
END(xscale_cache_flushD_SE)
ENTRY(xscale_cache_cleanD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
CPWAIT_AND_RETURN(r0)
END(xscale_cache_cleanD_E)
/*
* Information for the XScale cache clean/purge functions:
*
* * Virtual address of the memory region to use
* * Size of memory region
*
* Note the virtual address for the Data cache clean operation
* does not need to be backed by physical memory, since no loads
* will actually be performed by the allocate-line operation.
*
* Note that the Mini-Data cache MUST be cleaned by executing
* loads from memory mapped into a region reserved exclusively
* for cleaning of the Mini-Data cache.
*/
.data
.global _C_LABEL(xscale_cache_clean_addr)
_C_LABEL(xscale_cache_clean_addr):
.word 0x00000000
.global _C_LABEL(xscale_cache_clean_size)
_C_LABEL(xscale_cache_clean_size):
.word DCACHE_SIZE
.global _C_LABEL(xscale_minidata_clean_addr)
_C_LABEL(xscale_minidata_clean_addr):
.word 0x00000000
.global _C_LABEL(xscale_minidata_clean_size)
_C_LABEL(xscale_minidata_clean_size):
.word 0x00000800
.text
.Lxscale_cache_clean_addr:
.word _C_LABEL(xscale_cache_clean_addr)
.Lxscale_cache_clean_size:
.word _C_LABEL(xscale_cache_clean_size)
.Lxscale_minidata_clean_addr:
.word _C_LABEL(xscale_minidata_clean_addr)
.Lxscale_minidata_clean_size:
.word _C_LABEL(xscale_minidata_clean_size)
#ifdef CACHE_CLEAN_BLOCK_INTR
#define XSCALE_CACHE_CLEAN_BLOCK \
mrs r3, cpsr ; \
orr r0, r3, #(PSR_I | PSR_F) ; \
msr cpsr_fsxc, r0
#define XSCALE_CACHE_CLEAN_UNBLOCK \
msr cpsr_fsxc, r3
#else
#define XSCALE_CACHE_CLEAN_BLOCK
#define XSCALE_CACHE_CLEAN_UNBLOCK
#endif /* CACHE_CLEAN_BLOCK_INTR */
#define XSCALE_CACHE_CLEAN_PROLOGUE \
XSCALE_CACHE_CLEAN_BLOCK ; \
ldr r2, .Lxscale_cache_clean_addr ; \
ldmia r2, {r0, r1} ; \
/* \
* BUG ALERT! \
* \
* The XScale core has a strange cache eviction bug, which \
* requires us to use 2x the cache size for the cache clean \
* and for that area to be aligned to 2 * cache size. \
* \
* The work-around is to use 2 areas for cache clean, and to \
* alternate between them whenever this is done. No one knows \
* why the work-around works (mmm!). \
*/ \
eor r0, r0, #(DCACHE_SIZE) ; \
str r0, [r2] ; \
add r0, r0, r1
#define XSCALE_CACHE_CLEAN_EPILOGUE \
XSCALE_CACHE_CLEAN_UNBLOCK
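A sketch of the ping-pong the prologue implements: the reserved clean region is twice the D-cache size (aligned to twice the size), and each pass XORs the base with DCACHE_SIZE so successive cleans alternate halves, per the workaround described above. How the region is reserved at boot is elided; the variable names mirror the .data words above:

#include <stdint.h>

#define DCACHE_SIZE 0x00008000u

uintptr_t xscale_cache_clean_addr;	/* set at boot; base of the 2x area */
uint32_t  xscale_cache_clean_size = DCACHE_SIZE;

/* C model of XSCALE_CACHE_CLEAN_PROLOGUE's address selection. */
static uintptr_t
next_clean_window(uint32_t *sizep)
{
	xscale_cache_clean_addr ^= DCACHE_SIZE;	/* eor r0, r0, #DCACHE_SIZE */
	*sizep = xscale_cache_clean_size;
	/* the assembly then line-allocates downward from addr + size */
	return (xscale_cache_clean_addr);
}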
ENTRY_NP(xscale_cache_syncI)
EENTRY_NP(xscale_cache_purgeID)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
EENTRY_NP(xscale_cache_cleanID)
EENTRY_NP(xscale_cache_purgeD)
EENTRY(xscale_cache_cleanD)
XSCALE_CACHE_CLEAN_PROLOGUE
1: subs r0, r0, #32
mcr p15, 0, r0, c7, c2, 5 /* allocate cache line */
subs r1, r1, #32
bne 1b
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT(r0)
XSCALE_CACHE_CLEAN_EPILOGUE
RET
EEND(xscale_cache_cleanD)
EEND(xscale_cache_purgeD)
EEND(xscale_cache_cleanID)
EEND(xscale_cache_purgeID)
END(xscale_cache_syncI)
/*
* Clean the mini-data cache.
*
* It's expected that we only use the mini-data cache for
* kernel addresses, so there is no need to purge it on
* context switch, and no need to prevent userspace access
* while we clean it.
*/
ENTRY(xscale_cache_clean_minidata)
ldr r2, .Lxscale_minidata_clean_addr
ldmia r2, {r0, r1}
1: ldr r3, [r0], #32
subs r1, r1, #32
bne 1b
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r1)
END(xscale_cache_clean_minidata)
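Cleaning the mini-data cache is done purely with loads: touching one word per 32-byte line across the reserved region forces dirty mini-data lines to be evicted. A C model of the loop above (setup of the reserved region elided):

#include <stdint.h>

/* C model of xscale_cache_clean_minidata: one load per 32-byte line. */
void
minidata_clean_model(volatile uint32_t *base, uint32_t size)
{
	for (uint32_t off = 0; off < size; off += 32)
		(void)base[off / 4];	/* ldr r3, [r0], #32 */
	/* the assembly then drains the write buffer (c7, c10, 4) */
}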
ENTRY(xscale_cache_purgeID_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
CPWAIT(r1)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
CPWAIT_AND_RETURN(r1)
END(xscale_cache_purgeID_E)
ENTRY(xscale_cache_purgeD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
CPWAIT(r1)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
CPWAIT_AND_RETURN(r1)
END(xscale_cache_purgeD_E)
/*
* Soft functions
*/
/* xscale_cache_syncI is identical to xscale_cache_purgeID */
EENTRY(xscale_cache_cleanID_rng)
ENTRY(xscale_cache_cleanD_rng)
cmp r1, #0x4000
bcs _C_LABEL(xscale_cache_cleanID)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
add r0, r0, #32
subs r1, r1, #32
bhi 1b
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
/*END(xscale_cache_cleanID_rng)*/
END(xscale_cache_cleanD_rng)
ENTRY(xscale_cache_purgeID_rng)
cmp r1, #0x4000
bcs _C_LABEL(xscale_cache_purgeID)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
add r0, r0, #32
subs r1, r1, #32
bhi 1b
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
END(xscale_cache_purgeID_rng)
ENTRY(xscale_cache_purgeD_rng)
cmp r1, #0x4000
bcs _C_LABEL(xscale_cache_purgeD)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
add r0, r0, #32
subs r1, r1, #32
bhi 1b
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
END(xscale_cache_purgeD_rng)
ENTRY(xscale_cache_syncI_rng)
cmp r1, #0x4000
bcs _C_LABEL(xscale_cache_syncI)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
add r0, r0, #32
subs r1, r1, #32
bhi 1b
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
END(xscale_cache_syncI_rng)
ENTRY(xscale_cache_flushD_rng)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
1: mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
add r0, r0, #32
subs r1, r1, #32
bhi 1b
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
END(xscale_cache_flushD_rng)
/*
* Context switch.
*
* These are the CPU-specific parts of the context switcher cpu_switch().
* These functions actually perform the TTB reload.
*
* NOTE: Special calling convention
* r1, r4-r13 must be preserved
*/
ENTRY(xscale_context_switch)
/*
* CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
* Thus the data cache will contain only kernel data and the
* instruction cache will contain only kernel code, and all
* kernel mappings are shared by all processes.
*/
/* Write the TTB */
mcr p15, 0, r0, c2, c0, 0
/* If we have updated the TTB we must flush the TLB */
mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
CPWAIT_AND_RETURN(r0)
END(xscale_context_switch)
/*
* xscale_cpu_sleep
*
* This is called when there is nothing on any of the run queues.
* We go into IDLE mode so that any IRQ or FIQ will awaken us.
*
* If this is called with anything other than ARM_SLEEP_MODE_IDLE,
* ignore it.
*/
ENTRY(xscale_cpu_sleep)
tst r0, #0x00000000
bne 1f
mov r0, #0x1
mcr p14, 0, r0, c7, c0, 0
1:
RET
END(xscale_cpu_sleep)

View File: sys/arm/arm/cpufunc_asm_xscale_c3.S

@@ -1,399 +0,0 @@
/* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */
/*-
* Copyright (c) 2007 Olivier Houchard
* Copyright (c) 2001, 2002 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
/*-
* Copyright (c) 2001 Matt Thomas.
* Copyright (c) 1997,1998 Mark Brinicombe.
* Copyright (c) 1997 Causality Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Causality Limited.
* 4. The name of Causality Limited may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* XScale core 3 assembly functions for CPU / MMU / TLB specific operations
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
#include <machine/armreg.h>
/*
* Size of the XScale core D-cache.
*/
#define DCACHE_SIZE 0x00008000
/*
* CPWAIT -- Canonical method to wait for CP15 update.
* From: Intel 80200 manual, section 2.3.3.
*
* NOTE: Clobbers the specified temp reg.
*/
#define CPWAIT_BRANCH \
sub pc, pc, #4
#define CPWAIT(tmp) \
mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
mov tmp, tmp /* wait for it to complete */ ;\
CPWAIT_BRANCH /* branch to next insn */
#define CPWAIT_AND_RETURN_SHIFTER lsr #32
#define CPWAIT_AND_RETURN(tmp) \
mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
/* Wait for it to complete and branch to the return address */ \
sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER
#define ARM_USE_L2_CACHE
#define L2_CACHE_SIZE 0x80000
#define L2_CACHE_WAYS 8
#define L2_CACHE_LINE_SIZE 32
#define L2_CACHE_SETS (L2_CACHE_SIZE / \
(L2_CACHE_WAYS * L2_CACHE_LINE_SIZE))
#define L1_DCACHE_SIZE 32 * 1024
#define L1_DCACHE_WAYS 4
#define L1_DCACHE_LINE_SIZE 32
#define L1_DCACHE_SETS (L1_DCACHE_SIZE / \
(L1_DCACHE_WAYS * L1_DCACHE_LINE_SIZE))
#ifdef CACHE_CLEAN_BLOCK_INTR
#define XSCALE_CACHE_CLEAN_BLOCK \
stmfd sp!, {r4} ; \
mrs r4, cpsr ; \
orr r0, r4, #(PSR_I | PSR_F) ; \
msr cpsr_fsxc, r0
#define XSCALE_CACHE_CLEAN_UNBLOCK \
msr cpsr_fsxc, r4 ; \
ldmfd sp!, {r4}
#else
#define XSCALE_CACHE_CLEAN_BLOCK
#define XSCALE_CACHE_CLEAN_UNBLOCK
#endif /* CACHE_CLEAN_BLOCK_INTR */
ENTRY_NP(xscalec3_cache_syncI)
EENTRY_NP(xscalec3_cache_purgeID)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
EENTRY_NP(xscalec3_cache_cleanID)
EENTRY_NP(xscalec3_cache_purgeD)
EENTRY(xscalec3_cache_cleanD)
XSCALE_CACHE_CLEAN_BLOCK
mov r0, #0
1:
mov r1, r0, asl #30
mov r2, #0
2:
orr r3, r1, r2, asl #5
mcr p15, 0, r3, c7, c14, 2 /* clean and invalidate */
add r2, r2, #1
cmp r2, #L1_DCACHE_SETS
bne 2b
add r0, r0, #1
cmp r0, #4
bne 1b
CPWAIT(r0)
XSCALE_CACHE_CLEAN_UNBLOCK
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
EEND(xscalec3_cache_purgeID)
EEND(xscalec3_cache_cleanID)
EEND(xscalec3_cache_purgeD)
EEND(xscalec3_cache_cleanD)
END(xscalec3_cache_syncI)
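The clean loop above walks the L1 D-cache by set/way: the outer counter is the way (asl #30 places it in the top two bits of the operand for a 4-way cache) and the inner counter the set (asl #5 places it above the 32-byte line offset). The same traversal in C, with a hypothetical helper for the set/way op:

#include <stdint.h>

#define L1_DCACHE_WAYS	4
#define L1_DCACHE_SETS	256	/* 32 KiB / (4 ways * 32 B/line) */

/* Hypothetical stand-in for "mcr p15, 0, r3, c7, c14, 2". */
void l1d_clean_inv_set_way(uint32_t setway);

/* C model of the set/way loop in xscalec3_cache_cleanD above. */
void
l1d_clean_inv_all(void)
{
	for (uint32_t way = 0; way < L1_DCACHE_WAYS; way++)	/* cmp r0, #4 */
		for (uint32_t set = 0; set < L1_DCACHE_SETS; set++)
			l1d_clean_inv_set_way((way << 30) | (set << 5));
}

The L2 purge routine below uses the same structure with asl #29 for eight ways and L2_CACHE_SETS (0x80000 / (8 * 32) = 2048) sets.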
ENTRY(xscalec3_cache_purgeID_rng)
cmp r1, #0x4000
bcs _C_LABEL(xscalec3_cache_cleanID)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
1: mcr p15, 0, r0, c7, c14, 1 /* clean/invalidate L1 D cache entry */
nop
mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
add r0, r0, #32
subs r1, r1, #32
bhi 1b
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
END(xscalec3_cache_purgeID_rng)
ENTRY(xscalec3_cache_syncI_rng)
cmp r1, #0x4000
bcs _C_LABEL(xscalec3_cache_syncI)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
add r0, r0, #32
subs r1, r1, #32
bhi 1b
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
END(xscalec3_cache_syncI_rng)
ENTRY(xscalec3_cache_purgeD_rng)
cmp r1, #0x4000
bcs _C_LABEL(xscalec3_cache_cleanID)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
1: mcr p15, 0, r0, c7, c14, 1 /* Clean and invalidate D cache entry */
add r0, r0, #32
subs r1, r1, #32
bhi 1b
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
END(xscalec3_cache_purgeD_rng)
ENTRY(xscalec3_cache_cleanID_rng)
EENTRY(xscalec3_cache_cleanD_rng)
cmp r1, #0x4000
bcs _C_LABEL(xscalec3_cache_cleanID)
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
1: mcr p15, 0, r0, c7, c10, 1 /* clean L1 D cache entry */
nop
add r0, r0, #32
subs r1, r1, #32
bhi 1b
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
EEND(xscalec3_cache_cleanD_rng)
END(xscalec3_cache_cleanID_rng)
ENTRY(xscalec3_l2cache_purge)
/* Clean-up the L2 cache */
mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
mov r0, #0
1:
mov r1, r0, asl #29
mov r2, #0
2:
orr r3, r1, r2, asl #5
mcr p15, 1, r3, c7, c15, 2
add r2, r2, #1
cmp r2, #L2_CACHE_SETS
bne 2b
add r0, r0, #1
cmp r0, #8
bne 1b
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
RET
END(xscalec3_l2cache_purge)
ENTRY(xscalec3_l2cache_clean_rng)
mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */
add r0, r0, #32
subs r1, r1, #32
bhi 1b
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c10, 5
CPWAIT_AND_RETURN(r0)
END(xscalec3_l2cache_clean_rng)
ENTRY(xscalec3_l2cache_purge_rng)
mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */
mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 D cache entry */
add r0, r0, #32
subs r1, r1, #32
bhi 1b
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c10, 5
CPWAIT_AND_RETURN(r0)
END(xscalec3_l2cache_purge_rng)
ENTRY(xscalec3_l2cache_flush_rng)
mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
and r2, r0, #0x1f
add r1, r1, r2
bic r0, r0, #0x1f
1: mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 cache line */
add r0, r0, #32
subs r1, r1, #32
bhi 1b
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c10, 5
CPWAIT_AND_RETURN(r0)
END(xscalec3_l2cache_flush_rng)
/*
* Functions to set the MMU Translation Table Base register
*
* We need to clean and flush the cache as it uses virtual
* addresses that are about to change.
*/
ENTRY(xscalec3_setttb)
#ifdef CACHE_CLEAN_BLOCK_INTR
mrs r3, cpsr
orr r1, r3, #(PSR_I | PSR_F)
msr cpsr_fsxc, r1
#endif
stmfd sp!, {r0-r3, lr}
bl _C_LABEL(xscalec3_cache_cleanID)
mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
CPWAIT(r0)
ldmfd sp!, {r0-r3, lr}
#ifdef ARM_USE_L2_CACHE
orr r0, r0, #0x18 /* cache the page table in L2 */
#endif
/* Write the TTB */
mcr p15, 0, r0, c2, c0, 0
/* If we have updated the TTB we must flush the TLB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
CPWAIT(r0)
#ifdef CACHE_CLEAN_BLOCK_INTR
msr cpsr_fsxc, r3
#endif
RET
END(xscalec3_setttb)
/*
* Context switch.
*
* These are the CPU-specific parts of the context switcher cpu_switch().
* These functions actually perform the TTB reload.
*
* NOTE: Special calling convention
* r1, r4-r13 must be preserved
*/
ENTRY(xscalec3_context_switch)
/*
* CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
* Thus the data cache will contain only kernel data and the
* instruction cache will contain only kernel code, and all
* kernel mappings are shared by all processes.
*/
#ifdef ARM_USE_L2_CACHE
orr r0, r0, #0x18 /* Cache the page table in L2 */
#endif
/* Write the TTB */
mcr p15, 0, r0, c2, c0, 0
/* If we have updated the TTB we must flush the TLB */
mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
CPWAIT_AND_RETURN(r0)
END(xscalec3_context_switch)

View File: sys/arm/conf/DB-78XXX

@@ -2,9 +2,7 @@
machine arm
-cpu CPU_ARM9
cpu CPU_ARM9E
-cpu CPU_FA526
files "../mv/files.mv"
files "../mv/discovery/files.db78xxx"

View File: sys/conf/Makefile.arm

@@ -74,9 +74,9 @@ SYSTEM_LD_TAIL +=;sed s/" + SIZEOF_HEADERS"// ldscript.$M\
FILES_CPU_FUNC = \
$S/$M/$M/cpufunc_asm_arm9.S \
-$S/$M/$M/cpufunc_asm_xscale.S $S/$M/$M/cpufunc_asm.S \
-$S/$M/$M/cpufunc_asm_xscale_c3.S $S/$M/$M/cpufunc_asm_armv5_ec.S \
-$S/$M/$M/cpufunc_asm_fa526.S $S/$M/$M/cpufunc_asm_sheeva.S
+$S/$M/$M/cpufunc_asm.S \
+$S/$M/$M/cpufunc_asm_armv5_ec.S \
+$S/$M/$M/cpufunc_asm_sheeva.S
.if ${MACHINE_ARCH:Marmv[67]*} == "" && defined(KERNPHYSADDR)
KERNEL_EXTRA=trampoline

View File: sys/conf/files.arm

@@ -30,16 +30,13 @@ arm/arm/busdma_machdep-v6.c optional armv7 | armv6 | ARM_USE_V6_BUSDMA
arm/arm/copystr.S standard
arm/arm/cpufunc.c standard
arm/arm/cpufunc_asm.S standard
-arm/arm/cpufunc_asm_arm9.S optional cpu_arm9 | cpu_arm9e
+arm/arm/cpufunc_asm_arm9.S optional cpu_arm9e
arm/arm/cpufunc_asm_arm11x6.S optional cpu_arm1176
-arm/arm/cpufunc_asm_armv4.S optional cpu_arm9 | cpu_arm9e | cpu_fa526 | cpu_xscale_pxa2x0 | cpu_xscale_81342
+arm/arm/cpufunc_asm_armv4.S optional cpu_arm9e
arm/arm/cpufunc_asm_armv5_ec.S optional cpu_arm9e
arm/arm/cpufunc_asm_armv7.S optional cpu_cortexa | cpu_krait | cpu_mv_pj4b
-arm/arm/cpufunc_asm_fa526.S optional cpu_fa526
arm/arm/cpufunc_asm_pj4b.S optional cpu_mv_pj4b
arm/arm/cpufunc_asm_sheeva.S optional cpu_arm9e
-arm/arm/cpufunc_asm_xscale.S optional cpu_xscale_pxa2x0 | cpu_xscale_81342
-arm/arm/cpufunc_asm_xscale_c3.S optional cpu_xscale_81342
arm/arm/cpuinfo.c standard
arm/arm/cpu_asm-v6.S optional armv7 | armv6
arm/arm/db_disasm.c optional ddb

View File: sys/conf/options.arm

@@ -9,12 +9,10 @@ ARM_USE_V6_BUSDMA opt_global.h
ARM_WANT_TP_ADDRESS opt_global.h
COUNTS_PER_SEC opt_timer.h
CPSW_ETHERSWITCH opt_cpsw.h
-CPU_ARM9 opt_global.h
CPU_ARM9E opt_global.h
CPU_ARM1176 opt_global.h
CPU_CORTEXA opt_global.h
CPU_KRAIT opt_global.h
-CPU_FA526 opt_global.h
CPU_MV_PJ4B opt_global.h
SMP_ON_UP opt_global.h # Runtime detection of MP extensions
DEV_GIC opt_global.h
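Each entry above maps a kernel option to the header config(8) generates for it; options destined for opt_global.h become a #define visible in every compilation unit, which is what made blocks like the #ifdef CPU_FA526 in the deleted cpufunc_asm_fa526.S conditional. Illustrative output for a config that declares cpu CPU_ARM9E (contents approximate):

/* opt_global.h, as generated by config(8) for a CPU_ARM9E kernel: */
#define CPU_ARM9E 1

/* ...consumed the way the deleted FA526 file did: */
#ifdef CPU_FA526	/* can no longer be defined after this commit */
#define CACHELINE_SIZE 16
#else
#define CACHELINE_SIZE 32
#endif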