Remove the now-unused armv4 and non-INTRNG files.

Michal Meloun 2020-11-28 15:00:08 +00:00
parent 412ef5da8a
commit b88b275145
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=368126
16 changed files with 7 additions and 8157 deletions

@@ -256,9 +256,6 @@ struct cpu_functions cortexa_cpufuncs = {
struct cpu_functions cpufuncs;
u_int cputype;
#if __ARM_ARCH <= 5
u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore-v4.s */
#endif
#if defined (CPU_ARM9E) || \
defined(CPU_ARM1176) || \

@@ -1,341 +0,0 @@
/* $NetBSD: cpu.c,v 1.55 2004/02/13 11:36:10 wiz Exp $ */
/*-
* Copyright (c) 1995 Mark Brinicombe.
* Copyright (c) 1995 Brini.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Brini.
* 4. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* RiscBSD kernel project
*
* cpu.c
*
* Probing and configuration for the master CPU
*
* Created : 10/10/95
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#include <machine/endian.h>
#include <machine/md_var.h>
char machine[] = "arm";
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD,
machine, 0, "Machine class");
static const char * const generic_steppings[16] = {
"rev 0", "rev 1", "rev 2", "rev 3",
"rev 4", "rev 5", "rev 6", "rev 7",
"rev 8", "rev 9", "rev 10", "rev 11",
"rev 12", "rev 13", "rev 14", "rev 15",
};
static const char * const xscale_steppings[16] = {
"step A-0", "step A-1", "step B-0", "step C-0",
"step D-0", "rev 5", "rev 6", "rev 7",
"rev 8", "rev 9", "rev 10", "rev 11",
"rev 12", "rev 13", "rev 14", "rev 15",
};
static const char * const i80219_steppings[16] = {
"step A-0", "rev 1", "rev 2", "rev 3",
"rev 4", "rev 5", "rev 6", "rev 7",
"rev 8", "rev 9", "rev 10", "rev 11",
"rev 12", "rev 13", "rev 14", "rev 15",
};
static const char * const i80321_steppings[16] = {
"step A-0", "step B-0", "rev 2", "rev 3",
"rev 4", "rev 5", "rev 6", "rev 7",
"rev 8", "rev 9", "rev 10", "rev 11",
"rev 12", "rev 13", "rev 14", "rev 15",
};
static const char * const i81342_steppings[16] = {
"step A-0", "rev 1", "rev 2", "rev 3",
"rev 4", "rev 5", "rev 6", "rev 7",
"rev 8", "rev 9", "rev 10", "rev 11",
"rev 12", "rev 13", "rev 14", "rev 15",
};
/* Steppings for PXA2[15]0 */
static const char * const pxa2x0_steppings[16] = {
"step A-0", "step A-1", "step B-0", "step B-1",
"step B-2", "step C-0", "rev 6", "rev 7",
"rev 8", "rev 9", "rev 10", "rev 11",
"rev 12", "rev 13", "rev 14", "rev 15",
};
/* Steppings for PXA255/26x.
* rev 5: PXA26x B0, rev 6: PXA255 A0
*/
static const char * const pxa255_steppings[16] = {
"rev 0", "rev 1", "rev 2", "step A-0",
"rev 4", "step B-0", "step A-0", "rev 7",
"rev 8", "rev 9", "rev 10", "rev 11",
"rev 12", "rev 13", "rev 14", "rev 15",
};
/* Stepping for PXA27x */
static const char * const pxa27x_steppings[16] = {
"step A-0", "step A-1", "step B-0", "step B-1",
"step C-0", "rev 5", "rev 6", "rev 7",
"rev 8", "rev 9", "rev 10", "rev 11",
"rev 12", "rev 13", "rev 14", "rev 15",
};
struct cpuidtab {
u_int32_t cpuid;
enum cpu_class cpu_class;
const char *cpu_name;
const char * const *cpu_steppings;
};
const struct cpuidtab cpuids[] = {
{ CPU_ID_ARM920T, CPU_CLASS_ARM9TDMI, "ARM920T",
generic_steppings },
{ CPU_ID_ARM920T_ALT, CPU_CLASS_ARM9TDMI, "ARM920T",
generic_steppings },
{ CPU_ID_ARM922T, CPU_CLASS_ARM9TDMI, "ARM922T",
generic_steppings },
{ CPU_ID_ARM926EJS, CPU_CLASS_ARM9EJS, "ARM926EJ-S",
generic_steppings },
{ CPU_ID_ARM940T, CPU_CLASS_ARM9TDMI, "ARM940T",
generic_steppings },
{ CPU_ID_ARM946ES, CPU_CLASS_ARM9ES, "ARM946E-S",
generic_steppings },
{ CPU_ID_ARM966ES, CPU_CLASS_ARM9ES, "ARM966E-S",
generic_steppings },
{ CPU_ID_ARM966ESR1, CPU_CLASS_ARM9ES, "ARM966E-S",
generic_steppings },
{ CPU_ID_FA526, CPU_CLASS_ARM9TDMI, "FA526",
generic_steppings },
{ CPU_ID_FA626TE, CPU_CLASS_ARM9ES, "FA626TE",
generic_steppings },
{ CPU_ID_TI925T, CPU_CLASS_ARM9TDMI, "TI ARM925T",
generic_steppings },
{ CPU_ID_ARM1020E, CPU_CLASS_ARM10E, "ARM1020E",
generic_steppings },
{ CPU_ID_ARM1022ES, CPU_CLASS_ARM10E, "ARM1022E-S",
generic_steppings },
{ CPU_ID_ARM1026EJS, CPU_CLASS_ARM10EJ, "ARM1026EJ-S",
generic_steppings },
{ CPU_ID_80200, CPU_CLASS_XSCALE, "i80200",
xscale_steppings },
{ CPU_ID_80321_400, CPU_CLASS_XSCALE, "i80321 400MHz",
i80321_steppings },
{ CPU_ID_80321_600, CPU_CLASS_XSCALE, "i80321 600MHz",
i80321_steppings },
{ CPU_ID_80321_400_B0, CPU_CLASS_XSCALE, "i80321 400MHz",
i80321_steppings },
{ CPU_ID_80321_600_B0, CPU_CLASS_XSCALE, "i80321 600MHz",
i80321_steppings },
{ CPU_ID_81342, CPU_CLASS_XSCALE, "i81342",
i81342_steppings },
{ CPU_ID_80219_400, CPU_CLASS_XSCALE, "i80219 400MHz",
i80219_steppings },
{ CPU_ID_80219_600, CPU_CLASS_XSCALE, "i80219 600MHz",
i80219_steppings },
{ CPU_ID_PXA27X, CPU_CLASS_XSCALE, "PXA27x",
pxa27x_steppings },
{ CPU_ID_PXA250A, CPU_CLASS_XSCALE, "PXA250",
pxa2x0_steppings },
{ CPU_ID_PXA210A, CPU_CLASS_XSCALE, "PXA210",
pxa2x0_steppings },
{ CPU_ID_PXA250B, CPU_CLASS_XSCALE, "PXA250",
pxa2x0_steppings },
{ CPU_ID_PXA210B, CPU_CLASS_XSCALE, "PXA210",
pxa2x0_steppings },
{ CPU_ID_PXA250C, CPU_CLASS_XSCALE, "PXA255",
pxa255_steppings },
{ CPU_ID_PXA210C, CPU_CLASS_XSCALE, "PXA210",
pxa2x0_steppings },
{ CPU_ID_MV88FR131, CPU_CLASS_MARVELL, "Feroceon 88FR131",
generic_steppings },
{ CPU_ID_MV88FR571_VD, CPU_CLASS_MARVELL, "Feroceon 88FR571-VD",
generic_steppings },
{ 0, CPU_CLASS_NONE, NULL, NULL }
};
struct cpu_classtab {
const char *class_name;
const char *class_option;
};
const struct cpu_classtab cpu_classes[] = {
{ "unknown", NULL }, /* CPU_CLASS_NONE */
{ "ARM9TDMI", "CPU_ARM9TDMI" }, /* CPU_CLASS_ARM9TDMI */
{ "ARM9E-S", "CPU_ARM9E" }, /* CPU_CLASS_ARM9ES */
{ "ARM9EJ-S", "CPU_ARM9E" }, /* CPU_CLASS_ARM9EJS */
{ "ARM10E", "CPU_ARM10" }, /* CPU_CLASS_ARM10E */
{ "ARM10EJ", "CPU_ARM10" }, /* CPU_CLASS_ARM10EJ */
{ "XScale", "CPU_XSCALE_..." }, /* CPU_CLASS_XSCALE */
{ "Marvell", "CPU_MARVELL" }, /* CPU_CLASS_MARVELL */
};
/*
* Report the type of the specified arm processor. This uses the generic and
* arm specific information in the cpu structure to identify the processor.
* The remaining fields in the cpu structure are filled in appropriately.
*/
static const char * const wtnames[] = {
"write-through",
"write-back",
"write-back",
"**unknown 3**",
"**unknown 4**",
"write-back-locking", /* XXX XScale-specific? */
"write-back-locking-A",
"write-back-locking-B",
"**unknown 8**",
"**unknown 9**",
"**unknown 10**",
"**unknown 11**",
"**unknown 12**",
"**unknown 13**",
"write-back-locking-C",
"**unknown 15**",
};
static void
print_enadis(int enadis, char *s)
{
printf(" %s %sabled", s, (enadis == 0) ? "dis" : "en");
}
enum cpu_class cpu_class = CPU_CLASS_NONE;
void
identify_arm_cpu(void)
{
u_int cpuid, ctrl;
int i;
ctrl = cp15_sctlr_get();
cpuid = cp15_midr_get();
if (cpuid == 0) {
printf("Processor failed probe - no CPU ID\n");
return;
}
for (i = 0; cpuids[i].cpuid != 0; i++)
if (cpuids[i].cpuid == (cpuid & CPU_ID_CPU_MASK)) {
cpu_class = cpuids[i].cpu_class;
printf("CPU: %s %s (%s core)\n",
cpuids[i].cpu_name,
cpuids[i].cpu_steppings[cpuid &
CPU_ID_REVISION_MASK],
cpu_classes[cpu_class].class_name);
break;
}
if (cpuids[i].cpuid == 0)
printf("unknown CPU (ID = 0x%x)\n", cpuid);
printf(" ");
if (ctrl & CPU_CONTROL_BEND_ENABLE)
printf(" Big-endian");
else
printf(" Little-endian");
switch (cpu_class) {
case CPU_CLASS_ARM9TDMI:
case CPU_CLASS_ARM9ES:
case CPU_CLASS_ARM9EJS:
case CPU_CLASS_ARM10E:
case CPU_CLASS_ARM10EJ:
case CPU_CLASS_XSCALE:
case CPU_CLASS_MARVELL:
print_enadis(ctrl & CPU_CONTROL_DC_ENABLE, "DC");
print_enadis(ctrl & CPU_CONTROL_IC_ENABLE, "IC");
#if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY)
i = sheeva_control_ext(0, 0);
print_enadis(i & MV_WA_ENABLE, "WA");
print_enadis(i & MV_DC_STREAM_ENABLE, "DC streaming");
printf("\n ");
print_enadis((i & MV_BTB_DISABLE) == 0, "BTB");
print_enadis(i & MV_L2_ENABLE, "L2");
print_enadis((i & MV_L2_PREFETCH_DISABLE) == 0,
"L2 prefetch");
printf("\n ");
#endif
break;
default:
break;
}
print_enadis(ctrl & CPU_CONTROL_WBUF_ENABLE, "WB");
if (ctrl & CPU_CONTROL_LABT_ENABLE)
printf(" LABT");
else
printf(" EABT");
print_enadis(ctrl & CPU_CONTROL_BPRD_ENABLE, "branch prediction");
printf("\n");
/* Print cache info. */
if (arm_picache_line_size == 0 && arm_pdcache_line_size == 0)
return;
if (arm_pcache_unified) {
printf(" %dKB/%dB %d-way %s unified cache\n",
arm_pdcache_size / 1024,
arm_pdcache_line_size, arm_pdcache_ways,
wtnames[arm_pcache_type]);
} else {
printf(" %dKB/%dB %d-way instruction cache\n",
arm_picache_size / 1024,
arm_picache_line_size, arm_picache_ways);
printf(" %dKB/%dB %d-way %s data cache\n",
arm_pdcache_size / 1024,
arm_pdcache_line_size, arm_pdcache_ways,
wtnames[arm_pcache_type]);
}
}

@@ -1,204 +0,0 @@
/* $NetBSD: intr.c,v 1.12 2003/07/15 00:24:41 lukem Exp $ */
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 2004 Olivier Houchard.
* Copyright (c) 1994-1998 Mark Brinicombe.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Mark Brinicombe
* for the NetBSD Project.
* 4. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Soft interrupt and other generic interrupt functions.
*/
#include "opt_platform.h"
#include "opt_hwpmc_hooks.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/conf.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/cpu.h>
#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <machine/fdt.h>
#endif
#define INTRNAME_LEN (MAXCOMLEN + 1)
typedef void (*mask_fn)(void *);
static struct intr_event *intr_events[NIRQ];
void intr_irq_handler(struct trapframe *);
void (*arm_post_filter)(void *) = NULL;
int (*arm_config_irq)(int irq, enum intr_trigger trig,
enum intr_polarity pol) = NULL;
/* Data for statistics reporting. */
u_long intrcnt[NIRQ];
char intrnames[(NIRQ * INTRNAME_LEN) + 1];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);
/*
* Pre-format intrnames into an array of fixed-size strings containing spaces.
* This allows us to avoid the need for an intermediate table of indices into
* the names and counts arrays, while still meeting the requirements and
* assumptions of vmstat(8) and the kdb "show intrcnt" command, the two
* consumers of this data.
*/
static void
intr_init(void *unused)
{
int i;
for (i = 0; i < NIRQ; ++i) {
snprintf(&intrnames[i * INTRNAME_LEN], INTRNAME_LEN, "%-*s",
INTRNAME_LEN - 1, "");
}
}
SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL);
#ifdef FDT
int
intr_fdt_map_irq(phandle_t iparent, pcell_t *intr, int icells)
{
fdt_pic_decode_t intr_decode;
phandle_t intr_parent;
int i, rv, interrupt, trig, pol;
intr_parent = OF_node_from_xref(iparent);
for (i = 0; i < icells; i++)
intr[i] = cpu_to_fdt32(intr[i]);
for (i = 0; fdt_pic_table[i] != NULL; i++) {
intr_decode = fdt_pic_table[i];
rv = intr_decode(intr_parent, intr, &interrupt, &trig, &pol);
if (rv == 0) {
/* This was recognized as our PIC and decoded. */
interrupt = FDT_MAP_IRQ(intr_parent, interrupt);
return (interrupt);
}
}
/* Not in table, so guess */
interrupt = FDT_MAP_IRQ(intr_parent, fdt32_to_cpu(intr[0]));
return (interrupt);
}
#endif
void
arm_setup_irqhandler(const char *name, driver_filter_t *filt,
void (*hand)(void*), void *arg, int irq, int flags, void **cookiep)
{
struct intr_event *event;
int error;
if (irq < 0 || irq >= NIRQ)
return;
event = intr_events[irq];
if (event == NULL) {
error = intr_event_create(&event, (void *)irq, 0, irq,
(mask_fn)arm_mask_irq, (mask_fn)arm_unmask_irq,
arm_post_filter, NULL, "intr%d:", irq);
if (error)
return;
intr_events[irq] = event;
snprintf(&intrnames[irq * INTRNAME_LEN], INTRNAME_LEN,
"irq%d: %-*s", irq, INTRNAME_LEN - 1, name);
}
intr_event_add_handler(event, name, filt, hand, arg,
intr_priority(flags), flags, cookiep);
}
int
arm_remove_irqhandler(int irq, void *cookie)
{
struct intr_event *event;
int error;
event = intr_events[irq];
arm_mask_irq(irq);
error = intr_event_remove_handler(cookie);
if (!CK_SLIST_EMPTY(&event->ie_handlers))
arm_unmask_irq(irq);
return (error);
}
void dosoftints(void);
void
dosoftints(void)
{
}
void
intr_irq_handler(struct trapframe *frame)
{
struct intr_event *event;
int i;
VM_CNT_INC(v_intr);
i = -1;
while ((i = arm_get_next_irq(i)) != -1) {
intrcnt[i]++;
event = intr_events[i];
if (intr_event_handle(event, frame) != 0) {
/* XXX: Log stray IRQs */
arm_mask_irq(i);
}
}
#ifdef HWPMC_HOOKS
if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, frame);
#endif
}

@@ -1,494 +0,0 @@
/* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */
/*-
* Copyright 2011 Semihalf
* Copyright (C) 1994-1997 Mark Brinicombe
* Copyright (C) 1994 Brini
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Brini.
* 4. The name of Brini may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "assym.inc"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/pte-v4.h>
__FBSDID("$FreeBSD$");
/* 2K initial stack is plenty, it is only used by initarm() */
#define INIT_ARM_STACK_SIZE 2048
#define CPWAIT_BRANCH \
sub pc, pc, #4
#define CPWAIT(tmp) \
mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
mov tmp, tmp /* wait for it to complete */ ;\
CPWAIT_BRANCH /* branch to next insn */
/*
* This is for libkvm, and should be the address of the beginning
* of the kernel text segment (not necessarily the same as kernbase).
*
* These are being phased out. Newer copies of libkvm don't need these
* values as the information is added to the core file by inspecting
* the running kernel.
*/
.text
.align 2
.globl kernbase
.set kernbase,KERNVIRTADDR
#ifdef PHYSADDR
.globl physaddr
.set physaddr,PHYSADDR
#endif
/*
* On entry for FreeBSD boot ABI:
* r0 - metadata pointer or 0 (boothowto on AT91's boot2)
* r1 - if (r0 == 0) then metadata pointer
* On entry for Linux boot ABI:
* r0 - 0
* r1 - machine type (passed as arg2 to initarm)
* r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
*
* For both types of boot we gather up the args, put them in a struct arm_boot_params
* structure and pass that to initarm.
*/
.globl btext
btext:
ASENTRY_NP(_start)
STOP_UNWINDING /* Can't unwind into the bootloader! */
mov r9, r0 /* 0 or boot mode from boot2 */
mov r8, r1 /* Save Machine type */
mov ip, r2 /* Save meta data */
mov fp, r3 /* Future expansion */
/* Make sure interrupts are disabled. */
mrs r7, cpsr
orr r7, r7, #(PSR_I | PSR_F)
msr cpsr_c, r7
#if defined (FLASHADDR) && defined(LOADERRAMADDR)
/*
* Sanity check the configuration.
* FLASHADDR and LOADERRAMADDR depend on PHYSADDR in some cases.
* ARMv4 and ARMv5 make assumptions on where they are loaded.
* TODO: Fix the ARMv4/v5 case.
*/
#ifndef PHYSADDR
#error PHYSADDR must be defined for this configuration
#endif
/* Check if we're running from flash. */
ldr r7, =FLASHADDR
/*
* If we're running with MMU disabled, test against the
* physical address instead.
*/
mrc CP15_SCTLR(r2)
ands r2, r2, #CPU_CONTROL_MMU_ENABLE
ldreq r6, =PHYSADDR
ldrne r6, =LOADERRAMADDR
cmp r7, r6
bls flash_lower
cmp r7, pc
bhi from_ram
b do_copy
flash_lower:
cmp r6, pc
bls from_ram
do_copy:
ldr r7, =KERNBASE
adr r1, _start
ldr r0, Lreal_start
ldr r2, Lend
sub r2, r2, r0
sub r0, r0, r7
add r0, r0, r6
mov r4, r0
bl memcpy
ldr r0, Lram_offset
add pc, r4, r0
Lram_offset: .word from_ram-_C_LABEL(_start)
from_ram:
nop
#endif
disable_mmu:
/* Disable MMU for a while */
mrc CP15_SCTLR(r2)
bic r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
CPU_CONTROL_WBUF_ENABLE)
bic r2, r2, #(CPU_CONTROL_IC_ENABLE)
bic r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
mcr CP15_SCTLR(r2)
nop
nop
nop
CPWAIT(r0)
Lunmapped:
/*
* Build page table from scratch.
*/
/*
* Figure out the physical address we're loaded at by assuming this
* entry point code is in the first L1 section and so if we clear the
* offset bits of the pc that will give us the section-aligned load
* address, which remains in r5 throughout all the following code.
*/
ldr r2, =(L1_S_OFFSET)
bic r5, pc, r2
/* Find the delta between VA and PA, result stays in r0 throughout. */
adr r0, Lpagetable
bl translate_va_to_pa
/*
* First map the entire 4GB address space as VA=PA. It's mapped as
* normal (cached) memory because it's for things like accessing the
* parameters passed in from the bootloader, which might be at any
* physical address, different for every platform.
*/
mov r1, #0
mov r2, #0
mov r3, #4096
bl build_pagetables
/*
* Next we do 64MiB starting at the physical load address, mapped to
* the VA the kernel is linked for.
*/
mov r1, r5
ldr r2, =(KERNVIRTADDR)
mov r3, #64
bl build_pagetables
#if defined(PHYSADDR) && (KERNVIRTADDR != KERNBASE)
/*
* If the kernel wasn't loaded at the beginning of the ram, map the memory
* before the kernel too, as some ports use that for pagetables, stack, etc...
*/
ldr r1, =PHYSADDR
ldr r2, =KERNBASE
ldr r3, =((KERNVIRTADDR - KERNBASE) / L1_S_SIZE)
bl build_pagetables
#endif
/* Create a device mapping for early_printf if specified. */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
ldr r1, =SOCDEV_PA
ldr r2, =SOCDEV_VA
mov r3, #1
bl build_device_pagetables
#endif
mcr p15, 0, r0, c2, c0, 0 /* Set TTB */
mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */
/* Set the Domain Access register. Very important! */
mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
mcr p15, 0, r0, c3, c0, 0
/*
* Enable MMU.
*/
mrc CP15_SCTLR(r0)
orr r0, r0, #(CPU_CONTROL_MMU_ENABLE)
mcr CP15_SCTLR(r0)
nop
nop
nop
CPWAIT(r0)
/* Transition the PC from physical to virtual addressing. */
ldr pc,=mmu_done
mmu_done:
nop
adr r1, .Lstart
ldmia r1, {r1, r2, sp} /* Set initial stack and */
sub r2, r2, r1 /* get zero init data */
mov r3, #0
.L1:
str r3, [r1], #0x0004 /* get zero init data */
subs r2, r2, #4
bgt .L1
virt_done:
mov r1, #28 /* loader info size is 28 bytes also second arg */
subs sp, sp, r1 /* allocate arm_boot_params struct on stack */
mov r0, sp /* loader info pointer is first arg */
bic sp, sp, #7 /* align stack to 8 bytes */
str r1, [r0] /* Store length of loader info */
str r9, [r0, #4] /* Store r0 from boot loader */
str r8, [r0, #8] /* Store r1 from boot loader */
str ip, [r0, #12] /* store r2 from boot loader */
str fp, [r0, #16] /* store r3 from boot loader */
str r5, [r0, #20] /* store the physical address */
adr r4, Lpagetable /* load the pagetable address */
ldr r5, [r4, #4]
str r5, [r0, #24] /* store the pagetable address */
mov fp, #0 /* trace back starts here */
bl _C_LABEL(initarm) /* Off we go */
/* init arm will return the new stack pointer. */
mov sp, r0
bl _C_LABEL(mi_startup) /* call mi_startup()! */
adr r0, .Lmainreturned
b _C_LABEL(panic)
/* NOTREACHED */
END(_start)
#define VA_TO_PA_POINTER(name, table) \
name: ;\
.word . ;\
.word table
/*
* Returns the physical address of a magic va to pa pointer.
* r0 - The pagetable data pointer. This must be built using the
* VA_TO_PA_POINTER macro.
* e.g.
* VA_TO_PA_POINTER(Lpagetable, pagetable)
* ...
* adr r0, Lpagetable
* bl translate_va_to_pa
* r0 will now contain the physical address of pagetable
* r1, r2 - Trashed
*/
translate_va_to_pa:
ldr r1, [r0]
sub r2, r1, r0
/* At this point: r2 = VA - PA */
/*
* Find the physical address of the table. After these two
* instructions:
* r1 = va(pagetable)
*
* r0 = va(pagetable) - (VA - PA)
* = va(pagetable) - VA + PA
* = pa(pagetable)
*/
ldr r1, [r0, #4]
sub r0, r1, r2
RET
/*
* Builds the page table
* r0 - The table base address
* r1 - The physical address (trashed)
* r2 - The virtual address (trashed)
* r3 - The number of 1MiB sections
* r4 - Trashed
*
* Addresses must be 1MiB aligned
*/
build_device_pagetables:
ldr r4, =(L1_TYPE_S|L1_S_AP(AP_KRW))
b 1f
build_pagetables:
/* Set the required page attributes */
ldr r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
1:
orr r1, r4
/* Move the virtual address to the correct bit location */
lsr r2, #(L1_S_SHIFT - 2)
mov r4, r3
2:
str r1, [r0, r2]
add r2, r2, #4
add r1, r1, #(L1_S_SIZE)
adds r4, r4, #-1
bhi 2b
RET
VA_TO_PA_POINTER(Lpagetable, pagetable)
Lreal_start:
.word _start
Lend:
.word _edata
.Lstart:
.word _edata
.word _ebss
.word svcstk + INIT_ARM_STACK_SIZE
.Lvirt_done:
.word virt_done
.Lmainreturned:
.asciz "main() returned"
.align 2
.bss
svcstk:
.space INIT_ARM_STACK_SIZE
/*
* Memory for the initial pagetable. We are unable to place this in
* the bss as this will be cleared after the table is loaded.
*/
.section ".init_pagetable", "aw", %nobits
.align 14 /* 16KiB aligned */
pagetable:
.space L1_TABLE_SIZE
.text
.align 2
.Lcpufuncs:
.word _C_LABEL(cpufuncs)
ENTRY_NP(cpu_halt)
mrs r2, cpsr
bic r2, r2, #(PSR_MODE)
orr r2, r2, #(PSR_SVC32_MODE)
orr r2, r2, #(PSR_I | PSR_F)
msr cpsr_fsxc, r2
ldr r4, .Lcpu_reset_address
ldr r4, [r4]
ldr r0, .Lcpufuncs
mov lr, pc
ldr pc, [r0, #CF_IDCACHE_WBINV_ALL]
mov lr, pc
ldr pc, [r0, #CF_L2CACHE_WBINV_ALL]
/*
* Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
* necessary.
*/
ldr r1, .Lcpu_reset_needs_v4_MMU_disable
ldr r1, [r1]
cmp r1, #0
mov r2, #0
/*
* MMU & IDC off, 32 bit program & data space
* Hurl ourselves into the ROM
*/
mov r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
mcr CP15_SCTLR(r0)
mcrne p15, 0, r2, c8, c7, 0 /* nail I+D TLB on ARMv4 and greater */
mov pc, r4
/*
* _cpu_reset_address contains the address to branch to, to complete
* the cpu reset after turning the MMU off
* This variable is provided by the hardware specific code
*/
.Lcpu_reset_address:
.word _C_LABEL(cpu_reset_address)
/*
* cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
* v4 MMU disable instruction needs executing... it is an illegal instruction
* on f.e. ARM6/7 that locks up the computer in an endless illegal
* instruction / data-abort / reset loop.
*/
.Lcpu_reset_needs_v4_MMU_disable:
.word _C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)
/*
* setjump + longjmp
*/
ENTRY(setjmp)
stmia r0, {r4-r14}
mov r0, #0x00000000
RET
END(setjmp)
ENTRY(longjmp)
ldmia r0, {r4-r14}
mov r0, #0x00000001
RET
END(longjmp)
.data
.global _C_LABEL(esym)
_C_LABEL(esym): .word _C_LABEL(end)
ENTRY_NP(abort)
b _C_LABEL(abort)
END(abort)
ENTRY_NP(sigcode)
mov r0, sp
add r0, r0, #SIGF_UC
/*
* Call the sigreturn system call.
*
* We have to load r7 manually rather than using
* "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
* correct. Using the alternative places esigcode at the address
* of the data rather than the address one past the data.
*/
ldr r7, [pc, #12] /* Load SYS_sigreturn */
swi SYS_sigreturn
/* Well if that failed we better exit quick ! */
ldr r7, [pc, #8] /* Load SYS_exit */
swi SYS_exit
/* Branch back to retry SYS_sigreturn */
b . - 16
END(sigcode)
.word SYS_sigreturn
.word SYS_exit
.align 2
.global _C_LABEL(esigcode)
_C_LABEL(esigcode):
.data
.global szsigcode
szsigcode:
.long esigcode-sigcode
/* End of locore.S */

@@ -34,8 +34,4 @@
#include <machine/acle-compat.h>
#if __ARM_ARCH >= 6
#include "locore-v6.S"
#else
#include "locore-v4.S"
#endif

File diff suppressed because it is too large

@@ -1,376 +0,0 @@
/* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */
/*-
* Copyright 2003 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Steve C. Woodford for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 1994-1998 Mark Brinicombe.
* Copyright (c) 1994 Brini.
* All rights reserved.
*
* This code is derived from software written for Brini by Mark Brinicombe
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Brini.
* 4. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* RiscBSD kernel project
*
* cpuswitch.S
*
* cpu switching functions
*
* Created : 15/10/94
*
*/
#include "assym.inc"
#include "opt_sched.h"
#include <machine/asm.h>
#include <machine/asmacros.h>
#include <machine/armreg.h>
#include <machine/vfp.h>
__FBSDID("$FreeBSD$");
#define GET_PCPU(tmp, tmp2) \
ldr tmp, .Lcurpcpu
#ifdef VFP
.fpu vfp /* allow VFP instructions */
#endif
.Lcurpcpu:
.word _C_LABEL(__pcpu)
.Lblocked_lock:
.word _C_LABEL(blocked_lock)
#define DOMAIN_CLIENT 0x01
.Lcpufuncs:
.word _C_LABEL(cpufuncs)
/*
* cpu_throw(oldtd, newtd)
*
* Remove current thread state, then select the next thread to run
* and load its state.
* r0 = oldtd
* r1 = newtd
*/
ENTRY(cpu_throw)
mov r5, r1
/*
* r0 = oldtd
* r5 = newtd
*/
#ifdef VFP /* This thread is dying, disable */
bl _C_LABEL(vfp_discard) /* VFP without preserving state. */
#endif
GET_PCPU(r7, r9)
ldr r7, [r5, #(TD_PCB)] /* r7 = new thread's PCB */
/* Switch to lwp0 context */
ldr r9, .Lcpufuncs
mov lr, pc
ldr pc, [r9, #CF_IDCACHE_WBINV_ALL]
ldr r0, [r7, #(PCB_PL1VEC)]
ldr r1, [r7, #(PCB_DACR)]
/*
* r0 = Pointer to L1 slot for vector_page (or NULL)
* r1 = lwp0's DACR
* r5 = lwp0
* r7 = lwp0's PCB
* r9 = cpufuncs
*/
/*
* Ensure the vector table is accessible by fixing up lwp0's L1
*/
cmp r0, #0 /* No need to fixup vector table? */
ldrne r3, [r0] /* But if yes, fetch current value */
ldrne r2, [r7, #(PCB_L1VEC)] /* Fetch new vector_page value */
mcr p15, 0, r1, c3, c0, 0 /* Update DACR for lwp0's context */
cmpne r3, r2 /* Stuffing the same value? */
strne r2, [r0] /* Store if not. */
#ifdef PMAP_INCLUDE_PTE_SYNC
/*
* Need to sync the cache to make sure that last store is
* visible to the MMU.
*/
movne r1, #4
movne lr, pc
ldrne pc, [r9, #CF_DCACHE_WB_RANGE]
#endif /* PMAP_INCLUDE_PTE_SYNC */
/*
* Note: We don't do the same optimisation as cpu_switch() with
* respect to avoiding flushing the TLB if we're switching to
* the same L1 since this process' VM space may be about to go
* away, so we don't want *any* turds left in the TLB.
*/
/* Switch the memory to the new process */
ldr r0, [r7, #(PCB_PAGEDIR)]
mov lr, pc
ldr pc, [r9, #CF_CONTEXT_SWITCH]
GET_PCPU(r6, r4)
/* Hook in a new pcb */
str r7, [r6, #PC_CURPCB]
/* We have a new curthread now so make a note of it */
str r5, [r6, #PC_CURTHREAD]
/* Set the new tp */
ldr r6, [r5, #(TD_MD + MD_TP)]
ldr r4, =ARM_TP_ADDRESS
str r6, [r4]
ldr r6, [r5, #(TD_MD + MD_RAS_START)]
str r6, [r4, #4] /* ARM_RAS_START */
ldr r6, [r5, #(TD_MD + MD_RAS_END)]
str r6, [r4, #8] /* ARM_RAS_END */
/* Restore all the saved registers and exit */
add r3, r7, #PCB_R4
ldmia r3, {r4-r12, sp, pc}
END(cpu_throw)
/*
* cpu_switch(oldtd, newtd, lock)
*
* Save the current thread state, then select the next thread to run
* and load its state.
* r0 = oldtd
* r1 = newtd
* r2 = lock (new lock for old thread)
*/
ENTRY(cpu_switch)
/* Interrupts are disabled. */
/* Save all the registers in the old thread's pcb. */
ldr r3, [r0, #(TD_PCB)]
/* Restore all the saved registers and exit */
add r3, #(PCB_R4)
stmia r3, {r4-r12, sp, lr, pc}
mov r6, r2 /* Save the mutex */
/* rem: r0 = old lwp */
/* rem: interrupts are disabled */
/* Process is now on a processor. */
/* We have a new curthread now so make a note of it */
GET_PCPU(r7, r2)
str r1, [r7, #PC_CURTHREAD]
/* Hook in a new pcb */
ldr r2, [r1, #TD_PCB]
str r2, [r7, #PC_CURPCB]
/* Stage two : Save old context */
/* Get the user structure for the old thread. */
ldr r2, [r0, #(TD_PCB)]
mov r4, r0 /* Save the old thread. */
/* Store the old tp; userland can change it on armv4. */
ldr r3, =ARM_TP_ADDRESS
ldr r9, [r3]
str r9, [r0, #(TD_MD + MD_TP)]
ldr r9, [r3, #4]
str r9, [r0, #(TD_MD + MD_RAS_START)]
ldr r9, [r3, #8]
str r9, [r0, #(TD_MD + MD_RAS_END)]
/* Set the new tp */
ldr r9, [r1, #(TD_MD + MD_TP)]
str r9, [r3]
ldr r9, [r1, #(TD_MD + MD_RAS_START)]
str r9, [r3, #4]
ldr r9, [r1, #(TD_MD + MD_RAS_END)]
str r9, [r3, #8]
/* Get the user structure for the new process in r9 */
ldr r9, [r1, #(TD_PCB)]
/* rem: r2 = old PCB */
/* rem: r9 = new PCB */
/* rem: interrupts are enabled */
#ifdef VFP
fmrx r0, fpexc /* If the VFP is enabled */
tst r0, #(VFPEXC_EN) /* the current thread has */
movne r1, #1 /* used it, so go save */
addne r0, r2, #(PCB_VFPSTATE) /* the state into the PCB */
blne _C_LABEL(vfp_store) /* and disable the VFP. */
#endif
/* r0-r3 now free! */
/* Third phase : restore saved context */
/* rem: r2 = old PCB */
/* rem: r9 = new PCB */
ldr r5, [r9, #(PCB_DACR)] /* r5 = new DACR */
mov r2, #DOMAIN_CLIENT
cmp r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
beq .Lcs_context_switched /* Yup. Don't flush cache */
mrc p15, 0, r0, c3, c0, 0 /* r0 = old DACR */
/*
* Get the new L1 table pointer into r11. If we're switching to
* an LWP with the same address space as the outgoing one, we can
* skip the cache purge and the TTB load.
*
* To avoid data dep stalls that would happen anyway, we try
* and get some useful work done in the mean time.
*/
mrc p15, 0, r10, c2, c0, 0 /* r10 = old L1 */
ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */
teq r10, r11 /* Same L1? */
cmpeq r0, r5 /* Same DACR? */
beq .Lcs_context_switched /* yes! */
/*
* Definitely need to flush the cache.
*/
ldr r1, .Lcpufuncs
mov lr, pc
ldr pc, [r1, #CF_IDCACHE_WBINV_ALL]
.Lcs_cache_purge_skipped:
/* rem: r6 = lock */
/* rem: r9 = new PCB */
/* rem: r10 = old L1 */
/* rem: r11 = new L1 */
mov r2, #0x00000000
ldr r7, [r9, #(PCB_PL1VEC)]
/*
* Ensure the vector table is accessible by fixing up the L1
*/
cmp r7, #0 /* No need to fixup vector table? */
ldrne r2, [r7] /* But if yes, fetch current value */
ldrne r0, [r9, #(PCB_L1VEC)] /* Fetch new vector_page value */
mcr p15, 0, r5, c3, c0, 0 /* Update DACR for new context */
cmpne r2, r0 /* Stuffing the same value? */
#ifndef PMAP_INCLUDE_PTE_SYNC
strne r0, [r7] /* Nope, update it */
#else
beq .Lcs_same_vector
str r0, [r7] /* Otherwise, update it */
/*
* Need to sync the cache to make sure that last store is
* visible to the MMU.
*/
ldr r2, .Lcpufuncs
mov r0, r7
mov r1, #4
mov lr, pc
ldr pc, [r2, #CF_DCACHE_WB_RANGE]
.Lcs_same_vector:
#endif /* PMAP_INCLUDE_PTE_SYNC */
cmp r10, r11 /* Switching to the same L1? */
ldr r10, .Lcpufuncs
beq .Lcs_same_l1 /* Yup. */
/*
* Do a full context switch, including full TLB flush.
*/
mov r0, r11
mov lr, pc
ldr pc, [r10, #CF_CONTEXT_SWITCH]
b .Lcs_context_switched
/*
* We're switching to a different process in the same L1.
* In this situation, we only need to flush the TLB for the
* vector_page mapping, and even then only if r7 is non-NULL.
*/
.Lcs_same_l1:
cmp r7, #0
movne r0, #0 /* We *know* vector_page's VA is 0x0 */
movne lr, pc
ldrne pc, [r10, #CF_TLB_FLUSHID_SE]
.Lcs_context_switched:
/* Release the old thread */
str r6, [r4, #TD_LOCK]
/* XXXSCW: Safe to re-enable FIQs here */
/* rem: r9 = new PCB */
/* Restore all the saved registers and exit */
add r3, r9, #PCB_R4
ldmia r3, {r4-r12, sp, pc}
END(cpu_switch)

@@ -1,717 +0,0 @@
/* $NetBSD: fault.c,v 1.45 2003/11/20 14:44:36 scw Exp $ */
/*-
* Copyright 2004 Olivier Houchard
* Copyright 2003 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Steve C. Woodford for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 1994-1997 Mark Brinicombe.
* Copyright (c) 1994 Brini.
* All rights reserved.
*
* This code is derived from software written for Brini by Mark Brinicombe
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Brini.
* 4. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* RiscBSD kernel project
*
* fault.c
*
* Fault handlers
*
* Created : 28/11/94
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/machdep.h>
#include <machine/pcb.h>
#ifdef KDB
#include <sys/kdb.h>
#endif
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
#endif
#define ReadWord(a) (*((volatile unsigned int *)(a)))
#ifdef DEBUG
int last_fault_code; /* For the benefit of pmap_fault_fixup() */
#endif
struct ksig {
int signb;
u_long code;
};
struct data_abort {
int (*func)(struct trapframe *, u_int, u_int, struct thread *,
struct ksig *);
const char *desc;
};
static int dab_fatal(struct trapframe *, u_int, u_int, struct thread *,
struct ksig *);
static int dab_align(struct trapframe *, u_int, u_int, struct thread *,
struct ksig *);
static int dab_buserr(struct trapframe *, u_int, u_int, struct thread *,
struct ksig *);
static void prefetch_abort_handler(struct trapframe *);
static const struct data_abort data_aborts[] = {
{dab_fatal, "Vector Exception"},
{dab_align, "Alignment Fault 1"},
{dab_fatal, "Terminal Exception"},
{dab_align, "Alignment Fault 3"},
{dab_buserr, "External Linefetch Abort (S)"},
{NULL, "Translation Fault (S)"},
{dab_buserr, "External Linefetch Abort (P)"},
{NULL, "Translation Fault (P)"},
{dab_buserr, "External Non-Linefetch Abort (S)"},
{NULL, "Domain Fault (S)"},
{dab_buserr, "External Non-Linefetch Abort (P)"},
{NULL, "Domain Fault (P)"},
{dab_buserr, "External Translation Abort (L1)"},
{NULL, "Permission Fault (S)"},
{dab_buserr, "External Translation Abort (L2)"},
{NULL, "Permission Fault (P)"}
};
/* Determine if a fault came from user mode */
#define TRAP_USERMODE(tf) ((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE)
/* Determine if 'x' is a permission fault */
#define IS_PERMISSION_FAULT(x) \
(((1 << ((x) & FAULT_TYPE_MASK)) & \
((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0)
static __inline void
call_trapsignal(struct thread *td, int sig, u_long code)
{
ksiginfo_t ksi;
ksiginfo_init_trap(&ksi);
ksi.ksi_signo = sig;
ksi.ksi_code = (int)code;
trapsignal(td, &ksi);
}
void
abort_handler(struct trapframe *tf, int type)
{
struct vm_map *map;
struct pcb *pcb;
struct thread *td;
u_int user, far, fsr;
vm_prot_t ftype;
void *onfault;
vm_offset_t va;
int error = 0, signo, ucode;
struct ksig ksig;
struct proc *p;
if (type == 1)
return (prefetch_abort_handler(tf));
/* Grab FAR/FSR before enabling interrupts */
far = cp15_dfar_get();
fsr = cp15_dfsr_get();
#if 0
printf("data abort: fault address=%p (from pc=%p lr=%p)\n",
(void*)far, (void*)tf->tf_pc, (void*)tf->tf_svc_lr);
#endif
/* Update vmmeter statistics */
#if 0
vmexp.traps++;
#endif
td = curthread;
p = td->td_proc;
VM_CNT_INC(v_trap);
/* Data abort came from user mode? */
user = TRAP_USERMODE(tf);
if (user) {
td->td_pticks = 0;
td->td_frame = tf;
if (td->td_cowgen != td->td_proc->p_cowgen)
thread_cow_update(td);
}
/* Grab the current pcb */
pcb = td->td_pcb;
/* Re-enable interrupts if they were enabled previously */
if (td->td_md.md_spinlock_count == 0) {
if (__predict_true(tf->tf_spsr & PSR_I) == 0)
enable_interrupts(PSR_I);
if (__predict_true(tf->tf_spsr & PSR_F) == 0)
enable_interrupts(PSR_F);
}
/* Invoke the appropriate handler, if necessary */
if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) {
if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far,
td, &ksig)) {
signo = ksig.signb;
ucode = ksig.code;
goto do_trapsignal;
}
goto out;
}
/*
* At this point, we're dealing with one of the following data aborts:
*
* FAULT_TRANS_S - Translation -- Section
* FAULT_TRANS_P - Translation -- Page
* FAULT_DOMAIN_S - Domain -- Section
* FAULT_DOMAIN_P - Domain -- Page
* FAULT_PERM_S - Permission -- Section
* FAULT_PERM_P - Permission -- Page
*
* These are the main virtual memory-related faults signalled by
* the MMU.
*/
/*
* Make sure the Program Counter is sane. We could fall foul of
* someone executing Thumb code, in which case the PC might not
* be word-aligned. This would cause a kernel alignment fault
* further down if we have to decode the current instruction.
* XXX: It would be nice to be able to support Thumb at some point.
*/
if (__predict_false((tf->tf_pc & 3) != 0)) {
if (user) {
/*
* Give the user an illegal instruction signal.
*/
/* Deliver a SIGILL to the process */
signo = SIGILL;
ucode = 0;
goto do_trapsignal;
}
/*
* The kernel never executes Thumb code.
*/
printf("\ndata_abort_fault: Misaligned Kernel-mode "
"Program Counter\n");
dab_fatal(tf, fsr, far, td, &ksig);
}
va = trunc_page((vm_offset_t)far);
/*
* It is only a kernel address space fault iff:
* 1. user == 0 and
* 2. pcb_onfault not set or
* 3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction.
*/
if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS ||
(va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) &&
__predict_true((pcb->pcb_onfault == NULL ||
(ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) {
map = kernel_map;
/* Was the fault due to the FPE/IPKDB ? */
if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) {
/*
* Force exit via userret()
* This is necessary as the FPE is an extension to
* userland that actually runs in a privileged mode
* but uses USR mode permissions for its accesses.
*/
user = 1;
signo = SIGSEGV;
ucode = 0;
goto do_trapsignal;
}
} else {
map = &td->td_proc->p_vmspace->vm_map;
}
/*
* We need to know whether the page should be mapped as R or R/W.
* On armv4, the fault status register does not indicate whether
* the access was a read or write. We know that a permission fault
* can only be the result of a write to a read-only location, so we
* can deal with those quickly. Otherwise we need to disassemble
* the faulting instruction to determine if it was a write.
*/
if (IS_PERMISSION_FAULT(fsr))
ftype = VM_PROT_WRITE;
else {
u_int insn = ReadWord(tf->tf_pc);
if (((insn & 0x0c100000) == 0x04000000) || /* STR/STRB */
((insn & 0x0e1000b0) == 0x000000b0) || /* STRH/STRD */
((insn & 0x0a100000) == 0x08000000)) { /* STM/CDT */
ftype = VM_PROT_WRITE;
} else {
if ((insn & 0x0fb00ff0) == 0x01000090) /* SWP */
ftype = VM_PROT_READ | VM_PROT_WRITE;
else
ftype = VM_PROT_READ;
}
}
/*
* See if the fault is as a result of ref/mod emulation,
* or domain mismatch.
*/
#ifdef DEBUG
last_fault_code = fsr;
#endif
if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK,
NULL, "Kernel page fault") != 0)
goto fatal_pagefault;
if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype,
user)) {
goto out;
}
onfault = pcb->pcb_onfault;
pcb->pcb_onfault = NULL;
error = vm_fault_trap(map, va, ftype, VM_FAULT_NORMAL, &signo, &ucode);
pcb->pcb_onfault = onfault;
if (__predict_true(error == KERN_SUCCESS))
goto out;
fatal_pagefault:
if (user == 0) {
if (pcb->pcb_onfault) {
tf->tf_r0 = error;
tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
return;
}
printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype,
error);
dab_fatal(tf, fsr, far, td, &ksig);
}
do_trapsignal:
call_trapsignal(td, signo, ucode);
out:
/* If returning to user mode, make sure to invoke userret() */
if (user)
userret(td, tf);
}
/*
* dab_fatal() handles the following data aborts:
*
* FAULT_WRTBUF_0 - Vector Exception
* FAULT_WRTBUF_1 - Terminal Exception
*
* We should never see these on a properly functioning system.
*
* This function is also called by the other handlers if they
* detect a fatal problem.
*
* Note: If 'l' is NULL, we assume we're dealing with a prefetch abort.
*/
static int
dab_fatal(struct trapframe *tf, u_int fsr, u_int far, struct thread *td,
struct ksig *ksig)
{
const char *mode;
#ifdef KDB
bool handled;
#endif
#ifdef KDB
if (kdb_active) {
kdb_reenter();
return (0);
}
#endif
#ifdef KDTRACE_HOOKS
if (!TRAP_USERMODE(tf)) {
if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far & FAULT_TYPE_MASK))
return (0);
}
#endif
mode = TRAP_USERMODE(tf) ? "user" : "kernel";
disable_interrupts(PSR_I|PSR_F);
if (td != NULL) {
printf("Fatal %s mode data abort: '%s'\n", mode,
data_aborts[fsr & FAULT_TYPE_MASK].desc);
printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
if ((fsr & FAULT_IMPRECISE) == 0)
printf("%08x, ", far);
else
printf("Invalid, ");
printf("spsr=%08x\n", tf->tf_spsr);
} else {
printf("Fatal %s mode prefetch abort at 0x%08x\n",
mode, tf->tf_pc);
printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
}
printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
printf("r12=%08x, ", tf->tf_r12);
if (TRAP_USERMODE(tf))
printf("usp=%08x, ulr=%08x",
tf->tf_usr_sp, tf->tf_usr_lr);
else
printf("ssp=%08x, slr=%08x",
tf->tf_svc_sp, tf->tf_svc_lr);
printf(", pc =%08x\n\n", tf->tf_pc);
#ifdef KDB
if (debugger_on_trap) {
kdb_why = KDB_WHY_TRAP;
handled = kdb_trap(fsr, 0, tf);
kdb_why = KDB_WHY_UNSET;
if (handled)
return (0);
}
#endif
panic("Fatal abort");
/*NOTREACHED*/
}
/*
* dab_align() handles the following data aborts:
*
* FAULT_ALIGN_0 - Alignment fault
* FAULT_ALIGN_1 - Alignment fault
*
* These faults are fatal if they happen in kernel mode. Otherwise, we
* deliver a bus error to the process.
*/
static int
dab_align(struct trapframe *tf, u_int fsr, u_int far, struct thread *td,
struct ksig *ksig)
{
/* Alignment faults are always fatal if they occur in kernel mode */
if (!TRAP_USERMODE(tf)) {
if (!td || !td->td_pcb->pcb_onfault)
dab_fatal(tf, fsr, far, td, ksig);
tf->tf_r0 = EFAULT;
tf->tf_pc = (int)td->td_pcb->pcb_onfault;
return (0);
}
/* pcb_onfault *must* be NULL at this point */
/* Deliver a bus error signal to the process */
ksig->code = 0;
ksig->signb = SIGBUS;
td->td_frame = tf;
return (1);
}
/*
* dab_buserr() handles the following data aborts:
*
* FAULT_BUSERR_0 - External Abort on Linefetch -- Section
* FAULT_BUSERR_1 - External Abort on Linefetch -- Page
* FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section
* FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page
* FAULT_BUSTRNL1 - External abort on Translation -- Level 1
* FAULT_BUSTRNL2 - External abort on Translation -- Level 2
*
* If pcb_onfault is set, flag the fault and return to the handler.
* If the fault occurred in user mode, give the process a SIGBUS.
*
* Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2
* can be flagged as imprecise in the FSR. This causes a real headache
* since some of the machine state is lost. In this case, tf->tf_pc
* may not actually point to the offending instruction. In fact, if
* we've taken a double abort fault, it generally points somewhere near
* the top of "data_abort_entry" in exception.S.
*
* In all other cases, these data aborts are considered fatal.
*/
static int
dab_buserr(struct trapframe *tf, u_int fsr, u_int far, struct thread *td,
struct ksig *ksig)
{
struct pcb *pcb = td->td_pcb;
#ifdef __XSCALE__
if ((fsr & FAULT_IMPRECISE) != 0 &&
(tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) {
/*
* Oops, an imprecise, double abort fault. We've lost the
* r14_abt/spsr_abt values corresponding to the original
* abort, and the spsr saved in the trapframe indicates
* ABT mode.
*/
tf->tf_spsr &= ~PSR_MODE;
/*
* We use a simple heuristic to determine if the double abort
* happened as a result of a kernel or user mode access.
* If the current trapframe is at the top of the kernel stack,
* the fault _must_ have come from user mode.
*/
if (tf != ((struct trapframe *)pcb->pcb_regs.sf_sp) - 1) {
/*
* Kernel mode. We're either about to die a
* spectacular death, or pcb_onfault will come
* to our rescue. Either way, the current value
* of tf->tf_pc is irrelevant.
*/
tf->tf_spsr |= PSR_SVC32_MODE;
if (pcb->pcb_onfault == NULL)
printf("\nKernel mode double abort!\n");
} else {
/*
* User mode. We've lost the program counter at the
* time of the fault (not that it was accurate anyway;
* it's not called an imprecise fault for nothing).
* About all we can do is copy r14_usr to tf_pc and
* hope for the best. The process is about to get a
* SIGBUS, so it's probably history anyway.
*/
tf->tf_spsr |= PSR_USR32_MODE;
tf->tf_pc = tf->tf_usr_lr;
}
}
/* FAR is invalid for imprecise exceptions */
if ((fsr & FAULT_IMPRECISE) != 0)
far = 0;
#endif /* __XSCALE__ */
if (pcb->pcb_onfault) {
tf->tf_r0 = EFAULT;
tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
return (0);
}
/*
* At this point, if the fault happened in kernel mode, we're toast
*/
if (!TRAP_USERMODE(tf))
dab_fatal(tf, fsr, far, td, ksig);
/* Deliver a bus error signal to the process */
ksig->signb = SIGBUS;
ksig->code = 0;
td->td_frame = tf;
return (1);
}
/*
* void prefetch_abort_handler(struct trapframe *tf)
*
* Abort handler called when instruction execution occurs at
* a non existent or restricted (access permissions) memory page.
* If the address is invalid and we were in SVC mode then panic as
* the kernel should never prefetch abort.
* If the address is invalid and the page is mapped then the user process
* does not have read permission, so send it a signal.
* Otherwise fault the page in and try again.
*/
static void
prefetch_abort_handler(struct trapframe *tf)
{
struct thread *td;
struct proc * p;
struct vm_map *map;
vm_offset_t fault_pc, va;
int error = 0, signo, ucode;
struct ksig ksig;
#if 0
/* Update vmmeter statistics */
uvmexp.traps++;
#endif
#if 0
printf("prefetch abort handler: %p %p\n", (void*)tf->tf_pc,
(void*)tf->tf_usr_lr);
#endif
td = curthread;
p = td->td_proc;
VM_CNT_INC(v_trap);
if (TRAP_USERMODE(tf)) {
td->td_frame = tf;
if (td->td_cowgen != td->td_proc->p_cowgen)
thread_cow_update(td);
}
fault_pc = tf->tf_pc;
if (td->td_md.md_spinlock_count == 0) {
if (__predict_true(tf->tf_spsr & PSR_I) == 0)
enable_interrupts(PSR_I);
if (__predict_true(tf->tf_spsr & PSR_F) == 0)
enable_interrupts(PSR_F);
}
/* Prefetch aborts cannot happen in kernel mode */
if (__predict_false(!TRAP_USERMODE(tf)))
dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig);
td->td_pticks = 0;
/* Ok validate the address, can only execute in USER space */
if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS ||
(fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) {
signo = SIGSEGV;
ucode = 0;
goto do_trapsignal;
}
map = &td->td_proc->p_vmspace->vm_map;
va = trunc_page(fault_pc);
/*
* See if the pmap can handle this fault on its own...
*/
#ifdef DEBUG
last_fault_code = -1;
#endif
if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1))
goto out;
error = vm_fault_trap(map, va, VM_PROT_READ | VM_PROT_EXECUTE,
VM_FAULT_NORMAL, &signo, &ucode);
if (__predict_true(error == KERN_SUCCESS))
goto out;
do_trapsignal:
call_trapsignal(td, signo, ucode);
out:
userret(td, tf);
}
extern int badaddr_read_1(const uint8_t *, uint8_t *);
extern int badaddr_read_2(const uint16_t *, uint16_t *);
extern int badaddr_read_4(const uint32_t *, uint32_t *);
/*
* Tentatively read an 8, 16, or 32-bit value from 'addr'.
* If the read succeeds, the value is written to 'rptr' and zero is returned.
* Else, return EFAULT.
*/
int
badaddr_read(void *addr, size_t size, void *rptr)
{
union {
uint8_t v1;
uint16_t v2;
uint32_t v4;
} u;
int rv;
cpu_drain_writebuf();
/* Read from the test address. */
switch (size) {
case sizeof(uint8_t):
rv = badaddr_read_1(addr, &u.v1);
if (rv == 0 && rptr)
*(uint8_t *) rptr = u.v1;
break;
case sizeof(uint16_t):
rv = badaddr_read_2(addr, &u.v2);
if (rv == 0 && rptr)
*(uint16_t *) rptr = u.v2;
break;
case sizeof(uint32_t):
rv = badaddr_read_4(addr, &u.v4);
if (rv == 0 && rptr)
*(uint32_t *) rptr = u.v4;
break;
default:
panic("badaddr: invalid size (%lu)", (u_long) size);
}
/* Return EFAULT if the address was invalid, else zero */
return (rv);
}
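/*
 * A minimal usage sketch (illustrative only -- the function name and the
 * ENXIO policy below are not part of this file): badaddr_read() lets a driver
 * probe for hardware that may simply not be present, instead of taking an
 * unrecoverable data abort.
 */
static int
example_probe_register(uint32_t *reg)
{
	uint32_t val;

	if (badaddr_read(reg, sizeof(val), &val) != 0)
		return (ENXIO);		/* the read faulted; nothing responded */
	return (0);			/* success; 'val' holds the register */
}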

View File

@@ -1,660 +0,0 @@
/* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */
/*-
* Copyright (C) 2003-2004 Olivier Houchard
* Copyright (C) 1994-1997 Mark Brinicombe
* Copyright (C) 1994 Brini
* All rights reserved.
*
* This code is derived from software written for Brini by Mark Brinicombe
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Brini.
* 4. The name of Brini may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_ATOMIC_V4_H_
#define _MACHINE_ATOMIC_V4_H_
#ifndef _MACHINE_ATOMIC_H_
#error Do not include this file directly, use <machine/atomic.h>
#endif
#if __ARM_ARCH <= 5
#define isb() __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
#define dsb() __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
#define dmb() dsb()
#else
#error Only use this file with ARMv5 and earlier
#endif
#define mb() dmb()
#define wmb() dmb()
#define rmb() dmb()
#define __with_interrupts_disabled(expr) \
do { \
u_int cpsr_save, tmp; \
\
__asm __volatile( \
"mrs %0, cpsr;" \
"orr %1, %0, %2;" \
"msr cpsr_fsxc, %1;" \
: "=r" (cpsr_save), "=r" (tmp) \
: "I" (PSR_I | PSR_F) \
: "cc" ); \
(expr); \
__asm __volatile( \
"msr cpsr_fsxc, %0" \
: /* no output */ \
: "r" (cpsr_save) \
: "cc" ); \
} while(0)
static __inline uint32_t
__swp(uint32_t val, volatile uint32_t *ptr)
{
__asm __volatile("swp %0, %2, [%3]"
: "=&r" (val), "=m" (*ptr)
: "r" (val), "r" (ptr), "m" (*ptr)
: "memory");
return (val);
}
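/*
 * Note: the SWP instruction atomically exchanges a register with memory in a
 * single operation, which is why atomic_swap_32() and atomic_readandclear_32()
 * near the end of this file can be built on __swp() alone, with no interrupt
 * masking or RAS window.
 */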
#ifdef _KERNEL
#define ARM_HAVE_ATOMIC64
static __inline void
atomic_add_32(volatile u_int32_t *p, u_int32_t val)
{
__with_interrupts_disabled(*p += val);
}
static __inline void
atomic_add_64(volatile u_int64_t *p, u_int64_t val)
{
__with_interrupts_disabled(*p += val);
}
static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
{
__with_interrupts_disabled(*address &= ~clearmask);
}
static __inline void
atomic_clear_64(volatile uint64_t *address, uint64_t clearmask)
{
__with_interrupts_disabled(*address &= ~clearmask);
}
static __inline int
atomic_fcmpset_8(volatile uint8_t *p, volatile uint8_t *cmpval, volatile uint8_t newval)
{
int ret;
__with_interrupts_disabled(
{
ret = *p;
if (*p == *cmpval) {
*p = newval;
ret = 1;
} else {
*cmpval = *p;
ret = 0;
}
});
return (ret);
}
static __inline int
atomic_fcmpset_16(volatile uint16_t *p, volatile uint16_t *cmpval, volatile uint16_t newval)
{
int ret;
__with_interrupts_disabled(
{
ret = *p;
if (*p == *cmpval) {
*p = newval;
ret = 1;
} else {
*cmpval = *p;
ret = 0;
}
});
return (ret);
}
static __inline int
atomic_fcmpset_32(volatile u_int32_t *p, volatile u_int32_t *cmpval, volatile u_int32_t newval)
{
int ret;
__with_interrupts_disabled(
{
ret = *p;
if (*p == *cmpval) {
*p = newval;
ret = 1;
} else {
*cmpval = *p;
ret = 0;
}
});
return (ret);
}
static __inline int
atomic_fcmpset_64(volatile u_int64_t *p, volatile u_int64_t *cmpval, volatile u_int64_t newval)
{
int ret;
__with_interrupts_disabled(
{
if (*p == *cmpval) {
*p = newval;
ret = 1;
} else {
*cmpval = *p;
ret = 0;
}
});
return (ret);
}
static __inline int
atomic_cmpset_8(volatile uint8_t *p, volatile uint8_t cmpval, volatile uint8_t newval)
{
int ret;
__with_interrupts_disabled(
{
if (*p == cmpval) {
*p = newval;
ret = 1;
} else {
ret = 0;
}
});
return (ret);
}
static __inline int
atomic_cmpset_16(volatile uint16_t *p, volatile uint16_t cmpval, volatile uint16_t newval)
{
int ret;
__with_interrupts_disabled(
{
if (*p == cmpval) {
*p = newval;
ret = 1;
} else {
ret = 0;
}
});
return (ret);
}
static __inline int
atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
int ret;
__with_interrupts_disabled(
{
if (*p == cmpval) {
*p = newval;
ret = 1;
} else {
ret = 0;
}
});
return (ret);
}
static __inline int
atomic_cmpset_64(volatile u_int64_t *p, volatile u_int64_t cmpval, volatile u_int64_t newval)
{
int ret;
__with_interrupts_disabled(
{
if (*p == cmpval) {
*p = newval;
ret = 1;
} else {
ret = 0;
}
});
return (ret);
}
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
uint32_t value;
__with_interrupts_disabled(
{
value = *p;
*p += v;
});
return (value);
}
static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{
uint64_t value;
__with_interrupts_disabled(
{
value = *p;
*p += v;
});
return (value);
}
static __inline uint64_t
atomic_load_64(volatile uint64_t *p)
{
uint64_t value;
__with_interrupts_disabled(value = *p);
return (value);
}
static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{
__with_interrupts_disabled(*address |= setmask);
}
static __inline void
atomic_set_64(volatile uint64_t *address, uint64_t setmask)
{
__with_interrupts_disabled(*address |= setmask);
}
static __inline void
atomic_store_64(volatile uint64_t *p, uint64_t value)
{
__with_interrupts_disabled(*p = value);
}
static __inline void
atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
{
__with_interrupts_disabled(*p -= val);
}
static __inline void
atomic_subtract_64(volatile u_int64_t *p, u_int64_t val)
{
__with_interrupts_disabled(*p -= val);
}
static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{
uint64_t value;
__with_interrupts_disabled(
{
value = *p;
*p = v;
});
return (value);
}
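/*
 * Any further uniprocessor atomic op could be built the same way: wrap the
 * plain C read-modify-write in __with_interrupts_disabled().  A hypothetical
 * example (not part of the original API) might look like:
 *
 *	static __inline uint32_t
 *	atomic_testandset_32(volatile uint32_t *p, u_int bit)
 *	{
 *		uint32_t old;
 *
 *		__with_interrupts_disabled(
 *		{
 *			old = *p;
 *			*p = old | (1u << (bit & 31));
 *		});
 *		return ((old >> (bit & 31)) & 1);
 *	}
 */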
#else /* !_KERNEL */
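/*
 * The userland variants below rely on kernel support for restartable atomic
 * sequences (RAS): each routine first stores the addresses of its "1:" (start)
 * and "2:" (end) labels at ARM_RAS_START and ARM_RAS_START + 4, then performs
 * an ordinary load-modify-store, and finally resets the registration.  If the
 * thread is preempted while its PC lies inside that window, the kernel makes
 * it resume at the recorded start address, so the whole sequence executes as
 * if it were atomic on a uniprocessor.
 */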
static __inline void
atomic_add_32(volatile u_int32_t *p, u_int32_t val)
{
int start, ras_start = ARM_RAS_START;
__asm __volatile("1:\n"
"adr %1, 1b\n"
"str %1, [%0]\n"
"adr %1, 2f\n"
"str %1, [%0, #4]\n"
"ldr %1, [%2]\n"
"add %1, %1, %3\n"
"str %1, [%2]\n"
"2:\n"
"mov %1, #0\n"
"str %1, [%0]\n"
"mov %1, #0xffffffff\n"
"str %1, [%0, #4]\n"
: "+r" (ras_start), "=r" (start), "+r" (p), "+r" (val)
: : "memory");
}
static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
{
int start, ras_start = ARM_RAS_START;
__asm __volatile("1:\n"
"adr %1, 1b\n"
"str %1, [%0]\n"
"adr %1, 2f\n"
"str %1, [%0, #4]\n"
"ldr %1, [%2]\n"
"bic %1, %1, %3\n"
"str %1, [%2]\n"
"2:\n"
"mov %1, #0\n"
"str %1, [%0]\n"
"mov %1, #0xffffffff\n"
"str %1, [%0, #4]\n"
: "+r" (ras_start), "=r" (start), "+r" (address), "+r" (clearmask)
: : "memory");
}
static __inline int
atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
int done, ras_start = ARM_RAS_START;
__asm __volatile("1:\n"
"adr %1, 1b\n"
"str %1, [%0]\n"
"adr %1, 2f\n"
"str %1, [%0, #4]\n"
"ldr %1, [%2]\n"
"cmp %1, %3\n"
"streq %4, [%2]\n"
"2:\n"
"mov %1, #0\n"
"str %1, [%0]\n"
"mov %1, #0xffffffff\n"
"str %1, [%0, #4]\n"
"moveq %1, #1\n"
"movne %1, #0\n"
: "+r" (ras_start), "=r" (done)
,"+r" (p), "+r" (cmpval), "+r" (newval) : : "cc", "memory");
return (done);
}
static __inline int
atomic_fcmpset_32(volatile u_int32_t *p, volatile u_int32_t *cmpval, volatile u_int32_t newval)
{
int done, oldval, ras_start = ARM_RAS_START;
__asm __volatile("1:\n"
"adr %1, 1b\n"
"str %1, [%0]\n"
"adr %1, 2f\n"
"str %1, [%0, #4]\n"
"ldr %1, [%2]\n"
"ldr %5, [%3]\n"
"cmp %1, %5\n"
"streq %4, [%2]\n"
"2:\n"
"mov %5, #0\n"
"str %5, [%0]\n"
"mov %5, #0xffffffff\n"
"str %5, [%0, #4]\n"
"strne %1, [%3]\n"
"moveq %1, #1\n"
"movne %1, #0\n"
: "+r" (ras_start), "=r" (done) ,"+r" (p)
, "+r" (cmpval), "+r" (newval), "+r" (oldval) : : "cc", "memory");
return (done);
}
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
uint32_t start, tmp, ras_start = ARM_RAS_START;
__asm __volatile("1:\n"
"adr %1, 1b\n"
"str %1, [%0]\n"
"adr %1, 2f\n"
"str %1, [%0, #4]\n"
"ldr %1, [%3]\n"
"mov %2, %1\n"
"add %2, %2, %4\n"
"str %2, [%3]\n"
"2:\n"
"mov %2, #0\n"
"str %2, [%0]\n"
"mov %2, #0xffffffff\n"
"str %2, [%0, #4]\n"
: "+r" (ras_start), "=r" (start), "=r" (tmp), "+r" (p), "+r" (v)
: : "memory");
return (start);
}
static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{
int start, ras_start = ARM_RAS_START;
__asm __volatile("1:\n"
"adr %1, 1b\n"
"str %1, [%0]\n"
"adr %1, 2f\n"
"str %1, [%0, #4]\n"
"ldr %1, [%2]\n"
"orr %1, %1, %3\n"
"str %1, [%2]\n"
"2:\n"
"mov %1, #0\n"
"str %1, [%0]\n"
"mov %1, #0xffffffff\n"
"str %1, [%0, #4]\n"
: "+r" (ras_start), "=r" (start), "+r" (address), "+r" (setmask)
: : "memory");
}
static __inline void
atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
{
int start, ras_start = ARM_RAS_START;
__asm __volatile("1:\n"
"adr %1, 1b\n"
"str %1, [%0]\n"
"adr %1, 2f\n"
"str %1, [%0, #4]\n"
"ldr %1, [%2]\n"
"sub %1, %1, %3\n"
"str %1, [%2]\n"
"2:\n"
"mov %1, #0\n"
"str %1, [%0]\n"
"mov %1, #0xffffffff\n"
"str %1, [%0, #4]\n"
: "+r" (ras_start), "=r" (start), "+r" (p), "+r" (val)
: : "memory");
}
#endif /* _KERNEL */
static __inline uint32_t
atomic_readandclear_32(volatile u_int32_t *p)
{
return (__swp(0, p));
}
static __inline uint32_t
atomic_swap_32(volatile u_int32_t *p, u_int32_t v)
{
return (__swp(v, p));
}
#define atomic_fcmpset_rel_32 atomic_fcmpset_32
#define atomic_fcmpset_acq_32 atomic_fcmpset_32
#ifdef _KERNEL
#define atomic_fcmpset_8 atomic_fcmpset_8
#define atomic_fcmpset_rel_8 atomic_fcmpset_8
#define atomic_fcmpset_acq_8 atomic_fcmpset_8
#define atomic_fcmpset_16 atomic_fcmpset_16
#define atomic_fcmpset_rel_16 atomic_fcmpset_16
#define atomic_fcmpset_acq_16 atomic_fcmpset_16
#define atomic_fcmpset_rel_64 atomic_fcmpset_64
#define atomic_fcmpset_acq_64 atomic_fcmpset_64
#endif
#define atomic_fcmpset_acq_long atomic_fcmpset_long
#define atomic_fcmpset_rel_long atomic_fcmpset_long
#define atomic_cmpset_rel_32 atomic_cmpset_32
#define atomic_cmpset_acq_32 atomic_cmpset_32
#ifdef _KERNEL
#define atomic_cmpset_8 atomic_cmpset_8
#define atomic_cmpset_rel_8 atomic_cmpset_8
#define atomic_cmpset_acq_8 atomic_cmpset_8
#define atomic_cmpset_16 atomic_cmpset_16
#define atomic_cmpset_rel_16 atomic_cmpset_16
#define atomic_cmpset_acq_16 atomic_cmpset_16
#define atomic_cmpset_rel_64 atomic_cmpset_64
#define atomic_cmpset_acq_64 atomic_cmpset_64
#endif
#define atomic_set_rel_32 atomic_set_32
#define atomic_set_acq_32 atomic_set_32
#define atomic_clear_rel_32 atomic_clear_32
#define atomic_clear_acq_32 atomic_clear_32
#define atomic_add_rel_32 atomic_add_32
#define atomic_add_acq_32 atomic_add_32
#define atomic_subtract_rel_32 atomic_subtract_32
#define atomic_subtract_acq_32 atomic_subtract_32
#define atomic_store_rel_32 atomic_store_32
#define atomic_store_rel_long atomic_store_long
#define atomic_load_acq_32 atomic_load_32
#define atomic_load_acq_long atomic_load_long
#define atomic_add_acq_long atomic_add_long
#define atomic_add_rel_long atomic_add_long
#define atomic_subtract_acq_long atomic_subtract_long
#define atomic_subtract_rel_long atomic_subtract_long
#define atomic_clear_acq_long atomic_clear_long
#define atomic_clear_rel_long atomic_clear_long
#define atomic_set_acq_long atomic_set_long
#define atomic_set_rel_long atomic_set_long
#define atomic_cmpset_acq_long atomic_cmpset_long
#define atomic_cmpset_rel_long atomic_cmpset_long
#define atomic_load_acq_long atomic_load_long
#undef __with_interrupts_disabled
static __inline void
atomic_add_long(volatile u_long *p, u_long v)
{
atomic_add_32((volatile uint32_t *)p, v);
}
static __inline void
atomic_clear_long(volatile u_long *p, u_long v)
{
atomic_clear_32((volatile uint32_t *)p, v);
}
static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long old, u_long newe)
{
return (atomic_cmpset_32((volatile uint32_t *)dst, old, newe));
}
static __inline u_long
atomic_fcmpset_long(volatile u_long *dst, u_long *old, u_long newe)
{
return (atomic_fcmpset_32((volatile uint32_t *)dst,
(uint32_t *)old, newe));
}
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{
return (atomic_fetchadd_32((volatile uint32_t *)p, v));
}
static __inline void
atomic_readandclear_long(volatile u_long *p)
{
atomic_readandclear_32((volatile uint32_t *)p);
}
static __inline void
atomic_set_long(volatile u_long *p, u_long v)
{
atomic_set_32((volatile uint32_t *)p, v);
}
static __inline void
atomic_subtract_long(volatile u_long *p, u_long v)
{
atomic_subtract_32((volatile uint32_t *)p, v);
}
/*
* ARMv5 does not support SMP. For both kernel and user modes, only a
 * compiler barrier is needed for fences, since the CPU is always
* self-consistent.
*/
static __inline void
atomic_thread_fence_acq(void)
{
__compiler_membar();
}
static __inline void
atomic_thread_fence_rel(void)
{
__compiler_membar();
}
static __inline void
atomic_thread_fence_acq_rel(void)
{
__compiler_membar();
}
static __inline void
atomic_thread_fence_seq_cst(void)
{
__compiler_membar();
}
#endif /* _MACHINE_ATOMIC_V4_H_ */

View File

@@ -49,11 +49,7 @@
#include <machine/sysarch.h>
#endif
#if __ARM_ARCH >= 6
#include <machine/atomic-v6.h>
#else /* < armv6 */
#include <machine/atomic-v4.h>
#endif /* Arch >= v6 */
static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)

View File

@@ -1,183 +0,0 @@
/*-
* Copyright 2016 Svatopluk Kraus <skra@FreeBSD.org>
* Copyright 2016 Michal Meloun <mmel@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef MACHINE_CPU_V4_H
#define MACHINE_CPU_V4_H
/* There are no user-serviceable parts here; they may change without notice */
#ifndef _KERNEL
#error Only include this file in the kernel
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/sysreg.h>
#if __ARM_ARCH >= 6
#error Never include this file for ARMv6
#else
#define CPU_ASID_KERNEL 0
/*
* Macros to generate CP15 (system control processor) read/write functions.
*/
#define _FX(s...) #s
#define _RF0(fname, aname...) \
static __inline uint32_t \
fname(void) \
{ \
uint32_t reg; \
__asm __volatile("mrc\t" _FX(aname): "=r" (reg)); \
return(reg); \
}
#define _R64F0(fname, aname) \
static __inline uint64_t \
fname(void) \
{ \
uint64_t reg; \
__asm __volatile("mrrc\t" _FX(aname): "=r" (reg)); \
return(reg); \
}
#define _WF0(fname, aname...) \
static __inline void \
fname(void) \
{ \
__asm __volatile("mcr\t" _FX(aname)); \
}
#define _WF1(fname, aname...) \
static __inline void \
fname(uint32_t reg) \
{ \
__asm __volatile("mcr\t" _FX(aname):: "r" (reg)); \
}
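/*
 * For example, _RF0(cp15_midr_get, CP15_MIDR(%0)) below expands to an inline
 * function that issues a single "mrc" with the operand list supplied by the
 * CP15_MIDR() macro from <machine/sysreg.h> and returns the value read; the
 * _WF1() writers do the same with "mcr".
 */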
/*
* Publicly accessible functions
*/
/* Various control registers */
_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
/* XScale */
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
/* CPU id registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))
#undef _FX
#undef _RF0
#undef _WF0
#undef _WF1
/*
* armv4/5 compatibility shims.
*
* These functions provide armv4 cache maintenance using the new armv6 names.
* Included here are just the functions actually used now in common code; it may
* be necessary to add things here over time.
*
* The callers of the dcache functions expect these routines to handle address
* and size values which are not aligned to cacheline boundaries; the armv4 and
* armv5 asm code handles that.
*/
static __inline void
tlb_flush_all(void)
{
cpu_tlb_flushID();
cpu_cpwait();
}
static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
cpu_icache_sync_range(va, size);
}
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
cpu_dcache_inv_range(va, size);
#ifdef ARM_L2_PIPT
cpu_l2cache_inv_range(pa, size);
#else
cpu_l2cache_inv_range(va, size);
#endif
}
static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
/* See armv6 code, above, for why we do L2 before L1 in this case. */
#ifdef ARM_L2_PIPT
cpu_l2cache_inv_range(pa, size);
#else
cpu_l2cache_inv_range(va, size);
#endif
cpu_dcache_inv_range(va, size);
}
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
cpu_dcache_wb_range(va, size);
#ifdef ARM_L2_PIPT
cpu_l2cache_wb_range(pa, size);
#else
cpu_l2cache_wb_range(va, size);
#endif
}
static __inline void
dcache_wbinv_poc_all(void)
{
cpu_idcache_wbinv_all();
cpu_l2cache_wbinv_all();
}
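/*
 * Usage sketch: these shims are what the common code calls to keep DMA
 * buffers coherent on these non-snooping CPUs, typically
 *
 *	dcache_wb_poc(va, pa, size);	before a device reads the buffer
 *	dcache_inv_poc(va, pa, size);	after a device has written the buffer
 *
 * with dcache_inv_poc_dma() differing from dcache_inv_poc() only in performing
 * the L2 invalidate before the L1 one.
 */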
#endif /* _KERNEL */
#endif /* MACHINE_CPU_V4_H */

View File

@@ -11,11 +11,7 @@ void cpu_halt(void);
void swi_vm(void *);
#ifdef _KERNEL
#if __ARM_ARCH >= 6
#include <machine/cpu-v6.h>
#else
#include <machine/cpu-v4.h>
#endif /* __ARM_ARCH >= 6 */
static __inline uint64_t
get_cyclecount(void)

View File

@@ -1,381 +0,0 @@
/*-
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* the Systems Programming Group of the University of Utah Computer
* Science Department and William Jolitz of UUNET Technologies Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Derived from hp300 version by Mike Hibler, this version by William
* Jolitz uses a recursive map [a pde points to the page directory] to
* map the page tables using the pagetables themselves. This is done to
* reduce the impact on kernel virtual memory for lots of sparse address
* space, and to reduce the cost of memory to each process.
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
* from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
*
* $FreeBSD$
*/
#ifndef _MACHINE_PMAP_V4_H_
#define _MACHINE_PMAP_V4_H_
#include <machine/pte-v4.h>
/*
* Pte related macros
*/
#define PTE_NOCACHE 1
#define PTE_CACHE 2
#define PTE_DEVICE PTE_NOCACHE
#define PTE_PAGETABLE 3
enum mem_type {
STRONG_ORD = 0,
DEVICE_NOSHARE,
DEVICE_SHARE,
NRML_NOCACHE,
NRML_IWT_OWT,
NRML_IWB_OWB,
NRML_IWBA_OWBA
};
#ifndef LOCORE
#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#define PDESIZE sizeof(pd_entry_t) /* for assembly files */
#define PTESIZE sizeof(pt_entry_t) /* for assembly files */
#define pmap_page_get_memattr(m) ((m)->md.pv_memattr)
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
/*
* Pmap stuff
*/
/*
* This structure is used to hold a virtual<->physical address
* association and is used mostly by bootstrap code
*/
struct pv_addr {
SLIST_ENTRY(pv_addr) pv_list;
vm_offset_t pv_va;
vm_paddr_t pv_pa;
};
struct pv_entry;
struct pv_chunk;
struct md_page {
int pvh_attrs;
vm_memattr_t pv_memattr;
vm_offset_t pv_kva; /* first kernel VA mapping */
TAILQ_HEAD(,pv_entry) pv_list;
};
struct l1_ttable;
struct l2_dtable;
/*
* The number of L2 descriptor tables which can be tracked by an l2_dtable.
* A bucket size of 16 provides for 16MB of contiguous virtual address
* space per l2_dtable. Most processes will, therefore, require only two or
* three of these to map their whole working set.
*/
#define L2_BUCKET_LOG2 4
#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2)
/*
* Given the above "L2-descriptors-per-l2_dtable" constant, the number
* of l2_dtable structures required to track all possible page descriptors
* mappable by an L1 translation table is given by the following constants:
*/
#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define L2_SIZE (1 << L2_LOG2)
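/*
 * With L1_S_SHIFT == 20 (1MB sections, see pte-v4.h), this works out to
 * L2_LOG2 = (32 - 20) - 4 = 8, so L2_SIZE is 256 l2_dtable slots per pmap,
 * each slot covering 16MB of virtual address space (256 * 16MB = 4GB).
 */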
struct pmap {
struct mtx pm_mtx;
u_int8_t pm_domain;
struct l1_ttable *pm_l1;
struct l2_dtable *pm_l2[L2_SIZE];
cpuset_t pm_active; /* active on cpus */
struct pmap_statistics pm_stats; /* pmap statistics */
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
};
typedef struct pmap *pmap_t;
#ifdef _KERNEL
extern struct pmap kernel_pmap_store;
#define kernel_pmap (&kernel_pmap_store)
#define PMAP_ASSERT_LOCKED(pmap) \
mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \
NULL, MTX_DEF | MTX_DUPOK)
#define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx)
#define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
#endif
/*
* For each vm_page_t, there is a list of all currently valid virtual
* mappings of that page. An entry is a pv_entry_t, the list is pv_list.
*/
typedef struct pv_entry {
vm_offset_t pv_va; /* virtual address for mapping */
TAILQ_ENTRY(pv_entry) pv_list;
int pv_flags; /* flags (wired, etc...) */
pmap_t pv_pmap; /* pmap where mapping lies */
TAILQ_ENTRY(pv_entry) pv_plist;
} *pv_entry_t;
/*
* pv_entries are allocated in chunks per-process. This avoids the
* need to track per-pmap assignments.
*/
#define _NPCM 8
#define _NPCPV 252
struct pv_chunk {
pmap_t pc_pmap;
TAILQ_ENTRY(pv_chunk) pc_list;
uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */
uint32_t pc_dummy[3]; /* aligns pv_chunk to 4KB */
TAILQ_ENTRY(pv_chunk) pc_lru;
struct pv_entry pc_pventry[_NPCPV];
};
#ifdef _KERNEL
boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);
/*
* virtual address to page table entry and
* to physical address. Likewise for alternate address space.
* Note: these work recursively, thus vtopte of a pte will give
* the corresponding pde that in turn maps it.
*/
/*
* The current top of kernel VM.
*/
extern vm_offset_t pmap_curmaxkvaddr;
/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
pd_entry_t *pdep;
pt_entry_t *ptep;
if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE)
return (NULL);
return (ptep);
}
void pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt);
int pmap_change_attr(vm_offset_t, vm_size_t, int);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *);
void pmap_kremove(vm_offset_t);
vm_page_t pmap_use_pt(pmap_t, vm_offset_t);
void pmap_debug(int);
void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int);
void
pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
int cache);
int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);
/*
* Definitions for MMU domains
*/
#define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */
#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */
/*
* The new pmap ensures that page-tables are always mapping Write-Thru.
* Thus, on some platforms we can run fast and loose and avoid syncing PTEs
* on every change.
*
* Unfortunately, not all CPUs have a write-through cache mode. So we
* define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
* and if there is the chance for PTE syncs to be needed, we define
* PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
* the code.
*/
extern int pmap_needs_pte_sync;
/*
* These macros define the various bit masks in the PTE.
*/
#define L1_S_CACHE_MASK (L1_S_B|L1_S_C)
#define L2_L_CACHE_MASK (L2_B|L2_C)
#define L2_S_PROT_U (L2_AP(AP_U))
#define L2_S_PROT_W (L2_AP(AP_W))
#define L2_S_PROT_MASK (L2_S_PROT_U|L2_S_PROT_W)
#define L2_S_CACHE_MASK (L2_B|L2_C)
#define L1_S_PROTO (L1_TYPE_S | L1_S_IMP)
#define L1_C_PROTO (L1_TYPE_C | L1_C_IMP2)
#define L2_L_PROTO (L2_TYPE_L)
#define L2_S_PROTO (L2_TYPE_S)
/*
* User-visible names for the ones that vary with MMU class.
*/
#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))
#if defined(CPU_XSCALE_81342)
#define CPU_XSCALE_CORE3
#define PMAP_NEEDS_PTE_SYNC 1
#define PMAP_INCLUDE_PTE_SYNC
#else
#define PMAP_NEEDS_PTE_SYNC 0
#endif
/*
* These macros return various bits based on kernel/user and protection.
* Note that the compiler will usually fold these at compile time.
*/
#define L1_S_PROT_U (L1_S_AP(AP_U))
#define L1_S_PROT_W (L1_S_AP(AP_W))
#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W)
#define L1_S_WRITABLE(pd) ((pd) & L1_S_PROT_W)
#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
(((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))
#define L2_L_PROT_U (L2_AP(AP_U))
#define L2_L_PROT_W (L2_AP(AP_W))
#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W)
#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
(((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))
#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
(((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
/*
* Macros to test if a mapping is mappable with an L1 Section mapping
* or an L2 Large Page mapping.
*/
#define L1_S_MAPPABLE_P(va, pa, size) \
((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
#define L2_L_MAPPABLE_P(va, pa, size) \
((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
/*
* Provide a fallback in case we were not able to determine it at
* compile-time.
*/
#ifndef PMAP_NEEDS_PTE_SYNC
#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync
#define PMAP_INCLUDE_PTE_SYNC
#endif
#ifdef ARM_L2_PIPT
#define _sync_l2(pte, size) cpu_l2cache_wb_range(vtophys(pte), size)
#else
#define _sync_l2(pte, size) cpu_l2cache_wb_range(pte, size)
#endif
#define PTE_SYNC(pte) \
do { \
if (PMAP_NEEDS_PTE_SYNC) { \
cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
cpu_drain_writebuf(); \
_sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));\
} else \
cpu_drain_writebuf(); \
} while (/*CONSTCOND*/0)
#define PTE_SYNC_RANGE(pte, cnt) \
do { \
if (PMAP_NEEDS_PTE_SYNC) { \
cpu_dcache_wb_range((vm_offset_t)(pte), \
(cnt) << 2); /* * sizeof(pt_entry_t) */ \
cpu_drain_writebuf(); \
_sync_l2((vm_offset_t)(pte), \
(cnt) << 2); /* * sizeof(pt_entry_t) */ \
} else \
cpu_drain_writebuf(); \
} while (/*CONSTCOND*/0)
void pmap_pte_init_generic(void);
#define PTE_KERNEL 0
#define PTE_USER 1
/*
* Flags that indicate attributes of pages or mappings of pages.
*
* The PVF_MOD and PVF_REF flags are stored in the mdpage for each
* page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
* pv_entry's for each page. They live in the same "namespace" so
* that we can clear multiple attributes at a time.
*
* Note the "non-cacheable" flag generally means the page has
* multiple mappings in a given address space.
*/
#define PVF_MOD 0x01 /* page is modified */
#define PVF_REF 0x02 /* page is referenced */
#define PVF_WIRED 0x04 /* mapping is wired */
#define PVF_WRITE 0x08 /* mapping is writable */
#define PVF_EXEC 0x10 /* mapping is executable */
#define PVF_NC 0x20 /* mapping is non-cacheable */
#define PVF_MWC 0x40 /* mapping is used multiple times in userland */
#define PVF_UNMAN 0x80 /* mapping is unmanaged */
void vector_page_setprot(int);
#define SECTION_CACHE 0x1
#define SECTION_PT 0x2
void pmap_postinit(void);
#endif /* _KERNEL */
#endif /* !LOCORE */
#endif /* !_MACHINE_PMAP_V4_H_ */

View File

@@ -32,11 +32,7 @@
#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_
#if __ARM_ARCH >= 6
#include <machine/pmap-v6.h>
#else
#include <machine/pmap-v4.h>
#endif
#ifdef _KERNEL
#include <sys/systm.h>

View File

@@ -1,350 +0,0 @@
/* $NetBSD: pte.h,v 1.1 2001/11/23 17:39:04 thorpej Exp $ */
/*-
* Copyright (c) 1994 Mark Brinicombe.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the RiscBSD team.
* 4. The name "RiscBSD" nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_PTE_V4_H_
#define _MACHINE_PTE_V4_H_
#ifndef LOCORE
typedef uint32_t pd_entry_t; /* page directory entry */
typedef uint32_t pt_entry_t; /* page table entry */
typedef pt_entry_t pt2_entry_t; /* compatibility with v6 */
#endif
#define PG_FRAME 0xfffff000
/* The PT_SIZE definition is misleading... A page table is only 0x400
 * bytes long. But since VM mappings can only be made at 0x1000 granularity,
 * a single 1KB block cannot be steered to a va by itself. Therefore the
 * page tables are allocated in blocks of 4; i.e. if a 1KB block
* was allocated for a PT then the other 3KB would also get mapped
* whenever the 1KB was mapped.
*/
#define PT_RSIZE 0x0400 /* Real page table size */
#define PT_SIZE 0x1000
#define PD_SIZE 0x4000
/* Page table types and masks */
#define L1_PAGE 0x01 /* L1 page table mapping */
#define L1_SECTION 0x02 /* L1 section mapping */
#define L1_FPAGE 0x03 /* L1 fine page mapping */
#define L1_MASK 0x03 /* Mask for L1 entry type */
#define L2_LPAGE 0x01 /* L2 large page (64KB) */
#define L2_SPAGE 0x02 /* L2 small page (4KB) */
#define L2_MASK 0x03 /* Mask for L2 entry type */
#define L2_INVAL 0x00 /* L2 invalid type */
/*
* The ARM MMU architecture was introduced with ARM v3 (previous ARM
* architecture versions used an optional off-CPU memory controller
* to perform address translation).
*
* The ARM MMU consists of a TLB and translation table walking logic.
* There is typically one TLB per memory interface (or, put another
* way, one TLB per software-visible cache).
*
* The ARM MMU is capable of mapping memory in the following chunks:
*
* 1M Sections (L1 table)
*
* 64K Large Pages (L2 table)
*
* 4K Small Pages (L2 table)
*
* 1K Tiny Pages (L2 table)
*
* There are two types of L2 tables: Coarse Tables and Fine Tables.
* Coarse Tables can map Large and Small Pages. Fine Tables can
* map Tiny Pages.
*
* Coarse Tables can define 4 Subpages within Large and Small pages.
* Subpages define different permissions for each Subpage within
* a Page.
*
* Coarse Tables are 1K in length. Fine tables are 4K in length.
*
* The Translation Table Base register holds the pointer to the
* L1 Table. The L1 Table is a 16K contiguous chunk of memory
* aligned to a 16K boundary. Each entry in the L1 Table maps
* 1M of virtual address space, either via a Section mapping or
* via an L2 Table.
*
* In addition, the Fast Context Switching Extension (FCSE) is available
* on some ARM v4 and ARM v5 processors. FCSE is a way of eliminating
* TLB/cache flushes on context switch by use of a smaller address space
* and a "process ID" that modifies the virtual address before being
* presented to the translation logic.
*/
/* ARMv6 super-sections. */
#define L1_SUP_SIZE 0x01000000 /* 16M */
#define L1_SUP_OFFSET (L1_SUP_SIZE - 1)
#define L1_SUP_FRAME (~L1_SUP_OFFSET)
#define L1_SUP_SHIFT 24
#define L1_S_SIZE 0x00100000 /* 1M */
#define L1_S_OFFSET (L1_S_SIZE - 1)
#define L1_S_FRAME (~L1_S_OFFSET)
#define L1_S_SHIFT 20
#define L2_L_SIZE 0x00010000 /* 64K */
#define L2_L_OFFSET (L2_L_SIZE - 1)
#define L2_L_FRAME (~L2_L_OFFSET)
#define L2_L_SHIFT 16
#define L2_S_SIZE 0x00001000 /* 4K */
#define L2_S_OFFSET (L2_S_SIZE - 1)
#define L2_S_FRAME (~L2_S_OFFSET)
#define L2_S_SHIFT 12
#define L2_T_SIZE 0x00000400 /* 1K */
#define L2_T_OFFSET (L2_T_SIZE - 1)
#define L2_T_FRAME (~L2_T_OFFSET)
#define L2_T_SHIFT 10
/*
* The NetBSD VM implementation only works on whole pages (4K),
* whereas the ARM MMU's Coarse tables are sized in terms of 1K
* (16K L1 table, 1K L2 table).
*
* So, we allocate L2 tables 4 at a time, thus yielding a 4K L2
* table.
*/
#define L1_TABLE_SIZE 0x4000 /* 16K */
#define L2_TABLE_SIZE 0x1000 /* 4K */
/*
* The new pmap deals with the 1KB coarse L2 tables by
* allocating them from a pool. Until every port has been converted,
* keep the old L2_TABLE_SIZE define lying around. Converted ports
* should use L2_TABLE_SIZE_REAL until then.
*/
#define L2_TABLE_SIZE_REAL 0x400 /* 1K */
/* Total number of page table entries in L2 table */
#define L2_PTE_NUM_TOTAL (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t))
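/*
 * Worked example of how a virtual address splits under these constants for a
 * small-page mapping: the L1 table index is (va >> L1_S_SHIFT), i.e. va bits
 * 31..20; the index into that section's coarse L2 table is
 * ((va & L1_S_OFFSET) >> L2_S_SHIFT), i.e. bits 19..12; and the byte offset
 * within the 4KB page is (va & L2_S_OFFSET), i.e. bits 11..0.
 */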
/*
* ARM L1 Descriptors
*/
#define L1_TYPE_INV 0x00 /* Invalid (fault) */
#define L1_TYPE_C 0x01 /* Coarse L2 */
#define L1_TYPE_S 0x02 /* Section */
#define L1_TYPE_F 0x03 /* Fine L2 */
#define L1_TYPE_MASK 0x03 /* mask of type bits */
/* L1 Section Descriptor */
#define L1_S_B 0x00000004 /* bufferable Section */
#define L1_S_C 0x00000008 /* cacheable Section */
#define L1_S_IMP 0x00000010 /* implementation defined */
#define L1_S_XN (1 << 4) /* execute not */
#define L1_S_DOM(x) ((x) << 5) /* domain */
#define L1_S_DOM_MASK L1_S_DOM(0xf)
#define L1_S_AP(x) ((x) << 10) /* access permissions */
#define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */
#define L1_S_TEX(x) (((x) & 0x7) << 12) /* Type Extension */
#define L1_S_TEX_MASK (0x7 << 12) /* Type Extension */
#define L1_S_APX (1 << 15)
#define L1_SHARED (1 << 16)
#define L1_S_XSCALE_P 0x00000200 /* ECC enable for this section */
#define L1_S_XSCALE_TEX(x) ((x) << 12) /* Type Extension */
#define L1_S_SUPERSEC ((1) << 18) /* Section is a super-section. */
/* L1 Coarse Descriptor */
#define L1_C_IMP0 0x00000004 /* implementation defined */
#define L1_C_IMP1 0x00000008 /* implementation defined */
#define L1_C_IMP2 0x00000010 /* implementation defined */
#define L1_C_DOM(x) ((x) << 5) /* domain */
#define L1_C_DOM_MASK L1_C_DOM(0xf)
#define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */
#define L1_C_XSCALE_P 0x00000200 /* ECC enable for this section */
/* L1 Fine Descriptor */
#define L1_F_IMP0 0x00000004 /* implementation defined */
#define L1_F_IMP1 0x00000008 /* implementation defined */
#define L1_F_IMP2 0x00000010 /* implementation defined */
#define L1_F_DOM(x) ((x) << 5) /* domain */
#define L1_F_DOM_MASK L1_F_DOM(0xf)
#define L1_F_ADDR_MASK 0xfffff000 /* phys address of L2 Table */
#define L1_F_XSCALE_P 0x00000200 /* ECC enable for this section */
/*
* ARM L2 Descriptors
*/
#define L2_TYPE_INV 0x00 /* Invalid (fault) */
#define L2_TYPE_L 0x01 /* Large Page */
#define L2_TYPE_S 0x02 /* Small Page */
#define L2_TYPE_T 0x03 /* Tiny Page */
#define L2_TYPE_MASK 0x03 /* mask of type bits */
/*
* This L2 Descriptor type is available on XScale processors
* when using a Coarse L1 Descriptor. The Extended Small
* Descriptor has the same format as the XScale Tiny Descriptor,
* but describes a 4K page, rather than a 1K page.
*/
#define L2_TYPE_XSCALE_XS 0x03 /* XScale Extended Small Page */
#define L2_B 0x00000004 /* Bufferable page */
#define L2_C 0x00000008 /* Cacheable page */
#define L2_AP0(x) ((x) << 4) /* access permissions (sp 0) */
#define L2_AP1(x) ((x) << 6) /* access permissions (sp 1) */
#define L2_AP2(x) ((x) << 8) /* access permissions (sp 2) */
#define L2_AP3(x) ((x) << 10) /* access permissions (sp 3) */
#define L2_SHARED (1 << 10)
#define L2_APX (1 << 9)
#define L2_XN (1 << 0)
#define L2_L_TEX_MASK (0x7 << 12) /* Type Extension */
#define L2_L_TEX(x) (((x) & 0x7) << 12)
#define L2_S_TEX_MASK (0x7 << 6) /* Type Extension */
#define L2_S_TEX(x) (((x) & 0x7) << 6)
#define L2_XSCALE_L_TEX(x) ((x) << 12) /* Type Extension */
#define L2_XSCALE_L_S(x) (1 << 15) /* Shared */
#define L2_XSCALE_T_TEX(x) ((x) << 6) /* Type Extension */
/*
* Access Permissions for L1 and L2 Descriptors.
*/
#define AP_W 0x01 /* writable */
#define AP_REF 0x01 /* referenced flag */
#define AP_U 0x02 /* user */
/*
* Short-hand for common AP_* constants.
*
* Note: These values assume the S (System) bit is set and
* the R (ROM) bit is clear in CP15 register 1.
*/
#define AP_KR 0x00 /* kernel read */
#define AP_KRW 0x01 /* kernel read/write */
#define AP_KRWUR 0x02 /* kernel read/write usr read */
#define AP_KRWURW 0x03 /* kernel read/write usr read/write */
/*
* Domain Types for the Domain Access Control Register.
*/
#define DOMAIN_FAULT 0x00 /* no access */
#define DOMAIN_CLIENT 0x01 /* client */
#define DOMAIN_RESERVED 0x02 /* reserved */
#define DOMAIN_MANAGER 0x03 /* manager */
/*
* Type Extension bits for XScale processors.
*
* Behavior of C and B when X == 0:
*
* C B Cacheable Bufferable Write Policy Line Allocate Policy
* 0 0 N N - -
* 0 1 N Y - -
* 1 0 Y Y Write-through Read Allocate
* 1 1 Y Y Write-back Read Allocate
*
* Behavior of C and B when X == 1:
* C B Cacheable Bufferable Write Policy Line Allocate Policy
* 0 0 - - - - DO NOT USE
* 0 1 N Y - -
* 1 0 Mini-Data - - -
* 1 1 Y Y Write-back R/W Allocate
*/
#define TEX_XSCALE_X 0x01 /* X modifies C and B */
#define TEX_XSCALE_E 0x02
#define TEX_XSCALE_T 0x04
/* Xscale core 3 */
/*
*
* Cache attributes with L2 present, S = 0
* T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce
* 0 0 0 0 0 N N - N N
* 0 0 0 0 1 N N - N Y
* 0 0 0 1 0 Y Y WT N Y
* 0 0 0 1 1 Y Y WB Y Y
* 0 0 1 0 0 N N - Y Y
* 0 0 1 0 1 N N - N N
* 0 0 1 1 0 Y Y - - N
* 0 0 1 1 1 Y Y WT Y Y
* 0 1 0 0 0 N N - N N
* 0 1 0 0 1 N/A N/A N/A N/A N/A
* 0 1 0 1 0 N/A N/A N/A N/A N/A
* 0 1 0 1 1 N/A N/A N/A N/A N/A
* 0 1 1 X X N/A N/A N/A N/A N/A
* 1 X 0 0 0 N N - N Y
* 1 X 0 0 1 Y N WB N Y
* 1 X 0 1 0 Y N WT N Y
* 1 X 0 1 1 Y N WB Y Y
* 1 X 1 0 0 N N - Y Y
* 1 X 1 0 1 Y Y WB Y Y
* 1 X 1 1 0 Y Y WT Y Y
* 1 X 1 1 1 Y Y WB Y Y
*
*
*
*
* Cache attributes with L2 present, S = 1
* T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce
* 0 0 0 0 0 N N - N N
* 0 0 0 0 1 N N - N Y
* 0 0 0 1 0 Y Y - N Y
* 0 0 0 1 1 Y Y WT Y Y
* 0 0 1 0 0 N N - Y Y
* 0 0 1 0 1 N N - N N
* 0 0 1 1 0 Y Y - - N
* 0 0 1 1 1 Y Y WT Y Y
* 0 1 0 0 0 N N - N N
* 0 1 0 0 1 N/A N/A N/A N/A N/A
* 0 1 0 1 0 N/A N/A N/A N/A N/A
* 0 1 0 1 1 N/A N/A N/A N/A N/A
* 0 1 1 X X N/A N/A N/A N/A N/A
* 1 X 0 0 0 N N - N Y
* 1 X 0 0 1 Y N - N Y
* 1 X 0 1 0 Y N - N Y
* 1 X 0 1 1 Y N - Y Y
* 1 X 1 0 0 N N - Y Y
* 1 X 1 0 1 Y Y WT Y Y
* 1 X 1 1 0 Y Y WT Y Y
* 1 X 1 1 1 Y Y WT Y Y
*/
#endif /* !_MACHINE_PTE_V4_H_ */
/* End of pte.h */

View File

@@ -19,7 +19,7 @@ arm/arm/cpufunc_asm_armv7.S optional cpu_cortexa | cpu_krait | cpu_mv_pj4b
arm/arm/cpufunc_asm_pj4b.S optional cpu_mv_pj4b
arm/arm/cpufunc_asm_sheeva.S optional cpu_arm9e
arm/arm/cpuinfo.c standard
arm/arm/cpu_asm-v6.S optional armv7 | armv6
arm/arm/cpu_asm-v6.S standard
arm/arm/db_disasm.c optional ddb
arm/arm/db_interface.c optional ddb
arm/arm/db_trace.c optional ddb
@@ -37,14 +37,12 @@ arm/arm/gdb_machdep.c optional gdb
arm/arm/generic_timer.c optional generic_timer
arm/arm/gic.c optional gic
arm/arm/gic_fdt.c optional gic fdt
arm/arm/identcpu-v4.c optional !armv7 !armv6
arm/arm/identcpu-v6.c optional armv7 | armv6
arm/arm/identcpu-v6.c standard
arm/arm/in_cksum.c optional inet | inet6
arm/arm/in_cksum_arm.S optional inet | inet6
arm/arm/intr.c optional !intrng
kern/subr_intr.c optional intrng
kern/subr_intr.c standard
arm/arm/locore.S standard no-obj
arm/arm/hypervisor-stub.S optional armv7 | armv6
arm/arm/hypervisor-stub.S standard
arm/arm/machdep.c standard
arm/arm/machdep_boot.c standard
arm/arm/machdep_kdb.c standard
@@ -61,8 +59,7 @@ arm/arm/pl310.c optional pl310
arm/arm/platform.c optional platform
arm/arm/platform_if.m optional platform
arm/arm/platform_pl310_if.m optional platform pl310
arm/arm/pmap-v4.c optional !armv7 !armv6
arm/arm/pmap-v6.c optional armv7 | armv6
arm/arm/pmap-v6.c standard
arm/arm/pmu.c optional pmu | fdt hwpmc
arm/arm/ptrace_machdep.c standard
arm/arm/sc_machdep.c optional sc
@@ -73,12 +70,10 @@ arm/arm/stdatomic.c standard \
compile-with "${NORMAL_C:N-Wmissing-prototypes}"
arm/arm/support.S standard
arm/arm/swtch.S standard
arm/arm/swtch-v4.S optional !armv7 !armv6
arm/arm/swtch-v6.S optional armv7 | armv6
arm/arm/swtch-v6.S standard
arm/arm/sys_machdep.c standard
arm/arm/syscall.c standard
arm/arm/trap-v4.c optional !armv7 !armv6
arm/arm/trap-v6.c optional armv7 | armv6
arm/arm/trap-v6.c standard
arm/arm/uio_machdep.c standard
arm/arm/undefined.c standard
arm/arm/unwind.c optional ddb | kdtrace_hooks | stack