New pmap code for armv6. Disabled by default, option ARM_NEW_PMAP enables it.

This is pretty much a complete rewrite based on the existing i386 code.  The
patches have been circulating for a couple years and have been looked at by
plenty of people, but I'm not putting anybody on the hook as having reviewed
this in any formal sense except myself.

After this has gotten wider testing from the user community, ARM_NEW_PMAP
will become the default and various dregs of the old pmap code will be
removed.

Submitted by:	Svatopluk Kraus <onwahe@gmail.com>,
	  	Michal Meloun <meloun@miracle.cz>
Ian Lepore 2015-03-26 21:13:53 +00:00
parent 147b9d0418
commit 84233ddb80
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=280712
20 changed files with 8563 additions and 20 deletions
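ARM_NEW_PMAP is declared as a global option in sys/conf/options.arm (last hunk below), so the new code can be exercised from a kernel configuration file. A hypothetical config(8) fragment, assuming a copy of an existing armv6 board configuration (names are illustrative):

include "SOMEBOARD"
ident   SOMEBOARD-NEWPMAP
options ARM_NEW_PMAP            # select the new armv6 pmap implementation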


@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/cpu.h>
#include <machine/cpu-v6.h>
#include <machine/proc.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
@ -58,12 +59,19 @@ __FBSDID("$FreeBSD$");
ASSYM(KERNBASE, KERNBASE);
ASSYM(PCB_NOALIGNFLT, PCB_NOALIGNFLT);
#ifdef ARM_NEW_PMAP
ASSYM(CPU_ASID_KERNEL,CPU_ASID_KERNEL);
#endif
ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
#ifndef ARM_NEW_PMAP
ASSYM(PCB_DACR, offsetof(struct pcb, pcb_dacr));
#endif
ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
ASSYM(PCB_PAGEDIR, offsetof(struct pcb, pcb_pagedir));
#ifndef ARM_NEW_PMAP
ASSYM(PCB_L1VEC, offsetof(struct pcb, pcb_l1vec));
ASSYM(PCB_PL1VEC, offsetof(struct pcb, pcb_pl1vec));
#endif
ASSYM(PCB_R4, offsetof(struct pcb, pcb_regs.sf_r4));
ASSYM(PCB_R5, offsetof(struct pcb, pcb_regs.sf_r5));
ASSYM(PCB_R6, offsetof(struct pcb, pcb_regs.sf_r6));
@ -131,7 +139,6 @@ ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap));
#endif
ASSYM(PAGE_SIZE, PAGE_SIZE);
ASSYM(PDESIZE, PDESIZE);
ASSYM(PMAP_DOMAIN_KERNEL, PMAP_DOMAIN_KERNEL);
#ifdef PMAP_INCLUDE_PTE_SYNC
ASSYM(PMAP_INCLUDE_PTE_SYNC, 1);
@ -145,8 +152,13 @@ ASSYM(TRAPFRAMESIZE, sizeof(struct trapframe));
ASSYM(MAXCOMLEN, MAXCOMLEN);
ASSYM(MAXCPU, MAXCPU);
ASSYM(_NCPUWORDS, _NCPUWORDS);
ASSYM(NIRQ, NIRQ);
ASSYM(PCPU_SIZE, sizeof(struct pcpu));
ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
ASSYM(DCACHE_LINE_SIZE, offsetof(struct cpuinfo, dcache_line_size));
ASSYM(DCACHE_LINE_MASK, offsetof(struct cpuinfo, dcache_line_mask));


@ -138,6 +138,14 @@ int _min_bzero_size = 0;
extern int *end;
#ifdef FDT
vm_paddr_t pmap_pa;
#ifdef ARM_NEW_PMAP
vm_offset_t systempage;
vm_offset_t irqstack;
vm_offset_t undstack;
vm_offset_t abtstack;
#else
/*
* This is the number of L2 page tables required for covering max
* (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
@ -147,15 +155,13 @@ extern int *end;
static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];
vm_paddr_t pmap_pa;
struct pv_addr systempage;
static struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
static struct pv_addr kernelstack;
#endif
#endif
#if defined(LINUX_BOOT_ABI)
@ -381,9 +387,11 @@ cpu_startup(void *dummy)
vm_pager_bufferinit();
pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
USPACE_SVC_STACK_TOP;
vector_page_setprot(VM_PROT_READ);
pmap_set_pcb_pagedir(pmap_kernel(), pcb);
#ifndef ARM_NEW_PMAP
vector_page_setprot(VM_PROT_READ);
pmap_postinit();
#endif
#ifdef ARM_TP_ADDRESS
#ifdef ARM_CACHE_LOCK_ENABLE
pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
@ -1003,6 +1011,19 @@ init_proc0(vm_offset_t kstack)
pcpup->pc_curpcb = thread0.td_pcb;
}
#ifdef ARM_NEW_PMAP
void
set_stackptrs(int cpu)
{
set_stackptr(PSR_IRQ32_MODE,
irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
set_stackptr(PSR_ABT32_MODE,
abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
set_stackptr(PSR_UND32_MODE,
undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
#else
void
set_stackptrs(int cpu)
{
@ -1014,6 +1035,7 @@ set_stackptrs(int cpu)
set_stackptr(PSR_UND32_MODE,
undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
#endif
#ifdef FDT
static char *
@ -1048,6 +1070,7 @@ print_kenv(void)
debugf(" %x %s\n", (uint32_t)cp, cp);
}
#ifndef ARM_NEW_PMAP
void *
initarm(struct arm_boot_params *abp)
{
@ -1316,4 +1339,181 @@ initarm(struct arm_boot_params *abp)
return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
sizeof(struct pcb)));
}
#else /* !ARM_NEW_PMAP */
void *
initarm(struct arm_boot_params *abp)
{
struct mem_region mem_regions[FDT_MEM_REGIONS];
vm_paddr_t lastaddr;
vm_offset_t dtbp, kernelstack, dpcpu;
uint32_t memsize;
char *env;
void *kmdp;
int err_devmap, mem_regions_sz;
/* get last allocated physical address */
arm_physmem_kernaddr = abp->abp_physaddr;
lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;
memsize = 0;
set_cpufuncs();
cpuinfo_init();
/*
* Find the dtb passed in by the boot loader.
*/
kmdp = preload_search_by_type("elf kernel");
if (kmdp != NULL)
dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
else
dtbp = (vm_offset_t)NULL;
#if defined(FDT_DTB_STATIC)
/*
* In case the device tree blob was not retrieved (from metadata) try
* to use the statically embedded one.
*/
if (dtbp == (vm_offset_t)NULL)
dtbp = (vm_offset_t)&fdt_static_dtb;
#endif
if (OF_install(OFW_FDT, 0) == FALSE)
panic("Cannot install FDT");
if (OF_init((void *)dtbp) != 0)
panic("OF_init failed with the found device tree");
/* Grab physical memory regions information from device tree. */
if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
panic("Cannot get physical memory regions");
arm_physmem_hardware_regions(mem_regions, mem_regions_sz);
/* Grab reserved memory regions information from device tree. */
if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
EXFLAG_NODUMP | EXFLAG_NOALLOC);
/*
* Set TEX remapping registers.
* Setup kernel page tables and switch to kernel L1 page table.
*/
pmap_set_tex();
pmap_bootstrap_prepare(lastaddr);
/*
* Now that proper page tables are installed, call cpu_setup() to enable
* instruction and data caches and other chip-specific features.
*/
cpu_setup("");
/* Platform-specific initialisation */
platform_probe_and_attach();
pcpu0_init();
/* Do basic tuning, hz etc */
init_param1();
/*
* Allocate a page for the system page mapped to 0xffff0000
* This page will just contain the system vectors and can be
* shared by all processes.
*/
systempage = pmap_preboot_get_pages(1);
/* Map the vector page. */
pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1);
if (virtual_end >= ARM_VECTORS_HIGH)
virtual_end = ARM_VECTORS_HIGH - 1;
/* Allocate dynamic per-cpu area. */
dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
dpcpu_init((void *)dpcpu, 0);
/* Allocate stacks for all modes */
irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU );
kernelstack = pmap_preboot_get_vpages(KSTACK_PAGES * MAXCPU);
/* Allocate message buffer. */
msgbufp = (void *)pmap_preboot_get_vpages(
round_page(msgbufsize) / PAGE_SIZE);
/*
* Pages were allocated during the secondary bootstrap for the
* stacks for different CPU modes.
* We must now set the r13 registers in the different CPU modes to
* point to these stacks.
* Since the ARM stacks use STMFD etc. we must set r13 to the top end
* of the stack memory.
*/
set_stackptrs(0);
mutex_init();
/* Establish static device mappings. */
err_devmap = platform_devmap_init();
arm_devmap_bootstrap(0, NULL);
vm_max_kernel_address = platform_lastaddr();
/*
* Only after the SOC registers block is mapped can we perform device
* tree fixups, as they may attempt to read parameters from hardware.
*/
OF_interpret("perform-fixup", 0);
platform_gpio_init();
cninit();
debugf("initarm: console initialized\n");
debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
debugf(" boothowto = 0x%08x\n", boothowto);
debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
debugf(" lastaddr1: 0x%08x\n", lastaddr);
print_kenv();
env = kern_getenv("kernelname");
if (env != NULL)
strlcpy(kernelname, env, sizeof(kernelname));
if (err_devmap != 0)
printf("WARNING: could not fully configure devmap, error=%d\n",
err_devmap);
platform_late_init();
/*
* We must now clean the cache again....
* Cleaning may be done by reading new data to displace any
* dirty data in the cache. This will have happened in setttb()
* but since we are boot strapping the addresses used for the read
* may have just been remapped and thus the cache could be out
* of sync. A re-clean after the switch will cure this.
* After booting there are no gross relocations of the kernel thus
* this problem will not occur after initarm().
*/
/* Set stack for exception handlers */
undefined_init();
init_proc0(kernelstack);
arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
enable_interrupts(PSR_A);
pmap_bootstrap(0);
/* Exclude the kernel (and all the things we allocated which immediately
* follow the kernel) from the VM allocation pool but not from crash
* dumps. virtual_avail is a global variable which tracks the kva we've
* "allocated" while setting up pmaps.
*
* Prepare the list of physical memory available to the vm subsystem.
*/
arm_physmem_exclude_region(abp->abp_physaddr,
pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
arm_physmem_init_kernel_globals();
init_param2(physmem);
/* Init message buffer. */
msgbufinit(msgbufp, msgbufsize);
kdb_init();
return ((void *)STACKALIGN(thread0.td_pcb));
}
#endif /* !ARM_NEW_PMAP */
#endif /* FDT */
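In the ARM_NEW_PMAP path of initarm() above, the per-mode exception stacks are plain virtual addresses rather than pv_addr structures, so the new set_stackptrs() reduces to simple arithmetic: CPU n owns the n-th slice of the preboot allocation and, because ARM stacks are full-descending (STMFD), its initial SP is the top of that slice. A minimal C sketch of that layout (the helper is hypothetical, not part of the commit):

#include <stdint.h>

#define PAGE_SIZE       4096
#define IRQ_STACK_SIZE  1       /* pages per CPU, from machdep.h */

/* Initial IRQ-mode stack pointer for a given CPU: the top of its slice. */
static uintptr_t
irq_stack_top(uintptr_t irqstack_base, int cpu)
{
        return (irqstack_base + IRQ_STACK_SIZE * PAGE_SIZE * (cpu + 1));
}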


@ -113,6 +113,9 @@ memrw(struct cdev *dev, struct uio *uio, int flags)
return (EINVAL);
sx_xlock(&tmppt_lock);
pmap_kenter((vm_offset_t)_tmppt, v);
#ifdef ARM_NEW_PMAP
pmap_tlb_flush(kernel_pmap, (vm_offset_t)_tmppt);
#endif
o = (int)uio->uio_offset & PAGE_MASK;
c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
c = min(c, (u_int)(PAGE_SIZE - o));


@ -61,7 +61,10 @@ CTASSERT(sizeof(struct kerneldumpheader) == 512);
uint32_t *vm_page_dump;
int vm_page_dump_size;
#ifndef ARM_NEW_PMAP
static struct kerneldumpheader kdh;
static off_t dumplo;
/* Handle chunked writes. */
@ -473,8 +476,20 @@ minidumpsys(struct dumperinfo *di)
else
printf("\n** DUMP FAILED (ERROR %d) **\n", error);
return (error);
return (0);
}
#else /* ARM_NEW_PMAP */
int
minidumpsys(struct dumperinfo *di)
{
return (0);
}
#endif
void
dump_add_page(vm_paddr_t pa)
{


@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
#include <machine/cpufunc.h>
#include <machine/smp.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/physmem.h>
#include <machine/intr.h>
@ -151,10 +152,20 @@ init_secondary(int cpu)
uint32_t loop_counter;
int start = 0, end = 0;
#ifdef ARM_NEW_PMAP
pmap_set_tex();
reinit_mmu(pmap_kern_ttb, (1<<6) | (1<< 0), (1<<6) | (1<< 0));
cpu_setup("");
/* Provide stack pointers for other processor modes. */
set_stackptrs(cpu);
enable_interrupts(PSR_A);
#else /* ARM_NEW_PMAP */
cpu_setup(NULL);
setttb(pmap_pa);
cpu_tlb_flushID();
#endif /* ARM_NEW_PMAP */
pc = &__pcpu[cpu];
/*
@ -166,10 +177,10 @@ init_secondary(int cpu)
pcpu_init(pc, cpu, sizeof(struct pcpu));
dpcpu_init(dpcpu[cpu - 1], cpu);
#ifndef ARM_NEW_PMAP
/* Provide stack pointers for other processor modes. */
set_stackptrs(cpu);
#endif
/* Signal our startup to BSP */
atomic_add_rel_32(&mp_naps, 1);
@ -298,6 +309,12 @@ ipi_handler(void *arg)
CTR1(KTR_SMP, "%s: IPI_TLB", __func__);
cpufuncs.cf_tlb_flushID();
break;
#ifdef ARM_NEW_PMAP
case IPI_LAZYPMAP:
CTR1(KTR_SMP, "%s: IPI_LAZYPMAP", __func__);
pmap_lazyfix_action();
break;
#endif
default:
panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
}

sys/arm/arm/pmap-v6-new.c (new file, 6723 lines)

File diff suppressed because it is too large.


@ -88,8 +88,6 @@
__FBSDID("$FreeBSD$");
#define DOMAIN_CLIENT 0x01
#if defined(_ARM_ARCH_6) && defined(SMP)
#define GET_PCPU(tmp, tmp2) \
mrc p15, 0, tmp, c0, c0, 5; \
@ -109,13 +107,19 @@ __FBSDID("$FreeBSD$");
#endif
.Lcurpcpu:
.word _C_LABEL(__pcpu)
.word _C_LABEL(__pcpu)
.word PCPU_SIZE
.Lcpufuncs:
.word _C_LABEL(cpufuncs)
.Lblocked_lock:
.word _C_LABEL(blocked_lock)
#ifndef ARM_NEW_PMAP
#define DOMAIN_CLIENT 0x01
.Lcpufuncs:
.word _C_LABEL(cpufuncs)
/*
* cpu_throw(oldtd, newtd)
*
@ -412,6 +416,388 @@ ENTRY(cpu_switch)
ldmia r3, {r4-r12, sp, pc}
END(cpu_switch)
#else /* !ARM_NEW_PMAP */
#include <machine/sysreg.h>
ENTRY(cpu_context_switch) /* QQQ: What about macro instead of function? */
DSB
mcr CP15_TTBR0(r0) /* set the new TTB */
ISB
mov r0, #(CPU_ASID_KERNEL)
mcr CP15_TLBIASID(r0) /* flush not global TLBs */
/*
* Flush the entire Branch Target Cache because the branch predictor
* is not architecturally invisible. See ARM Architecture Reference
* Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), Branch
* predictors and Requirements for branch predictor maintenance
* operations sections.
*
* QQQ: The predictor is virtually addressed and holds virtual target
* addresses. Therefore, if the mapping is changed, the predictor cache
* must be flushed. The flush is part of the full i-cache invalidation
* which is always done when a code mapping is changed. So this is the
* only place in the kernel where a standalone predictor flush must be
* executed (except for the self-modifying code case).
*/
mcr CP15_BPIALL /* and flush entire Branch Target Cache */
DSB
mov pc, lr
END(cpu_context_switch)
/*
* cpu_throw(oldtd, newtd)
*
* Remove current thread state, then select the next thread to run
* and load its state.
* r0 = oldtd
* r1 = newtd
*/
ENTRY(cpu_throw)
mov r10, r0 /* r10 = oldtd */
mov r11, r1 /* r11 = newtd */
#ifdef VFP /* This thread is dying, disable */
bl _C_LABEL(vfp_discard) /* VFP without preserving state. */
#endif
GET_PCPU(r8, r9) /* r8 = current pcpu */
ldr r4, [r8, #PC_CPUID] /* r4 = current cpu id */
cmp r10, #0 /* old thread? */
beq 2f /* no, skip */
/* Remove this CPU from the active list. */
ldr r5, [r8, #PC_CURPMAP]
mov r0, #(PM_ACTIVE)
add r5, r0 /* r5 = old pm_active */
/* Compute position and mask. */
#if _NCPUWORDS > 1
lsr r0, r4, #3
bic r0, #3
add r5, r0 /* r5 = position in old pm_active */
mov r2, #1
and r0, r4, #31
lsl r2, r0 /* r2 = mask */
#else
mov r2, #1
lsl r2, r4 /* r2 = mask */
#endif
/* Clear cpu from old active list. */
#ifdef SMP
1: ldrex r0, [r5]
bic r0, r2
strex r1, r0, [r5]
teq r1, #0
bne 1b
#else
ldr r0, [r5]
bic r0, r2
str r0, [r5]
#endif
2:
#ifdef INVARIANTS
cmp r11, #0 /* new thread? */
beq badsw1 /* no, panic */
#endif
ldr r7, [r11, #(TD_PCB)] /* r7 = new PCB */
/*
* Registers at this point
* r4 = current cpu id
* r7 = new PCB
* r8 = current pcpu
* r11 = newtd
*/
/* MMU switch to new thread. */
ldr r0, [r7, #(PCB_PAGEDIR)]
#ifdef INVARIANTS
cmp r0, #0 /* new thread? */
beq badsw4 /* no, panic */
#endif
bl _C_LABEL(cpu_context_switch)
/*
* Set new PMAP as current one.
* Insert cpu to new active list.
*/
ldr r6, [r11, #(TD_PROC)] /* newtd->proc */
ldr r6, [r6, #(P_VMSPACE)] /* newtd->proc->vmspace */
add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */
str r6, [r8, #PC_CURPMAP] /* store to curpmap */
mov r0, #PM_ACTIVE
add r6, r0 /* r6 = new pm_active */
/* compute position and mask */
#if _NCPUWORDS > 1
lsr r0, r4, #3
bic r0, #3
add r6, r0 /* r6 = position in new pm_active */
mov r2, #1
and r0, r4, #31
lsl r2, r0 /* r2 = mask */
#else
mov r2, #1
lsl r2, r4 /* r2 = mask */
#endif
/* Set cpu to new active list. */
#ifdef SMP
1: ldrex r0, [r6]
orr r0, r2
strex r1, r0, [r6]
teq r1, #0
bne 1b
#else
ldr r0, [r6]
orr r0, r2
str r0, [r6]
#endif
/*
* Registers at this point.
* r7 = new PCB
* r8 = current pcpu
* r11 = newtd
* They must match the ones in sw1 position !!!
*/
DMB
b sw1 /* share new thread init with cpu_switch() */
END(cpu_throw)
/*
* cpu_switch(oldtd, newtd, lock)
*
* Save the current thread state, then select the next thread to run
* and load its state.
* r0 = oldtd
* r1 = newtd
* r2 = lock (new lock for old thread)
*/
ENTRY(cpu_switch)
/* Interrupts are disabled. */
#ifdef INVARIANTS
cmp r0, #0 /* old thread? */
beq badsw2 /* no, panic */
#endif
/* Save all the registers in the old thread's pcb. */
ldr r3, [r0, #(TD_PCB)]
add r3, #(PCB_R4)
stmia r3, {r4-r12, sp, lr, pc}
#ifdef INVARIANTS
cmp r1, #0 /* new thread? */
beq badsw3 /* no, panic */
#endif
/*
* Save arguments. Note that we can now use r0-r14 until
* it is time to restore them for the new thread. However,
* some registers are not safe over function call.
*/
mov r9, r2 /* r9 = lock */
mov r10, r0 /* r10 = oldtd */
mov r11, r1 /* r11 = newtd */
GET_PCPU(r8, r3) /* r8 = current PCPU */
ldr r7, [r11, #(TD_PCB)] /* r7 = newtd->td_pcb */
#ifdef VFP
ldr r3, [r10, #(TD_PCB)]
fmrx r0, fpexc /* If the VFP is enabled */
tst r0, #(VFPEXC_EN) /* the current thread has */
movne r1, #1 /* used it, so go save */
addne r0, r3, #(PCB_VFPSTATE) /* the state into the PCB */
blne _C_LABEL(vfp_store) /* and disable the VFP. */
#endif
/*
* MMU switch. If we're switching to a thread with the same
* address space as the outgoing one, we can skip the MMU switch.
*/
mrc CP15_TTBR0(r1) /* r1 = old TTB */
ldr r0, [r7, #(PCB_PAGEDIR)] /* r0 = new TTB */
cmp r0, r1 /* Switching to the TTB? */
beq sw0 /* same TTB, skip */
#if 1 /* Lazy context switch */
/* Don't switch mapping for kernel threads */
ldr r1, =pmap_kern_ttb
ldr r1, [r1] /* r1 = kernel TTB */
cmp r0, r1 /* Switching to kernel TTB? */
beq sw0 /* kernel TTB, skip */
#endif
#ifdef INVARIANTS
cmp r0, #0 /* new thread? */
beq badsw4 /* no, panic */
#endif
bl cpu_context_switch /* new TTB as argument */
/*
* Registers at this point
* r7 = new PCB
* r8 = current pcpu
* r9 = lock
* r10 = oldtd
* r11 = newtd
*/
/*
* Set new PMAP as current one.
* Update active list on PMAPs.
*/
ldr r6, [r11, #TD_PROC] /* newtd->proc */
ldr r6, [r6, #P_VMSPACE] /* newtd->proc->vmspace */
add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */
ldr r5, [r8, #PC_CURPMAP] /* get old curpmap */
str r6, [r8, #PC_CURPMAP] /* and save new one */
mov r0, #PM_ACTIVE
add r5, r0 /* r5 = old pm_active */
add r6, r0 /* r6 = new pm_active */
/* Compute position and mask. */
ldr r4, [r8, #PC_CPUID]
#if _NCPUWORDS > 1
lsr r0, r4, #3
bic r0, #3
add r5, r0 /* r5 = position in old pm_active */
add r6, r0 /* r6 = position in new pm_active */
mov r2, #1
and r0, r4, #31
lsl r2, r0 /* r2 = mask */
#else
mov r2, #1
lsl r2, r4 /* r2 = mask */
#endif
/* Clear cpu from old active list. */
#ifdef SMP
1: ldrex r0, [r5]
bic r0, r2
strex r1, r0, [r5]
teq r1, #0
bne 1b
#else
ldr r0, [r5]
bic r0, r2
str r0, [r5]
#endif
/* Set cpu to new active list. */
#ifdef SMP
1: ldrex r0, [r6]
orr r0, r2
strex r1, r0, [r6]
teq r1, #0
bne 1b
#else
ldr r0, [r6]
orr r0, r2
str r0, [r6]
#endif
sw0:
/*
* Registers at this point
* r7 = new PCB
* r8 = current pcpu
* r9 = lock
* r10 = oldtd
* r11 = newtd
*/
/* Change the old thread lock. */
add r5, r10, #TD_LOCK
DMB
1: ldrex r0, [r5]
strex r1, r9, [r5]
teq r1, #0
bne 1b
DMB
sw1:
clrex
/*
* Registers at this point
* r7 = new PCB
* r8 = current pcpu
* r11 = newtd
*/
#if defined(SMP) && defined(SCHED_ULE)
/*
* 386 and amd64 do the blocked lock test only for SMP and SCHED_ULE
* QQQ: What does it mean in reality and why is it done?
*/
ldr r6, =blocked_lock
1:
ldr r3, [r11, #TD_LOCK] /* atomic write regular read */
cmp r3, r6
beq 1b
#endif
/* Set the new tls */
ldr r0, [r11, #(TD_MD + MD_TP)]
mcr CP15_TPIDRURO(r0) /* write tls thread reg 2 */
/* We have a new curthread now so make a note of it */
str r11, [r8, #PC_CURTHREAD]
mcr CP15_TPIDRPRW(r11)
/* store pcb in per cpu structure */
str r7, [r8, #PC_CURPCB]
/*
* Restore all saved registers and return. Note that some saved
* registers can be changed when either cpu_fork(), cpu_set_upcall(),
* cpu_set_fork_handler(), or makectx() was called.
*/
add r3, r7, #PCB_R4
ldmia r3, {r4-r12, sp, pc}
#ifdef INVARIANTS
badsw1:
ldr r0, =sw1_panic_str
bl _C_LABEL(panic)
1: nop
b 1b
badsw2:
ldr r0, =sw2_panic_str
bl _C_LABEL(panic)
1: nop
b 1b
badsw3:
ldr r0, =sw3_panic_str
bl _C_LABEL(panic)
1: nop
b 1b
badsw4:
ldr r0, =sw4_panic_str
bl _C_LABEL(panic)
1: nop
b 1b
sw1_panic_str:
.asciz "cpu_throw: no newthread supplied.\n"
sw2_panic_str:
.asciz "cpu_switch: no curthread supplied.\n"
sw3_panic_str:
.asciz "cpu_switch: no newthread supplied.\n"
sw4_panic_str:
.asciz "cpu_switch: new pagedir is NULL.\n"
#endif
END(cpu_switch)
#endif /* !ARM_NEW_PMAP */
ENTRY(savectx)
stmfd sp!, {lr}
sub sp, sp, #4
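The "compute position and mask" sequences above maintain the old and new pmap's pm_active cpuset. A C rendering of that index arithmetic (an editor's sketch; it assumes pm_active is laid out as an array of 32-bit words, which is what the _NCPUWORDS > 1 case implies; with a single word the mask is simply 1 << cpu, as in the #else branches):

#include <stdint.h>

static inline void
pm_active_position(uint32_t cpu, uint32_t *byte_off, uint32_t *mask)
{
        /* lsr r0, r4, #3; bic r0, #3  ->  byte offset of the 32-bit word */
        *byte_off = (cpu >> 3) & ~(uint32_t)3;  /* == (cpu / 32) * 4 */
        /* mov r2, #1; and r0, r4, #31; lsl r2, r0  ->  bit within that word */
        *mask = 1u << (cpu & 31);
}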


@ -5,10 +5,16 @@
#define _MACHDEP_BOOT_MACHDEP_H_
/* Structs that need to be initialised by initarm */
#ifdef ARM_NEW_PMAP
extern vm_offset_t irqstack;
extern vm_offset_t undstack;
extern vm_offset_t abtstack;
#else
struct pv_addr;
extern struct pv_addr irqstack;
extern struct pv_addr undstack;
extern struct pv_addr abtstack;
#endif
/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE 1


@ -52,11 +52,14 @@ struct pcb {
#define PCB_OWNFPU 0x00000001
#define PCB_NOALIGNFLT 0x00000002
caddr_t pcb_onfault; /* On fault handler */
#ifdef ARM_NEW_PMAP
uint32_t pcb_pagedir; /* TTB0 value */
#else
vm_offset_t pcb_pagedir; /* PT hooks */
uint32_t *pcb_pl1vec; /* PTR to vector_base L1 entry*/
uint32_t pcb_l1vec; /* Value to stuff on ctx sw */
u_int pcb_dacr; /* Domain Access Control Reg */
#endif
struct vfp_state pcb_vfpstate; /* VP/NEON state */
u_int pcb_vfpcpu; /* VP/NEON last cpu */
} __aligned(8); /*

sys/arm/include/pmap-v6.h (new file, 313 lines)

@ -0,0 +1,313 @@
/*-
* Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
* Copyright 2014 Michal Meloun <meloun@miracle.cz>
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* the Systems Programming Group of the University of Utah Computer
* Science Department and William Jolitz of UUNET Technologies Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The ARM version of this file was more or less based on the i386 version,
* which has the following provenance...
*
* Derived from hp300 version by Mike Hibler, this version by William
* Jolitz uses a recursive map [a pde points to the page directory] to
* map the page tables using the pagetables themselves. This is done to
* reduce the impact on kernel virtual memory for lots of sparse address
* space, and to reduce the cost of memory to each process.
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
* from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
*
* $FreeBSD$
*/
#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_
#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
typedef uint32_t pt1_entry_t; /* L1 table entry */
typedef uint32_t pt2_entry_t; /* L2 table entry */
typedef uint32_t ttb_entry_t; /* TTB entry */
#ifdef _KERNEL
#if 0
#define PMAP_PTE_NOCACHE // Use uncached page tables
#endif
/*
* (1) During pmap bootstrap, physical pages for L2 page tables are
* allocated in advance and used for the contiguous KVA mapping
* starting from KERNBASE. This keeps things simple.
* (2) During vm subsystem initialization, only vm subsystem itself can
* allocate physical memory safely. As pmap_map() is called during
* this initialization, we must be prepared for that and have some
* preallocated physical pages for L2 page tables.
*
* Note that some more pages for L2 page tables are preallocated too
* for mappings lying above VM_MAX_KERNEL_ADDRESS.
*/
#ifndef NKPT2PG
/*
* The optimal way is to define this in the board configuration, as the
* definition here must be safe enough, which means really big.
*
* 1 GB KVA <=> 256 kernel L2 page table pages
*
* From real platforms:
* 1 GB physical memory <=> 10 pages is enough
* 2 GB physical memory <=> 21 pages is enough
*/
#define NKPT2PG 32
#endif
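A quick check of the "1 GB KVA <=> 256 kernel L2 page table pages" figure above, using the sizes from pte-v6.h later in this commit (1 MB mapped per L1 entry, 1 KB per L2 table, 4 KB pages); an editor's sketch, not part of the header:

/* 1 GB needs 1024 L2 tables; 4 of them fit in one 4 KB page -> 256 pages. */
_Static_assert((1024u * 1024 * 1024) / (1024 * 1024) / (4096 / 1024) == 256,
    "256 L2 page table pages cover 1 GB of KVA");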
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern char *_tmppt; /* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
/*
* Pmap stuff
*/
/*
* This structure is used to hold a virtual<->physical address
* association and is used mostly by bootstrap code
*/
struct pv_addr {
SLIST_ENTRY(pv_addr) pv_list;
vm_offset_t pv_va;
vm_paddr_t pv_pa;
};
#endif
struct pv_entry;
struct pv_chunk;
struct md_page {
TAILQ_HEAD(,pv_entry) pv_list;
uint16_t pt2_wirecount[4];
int pat_mode;
};
struct pmap {
struct mtx pm_mtx;
pt1_entry_t *pm_pt1; /* KVA of pt1 */
pt2_entry_t *pm_pt2tab; /* KVA of pt2 pages table */
TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
cpuset_t pm_active; /* active on cpus */
struct pmap_statistics pm_stats; /* pmap statistics */
LIST_ENTRY(pmap) pm_list; /* List of all pmaps */
};
typedef struct pmap *pmap_t;
#ifdef _KERNEL
extern struct pmap kernel_pmap_store;
#define kernel_pmap (&kernel_pmap_store)
#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_ASSERT(pmap, type) \
mtx_assert(&(pmap)->pm_mtx, (type))
#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \
NULL, MTX_DEF | MTX_DUPOK)
#define PMAP_LOCKED(pmap) mtx_owned(&(pmap)->pm_mtx)
#define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
#endif
/*
* For each vm_page_t, there is a list of all currently valid virtual
* mappings of that page. An entry is a pv_entry_t, the list is pv_list.
*/
typedef struct pv_entry {
vm_offset_t pv_va; /* virtual address for mapping */
TAILQ_ENTRY(pv_entry) pv_next;
} *pv_entry_t;
/*
* pv_entries are allocated in chunks per-process. This avoids the
* need to track per-pmap assignments.
*/
#define _NPCM 11
#define _NPCPV 336
struct pv_chunk {
pmap_t pc_pmap;
TAILQ_ENTRY(pv_chunk) pc_list;
uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */
TAILQ_ENTRY(pv_chunk) pc_lru;
struct pv_entry pc_pventry[_NPCPV];
};
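The chunk parameters are chosen so that a pv_chunk fills exactly one page on 32-bit ARM and the pc_map bitmap covers every entry. A quick check, assuming 4-byte pointers and no structure padding (an editor's sketch, not part of the header):

/* header:  pc_pmap (4) + pc_list (8) + pc_map[11] (44) + pc_lru (8) =   64 */
/* entries: _NPCPV (336) * sizeof(struct pv_entry) (4 + 8 = 12)      = 4032 */
_Static_assert(4 + 8 + 11 * 4 + 8 + 336 * 12 == 4096, "pv_chunk is one 4 KB page");
_Static_assert(11 * 32 >= 336, "_NPCM bitmap words cover _NPCPV entries");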
#ifdef _KERNEL
struct pcb;
extern ttb_entry_t pmap_kern_ttb; /* TTB for kernel pmap */
#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
/*
* Only the following functions or macros may be used before pmap_bootstrap()
* is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and
* vtopte2().
*/
void pmap_bootstrap(vm_offset_t );
void pmap_kenter(vm_offset_t , vm_paddr_t );
void *pmap_kenter_temporary(vm_paddr_t , int );
void pmap_kremove(vm_offset_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t );
void pmap_page_set_memattr(vm_page_t , vm_memattr_t );
void pmap_unmapdev(vm_offset_t, vm_size_t);
void pmap_kenter_device(vm_offset_t , vm_paddr_t );
void pmap_set_pcb_pagedir(pmap_t , struct pcb *);
void pmap_lazyfix_action(void);
void pmap_tlb_flush(pmap_t , vm_offset_t );
void pmap_tlb_flush_range(pmap_t , vm_offset_t , vm_size_t );
void pmap_tlb_flush_ng(pmap_t );
void pmap_dcache_wb_range(vm_paddr_t , vm_size_t , vm_memattr_t );
vm_paddr_t pmap_kextract(vm_offset_t );
int pmap_fault(pmap_t , vm_offset_t , uint32_t , int , int );
#define vtophys(va) pmap_kextract((vm_offset_t)(va))
void pmap_set_tex(void);
void reinit_mmu(ttb_entry_t ttb, u_int aux_clr, u_int aux_set);
/*
* Pre-bootstrap epoch functions set.
*/
void pmap_bootstrap_prepare(vm_paddr_t );
vm_paddr_t pmap_preboot_get_pages(u_int );
void pmap_preboot_map_pages(vm_paddr_t , vm_offset_t , u_int );
vm_offset_t pmap_preboot_reserve_pages(u_int );
vm_offset_t pmap_preboot_get_vpages(u_int );
void pmap_preboot_map_attr(vm_paddr_t , vm_offset_t , vm_size_t ,
int , int );
static __inline void
pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
vm_size_t size, int prot, int cache)
{
pmap_preboot_map_attr(pa, va, size, prot, cache);
}
/*
* This structure is used by machine-dependent code to describe
* static mappings of devices, created at bootstrap time.
*/
struct pmap_devmap {
vm_offset_t pd_va; /* virtual address */
vm_paddr_t pd_pa; /* physical address */
vm_size_t pd_size; /* size of region */
vm_prot_t pd_prot; /* protection code */
int pd_cache; /* cache attributes */
};
void pmap_devmap_bootstrap(const struct pmap_devmap *);
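A hypothetical usage sketch for this structure (addresses and names are illustrative, not from the commit; it assumes the usual convention of ending the table with a zero-size entry):

static const struct pmap_devmap board_devmap[] = {
        /*  va          pa          size        prot                           cache */
        { 0xfe000000, 0x48000000, 0x00100000, VM_PROT_READ | VM_PROT_WRITE,
            PTE2_ATTR_DEVICE },
        { 0, 0, 0, 0, 0 },      /* terminator */
};

static void
board_establish_devmap(void)
{
        pmap_devmap_bootstrap(board_devmap);    /* called from early MD startup */
}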
#endif /* _KERNEL */
// ----------------- TO BE DELETED ---------------------------------------------
#include <machine/pte-v6.h>
#ifdef _KERNEL
/*
* sys/arm/arm/elf_trampoline.c
* sys/arm/arm/genassym.c
* sys/arm/arm/machdep.c
* sys/arm/arm/mp_machdep.c
* sys/arm/arm/locore.S
* sys/arm/arm/pmap.c
* sys/arm/arm/swtch.S
* sys/arm/at91/at91_machdep.c
* sys/arm/cavium/cns11xx/econa_machdep.c
* sys/arm/s3c2xx0/s3c24x0_machdep.c
* sys/arm/xscale/ixp425/avila_machdep.c
* sys/arm/xscale/i8134x/crb_machdep.c
* sys/arm/xscale/i80321/ep80219_machdep.c
* sys/arm/xscale/i80321/iq31244_machdep.c
* sys/arm/xscale/pxa/pxa_machdep.c
*/
#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */
/*
* sys/arm/arm/busdma_machdep-v6.c
*/
int pmap_dmap_iscurrent(pmap_t pmap);
/*
* sys/arm/arm/cpufunc.c
*/
void pmap_pte_init_mmu_v6(void);
void vector_page_setprot(int);
/*
* sys/arm/arm/db_interface.c
* sys/arm/arm/machdep.c
* sys/arm/arm/minidump_machdep.c
* sys/arm/arm/pmap.c
*/
#define pmap_kernel() kernel_pmap
/*
* sys/arm/arm/bus_space_generic.c (just comment)
* sys/arm/arm/devmap.c
* sys/arm/arm/pmap.c (just comment)
* sys/arm/at91/at91_machdep.c
* sys/arm/cavium/cns11xx/econa_machdep.c
* sys/arm/freescale/imx/imx6_machdep.c (just comment)
* sys/arm/mv/orion/db88f5xxx.c
* sys/arm/mv/mv_localbus.c
* sys/arm/mv/mv_machdep.c
* sys/arm/mv/mv_pci.c
* sys/arm/s3c2xx0/s3c24x0_machdep.c
* sys/arm/versatile/versatile_machdep.c
* sys/arm/xscale/ixp425/avila_machdep.c
* sys/arm/xscale/i8134x/crb_machdep.c
* sys/arm/xscale/i80321/ep80219_machdep.c
* sys/arm/xscale/i80321/iq31244_machdep.c
* sys/arm/xscale/pxa/pxa_machdep.c
*/
#define PTE_DEVICE PTE2_ATTR_DEVICE
#endif /* _KERNEL */
// -----------------------------------------------------------------------------
#endif /* !_MACHINE_PMAP_H_ */


@ -46,6 +46,9 @@
*
* $FreeBSD$
*/
#ifdef ARM_NEW_PMAP
#include <machine/pmap-v6.h>
#else /* ARM_NEW_PMAP */
#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_
@ -706,3 +709,4 @@ extern vm_paddr_t dump_avail[];
#endif /* !LOCORE */
#endif /* !_MACHINE_PMAP_H_ */
#endif /* !ARM_NEW_PMAP */

sys/arm/include/pmap_var.h (new file, 511 lines)

@ -0,0 +1,511 @@
/*-
* Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
* Copyright 2014 Michal Meloun <meloun@miracle.cz>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_PMAP_VAR_H_
#define _MACHINE_PMAP_VAR_H_
#include <machine/cpu-v6.h>
/*
* Various PMAP defines, exports, and inline functions
* definitions also usable in other MD code.
*/
/* A number of pages in L1 page table. */
#define NPG_IN_PT1 (NB_IN_PT1 / PAGE_SIZE)
/* A number of L2 page tables in a page. */
#define NPT2_IN_PG (PAGE_SIZE / NB_IN_PT2)
/* A number of L2 page table entries in a page. */
#define NPTE2_IN_PG (NPT2_IN_PG * NPTE2_IN_PT2)
#ifdef _KERNEL
/*
* An L2 page tables page contains NPT2_IN_PG L2 page tables. Masking
* pte1_idx by PT2PG_MASK gives us the index of the associated L2 page table
* within the page. The PT2PG_SHIFT definition depends strictly on NPT2_IN_PG.
* I.e., (1 << PT2PG_SHIFT) == NPT2_IN_PG must be fulfilled.
*/
#define PT2PG_SHIFT 2
#define PT2PG_MASK ((1 << PT2PG_SHIFT) - 1)
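As a check of the stated constraint (an editor's sketch): with 4 KB pages and the 1 KB L2 tables from pte-v6.h, NPT2_IN_PG = 4096 / 1024 = 4, which is indeed 1 << PT2PG_SHIFT:

_Static_assert((4096 / 1024) == (1 << 2), "(1 << PT2PG_SHIFT) == NPT2_IN_PG");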
/*
* A PT2TAB holds all allocated L2 page table pages in a pmap.
* Right shifting a virtual address by PT2TAB_SHIFT gives us the index
* of the L2 page table page in PT2TAB which holds the address mapping.
*/
#define PT2TAB_ENTRIES (NPTE1_IN_PT1 / NPT2_IN_PG)
#define PT2TAB_SHIFT (PTE1_SHIFT + PT2PG_SHIFT)
/*
* All allocated L2 page table pages in a pmap are mapped into PT2MAP space.
* Right shifting a virtual address by PT2MAP_SHIFT gives us the index of the
* PTE2 which maps the address.
*/
#define PT2MAP_SIZE (NPTE1_IN_PT1 * NB_IN_PT2)
#define PT2MAP_SHIFT PTE2_SHIFT
extern pt1_entry_t *kern_pt1;
extern pt2_entry_t *kern_pt2tab;
extern pt2_entry_t *PT2MAP;
/*
* Virtual interface for L1 page table management.
*/
static __inline u_int
pte1_index(vm_offset_t va)
{
return (va >> PTE1_SHIFT);
}
static __inline pt1_entry_t *
pte1_ptr(pt1_entry_t *pt1, vm_offset_t va)
{
return (pt1 + pte1_index(va));
}
static __inline vm_offset_t
pte1_trunc(vm_offset_t va)
{
return (va & PTE1_FRAME);
}
static __inline vm_offset_t
pte1_roundup(vm_offset_t va)
{
return ((va + PTE1_OFFSET) & PTE1_FRAME);
}
/*
* Virtual interface for L1 page table entries management.
*
* XXX: Some of the following functions now with a synchronization barrier
* are called in a loop, so it could be useful to have two versions of them.
* One with the barrier and one without the barrier. In this case, pure
* barrier pte1_sync() should be implemented as well.
*/
static __inline void
pte1_sync(pt1_entry_t *pte1p)
{
dsb();
#ifndef PMAP_PTE_NOCACHE
if (!cpuinfo.coherent_walk)
dcache_wb_pou((vm_offset_t)pte1p, sizeof(*pte1p));
#endif
}
static __inline void
pte1_sync_range(pt1_entry_t *pte1p, vm_size_t size)
{
dsb();
#ifndef PMAP_PTE_NOCACHE
if (!cpuinfo.coherent_walk)
dcache_wb_pou((vm_offset_t)pte1p, size);
#endif
}
static __inline void
pte1_store(pt1_entry_t *pte1p, pt1_entry_t pte1)
{
atomic_store_rel_int(pte1p, pte1);
pte1_sync(pte1p);
}
static __inline void
pte1_clear(pt1_entry_t *pte1p)
{
pte1_store(pte1p, 0);
}
static __inline void
pte1_clear_bit(pt1_entry_t *pte1p, uint32_t bit)
{
atomic_clear_int(pte1p, bit);
pte1_sync(pte1p);
}
static __inline boolean_t
pte1_cmpset(pt1_entry_t *pte1p, pt1_entry_t opte1, pt1_entry_t npte1)
{
boolean_t ret;
ret = atomic_cmpset_int(pte1p, opte1, npte1);
if (ret) pte1_sync(pte1p);
return (ret);
}
static __inline boolean_t
pte1_is_link(pt1_entry_t pte1)
{
return ((pte1 & L1_TYPE_MASK) == L1_TYPE_C);
}
static __inline int
pte1_is_section(pt1_entry_t pte1)
{
return ((pte1 & L1_TYPE_MASK) == L1_TYPE_S);
}
static __inline boolean_t
pte1_is_dirty(pt1_entry_t pte1)
{
return ((pte1 & (PTE1_NM | PTE1_RO)) == 0);
}
static __inline boolean_t
pte1_is_global(pt1_entry_t pte1)
{
return ((pte1 & PTE1_NG) == 0);
}
static __inline boolean_t
pte1_is_valid(pt1_entry_t pte1)
{
int l1_type;
l1_type = pte1 & L1_TYPE_MASK;
return ((l1_type == L1_TYPE_C) || (l1_type == L1_TYPE_S));
}
static __inline boolean_t
pte1_is_wired(pt1_entry_t pte1)
{
return (pte1 & PTE1_W);
}
static __inline pt1_entry_t
pte1_load(pt1_entry_t *pte1p)
{
pt1_entry_t pte1;
pte1 = *pte1p;
return (pte1);
}
static __inline pt1_entry_t
pte1_load_clear(pt1_entry_t *pte1p)
{
pt1_entry_t opte1;
opte1 = atomic_readandclear_int(pte1p);
pte1_sync(pte1p);
return (opte1);
}
static __inline void
pte1_set_bit(pt1_entry_t *pte1p, uint32_t bit)
{
atomic_set_int(pte1p, bit);
pte1_sync(pte1p);
}
static __inline vm_paddr_t
pte1_pa(pt1_entry_t pte1)
{
return ((vm_paddr_t)(pte1 & PTE1_FRAME));
}
static __inline vm_paddr_t
pte1_link_pa(pt1_entry_t pte1)
{
return ((vm_paddr_t)(pte1 & L1_C_ADDR_MASK));
}
/*
* Virtual interface for L2 page table entries management.
*
* XXX: Some of the following functions now with a synchronization barrier
* are called in a loop, so it could be useful to have two versions of them.
* One with the barrier and one without the barrier.
*/
static __inline void
pte2_sync(pt2_entry_t *pte2p)
{
dsb();
#ifndef PMAP_PTE_NOCACHE
if (!cpuinfo.coherent_walk)
dcache_wb_pou((vm_offset_t)pte2p, sizeof(*pte2p));
#endif
}
static __inline void
pte2_sync_range(pt2_entry_t *pte2p, vm_size_t size)
{
dsb();
#ifndef PMAP_PTE_NOCACHE
if (!cpuinfo.coherent_walk)
dcache_wb_pou((vm_offset_t)pte2p, size);
#endif
}
static __inline void
pte2_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{
atomic_store_rel_int(pte2p, pte2);
pte2_sync(pte2p);
}
static __inline void
pte2_clear(pt2_entry_t *pte2p)
{
pte2_store(pte2p, 0);
}
static __inline void
pte2_clear_bit(pt2_entry_t *pte2p, uint32_t bit)
{
atomic_clear_int(pte2p, bit);
pte2_sync(pte2p);
}
static __inline boolean_t
pte2_cmpset(pt2_entry_t *pte2p, pt2_entry_t opte2, pt2_entry_t npte2)
{
boolean_t ret;
ret = atomic_cmpset_int(pte2p, opte2, npte2);
if (ret) pte2_sync(pte2p);
return (ret);
}
static __inline boolean_t
pte2_is_dirty(pt2_entry_t pte2)
{
return ((pte2 & (PTE2_NM | PTE2_RO)) == 0);
}
static __inline boolean_t
pte2_is_global(pt2_entry_t pte2)
{
return ((pte2 & PTE2_NG) == 0);
}
static __inline boolean_t
pte2_is_valid(pt2_entry_t pte2)
{
return (pte2 & PTE2_V);
}
static __inline boolean_t
pte2_is_wired(pt2_entry_t pte2)
{
return (pte2 & PTE2_W);
}
static __inline pt2_entry_t
pte2_load(pt2_entry_t *pte2p)
{
pt2_entry_t pte2;
pte2 = *pte2p;
return (pte2);
}
static __inline pt2_entry_t
pte2_load_clear(pt2_entry_t *pte2p)
{
pt2_entry_t opte2;
opte2 = atomic_readandclear_int(pte2p);
pte2_sync(pte2p);
return (opte2);
}
static __inline void
pte2_set_bit(pt2_entry_t *pte2p, uint32_t bit)
{
atomic_set_int(pte2p, bit);
pte2_sync(pte2p);
}
static __inline void
pte2_set_wired(pt2_entry_t *pte2p, boolean_t wired)
{
/*
* Wired bit is transparent for page table walk,
* so pte2_sync() is not needed.
*/
if (wired)
atomic_set_int(pte2p, PTE2_W);
else
atomic_clear_int(pte2p, PTE2_W);
}
static __inline vm_paddr_t
pte2_pa(pt2_entry_t pte2)
{
return ((vm_paddr_t)(pte2 & PTE2_FRAME));
}
static __inline u_int
pte2_attr(pt2_entry_t pte2)
{
return ((u_int)(pte2 & PTE2_ATTR_MASK));
}
/*
* Virtual interface for L2 page tables mapping management.
*/
static __inline u_int
pt2tab_index(vm_offset_t va)
{
return (va >> PT2TAB_SHIFT);
}
static __inline pt2_entry_t *
pt2tab_entry(pt2_entry_t *pt2tab, vm_offset_t va)
{
return (pt2tab + pt2tab_index(va));
}
static __inline void
pt2tab_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{
pte2_store(pte2p,pte2);
}
static __inline pt2_entry_t
pt2tab_load(pt2_entry_t *pte2p)
{
return (pte2_load(pte2p));
}
static __inline pt2_entry_t
pt2tab_load_clear(pt2_entry_t *pte2p)
{
return (pte2_load_clear(pte2p));
}
static __inline u_int
pt2map_index(vm_offset_t va)
{
return (va >> PT2MAP_SHIFT);
}
static __inline pt2_entry_t *
pt2map_entry(vm_offset_t va)
{
return (PT2MAP + pt2map_index(va));
}
/*
* Virtual interface for pmap structure & kernel shortcuts.
*/
static __inline pt1_entry_t *
pmap_pte1(pmap_t pmap, vm_offset_t va)
{
return (pte1_ptr(pmap->pm_pt1, va));
}
static __inline pt1_entry_t *
kern_pte1(vm_offset_t va)
{
return (pte1_ptr(kern_pt1, va));
}
static __inline pt2_entry_t *
pmap_pt2tab_entry(pmap_t pmap, vm_offset_t va)
{
return (pt2tab_entry(pmap->pm_pt2tab, va));
}
static __inline pt2_entry_t *
kern_pt2tab_entry(vm_offset_t va)
{
return (pt2tab_entry(kern_pt2tab, va));
}
static __inline vm_page_t
pmap_pt2_page(pmap_t pmap, vm_offset_t va)
{
pt2_entry_t pte2;
pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
}
static __inline vm_page_t
kern_pt2_page(vm_offset_t va)
{
pt2_entry_t pte2;
pte2 = pte2_load(kern_pt2tab_entry(va));
return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
}
#endif /* _KERNEL */
#endif /* !_MACHINE_PMAP_VAR_H_ */

sys/arm/include/pte-v6.h (new file, 327 lines)

@ -0,0 +1,327 @@
/*-
* Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
* Copyright 2014 Michal Meloun <meloun@miracle.cz>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_PTE_H_
#define _MACHINE_PTE_H_
/*
* Domain Types for the Domain Access Control Register.
*/
#define DOMAIN_FAULT 0x00 /* no access */
#define DOMAIN_CLIENT 0x01 /* client */
#define DOMAIN_RESERVED 0x02 /* reserved */
#define DOMAIN_MANAGER 0x03 /* manager */
/*
* TEX remap registers attributes
*/
#define PRRR_SO 0 /* Strongly ordered memory */
#define PRRR_DEV 1 /* Device memory */
#define PRRR_MEM 2 /* Normal memory */
#define PRRR_DS0 (1 << 16) /* Shared bit for Device, S = 0 */
#define PRRR_DS1 (1 << 17) /* Shared bit for Device, S = 1 */
#define PRRR_NS0 (1 << 18) /* Shared bit for Normal, S = 0 */
#define PRRR_NS1 (1 << 19) /* Shared bit for Normal, S = 1 */
#define PRRR_NOS_SHIFT 24 /* base shift for Not Outer Shared bits */
#define NMRR_NC 0 /* Noncacheable */
#define NMRR_WB_WA 1 /* Write Back, Write Allocate */
#define NMRR_WT 2 /* Write Through, Non-Write Allocate */
#define NMRR_WB 3 /* Write Back, Non-Write Allocate */
/*
*
* The ARM MMU is capable of mapping memory in the following chunks:
*
* 16M Supersections (L1 table)
*
* 1M Sections (L1 table)
*
* 64K Large Pages (L2 table)
*
* 4K Small Pages (L2 table)
*
*
* Coarse Tables can map Large and Small Pages.
* Coarse Tables are 1K in length.
*
* The Translation Table Base register holds the pointer to the
* L1 Table. The L1 Table is a 16K contiguous chunk of memory
* aligned to a 16K boundary. Each entry in the L1 Table maps
* 1M of virtual address space, either via a Section mapping or
* via an L2 Table.
*
*/
#define L1_TABLE_SIZE 0x4000 /* 16K */
#define L1_ENTRIES 0x1000 /* 4K */
#define L2_TABLE_SIZE 0x0400 /* 1K */
#define L2_ENTRIES 0x0100 /* 256 */
/* ARMv6 super-sections. */
#define L1_SUP_SIZE 0x01000000 /* 16M */
#define L1_SUP_OFFSET (L1_SUP_SIZE - 1)
#define L1_SUP_FRAME (~L1_SUP_OFFSET)
#define L1_SUP_SHIFT 24
#define L1_S_SIZE 0x00100000 /* 1M */
#define L1_S_OFFSET (L1_S_SIZE - 1)
#define L1_S_FRAME (~L1_S_OFFSET)
#define L1_S_SHIFT 20
#define L2_L_SIZE 0x00010000 /* 64K */
#define L2_L_OFFSET (L2_L_SIZE - 1)
#define L2_L_FRAME (~L2_L_OFFSET)
#define L2_L_SHIFT 16
#define L2_S_SIZE 0x00001000 /* 4K */
#define L2_S_OFFSET (L2_S_SIZE - 1)
#define L2_S_FRAME (~L2_S_OFFSET)
#define L2_S_SHIFT 12
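A small sketch of how a 32-bit virtual address decodes under this short-descriptor layout (1 MB L1 sections pointing at 256-entry L2 tables of 4 KB small pages); the helper is illustrative, not part of the header:

#include <stdint.h>

static inline void
va_decode(uint32_t va, uint32_t *l1_idx, uint32_t *l2_idx, uint32_t *off)
{
        *l1_idx = va >> 20;             /* L1_S_SHIFT: one L1 entry per 1 MB */
        *l2_idx = (va >> 12) & 0xff;    /* L2_S_SHIFT: 256 entries per L2    */
        *off    = va & 0xfff;           /* offset within the 4 KB small page */
}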
/*
* ARM MMU L1 Descriptors
*/
#define L1_TYPE_INV 0x00 /* Invalid (fault) */
#define L1_TYPE_C 0x01 /* Coarse L2 */
#define L1_TYPE_S 0x02 /* Section */
#define L1_TYPE_MASK 0x03 /* Mask of type bits */
/* L1 Section Descriptor */
#define L1_S_B 0x00000004 /* bufferable Section */
#define L1_S_C 0x00000008 /* cacheable Section */
#define L1_S_NX 0x00000010 /* not executable */
#define L1_S_DOM(x) ((x) << 5) /* domain */
#define L1_S_DOM_MASK L1_S_DOM(0xf)
#define L1_S_P 0x00000200 /* ECC enable for this section */
#define L1_S_AP(x) ((x) << 10) /* access permissions */
#define L1_S_AP0 0x00000400 /* access permissions bit 0 */
#define L1_S_AP1 0x00000800 /* access permissions bit 1 */
#define L1_S_TEX(x) ((x) << 12) /* type extension */
#define L1_S_TEX0 0x00001000 /* type extension bit 0 */
#define L1_S_TEX1 0x00002000 /* type extension bit 1 */
#define L1_S_TEX2 0x00004000 /* type extension bit 2 */
#define L1_S_AP2 0x00008000 /* access permissions bit 2 */
#define L1_S_SHARED 0x00010000 /* shared */
#define L1_S_NG 0x00020000 /* not global */
#define L1_S_SUPERSEC 0x00040000 /* Section is a super-section. */
#define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */
/* L1 Coarse Descriptor */
#define L1_C_DOM(x) ((x) << 5) /* domain */
#define L1_C_DOM_MASK L1_C_DOM(0xf)
#define L1_C_P 0x00000200 /* ECC enable for this section */
#define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */
/*
* ARM MMU L2 Descriptors
*/
#define L2_TYPE_INV 0x00 /* Invalid (fault) */
#define L2_TYPE_L 0x01 /* Large Page - 64k - not used yet*/
#define L2_TYPE_S 0x02 /* Small Page - 4k */
#define L2_TYPE_MASK 0x03
#define L2_NX 0x00000001 /* Not executable */
#define L2_B 0x00000004 /* Bufferable page */
#define L2_C 0x00000008 /* Cacheable page */
#define L2_AP(x) ((x) << 4)
#define L2_AP0 0x00000010 /* access permissions bit 0*/
#define L2_AP1 0x00000020 /* access permissions bit 1*/
#define L2_TEX(x) ((x) << 6) /* type extension */
#define L2_TEX0 0x00000040 /* type extension bit 0 */
#define L2_TEX1 0x00000080 /* type extension bit 1 */
#define L2_TEX2 0x00000100 /* type extension bit 2 */
#define L2_AP2 0x00000200 /* access permissions bit 2*/
#define L2_SHARED 0x00000400 /* shared */
#define L2_NG 0x00000800 /* not global */
/*
* TEX classes encoding
*/
#define TEX1_CLASS_0 ( 0)
#define TEX1_CLASS_1 ( L1_S_B)
#define TEX1_CLASS_2 ( L1_S_C )
#define TEX1_CLASS_3 ( L1_S_C | L1_S_B)
#define TEX1_CLASS_4 (L1_S_TEX0 )
#define TEX1_CLASS_5 (L1_S_TEX0 | L1_S_B)
#define TEX1_CLASS_6 (L1_S_TEX0 | L1_S_C ) /* Reserved for ARM11 */
#define TEX1_CLASS_7 (L1_S_TEX0 | L1_S_C | L1_S_B)
#define TEX2_CLASS_0 ( 0)
#define TEX2_CLASS_1 ( L2_B)
#define TEX2_CLASS_2 ( L2_C )
#define TEX2_CLASS_3 ( L2_C | L2_B)
#define TEX2_CLASS_4 (L2_TEX0 )
#define TEX2_CLASS_5 (L2_TEX0 | L2_B)
#define TEX2_CLASS_6 (L2_TEX0 | L2_C ) /* Reserved for ARM11 */
#define TEX2_CLASS_7 (L2_TEX0 | L2_C | L2_B)
/* L1 table definitions. */
#define NB_IN_PT1 L1_TABLE_SIZE
#define NPTE1_IN_PT1 L1_ENTRIES
/* L2 table definitions. */
#define NB_IN_PT2 L2_TABLE_SIZE
#define NPTE2_IN_PT2 L2_ENTRIES
/*
* Map memory attributes to TEX classes
*/
#define PTE2_ATTR_WB_WA TEX2_CLASS_0
#define PTE2_ATTR_NOCACHE TEX2_CLASS_1
#define PTE2_ATTR_DEVICE TEX2_CLASS_2
#define PTE2_ATTR_SO TEX2_CLASS_3
/*
* Software defined bits for L1 descriptors
* - L1_AP0 is used as page accessed bit
* - L1_AP2 (RO / not RW) is used as page not modified bit
* - L1_TEX0 is used as software emulated RO bit
*/
#define PTE1_V L1_TYPE_S /* Valid bit */
#define PTE1_A L1_S_AP0 /* Accessed - software emulated */
#define PTE1_NM L1_S_AP2 /* not modified bit - software emulated
* used as real write enable bit */
#define PTE1_M 0 /* Modified (dummy) */
#define PTE1_S L1_S_SHARED /* Shared */
#define PTE1_NG L1_S_NG /* Not global */
#define PTE1_G 0 /* Global (dummy) */
#define PTE1_NX L1_S_NX /* Not executable */
#define PTE1_X 0 /* Executable (dummy) */
#define PTE1_RO L1_S_TEX1 /* Read Only */
#define PTE1_RW 0 /* Read-Write (dummy) */
#define PTE1_U L1_S_AP1 /* User */
#define PTE1_NU 0 /* Not user (kernel only) (dummy) */
#define PTE1_W L1_S_TEX2 /* Wired */
#define PTE1_SHIFT L1_S_SHIFT
#define PTE1_SIZE L1_S_SIZE
#define PTE1_OFFSET L1_S_OFFSET
#define PTE1_FRAME L1_S_FRAME
#define PTE1_ATTR_MASK (L1_S_TEX0 | L1_S_C | L1_S_B)
#define PTE1_AP_KR (PTE1_RO | PTE1_NM)
#define PTE1_AP_KRW 0
#define PTE1_AP_KRUR (PTE1_RO | PTE1_NM | PTE1_U)
#define PTE1_AP_KRWURW PTE1_U
/*
* PTE1 descriptors creation macros.
*/
#define PTE1_PA(pa) ((pa) & PTE1_FRAME)
#define PTE1_AP_COMMON (PTE1_V | PTE1_S)
#define PTE1(pa, ap, attr) (PTE1_PA(pa) | (ap) | (attr) | PTE1_AP_COMMON)
#define PTE1_KERN(pa, ap, attr) PTE1(pa, (ap) | PTE1_A | PTE1_G, attr)
#define PTE1_KERN_NG(pa, ap, attr) PTE1(pa, (ap) | PTE1_A | PTE1_NG, attr)
#define PTE1_LINK(pa) (((pa) & L1_C_ADDR_MASK) | L1_TYPE_C)
/*
* Software defined bits for L2 descriptors
* - L2_AP0 is used as page accessed bit
* - L2_AP2 (RO / not RW) is used as page not modified bit
* - L2_TEX0 is used as software emulated RO bit
*/
#define PTE2_V L2_TYPE_S /* Valid bit */
#define PTE2_A L2_AP0 /* Accessed - software emulated */
#define PTE2_NM L2_AP2 /* not modified bit - software emulated
* used as real write enable bit */
#define PTE2_M 0 /* Modified (dummy) */
#define PTE2_S L2_SHARED /* Shared */
#define PTE2_NG L2_NG /* Not global */
#define PTE2_G 0 /* Global (dummy) */
#define PTE2_NX L2_NX /* Not executable */
#define PTE2_X 0 /* Executable (dummy) */
#define PTE2_RO L2_TEX1 /* Read Only */
#define PTE2_U L2_AP1 /* User */
#define PTE2_NU 0 /* Not user (kernel only) (dummy) */
#define PTE2_W L2_TEX2 /* Wired */
#define PTE2_SHIFT L2_S_SHIFT
#define PTE2_SIZE L2_S_SIZE
#define PTE2_OFFSET L2_S_OFFSET
#define PTE2_FRAME L2_S_FRAME
#define PTE2_ATTR_MASK (L2_TEX0 | L2_C | L2_B)
#define PTE2_AP_KR (PTE2_RO | PTE2_NM)
#define PTE2_AP_KRW 0
#define PTE2_AP_KRUR (PTE2_RO | PTE2_NM | PTE2_U)
#define PTE2_AP_KRWURW PTE2_U
/*
* PTE2 descriptors creation macros.
*/
#define PTE2_PA(pa) ((pa) & PTE2_FRAME)
#define PTE2_AP_COMMON (PTE2_V | PTE2_S)
#define PTE2(pa, ap, attr) (PTE2_PA(pa) | (ap) | (attr) | PTE2_AP_COMMON)
#define PTE2_KERN(pa, ap, attr) PTE2(pa, (ap) | PTE2_A | PTE2_G, attr)
#define PTE2_KERN_NG(pa, ap, attr) PTE2(pa, (ap) | PTE2_A | PTE2_NG, attr)
// ----------------- TO BE DELETED ---------------------------------------------
/*
* sys/arm/arm/elf_trampoline.c
*/
#define AP_KRW 0x01 /* kernel read/write */
/*
* lib/libkvm/kvm_arm.c
*/
#define L1_ADDR_MASK 0xfffffc00
/*
* lib/libkvm/kvm_arm.c
*/
#define L2_ADDR_BITS 0x000ff000 /* L2 PTE address bits */
#ifndef LOCORE
/*
* sys/arm/arm/minidump_machdep.c
* sys/arm/arm/pmap.c
* sys/arm/arm/pmap.h (hack for our hack in pmap.h )
* lib/libkvm/kvm_arm.c
*/
typedef uint32_t pd_entry_t; /* page directory entry */
/*
* sys/arm/arm/minidump_machdep.c
* sys/arm/arm/pmap.c
* sys/arm/arm/pmap.h (hack for our hack in pmap.h )
* sys/arm/include/param.h
*/
typedef uint32_t pt_entry_t; /* page table entry */
#endif
// -----------------------------------------------------------------------------
#endif /* !_MACHINE_PTE_H_ */


@ -33,6 +33,9 @@
*
* $FreeBSD$
*/
#ifdef ARM_NEW_PMAP
#include <machine/pte-v6.h>
#else /* ARM_NEW_PMAP */
#ifndef _MACHINE_PTE_H_
#define _MACHINE_PTE_H_
@ -352,5 +355,6 @@ typedef uint32_t pt_entry_t; /* page table entry */
* 1 X 1 1 1 Y Y WT Y Y
*/
#endif /* !_MACHINE_PTE_H_ */
#endif /* !ARM_NEW_PMAP */
/* End of pte.h */


@ -33,7 +33,11 @@ static inline void
sf_buf_map(struct sf_buf *sf, int flags)
{
#ifdef ARM_NEW_PMAP
pmap_qenter(sf->kva, &(sf->m), 1);
#else
pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
#endif
}
static inline int


@ -13,6 +13,8 @@
#define IPI_STOP_HARD 4
#define IPI_HARDCLOCK 6
#define IPI_TLB 7
#define IPI_CACHE 8
#define IPI_LAZYPMAP 9
void init_secondary(int cpu);
void mpentry(void);


@ -29,8 +29,22 @@
#ifndef _MACHINE_VM_H_
#define _MACHINE_VM_H_
#ifdef ARM_NEW_PMAP
#include <machine/pte-v6.h>
#define VM_MEMATTR_WB_WA ((vm_memattr_t)PTE2_ATTR_WB_WA)
#define VM_MEMATTR_NOCACHE ((vm_memattr_t)PTE2_ATTR_NOCACHE)
#define VM_MEMATTR_DEVICE ((vm_memattr_t)PTE2_ATTR_DEVICE)
#define VM_MEMATTR_SO ((vm_memattr_t)PTE2_ATTR_SO)
#define VM_MEMATTR_DEFAULT VM_MEMATTR_WB_WA
#define VM_MEMATTR_UNCACHEABLE VM_MEMATTR_SO /* name is misused by DMA */
#else
/* Memory attribute configuration. */
#define VM_MEMATTR_DEFAULT 0
#define VM_MEMATTR_UNCACHEABLE 1
#endif
#endif /* !_MACHINE_VM_H_ */


@ -119,13 +119,10 @@
#define VM_LEVEL_0_ORDER 8
#endif
#define UPT_MAX_ADDRESS VADDR(UPTPTDI + 3, 0)
#define UPT_MIN_ADDRESS VADDR(UPTPTDI, 0)
#define VM_MIN_ADDRESS (0x00001000)
#ifndef VM_MAXUSER_ADDRESS
#define VM_MAXUSER_ADDRESS KERNBASE
#endif /* VM_MAXUSER_ADDRESS */
#define VM_MAXUSER_ADDRESS (KERNBASE - 0x00400000) /* !!! PT2MAP_SIZE */
#endif
#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS
#define USRSTACK VM_MAXUSER_ADDRESS
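The 0x00400000 carved out below KERNBASE above is PT2MAP_SIZE from pmap_var.h (NPTE1_IN_PT1 * NB_IN_PT2 = 4096 * 1 KB); a quick editor's check:

_Static_assert(4096 * 1024 == 0x00400000, "PT2MAP occupies 4 MB below KERNBASE");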


@ -42,7 +42,8 @@ arm/arm/pl310.c optional pl310
arm/arm/platform.c optional platform
arm/arm/platform_if.m optional platform
arm/arm/pmap.c optional !armv6
arm/arm/pmap-v6.c optional armv6
arm/arm/pmap-v6.c optional armv6 !arm_new_pmap
arm/arm/pmap-v6-new.c optional armv6 arm_new_pmap
arm/arm/sc_machdep.c optional sc
arm/arm/setcpsr.S standard
arm/arm/setstack.s standard


@ -6,6 +6,7 @@ ARM_KERN_DIRECTMAP opt_vm.h
ARM_L2_PIPT opt_global.h
ARM_MANY_BOARD opt_global.h
ARM_NEW_PMAP opt_global.h
NKPT2PG opt_pmap.h
ARM_WANT_TP_ADDRESS opt_global.h
COUNTS_PER_SEC opt_timer.h
CPU_ARM9 opt_global.h