Increase the arm64 kernel address space to 512GB, and the DMAP region to
2TB. The latter can be increased in 512GB chunks by adjusting the lower
address; however, more work will be needed to increase the former.

There is still some work needed to create a DMAP region covering only the
RAM address space, because on ARM architectures all mappings of a given
physical address should have the same memory attributes, and these will
differ between device memory and normal memory.

Reviewed by:	kib
Obtained from:	ABT Systems Ltd
Relnotes:	yes
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D5859
This commit is contained in:
andrew 2016-04-13 09:44:32 +00:00
parent 3eeca05f1b
commit 66ef37d31b
3 changed files with 43 additions and 25 deletions

View File

@ -37,6 +37,7 @@
#include <machine/vmparam.h>
#define VIRT_BITS 48
#define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
.globl kernbase
.set kernbase, KERNBASE
@ -322,10 +323,12 @@ virt_map:
* TODO: This is out of date.
* There are at least 5 pages before that address for the page tables
* The pages used are:
* - The identity (PA = VA) table (TTBR0)
* - The Kernel L1 table (TTBR1)(not yet)
* - The PA != VA L2 table to jump into (not yet)
* - The FDT L2 table (not yet)
* - The Kernel L2 table
* - The Kernel L1 table
* - The Kernel L0 table (TTBR1)
* - The identity (PA = VA) L1 table
* - The identity (PA = VA) L0 table (TTBR0)
* - The DMAP L1 tables
*/
create_pagetables:
/* Save the Link register */
@ -381,6 +384,12 @@ create_pagetables:
mov x10, #1
bl link_l0_pagetable
/* Link the DMAP tables */
ldr x8, =DMAP_MIN_ADDRESS
adr x9, pagetable_dmap;
mov x10, #DMAP_TABLES
bl link_l0_pagetable
/*
* Build the TTBR0 maps.
*/
@ -644,6 +653,10 @@ pagetable_l1_ttbr0:
.space PAGE_SIZE
pagetable_l0_ttbr0:
.space PAGE_SIZE
.globl pagetable_dmap
pagetable_dmap:
.space PAGE_SIZE * DMAP_TABLES
pagetable_end:
el2_pagetable:

View File

@ -222,6 +222,13 @@ static struct rwlock_padalign pvh_global_lock;
vm_paddr_t dmap_phys_base; /* The start of the dmap region */
/* This code assumes all L1 DMAP entries will be used */
CTASSERT((DMAP_MIN_ADDRESS & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
CTASSERT((DMAP_MAX_ADDRESS & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
#define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
extern pt_entry_t pagetable_dmap[];
/*
* Data for the pv entry allocation mechanism
*/
@ -543,28 +550,25 @@ pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
}
static void
pmap_bootstrap_dmap(vm_offset_t l1pt, vm_paddr_t kernstart)
pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t kernstart)
{
vm_offset_t va;
vm_paddr_t pa;
pd_entry_t *l1;
u_int l1_slot;
pa = dmap_phys_base = kernstart & ~L1_OFFSET;
va = DMAP_MIN_ADDRESS;
l1 = (pd_entry_t *)l1pt;
l1_slot = pmap_l1_index(DMAP_MIN_ADDRESS);
for (; va < DMAP_MAX_ADDRESS;
pa += L1_SIZE, va += L1_SIZE, l1_slot++) {
KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
pmap_load_store(&l1[l1_slot],
pmap_load_store(&pagetable_dmap[l1_slot],
(pa & ~L1_OFFSET) | ATTR_DEFAULT |
ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
}
cpu_dcache_wb_range((vm_offset_t)l1, PAGE_SIZE);
cpu_dcache_wb_range((vm_offset_t)pagetable_dmap,
PAGE_SIZE * DMAP_TABLES);
cpu_tlb_flushID();
}

View File

@ -125,15 +125,16 @@
* split into 2 regions at each end of the 64 bit address space, with an
* out of range "hole" in the middle.
*
* We limit the size of the two spaces to 39 bits each.
* We use the full 48 bits for each region, however the kernel may only use
* a limited range within this space.
*
* Upper region: 0xffffffffffffffff
* 0xffffff8000000000
* 0xffff000000000000
*
* Hole: 0xffffff7fffffffff
* 0x0000008000000000
* Hole: 0xfffeffffffffffff
* 0x0001000000000000
*
* Lower region: 0x0000007fffffffff
* Lower region: 0x0000ffffffffffff
* 0x0000000000000000
*
* We use the upper region for the kernel, and the lower region for userland.
@ -152,23 +153,23 @@
#define VM_MIN_ADDRESS (0x0000000000000000UL)
#define VM_MAX_ADDRESS (0xffffffffffffffffUL)
/* 32 GiB of kernel addresses */
#define VM_MIN_KERNEL_ADDRESS (0xffffff8000000000UL)
#define VM_MAX_KERNEL_ADDRESS (0xffffff8800000000UL)
/* 512 GiB of kernel addresses */
#define VM_MIN_KERNEL_ADDRESS (0xffff000000000000UL)
#define VM_MAX_KERNEL_ADDRESS (0xffff008000000000UL)
/* Direct Map for 128 GiB of PA: 0x0 - 0x1fffffffff */
#define DMAP_MIN_ADDRESS (0xffffffc000000000UL)
#define DMAP_MAX_ADDRESS (0xffffffdfffffffffUL)
/* 2TiB for the direct map region */
#define DMAP_MIN_ADDRESS (0xfffffd0000000000UL)
#define DMAP_MAX_ADDRESS (0xffffff0000000000UL)
#define DMAP_MIN_PHYSADDR (dmap_phys_base)
#define DMAP_MAX_PHYSADDR (dmap_phys_base + (DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS))
/* True if pa is in the dmap range */
#define PHYS_IN_DMAP(pa) ((pa) >= DMAP_MIN_PHYSADDR && \
(pa) <= DMAP_MAX_PHYSADDR)
(pa) < DMAP_MAX_PHYSADDR)
/* True if va is in the dmap range */
#define VIRT_IN_DMAP(va) ((va) >= DMAP_MIN_ADDRESS && \
(va) <= DMAP_MAX_ADDRESS)
(va) < DMAP_MAX_ADDRESS)
#define PHYS_TO_DMAP(pa) \
({ \