powerpc64: Implement Radix MMU for POWER9 CPUs

Summary:
POWER9 supports two MMU formats: traditional hashed page tables, and Radix
page tables, similar to what's present on most other architectures.  The
PowerISA also specifies a process table -- a table of page table pointers --
which on the POWER9 is only available with the Radix MMU, so we can take
advantage of it with the Radix MMU driver.

Written by Matt Macy.

Differential Revision: https://reviews.freebsd.org/D19516
Justin Hibbits 2020-05-11 02:33:37 +00:00
parent 83ed508055
commit 65bbba25d2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=360887
20 changed files with 6808 additions and 32 deletions


@ -135,6 +135,7 @@ powerpc/aim/locore.S optional aim no-obj
powerpc/aim/aim_machdep.c optional aim
powerpc/aim/mmu_oea.c optional aim powerpc
powerpc/aim/mmu_oea64.c optional aim
powerpc/aim/mmu_radix.c optional aim powerpc64
powerpc/aim/moea64_if.m optional aim
powerpc/aim/moea64_native.c optional aim
powerpc/aim/mp_cpudep.c optional aim


@ -136,6 +136,8 @@ __FBSDID("$FreeBSD$");
struct bat battable[16];
#endif
int radix_mmu = 0;
#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void *testppc64, *testppc64size;
@ -451,7 +453,14 @@ aim_cpu_init(vm_offset_t toc)
* in case the platform module had a better idea of what we
* should do.
*/
if (cpu_features & PPC_FEATURE_64)
if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
radix_mmu = 0;
TUNABLE_INT_FETCH("radix_mmu", &radix_mmu);
if (radix_mmu)
pmap_mmu_install(MMU_TYPE_RADIX, BUS_PROBE_GENERIC);
else
pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
} else if (cpu_features & PPC_FEATURE_64)
pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
else
pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
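
Note: even on ISA 3.0 (POWER9) CPUs the hash MMU remains the default here; the
Radix driver is opt-in through the "radix_mmu" kernel environment variable
fetched above.  A minimal sketch of selecting it, assuming the tunable is set
the usual way from loader.conf:

radix_mmu="1"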


@ -322,6 +322,7 @@ void moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va);
void moea_scan_init(mmu_t mmu);
vm_offset_t moea_quick_enter_page(mmu_t mmu, vm_page_t m);
void moea_quick_remove_page(mmu_t mmu, vm_offset_t addr);
boolean_t moea_page_is_mapped(mmu_t mmu, vm_page_t m);
static int moea_map_user_ptr(mmu_t mmu, pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
static int moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
@ -364,6 +365,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr),
MMUMETHOD(mmu_quick_enter_page, moea_quick_enter_page),
MMUMETHOD(mmu_quick_remove_page, moea_quick_remove_page),
MMUMETHOD(mmu_page_is_mapped, moea_page_is_mapped),
/* Internal interfaces */
MMUMETHOD(mmu_bootstrap, moea_bootstrap),
@ -1104,6 +1106,12 @@ moea_quick_remove_page(mmu_t mmu, vm_offset_t addr)
{
}
boolean_t
moea_page_is_mapped(mmu_t mmu, vm_page_t m)
{
return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
}
/*
* Map the given physical page at the specified virtual address in the
* target pmap with the protection requested. If specified the page


@ -120,8 +120,7 @@ uintptr_t moea64_get_unique_vsid(void);
*
*/
#define PV_LOCK_PER_DOM (PA_LOCK_COUNT * 3)
#define PV_LOCK_COUNT (PV_LOCK_PER_DOM * MAXMEMDOM)
#define PV_LOCK_COUNT PA_LOCK_COUNT
static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
/*
@ -130,8 +129,7 @@ static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
* index at (N << 45).
*/
#ifdef __powerpc64__
#define PV_LOCK_IDX(pa) (pa_index(pa) % PV_LOCK_PER_DOM + \
(((pa) >> 45) % MAXMEMDOM) * PV_LOCK_PER_DOM)
#define PV_LOCK_IDX(pa) ((pa_index(pa) * (((pa) >> 45) + 1)) % PV_LOCK_COUNT)
#else
#define PV_LOCK_IDX(pa) (pa_index(pa) % PV_LOCK_COUNT)
#endif
@ -305,6 +303,7 @@ void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
void moea64_scan_init(mmu_t mmu);
vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);
boolean_t moea64_page_is_mapped(mmu_t mmu, vm_page_t m);
static int moea64_map_user_ptr(mmu_t mmu, pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
static int moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
@ -353,6 +352,7 @@ static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr),
MMUMETHOD(mmu_quick_enter_page, moea64_quick_enter_page),
MMUMETHOD(mmu_quick_remove_page, moea64_quick_remove_page),
MMUMETHOD(mmu_page_is_mapped, moea64_page_is_mapped),
#ifdef __powerpc64__
MMUMETHOD(mmu_page_array_startup, moea64_page_array_startup),
#endif
@ -1425,6 +1425,12 @@ moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr)
sched_unpin();
}
boolean_t
moea64_page_is_mapped(mmu_t mmu, vm_page_t m)
{
return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
}
/*
* Map the given physical page at the specified virtual address in the
* target pmap with the protection requested. If specified the page

sys/powerpc/aim/mmu_radix.c (new file, 6507 lines added)

File diff suppressed because it is too large.


@ -356,6 +356,7 @@ static int mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
static int mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
int *is_user, vm_offset_t *decoded_addr);
static void mmu_booke_page_array_startup(mmu_t , long);
static boolean_t mmu_booke_page_is_mapped(mmu_t mmu, vm_page_t m);
static mmu_method_t mmu_booke_methods[] = {
@ -398,6 +399,7 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
MMUMETHOD(mmu_page_array_startup, mmu_booke_page_array_startup),
MMUMETHOD(mmu_page_is_mapped, mmu_booke_page_is_mapped),
/* Internal interfaces */
MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
@ -1249,6 +1251,13 @@ mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
return (0);
}
static boolean_t
mmu_booke_page_is_mapped(mmu_t mmu, vm_page_t m)
{
return (!TAILQ_EMPTY(&(m)->md.pv_list));
}
/*
* Initialize pmap associated with process 0.
*/


@ -185,6 +185,34 @@ powerpc_sync(void)
__asm __volatile ("sync" : : : "memory");
}
static __inline int
cntlzd(uint64_t word)
{
uint64_t result;
/* cntlzd %0, %1 */
__asm __volatile(".long 0x7c000074 | (%1 << 21) | (%0 << 16)" :
"=r"(result) : "r"(word));
return (int)result;
}
static __inline int
cnttzd(uint64_t word)
{
uint64_t result;
/* cnttzd %0, %1 */
__asm __volatile(".long 0x7c000474 | (%1 << 21) | (%0 << 16)" :
"=r"(result) : "r"(word));
return (int)result;
}
static __inline void
ptesync(void)
{
__asm __volatile("ptesync");
}
static __inline register_t
intr_disable(void)
{
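
As a usage sketch (not part of this diff), the new count-trailing-zeros wrapper
can drive a simple walk over the set bits of a 64-bit mask, lowest bit first.
Assumes only the cnttzd() inline added above:

static __inline void
foreach_set_bit(uint64_t mask, void (*fn)(int))
{
	while (mask != 0) {
		fn(cnttzd(mask));	/* index of the lowest set bit */
		mask &= mask - 1;	/* clear that bit */
	}
}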


@ -116,6 +116,7 @@ DATA_SET(mmu_set, name)
#define MMU_TYPE_BOOKE "mmu_booke" /* Book-E MMU specification */
#define MMU_TYPE_OEA "mmu_oea" /* 32-bit OEA */
#define MMU_TYPE_G5 "mmu_g5" /* 64-bit bridge (ibm 970) */
#define MMU_TYPE_RADIX "mmu_radix" /* 64-bit native ISA 3.0 (POWER9) radix */
#define MMU_TYPE_8xx "mmu_8xx" /* 8xx quicc TLB */
#endif /* _MACHINE_MMUVAR_H_ */


@ -106,14 +106,27 @@
#define PAGE_SIZE (1 << PAGE_SHIFT) /* Page size */
#define PAGE_MASK (PAGE_SIZE - 1)
#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
#define NPDEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
#define MAXPAGESIZES 1 /* maximum number of supported page sizes */
#define L1_PAGE_SIZE_SHIFT 39
#define L1_PAGE_SIZE (1UL<<L1_PAGE_SIZE_SHIFT)
#define L1_PAGE_MASK (L1_PAGE_SIZE-1)
#define L2_PAGE_SIZE_SHIFT 30
#define L2_PAGE_SIZE (1UL<<L2_PAGE_SIZE_SHIFT)
#define L2_PAGE_MASK (L2_PAGE_SIZE-1)
#define L3_PAGE_SIZE_SHIFT 21
#define L3_PAGE_SIZE (1UL<<L3_PAGE_SIZE_SHIFT)
#define L3_PAGE_MASK (L3_PAGE_SIZE-1)
#define MAXPAGESIZES 3 /* maximum number of supported page sizes */
#define RELOCATABLE_KERNEL 1 /* kernel may relocate during startup */
#ifndef KSTACK_PAGES
#ifdef __powerpc64__
#define KSTACK_PAGES 8 /* includes pcb */
#define KSTACK_PAGES 12 /* includes pcb */
#else
#define KSTACK_PAGES 4 /* includes pcb */
#endif
@ -126,6 +139,9 @@
*/
#define trunc_page(x) ((x) & ~(PAGE_MASK))
#define round_page(x) (((x) + PAGE_MASK) & ~PAGE_MASK)
#define trunc_2mpage(x) ((unsigned long)(x) & ~L3_PAGE_MASK)
#define round_2mpage(x) ((((unsigned long)(x)) + L3_PAGE_MASK) & ~L3_PAGE_MASK)
#define trunc_1gpage(x) ((unsigned long)(x) & ~L2_PAGE_MASK)
#define atop(x) ((x) >> PAGE_SHIFT)
#define ptoa(x) ((x) << PAGE_SHIFT)
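
A quick check of the geometry these shifts imply, assuming 4 KB base pages and
8-byte page table entries (matching the level comments added in pmap.h):

	L3_PAGE_SIZE = 512 * 4 KB  = 2 MB   (address space mapped by one leaf page table)
	L2_PAGE_SIZE = 512 * 2 MB  = 1 GB   (one L3 directory)
	L1_PAGE_SIZE = 512 * 1 GB  = 512 GB (one L2 directory)
	64 KB root directory: 8192 entries * 512 GB = 2^52 bytes of address space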


@ -76,6 +76,28 @@
#include <machine/slb.h>
#include <machine/tlb.h>
#include <machine/vmparam.h>
#ifdef __powerpc64__
#include <vm/vm_radix.h>
#endif
/*
* The radix page table structure is described by levels 1-4.
* See Fig 33. on p. 1002 of Power ISA v3.0B
*
* Page directories and tables must be size aligned.
*/
/* Root page directory - 64k -- each entry covers 512GB */
typedef uint64_t pml1_entry_t;
/* l2 page directory - 4k -- each entry covers 1GB */
typedef uint64_t pml2_entry_t;
/* l3 page directory - 4k -- each entry covers 2MB */
typedef uint64_t pml3_entry_t;
/* l4 page directory - 256B/4k -- each entry covers 64k/4k */
typedef uint64_t pml4_entry_t;
typedef uint64_t pt_entry_t;
struct pmap;
typedef struct pmap *pmap_t;
@ -144,7 +166,6 @@ struct pmap {
cpuset_t pm_active;
union {
struct {
#ifdef __powerpc64__
struct slbtnode *pm_slb_tree_root;
struct slb **pm_slb;
@ -156,9 +177,19 @@ struct pmap {
struct pmap *pmap_phys;
struct pvo_tree pmap_pvo;
};
#ifdef __powerpc64__
/* Radix support */
struct {
pml1_entry_t *pm_pml1; /* KVA of root page directory */
struct vm_radix pm_radix; /* spare page table pages */
TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
uint64_t pm_pid; /* PIDR value */
int pm_flags;
};
#endif
struct {
/* TID to identify this pmap entries in TLB */
tlbtid_t pm_tid[MAXCPU];
tlbtid_t pm_tid[MAXCPU];
#ifdef __powerpc64__
/*
@ -177,9 +208,21 @@ struct pmap {
TAILQ_HEAD(, ptbl_buf) pm_ptbl_list;
#endif
};
};
} __aligned(CACHE_LINE_SIZE);
};
/*
* pv_entries are allocated in chunks per-process. This avoids the
* need to track per-pmap assignments.
*/
#define _NPCM 2
#define _NPCPV 126
#define PV_CHUNK_HEADER \
pmap_t pc_pmap; \
TAILQ_ENTRY(pv_chunk) pc_list; \
uint64_t pc_map[_NPCM]; /* bitmap; 1 = free */ \
TAILQ_ENTRY(pv_chunk) pc_lru;
struct pv_entry {
pmap_t pv_pmap;
vm_offset_t pv_va;
@ -187,27 +230,35 @@ struct pv_entry {
};
typedef struct pv_entry *pv_entry_t;
struct pv_chunk_header {
PV_CHUNK_HEADER
};
struct pv_chunk {
PV_CHUNK_HEADER
uint64_t reserved;
struct pv_entry pc_pventry[_NPCPV];
};
struct md_page {
union {
struct {
volatile int32_t mdpg_attrs;
vm_memattr_t mdpg_cache_attrs;
struct pvo_head mdpg_pvoh;
int pv_gen; /* (p) */
};
struct {
TAILQ_HEAD(, pv_entry) pv_list;
int pv_tracked;
};
};
TAILQ_HEAD(, pv_entry) pv_list; /* (p) */
};
#ifdef AIM
#define pmap_page_get_memattr(m) ((m)->md.mdpg_cache_attrs)
#define pmap_page_is_mapped(m) (!LIST_EMPTY(&(m)->md.mdpg_pvoh))
#else
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
#endif
#endif /* AIM */
/*
* Return the VSID corresponding to a given virtual address.
@ -243,7 +294,7 @@ extern struct pmap kernel_pmap_store;
#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, \
(pmap == kernel_pmap) ? "kernelpmap" : \
"pmap", NULL, MTX_DEF)
"pmap", NULL, MTX_DEF | MTX_DUPOK)
#define PMAP_LOCKED(pmap) mtx_owned(&(pmap)->pm_mtx)
#define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
@ -269,6 +320,9 @@ vm_paddr_t pmap_kextract(vm_offset_t);
int pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
boolean_t pmap_mmu_install(char *name, int prio);
const char *pmap_mmu_name(void);
bool pmap_ps_enabled(pmap_t pmap);
int pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags);
boolean_t pmap_page_is_mapped(vm_page_t m);
void pmap_page_array_startup(long count);
@ -281,10 +335,12 @@ extern caddr_t crashdumpmap;
extern vm_offset_t msgbuf_phys;
extern int pmap_bootstrapped;
extern int radix_mmu;
vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size);
void pmap_early_io_unmap(vm_offset_t va, vm_size_t size);
void pmap_track_page(pmap_t pmap, vm_offset_t va);
void pmap_page_print_mappings(vm_page_t m);
static inline int
pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
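
A sizing note on the pv_chunk layout above (a sketch of the implied invariant,
assuming 64-bit pointers and 4 KB pages; these asserts are not in the commit):
the 56-byte header plus the 8-byte reserved word and 126 32-byte pv_entry slots
add up to exactly 4096 bytes, and the two 64-bit pc_map words provide a free
bit for each slot.

_Static_assert(sizeof(struct pv_chunk) == PAGE_SIZE,
    "pv_chunk should fill exactly one page");
_Static_assert(_NPCM * 64 >= _NPCPV,
    "pc_map needs a bit for every pv_entry in a chunk");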


@ -37,12 +37,18 @@
#ifndef _MACHINE_PROC_H_
#define _MACHINE_PROC_H_
struct pmap_invl_gen {
u_long gen; /* (k) */
LIST_ENTRY(pmap_invl_gen) link; /* (pp) */
};
/*
* Machine-dependent part of the proc structure
*/
struct mdthread {
int md_spinlock_count; /* (k) */
register_t md_saved_msr; /* (k) */
struct pmap_invl_gen md_invl_gen;
};
struct mdproc {


@ -70,6 +70,12 @@ struct pate {
u_int64_t proctab;
};
/* Process table entry */
struct prte {
u_int64_t proctab0;
u_int64_t proctab1;
};
typedef struct pte pte_t;
typedef struct lpte lpte_t;
#endif /* LOCORE */
@ -145,6 +151,10 @@ typedef struct lpte lpte_t;
#define RPTE_R 0x0000000000000100ULL
#define RPTE_C 0x0000000000000080ULL
#define RPTE_MANAGED RPTE_SW1
#define RPTE_WIRED RPTE_SW2
#define RPTE_PROMOTED RPTE_SW3
#define RPTE_ATTR_MASK 0x0000000000000030ULL
#define RPTE_ATTR_MEM 0x0000000000000000ULL /* PTE M */
#define RPTE_ATTR_SAO 0x0000000000000010ULL /* PTE WIM */
@ -159,10 +169,12 @@ typedef struct lpte lpte_t;
#define RPDE_VALID RPTE_VALID
#define RPDE_LEAF RPTE_LEAF /* is a PTE: always 0 */
#define RPDE_NLB_MASK 0x0FFFFFFFFFFFFF00ULL
#define RPDE_NLB_MASK 0x00FFFFFFFFFFFF00ULL
#define RPDE_NLB_SHIFT 8
#define RPDE_NLS_MASK 0x000000000000001FULL
#define PG_FRAME (0x000ffffffffff000ul)
#define PG_PS_FRAME (0x000fffffffe00000ul)
/*
* Extract bits from address
*/
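
A hedged sketch of how these masks are typically consumed (illustrative only,
not code from the commit): a non-leaf directory entry carries the real address
of the next-level table in its NLB field, while a leaf PTE carries the 4 KB
frame address in PG_FRAME.

static __inline uint64_t
rpde_next_table(uint64_t pde)
{
	return (pde & RPDE_NLB_MASK);	/* real address of next-level table */
}

static __inline uint64_t
rpte_to_pa(uint64_t pte)
{
	return (pte & PG_FRAME);	/* physical frame of a 4 KB mapping */
}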


@ -130,7 +130,14 @@
#define SRR1_MCHK_DATA 0x00200000 /* Machine check data in DSISR */
#define SRR1_MCHK_IFETCH_M 0x081c0000 /* Machine check instr fetch mask */
#define SRR1_MCHK_IFETCH_SLBMH 0x000c0000 /* SLB multihit */
#define SPR_CFAR 0x01c /* Come From Address Register */
#define SPR_AMR 0x01d /* Authority Mask Register */
#define SPR_PID 0x030 /* 4.. Process ID */
#define SPR_DECAR 0x036 /* ..8 Decrementer auto reload */
#define SPR_IAMR 0x03d /* Instr. Authority Mask Reg */
#define SPR_EIE 0x050 /* ..8 Exception Interrupt ??? */
#define SPR_EID 0x051 /* ..8 Exception Interrupt ??? */
#define SPR_NRI 0x052 /* ..8 Exception Interrupt ??? */
@ -155,6 +162,7 @@
#define FSCR_TAR 0x0000000000000100 /* TAR register available */
#define FSCR_EBB 0x0000000000000080 /* Event-based branch available */
#define FSCR_DSCR 0x0000000000000004 /* DSCR available in PR state */
#define SPR_UAMOR 0x09d /* User Authority Mask Override Register */
#define SPR_DPDES 0x0b0 /* .6. Directed Privileged Doorbell Exception State Register */
#define SPR_USPRG0 0x100 /* 4.8 User SPR General 0 */
#define SPR_VRSAVE 0x100 /* .6. AltiVec VRSAVE */
@ -285,6 +293,8 @@
#define SPR_LPCR 0x13e /* .6. Logical Partitioning Control */
#define LPCR_LPES 0x008 /* Bit 60 */
#define LPCR_HVICE 0x002 /* Hypervisor Virtualization Interrupt (Arch 3.0) */
#define LPCR_UPRT (1ULL << 22) /* Use Process Table (ISA 3) */
#define LPCR_HR (1ULL << 20) /* Host Radix mode */
#define LPCR_PECE_DRBL (1ULL << 16) /* Directed Privileged Doorbell */
#define LPCR_PECE_HDRBL (1ULL << 15) /* Directed Hypervisor Doorbell */
#define LPCR_PECE_EXT (1ULL << 14) /* External exceptions */
@ -294,6 +304,7 @@
#define SPR_LPID 0x13f /* .6. Logical Partitioning Control */
#define SPR_HMER 0x150 /* Hypervisor Maintenance Exception Register */
#define SPR_HMEER 0x151 /* Hypervisor Maintenance Exception Enable Register */
#define SPR_AMOR 0x15d /* Authority Mask Override Register */
#define SPR_TIR 0x1be /* .6. Thread Identification Register */
#define SPR_PTCR 0x1d0 /* Partition Table Control Register */


@ -53,7 +53,7 @@
#define KERNEL2_SEGMENT (0xfffff0 + KERNEL2_SR)
#define EMPTY_SEGMENT 0xfffff0
#ifdef __powerpc64__
#define USER_ADDR 0xeffffffff0000000UL
#define USER_ADDR 0xc00ffffff0000000UL
#else
#define USER_ADDR ((uintptr_t)USER_SR << ADDR_SR_SHFT)
#endif


@ -81,15 +81,23 @@
* Would like to have MAX addresses = 0, but this doesn't (currently) work
*/
#ifdef __powerpc64__
/*
* Virtual addresses of things. Derived from the page directory and
* page table indexes from pmap.h for precision.
*
* kernel map should be able to start at 0xc008000000000000 -
* but at least the functional simulator doesn't like it
*
* 0x0000000000000000 - 0x000fffffffffffff user map
* 0xc000000000000000 - 0xc007ffffffffffff direct map
* 0xc008000000000000 - 0xc00fffffffffffff kernel map
*
*/
#define VM_MIN_ADDRESS 0x0000000000000000
#ifdef BOOKE
#define VM_MAXUSER_ADDRESS 0x000ffffffffff000
#else
#define VM_MAXUSER_ADDRESS 0x3ffffffffffff000
#endif
#define VM_MAX_ADDRESS 0xffffffffffffffff
#define VM_MIN_KERNEL_ADDRESS 0xe000000000000000
#define VM_MAX_KERNEL_ADDRESS 0xe0000007ffffffff
#define VM_MAXUSER_ADDRESS 0x000fffffc0000000
#define VM_MAX_ADDRESS 0xc00fffffffffffff
#define VM_MIN_KERNEL_ADDRESS 0xc008000000000000
#define VM_MAX_KERNEL_ADDRESS 0xc0080007ffffffff
#define VM_MAX_SAFE_KERNEL_ADDRESS VM_MAX_KERNEL_ADDRESS
#else
#define VM_MIN_ADDRESS 0
@ -137,7 +145,11 @@ struct pmap_physseg {
};
#endif
#define VM_PHYSSEG_MAX 16
#ifdef __powerpc64__
#define VM_PHYSSEG_MAX 63 /* 1? */
#else
#define VM_PHYSSEG_MAX 16 /* 1? */
#endif
#define PHYS_AVAIL_SZ 256 /* Allows up to 16GB Ram on pSeries with
* logical memory block size of 64MB.
@ -176,14 +188,35 @@ struct pmap_physseg {
/*
* The largest allocation size is 4MB.
*/
#ifdef __powerpc64__
#define VM_NFREEORDER 13
#else
#define VM_NFREEORDER 11
#endif
#ifndef VM_NRESERVLEVEL
#ifdef __powerpc64__
#define VM_NRESERVLEVEL 1
#else
/*
* Disable superpage reservations.
*/
#ifndef VM_NRESERVLEVEL
#define VM_NRESERVLEVEL 0
#endif
#endif
/*
* Level 0 reservations consist of 512 pages.
*/
#ifndef VM_LEVEL_0_ORDER
#define VM_LEVEL_0_ORDER 9
#endif
#ifdef __powerpc64__
#ifdef SMP
#define PA_LOCK_COUNT 256
#endif
#endif
#ifndef VM_INITIAL_PAGEIN
#define VM_INITIAL_PAGEIN 16
@ -216,7 +249,19 @@ struct pmap_physseg {
VM_MIN_KERNEL_ADDRESS + 1) * 2 / 5)
#endif
#ifdef __powerpc64__
#define ZERO_REGION_SIZE (2 * 1024 * 1024) /* 2MB */
#else
#define ZERO_REGION_SIZE (64 * 1024) /* 64KB */
#endif
/*
* Use a fairly large batch size since we expect ppc64 systems to have lots of
* memory.
*/
#ifdef __powerpc64__
#define VM_BATCHQUEUE_SIZE 31
#endif
/*
* On 32-bit OEA, the only purpose for which sf_buf is used is to implement
@ -239,7 +284,8 @@ struct pmap_physseg {
#ifndef LOCORE
#ifdef __powerpc64__
#define DMAP_BASE_ADDRESS 0xc000000000000000UL
#define DMAP_MAX_ADDRESS 0xcfffffffffffffffUL
#define DMAP_MIN_ADDRESS DMAP_BASE_ADDRESS
#define DMAP_MAX_ADDRESS 0xc007ffffffffffffUL
#else
#define DMAP_BASE_ADDRESS 0x00000000UL
#define DMAP_MAX_ADDRESS 0xbfffffffUL
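
A minimal sketch of the direct-map translation the new 64-bit ranges allow
(an assumption for illustration, not a helper from the diff: physical addresses
are taken to stay below the 2^51-byte window, so OR-ing in the base suffices):

static __inline unsigned long
pa_to_dmap_va(unsigned long pa)
{
	/* 0xc000000000000000 .. 0xc007ffffffffffff covers 2^51 bytes */
	return (pa | DMAP_BASE_ADDRESS);
}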


@ -145,7 +145,7 @@ extern vm_paddr_t kernload;
extern void *ap_pcpu;
struct pcpu __pcpu[MAXCPU];
struct pcpu __pcpu[MAXCPU] __aligned(PAGE_SIZE);
static char init_kenv[2048];
static struct trapframe frame0;


@ -147,6 +147,11 @@ CODE {
{
return (NULL);
}
static boolean_t mmu_null_ps_enabled(mmu_t mmu)
{
return (FALSE);
}
};
@ -1085,3 +1090,13 @@ METHOD void page_array_startup {
mmu_t _mmu;
long _pages;
};
METHOD boolean_t page_is_mapped {
mmu_t _mmu;
vm_page_t _pg;
} DEFAULT;
METHOD boolean_t ps_enabled {
mmu_t _mmu;
pmap_t _pmap;
} DEFAULT mmu_null_ps_enabled;


@ -617,6 +617,20 @@ pmap_page_array_startup(long pages)
MMU_PAGE_ARRAY_STARTUP(mmu_obj, pages);
}
boolean_t
pmap_page_is_mapped(vm_page_t m)
{
CTR2(KTR_PMAP, "%s(%p)", __func__, m);
return (MMU_PAGE_IS_MAPPED(mmu_obj, m));
}
bool
pmap_ps_enabled(pmap_t pmap)
{
CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
return (MMU_PS_ENABLED(mmu_obj, pmap));
}
/*
* MMU install routines. Highest priority wins, equal priority also
* overrides allowing last-set to win.


@ -260,7 +260,9 @@ trap(struct trapframe *frame)
#if defined(__powerpc64__) && defined(AIM)
case EXC_ISE:
case EXC_DSE:
if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
/* DSE/ISE are automatically fatal with radix pmap. */
if (radix_mmu ||
handle_user_slb_spill(&p->p_vmspace->vm_pmap,
(type == EXC_ISE) ? frame->srr0 : frame->dar) != 0){
sig = SIGSEGV;
ucode = SEGV_MAPERR;
@ -444,6 +446,9 @@ trap(struct trapframe *frame)
break;
#if defined(__powerpc64__) && defined(AIM)
case EXC_DSE:
/* DSE on radix mmu is automatically fatal. */
if (radix_mmu)
break;
if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0 &&
(frame->dar & SEGMENT_MASK) == USER_ADDR) {
__asm __volatile ("slbmte %0, %1" ::
@ -738,7 +743,33 @@ trap_pfault(struct trapframe *frame, bool user, int *signo, int *ucode)
else
ftype = VM_PROT_READ;
}
#if defined(__powerpc64__) && defined(AIM)
if (radix_mmu && pmap_nofault(&p->p_vmspace->vm_pmap, eva, ftype) == 0)
return (true);
#endif
if (__predict_false((td->td_pflags & TDP_NOFAULTING) == 0)) {
/*
* If we get a page fault while in a critical section, then
* it is most likely a fatal kernel page fault. The kernel
* is already going to panic trying to get a sleep lock to
* do the VM lookup, so just consider it a fatal trap so the
* kernel can print out a useful trap message and even get
* to the debugger.
*
* If we get a page fault while holding a non-sleepable
* lock, then it is most likely a fatal kernel page fault.
* If WITNESS is enabled, then it's going to whine about
* bogus LORs with various VM locks, so just skip to the
* fatal trap handling directly.
*/
if (td->td_critnest != 0 ||
WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
"Kernel page fault") != 0) {
trap_fatal(frame);
return (false);
}
}
if (user) {
KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL"));
map = &p->p_vmspace->vm_map;


@ -299,7 +299,7 @@ static int
vm_fault_soft_fast(struct faultstate *fs)
{
vm_page_t m, m_map;
#if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
#if (defined(__aarch64__) || defined(__amd64__) || defined(__powerpc64__) || (defined(__arm__) && \
__ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
VM_NRESERVLEVEL > 0
vm_page_t m_super;
@ -320,7 +320,7 @@ vm_fault_soft_fast(struct faultstate *fs)
}
m_map = m;
psind = 0;
#if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
#if (defined(__aarch64__) || defined(__amd64__) || defined(__powerpc64__) || (defined(__arm__) && \
__ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
VM_NRESERVLEVEL > 0
if ((m->flags & PG_FICTITIOUS) == 0 &&
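
Worked note on why powerpc64 now qualifies for this fast path: with the
vmparam.h changes above, VM_NRESERVLEVEL is 1 and VM_LEVEL_0_ORDER is 9, so a
level-0 reservation is 512 * 4 KB = 2 MB, which equals L3_PAGE_SIZE; a fully
populated reservation can therefore be mapped with a single 2 MB radix leaf
entry instead of 512 individual 4 KB mappings.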