From 195a6bb9e6786b51f359b427af10f9b7132ebeb2 Mon Sep 17 00:00:00 2001
From: Konstantin Belousov
Date: Wed, 21 Sep 2016 10:05:51 +0000
Subject: [PATCH] Export the pmap_cache_bits() and pmap_pinit_pml4() functions
 from the amd64 pmap.

The new pmap_pinit_pml4() function initializes the level 4 page table
with entries for the kernel mappings.  Both functions are needed for
upcoming EFI Runtime Services support.

Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
---
 sys/amd64/amd64/pmap.c   | 40 +++++++++++++++++++++++++---------------
 sys/amd64/include/pmap.h |  2 ++
 2 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index f80b8947d9e4..63042e41dd14 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1324,7 +1324,7 @@ pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
  * Determine the appropriate bits to set in a PTE or PDE for a specified
  * caching mode.
  */
-static int
+int
 pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
 {
 	int cache_bits, pat_flag, pat_idx;
@@ -2345,6 +2345,29 @@ pmap_pinit0(pmap_t pmap)
 	CPU_FILL(&kernel_pmap->pm_active);
 }
 
+void
+pmap_pinit_pml4(vm_page_t pml4pg)
+{
+	pml4_entry_t *pm_pml4;
+	int i;
+
+	pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
+
+	/* Wire in kernel global address entries. */
+	for (i = 0; i < NKPML4E; i++) {
+		pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW |
+		    X86_PG_V | PG_U;
+	}
+	for (i = 0; i < ndmpdpphys; i++) {
+		pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW |
+		    X86_PG_V | PG_U;
+	}
+
+	/* install self-referential address mapping entry(s) */
+	pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW |
+	    X86_PG_A | X86_PG_M;
+}
+
 /*
  * Initialize a preallocated and zeroed pmap structure,
  * such as one in a vmspace structure.
@@ -2381,20 +2404,7 @@ pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
 	 */
 	if ((pmap->pm_type = pm_type) == PT_X86) {
 		pmap->pm_cr3 = pml4phys;
-
-		/* Wire in kernel global address entries. */
-		for (i = 0; i < NKPML4E; i++) {
-			pmap->pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) |
-			    X86_PG_RW | X86_PG_V | PG_U;
-		}
-		for (i = 0; i < ndmpdpphys; i++) {
-			pmap->pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) |
-			    X86_PG_RW | X86_PG_V | PG_U;
-		}
-
-		/* install self-referential address mapping entry(s) */
-		pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) |
-		    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
+		pmap_pinit_pml4(pml4pg);
 	}
 
 	pmap->pm_root.rt_root = 0;
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 3300c47c516e..4d924bdbf31e 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -391,6 +391,7 @@ struct thread;
 
 void	pmap_activate_sw(struct thread *);
 void	pmap_bootstrap(vm_paddr_t *);
+int	pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde);
 int	pmap_change_attr(vm_offset_t, vm_size_t, int);
 void	pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
 void	pmap_init_pat(void);
@@ -403,6 +404,7 @@ void	*pmap_mapdev(vm_paddr_t, vm_size_t);
 void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
 boolean_t pmap_page_is_mapped(vm_page_t m);
 void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
+void	pmap_pinit_pml4(vm_page_t);
 void	pmap_unmapdev(vm_offset_t, vm_size_t);
 void	pmap_invalidate_page(pmap_t, vm_offset_t);
 void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
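
For context only (not part of the patch), the sketch below shows how a
caller that needs its own page-table root, such as the planned EFI
Runtime Services code, might consume the newly exported
pmap_pinit_pml4(): allocate a zeroed PML4 page and let the pmap fill in
the kernel and self-referential entries.  The function efi_alloc_pml4()
and the variable efi_pml4_page are hypothetical names chosen for
illustration; the sketch assumes only stock vm_page allocation,
pmap_zero_page(), and the interface exported by this patch.

/*
 * Illustrative sketch: allocate a zeroed PML4 page for a private
 * address space and pre-load the kernel mappings via pmap_pinit_pml4().
 * Names are hypothetical and not part of this change.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

static vm_page_t efi_pml4_page;

static int
efi_alloc_pml4(void)
{

	/* Ask for a wired, preferably pre-zeroed page with no VM object. */
	efi_pml4_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (efi_pml4_page == NULL)
		return (ENOMEM);
	if ((efi_pml4_page->flags & PG_ZERO) == 0)
		pmap_zero_page(efi_pml4_page);

	/* Fill in the kernel and self-referential PML4 entries. */
	pmap_pinit_pml4(efi_pml4_page);
	return (0);
}

The exported pmap_cache_bits() would similarly let such a caller
translate a VM_MEMATTR_* caching mode into the PTE/PDE bits for the
mappings it builds on top of this root.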