Export the pmap_cache_bits() and pmap_pinit_pml4() functions from the
amd64 pmap.

The new pmap_pinit_pml4() function initializes the level 4 (PML4) page
table with entries for the kernel mappings. Both functions are needed
for upcoming EFI Runtime Services support.

Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
This commit is contained in:
parent
bc116e04c0
commit
195a6bb9e6
@ -1324,7 +1324,7 @@ pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
|
||||
* Determine the appropriate bits to set in a PTE or PDE for a specified
|
||||
* caching mode.
|
||||
*/
|
||||
static int
|
||||
int
|
||||
pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
|
||||
{
|
||||
int cache_bits, pat_flag, pat_idx;
|
||||
@ -2345,6 +2345,29 @@ pmap_pinit0(pmap_t pmap)
|
||||
CPU_FILL(&kernel_pmap->pm_active);
|
||||
}
|
||||
|
||||
void
|
||||
pmap_pinit_pml4(vm_page_t pml4pg)
|
||||
{
|
||||
pml4_entry_t *pm_pml4;
|
||||
int i;
|
||||
|
||||
pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
|
||||
|
||||
/* Wire in kernel global address entries. */
|
||||
for (i = 0; i < NKPML4E; i++) {
|
||||
pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW |
|
||||
X86_PG_V | PG_U;
|
||||
}
|
||||
for (i = 0; i < ndmpdpphys; i++) {
|
||||
pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW |
|
||||
X86_PG_V | PG_U;
|
||||
}
|
||||
|
||||
/* install self-referential address mapping entry(s) */
|
||||
pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW |
|
||||
X86_PG_A | X86_PG_M;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize a preallocated and zeroed pmap structure,
|
||||
* such as one in a vmspace structure.
|
||||
@ -2381,20 +2404,7 @@ pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
|
||||
*/
|
||||
if ((pmap->pm_type = pm_type) == PT_X86) {
|
||||
pmap->pm_cr3 = pml4phys;
|
||||
|
||||
/* Wire in kernel global address entries. */
|
||||
for (i = 0; i < NKPML4E; i++) {
|
||||
pmap->pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) |
|
||||
X86_PG_RW | X86_PG_V | PG_U;
|
||||
}
|
||||
for (i = 0; i < ndmpdpphys; i++) {
|
||||
pmap->pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) |
|
||||
X86_PG_RW | X86_PG_V | PG_U;
|
||||
}
|
||||
|
||||
/* install self-referential address mapping entry(s) */
|
||||
pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) |
|
||||
X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
|
||||
pmap_pinit_pml4(pml4pg);
|
||||
}
|
||||
|
||||
pmap->pm_root.rt_root = 0;
|
||||
|
@ -391,6 +391,7 @@ struct thread;
|
||||
|
||||
void pmap_activate_sw(struct thread *);
|
||||
void pmap_bootstrap(vm_paddr_t *);
|
||||
int pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde);
|
||||
int pmap_change_attr(vm_offset_t, vm_size_t, int);
|
||||
void pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
|
||||
void pmap_init_pat(void);
|
||||
@ -403,6 +404,7 @@ void *pmap_mapdev(vm_paddr_t, vm_size_t);
|
||||
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
|
||||
boolean_t pmap_page_is_mapped(vm_page_t m);
|
||||
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
|
||||
void pmap_pinit_pml4(vm_page_t);
|
||||
void pmap_unmapdev(vm_offset_t, vm_size_t);
|
||||
void pmap_invalidate_page(pmap_t, vm_offset_t);
|
||||
void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
|
||||
|
Loading…
Reference in New Issue
Block a user