Eliminate pmap_growkernel()'s dependence on create_pagetables() preallocating
page directory pages from VM_MIN_KERNEL_ADDRESS through the end of the
kernel's bss.  Specifically, the dependence was in pmap_growkernel()'s one-
time initialization of kernel_vm_end, not in its main body.  (I could not,
however, resist the urge to optimize the main body.)

Reduce the number of preallocated page directory pages to just those needed
to support NKPT page table pages.  (In fact, this allows me to revert a
couple of my earlier changes to create_pagetables().)

Author: Alan Cox
Date:   2008-07-08 22:59:17 +00:00
Commit: 8136b7265f (parent: 1566e059bf)

3 changed files with 34 additions and 29 deletions
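In outline: kernel_vm_end used to start at zero, and pmap_growkernel() discovered its real value on first call by scanning the bootstrap-preallocated PDEs upward from VM_MIN_KERNEL_ADDRESS; after this commit it is initialized statically, and an early return covers the preallocated window. A toy userspace model of the old lazy scan versus the new static bound (names and the NKPT value here are illustrative, not kernel code):

```c
#include <stdio.h>

#define PG_V    0x1ul
#define NKPT    32                      /* illustrative bootstrap PT page count */

static unsigned long fake_pde[NKPT + 1]; /* last entry left invalid */

int
main(void)
{
        int i, nvalid = 0;

        for (i = 0; i < NKPT; i++)
                fake_pde[i] = PG_V;     /* "preallocated" page table pages */

        /* Old behavior: discover the end of the valid range at run time. */
        while (fake_pde[nvalid] & PG_V)
                nvalid++;
        printf("lazy scan found %d preallocated page table pages\n", nvalid);

        /* New behavior: the bound is known statically, no scan needed. */
        printf("static bound: %d\n", NKPT);
        return (0);
}
```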

sys/amd64/amd64/pmap.c

@@ -175,7 +175,7 @@ vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
 static int ndmpdp;
 static vm_paddr_t dmaplimit;
-vm_offset_t kernel_vm_end;
+vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
 pt_entry_t pg_nx;
 
 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
@@ -446,19 +446,15 @@ create_pagetables(vm_paddr_t *firstaddr)
         /* Now map the page tables at their location within PTmap */
         for (i = 0; i < NKPT; i++) {
-                ((pd_entry_t *)KPDphys)[(KERNBASE - VM_MIN_KERNEL_ADDRESS) /
-                    NBPDR + i] = KPTphys + (i << PAGE_SHIFT);
-                ((pd_entry_t *)KPDphys)[(KERNBASE - VM_MIN_KERNEL_ADDRESS) /
-                    NBPDR + i] |= PG_RW | PG_V;
+                ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
+                ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
         }
 
         /* Map from zero to end of allocations under 2M pages */
         /* This replaces some of the KPTphys entries above */
         for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
-                ((pd_entry_t *)KPDphys)[(KERNBASE - VM_MIN_KERNEL_ADDRESS) /
-                    NBPDR + i] = i << PDRSHIFT;
-                ((pd_entry_t *)KPDphys)[(KERNBASE - VM_MIN_KERNEL_ADDRESS) /
-                    NBPDR + i] |= PG_RW | PG_V | PG_PS | PG_G;
+                ((pd_entry_t *)KPDphys)[i] = i << PDRSHIFT;
+                ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
         }
 
         /* And connect up the PD to the PDP */
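A note on why the array index collapses to plain i: KPDPI moves to KERNBASE's own PDP slot in this commit (see the pmap.h hunk below), so entry 0 of the preallocated page directory now corresponds to KERNBASE itself and the skew term (KERNBASE - VM_MIN_KERNEL_ADDRESS) / NBPDR drops out. Under the old -7GB layout that skew was substantial; a standalone recomputation (addresses assumed from the pre-commit headers, not part of the diff):

```c
#include <stdio.h>

int
main(void)
{
        /* Old layout, assumed from the pre-commit headers: */
        unsigned long vm_min   = 0xfffffffe40000000ul;  /* -7GB */
        unsigned long kernbase = 0xffffffff80000000ul;  /* -2GB */
        unsigned long nbpdr    = 2ul << 20;             /* 2MB per PDE */

        /* 5GB / 2MB = 2560 page directory entries of skew. */
        printf("old KPD index skew: %lu\n", (kernbase - vm_min) / nbpdr);
        return (0);
}
```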
@@ -1703,25 +1699,34 @@ pmap_growkernel(vm_offset_t addr)
         vm_paddr_t paddr;
         vm_page_t nkpg;
         pd_entry_t *pde, newpdir;
-        pdp_entry_t newpdp;
+        pdp_entry_t *pdpe;
 
         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
-        if (kernel_vm_end == 0) {
-                kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
-                while ((*pmap_pde(kernel_pmap, kernel_vm_end) & PG_V) != 0) {
-                        kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
-                        if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-                                kernel_vm_end = kernel_map->max_offset;
-                                break;
-                        }
-                }
-        }
+
+        /*
+         * Return if "addr" is within the range of kernel page table pages
+         * that were preallocated during pmap bootstrap.  Moreover, leave
+         * "kernel_vm_end" and the kernel page table as they were.
+         *
+         * The correctness of this action is based on the following
+         * argument: vm_map_findspace() allocates contiguous ranges of the
+         * kernel virtual address space.  It calls this function if a range
+         * ends after "kernel_vm_end".  If the kernel is mapped between
+         * "kernel_vm_end" and "addr", then the range cannot begin at
+         * "kernel_vm_end".  In fact, its beginning address cannot be less
+         * than the kernel.  Thus, there is no immediate need to allocate
+         * any new kernel page table pages between "kernel_vm_end" and
+         * "KERNBASE".
+         */
+        if (KERNBASE < addr && addr <= KERNBASE + NKPT * NBPDR)
+                return;
+
         addr = roundup2(addr, PAGE_SIZE * NPTEPG);
         if (addr - 1 >= kernel_map->max_offset)
                 addr = kernel_map->max_offset;
         while (kernel_vm_end < addr) {
-                pde = pmap_pde(kernel_pmap, kernel_vm_end);
-                if (pde == NULL) {
+                pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
+                if ((*pdpe & PG_V) == 0) {
                         /* We need a new PDP entry */
                         nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT,
                             VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
@@ -1731,11 +1736,11 @@ pmap_growkernel(vm_offset_t addr)
                         if ((nkpg->flags & PG_ZERO) == 0)
                                 pmap_zero_page(nkpg);
                         paddr = VM_PAGE_TO_PHYS(nkpg);
-                        newpdp = (pdp_entry_t)
+                        *pdpe = (pdp_entry_t)
                                 (paddr | PG_V | PG_RW | PG_A | PG_M);
-                        *pmap_pdpe(kernel_pmap, kernel_vm_end) = newpdp;
                         continue; /* try again */
                 }
+                pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
                 if ((*pde & PG_V) != 0) {
                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
@@ -1754,7 +1759,7 @@ pmap_growkernel(vm_offset_t addr)
                         pmap_zero_page(nkpg);
                 paddr = VM_PAGE_TO_PHYS(nkpg);
                 newpdir = (pd_entry_t) (paddr | PG_V | PG_RW | PG_A | PG_M);
-                *pmap_pde(kernel_pmap, kernel_vm_end) = newpdir;
+                pde_store(pde, newpdir);
 
                 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
                 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
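Beyond removing the lazy initialization, the loop body now walks the paging hierarchy only once per iteration: it reads the PDP entry directly, derives the PDE from it with pmap_pdpe_to_pde(), and writes through the cached pointer via pde_store(), instead of re-walking from the PML4 with pmap_pde() on every access. The relationship between the helpers is roughly the following (a sketch assuming FreeBSD's amd64 pmap internals, not the verbatim source):

```c
/*
 * Sketch: pmap_pde() is essentially a PML4 -> PDP walk followed by an
 * index into the page directory page, so a caller that already holds
 * the PDP entry can skip straight to pmap_pdpe_to_pde().
 */
static __inline pd_entry_t *
sketch_pmap_pde(pmap_t pmap, vm_offset_t va)
{
        pdp_entry_t *pdpe;

        pdpe = pmap_pdpe(pmap, va);             /* PML4 -> PDP */
        if (pdpe == NULL || (*pdpe & PG_V) == 0)
                return (NULL);                  /* no PD page yet */
        return (pmap_pdpe_to_pde(pdpe, va));    /* PDP -> PD slot for va */
}
```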

sys/amd64/include/pmap.h

@@ -115,7 +115,7 @@
 #endif
 
 #define NKPML4E         1               /* number of kernel PML4 slots */
-#define NKPDPE          6               /* number of kernel PDP slots */
+#define NKPDPE          howmany(NKPT, NPDEPG)/* number of kernel PDP slots */
 
 #define NUPML4E         (NPML4EPG/2)    /* number of userland PML4 pages */
 #define NUPDPE          (NUPML4E*NPDPEPG)/* number of userland PDP pages */
@@ -131,7 +131,7 @@
 #define KPML4I          (NPML4EPG-1)    /* Top 512GB for KVM */
 #define DMPML4I         (KPML4I-1)      /* Next 512GB down for direct map */
 
-#define KPDPI           (NPDPEPG-7)     /* kernel map starts at -7GB */
+#define KPDPI           (NPDPEPG-2)     /* kernbase at -2GB */
 
 /*
  * XXX doesn't really belong here I guess...
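The new NKPDPE derives the PDP slot count from NKPT instead of hardwiring 6: howmany() is the classic round-up division from sys/param.h, and since NPDEPG == 512 on amd64, any NKPT up to 512 now costs a single preallocated PDP slot. A standalone check (the NKPT value here is illustrative):

```c
#include <stdio.h>

#define howmany(x, y)   (((x) + ((y) - 1)) / (y))
#define NPDEPG          512     /* PDEs per page directory page (amd64) */

int
main(void)
{
        /* e.g., 32 preallocated kernel page table pages */
        printf("NKPDPE = %d\n", howmany(32, NPDEPG));   /* prints 1 */
        return (0);
}
```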

sys/amd64/include/vmparam.h

@@ -163,12 +163,12 @@
  */
 #define VM_MAX_KERNEL_ADDRESS   KVADDR(KPML4I, NPDPEPG-1, NPDEPG-1, NPTEPG-1)
-#define VM_MIN_KERNEL_ADDRESS   KVADDR(KPML4I, KPDPI, 0, 0)
+#define VM_MIN_KERNEL_ADDRESS   KVADDR(KPML4I, NPDPEPG-7, 0, 0)
 
 #define DMAP_MIN_ADDRESS        KVADDR(DMPML4I, 0, 0, 0)
 #define DMAP_MAX_ADDRESS        KVADDR(DMPML4I+1, 0, 0, 0)
 
-#define KERNBASE                MAX(0xffffffff80000000ul, VM_MIN_KERNEL_ADDRESS)
+#define KERNBASE                KVADDR(KPML4I, KPDPI, 0, 0)
 
 #define UPT_MAX_ADDRESS         KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I)
 #define UPT_MIN_ADDRESS         KVADDR(PML4PML4I, 0, 0, 0)
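With these definitions, KVADDR() assembles a canonical, sign-extended virtual address from page-table indices, putting KERNBASE at the traditional -2GB while the kernel map proper still begins at -7GB. A standalone recomputation (shift constants assumed from the amd64 headers; this KVADDR variant takes only the top two levels):

```c
#include <stdio.h>

/* amd64 constants, assumed from <machine/pmap.h> / <machine/vmparam.h> */
#define PML4SHIFT       39
#define PDPSHIFT        30
#define NPML4EPG        512
#define NPDPEPG         512
#define KPML4I          (NPML4EPG - 1)  /* 511: top 512GB slot */
#define KPDPI           (NPDPEPG - 2)   /* 510: new value from this commit */

/* Canonical kernel VA from PML4/PDP indices (lower levels zero). */
#define KVADDR(l4, l3)                                                  \
        ((~0ul << 47) |                                                 \
         ((unsigned long)(l4) << PML4SHIFT) |                           \
         ((unsigned long)(l3) << PDPSHIFT))

int
main(void)
{
        printf("KERNBASE              = %#lx\n", KVADDR(KPML4I, KPDPI));
        printf("VM_MIN_KERNEL_ADDRESS = %#lx\n", KVADDR(KPML4I, NPDPEPG - 7));
        return (0);     /* 0xffffffff80000000 and 0xfffffffe40000000 */
}
```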