Fix handling of KVA in kmem_bootstrap_free().

Do not use vm_map_remove() to release KVA back to the system.  Because
kernel map entries do not have an associated VM object, as of r336030
a vm_map_remove() call will not update the kernel page tables.  Avoid
relying on the vm_map layer and instead update the pmap and release the
KVA to the kernel arena directly in kmem_bootstrap_free().
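
In outline, the reworked kmem_bootstrap_free() frees the physical
pages backing the range, tears the mappings down itself, and donates
the KVA to the kernel arena.  A condensed sketch follows (the
per-domain free-queue locking present in the actual diff below is
elided):

	static void
	kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
	{
		vm_offset_t end, va;
		vm_page_t m;

		end = trunc_page(start + size);
		start = round_page(start);

		/* Return the backing physical pages to the allocator. */
		for (va = start; va < end; va += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(pmap_kextract(va));
			vm_phys_free_pages(m, 0);
		}

		/* Update the pmap directly; the vm_map layer will not. */
		pmap_remove(kernel_pmap, start, end);

		/* Hand the now-unmapped KVA to the kernel arena. */
		(void)vmem_add(kernel_arena, start, end - start, M_WAITOK);
	}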

Because the pmap updates will generally result in superpage demotions,
modify pmap_init() to insert PTPs shadowed by superpage mappings into
the kernel pmap's radix tree.
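
For context, the consumer of those insertions is the superpage
demotion path.  A simplified, illustrative sketch using names from the
amd64 pmap (the KASSERT is mine, not the exact demotion code):

	/*
	 * When pmap_remove() demotes a bootstrap 2M kernel mapping, the
	 * demotion code looks up the shadowing PTP in the pmap's radix
	 * tree.  With the pmap_init() change, bootstrap mappings below
	 * KERNend always have one, so demotion need not allocate memory
	 * at a point where an allocation failure would be unrecoverable.
	 */
	mpte = pmap_remove_pt_page(kernel_pmap, va);
	KASSERT(mpte != NULL,
	    ("no shadow PTP for kernel va %#lx", va));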

While here, port r329171 to i386.

Reported by:	alc
Reviewed by:	alc, kib
X-MFC with:	r336505
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D16426
Mark Johnston	2018-07-27 15:46:34 +00:00
commit 6c85795a25 (parent 45ed991d96)
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
	svn path=/head/; revision=336764

3 files changed, 24 insertions(+), 5 deletions(-)

sys/amd64/amd64/pmap.c:

@@ -372,6 +372,8 @@ static u_int64_t DMPDphys;	/* phys addr of direct mapped level 2 */
 static u_int64_t DMPDPphys;	/* phys addr of direct mapped level 3 */
 static int ndmpdpphys;		/* number of DMPDPphys pages */
 
+static vm_paddr_t KERNend;	/* phys addr of end of bootstrap data */
+
 /*
  * pmap_mapdev support pre initialization (i.e. console)
  */
@@ -998,8 +1000,9 @@ create_pagetables(vm_paddr_t *firstaddr)
 	/* Map from zero to end of allocations under 2M pages */
 	/* This replaces some of the KPTphys entries above */
 	for (i = 0; (i << PDRSHIFT) < *firstaddr; i++)
+		/* Preset PG_M and PG_A because demotion expects it. */
 		pd_p[i] = (i << PDRSHIFT) | X86_PG_V | PG_PS | pg_g |
-		    bootaddr_rwx(i << PDRSHIFT);
+		    X86_PG_M | X86_PG_A | bootaddr_rwx(i << PDRSHIFT);
 
 	/*
	 * Because we map the physical blocks in 2M pages, adjust firstaddr
@@ -1091,6 +1094,8 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 	pt_entry_t *pte;
 	int i;
 
+	KERNend = *firstaddr;
+
 	if (!pti)
 		pg_g = X86_PG_G;
@@ -1323,6 +1328,7 @@
 	 * Initialize the vm page array entries for the kernel pmap's
 	 * page table pages.
 	 */
+	PMAP_LOCK(kernel_pmap);
 	for (i = 0; i < nkpt; i++) {
 		mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
 		KASSERT(mpte >= vm_page_array &&
@@ -1331,7 +1337,11 @@
 		mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
 		mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
 		mpte->wire_count = 1;
+		if (i << PDRSHIFT < KERNend &&
+		    pmap_insert_pt_page(kernel_pmap, mpte))
+			panic("pmap_init: pmap_insert_pt_page failed");
 	}
+	PMAP_UNLOCK(kernel_pmap);
 	vm_wire_add(nkpt);
 
 	/*

sys/i386/i386/pmap.c:

@@ -931,6 +931,7 @@ pmap_init(void)
 	 * Initialize the vm page array entries for the kernel pmap's
 	 * page table pages.
 	 */
+	PMAP_LOCK(kernel_pmap);
 	for (i = 0; i < NKPT; i++) {
 		mpte = PHYS_TO_VM_PAGE(KPTphys + ptoa(i));
 		KASSERT(mpte >= vm_page_array &&
@@ -938,7 +939,14 @@
 		    ("pmap_init: page table page is out of range"));
 		mpte->pindex = i + KPTDI;
 		mpte->phys_addr = KPTphys + ptoa(i);
 		mpte->wire_count = 1;
+		if (pseflag != 0 &&
+		    KERNBASE <= i << PDRSHIFT && i << PDRSHIFT < KERNend &&
+		    pmap_insert_pt_page(kernel_pmap, mpte))
+			panic("pmap_init: pmap_insert_pt_page failed");
 	}
+	PMAP_UNLOCK(kernel_pmap);
 	vm_wire_add(NKPT);
 
 	/*
	 * Initialize the address space (zone) for the pv entries.  Set a

sys/vm/vm_kern.c:

@@ -700,16 +700,15 @@ kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
 {
 #if defined(__i386__) || defined(__amd64__)
 	struct vm_domain *vmd;
-	vm_offset_t end;
+	vm_offset_t end, va;
 	vm_paddr_t pa;
 	vm_page_t m;
 
 	end = trunc_page(start + size);
 	start = round_page(start);
 
-	(void)vm_map_remove(kernel_map, start, end);
-
-	for (; start < end; start += PAGE_SIZE) {
-		pa = pmap_kextract(start);
+	for (va = start; va < end; va += PAGE_SIZE) {
+		pa = pmap_kextract(va);
 		m = PHYS_TO_VM_PAGE(pa);
 
 		vmd = vm_pagequeue_domain(m);
@@ -717,6 +716,8 @@ kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
 		vm_phys_free_pages(m, 0);
 		vm_domain_free_unlock(vmd);
 	}
+	pmap_remove(kernel_pmap, start, end);
+	(void)vmem_add(kernel_arena, start, end - start, M_WAITOK);
 #endif
 }