Fix a too-loose assert in pmap_large_unmap().

The upper bound check for a valid large map address used
LARGEMAP_MAX_ADDRESS instead of LARGEMAP_MIN_ADDRESS as its base,
making the assert looser than intended.  Provide a function-like
macro that computes the proper upper bound (see the sketch before
the diff below).

Noted by:	markj
Reviewed by:	alc, markj
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D20386
Konstantin Belousov 2019-05-24 23:28:11 +00:00
parent f3dbf2ca4a
commit 59659e78e1
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=348258
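
To make the wrong-base error concrete, here is a small standalone sketch
comparing the old and new upper bounds.  The constant values below are
placeholders invented for the example; the real LARGEMAP_MIN_ADDRESS,
LARGEMAP_MAX_ADDRESS, and lm_ents come from the amd64 pmap, while NBPML4
is the 512GB span covered by one PML4 slot.

#include <stdio.h>

/* Placeholder values for illustration only. */
#define	LARGEMAP_MIN_ADDRESS	0xfffff80000000000UL	/* assumed */
#define	LARGEMAP_MAX_ADDRESS	0xfffffc0000000000UL	/* assumed */
#define	NBPML4			(1UL << 39)

static unsigned int lm_ents = 8;	/* assumed number of large map slots */

/* The new function-like macro from the change. */
#define	PMAP_LARGEMAP_MAX_ADDRESS()				\
	(LARGEMAP_MIN_ADDRESS + NBPML4 * (unsigned long)lm_ents)

int
main(void)
{
	/* The old assert bound, anchored at MAX instead of MIN. */
	unsigned long old_bound = LARGEMAP_MAX_ADDRESS +
	    NBPML4 * (unsigned long)lm_ents;

	printf("correct upper bound: %#lx\n", PMAP_LARGEMAP_MAX_ADDRESS());
	printf("old assert bound:    %#lx (looser by %#lx)\n", old_bound,
	    old_bound - PMAP_LARGEMAP_MAX_ADDRESS());
	return (0);
}

Since LARGEMAP_MAX_ADDRESS is above LARGEMAP_MIN_ADDRESS, the old
expression admitted addresses beyond the PML4 slots actually reserved
for the large map; that slack is exactly what the new macro removes.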


@@ -421,6 +421,8 @@ static int pmap_flags = PMAP_PDE_SUPERPAGE; /* flags for x86 pmaps */
 static vmem_t *large_vmem;
 static u_int lm_ents;
+#define	PMAP_LARGEMAP_MAX_ADDRESS()				\
+	(LARGEMAP_MIN_ADDRESS + NBPML4 * (u_long)lm_ents)
 
 int pmap_pcid_enabled = 1;
 SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
@@ -1060,6 +1062,7 @@ static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
 		    pd_entry_t pde);
 static void	pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static vm_page_t pmap_large_map_getptp_unlocked(void);
+static vm_paddr_t pmap_large_map_kextract(vm_offset_t va);
 static void	pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
 #if VM_NRESERVLEVEL > 0
 static void	pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
@@ -2961,6 +2964,9 @@ pmap_kextract(vm_offset_t va)
 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
 		pa = DMAP_TO_PHYS(va);
+	} else if (LARGEMAP_MIN_ADDRESS <= va &&
+	    va < PMAP_LARGEMAP_MAX_ADDRESS()) {
+		pa = pmap_large_map_kextract(va);
 	} else {
 		pde = *vtopde(va);
 		if (pde & PG_PS) {
@@ -8775,6 +8781,39 @@ pmap_large_map_pte(vm_offset_t va)
 	return ((pt_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pte_index(va));
 }
 
+static vm_paddr_t
+pmap_large_map_kextract(vm_offset_t va)
+{
+	pdp_entry_t *pdpe, pdp;
+	pd_entry_t *pde, pd;
+	pt_entry_t *pte, pt;
+
+	KASSERT(LARGEMAP_MIN_ADDRESS <= va && va < PMAP_LARGEMAP_MAX_ADDRESS(),
+	    ("not largemap range %#lx", (u_long)va));
+	pdpe = pmap_large_map_pdpe(va);
+	pdp = *pdpe;
+	KASSERT((pdp & X86_PG_V) != 0,
+	    ("invalid pdp va %#lx pdpe %#lx pdp %#lx", va,
+	    (u_long)pdpe, pdp));
+	if ((pdp & X86_PG_PS) != 0) {
+		KASSERT((amd_feature & AMDID_PAGE1GB) != 0,
+		    ("no 1G pages, va %#lx pdpe %#lx pdp %#lx", va,
+		    (u_long)pdpe, pdp));
+		return ((pdp & PG_PS_PDP_FRAME) | (va & PDPMASK));
+	}
+	pde = pmap_pdpe_to_pde(pdpe, va);
+	pd = *pde;
+	KASSERT((pd & X86_PG_V) != 0,
+	    ("invalid pd va %#lx pde %#lx pd %#lx", va, (u_long)pde, pd));
+	if ((pd & X86_PG_PS) != 0)
+		return ((pd & PG_PS_FRAME) | (va & PDRMASK));
+	pte = pmap_pde_to_pte(pde, va);
+	pt = *pte;
+	KASSERT((pt & X86_PG_V) != 0,
+	    ("invalid pte va %#lx pte %#lx pt %#lx", va, (u_long)pte, pt));
+	return ((pt & PG_FRAME) | (va & PAGE_MASK));
+}
+
 static int
 pmap_large_map_getva(vm_size_t len, vm_offset_t align, vm_offset_t phase,
     vmem_addr_t *vmem_res)
@@ -8891,8 +8930,8 @@ pmap_large_unmap(void *svaa, vm_size_t len)
 		return;
 	SLIST_INIT(&spgf);
-	KASSERT(LARGEMAP_MIN_ADDRESS <= sva && sva + len <=
-	    LARGEMAP_MAX_ADDRESS + NBPML4 * (u_long)lm_ents,
+	KASSERT(LARGEMAP_MIN_ADDRESS <= sva &&
+	    sva + len <= PMAP_LARGEMAP_MAX_ADDRESS(),
 	    ("not largemap range %#lx %#lx", (u_long)svaa, (u_long)svaa + len));
 	PMAP_LOCK(kernel_pmap);
 	for (va = sva; va < sva + len; va += inc) {