amd64 pmap: handle cases where pml4 page table page is not allocated.

Possible in LA57 pmap config.

Noted by:	alc
Reviewed by:	alc, markj
Sponsored by:	The FreeBSD Foundation
Differential revision:	https://reviews.freebsd.org/D26492
This commit is contained in:
Konstantin Belousov 2020-09-20 22:16:24 +00:00
parent 1440f62266
commit 7149d7209e

View File

@@ -6219,7 +6219,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = va_next) {
 		pml4e = pmap_pml4e(pmap, sva);
-		if ((*pml4e & PG_V) == 0) {
+		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
 			va_next = (sva + NBPML4) & ~PML4MASK;
 			if (va_next < sva)
 				va_next = eva;
@@ -6502,7 +6502,7 @@ pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t newpte, int flags,
 	if (!pmap_pkru_same(pmap, va, va + NBPDP))
 		return (KERN_PROTECTION_FAILURE);
 	pml4e = pmap_pml4e(pmap, va);
-	if ((*pml4e & PG_V) == 0) {
+	if (pml4e == NULL || (*pml4e & PG_V) == 0) {
 		mp = _pmap_allocpte(pmap, pmap_pml4e_pindex(va),
 		    NULL, va);
 		if (mp == NULL) {
@@ -7363,7 +7363,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = va_next) {
 		pml4e = pmap_pml4e(pmap, sva);
-		if ((*pml4e & PG_V) == 0) {
+		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
 			va_next = (sva + NBPML4) & ~PML4MASK;
 			if (va_next < sva)
 				va_next = eva;
@@ -7488,7 +7488,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 	    ("pmap_copy: invalid to pmap_copy page tables"));
 		pml4e = pmap_pml4e(src_pmap, addr);
-		if ((*pml4e & PG_V) == 0) {
+		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
 			va_next = (addr + NBPML4) & ~PML4MASK;
 			if (va_next < addr)
 				va_next = end_addr;
@@ -8571,7 +8571,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = va_next) {
 		pml4e = pmap_pml4e(pmap, sva);
-		if ((*pml4e & PG_V) == 0) {
+		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
 			va_next = (sva + NBPML4) & ~PML4MASK;
 			if (va_next < sva)
 				va_next = eva;
@@ -9795,6 +9795,8 @@ pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num)
 	PMAP_LOCK(pmap);
 	pml4 = pmap_pml4e(pmap, va);
+	if (pml4 == NULL)
+		goto done;
 	ptr[idx++] = *pml4;
 	if ((*pml4 & PG_V) == 0)
 		goto done;
@@ -10893,7 +10895,7 @@ pmap_pkru_update_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 	for (changed = false, va = sva; va < eva; va = va_next) {
 		pml4e = pmap_pml4e(pmap, va);
-		if ((*pml4e & X86_PG_V) == 0) {
+		if (pml4e == NULL || (*pml4e & X86_PG_V) == 0) {
 			va_next = (va + NBPML4) & ~PML4MASK;
 			if (va_next < va)
 				va_next = eva;