Apply r350463 (43ded0a321) to powerpc64 radix pmap

Invalidate the last page of a demoted superpage mapping, instead of the
first page, as doing so results in slightly more promotions and fewer
promotion failures.

While here, replace 'boolean_t's with 'bool's in mmu_radix_advise().
commit 811e645d28
parent 09947faee8
Author: Justin Hibbits
Date:   2021-05-09 19:19:07 -05:00
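For reference, a minimal userland sketch of the new last-page selection
performed in the second hunk below. The helper name last_page_in_range and
the sample addresses are illustrative only; sva, va_next, eva, and the
KASSERT follow the diff, and PAGE_SIZE is assumed to be 4 KB:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL		/* assumption: 4 KB base pages */

/*
 * Pick the last page in [sva, min(va_next, eva)), as the new code in
 * the second hunk does.  Hypothetical helper for illustration; the
 * kernel open-codes this arithmetic.
 */
static uintptr_t
last_page_in_range(uintptr_t sva, uintptr_t va_next, uintptr_t eva)
{
	uintptr_t va;

	va = eva;			/* end of the advised range... */
	if (va > va_next)
		va = va_next;		/* ...clamped to this superpage */
	va -= PAGE_SIZE;		/* base address of the last page */
	assert(va >= sva);		/* mirrors the KASSERT in the diff */
	return (va);
}

int
main(void)
{
	/* 2 MB superpage at 0x200000; the advised range ends inside it. */
	printf("%#lx\n",
	    (unsigned long)last_page_in_range(0x200000, 0x400000, 0x300000));
	/* Prints 0x2ff000: one page below min(va_next, eva). */
	return (0);
}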


@@ -2207,11 +2207,11 @@ mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 	pt_entry_t *pte;
 	vm_offset_t va, va_next;
 	vm_page_t m;
-	boolean_t anychanged;
+	bool anychanged;
 
 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
 		return;
-	anychanged = FALSE;
+	anychanged = false;
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = va_next) {
 		l1e = pmap_pml1e(pmap, sva);
@@ -2252,17 +2252,25 @@ mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 			/*
 			 * Unless the page mappings are wired, remove the
 			 * mapping to a single page so that a subsequent
-			 * access may repromote.  Since the underlying page
-			 * table page is fully populated, this removal never
-			 * frees a page table page.
+			 * access may repromote.  Choosing the last page
+			 * within the address range [sva, min(va_next, eva))
+			 * generally results in more repromotions.  Since the
+			 * underlying page table page is fully populated, this
+			 * removal never frees a page table page.
			 */
 			if ((oldl3e & PG_W) == 0) {
-				pte = pmap_l3e_to_pte(l3e, sva);
+				va = eva;
+				if (va > va_next)
+					va = va_next;
+				va -= PAGE_SIZE;
+				KASSERT(va >= sva,
+				    ("mmu_radix_advise: no address gap"));
+				pte = pmap_l3e_to_pte(l3e, va);
 				KASSERT((be64toh(*pte) & PG_V) != 0,
 				    ("pmap_advise: invalid PTE"));
-				pmap_remove_pte(pmap, pte, sva, be64toh(*l3e), NULL,
+				pmap_remove_pte(pmap, pte, va, be64toh(*l3e), NULL,
 				    &lock);
-				anychanged = TRUE;
+				anychanged = true;
 			}
 			if (lock != NULL)
 				rw_wunlock(lock);
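The be64toh()/htobe64() conversions in the KASSERTs above and in the
atomic_clear_long() call in the next hunk reflect that the radix page
table holds its entries in big-endian format in memory, so the pmap
byte-swaps on access (a no-op on a big-endian kernel). A standalone
sketch of that convention; the PG_V/PG_A values here are illustrative,
not the kernel's definitions:

#include <sys/endian.h>	/* be64toh()/htobe64(); <endian.h> on glibc */
#include <stdint.h>
#include <stdio.h>

#define PG_V	0x1ULL	/* illustrative "valid" bit */
#define PG_A	0x4ULL	/* illustrative "accessed" bit */

int
main(void)
{
	/* A PTE as stored in the radix tree: big-endian in memory. */
	uint64_t pte = htobe64(PG_V | PG_A);

	/* Reads convert to host order first, as in the KASSERTs above. */
	printf("valid: %d\n", (be64toh(pte) & PG_V) != 0);

	/*
	 * Writes convert the mask instead, mirroring
	 * atomic_clear_long(pte, htobe64(PG_A)) in the next hunk.
	 */
	pte &= ~htobe64(PG_A);
	printf("accessed after clear: %d\n", (be64toh(pte) & PG_A) != 0);
	return (0);
}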
@@ -2291,7 +2299,7 @@ mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 					atomic_clear_long(pte, htobe64(PG_A));
 				else
 					goto maybe_invlrng;
-				anychanged = TRUE;
+				anychanged = true;
 				continue;
 maybe_invlrng:
 				if (va != va_next) {
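For context, this code runs when userland calls madvise(2) with
MADV_DONTNEED or MADV_FREE, per the check at the top of the function. A
FreeBSD userland sketch that exercises the demote-then-invalidate path;
MAP_ALIGNED_SUPER and the sizes are illustrative, and superpage
promotion is not guaranteed:

#include <sys/mman.h>
#include <err.h>
#include <stddef.h>

int
main(void)
{
	size_t len = 4 * 1024 * 1024;	/* room for 2 MB superpages */
	char *p;

	/* MAP_ALIGNED_SUPER requests superpage-friendly alignment. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE | MAP_ALIGNED_SUPER, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	/* Touch every base page so the pmap may promote to superpages. */
	for (size_t i = 0; i < len; i += 4096)
		p[i] = 1;

	/*
	 * Advising part of the region demotes any covering superpage
	 * mapping; with this change the pmap then removes the mapping of
	 * the last page in the advised range, rather than the first, so a
	 * later access can repromote.
	 */
	if (madvise(p, len / 2, MADV_FREE) != 0)
		err(1, "madvise");
	return (0);
}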