In pmap_advise(), when we encounter a superpage mapping, we first demote the
mapping and then destroy one of the 4 KB page mappings so that there is a
potential trigger for repromotion.  Currently, we destroy the first 4 KB
page mapping that falls within the (current) superpage mapping or the
virtual address range [sva, eva).  However, I have found empirically that
destroying the last 4 KB mapping produces slightly better results,
specifically, more promotions and fewer failed promotion attempts.
Accordingly, this revision changes pmap_advise() to destroy the last 4 KB
page mapping.  It also replaces some nearby uses of boolean_t with bool.

Reviewed by:	kib, markj
Differential Revision:	https://reviews.freebsd.org/D21115
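
The heart of the change is the selection of the victim page: instead of the
first 4 KB page at sva, pmap_advise() now removes the last 4 KB page in the
clipped range [sva, min(va_next, eva)).  The following standalone sketch
restates that arithmetic outside the kernel; the name last_page is
hypothetical, and assert()/uintptr_t stand in for the kernel's
KASSERT()/vm_offset_t:

	#include <assert.h>
	#include <stdint.h>

	#define	PAGE_SIZE	4096UL

	/*
	 * Pick the last 4 KB page within [sva, min(va_next, eva)), where
	 * va_next is the end of the current (demoted) superpage's range
	 * and eva is the end of the range passed to pmap_advise().
	 */
	static uintptr_t
	last_page(uintptr_t sva, uintptr_t va_next, uintptr_t eva)
	{
		uintptr_t va;

		va = eva;
		if (va > va_next)
			va = va_next;	/* clip to this superpage's range */
		va -= PAGE_SIZE;	/* step back to the last 4 KB page */
		assert(va >= sva);	/* mirrors the "no address gap" KASSERT */
		return (va);
	}

Since sva lies strictly below min(va_next, eva) and both bounds are
page-aligned at this point, the clipped range contains at least one page,
which is what the "no address gap" assertion checks.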
Alan Cox 2019-07-31 05:38:39 +00:00
parent e843651ba0
commit 43ded0a321
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=350463
3 changed files with 50 additions and 26 deletions

sys/amd64/amd64/pmap.c

@@ -7444,7 +7444,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
 	pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V;
 	vm_offset_t va, va_next;
 	vm_page_t m;
-	boolean_t anychanged;
+	bool anychanged;
 
 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
 		return;
@@ -7463,7 +7463,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
 	PG_M = pmap_modified_bit(pmap);
 	PG_V = pmap_valid_bit(pmap);
 	PG_RW = pmap_rw_bit(pmap);
-	anychanged = FALSE;
+	anychanged = false;
 	pmap_delayed_invl_start();
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = va_next) {
@@ -7505,17 +7505,25 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
 			/*
 			 * Unless the page mappings are wired, remove the
 			 * mapping to a single page so that a subsequent
-			 * access may repromote.  Since the underlying page
-			 * table page is fully populated, this removal never
-			 * frees a page table page.
+			 * access may repromote.  Choosing the last page
+			 * within the address range [sva, min(va_next, eva))
+			 * generally results in more repromotions.  Since the
+			 * underlying page table page is fully populated, this
+			 * removal never frees a page table page.
 			 */
 			if ((oldpde & PG_W) == 0) {
-				pte = pmap_pde_to_pte(pde, sva);
+				va = eva;
+				if (va > va_next)
+					va = va_next;
+				va -= PAGE_SIZE;
+				KASSERT(va >= sva,
+				    ("pmap_advise: no address gap"));
+				pte = pmap_pde_to_pte(pde, va);
 				KASSERT((*pte & PG_V) != 0,
 				    ("pmap_advise: invalid PTE"));
-				pmap_remove_pte(pmap, pte, sva, *pde, NULL,
+				pmap_remove_pte(pmap, pte, va, *pde, NULL,
 				    &lock);
-				anychanged = TRUE;
+				anychanged = true;
 			}
 			if (lock != NULL)
 				rw_wunlock(lock);
@@ -7547,7 +7555,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
 				if (va == va_next)
 					va = sva;
 			} else
-				anychanged = TRUE;
+				anychanged = true;
 			continue;
maybe_invlrng:
 			if (va != va_next) {
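
For reference, pmap_advise() is reached from the madvise(2) path for
MADV_FREE and MADV_DONTNEED.  A minimal userland sketch (an illustration,
not part of this commit) that could exercise the demote-then-remove path
above; note that MAP_ALIGNED_SUPER merely encourages superpage promotion,
so whether a superpage mapping is actually encountered depends on the
system:

	#include <sys/mman.h>

	#include <stddef.h>

	int
	main(void)
	{
		size_t len = 4UL * 1024 * 1024;
		char *p;

		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_ALIGNED_SUPER, -1, 0);
		if (p == MAP_FAILED)
			return (1);
		/* Touch every page so the range is eligible for promotion. */
		for (size_t i = 0; i < len; i += 4096)
			((volatile char *)p)[i] = 1;
		/*
		 * Advise part of the range; if it was promoted, the kernel
		 * demotes the superpage and removes the last 4 KB mapping
		 * within the advised range.
		 */
		if (madvise(p, len / 2, MADV_FREE) != 0)
			return (1);
		return (0);
	}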

sys/arm64/arm64/pmap.c

@@ -4888,15 +4888,23 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
 			/*
 			 * Unless the page mappings are wired, remove the
 			 * mapping to a single page so that a subsequent
-			 * access may repromote.  Since the underlying page
-			 * table page is fully populated, this removal never
-			 * frees a page table page.
+			 * access may repromote.  Choosing the last page
+			 * within the address range [sva, min(va_next, eva))
+			 * generally results in more repromotions.  Since the
+			 * underlying page table page is fully populated, this
+			 * removal never frees a page table page.
 			 */
 			if ((oldl2 & ATTR_SW_WIRED) == 0) {
-				l3 = pmap_l2_to_l3(l2, sva);
+				va = eva;
+				if (va > va_next)
+					va = va_next;
+				va -= PAGE_SIZE;
+				KASSERT(va >= sva,
+				    ("pmap_advise: no address gap"));
+				l3 = pmap_l2_to_l3(l2, va);
 				KASSERT(pmap_load(l3) != 0,
 				    ("pmap_advise: invalid PTE"));
-				pmap_remove_l3(pmap, l3, sva, pmap_load(l2),
+				pmap_remove_l3(pmap, l3, va, pmap_load(l2),
 				    NULL, &lock);
 			}
 			if (lock != NULL)

sys/i386/i386/pmap.c

@@ -5167,19 +5167,19 @@ __CONCAT(PMTYPE, advise)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 	pt_entry_t *pte;
 	vm_offset_t va, pdnxt;
 	vm_page_t m;
-	boolean_t anychanged, pv_lists_locked;
+	bool anychanged, pv_lists_locked;
 
 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
 		return;
 	if (pmap_is_current(pmap))
-		pv_lists_locked = FALSE;
+		pv_lists_locked = false;
 	else {
-		pv_lists_locked = TRUE;
+		pv_lists_locked = true;
resume:
 		rw_wlock(&pvh_global_lock);
 		sched_pin();
 	}
-	anychanged = FALSE;
+	anychanged = false;
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = pdnxt) {
 		pdnxt = (sva + NBPDR) & ~PDRMASK;
@@ -5193,7 +5193,7 @@ __CONCAT(PMTYPE, advise)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 			if ((oldpde & PG_MANAGED) == 0)
 				continue;
 			if (!pv_lists_locked) {
-				pv_lists_locked = TRUE;
+				pv_lists_locked = true;
 				if (!rw_try_wlock(&pvh_global_lock)) {
 					if (anychanged)
 						pmap_invalidate_all_int(pmap);
@@ -5212,16 +5212,24 @@ __CONCAT(PMTYPE, advise)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 			/*
 			 * Unless the page mappings are wired, remove the
 			 * mapping to a single page so that a subsequent
-			 * access may repromote.  Since the underlying page
-			 * table page is fully populated, this removal never
-			 * frees a page table page.
+			 * access may repromote.  Choosing the last page
+			 * within the address range [sva, min(pdnxt, eva))
+			 * generally results in more repromotions.  Since the
+			 * underlying page table page is fully populated, this
+			 * removal never frees a page table page.
 			 */
 			if ((oldpde & PG_W) == 0) {
-				pte = pmap_pte_quick(pmap, sva);
+				va = eva;
+				if (va > pdnxt)
+					va = pdnxt;
+				va -= PAGE_SIZE;
+				KASSERT(va >= sva,
+				    ("pmap_advise: no address gap"));
+				pte = pmap_pte_quick(pmap, va);
 				KASSERT((*pte & PG_V) != 0,
 				    ("pmap_advise: invalid PTE"));
-				pmap_remove_pte(pmap, pte, sva, NULL);
-				anychanged = TRUE;
+				pmap_remove_pte(pmap, pte, va, NULL);
+				anychanged = true;
 			}
 		}
 		if (pdnxt > eva)
@@ -5250,7 +5258,7 @@ __CONCAT(PMTYPE, advise)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 				if (va == pdnxt)
 					va = sva;
 			} else
-				anychanged = TRUE;
+				anychanged = true;
 			continue;
maybe_invlrng:
 			if (va != pdnxt) {
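
The empirical claim in the commit message (more promotions, fewer failed
promotion attempts) can be checked on amd64 by sampling the pmap's
promotion counters before and after a workload.  A sketch, assuming the
vm.pmap.pde.promotions and vm.pmap.pde.p_failures sysctl OIDs exported by
the amd64 pmap:

	#include <sys/types.h>
	#include <sys/sysctl.h>

	#include <stdio.h>

	int
	main(void)
	{
		u_long promotions, p_failures;
		size_t len;

		/* Successful 2 MB page promotions since boot. */
		len = sizeof(promotions);
		if (sysctlbyname("vm.pmap.pde.promotions", &promotions,
		    &len, NULL, 0) != 0)
			return (1);
		/* Promotion attempts that failed. */
		len = sizeof(p_failures);
		if (sysctlbyname("vm.pmap.pde.p_failures", &p_failures,
		    &len, NULL, 0) != 0)
			return (1);
		printf("promotions: %lu, failed attempts: %lu\n",
		    promotions, p_failures);
		return (0);
	}

Running a superpage-heavy workload with and without the change and
comparing the two counters reproduces, in spirit, the experiment the
commit message describes.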