Various changes to pmap_ts_referenced()

Move PMAP_TS_REFERENCED_MAX out of the various pmap implementations and
into vm/pmap.h, and describe what its purpose is.  Eliminate the archaic
"XXX" comment about its value.  I don't believe that its exact value, e.g.,
5 versus 6, matters.
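
For a rough sense of how the shared constant is consumed, every
pmap_ts_referenced() implementation uses it as a bound on its walk of the
page's PV list, along these lines (a simplified skeleton, not the code of
any particular pmap; the machine-dependent bit tests are elided):

    int
    pmap_ts_referenced(vm_page_t m)
    {
        pv_entry_t pv;
        int cleared, not_cleared;

        cleared = not_cleared = 0;
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
            /*
             * Test this mapping's reference bit; clear it when that is
             * safe (counting it in "cleared"), otherwise leave it set and
             * count it in "not_cleared".
             */
            if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
                break;
        }
        return (cleared + not_cleared);
    }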

Update the arm64 and riscv pmap implementations of pmap_ts_referenced()
to opportunistically update the page's dirty field.
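
The shape of that update, written with the arm64 names that appear in the
hunks below (pmap_load(), pmap_page_dirty(), ATTR_AF), is roughly:

    tpte = pmap_load(pte);
    /*
     * A modified bit observed while counting reference bits is recorded
     * right away, which may spare a later call to pmap_is_modified().
     */
    if (pmap_page_dirty(tpte))
        vm_page_dirty(m);
    if ((tpte & ATTR_AF) != 0) {
        /* ... count, and possibly clear, this mapping's reference bit ... */
    }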

On amd64, use the PDE value already cached in a local variable rather than
dereferencing a pointer again and again.
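
Condensed, the 2MB-page case now applies its tests to the snapshot that was
already loaded for the dirty check instead of re-reading the PDE.  The
fragment below is a simplification: the real code also interposes a
pseudo-random test so that a superpage's shared reference bit is not cleared
on every call.

    oldpde = *pde;                  /* single load of the 2MB PDE */
    if ((oldpde & PG_M) != 0)       /* modified? (simplified test) */
        vm_page_dirty(m);
    if ((oldpde & PG_A) != 0 &&     /* was: (*pde & PG_A) */
        (oldpde & PG_W) == 0 &&     /* was: (*pde & PG_W) */
        safe_to_clear_referenced(pmap, oldpde)) {
        atomic_clear_long(pde, PG_A);
        pmap_invalidate_page(pmap, pv->pv_va);
    }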

Reviewed by:	kib, markj
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D7836
Alan Cox 2016-09-10 16:49:25 +00:00
parent a27815330c
commit 8cb0c1029d
8 changed files with 53 additions and 44 deletions

View File

@@ -5816,8 +5816,6 @@ safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
return (FALSE);
}
- #define PMAP_TS_REFERENCED_MAX 5
/*
* pmap_ts_referenced:
*
@@ -5826,10 +5824,6 @@ safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
* is necessary that 0 only be returned when there are truly no
* reference bits set.
*
- * XXX: The exact number of bits to check and clear is a matter that
- * should be tested and standardized at some point in the future for
- * optimal aging of shared pages.
- *
* As an optimization, update the page's dirty field if a modified bit is
* found while counting reference bits. This opportunistic update can be
* performed at low cost and can eliminate the need for some future calls
@@ -5898,7 +5892,7 @@ pmap_ts_referenced(vm_page_t m)
*/
vm_page_dirty(m);
}
- if ((*pde & PG_A) != 0) {
+ if ((oldpde & PG_A) != 0) {
/*
* Since this reference bit is shared by 512 4KB
* pages, it should not be cleared every time it is
@@ -5919,7 +5913,7 @@ pmap_ts_referenced(vm_page_t m)
*/
if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
(uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
- (*pde & PG_W) == 0) {
+ (oldpde & PG_W) == 0) {
if (safe_to_clear_referenced(pmap, oldpde)) {
atomic_clear_long(pde, PG_A);
pmap_invalidate_page(pmap, pv->pv_va);

View File

@@ -5161,8 +5161,6 @@ pmap_is_referenced(vm_page_t m)
return (rv);
}
- #define PMAP_TS_REFERENCED_MAX 5
/*
* pmap_ts_referenced:
*
@@ -5171,10 +5169,6 @@ pmap_is_referenced(vm_page_t m)
* is necessary that 0 only be returned when there are truly no
* reference bits set.
*
- * XXX: The exact number of bits to check and clear is a matter that
- * should be tested and standardized at some point in the future for
- * optimal aging of shared pages.
- *
* As an optimization, update the page's dirty field if a modified bit is
* found while counting reference bits. This opportunistic update can be
* performed at low cost and can eliminate the need for some future calls

View File

@@ -3880,8 +3880,6 @@ safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
return (FALSE);
}
- #define PMAP_TS_REFERENCED_MAX 5
/*
* pmap_ts_referenced:
*
@@ -3890,9 +3888,13 @@ safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
* is necessary that 0 only be returned when there are truly no
* reference bits set.
*
- * XXX: The exact number of bits to check and clear is a matter that
- * should be tested and standardized at some point in the future for
- * optimal aging of shared pages.
+ * As an optimization, update the page's dirty field if a modified bit is
+ * found while counting reference bits. This opportunistic update can be
+ * performed at low cost and can eliminate the need for some future calls
+ * to pmap_is_modified(). However, since this function stops after
+ * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ * dirty pages. Those dirty pages will only be detected by a future call
+ * to pmap_is_modified().
*/
int
pmap_ts_referenced(vm_page_t m)
@@ -3947,6 +3949,14 @@ pmap_ts_referenced(vm_page_t m)
("pmap_ts_referenced: found an invalid l1 table"));
pte = pmap_l1_to_l2(pde, pv->pv_va);
tpte = pmap_load(pte);
+ if (pmap_page_dirty(tpte)) {
+ /*
+ * Although "tpte" is mapping a 2MB page, because
+ * this function is called at a 4KB page granularity,
+ * we only update the 4KB page under test.
+ */
+ vm_page_dirty(m);
+ }
if ((tpte & ATTR_AF) != 0) {
/*
* Since this reference bit is shared by 512 4KB
@@ -4043,6 +4053,8 @@ pmap_ts_referenced(vm_page_t m)
("pmap_ts_referenced: found an invalid l2 table"));
pte = pmap_l2_to_l3(pde, pv->pv_va);
tpte = pmap_load(pte);
+ if (pmap_page_dirty(tpte))
+ vm_page_dirty(m);
if ((tpte & ATTR_AF) != 0) {
if (safe_to_clear_referenced(pmap, tpte)) {
/*

View File

@@ -4765,8 +4765,6 @@ pmap_remove_write(vm_page_t m)
rw_wunlock(&pvh_global_lock);
}
- #define PMAP_TS_REFERENCED_MAX 5
/*
* pmap_ts_referenced:
*
@@ -4775,10 +4773,6 @@ pmap_remove_write(vm_page_t m)
* is necessary that 0 only be returned when there are truly no
* reference bits set.
*
- * XXX: The exact number of bits to check and clear is a matter that
- * should be tested and standardized at some point in the future for
- * optimal aging of shared pages.
- *
* As an optimization, update the page's dirty field if a modified bit is
* found while counting reference bits. This opportunistic update can be
* performed at low cost and can eliminate the need for some future calls

View File

@@ -2499,9 +2499,13 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
* is necessary that 0 only be returned when there are truly no
* reference bits set.
*
- * XXX: The exact number of bits to check and clear is a matter that
- * should be tested and standardized at some point in the future for
- * optimal aging of shared pages.
+ * As an optimization, update the page's dirty field if a modified bit is
+ * found while counting reference bits. This opportunistic update can be
+ * performed at low cost and can eliminate the need for some future calls
+ * to pmap_is_modified(). However, since this function stops after
+ * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ * dirty pages. Those dirty pages will only be detected by a future call
+ * to pmap_is_modified().
*/
static int
mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
@@ -2518,6 +2522,8 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
PTE_ISVALID(pte)) {
+ if (PTE_ISMODIFIED(pte))
+ vm_page_dirty(m);
if (PTE_ISREFERENCED(pte)) {
mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();
@@ -2528,7 +2534,7 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);
- if (++count > 4) {
+ if (++count >= PMAP_TS_REFERENCED_MAX) {
PMAP_UNLOCK(pv->pv_pmap);
break;
}

View File

@@ -2991,8 +2991,6 @@ safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
return (FALSE);
}
- #define PMAP_TS_REFERENCED_MAX 5
/*
* pmap_ts_referenced:
*
@@ -3001,9 +2999,13 @@ safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
* is necessary that 0 only be returned when there are truly no
* reference bits set.
*
- * XXX: The exact number of bits to check and clear is a matter that
- * should be tested and standardized at some point in the future for
- * optimal aging of shared pages.
+ * As an optimization, update the page's dirty field if a modified bit is
+ * found while counting reference bits. This opportunistic update can be
+ * performed at low cost and can eliminate the need for some future calls
+ * to pmap_is_modified(). However, since this function stops after
+ * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ * dirty pages. Those dirty pages will only be detected by a future call
+ * to pmap_is_modified().
*/
int
pmap_ts_referenced(vm_page_t m)
@@ -3012,7 +3014,7 @@ pmap_ts_referenced(vm_page_t m)
pmap_t pmap;
struct rwlock *lock;
pd_entry_t *l2;
- pt_entry_t *l3;
+ pt_entry_t *l3, old_l3;
vm_paddr_t pa;
int cleared, md_gen, not_cleared;
struct spglist free;
@@ -3050,15 +3052,18 @@ pmap_ts_referenced(vm_page_t m)
("pmap_ts_referenced: found an invalid l2 table"));
l3 = pmap_l2_to_l3(l2, pv->pv_va);
- if ((pmap_load(l3) & PTE_A) != 0) {
- if (safe_to_clear_referenced(pmap, pmap_load(l3))) {
+ old_l3 = pmap_load(l3);
+ if (pmap_page_dirty(old_l3))
+ vm_page_dirty(m);
+ if ((old_l3 & PTE_A) != 0) {
+ if (safe_to_clear_referenced(pmap, old_l3)) {
/*
* TODO: We don't handle the access flag
* at all. We need to be able to set it in
* the exception handler.
*/
panic("RISCVTODO: safe_to_clear_referenced\n");
- } else if ((pmap_load(l3) & PTE_SW_WIRED) == 0) {
+ } else if ((old_l3 & PTE_SW_WIRED) == 0) {
/*
* Wired pages cannot be paged out so
* doing accessed bit emulation for

View File

@@ -2073,18 +2073,12 @@ pmap_page_is_mapped(vm_page_t m)
return (rv);
}
- #define PMAP_TS_REFERENCED_MAX 5
/*
* Return a count of reference bits for a page, clearing those bits.
* It is not necessary for every reference bit to be cleared, but it
* is necessary that 0 only be returned when there are truly no
* reference bits set.
*
- * XXX: The exact number of bits to check and clear is a matter that
- * should be tested and standardized at some point in the future for
- * optimal aging of shared pages.
- *
* As an optimization, update the page's dirty field if a modified bit is
* found while counting reference bits. This opportunistic update can be
* performed at low cost and can eliminate the need for some future calls

View File

@@ -104,6 +104,16 @@ extern vm_offset_t kernel_vm_end;
#define PMAP_ENTER_NOSLEEP 0x0100
#define PMAP_ENTER_WIRED 0x0200
+ /*
+ * Define the maximum number of machine-dependent reference bits that are
+ * cleared by a call to pmap_ts_referenced(). This limit serves two purposes.
+ * First, it bounds the cost of reference bit maintenance on widely shared
+ * pages. Second, it prevents numeric overflow during maintenance of a
+ * widely shared page's "act_count" field. An overflow could result in the
+ * premature deactivation of the page.
+ */
+ #define PMAP_TS_REFERENCED_MAX 5
void pmap_activate(struct thread *td);
void pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
int advice);
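
To make the overflow concern in the new comment concrete, a pageout scan adds
the return value of pmap_ts_referenced() to the page's small act_count field,
roughly as sketched below.  ACT_ADVANCE, ACT_MAX, and act_count are the
FreeBSD names, but the fragment itself is illustrative rather than the actual
vm_pageout code.

    act_delta = pmap_ts_referenced(m);
    if (act_delta != 0) {
        /*
         * act_count is an 8-bit counter.  Because act_delta is bounded by
         * PMAP_TS_REFERENCED_MAX, the addition below cannot wrap the counter
         * before it is clamped to ACT_MAX.  An unbounded return value could
         * wrap it and make a heavily referenced page appear idle, causing
         * its premature deactivation.
         */
        m->act_count += ACT_ADVANCE + act_delta;
        if (m->act_count > ACT_MAX)
            m->act_count = ACT_MAX;
    }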