Resurrect pmap_is_referenced() and use it in mincore().

Essentially, pmap_ts_referenced() is not always appropriate for checking whether or
not pages have been referenced because it clears any reference bits
that it encounters.  For example, in mincore(), clearing the reference
bits has two negative consequences.  First, it throws off the activity
count calculations performed by the page daemon.  Specifically, a page
on which mincore() has called pmap_ts_referenced() looks less active
to the page daemon than it should.  Consequently, the page could be
deactivated prematurely by the page daemon.  Arguably, this problem
could be fixed by having mincore() duplicate the activity count
calculation on the page.  However, there is a second problem for which
that is not a solution.  In order to clear a reference on a 4KB page,
it may be necessary to demote a 2/4MB page mapping.  Thus, a mincore()
by one process can have the side effect of demoting a superpage
mapping within another process!
This commit is contained in:
Alan Cox 2010-04-24 17:32:52 +00:00
parent b901701726
commit 7b85f59183
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=207155
15 changed files with 289 additions and 18 deletions

View File

@ -236,6 +236,7 @@ static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
@ -4177,6 +4178,49 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
return (rv);
}
/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.  Unlike pmap_ts_referenced(), the reference
 *	bits are left untouched.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	/* Fictitious pages have no pv entries and are never referenced. */
	if ((m->flags & PG_FICTITIOUS) != 0)
		return (FALSE);
	/* Check the 4KB mappings first, then any 2MB superpage mapping. */
	return (pmap_is_referenced_pvh(&m->md) ||
	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
}
/*
 * Returns TRUE if any of the given mappings were referenced and FALSE
 * otherwise.  Both page and 2mpage mappings are supported.
 */
static boolean_t
pmap_is_referenced_pvh(struct md_page *pvh)
{
	pv_entry_t pv;
	pt_entry_t *pte;
	pmap_t pmap;
	boolean_t referenced;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	referenced = FALSE;
	/* Walk each mapping of the page, stopping at the first hit. */
	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte(pmap, pv->pv_va);
		/* A mapping counts only if it is both valid and accessed. */
		if ((*pte & (PG_A | PG_V)) == (PG_A | PG_V))
			referenced = TRUE;
		PMAP_UNLOCK(pmap);
		if (referenced)
			break;
	}
	return (referenced);
}
/*
* Clear the write and modified bits in each of the given page's mappings.
*/
@ -4893,10 +4937,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
*/
vm_page_lock_queues();
if ((m->flags & PG_REFERENCED) ||
pmap_ts_referenced(m)) {
pmap_is_referenced(m))
val |= MINCORE_REFERENCED_OTHER;
vm_page_flag_set(m, PG_REFERENCED);
}
vm_page_unlock_queues();
}
}

View File

@ -4492,6 +4492,20 @@ pmap_clear_modify(vm_page_t m)
}
/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	/* Unmanaged and fictitious pages carry no reference tracking. */
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);
	/* The reference state is cached in the machine-dependent attrs. */
	return ((m->md.pvh_attrs & PVF_REF) != 0);
}
/*
* pmap_clear_reference:
*

View File

@ -296,6 +296,7 @@ static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
@ -4355,6 +4356,51 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
return (rv);
}
/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.  Unlike pmap_ts_referenced(), the reference
 *	bits are left untouched.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	/* Fictitious pages have no pv entries and are never referenced. */
	if ((m->flags & PG_FICTITIOUS) != 0)
		return (FALSE);
	/* Check the 4KB mappings first, then any 4MB superpage mapping. */
	return (pmap_is_referenced_pvh(&m->md) ||
	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
}
/*
 * Returns TRUE if any of the given mappings were referenced and FALSE
 * otherwise.  Both page and 4mpage mappings are supported.
 */
static boolean_t
pmap_is_referenced_pvh(struct md_page *pvh)
{
	pv_entry_t pv;
	pt_entry_t *pte;
	pmap_t pmap;
	boolean_t referenced;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	referenced = FALSE;
	/* pmap_pte_quick() requires the caller to be pinned. */
	sched_pin();
	/* Walk each mapping of the page, stopping at the first hit. */
	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte_quick(pmap, pv->pv_va);
		/* A mapping counts only if it is both valid and accessed. */
		if ((*pte & (PG_A | PG_V)) == (PG_A | PG_V))
			referenced = TRUE;
		PMAP_UNLOCK(pmap);
		if (referenced)
			break;
	}
	sched_unpin();
	return (referenced);
}
/*
* Clear the write and modified bits in each of the given page's mappings.
*/
@ -4961,10 +5007,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
*/
vm_page_lock_queues();
if ((m->flags & PG_REFERENCED) ||
pmap_ts_referenced(m)) {
pmap_is_referenced(m))
val |= MINCORE_REFERENCED_OTHER;
vm_page_flag_set(m, PG_REFERENCED);
}
vm_page_unlock_queues();
}
}

View File

@ -3718,6 +3718,34 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
return (rv);
}
/*
 * Return whether or not the specified physical page was referenced
 * in any physical maps.  The reference bits are not cleared.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{
	pv_entry_t pv;
	pt_entry_t *pte;
	pmap_t pmap;
	boolean_t referenced;

	referenced = FALSE;
	/* Fictitious pages have no pv entries to inspect. */
	if ((m->flags & PG_FICTITIOUS) != 0)
		return (referenced);
	/* pmap_pte_quick() requires the caller to be pinned. */
	sched_pin();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte_quick(pmap, pv->pv_va);
		/* A mapping counts only if it is both valid and accessed. */
		if ((*pte & (PG_A | PG_V)) == (PG_A | PG_V))
			referenced = TRUE;
		PMAP_UNLOCK(pmap);
		if (referenced)
			break;
	}
	/*
	 * Invalidate the scratch mapping — presumably the one set up by
	 * pmap_pte_quick() via PMAP1/PADDR1; verify against that helper.
	 */
	if (*PMAP1)
		PT_SET_MA(PADDR1, 0);
	sched_unpin();
	return (referenced);
}
void
pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len)
{
@ -4145,10 +4173,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
*/
vm_page_lock_queues();
if ((m->flags & PG_REFERENCED) ||
pmap_ts_referenced(m)) {
pmap_is_referenced(m))
val |= MINCORE_REFERENCED_OTHER;
vm_page_flag_set(m, PG_REFERENCED);
}
vm_page_unlock_queues();
}
}

View File

@ -2022,6 +2022,37 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
return (TRUE);
}
/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{
	struct ia64_lpte *pte;
	pmap_t oldpmap;
	pv_entry_t pv;
	boolean_t referenced;

	referenced = FALSE;
	/* Fictitious pages have no pv entries and are never referenced. */
	if ((m->flags & PG_FICTITIOUS) != 0)
		return (referenced);
	/* Walk each mapping of the page, stopping at the first hit. */
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		/* The VHPT lookup must run with the mapping pmap active. */
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		pmap_switch(oldpmap);
		KASSERT(pte != NULL, ("pte"));
		if (pmap_accessed(pte))
			referenced = TRUE;
		PMAP_UNLOCK(pv->pv_pmap);
		if (referenced)
			break;
	}
	return (referenced);
}
/*
* Clear the modify bits on the specified physical page.
*/
@ -2197,10 +2228,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
* Referenced by someone
*/
vm_page_lock_queues();
if (pmap_ts_referenced(m)) {
if (pmap_is_referenced(m))
val |= MINCORE_REFERENCED_OTHER;
vm_page_flag_set(m, PG_REFERENCED);
}
vm_page_unlock_queues();
}
}

View File

@ -2625,6 +2625,20 @@ pmap_clear_modify(vm_page_t m)
}
}
/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	/* Fictitious pages carry no reference tracking. */
	if ((m->flags & PG_FICTITIOUS) != 0)
		return (FALSE);
	/* The reference state is cached in the machine-dependent pv flags. */
	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
}
/*
* pmap_clear_reference:
*
@ -2750,10 +2764,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
* Referenced by us or someone
*/
vm_page_lock_queues();
if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
if ((m->flags & PG_REFERENCED) || pmap_is_referenced(m))
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
vm_page_flag_set(m, PG_REFERENCED);
}
vm_page_unlock_queues();
}
return val;

View File

@ -305,6 +305,7 @@ vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_is_referenced(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
@ -344,6 +345,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_extract_and_hold, moea_extract_and_hold),
MMUMETHOD(mmu_init, moea_init),
MMUMETHOD(mmu_is_modified, moea_is_modified),
MMUMETHOD(mmu_is_referenced, moea_is_referenced),
MMUMETHOD(mmu_ts_referenced, moea_ts_referenced),
MMUMETHOD(mmu_map, moea_map),
MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
@ -1268,6 +1270,15 @@ moea_init(mmu_t mmu)
moea_initialized = TRUE;
}
/*
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
boolean_t
moea_is_referenced(mmu_t mmu, vm_page_t m)
{

	/* Unmanaged and fictitious pages never track PTE_REF. */
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
		return (moea_query_bit(m, PTE_REF));
	return (FALSE);
}
boolean_t
moea_is_modified(mmu_t mmu, vm_page_t m)
{

View File

@ -379,6 +379,7 @@ vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
@ -416,6 +417,7 @@ static mmu_method_t moea64_bridge_methods[] = {
MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),
MMUMETHOD(mmu_init, moea64_init),
MMUMETHOD(mmu_is_modified, moea64_is_modified),
MMUMETHOD(mmu_is_referenced, moea64_is_referenced),
MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced),
MMUMETHOD(mmu_map, moea64_map),
MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
@ -1462,6 +1464,15 @@ moea64_init(mmu_t mmu)
moea64_initialized = TRUE;
}
/*
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
boolean_t
moea64_is_referenced(mmu_t mmu, vm_page_t m)
{

	/* Unmanaged and fictitious pages never track PTE_REF. */
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
		return (moea64_query_bit(m, PTE_REF));
	return (FALSE);
}
boolean_t
moea64_is_modified(mmu_t mmu, vm_page_t m)
{

View File

@ -288,6 +288,7 @@ static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
static void mmu_booke_init(mmu_t);
static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
int);
@ -342,6 +343,7 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_init, mmu_booke_init),
MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
MMUMETHOD(mmu_map, mmu_booke_map),
MMUMETHOD(mmu_mincore, mmu_booke_mincore),
@ -2180,6 +2182,33 @@ mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
return (FALSE);
}
/*
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
static boolean_t
mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;
	boolean_t referenced;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	referenced = FALSE;
	/* Unmanaged and fictitious pages have no pv entries to inspect. */
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (referenced);
	/* Walk each mapping of the page, stopping at the first hit. */
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		pte = pte_find(mmu, pv->pv_pmap, pv->pv_va);
		/* A mapping counts only if it is valid and referenced. */
		if (pte != NULL && PTE_ISVALID(pte) && PTE_ISREFERENCED(pte))
			referenced = TRUE;
		PMAP_UNLOCK(pv->pv_pmap);
		if (referenced)
			break;
	}
	return (referenced);
}
/*
* Clear the modify bits on the specified physical page.
*/

View File

@ -345,6 +345,20 @@ METHOD boolean_t is_prefaultable {
} DEFAULT mmu_null_is_prefaultable;
/**
 * @brief Return whether or not the specified physical page was referenced
 * in any physical maps.  Unlike ts_referenced, this does not clear the
 * page's reference bits.
 *
 * @params _pg physical page
 *
 * @retval boolean TRUE if page has been referenced
 */
METHOD boolean_t is_referenced {
	mmu_t _mmu;
	vm_page_t _pg;
};
/**
* @brief Return a count of referenced bits for a page, clearing those bits.
* Not all referenced bits need to be cleared, but it is necessary that 0

View File

@ -194,6 +194,14 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}
/*
 * Return whether or not the specified physical page was referenced
 * in any physical maps.  Dispatches to the active MMU implementation.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{
	boolean_t rv;

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	rv = MMU_IS_REFERENCED(mmu_obj, m);
	return (rv);
}
boolean_t
pmap_ts_referenced(vm_page_t m)
{

View File

@ -1917,6 +1917,27 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
return (FALSE);
}
/*
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{
	struct tte *tp;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/* Unmanaged and fictitious pages have no tracked mappings. */
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);
	/*
	 * Scan the page's TTE list for a pv-tracked mapping with the
	 * reference bit set.
	 */
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
		if ((tp->tte_data & (TD_PV | TD_REF)) == (TD_PV | TD_REF))
			return (TRUE);
	}
	return (FALSE);
}
void
pmap_clear_modify(vm_page_t m)
{

View File

@ -1591,6 +1591,17 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
return (tte_hash_lookup(pmap->pm_hash, va) == 0);
}
/*
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{
	/*
	 * NOTE(review): presumably tte_get_phys_bit() scans this page's
	 * mappings for VTD_REF — confirm against the tte_hash code.
	 */
	return (tte_get_phys_bit(m, VTD_REF));
}
/*
* Extract the physical page address associated with the given kernel virtual
* address.

View File

@ -119,6 +119,7 @@ void pmap_growkernel(vm_offset_t);
void pmap_init(void);
boolean_t pmap_is_modified(vm_page_t m);
boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t va);
boolean_t pmap_is_referenced(vm_page_t m);
boolean_t pmap_ts_referenced(vm_page_t m);
vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,

View File

@ -871,10 +871,8 @@ mincore(td, uap)
pmap_is_modified(m))
mincoreinfo |= MINCORE_MODIFIED_OTHER;
if ((m->flags & PG_REFERENCED) ||
pmap_ts_referenced(m)) {
vm_page_flag_set(m, PG_REFERENCED);
pmap_is_referenced(m))
mincoreinfo |= MINCORE_REFERENCED_OTHER;
}
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(current->object.vm_object);