Only lock pvh_global_lock read-only for pmap_page_wired_mappings(),
pmap_is_modified() and pmap_is_referenced(), the same as was done for
pmap_ts_referenced().

Consolidate identical code for pmap_is_modified() and
pmap_is_referenced() into helper pmap_page_test_mappings().
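
The consolidation works because the two predicates differ only in which
PTE bits they test, so a single helper can take the bit mask as a
parameter. A standalone sketch of that shape (plain arrays stand in for
the pv-list walk; test_mappings(), is_modified() and is_referenced()
are illustrative names, not the kernel functions):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t pt_entry_t;

#define PG_V	0x0001		/* valid */
#define PG_RW	0x0002		/* writable */
#define PG_A	0x0020		/* accessed */
#define PG_M	0x0040		/* dirty */

/* One walk, parameterized by the PTE bits of interest: true as soon
 * as any mapping has every bit in "mask" set. */
static bool
test_mappings(const pt_entry_t *ptes, int n, pt_entry_t mask)
{
	for (int i = 0; i < n; i++)
		if ((ptes[i] & mask) == mask)
			return (true);
	return (false);
}

static bool
is_modified(const pt_entry_t *ptes, int n)
{
	return (test_mappings(ptes, n, PG_M | PG_RW));
}

static bool
is_referenced(const pt_entry_t *ptes, int n)
{
	return (test_mappings(ptes, n, PG_A | PG_V));
}

PG_M is paired with PG_RW because the hardware can only set the dirty
bit through a writable mapping, matching the masks passed to
pmap_page_test_mappings() in the diff below.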

Reviewed by:	alc
Tested by:	pho (previous version)
Sponsored by:	The FreeBSD Foundation
Author:	Konstantin Belousov
Date:	2013-09-06 16:53:48 +00:00
Parent:	3e4f32be7d
Commit:	9430f833ca
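
A note on the locking protocol visible in the diff: the rewritten
functions hold pvh_global_lock and the per-page pv-list lock read-only,
take each per-pmap lock with PMAP_TRYLOCK(), and on contention drop the
list lock, block on the pmap lock, retake the list lock, and compare
the pv_gen generation counters to detect a concurrent pv-list change,
restarting the walk if one occurred. Below is a compressed userland
analogue of that retry dance using POSIX threads in place of the kernel
primitives; struct page, struct entry and count_wired() are invented
for illustration only.

#include <pthread.h>
#include <stdio.h>

/*
 * Illustrative stand-ins: "entry" plays the role of a pv entry with
 * its pmap's mutex, "page" the role of a page's pv list with its list
 * lock and generation counter.  Entries are assumed to stay allocated;
 * only list membership (and thus "gen") changes under writers.
 */
struct entry {
	pthread_mutex_t lock;		/* per-pmap lock (PMAP_LOCK) */
	int wired;			/* stands in for the PG_W bit */
	struct entry *next;
};

struct page {
	pthread_rwlock_t list_lock;	/* per-page pv-list lock */
	int gen;			/* pv_gen, bumped on list change */
	struct entry *head;
};

static int
count_wired(struct page *pg)
{
	struct entry *e;
	int count, gen;

	pthread_rwlock_rdlock(&pg->list_lock);
restart:
	count = 0;
	for (e = pg->head; e != NULL; e = e->next) {
		if (pthread_mutex_trylock(&e->lock) != 0) {
			/*
			 * Contested: remember the generation, drop the
			 * list lock so blocking on the entry lock cannot
			 * deadlock against a holder who wants the list
			 * lock, then retake the list lock and recheck.
			 */
			gen = pg->gen;
			pthread_rwlock_unlock(&pg->list_lock);
			pthread_mutex_lock(&e->lock);
			pthread_rwlock_rdlock(&pg->list_lock);
			if (gen != pg->gen) {
				/* The list changed meanwhile; restart. */
				pthread_mutex_unlock(&e->lock);
				goto restart;
			}
		}
		if (e->wired)
			count++;
		pthread_mutex_unlock(&e->lock);
	}
	pthread_rwlock_unlock(&pg->list_lock);
	return (count);
}

int
main(void)
{
	struct entry e1 = { PTHREAD_MUTEX_INITIALIZER, 1, NULL };
	struct entry e0 = { PTHREAD_MUTEX_INITIALIZER, 0, &e1 };
	struct page pg = { PTHREAD_RWLOCK_INITIALIZER, 0, &e0 };

	printf("wired mappings: %d\n", count_wired(&pg));
	return (0);
}

The kernel version runs this loop twice, once over m->md for 4KB
mappings and once over the pa_to_pvh() entry for 2MB mappings, which is
why the second loop in the diff rechecks both generation counters.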

@@ -315,7 +315,6 @@ static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
             vm_offset_t va);
-static int pmap_pvh_wired_mappings(struct md_page *pvh, int count);
 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
@@ -329,8 +328,6 @@ static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
             vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
-static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
-static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
@@ -4604,42 +4601,61 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 int
 pmap_page_wired_mappings(vm_page_t m)
 {
-        int count;
-
-        count = 0;
-        if ((m->oflags & VPO_UNMANAGED) != 0)
-                return (count);
-        rw_wlock(&pvh_global_lock);
-        count = pmap_pvh_wired_mappings(&m->md, count);
-        if ((m->flags & PG_FICTITIOUS) == 0) {
-                count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
-                    count);
-        }
-        rw_wunlock(&pvh_global_lock);
-        return (count);
-}
-
-/*
- * pmap_pvh_wired_mappings:
- *
- * Return the updated number "count" of managed mappings that are wired.
- */
-static int
-pmap_pvh_wired_mappings(struct md_page *pvh, int count)
-{
+        struct rwlock *lock;
+        struct md_page *pvh;
         pmap_t pmap;
         pt_entry_t *pte;
         pv_entry_t pv;
+        int count, md_gen, pvh_gen;
 
-        rw_assert(&pvh_global_lock, RA_WLOCKED);
-        TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+        if ((m->oflags & VPO_UNMANAGED) != 0)
+                return (0);
+        rw_rlock(&pvh_global_lock);
+        lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+        rw_rlock(lock);
+restart:
+        count = 0;
+        TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
                 pmap = PV_PMAP(pv);
-                PMAP_LOCK(pmap);
+                if (!PMAP_TRYLOCK(pmap)) {
+                        md_gen = m->md.pv_gen;
+                        rw_runlock(lock);
+                        PMAP_LOCK(pmap);
+                        rw_rlock(lock);
+                        if (md_gen != m->md.pv_gen) {
+                                PMAP_UNLOCK(pmap);
+                                goto restart;
+                        }
+                }
                 pte = pmap_pte(pmap, pv->pv_va);
                 if ((*pte & PG_W) != 0)
                         count++;
                 PMAP_UNLOCK(pmap);
         }
+        if ((m->flags & PG_FICTITIOUS) == 0) {
+                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+                TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+                        pmap = PV_PMAP(pv);
+                        if (!PMAP_TRYLOCK(pmap)) {
+                                md_gen = m->md.pv_gen;
+                                pvh_gen = pvh->pv_gen;
+                                rw_runlock(lock);
+                                PMAP_LOCK(pmap);
+                                rw_rlock(lock);
+                                if (md_gen != m->md.pv_gen ||
+                                    pvh_gen != pvh->pv_gen) {
+                                        PMAP_UNLOCK(pmap);
+                                        goto restart;
+                                }
+                        }
+                        pte = pmap_pde(pmap, pv->pv_va);
+                        if ((*pte & PG_W) != 0)
+                                count++;
+                        PMAP_UNLOCK(pmap);
+                }
+        }
+        rw_runlock(lock);
+        rw_runlock(&pvh_global_lock);
         return (count);
 }
 
@@ -4835,6 +4851,69 @@ pmap_remove_pages(pmap_t pmap)
         pmap_free_zero_pages(&free);
 }
 
+static boolean_t
+pmap_page_test_mappings(vm_page_t m, pt_entry_t mask)
+{
+        struct rwlock *lock;
+        pv_entry_t pv;
+        struct md_page *pvh;
+        pt_entry_t *pte;
+        pmap_t pmap;
+        int md_gen, pvh_gen;
+        boolean_t rv;
+
+        rv = FALSE;
+        rw_rlock(&pvh_global_lock);
+        lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+        rw_rlock(lock);
+restart:
+        TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+                pmap = PV_PMAP(pv);
+                if (!PMAP_TRYLOCK(pmap)) {
+                        md_gen = m->md.pv_gen;
+                        rw_runlock(lock);
+                        PMAP_LOCK(pmap);
+                        rw_rlock(lock);
+                        if (md_gen != m->md.pv_gen) {
+                                PMAP_UNLOCK(pmap);
+                                goto restart;
+                        }
+                }
+                pte = pmap_pte(pmap, pv->pv_va);
+                rv = (*pte & mask) == mask;
+                PMAP_UNLOCK(pmap);
+                if (rv)
+                        goto out;
+        }
+        if ((m->flags & PG_FICTITIOUS) == 0) {
+                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+                TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+                        pmap = PV_PMAP(pv);
+                        if (!PMAP_TRYLOCK(pmap)) {
+                                md_gen = m->md.pv_gen;
+                                pvh_gen = pvh->pv_gen;
+                                rw_runlock(lock);
+                                PMAP_LOCK(pmap);
+                                rw_rlock(lock);
+                                if (md_gen != m->md.pv_gen ||
+                                    pvh_gen != pvh->pv_gen) {
+                                        PMAP_UNLOCK(pmap);
+                                        goto restart;
+                                }
+                        }
+                        pte = pmap_pde(pmap, pv->pv_va);
+                        rv = (*pte & mask) == mask;
+                        PMAP_UNLOCK(pmap);
+                        if (rv)
+                                goto out;
+                }
+        }
+out:
+        rw_runlock(lock);
+        rw_runlock(&pvh_global_lock);
+        return (rv);
+}
+
 /*
  * pmap_is_modified:
  *
@@ -4844,7 +4923,6 @@ pmap_remove_pages(pmap_t pmap)
 boolean_t
 pmap_is_modified(vm_page_t m)
 {
-        boolean_t rv;
 
         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
             ("pmap_is_modified: page %p is not managed", m));
@@ -4857,39 +4935,7 @@ pmap_is_modified(vm_page_t m)
         VM_OBJECT_ASSERT_WLOCKED(m->object);
         if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
                 return (FALSE);
-        rw_wlock(&pvh_global_lock);
-        rv = pmap_is_modified_pvh(&m->md) ||
-            ((m->flags & PG_FICTITIOUS) == 0 &&
-            pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
-        rw_wunlock(&pvh_global_lock);
-        return (rv);
-}
-
-/*
- * Returns TRUE if any of the given mappings were used to modify
- * physical memory. Otherwise, returns FALSE. Both page and 2mpage
- * mappings are supported.
- */
-static boolean_t
-pmap_is_modified_pvh(struct md_page *pvh)
-{
-        pv_entry_t pv;
-        pt_entry_t *pte;
-        pmap_t pmap;
-        boolean_t rv;
-
-        rw_assert(&pvh_global_lock, RA_WLOCKED);
-        rv = FALSE;
-        TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
-                pmap = PV_PMAP(pv);
-                PMAP_LOCK(pmap);
-                pte = pmap_pte(pmap, pv->pv_va);
-                rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
-                PMAP_UNLOCK(pmap);
-                if (rv)
-                        break;
-        }
-        return (rv);
+        return (pmap_page_test_mappings(m, PG_M | PG_RW));
 }
 
 /*
@@ -4925,42 +4971,10 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 boolean_t
 pmap_is_referenced(vm_page_t m)
 {
-        boolean_t rv;
 
         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
             ("pmap_is_referenced: page %p is not managed", m));
-        rw_wlock(&pvh_global_lock);
-        rv = pmap_is_referenced_pvh(&m->md) ||
-            ((m->flags & PG_FICTITIOUS) == 0 &&
-            pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
-        rw_wunlock(&pvh_global_lock);
-        return (rv);
-}
-
-/*
- * Returns TRUE if any of the given mappings were referenced and FALSE
- * otherwise. Both page and 2mpage mappings are supported.
- */
-static boolean_t
-pmap_is_referenced_pvh(struct md_page *pvh)
-{
-        pv_entry_t pv;
-        pt_entry_t *pte;
-        pmap_t pmap;
-        boolean_t rv;
-
-        rw_assert(&pvh_global_lock, RA_WLOCKED);
-        rv = FALSE;
-        TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
-                pmap = PV_PMAP(pv);
-                PMAP_LOCK(pmap);
-                pte = pmap_pte(pmap, pv->pv_va);
-                rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V);
-                PMAP_UNLOCK(pmap);
-                if (rv)
-                        break;
-        }
-        return (rv);
+        return (pmap_page_test_mappings(m, PG_A | PG_V));
 }
 
 /*