Reduce the scope of the page queues lock and the number of
PG_REFERENCED changes in vm_pageout_object_deactivate_pages().
Simplify this function's inner loop using TAILQ_FOREACH(), and shorten
some of its overly long lines.  Update a stale comment.

Assert that PG_REFERENCED may be cleared only if the object containing
the page is locked.  Add a comment documenting this.

Assert that a caller to vm_page_requeue() holds the page queues lock,
and assert that the page is on a page queue.

Push down the page queues lock into pmap_ts_referenced() and
pmap_page_exists_quick().  (As of now, there are no longer any pmap
functions that expect to be called with the page queues lock held.)

Neither pmap_ts_referenced() nor pmap_page_exists_quick() should ever
be passed an unmanaged page.  Assert this rather than returning "0"
and "FALSE" respectively.

ARM:

Simplify pmap_page_exists_quick() by switching to TAILQ_FOREACH().

Push down the page queues lock inside of pmap_clearbit(), simplifying
pmap_clear_modify(), pmap_clear_reference(), and pmap_remove_write().
Additionally, this allows for avoiding the acquisition of the page
queues lock in some cases.

PowerPC/AIM:

moea*_page_exists_quick() and moea*_page_wired_mappings() will never
be called before pmap initialization is complete.  Therefore, the
check for moea_initialized can be eliminated.

Push down the page queues lock inside of moea*_clear_bit(),
simplifying moea*_clear_modify() and moea*_clear_reference().

The last parameter to moea*_clear_bit() is never used.  Eliminate it.

PowerPC/BookE:

Simplify mmu_booke_page_exists_quick()'s control flow.

Reviewed by:	kib@
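The central pattern of this change — moving acquisition of the page queues lock from the callers into the pmap functions themselves, and asserting on unmanaged pages instead of silently returning "0"/"FALSE" — can be modeled outside the kernel. Below is a minimal userspace sketch using pthreads; every name in it (queue_lock, ts_referenced_old/new, struct page) is a hypothetical stand-in for vm_page_queue_mtx and pmap_ts_referenced(), not kernel code:

	/*
	 * Hypothetical userspace model of the lock pushdown in this commit.
	 * queue_lock stands in for vm_page_queue_mtx.
	 */
	#include <assert.h>
	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

	struct page {
		bool	managed;
		int	ref_count;
	};

	/* Old convention: the caller must already hold queue_lock. */
	static int
	ts_referenced_old(struct page *m)
	{

		if (!m->managed)
			return (0);	/* unmanaged pages silently tolerated */
		return (m->ref_count);
	}

	/* New convention: the lock is pushed down; unmanaged pages assert. */
	static int
	ts_referenced_new(struct page *m)
	{
		int rv;

		assert(m->managed && "page is not managed");	/* KASSERT analogue */
		pthread_mutex_lock(&queue_lock);
		rv = m->ref_count;
		pthread_mutex_unlock(&queue_lock);
		return (rv);
	}

	int
	main(void)
	{
		struct page p = { .managed = true, .ref_count = 2 };

		pthread_mutex_lock(&queue_lock);	/* old: caller locks */
		(void)ts_referenced_old(&p);
		pthread_mutex_unlock(&queue_lock);

		/* new: caller needs no lock at all */
		return (ts_referenced_new(&p) == 2 ? 0 : 1);
	}

The payoff, as in pmap_clear_modify() on ARM below, is that a caller can now skip the lock entirely on fast paths where no work is needed.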
parent 6c0a2eb136
commit ce18658792
@@ -3899,30 +3899,35 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 	struct md_page *pvh;
 	pv_entry_t pv;
 	int loops = 0;
+	boolean_t rv;
 
-	if (m->flags & PG_FICTITIOUS)
-		return (FALSE);
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_page_exists_quick: page %p is not managed", m));
+	rv = FALSE;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (PV_PMAP(pv) == pmap) {
-			return (TRUE);
+			rv = TRUE;
+			break;
 		}
 		loops++;
 		if (loops >= 16)
 			break;
 	}
-	if (loops < 16) {
+	if (!rv && loops < 16) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
-			if (PV_PMAP(pv) == pmap)
-				return (TRUE);
+			if (PV_PMAP(pv) == pmap) {
+				rv = TRUE;
+				break;
+			}
 			loops++;
 			if (loops >= 16)
 				break;
 		}
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -4335,10 +4340,10 @@ pmap_ts_referenced(vm_page_t m)
 	vm_offset_t va;
 	int rtval = 0;
 
-	if (m->flags & PG_FICTITIOUS)
-		return (rtval);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_ts_referenced: page %p is not managed", m));
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+	vm_page_lock_queues();
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4362,7 +4367,7 @@ pmap_ts_referenced(vm_page_t m)
 				rtval++;
 				if (rtval > 4) {
 					PMAP_UNLOCK(pmap);
-					return (rtval);
+					goto out;
 				}
 			}
 		}
@@ -4391,6 +4396,8 @@ pmap_ts_referenced(vm_page_t m)
 			PMAP_UNLOCK(pmap);
 		} while ((pv = pvn) != NULL && pv != pvf);
 	}
+out:
+	vm_page_unlock_queues();
 	return (rtval);
 }
 
@@ -1423,7 +1423,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 	u_int oflags;
 	int count = 0;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 
 	if (maskbits & PVF_WRITE)
 		maskbits |= PVF_MOD;
@@ -1433,6 +1433,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 	pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
 
 	if (TAILQ_EMPTY(&pg->md.pv_list)) {
+		vm_page_unlock_queues();
 		return (0);
 	}
 
@@ -1568,6 +1569,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 
 	if (maskbits & PVF_WRITE)
 		vm_page_flag_clear(pg, PG_WRITEABLE);
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -4417,24 +4419,23 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 {
 	pv_entry_t pv;
 	int loops = 0;
+	boolean_t rv;
 
-	if (m->flags & PG_FICTITIOUS)
-		return (FALSE);
-
-	/*
-	 * Not found, check current mappings returning immediately
-	 */
-	for (pv = TAILQ_FIRST(&m->md.pv_list);
-	    pv;
-	    pv = TAILQ_NEXT(pv, pv_list)) {
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_page_exists_quick: page %p is not managed", m));
+	rv = FALSE;
+	vm_page_lock_queues();
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (pv->pv_pmap == pmap) {
-			return (TRUE);
+			rv = TRUE;
+			break;
 		}
 		loops++;
 		if (loops >= 16)
 			break;
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -4469,8 +4470,8 @@ int
 pmap_ts_referenced(vm_page_t m)
 {
 
-	if (m->flags & PG_FICTITIOUS)
-		return (0);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_ts_referenced: page %p is not managed", m));
 	return (pmap_clearbit(m, PVF_REF));
 }
 
@@ -4508,10 +4509,8 @@ pmap_clear_modify(vm_page_t m)
 	 */
 	if ((m->flags & PG_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
 	if (m->md.pvh_attrs & PVF_MOD)
 		pmap_clearbit(m, PVF_MOD);
-	vm_page_unlock_queues();
 }
 
 
@@ -4541,10 +4540,8 @@ pmap_clear_reference(vm_page_t m)
 
 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
-	vm_page_lock_queues();
 	if (m->md.pvh_attrs & PVF_REF)
 		pmap_clearbit(m, PVF_REF);
-	vm_page_unlock_queues();
 }
 
 
@@ -4565,11 +4562,8 @@ pmap_remove_write(vm_page_t m)
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) != 0 ||
-	    (m->flags & PG_WRITEABLE) != 0) {
-		vm_page_lock_queues();
+	    (m->flags & PG_WRITEABLE) != 0)
 		pmap_clearbit(m, PVF_WRITE);
-		vm_page_unlock_queues();
-	}
 }
 
 
@@ -4061,30 +4061,35 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 	struct md_page *pvh;
 	pv_entry_t pv;
 	int loops = 0;
+	boolean_t rv;
 
-	if (m->flags & PG_FICTITIOUS)
-		return (FALSE);
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_page_exists_quick: page %p is not managed", m));
+	rv = FALSE;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (PV_PMAP(pv) == pmap) {
-			return (TRUE);
+			rv = TRUE;
+			break;
 		}
 		loops++;
 		if (loops >= 16)
 			break;
 	}
-	if (loops < 16) {
+	if (!rv && loops < 16) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
-			if (PV_PMAP(pv) == pmap)
-				return (TRUE);
+			if (PV_PMAP(pv) == pmap) {
+				rv = TRUE;
+				break;
+			}
 			loops++;
 			if (loops >= 16)
 				break;
 		}
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -4512,11 +4517,11 @@ pmap_ts_referenced(vm_page_t m)
 	vm_offset_t va;
 	int rtval = 0;
 
-	if (m->flags & PG_FICTITIOUS)
-		return (rtval);
-	sched_pin();
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_ts_referenced: page %p is not managed", m));
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+	vm_page_lock_queues();
+	sched_pin();
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
 		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
@@ -4571,6 +4576,7 @@ pmap_ts_referenced(vm_page_t m)
 	}
 out:
 	sched_unpin();
+	vm_page_unlock_queues();
 	return (rtval);
 }
 
@@ -3449,20 +3449,23 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 {
 	pv_entry_t pv;
 	int loops = 0;
+	boolean_t rv;
 
-	if (m->flags & PG_FICTITIOUS)
-		return (FALSE);
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_page_exists_quick: page %p is not managed", m));
+	rv = FALSE;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (PV_PMAP(pv) == pmap) {
-			return TRUE;
+			rv = TRUE;
+			break;
 		}
 		loops++;
 		if (loops >= 16)
 			break;
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -3839,10 +3842,10 @@ pmap_ts_referenced(vm_page_t m)
 	pt_entry_t *pte;
 	int rtval = 0;
 
-	if (m->flags & PG_FICTITIOUS)
-		return (rtval);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_ts_referenced: page %p is not managed", m));
+	vm_page_lock_queues();
 	sched_pin();
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pvf = pv;
 		do {
@@ -3867,6 +3870,7 @@ pmap_ts_referenced(vm_page_t m)
 		PT_SET_MA(PADDR1, 0);
 
 	sched_unpin();
+	vm_page_unlock_queues();
 	return (rtval);
 }
 
@@ -1837,23 +1837,23 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 {
 	pv_entry_t pv;
 	int loops = 0;
+	boolean_t rv;
 
-	if (m->flags & PG_FICTITIOUS)
-		return FALSE;
-
-	/*
-	 * Not found, check current mappings returning immediately if found.
-	 */
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_page_exists_quick: page %p is not managed", m));
+	rv = FALSE;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (pv->pv_pmap == pmap) {
-			return TRUE;
+			rv = TRUE;
+			break;
 		}
 		loops++;
 		if (loops >= 16)
 			break;
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -1949,9 +1949,9 @@ pmap_ts_referenced(vm_page_t m)
 	pv_entry_t pv;
 	int count = 0;
 
-	if (m->flags & PG_FICTITIOUS)
-		return 0;
-
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_ts_referenced: page %p is not managed", m));
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		PMAP_LOCK(pv->pv_pmap);
 		oldpmap = pmap_switch(pv->pv_pmap);
@@ -1965,8 +1965,8 @@ pmap_ts_referenced(vm_page_t m)
 		pmap_switch(oldpmap);
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
-
-	return count;
+	vm_page_unlock_queues();
+	return (count);
 }
 
 /*
@@ -2348,20 +2348,23 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 {
 	pv_entry_t pv;
 	int loops = 0;
+	boolean_t rv;
 
-	if (m->flags & PG_FICTITIOUS)
-		return FALSE;
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_page_exists_quick: page %p is not managed", m));
+	rv = FALSE;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (pv->pv_pmap == pmap) {
-			return TRUE;
+			rv = TRUE;
+			break;
 		}
 		loops++;
 		if (loops >= 16)
 			break;
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -2594,14 +2597,16 @@ pmap_remove_write(vm_page_t m)
 int
 pmap_ts_referenced(vm_page_t m)
 {
-	if (m->flags & PG_FICTITIOUS)
-		return (0);
-
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_ts_referenced: page %p is not managed", m));
 	if (m->md.pv_flags & PV_TABLE_REF) {
+		vm_page_lock_queues();
 		m->md.pv_flags &= ~PV_TABLE_REF;
-		return 1;
+		vm_page_unlock_queues();
+		return (1);
 	}
-	return 0;
+	return (0);
 }
 
 /*
@@ -286,7 +286,7 @@ static void moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
 		    vm_prot_t, boolean_t);
 static void		moea_syncicache(vm_offset_t, vm_size_t);
 static boolean_t	moea_query_bit(vm_page_t, int);
-static u_int		moea_clear_bit(vm_page_t, int, int *);
+static u_int		moea_clear_bit(vm_page_t, int);
 static void		moea_kremove(mmu_t, vm_offset_t);
 int		moea_pte_spill(vm_offset_t);
 
@@ -1315,9 +1315,7 @@ moea_clear_reference(mmu_t mmu, vm_page_t m)
 
 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
 	    ("moea_clear_reference: page %p is not managed", m));
-	vm_page_lock_queues();
-	moea_clear_bit(m, PTE_REF, NULL);
-	vm_page_unlock_queues();
+	moea_clear_bit(m, PTE_REF);
 }
 
 void
@@ -1337,9 +1335,7 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
 	 */
 	if ((m->flags & PG_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
-	moea_clear_bit(m, PTE_CHG, NULL);
-	vm_page_unlock_queues();
+	moea_clear_bit(m, PTE_CHG);
 }
 
 /*
@@ -1409,14 +1405,10 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
 boolean_t
 moea_ts_referenced(mmu_t mmu, vm_page_t m)
 {
-	int count;
 
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (0);
-
-	count = moea_clear_bit(m, PTE_REF, NULL);
-
-	return (count);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("moea_ts_referenced: page %p is not managed", m));
+	return (moea_clear_bit(m, PTE_REF));
 }
 
 /*
@@ -1531,19 +1523,23 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 {
 	int loops;
 	struct pvo_entry *pvo;
+	boolean_t rv;
 
-	if (!moea_initialized || (m->flags & PG_FICTITIOUS))
-		return FALSE;
-
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("moea_page_exists_quick: page %p is not managed", m));
 	loops = 0;
+	rv = FALSE;
+	vm_page_lock_queues();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
-		if (pvo->pvo_pmap == pmap)
-			return (TRUE);
+		if (pvo->pvo_pmap == pmap) {
+			rv = TRUE;
+			break;
+		}
 		if (++loops >= 16)
 			break;
 	}
-
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -1557,7 +1553,7 @@ moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
 	int count;
 
 	count = 0;
-	if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
+	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
 	vm_page_lock_queues();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
@@ -2315,17 +2311,17 @@ moea_query_bit(vm_page_t m, int ptebit)
 }
 
 static u_int
-moea_clear_bit(vm_page_t m, int ptebit, int *origbit)
+moea_clear_bit(vm_page_t m, int ptebit)
 {
 	u_int	count;
 	struct	pvo_entry *pvo;
 	struct	pte *pt;
-	int	rv;
 
+	vm_page_lock_queues();
+
 	/*
 	 * Clear the cached value.
 	 */
-	rv = moea_attr_fetch(m);
 	moea_attr_clear(m, ptebit);
 
 	/*
@@ -2353,15 +2349,11 @@ moea_clear_bit(vm_page_t m, int ptebit)
 			}
 			mtx_unlock(&moea_table_mutex);
 		}
-		rv |= pvo->pvo_pte.pte.pte_lo;
 		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
 		MOEA_PVO_CHECK(pvo);	/* sanity check */
 	}
 
-	if (origbit != NULL) {
-		*origbit = rv;
-	}
-
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -358,7 +358,7 @@ static void moea64_bridge_cpu_bootstrap(mmu_t, int ap);
 static void		moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t,
 			    vm_prot_t, boolean_t);
 static boolean_t	moea64_query_bit(vm_page_t, u_int64_t);
-static u_int		moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *);
+static u_int		moea64_clear_bit(vm_page_t, u_int64_t);
 static void		moea64_kremove(mmu_t, vm_offset_t);
 static void		moea64_syncicache(pmap_t pmap, vm_offset_t va,
 			    vm_offset_t pa, vm_size_t sz);
@@ -1510,9 +1510,7 @@ moea64_clear_reference(mmu_t mmu, vm_page_t m)
 
 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
 	    ("moea64_clear_reference: page %p is not managed", m));
-	vm_page_lock_queues();
-	moea64_clear_bit(m, LPTE_REF, NULL);
-	vm_page_unlock_queues();
+	moea64_clear_bit(m, LPTE_REF);
 }
 
 void
@@ -1532,9 +1530,7 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m)
 	 */
 	if ((m->flags & PG_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
-	moea64_clear_bit(m, LPTE_CHG, NULL);
-	vm_page_unlock_queues();
+	moea64_clear_bit(m, LPTE_CHG);
 }
 
 /*
@@ -1605,14 +1601,10 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
 boolean_t
 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
 {
-	int count;
 
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (0);
-
-	count = moea64_clear_bit(m, LPTE_REF, NULL);
-
-	return (count);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("moea64_ts_referenced: page %p is not managed", m));
+	return (moea64_clear_bit(m, LPTE_REF));
 }
 
 /*
@@ -1721,21 +1713,23 @@ moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 {
 	int loops;
 	struct pvo_entry *pvo;
+	boolean_t rv;
 
-	if (!moea64_initialized || (m->flags & PG_FICTITIOUS))
-		return FALSE;
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("moea64_page_exists_quick: page %p is not managed", m));
 	loops = 0;
+	rv = FALSE;
+	vm_page_lock_queues();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
-		if (pvo->pvo_pmap == pmap)
-			return (TRUE);
+		if (pvo->pvo_pmap == pmap) {
+			rv = TRUE;
+			break;
+		}
 		if (++loops >= 16)
 			break;
 	}
-
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -1749,7 +1743,7 @@ moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
 	int count;
 
 	count = 0;
-	if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0)
+	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
 	vm_page_lock_queues();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
@@ -2445,19 +2439,17 @@ moea64_query_bit(vm_page_t m, u_int64_t ptebit)
 }
 
 static u_int
-moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
+moea64_clear_bit(vm_page_t m, u_int64_t ptebit)
 {
 	u_int	count;
 	struct	pvo_entry *pvo;
 	struct	lpte *pt;
-	uint64_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 
 	/*
 	 * Clear the cached value.
 	 */
-	rv = moea64_attr_fetch(m);
 	moea64_attr_clear(m, ptebit);
 
 	/*
@@ -2486,16 +2478,12 @@ moea64_clear_bit(vm_page_t m, u_int64_t ptebit)
 			moea64_pte_clear(pt, pvo->pvo_pmap, PVO_VADDR(pvo), ptebit);
 		}
 		}
-		rv |= pvo->pvo_pte.lpte.pte_lo;
 		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
 		MOEA_PVO_CHECK(pvo);	/* sanity check */
 		UNLOCK_TABLE();
 	}
 
-	if (origbit != NULL) {
-		*origbit = rv;
-	}
-
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -2293,17 +2293,14 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
 	pv_entry_t pv;
 	int count;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (0);
-
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("mmu_booke_ts_referenced: page %p is not managed", m));
 	count = 0;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
-		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
-			if (!PTE_ISVALID(pte))
-				goto make_sure_to_unlock;
-
+		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
+		    PTE_ISVALID(pte)) {
 			if (PTE_ISREFERENCED(pte)) {
 				mtx_lock_spin(&tlbivax_mutex);
 				tlb_miss_lock();
@@ -2320,9 +2317,9 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
 				}
 			}
 		}
-make_sure_to_unlock:
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -2394,20 +2391,23 @@ mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 {
 	pv_entry_t pv;
 	int loops;
+	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (FALSE);
-
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("mmu_booke_page_exists_quick: page %p is not managed", m));
 	loops = 0;
+	rv = FALSE;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
-		if (pv->pv_pmap == pmap)
-			return (TRUE);
-
+		if (pv->pv_pmap == pmap) {
+			rv = TRUE;
+			break;
+		}
 		if (++loops >= 16)
 			break;
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -1789,20 +1789,25 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
 {
 	struct tte *tp;
 	int loops;
+	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (FALSE);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_page_exists_quick: page %p is not managed", m));
 	loops = 0;
+	rv = FALSE;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
-		if (TTE_GET_PMAP(tp) == pm)
-			return (TRUE);
+		if (TTE_GET_PMAP(tp) == pm) {
+			rv = TRUE;
+			break;
+		}
 		if (++loops >= 16)
 			break;
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -1878,10 +1883,10 @@ pmap_ts_referenced(vm_page_t m)
 	u_long data;
 	int count;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (0);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_ts_referenced: page %p is not managed", m));
 	count = 0;
+	vm_page_lock_queues();
 	if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
 		tpf = tp;
 		do {
@@ -1895,6 +1900,7 @@ pmap_ts_referenced(vm_page_t m)
 				break;
 		} while ((tp = tpn) != NULL && tp != tpf);
 	}
+	vm_page_unlock_queues();
 	return (count);
 }
 
@@ -1738,20 +1738,23 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 {
 	pv_entry_t pv;
 	int loops = 0;
+	boolean_t rv;
 
-	if (m->flags & PG_FICTITIOUS)
-		return FALSE;
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_page_exists_quick: page %p is not managed", m));
+	rv = FALSE;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (pv->pv_pmap == pmap) {
-			return TRUE;
+			rv = TRUE;
+			break;
 		}
 		loops++;
 		if (loops >= 16)
 			break;
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -2309,17 +2312,15 @@ pmap_tte_hash_resize(pmap_t pmap)
 int
 pmap_ts_referenced(vm_page_t m)
 {
 
 	int rv;
 	pv_entry_t pv, pvf, pvn;
 	pmap_t pmap;
 	tte_t otte_data;
 
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_ts_referenced: page %p is not managed", m));
 	rv = 0;
-	if (m->flags & PG_FICTITIOUS)
-		return (rv);
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_queues();
 	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
-
 		pvf = pv;
@@ -2347,6 +2348,7 @@ pmap_ts_referenced(vm_page_t m)
 			PMAP_UNLOCK(pmap);
 		} while ((pv = pvn) != NULL && pv != pvf);
 	}
+	vm_page_unlock_queues();
 	return (rv);
 }
 
@@ -502,6 +502,8 @@ vm_page_flag_clear(vm_page_t m, unsigned short bits)
 {
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT((bits & PG_REFERENCED) == 0 || VM_OBJECT_LOCKED(m->object),
+	    ("PG_REFERENCED and !VM_OBJECT_LOCKED"));
 	m->flags &= ~bits;
 }
 
@@ -1333,8 +1335,7 @@ vm_waitpfault(void)
 /*
  *	vm_page_requeue:
  *
- *	If the given page is contained within a page queue, move it to the tail
- *	of that queue.
+ *	Move the given page to the tail of its present page queue.
  *
  *	The page queues must be locked.
  */
@@ -1344,11 +1345,12 @@ vm_page_requeue(vm_page_t m)
 	int queue = VM_PAGE_GETQUEUE(m);
 	struct vpgqueues *vpq;
 
-	if (queue != PQ_NONE) {
-		vpq = &vm_page_queues[queue];
-		TAILQ_REMOVE(&vpq->pl, m, pageq);
-		TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
-	}
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT(queue != PQ_NONE,
+	    ("vm_page_requeue: page %p is not queued", m));
+	vpq = &vm_page_queues[queue];
+	TAILQ_REMOVE(&vpq->pl, m, pageq);
+	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
 }
 
 /*
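vm_page_requeue() itself is just a remove-and-reinsert on a TAILQ. A self-contained sketch of that operation with the same <sys/queue.h> macros follows; struct page, requeue(), and the ids here are hypothetical stand-ins for the kernel's types, not its code:

	/* Hypothetical model of vm_page_requeue()'s TAILQ manipulation. */
	#include <assert.h>
	#include <sys/queue.h>

	struct page {
		int			id;
		TAILQ_ENTRY(page)	pageq;
	};
	TAILQ_HEAD(pagelist, page);

	/*
	 * Move m to the tail of its queue.  The caller is assumed to hold
	 * the queue's lock, mirroring the new mtx_assert() above.
	 */
	static void
	requeue(struct pagelist *pl, struct page *m)
	{

		TAILQ_REMOVE(pl, m, pageq);
		TAILQ_INSERT_TAIL(pl, m, pageq);
	}

	int
	main(void)
	{
		struct pagelist pl = TAILQ_HEAD_INITIALIZER(pl);
		struct page a = { .id = 1 }, b = { .id = 2 };

		TAILQ_INSERT_TAIL(&pl, &a, pageq);
		TAILQ_INSERT_TAIL(&pl, &b, pageq);
		requeue(&pl, &a);	/* a moves behind b */
		assert(TAILQ_LAST(&pl, pagelist) == &a);
		return (0);
	}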
@@ -219,6 +219,9 @@ extern struct vpglocks pa_lock[];
 *	pte mappings, nor can they be removed from their objects via
 *	the object, and such pages are also not on any PQ queue.
 *
+ *	PG_REFERENCED may be cleared only if the object containing the page is
+ *	locked.
+ *
 *	PG_WRITEABLE is set exclusively on managed pages by pmap_enter().  When it
 *	does so, the page must be VPO_BUSY.
 */
@@ -547,21 +547,17 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags)
 /*
  *	vm_pageout_object_deactivate_pages
  *
- *	deactivate enough pages to satisfy the inactive target
- *	requirements or if vm_page_proc_limit is set, then
- *	deactivate all of the pages in the object and its
- *	backing_objects.
+ *	Deactivate enough pages to satisfy the inactive target
+ *	requirements.
 *
  *	The object and map must be locked.
  */
 static void
-vm_pageout_object_deactivate_pages(pmap, first_object, desired)
-	pmap_t pmap;
-	vm_object_t first_object;
-	long desired;
+vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
+    long desired)
 {
 	vm_object_t backing_object, object;
-	vm_page_t p, next;
+	vm_page_t p;
 	int actcount, remove_mode;
 
 	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
@@ -579,61 +575,57 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired)
 		if (object->shadow_count > 1)
 			remove_mode = 1;
 		/*
-		 * scan the objects entire memory queue
+		 * Scan the object's entire memory queue.
 		 */
-		p = TAILQ_FIRST(&object->memq);
-		while (p != NULL) {
+		TAILQ_FOREACH(p, &object->memq, listq) {
 			if (pmap_resident_count(pmap) <= desired)
 				goto unlock_return;
-			next = TAILQ_NEXT(p, listq);
-			if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0) {
-				p = next;
+			if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0)
 				continue;
-			}
+			PCPU_INC(cnt.v_pdpages);
 			vm_page_lock(p);
-			vm_page_lock_queues();
-			cnt.v_pdpages++;
-			if (p->wire_count != 0 ||
-			    p->hold_count != 0 ||
+			if (p->wire_count != 0 || p->hold_count != 0 ||
 			    !pmap_page_exists_quick(pmap, p)) {
-				vm_page_unlock_queues();
 				vm_page_unlock(p);
-				p = next;
 				continue;
 			}
 			actcount = pmap_ts_referenced(p);
-			if (actcount) {
-				vm_page_flag_set(p, PG_REFERENCED);
-			} else if (p->flags & PG_REFERENCED) {
-				actcount = 1;
+			if ((p->flags & PG_REFERENCED) != 0) {
+				if (actcount == 0)
+					actcount = 1;
+				vm_page_lock_queues();
+				vm_page_flag_clear(p, PG_REFERENCED);
+				vm_page_unlock_queues();
 			}
-			if ((p->queue != PQ_ACTIVE) &&
-			    (p->flags & PG_REFERENCED)) {
+			if (p->queue != PQ_ACTIVE && actcount != 0) {
 				vm_page_activate(p);
 				p->act_count += actcount;
-				vm_page_flag_clear(p, PG_REFERENCED);
 			} else if (p->queue == PQ_ACTIVE) {
-				if ((p->flags & PG_REFERENCED) == 0) {
-					p->act_count -= min(p->act_count, ACT_DECLINE);
-					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
+				if (actcount == 0) {
+					p->act_count -= min(p->act_count,
+					    ACT_DECLINE);
+					if (!remove_mode &&
+					    (vm_pageout_algorithm ||
+					    p->act_count == 0)) {
 						pmap_remove_all(p);
 						vm_page_deactivate(p);
 					} else {
+						vm_page_lock_queues();
 						vm_page_requeue(p);
+						vm_page_unlock_queues();
 					}
 				} else {
 					vm_page_activate(p);
-					vm_page_flag_clear(p, PG_REFERENCED);
-					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
+					if (p->act_count < ACT_MAX -
+					    ACT_ADVANCE)
 						p->act_count += ACT_ADVANCE;
+					vm_page_lock_queues();
 					vm_page_requeue(p);
+					vm_page_unlock_queues();
 				}
-			} else if (p->queue == PQ_INACTIVE) {
+			} else if (p->queue == PQ_INACTIVE)
 				pmap_remove_all(p);
-			}
-			vm_page_unlock_queues();
 			vm_page_unlock(p);
-			p = next;
 		}
 		if ((backing_object = object->backing_object) == NULL)
 			goto unlock_return;
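The loop rewrite above also replaces a hand-maintained first/next cursor with TAILQ_FOREACH(), which is valid because the body never removes the page it is visiting. A small standalone sketch of the before/after shapes, with hypothetical types standing in for the kernel's:

	/* Hypothetical model of the TAILQ_FOREACH() simplification above. */
	#include <stdio.h>
	#include <sys/queue.h>

	struct page {
		int			id;
		TAILQ_ENTRY(page)	listq;
	};
	TAILQ_HEAD(memq, page);

	int
	main(void)
	{
		struct memq q = TAILQ_HEAD_INITIALIZER(q);
		struct page pages[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
		struct page *p, *next;
		int i;

		for (i = 0; i < 3; i++)
			TAILQ_INSERT_TAIL(&q, &pages[i], listq);

		/* Old shape: explicit cursor; every path must do "p = next". */
		p = TAILQ_FIRST(&q);
		while (p != NULL) {
			next = TAILQ_NEXT(p, listq);
			printf("old: %d\n", p->id);
			p = next;
		}

		/*
		 * New shape: the macro advances the cursor, so "continue"
		 * needs no bookkeeping.  (Unsafe only if the body frees p,
		 * which the rewritten pageout loop never does.)
		 */
		TAILQ_FOREACH(p, &q, listq)
			printf("new: %d\n", p->id);
		return (0);
	}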