Roughly half of a typical pmap_mincore() implementation is machine-
independent code.  Move this code into mincore(), and eliminate the
page queues lock from pmap_mincore().
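
As an illustrative sketch (names as in the hunks below), the interface
becomes

    int pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa);

When the mapping is valid, managed, and not both referenced and
modified, the pmap returns with the page's physical address stored in
*locked_pa and the corresponding page lock held; otherwise *locked_pa
is left zero and no lock is held.  A caller therefore looks roughly
like:

    vm_paddr_t locked_pa = 0;
    int mincoreinfo;

    mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
    if (locked_pa != 0) {
        /* Examine PHYS_TO_VM_PAGE(locked_pa) and its object here. */
        PA_UNLOCK(locked_pa);
    }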

Push down the page queues lock into pmap_clear_modify(),
pmap_clear_reference(), and pmap_is_modified().  Assert that these
functions are never passed an unmanaged page.
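
A rough sketch of the shape that these functions now share, drawn from
the hunks below (the pv-list walk itself differs from pmap to pmap):

    void
    pmap_clear_modify(vm_page_t m)
    {

        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
            ("pmap_clear_modify: page %p is not managed", m));
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        KASSERT((m->oflags & VPO_BUSY) == 0,
            ("pmap_clear_modify: page %p is busy", m));

        /*
         * If the page is not PG_WRITEABLE, then no mappings can be
         * modified.  If the object is locked and the page is not
         * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
         */
        if ((m->flags & PG_WRITEABLE) == 0)
            return;
        vm_page_lock_queues();
        /* ... walk the page's pv list and clear the modified bit ... */
        vm_page_unlock_queues();
    }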

Eliminate an inaccurate comment from powerpc/powerpc/mmu_if.m:
Contrary to what the comment says, pmap_mincore() is not simply an
optimization.  Without a complete pmap_mincore() implementation,
mincore() cannot return either MINCORE_MODIFIED or MINCORE_REFERENCED
because only the pmap can provide this information.

Eliminate the page queues lock from vfs_setdirty_locked_object(),
vm_pageout_clean(), vm_object_page_collect_flush(), and
vm_object_page_clean().  Generally speaking, these are all accesses
to the page's dirty field, which are synchronized by the containing
vm object's lock.
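
For example, the dirty test in the write-clustering loops is now
performed like this (sketch based on the vm_pageout_clean() hunk below;
the page lock is still taken for the queue and hold_count checks):

    vm_page_lock(p);
    vm_page_test_dirty(p);    /* dirty is protected by the object lock */
    if (p->dirty == 0 || p->queue != PQ_INACTIVE || p->hold_count != 0) {
        vm_page_unlock(p);
        break;
    }
    vm_page_unlock(p);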

Reduce the scope of the page queues lock in vm_object_madvise() and
vm_page_dontneed().
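
In vm_page_dontneed(), for instance, the static "dnweight" counter
becomes a per-CPU counter and the queues lock now covers only the flag
update (abridged sketch of the resulting fragment):

    dnw = PCPU_GET(dnweight);
    PCPU_INC(dnweight);
    /* ... occasionally leave the page alone, as before ... */
    vm_page_lock_queues();
    vm_page_flag_clear(m, PG_REFERENCED);
    vm_page_unlock_queues();
    pmap_clear_reference(m);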

Reviewed by:	kib (an earlier version)
Alan Cox 2010-05-24 14:26:57 +00:00
parent 1cfc8fc759
commit 567e51e18c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=208504
21 changed files with 598 additions and 394 deletions


@ -4128,12 +4128,25 @@ pmap_remove_pages(pmap_t pmap)
boolean_t
pmap_is_modified(vm_page_t m)
{
boolean_t rv;
if (m->flags & PG_FICTITIOUS)
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_is_modified: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return (FALSE);
if (pmap_is_modified_pvh(&m->md))
return (TRUE);
return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
vm_page_lock_queues();
rv = pmap_is_modified_pvh(&m->md) ||
pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
vm_page_unlock_queues();
return (rv);
}
/*
@ -4384,9 +4397,20 @@ pmap_clear_modify(vm_page_t m)
pt_entry_t oldpte, *pte;
vm_offset_t va;
if ((m->flags & PG_FICTITIOUS) != 0)
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
return;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
vm_page_lock_queues();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
pmap = PV_PMAP(pv);
@ -4432,6 +4456,7 @@ pmap_clear_modify(vm_page_t m)
}
PMAP_UNLOCK(pmap);
}
vm_page_unlock_queues();
}
/*
@ -4449,9 +4474,9 @@ pmap_clear_reference(vm_page_t m)
pt_entry_t *pte;
vm_offset_t va;
if ((m->flags & PG_FICTITIOUS) != 0)
return;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
pmap = PV_PMAP(pv);
@ -4488,6 +4513,7 @@ pmap_clear_reference(vm_page_t m)
}
PMAP_UNLOCK(pmap);
}
vm_page_unlock_queues();
}
/*
@ -4897,70 +4923,49 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
* perform the pmap work for mincore
*/
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
pd_entry_t *pdep;
pt_entry_t pte;
vm_paddr_t pa;
vm_page_t m;
int val = 0;
int val;
PMAP_LOCK(pmap);
retry:
pdep = pmap_pde(pmap, addr);
if (pdep != NULL && (*pdep & PG_V)) {
if (*pdep & PG_PS) {
pte = *pdep;
val = MINCORE_SUPER;
/* Compute the physical address of the 4KB page. */
pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
PG_FRAME;
val = MINCORE_SUPER;
} else {
pte = *pmap_pde_to_pte(pdep, addr);
pa = pte & PG_FRAME;
val = 0;
}
} else {
pte = 0;
pa = 0;
val = 0;
}
PMAP_UNLOCK(pmap);
if (pte != 0) {
if ((pte & PG_V) != 0) {
val |= MINCORE_INCORE;
if ((pte & PG_MANAGED) == 0)
return (val);
m = PHYS_TO_VM_PAGE(pa);
/*
* Modified by us
*/
if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
else {
/*
* Modified by someone else
*/
vm_page_lock_queues();
if (m->dirty || pmap_is_modified(m))
val |= MINCORE_MODIFIED_OTHER;
vm_page_unlock_queues();
}
/*
* Referenced by us
*/
if (pte & PG_A)
val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
else {
/*
* Referenced by someone else
*/
vm_page_lock_queues();
if ((m->flags & PG_REFERENCED) ||
pmap_is_referenced(m))
val |= MINCORE_REFERENCED_OTHER;
vm_page_unlock_queues();
}
}
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
if ((pte & PG_A) != 0)
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
}
if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
(MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
(pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
goto retry;
} else
PA_UNLOCK_COND(*locked_pa);
PMAP_UNLOCK(pmap);
return (val);
}


@ -4475,6 +4475,8 @@ boolean_t
pmap_is_modified(vm_page_t m)
{
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_is_modified: page %p is not managed", m));
if (m->md.pvh_attrs & PVF_MOD)
return (TRUE);
@ -4489,8 +4491,23 @@ void
pmap_clear_modify(vm_page_t m)
{
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no mappings can be modified.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
if (m->md.pvh_attrs & PVF_MOD)
pmap_clearbit(m, PVF_MOD);
vm_page_unlock_queues();
}
@ -4517,8 +4534,12 @@ void
pmap_clear_reference(vm_page_t m)
{
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
if (m->md.pvh_attrs & PVF_REF)
pmap_clearbit(m, PVF_REF);
vm_page_unlock_queues();
}
@ -4551,7 +4572,7 @@ pmap_remove_write(vm_page_t m)
* perform the pmap work for mincore
*/
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
printf("pmap_mincore()\n");


@ -4294,12 +4294,25 @@ pmap_remove_pages(pmap_t pmap)
boolean_t
pmap_is_modified(vm_page_t m)
{
boolean_t rv;
if (m->flags & PG_FICTITIOUS)
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_is_modified: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return (FALSE);
if (pmap_is_modified_pvh(&m->md))
return (TRUE);
return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
vm_page_lock_queues();
rv = pmap_is_modified_pvh(&m->md) ||
pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
vm_page_unlock_queues();
return (rv);
}
/*
@ -4563,9 +4576,20 @@ pmap_clear_modify(vm_page_t m)
pt_entry_t oldpte, *pte;
vm_offset_t va;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0)
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
sched_pin();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
@ -4623,6 +4647,7 @@ pmap_clear_modify(vm_page_t m)
PMAP_UNLOCK(pmap);
}
sched_unpin();
vm_page_unlock_queues();
}
/*
@ -4640,9 +4665,9 @@ pmap_clear_reference(vm_page_t m)
pt_entry_t *pte;
vm_offset_t va;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0)
return;
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
sched_pin();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
@ -4686,6 +4711,7 @@ pmap_clear_reference(vm_page_t m)
PMAP_UNLOCK(pmap);
}
sched_unpin();
vm_page_unlock_queues();
}
/*
@ -4955,72 +4981,51 @@ pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
* perform the pmap work for mincore
*/
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
pd_entry_t *pdep;
pt_entry_t *ptep, pte;
vm_paddr_t pa;
vm_page_t m;
int val = 0;
int val;
PMAP_LOCK(pmap);
retry:
pdep = pmap_pde(pmap, addr);
if (*pdep != 0) {
if (*pdep & PG_PS) {
pte = *pdep;
val = MINCORE_SUPER;
/* Compute the physical address of the 4KB page. */
pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
PG_FRAME;
val = MINCORE_SUPER;
} else {
ptep = pmap_pte(pmap, addr);
pte = *ptep;
pmap_pte_release(ptep);
pa = pte & PG_FRAME;
val = 0;
}
} else {
pte = 0;
pa = 0;
val = 0;
}
PMAP_UNLOCK(pmap);
if (pte != 0) {
if ((pte & PG_V) != 0) {
val |= MINCORE_INCORE;
if ((pte & PG_MANAGED) == 0)
return (val);
m = PHYS_TO_VM_PAGE(pa);
/*
* Modified by us
*/
if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
else {
/*
* Modified by someone else
*/
vm_page_lock_queues();
if (m->dirty || pmap_is_modified(m))
val |= MINCORE_MODIFIED_OTHER;
vm_page_unlock_queues();
}
/*
* Referenced by us
*/
if (pte & PG_A)
val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
else {
/*
* Referenced by someone else
*/
vm_page_lock_queues();
if ((m->flags & PG_REFERENCED) ||
pmap_is_referenced(m))
val |= MINCORE_REFERENCED_OTHER;
vm_page_unlock_queues();
}
}
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
if ((pte & PG_A) != 0)
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
}
if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
(MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
(pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
goto retry;
} else
PA_UNLOCK_COND(*locked_pa);
PMAP_UNLOCK(pmap);
return (val);
}


@ -3663,12 +3663,21 @@ pmap_is_modified(vm_page_t m)
pmap_t pmap;
boolean_t rv;
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_is_modified: page %p is not managed", m));
rv = FALSE;
if (m->flags & PG_FICTITIOUS)
return (rv);
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
sched_pin();
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
@ -3681,6 +3690,7 @@ pmap_is_modified(vm_page_t m)
if (*PMAP1)
PT_SET_MA(PADDR1, 0);
sched_unpin();
vm_page_unlock_queues();
return (rv);
}
@ -3887,9 +3897,20 @@ pmap_clear_modify(vm_page_t m)
pmap_t pmap;
pt_entry_t *pte;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0)
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
sched_pin();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
@ -3907,6 +3928,7 @@ pmap_clear_modify(vm_page_t m)
PMAP_UNLOCK(pmap);
}
sched_unpin();
vm_page_unlock_queues();
}
/*
@ -3921,9 +3943,9 @@ pmap_clear_reference(vm_page_t m)
pmap_t pmap;
pt_entry_t *pte;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0)
return;
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
sched_pin();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
@ -3941,6 +3963,7 @@ pmap_clear_reference(vm_page_t m)
PMAP_UNLOCK(pmap);
}
sched_unpin();
vm_page_unlock_queues();
}
/*
@ -4133,60 +4156,36 @@ pmap_change_attr(va, size, mode)
* perform the pmap work for mincore
*/
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
pt_entry_t *ptep, pte;
vm_page_t m;
int val = 0;
vm_paddr_t pa;
int val;
PMAP_LOCK(pmap);
retry:
ptep = pmap_pte(pmap, addr);
pte = (ptep != NULL) ? PT_GET(ptep) : 0;
pmap_pte_release(ptep);
PMAP_UNLOCK(pmap);
if (pte != 0) {
vm_paddr_t pa;
val = MINCORE_INCORE;
if ((pte & PG_MANAGED) == 0)
return val;
val = 0;
if ((pte & PG_V) != 0) {
val |= MINCORE_INCORE;
if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
if ((pte & PG_A) != 0)
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
}
if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
(MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
(pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
pa = pte & PG_FRAME;
m = PHYS_TO_VM_PAGE(pa);
/*
* Modified by us
*/
if (pte & PG_M)
val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
else {
/*
* Modified by someone else
*/
vm_page_lock_queues();
if (m->dirty || pmap_is_modified(m))
val |= MINCORE_MODIFIED_OTHER;
vm_page_unlock_queues();
}
/*
* Referenced by us
*/
if (pte & PG_A)
val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
else {
/*
* Referenced by someone else
*/
vm_page_lock_queues();
if ((m->flags & PG_REFERENCED) ||
pmap_is_referenced(m))
val |= MINCORE_REFERENCED_OTHER;
vm_page_unlock_queues();
}
}
return val;
/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
goto retry;
} else
PA_UNLOCK_COND(*locked_pa);
PMAP_UNLOCK(pmap);
return (val);
}
void


@ -1981,10 +1981,20 @@ pmap_is_modified(vm_page_t m)
pv_entry_t pv;
boolean_t rv;
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_is_modified: page %p is not managed", m));
rv = FALSE;
if (m->flags & PG_FICTITIOUS)
return (rv);
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* is clear, no PTEs can be dirty.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
PMAP_LOCK(pv->pv_pmap);
oldpmap = pmap_switch(pv->pv_pmap);
@ -1996,7 +2006,7 @@ pmap_is_modified(vm_page_t m)
if (rv)
break;
}
vm_page_unlock_queues();
return (rv);
}
@ -2058,9 +2068,20 @@ pmap_clear_modify(vm_page_t m)
pmap_t oldpmap;
pv_entry_t pv;
if (m->flags & PG_FICTITIOUS)
return;
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can be modified.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
PMAP_LOCK(pv->pv_pmap);
oldpmap = pmap_switch(pv->pv_pmap);
@ -2073,6 +2094,7 @@ pmap_clear_modify(vm_page_t m)
pmap_switch(oldpmap);
PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_unlock_queues();
}
/*
@ -2087,9 +2109,9 @@ pmap_clear_reference(vm_page_t m)
pmap_t oldpmap;
pv_entry_t pv;
if (m->flags & PG_FICTITIOUS)
return;
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
PMAP_LOCK(pv->pv_pmap);
oldpmap = pmap_switch(pv->pv_pmap);
@ -2102,6 +2124,7 @@ pmap_clear_reference(vm_page_t m)
pmap_switch(oldpmap);
PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_unlock_queues();
}
/*
@ -2178,13 +2201,15 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
* perform the pmap work for mincore
*/
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
pmap_t oldpmap;
struct ia64_lpte *pte, tpte;
int val = 0;
vm_paddr_t pa;
int val;
PMAP_LOCK(pmap);
retry:
oldpmap = pmap_switch(pmap);
pte = pmap_find_vhpt(addr);
if (pte != NULL) {
@ -2192,53 +2217,27 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
pte = &tpte;
}
pmap_switch(oldpmap);
PMAP_UNLOCK(pmap);
if (pte == NULL)
return 0;
if (pmap_present(pte)) {
vm_page_t m;
vm_offset_t pa;
val = MINCORE_INCORE;
if (!pmap_managed(pte))
return val;
if (pte == NULL || !pmap_present(pte)) {
val = 0;
goto out;
}
val = MINCORE_INCORE;
if (pmap_dirty(pte))
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
if (pmap_accessed(pte))
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
(MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
pmap_managed(pte)) {
pa = pmap_ppn(pte);
m = PHYS_TO_VM_PAGE(pa);
/*
* Modified by us
*/
if (pmap_dirty(pte))
val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
else {
/*
* Modified by someone
*/
vm_page_lock_queues();
if (pmap_is_modified(m))
val |= MINCORE_MODIFIED_OTHER;
vm_page_unlock_queues();
}
/*
* Referenced by us
*/
if (pmap_accessed(pte))
val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
else {
/*
* Referenced by someone
*/
vm_page_lock_queues();
if (pmap_is_referenced(m))
val |= MINCORE_REFERENCED_OTHER;
vm_page_unlock_queues();
}
}
return val;
/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
goto retry;
} else
out:
PA_UNLOCK_COND(*locked_pa);
PMAP_UNLOCK(pmap);
return (val);
}
void


@ -2443,7 +2443,6 @@ vfs_setdirty_locked_object(struct buf *bp)
vm_offset_t boffset;
vm_offset_t eoffset;
vm_page_lock_queues();
/*
* test the pages to see if they have been modified directly
* by users through the VM system.
@ -2469,7 +2468,6 @@ vfs_setdirty_locked_object(struct buf *bp)
}
eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
vm_page_unlock_queues();
/*
* Fit it to the buffer.
*/


@ -2586,13 +2586,27 @@ pmap_ts_referenced(vm_page_t m)
boolean_t
pmap_is_modified(vm_page_t m)
{
if (m->flags & PG_FICTITIOUS)
return FALSE;
boolean_t rv;
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_is_modified: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* is clear, no PTEs can have PTE_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return (FALSE);
vm_page_lock_queues();
if (m->md.pv_flags & PV_TABLE_MOD)
return TRUE;
rv = TRUE;
else
return pmap_testbit(m, PTE_M);
rv = pmap_testbit(m, PTE_M);
vm_page_unlock_queues();
return (rv);
}
/* N/C */
@ -2625,13 +2639,26 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
void
pmap_clear_modify(vm_page_t m)
{
if (m->flags & PG_FICTITIOUS)
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can have PTE_M set.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
return;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
vm_page_lock_queues();
if (m->md.pv_flags & PV_TABLE_MOD) {
pmap_changebit(m, PTE_M, FALSE);
m->md.pv_flags &= ~PV_TABLE_MOD;
}
vm_page_unlock_queues();
}
/*
@ -2656,13 +2683,14 @@ pmap_is_referenced(vm_page_t m)
void
pmap_clear_reference(vm_page_t m)
{
if (m->flags & PG_FICTITIOUS)
return;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
if (m->md.pv_flags & PV_TABLE_REF) {
m->md.pv_flags &= ~PV_TABLE_REF;
}
vm_page_unlock_queues();
}
/*
@ -2733,51 +2761,47 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
* perform the pmap work for mincore
*/
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
pt_entry_t *ptep, pte;
vm_page_t m;
int val = 0;
vm_offset_t pa;
int val;
boolean_t managed;
PMAP_LOCK(pmap);
retry:
ptep = pmap_pte(pmap, addr);
pte = (ptep != NULL) ? *ptep : 0;
PMAP_UNLOCK(pmap);
if (mips_pg_v(pte)) {
vm_offset_t pa;
val = MINCORE_INCORE;
pa = mips_tlbpfn_to_paddr(pte);
if (!page_is_managed(pa))
return val;
m = PHYS_TO_VM_PAGE(pa);
/*
* Modified by us
*/
if (pte & PTE_M)
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
/*
* Modified by someone
*/
else {
vm_page_lock_queues();
if (m->dirty || pmap_is_modified(m))
val |= MINCORE_MODIFIED_OTHER;
vm_page_unlock_queues();
}
/*
* Referenced by us or someone
*/
vm_page_lock_queues();
if ((m->flags & PG_REFERENCED) || pmap_is_referenced(m))
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
vm_page_unlock_queues();
if (!mips_pg_v(pte)) {
val = 0;
goto out;
}
return val;
val = MINCORE_INCORE;
if ((pte & PTE_M) != 0)
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
pa = mips_tlbpfn_to_paddr(pte);
managed = page_is_managed(pa);
if (managed) {
/*
* This may falsely report the given address as
* MINCORE_REFERENCED. Unfortunately, due to the lack of
* per-PTE reference information, it is impossible to
* determine if the address is MINCORE_REFERENCED.
*/
m = PHYS_TO_VM_PAGE(pa);
if ((m->flags & PG_REFERENCED) != 0)
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
}
if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
(MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
goto retry;
} else
out:
PA_UNLOCK_COND(*locked_pa);
PMAP_UNLOCK(pmap);
return (val);
}
void


@ -1290,29 +1290,57 @@ moea_is_referenced(mmu_t mmu, vm_page_t m)
boolean_t
moea_is_modified(mmu_t mmu, vm_page_t m)
{
boolean_t rv;
if ((m->flags & (PG_FICTITIOUS |PG_UNMANAGED)) != 0)
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("moea_is_modified: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* is clear, no PTEs can have PTE_CHG set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return (FALSE);
return (moea_query_bit(m, PTE_CHG));
vm_page_lock_queues();
rv = moea_query_bit(m, PTE_CHG);
vm_page_unlock_queues();
return (rv);
}
void
moea_clear_reference(mmu_t mmu, vm_page_t m)
{
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return;
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("moea_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
moea_clear_bit(m, PTE_REF, NULL);
vm_page_unlock_queues();
}
void
moea_clear_modify(mmu_t mmu, vm_page_t m)
{
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("moea_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
KASSERT((m->oflags & VPO_BUSY) == 0,
("moea_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can have PTE_CHG
* set. If the object containing the page is locked and the page is
* not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
moea_clear_bit(m, PTE_CHG, NULL);
vm_page_unlock_queues();
}
/*


@ -1485,29 +1485,57 @@ moea64_is_referenced(mmu_t mmu, vm_page_t m)
boolean_t
moea64_is_modified(mmu_t mmu, vm_page_t m)
{
boolean_t rv;
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("moea64_is_modified: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* is clear, no PTEs can have LPTE_CHG set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return (FALSE);
return (moea64_query_bit(m, LPTE_CHG));
vm_page_lock_queues();
rv = moea64_query_bit(m, LPTE_CHG);
vm_page_unlock_queues();
return (rv);
}
void
moea64_clear_reference(mmu_t mmu, vm_page_t m)
{
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return;
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("moea64_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
moea64_clear_bit(m, LPTE_REF, NULL);
vm_page_unlock_queues();
}
void
moea64_clear_modify(mmu_t mmu, vm_page_t m)
{
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("moea64_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
KASSERT((m->oflags & VPO_BUSY) == 0,
("moea64_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can have LPTE_CHG
* set. If the object containing the page is locked and the page is
* not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
moea64_clear_bit(m, LPTE_CHG, NULL);
vm_page_unlock_queues();
}
/*


@ -292,7 +292,8 @@ static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
int);
static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
vm_paddr_t *);
static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
@ -2155,26 +2156,35 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
{
pte_t *pte;
pv_entry_t pv;
boolean_t rv;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return (FALSE);
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("mmu_booke_is_modified: page %p is not managed", m));
rv = FALSE;
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* is clear, no PTEs can be modified.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
if (!PTE_ISVALID(pte))
goto make_sure_to_unlock;
if (PTE_ISMODIFIED(pte)) {
PMAP_UNLOCK(pv->pv_pmap);
return (TRUE);
}
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
PTE_ISVALID(pte)) {
if (PTE_ISMODIFIED(pte))
rv = TRUE;
}
make_sure_to_unlock:
PMAP_UNLOCK(pv->pv_pmap);
if (rv)
break;
}
return (FALSE);
vm_page_unlock_queues();
return (rv);
}
/*
@ -2224,16 +2234,24 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
pte_t *pte;
pv_entry_t pv;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return;
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("mmu_booke_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
KASSERT((m->oflags & VPO_BUSY) == 0,
("mmu_booke_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can be modified.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
if (!PTE_ISVALID(pte))
goto make_sure_to_unlock;
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
PTE_ISVALID(pte)) {
mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();
@ -2246,9 +2264,9 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);
}
make_sure_to_unlock:
PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_unlock_queues();
}
/*
@ -2310,16 +2328,13 @@ mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
pte_t *pte;
pv_entry_t pv;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return;
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("mmu_booke_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
if (!PTE_ISVALID(pte))
goto make_sure_to_unlock;
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
PTE_ISVALID(pte)) {
if (PTE_ISREFERENCED(pte)) {
mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();
@ -2331,9 +2346,9 @@ mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
mtx_unlock_spin(&tlbivax_mutex);
}
}
make_sure_to_unlock:
PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_unlock_queues();
}
/*
@ -2632,7 +2647,8 @@ mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
* Perform the pmap work for mincore.
*/
static int
mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
vm_paddr_t *locked_pa)
{
TODO;


@ -90,7 +90,8 @@ CODE {
return;
}
static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
vm_paddr_t *locked_pa)
{
return (0);
}
@ -633,12 +634,11 @@ METHOD void zero_page_idle {
/**
* @brief Extract mincore(2) information from a mapping. This routine is
* optional and is an optimisation: the mincore code will call is_modified
* and ts_referenced if no result is returned.
* @brief Extract mincore(2) information from a mapping.
*
* @param _pmap physical map
* @param _addr page virtual address
* @param _locked_pa page physical address
*
* @retval 0 no result
* @retval non-zero mincore(2) flag values
@ -647,6 +647,7 @@ METHOD int mincore {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _addr;
vm_paddr_t *_locked_pa;
} DEFAULT mmu_null_mincore;


@ -360,11 +360,11 @@ pmap_zero_page_idle(vm_page_t m)
}
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
return (MMU_MINCORE(mmu_obj, pmap, addr));
return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
}
void


@ -1898,17 +1898,32 @@ boolean_t
pmap_is_modified(vm_page_t m)
{
struct tte *tp;
boolean_t rv;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return (FALSE);
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_is_modified: page %p is not managed", m));
rv = FALSE;
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* is clear, no TTEs can have TD_W set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
if ((tp->tte_data & TD_W) != 0)
return (TRUE);
if ((tp->tte_data & TD_W) != 0) {
rv = TRUE;
break;
}
}
return (FALSE);
vm_page_unlock_queues();
return (rv);
}
/*
@ -1951,9 +1966,20 @@ pmap_clear_modify(vm_page_t m)
struct tte *tp;
u_long data;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no TTEs can have TD_W set.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
@ -1961,6 +1987,7 @@ pmap_clear_modify(vm_page_t m)
if ((data & TD_W) != 0)
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
vm_page_unlock_queues();
}
void
@ -1969,9 +1996,9 @@ pmap_clear_reference(vm_page_t m)
struct tte *tp;
u_long data;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return;
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
if ((tp->tte_data & TD_PV) == 0)
continue;
@ -1979,6 +2006,7 @@ pmap_clear_reference(vm_page_t m)
if ((data & TD_REF) != 0)
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
vm_page_unlock_queues();
}
void
@ -2014,7 +2042,7 @@ pmap_remove_write(vm_page_t m)
}
int
pmap_mincore(pmap_t pm, vm_offset_t addr)
pmap_mincore(pmap_t pm, vm_offset_t addr, vm_paddr_t *locked_pa)
{
/* TODO; */


@ -966,14 +966,33 @@ void
pmap_clear_modify(vm_page_t m)
{
KDPRINTF("pmap_clear_modify(0x%lx)\n", VM_PAGE_TO_PHYS(m));
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no TTEs can have VTD_W set.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
return;
vm_page_lock_queues();
tte_clear_phys_bit(m, VTD_W);
vm_page_unlock_queues();
}
void
pmap_clear_reference(vm_page_t m)
{
KDPRINTF("pmap_clear_reference(0x%lx)\n", VM_PAGE_TO_PHYS(m));
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
tte_clear_phys_bit(m, VTD_REF);
vm_page_unlock_queues();
}
void
@ -1589,8 +1608,24 @@ pmap_invalidate_all(pmap_t pmap)
boolean_t
pmap_is_modified(vm_page_t m)
{
boolean_t rv;
return (tte_get_phys_bit(m, VTD_W));
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_is_modified: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* is clear, no TTEs can have VTD_W set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
return (FALSE);
vm_page_lock_queues();
rv = tte_get_phys_bit(m, VTD_W);
vm_page_unlock_queues();
return (rv);
}
@ -1652,7 +1687,7 @@ pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
}
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
return (0);
}


@ -143,6 +143,7 @@ struct pcpu {
long pc_cp_time[CPUSTATES]; /* statclock ticks */
struct device *pc_device;
void *pc_netisr; /* netisr SWI cookie */
int pc_dnweight; /* vm_page_dontneed() */
/*
* Stuff for read mostly lock


@ -122,6 +122,8 @@ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t va);
boolean_t pmap_is_referenced(vm_page_t m);
boolean_t pmap_ts_referenced(vm_page_t m);
vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
int pmap_mincore(pmap_t pmap, vm_offset_t addr,
vm_paddr_t *locked_pa);
void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size);
boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
@ -141,7 +143,6 @@ void pmap_sync_icache(pmap_t, vm_offset_t, vm_size_t);
void pmap_zero_page(vm_page_t);
void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_zero_page_idle(vm_page_t);
int pmap_mincore(pmap_t pmap, vm_offset_t addr);
void pmap_activate(struct thread *td);
#define pmap_resident_count(pm) ((pm)->pm_stats.resident_count)


@ -772,8 +772,13 @@ mincore(td, uap)
int vecindex, lastvecindex;
vm_map_entry_t current;
vm_map_entry_t entry;
vm_object_t object;
vm_paddr_t locked_pa;
vm_page_t m;
vm_pindex_t pindex;
int mincoreinfo;
unsigned int timestamp;
boolean_t locked;
/*
* Make sure that the addresses presented are valid for user
@ -847,38 +852,74 @@ mincore(td, uap)
* it can provide info as to whether we are the
* one referencing or modifying the page.
*/
mincoreinfo = pmap_mincore(pmap, addr);
if (!mincoreinfo) {
vm_pindex_t pindex;
vm_ooffset_t offset;
vm_page_t m;
object = NULL;
locked_pa = 0;
retry:
m = NULL;
mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
if (locked_pa != 0) {
/*
* calculate the page index into the object
* The page is mapped by this process but not
* both accessed and modified. It is also
* managed. Acquire the object lock so that
* other mappings might be examined.
*/
offset = current->offset + (addr - current->start);
pindex = OFF_TO_IDX(offset);
VM_OBJECT_LOCK(current->object.vm_object);
m = vm_page_lookup(current->object.vm_object,
pindex);
/*
* if the page is resident, then gather information about
* it.
*/
if (m != NULL && m->valid != 0) {
mincoreinfo = MINCORE_INCORE;
vm_page_lock(m);
vm_page_lock_queues();
if (m->dirty ||
pmap_is_modified(m))
mincoreinfo |= MINCORE_MODIFIED_OTHER;
if ((m->flags & PG_REFERENCED) ||
pmap_is_referenced(m))
mincoreinfo |= MINCORE_REFERENCED_OTHER;
vm_page_unlock_queues();
m = PHYS_TO_VM_PAGE(locked_pa);
if (m->object != object) {
if (object != NULL)
VM_OBJECT_UNLOCK(object);
object = m->object;
locked = VM_OBJECT_TRYLOCK(object);
vm_page_unlock(m);
if (!locked) {
VM_OBJECT_LOCK(object);
vm_page_lock(m);
goto retry;
}
} else
vm_page_unlock(m);
KASSERT(m->valid == VM_PAGE_BITS_ALL,
("mincore: page %p is mapped but invalid",
m));
} else if (mincoreinfo == 0) {
/*
* The page is not mapped by this process. If
* the object implements managed pages, then
* determine if the page is resident so that
* the mappings might be examined.
*/
if (current->object.vm_object != object) {
if (object != NULL)
VM_OBJECT_UNLOCK(object);
object = current->object.vm_object;
VM_OBJECT_LOCK(object);
}
if (object->type == OBJT_DEFAULT ||
object->type == OBJT_SWAP ||
object->type == OBJT_VNODE) {
pindex = OFF_TO_IDX(current->offset +
(addr - current->start));
m = vm_page_lookup(object, pindex);
if (m != NULL && m->valid == 0)
m = NULL;
if (m != NULL)
mincoreinfo = MINCORE_INCORE;
}
VM_OBJECT_UNLOCK(current->object.vm_object);
}
if (m != NULL) {
/* Examine other mappings to the page. */
if (m->dirty == 0 && pmap_is_modified(m))
vm_page_dirty(m);
if (m->dirty != 0)
mincoreinfo |= MINCORE_MODIFIED_OTHER;
vm_page_lock_queues();
if ((m->flags & PG_REFERENCED) != 0 ||
pmap_is_referenced(m))
mincoreinfo |= MINCORE_REFERENCED_OTHER;
vm_page_unlock_queues();
}
if (object != NULL)
VM_OBJECT_UNLOCK(object);
/*
* subyte may page fault. In case it needs to modify


@ -817,19 +817,13 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
++tscan;
continue;
}
vm_page_lock(p);
vm_page_lock_queues();
vm_page_test_dirty(p);
if (p->dirty == 0) {
vm_page_unlock_queues();
vm_page_unlock(p);
if (--scanlimit == 0)
break;
++tscan;
continue;
}
vm_page_unlock_queues();
vm_page_unlock(p);
/*
* If we have been asked to skip nosync pages and
* this is a nosync page, we can't continue.
@ -900,17 +894,11 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
continue;
}
vm_page_lock(p);
vm_page_lock_queues();
vm_page_test_dirty(p);
if (p->dirty == 0) {
vm_page_unlock_queues();
vm_page_unlock(p);
p->oflags &= ~VPO_CLEANCHK;
continue;
}
vm_page_unlock_queues();
vm_page_unlock(p);
/*
* If we have been asked to skip nosync pages and this is a
* nosync page, skip it. Note that the object flags were
@ -977,17 +965,11 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
(tp->oflags & VPO_CLEANCHK) == 0) ||
(tp->busy != 0))
break;
vm_page_lock(tp);
vm_page_lock_queues();
vm_page_test_dirty(tp);
if (tp->dirty == 0) {
vm_page_unlock(tp);
vm_page_unlock_queues();
tp->oflags &= ~VPO_CLEANCHK;
break;
}
vm_page_unlock(tp);
vm_page_unlock_queues();
maf[ i - 1 ] = tp;
maxf++;
continue;
@ -1007,17 +989,11 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
(tp->oflags & VPO_CLEANCHK) == 0) ||
(tp->busy != 0))
break;
vm_page_lock(tp);
vm_page_lock_queues();
vm_page_test_dirty(tp);
if (tp->dirty == 0) {
vm_page_unlock_queues();
vm_page_unlock(tp);
tp->oflags &= ~VPO_CLEANCHK;
break;
}
vm_page_unlock_queues();
vm_page_unlock(tp);
mab[ i - 1 ] = tp;
maxb++;
continue;
@ -1217,21 +1193,23 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
* If the page is not in a normal state, skip it.
*/
vm_page_lock(m);
vm_page_lock_queues();
if (m->hold_count != 0 || m->wire_count != 0) {
vm_page_unlock_queues();
vm_page_unlock(m);
goto unlock_tobject;
}
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("vm_object_madvise: page %p is not managed", m));
if ((m->oflags & VPO_BUSY) || m->busy) {
if (advise == MADV_WILLNEED)
if (advise == MADV_WILLNEED) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_lock_queues();
vm_page_flag_set(m, PG_REFERENCED);
vm_page_unlock_queues();
vm_page_unlock_queues();
}
vm_page_unlock(m);
if (object != tobject)
VM_OBJECT_UNLOCK(object);
@ -1266,7 +1244,6 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
m->act_count = 0;
vm_page_dontneed(m);
}
vm_page_unlock_queues();
vm_page_unlock(m);
if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
swap_pager_freespace(tobject, tpindex, 1);


@ -1885,14 +1885,13 @@ vm_page_cache(vm_page_t m)
void
vm_page_dontneed(vm_page_t m)
{
static int dnweight;
int dnw;
int head;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
vm_page_lock_assert(m, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
dnw = ++dnweight;
dnw = PCPU_GET(dnweight);
PCPU_INC(dnweight);
/*
* occassionally leave the page alone
@ -1908,7 +1907,9 @@ vm_page_dontneed(vm_page_t m)
* Clear any references to the page. Otherwise, the page daemon will
* immediately reactivate the page.
*/
vm_page_lock_queues();
vm_page_flag_clear(m, PG_REFERENCED);
vm_page_unlock_queues();
pmap_clear_reference(m);
if (m->dirty == 0 && pmap_is_modified(m))


@ -194,8 +194,10 @@ extern struct vpglocks pa_lock[];
#define PA_UNLOCK(pa) mtx_unlock(PA_LOCKPTR(pa))
#define PA_UNLOCK_COND(pa) \
do { \
if (pa) \
PA_UNLOCK(pa); \
if ((pa) != 0) { \
PA_UNLOCK((pa)); \
(pa) = 0; \
} \
} while (0)
#define PA_LOCK_ASSERT(pa, a) mtx_assert(PA_LOCKPTR(pa), (a))
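
A minimal usage sketch of the revised macro: because PA_UNLOCK_COND()
now also zeroes its argument, the caller's variable correctly reads as
zero after the pmap drops the lock, and a repeated call is a harmless
no-op.

    vm_paddr_t locked_pa = pa;    /* assume the lock for pa is held */

    PA_UNLOCK_COND(locked_pa);    /* unlocks and sets locked_pa to 0 */
    PA_UNLOCK_COND(locked_pa);    /* no-op */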


@ -391,17 +391,14 @@ vm_pageout_clean(vm_page_t m)
break;
}
vm_page_lock(p);
vm_page_lock_queues();
vm_page_test_dirty(p);
if (p->dirty == 0 ||
p->queue != PQ_INACTIVE ||
p->hold_count != 0) { /* may be undergoing I/O */
vm_page_unlock(p);
vm_page_unlock_queues();
ib = 0;
break;
}
vm_page_unlock_queues();
vm_page_unlock(p);
mc[--page_base] = p;
++pageout_count;
@ -424,16 +421,13 @@ vm_pageout_clean(vm_page_t m)
break;
}
vm_page_lock(p);
vm_page_lock_queues();
vm_page_test_dirty(p);
if (p->dirty == 0 ||
p->queue != PQ_INACTIVE ||
p->hold_count != 0) { /* may be undergoing I/O */
vm_page_unlock_queues();
vm_page_unlock(p);
break;
}
vm_page_unlock_queues();
vm_page_unlock(p);
mc[page_base + pageout_count] = p;
++pageout_count;