(6/6) Convert pmap to expect busy in write related operations now that all
callers hold it.  This simplifies pmap code and removes a dependency on the
object lock.

Reviewed by:	kib, markj
Tested by:	pho
Sponsored by:	Netflix, Intel
Differential Revision:	https://reviews.freebsd.org/D21596
This commit is contained in:
parent 786dad5c20
commit 50eb2e4288
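The same mechanical substitution is applied in every architecture's pmap below. As a condensed illustration only (not a copy of any single file in this diff), a write-related pmap operation now looks roughly like the following sketch, using the KPIs that appear in the hunks (vm_page_assert_busied() and pmap_page_is_write_mapped()); the machine-dependent body is elided:

void
pmap_remove_write(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_write: page %p is not managed", m));
	/* Callers now hold the page busy; assert that instead of the object lock. */
	vm_page_assert_busied(m);

	/*
	 * With the page held busy, no new writeable mapping (and thus
	 * PGA_WRITEABLE) can appear concurrently, so a clear flag means
	 * there are no page table entries to downgrade.
	 */
	if (!pmap_page_is_write_mapped(m))
		return;

	/* ... machine-dependent walk of the page's PV list clearing write access ... */
}

The VM_OBJECT_ASSERT_WLOCKED() assertion and the open-coded vm_page_xbusied()/PGA_WRITEABLE test are what each hunk below removes.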
@@ -7285,12 +7285,9 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have PG_M set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 	return (pmap_page_test_mappings(m, FALSE, TRUE));
 }
@@ -7353,14 +7350,10 @@ pmap_remove_write(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	vm_page_assert_busied(m);
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -7833,16 +7826,9 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
-	 * If the object containing the page is locked and the page is not
-	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -4095,16 +4095,9 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no mappings can be modified.
-	 * If the object containing the page is locked and the page is not
-	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	if (m->md.pvh_attrs & PVF_MOD)
 		pmap_clearbit(m, PVF_MOD);
@@ -4136,14 +4129,9 @@ pmap_remove_write(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0)
+	if (pmap_page_is_write_mapped(m))
 		pmap_clearbit(m, PVF_WRITE);
 }
@@ -5192,12 +5192,9 @@ pmap_is_modified(vm_page_t m)
 	    ("%s: page %p is not managed", __func__, m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTE2s can have PG_M set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_modified_pvh(&m->md) ||
@@ -5533,14 +5530,9 @@ pmap_remove_write(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("%s: page %p is not managed", __func__, m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
@@ -5691,17 +5683,9 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("%s: page %p is not managed", __func__, m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("%s: page %p is exclusive busy", __func__, m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTE2s can have PTE2_NM
-	 * cleared. If the object containing the page is locked and the page
-	 * is not exclusive busied, then PGA_WRITEABLE cannot be concurrently
-	 * set.
-	 */
-	if ((m->flags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
@@ -4548,12 +4548,9 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have PG_M set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 	return (pmap_page_test_mappings(m, FALSE, TRUE));
 }
@@ -4612,14 +4609,9 @@ pmap_remove_write(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
@@ -4987,16 +4979,9 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTEs can have ATTR_SW_DBM
-	 * set. If the object containing the page is locked and the page is not
-	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -4874,12 +4874,9 @@ __CONCAT(PMTYPE, is_modified)(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have PG_M set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_modified_pvh(&m->md) ||
@@ -5003,14 +5000,9 @@ __CONCAT(PMTYPE, remove_write)(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
@@ -5313,16 +5305,9 @@ __CONCAT(PMTYPE, clear_modify)(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
-	 * If the object containing the page is locked and the page is not
-	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
@@ -2893,14 +2893,9 @@ pmap_remove_write(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2960,13 +2955,11 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have PTE_D set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 
 	rw_wlock(&pvh_global_lock);
 	rv = pmap_testbit(m, PTE_D);
 	rw_wunlock(&pvh_global_lock);
@@ -3101,15 +3094,9 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
-	 * If the object containing the page is locked and the page is not
-	 * write busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -1314,13 +1314,11 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
 	    ("moea_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have PTE_CHG set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 
 	rw_wlock(&pvh_global_lock);
 	rv = moea_query_bit(m, PTE_CHG);
 	rw_wunlock(&pvh_global_lock);
@@ -1346,16 +1344,9 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("moea_clear_modify: page %p is exclusive busy", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
-	 * set. If the object containing the page is locked and the page is
-	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	moea_clear_bit(m, PTE_CHG);
@@ -1375,14 +1366,9 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	lo = moea_attr_fetch(m);
@@ -1694,13 +1694,11 @@ moea64_is_modified(mmu_t mmu, vm_page_t m)
 	    ("moea64_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have LPTE_CHG set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_LOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 
 	return (moea64_query_bit(mmu, m, LPTE_CHG));
 }
@@ -1724,16 +1722,9 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("moea64_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
-	 * set. If the object containing the page is locked and the page is
-	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	moea64_clear_bit(mmu, m, LPTE_CHG);
 }
@@ -1750,15 +1741,11 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
+
+	if (!pmap_page_is_write_mapped(m))
+		return
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
-		return;
 	powerpc_sync();
 	PV_PAGE_LOCK(m);
 	refchg = 0;
@@ -2687,15 +2687,10 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
-		return;
+	if (!pmap_page_is_write_mapped(m))
+		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
@@ -3035,13 +3030,11 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
 	rv = FALSE;
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can be modified.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
-		return (rv);
+	if (!pmap_page_is_write_mapped(m))
+		return (FALSE);
 
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
@@ -3110,17 +3103,11 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("mmu_booke_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
+	if (!pmap_page_is_write_mapped(m))
+		return;
 
-	/*
-	 * If the page is not PG_AWRITEABLE, then no PTEs can be modified.
-	 * If the object containing the page is locked and the page is not
-	 * exclusive busied, then PG_AWRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
-		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
@@ -3814,12 +3814,9 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have PG_M set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 	return (pmap_page_test_mappings(m, FALSE, TRUE));
 }
@@ -3878,14 +3875,9 @@ pmap_remove_write(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
@@ -4136,9 +4128,10 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
+	if (!pmap_page_is_write_mapped(m))
+		return;
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
@@ -2116,12 +2116,9 @@ pmap_is_modified(vm_page_t m)
 	rv = FALSE;
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no TTEs can have TD_W set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (rv);
 	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
@@ -2195,17 +2192,11 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no TTEs can have TD_W set.
-	 * If the object containing the page is locked and the page is not
-	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 
 	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
@@ -2225,15 +2216,11 @@ pmap_remove_write(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
+
+	if (!pmap_page_is_write_mapped(m))
+		return;
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
-		return;
 	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)