Revert r352406, which contained changes I didn't intend to commit.

This commit is contained in: parent 41fd4b9422, commit e8bcf6966b
@@ -51,7 +51,7 @@ The
.Fn vm_page_wire
and
.Fn vm_page_wire_mapped
functions wire the page, which prevents it from being reclaimed by the page
function wire the page, prevent it from being reclaimed by the page
daemon or when its containing object is destroyed.
Both functions require that the page belong to an object.
The
@@ -3064,8 +3064,10 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
pd_entry_t pde, *pdep;
pt_entry_t pte, PG_RW, PG_V;
vm_paddr_t pa;
vm_page_t m;

pa = 0;
m = NULL;
PG_RW = pmap_rw_bit(pmap);
PG_V = pmap_valid_bit(pmap);
@@ -5804,7 +5806,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
("pmap_enter: no PV entry for %#lx", va));
if ((newpte & PG_MANAGED) == 0)
free_pv_entry(pmap, pv);
if ((vm_page_aflags(om) & PGA_WRITEABLE) != 0 &&
if ((om->aflags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&om->md.pv_list) &&
((om->flags & PG_FICTITIOUS) != 0 ||
TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
@@ -6987,7 +6989,7 @@ pmap_remove_pages(pmap_t pmap)
pvh->pv_gen++;
if (TAILQ_EMPTY(&pvh->pv_list)) {
for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
if ((vm_page_aflags(mt) & PGA_WRITEABLE) != 0 &&
if ((mt->aflags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&mt->md.pv_list))
vm_page_aflag_clear(mt, PGA_WRITEABLE);
}
@@ -7005,7 +7007,7 @@ pmap_remove_pages(pmap_t pmap)
pmap_resident_count_dec(pmap, 1);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
m->md.pv_gen++;
if ((vm_page_aflags(m) & PGA_WRITEABLE) != 0 &&
if ((m->aflags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&m->md.pv_list) &&
(m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -7136,7 +7138,7 @@ pmap_is_modified(vm_page_t m)
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_ASSERT_WLOCKED(m->object);
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
return (pmap_page_test_mappings(m, FALSE, TRUE));
}
@@ -7205,7 +7207,7 @@ pmap_remove_write(vm_page_t m)
* if PGA_WRITEABLE is clear, no page table entries need updating.
*/
VM_OBJECT_ASSERT_WLOCKED(m->object);
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
return;
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
@@ -7688,7 +7690,7 @@ pmap_clear_modify(vm_page_t m)
* If the object containing the page is locked and the page is not
* exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
*/
if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -423,8 +423,7 @@ extern int pmap_pcid_enabled;
extern int invpcid_works;

#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
#define pmap_page_is_write_mapped(m) \
(((m)->astate.flags & PGA_WRITEABLE) != 0)
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))

struct thread;
@ -4104,7 +4104,7 @@ pmap_clear_modify(vm_page_t m)
|
||||
* If the object containing the page is locked and the page is not
|
||||
* exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
|
||||
*/
|
||||
if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if ((m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
if (m->md.pvh_attrs & PVF_MOD)
|
||||
pmap_clearbit(m, PVF_MOD);
|
||||
@ -4143,7 +4143,7 @@ pmap_remove_write(vm_page_t m)
|
||||
* if PGA_WRITEABLE is clear, no page table entries need updating.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (vm_page_xbusied(m) || (vm_page_aflags(m) & PGA_WRITEABLE) != 0)
|
||||
if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0)
|
||||
pmap_clearbit(m, PVF_WRITE);
|
||||
}
|
||||
|
||||
|
@ -5197,7 +5197,7 @@ pmap_is_modified(vm_page_t m)
|
||||
* is clear, no PTE2s can have PG_M set.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return (FALSE);
|
||||
rw_wlock(&pvh_global_lock);
|
||||
rv = pmap_is_modified_pvh(&m->md) ||
|
||||
@ -5540,7 +5540,7 @@ pmap_remove_write(vm_page_t m)
|
||||
* if PGA_WRITEABLE is clear, no page table entries need updating.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
rw_wlock(&pvh_global_lock);
|
||||
sched_pin();
|
||||
|
@ -3333,7 +3333,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
|
||||
pv = pmap_pvh_remove(&om->md, pmap, va);
|
||||
if ((m->oflags & VPO_UNMANAGED) != 0)
|
||||
free_pv_entry(pmap, pv);
|
||||
if ((vm_page_aflags(om) & PGA_WRITEABLE) != 0 &&
|
||||
if ((om->aflags & PGA_WRITEABLE) != 0 &&
|
||||
TAILQ_EMPTY(&om->md.pv_list) &&
|
||||
((om->flags & PG_FICTITIOUS) != 0 ||
|
||||
TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
|
||||
@ -4372,7 +4372,7 @@ pmap_remove_pages(pmap_t pmap)
|
||||
pvh->pv_gen++;
|
||||
if (TAILQ_EMPTY(&pvh->pv_list)) {
|
||||
for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
|
||||
if (vm_page_aflags(mt) & PGA_WRITEABLE) != 0 &&
|
||||
if ((mt->aflags & PGA_WRITEABLE) != 0 &&
|
||||
TAILQ_EMPTY(&mt->md.pv_list))
|
||||
vm_page_aflag_clear(mt, PGA_WRITEABLE);
|
||||
}
|
||||
@ -4394,7 +4394,7 @@ pmap_remove_pages(pmap_t pmap)
|
||||
TAILQ_REMOVE(&m->md.pv_list, pv,
|
||||
pv_next);
|
||||
m->md.pv_gen++;
|
||||
if (vm_page_aflags(m) & PGA_WRITEABLE) != 0 &&
|
||||
if ((m->aflags & PGA_WRITEABLE) != 0 &&
|
||||
TAILQ_EMPTY(&m->md.pv_list) &&
|
||||
(m->flags & PG_FICTITIOUS) == 0) {
|
||||
pvh = pa_to_pvh(
|
||||
@ -4534,7 +4534,7 @@ pmap_is_modified(vm_page_t m)
|
||||
* is clear, no PTEs can have PG_M set.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return (FALSE);
|
||||
return (pmap_page_test_mappings(m, FALSE, TRUE));
|
||||
}
|
||||
@ -4600,7 +4600,7 @@ pmap_remove_write(vm_page_t m)
|
||||
* if PGA_WRITEABLE is clear, no page table entries need updating.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
|
||||
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
|
||||
@ -4977,7 +4977,7 @@ pmap_clear_modify(vm_page_t m)
|
||||
* set. If the object containing the page is locked and the page is not
|
||||
* exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
|
||||
*/
|
||||
if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if ((m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
|
||||
pa_to_pvh(VM_PAGE_TO_PHYS(m));
|
||||
@@ -1718,10 +1718,12 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
bcopy((char *)db->db_data + bufoff, va, PAGESIZE);
zfs_unmap_page(sf);
m->valid = VM_PAGE_BITS_ALL;
vm_page_lock(m);
if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
vm_page_activate(m);
else
vm_page_deactivate(m);
vm_page_unlock(m);
}
*rbehind = i;

@@ -1836,10 +1838,12 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
}
zfs_unmap_page(sf);
m->valid = VM_PAGE_BITS_ALL;
vm_page_lock(m);
if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
vm_page_activate(m);
else
vm_page_deactivate(m);
vm_page_unlock(m);
}
*rahead = i;
zfs_vmobject_wunlock(vmobj);
|
@ -332,6 +332,8 @@ vtballoon_inflate(struct vtballoon_softc *sc, int npages)
|
||||
sc->vtballoon_page_frames[i] =
|
||||
VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT;
|
||||
|
||||
KASSERT(m->queue == PQ_NONE,
|
||||
("%s: allocated page %p on queue", __func__, m));
|
||||
TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, plinks.q);
|
||||
}
|
||||
|
||||
|
@ -3752,7 +3752,7 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m,
|
||||
("pmap_enter: no PV entry for %#x", va));
|
||||
if ((newpte & PG_MANAGED) == 0)
|
||||
free_pv_entry(pmap, pv);
|
||||
if ((vm_page_aflags(om) & PGA_WRITEABLE) != 0 &&
|
||||
if ((om->aflags & PGA_WRITEABLE) != 0 &&
|
||||
TAILQ_EMPTY(&om->md.pv_list) &&
|
||||
((om->flags & PG_FICTITIOUS) != 0 ||
|
||||
TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
|
||||
@ -4848,7 +4848,7 @@ __CONCAT(PMTYPE, is_modified)(vm_page_t m)
|
||||
* is clear, no PTEs can have PG_M set.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return (FALSE);
|
||||
rw_wlock(&pvh_global_lock);
|
||||
rv = pmap_is_modified_pvh(&m->md) ||
|
||||
@ -4979,7 +4979,7 @@ __CONCAT(PMTYPE, remove_write)(vm_page_t m)
|
||||
* if PGA_WRITEABLE is clear, no page table entries need updating.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
rw_wlock(&pvh_global_lock);
|
||||
sched_pin();
|
||||
@ -5291,7 +5291,7 @@ __CONCAT(PMTYPE, clear_modify)(vm_page_t m)
|
||||
* If the object containing the page is locked and the page is not
|
||||
* exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
|
||||
*/
|
||||
if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if ((m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
rw_wlock(&pvh_global_lock);
|
||||
sched_pin();
|
||||
|
@ -2164,7 +2164,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
|
||||
pv = pmap_pvh_remove(&om->md, pmap, va);
|
||||
if (!pte_test(&newpte, PTE_MANAGED))
|
||||
free_pv_entry(pmap, pv);
|
||||
if (vm_page_aflags(m) & PGA_WRITEABLE) != 0 &&
|
||||
if ((om->aflags & PGA_WRITEABLE) != 0 &&
|
||||
TAILQ_EMPTY(&om->md.pv_list))
|
||||
vm_page_aflag_clear(om, PGA_WRITEABLE);
|
||||
}
|
||||
@ -2934,7 +2934,7 @@ pmap_remove_write(vm_page_t m)
|
||||
* if PGA_WRITEABLE is clear, no page table entries need updating.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
rw_wlock(&pvh_global_lock);
|
||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
|
||||
@ -2999,7 +2999,7 @@ pmap_is_modified(vm_page_t m)
|
||||
* is clear, no PTEs can have PTE_D set.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return (FALSE);
|
||||
rw_wlock(&pvh_global_lock);
|
||||
rv = pmap_testbit(m, PTE_D);
|
||||
@ -3143,7 +3143,7 @@ pmap_clear_modify(vm_page_t m)
|
||||
* If the object containing the page is locked and the page is not
|
||||
* write busied, then PGA_WRITEABLE cannot be concurrently set.
|
||||
*/
|
||||
if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if ((m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
rw_wlock(&pvh_global_lock);
|
||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
|
||||
@ -3270,7 +3270,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
|
||||
* determine if the address is MINCORE_REFERENCED.
|
||||
*/
|
||||
m = PHYS_TO_VM_PAGE(pa);
|
||||
if ((vm_page_aflags(m) & PGA_REFERENCED) != 0)
|
||||
if ((m->aflags & PGA_REFERENCED) != 0)
|
||||
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
|
||||
}
|
||||
if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
|
||||
|
@ -1319,7 +1319,7 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
|
||||
* is clear, no PTEs can have PTE_CHG set.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return (FALSE);
|
||||
rw_wlock(&pvh_global_lock);
|
||||
rv = moea_query_bit(m, PTE_CHG);
|
||||
@ -1355,7 +1355,7 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
|
||||
* set. If the object containing the page is locked and the page is
|
||||
* not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
|
||||
*/
|
||||
if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if ((m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
rw_wlock(&pvh_global_lock);
|
||||
moea_clear_bit(m, PTE_CHG);
|
||||
@ -1382,7 +1382,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
|
||||
* if PGA_WRITEABLE is clear, no page table entries need updating.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
rw_wlock(&pvh_global_lock);
|
||||
lo = moea_attr_fetch(m);
|
||||
@ -1915,8 +1915,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
|
||||
moea_pvo_remove(pvo, -1);
|
||||
PMAP_UNLOCK(pmap);
|
||||
}
|
||||
if ((vm_page_aflags(m) & PGA_WRITEABLE) != 0 &&
|
||||
moea_query_bit(m, PTE_CHG)) {
|
||||
if ((m->aflags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
|
||||
moea_attr_clear(m, PTE_CHG);
|
||||
vm_page_dirty(m);
|
||||
}
|
||||
|
@ -1467,7 +1467,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
|
||||
* Flush the page from the instruction cache if this page is
|
||||
* mapped executable and cacheable.
|
||||
*/
|
||||
if (pmap != kernel_pmap && (vm_page_aflags(m) & PGA_EXECUTABLE) != 0 &&
|
||||
if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
|
||||
(pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
|
||||
vm_page_aflag_set(m, PGA_EXECUTABLE);
|
||||
moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
|
||||
@ -1688,7 +1688,7 @@ moea64_is_modified(mmu_t mmu, vm_page_t m)
|
||||
* is clear, no PTEs can have LPTE_CHG set.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_LOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return (FALSE);
|
||||
return (moea64_query_bit(mmu, m, LPTE_CHG));
|
||||
}
|
||||
@ -1722,7 +1722,7 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m)
|
||||
* set. If the object containing the page is locked and the page is
|
||||
* not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
|
||||
*/
|
||||
if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if ((m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
moea64_clear_bit(mmu, m, LPTE_CHG);
|
||||
}
|
||||
@ -1746,7 +1746,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
|
||||
* if PGA_WRITEABLE is clear, no page table entries need updating.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
powerpc_sync();
|
||||
PV_PAGE_LOCK(m);
|
||||
@ -2240,8 +2240,7 @@ moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
|
||||
if (refchg < 0)
|
||||
refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
|
||||
|
||||
if (pm != kernel_pmap && pg != NULL &&
|
||||
(vm_page_aflags(pg) & PGA_EXECUTABLE) == 0 &&
|
||||
if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
|
||||
(pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
|
||||
if ((pg->oflags & VPO_UNMANAGED) == 0)
|
||||
vm_page_aflag_set(pg, PGA_EXECUTABLE);
|
||||
@ -2455,8 +2454,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
|
||||
|
||||
}
|
||||
KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
|
||||
KASSERT((vm_page_aflags(m) & PGA_WRITEABLE) == 0,
|
||||
("Page still writable"));
|
||||
KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable"));
|
||||
PV_PAGE_UNLOCK(m);
|
||||
|
||||
/* Clean up UMA allocations */
|
||||
|
@ -2694,7 +2694,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
|
||||
* if PGA_WRITEABLE is clear, no page table entries need updating.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
rw_wlock(&pvh_global_lock);
|
||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
|
||||
@ -3040,7 +3040,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
|
||||
* is clear, no PTEs can be modified.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return (rv);
|
||||
rw_wlock(&pvh_global_lock);
|
||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
|
||||
@ -3119,7 +3119,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
|
||||
* If the object containing the page is locked and the page is not
|
||||
* exclusive busied, then PG_AWRITEABLE cannot be concurrently set.
|
||||
*/
|
||||
if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if ((m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
rw_wlock(&pvh_global_lock);
|
||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
|
||||
|
@ -2825,7 +2825,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
|
||||
("pmap_enter: no PV entry for %#lx", va));
|
||||
if ((new_l3 & PTE_SW_MANAGED) == 0)
|
||||
free_pv_entry(pmap, pv);
|
||||
if ((vm_page_aflags(om) & PGA_WRITEABLE) == 0 &&
|
||||
if ((om->aflags & PGA_WRITEABLE) != 0 &&
|
||||
TAILQ_EMPTY(&om->md.pv_list))
|
||||
vm_page_aflag_clear(om, PGA_WRITEABLE);
|
||||
}
|
||||
@ -3556,7 +3556,7 @@ pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entry_t pv,
|
||||
if (TAILQ_EMPTY(&pvh->pv_list)) {
|
||||
for (mt = m; mt < &m[Ln_ENTRIES]; mt++)
|
||||
if (TAILQ_EMPTY(&mt->md.pv_list) &&
|
||||
(vm_page_aflags(mt) & PGA_WRITEABLE) != 0)
|
||||
(mt->aflags & PGA_WRITEABLE) != 0)
|
||||
vm_page_aflag_clear(mt, PGA_WRITEABLE);
|
||||
}
|
||||
mpte = pmap_remove_pt_page(pmap, pv->pv_va);
|
||||
@ -3574,7 +3574,7 @@ pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entry_t pv,
|
||||
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
|
||||
m->md.pv_gen++;
|
||||
if (TAILQ_EMPTY(&m->md.pv_list) &&
|
||||
(vm_page_aflags(m) & PGA_WRITEABLE) != 0) {
|
||||
(m->aflags & PGA_WRITEABLE) != 0) {
|
||||
pvh = pa_to_pvh(m->phys_addr);
|
||||
if (TAILQ_EMPTY(&pvh->pv_list))
|
||||
vm_page_aflag_clear(m, PGA_WRITEABLE);
|
||||
@ -3789,7 +3789,7 @@ pmap_is_modified(vm_page_t m)
|
||||
* is clear, no PTEs can have PG_M set.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return (FALSE);
|
||||
return (pmap_page_test_mappings(m, FALSE, TRUE));
|
||||
}
|
||||
@ -3855,7 +3855,7 @@ pmap_remove_write(vm_page_t m)
|
||||
* if PGA_WRITEABLE is clear, no page table entries need updating.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
|
||||
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
|
||||
@ -4115,7 +4115,7 @@ pmap_clear_modify(vm_page_t m)
|
||||
* If the object containing the page is locked and the page is not
|
||||
* exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
|
||||
*/
|
||||
if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if ((m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
|
||||
pa_to_pvh(VM_PAGE_TO_PHYS(m));
|
||||
|
@ -2121,7 +2121,7 @@ pmap_is_modified(vm_page_t m)
|
||||
* is clear, no TTEs can have TD_W set.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return (rv);
|
||||
rw_wlock(&tte_list_global_lock);
|
||||
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
|
||||
@ -2204,7 +2204,7 @@ pmap_clear_modify(vm_page_t m)
|
||||
* If the object containing the page is locked and the page is not
|
||||
* exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
|
||||
*/
|
||||
if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if ((m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
rw_wlock(&tte_list_global_lock);
|
||||
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
|
||||
@ -2232,7 +2232,7 @@ pmap_remove_write(vm_page_t m)
|
||||
* if PGA_WRITEABLE is clear, no page table entries need updating.
|
||||
*/
|
||||
VM_OBJECT_ASSERT_WLOCKED(m->object);
|
||||
if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
|
||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
|
||||
return;
|
||||
rw_wlock(&tte_list_global_lock);
|
||||
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
|
||||
@@ -1648,6 +1648,12 @@ swp_pager_force_dirty(vm_page_t m)
{

vm_page_dirty(m);
#ifdef INVARIANTS
vm_page_lock(m);
if (!vm_page_wired(m) && m->queue == PQ_NONE)
panic("page %p is neither wired nor queued", m);
vm_page_unlock(m);
#endif
vm_page_xunbusy(m);
swap_pager_unswapped(m);
}
|
@ -153,7 +153,9 @@ release_page(struct faultstate *fs)
|
||||
{
|
||||
|
||||
vm_page_xunbusy(fs->m);
|
||||
vm_page_lock(fs->m);
|
||||
vm_page_deactivate(fs->m);
|
||||
vm_page_unlock(fs->m);
|
||||
fs->m = NULL;
|
||||
}
|
||||
|
||||
@ -374,7 +376,9 @@ vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
|
||||
for (pidx = first, m = vm_page_lookup(object, pidx);
|
||||
pidx <= last; pidx++, m = vm_page_next(m)) {
|
||||
vm_fault_populate_check_page(m);
|
||||
vm_page_lock(m);
|
||||
vm_page_deactivate(m);
|
||||
vm_page_unlock(m);
|
||||
vm_page_xunbusy(m);
|
||||
}
|
||||
}
|
||||
@ -1321,7 +1325,9 @@ vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
|
||||
if ((fault_flags & VM_FAULT_WIRE) != 0) {
|
||||
vm_page_wire(fs.m);
|
||||
} else {
|
||||
vm_page_lock(fs.m);
|
||||
vm_page_activate(fs.m);
|
||||
vm_page_unlock(fs.m);
|
||||
}
|
||||
if (m_hold != NULL) {
|
||||
*m_hold = fs.m;
|
||||
|
@ -935,9 +935,9 @@ kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
|
||||
* and set PGA_REFERENCED before the call to
|
||||
* pmap_is_referenced().
|
||||
*/
|
||||
if ((vm_page_aflags(m) & PGA_REFERENCED) != 0 ||
|
||||
if ((m->aflags & PGA_REFERENCED) != 0 ||
|
||||
pmap_is_referenced(m) ||
|
||||
(vm_page_aflags(m) & PGA_REFERENCED) != 0)
|
||||
(m->aflags & PGA_REFERENCED) != 0)
|
||||
mincoreinfo |= MINCORE_REFERENCED_OTHER;
|
||||
}
|
||||
if (object != NULL)
|
||||
|
@ -2312,9 +2312,9 @@ sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
|
||||
* sysctl is only meant to give an
|
||||
* approximation of the system anyway.
|
||||
*/
|
||||
if (m->astate.queue == PQ_ACTIVE)
|
||||
if (m->queue == PQ_ACTIVE)
|
||||
kvo->kvo_active++;
|
||||
else if (m->astate.queue == PQ_INACTIVE)
|
||||
else if (m->queue == PQ_INACTIVE)
|
||||
kvo->kvo_inactive++;
|
||||
}
|
||||
|
||||
sys/vm/vm_page.c: 716 changed lines (diff suppressed because it is too large).

sys/vm/vm_page.h: 149 changed lines.
@@ -190,15 +190,6 @@ typedef uint32_t vm_page_bits_t;
typedef uint64_t vm_page_bits_t;
#endif

typedef union {
struct {
uint16_t flags;
uint8_t queue;
uint8_t act_count;
};
uint32_t _bits;
} vm_page_astate_t;

struct vm_page {
union {
TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
@@ -221,13 +212,15 @@ struct vm_page {
u_int ref_count; /* page references */
};
volatile u_int busy_lock; /* busy owners lock */
vm_page_astate_t astate; /* atomically updated state */
uint8_t flags; /* page PG_* flags (P) */
uint16_t flags; /* page PG_* flags (P) */
uint8_t order; /* index of the buddy queue (F) */
uint8_t pool; /* vm_phys freepool index (F) */
uint8_t aflags; /* access is atomic */
uint8_t oflags; /* page VPO_* flags (O) */
uint8_t queue; /* page queue index (Q) */
int8_t psind; /* pagesizes[] index (O) */
int8_t segind; /* vm_phys segment index (C) */
uint8_t oflags; /* page VPO_* flags (O) */
u_char act_count; /* page usage count (P) */
/* NOTE that these must support one bit per DEV_BSIZE in a page */
/* so, on normal X86 kernels, they must be at least 8 bits wide */
vm_page_bits_t valid; /* map of valid DEV_BSIZE chunks (O) */
@@ -406,8 +399,8 @@ extern struct mtx_padalign pa_lock[];
#define PGA_REQUEUE 0x20 /* page is due to be requeued */
#define PGA_REQUEUE_HEAD 0x40 /* page requeue should bypass LRU */

#define PGA_QUEUE_OP_MASK (PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD)
#define PGA_QUEUE_STATE_MASK (PGA_ENQUEUED | PGA_QUEUE_OP_MASK)
#define PGA_QUEUE_STATE_MASK (PGA_ENQUEUED | PGA_DEQUEUE | PGA_REQUEUE | \
PGA_REQUEUE_HEAD)

/*
* Page flags. If changed at any other time than page allocation or
@@ -417,11 +410,11 @@ extern struct mtx_padalign pa_lock[];
* allocated from a per-CPU cache. It is cleared the next time that the
* page is allocated from the physical memory allocator.
*/
#define PG_PCPU_CACHE 0x01 /* was allocated from per-CPU caches */
#define PG_FICTITIOUS 0x04 /* physical page doesn't exist */
#define PG_ZERO 0x08 /* page is zeroed */
#define PG_MARKER 0x10 /* special queue marker page */
#define PG_NODUMP 0x80 /* don't include this page in a dump */
#define PG_PCPU_CACHE 0x0001 /* was allocated from per-CPU caches */
#define PG_FICTITIOUS 0x0004 /* physical page doesn't exist */
#define PG_ZERO 0x0008 /* page is zeroed */
#define PG_MARKER 0x0010 /* special queue marker page */
#define PG_NODUMP 0x0080 /* don't include this page in a dump */

/*
* Misc constants.
@@ -579,6 +572,7 @@ int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_deferred(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
bool vm_page_free_prep(vm_page_t m);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
@@ -590,8 +584,6 @@ vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
void vm_page_pqbatch_drain(void);
void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
bool vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old,
vm_page_astate_t new);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
@@ -696,52 +688,64 @@ void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#ifdef INVARIANTS
void vm_page_object_lock_assert(vm_page_t m);
#define VM_PAGE_OBJECT_LOCK_ASSERT(m) vm_page_object_lock_assert(m)
void vm_page_pagequeue_lock_assert(vm_page_t m, uint8_t queue);
#define VM_PAGE_PAGEQUEUE_LOCK_ASSERT(m, q) vm_page_pagequeue_lock_assert(m, q)
void vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits);
#define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits) \
vm_page_assert_pga_writeable(m, bits)
#else
#define VM_PAGE_OBJECT_LOCK_ASSERT(m) (void)0
#define VM_PAGE_PAGEQUEUE_LOCK_ASSERT(m, q) (void)0
#define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits) (void)0
#endif

/*
* We want to use atomic updates for the aflags field, which is 16 bits wide.
* However, not all architectures support atomic operations on 16-bit
* We want to use atomic updates for the aflags field, which is 8 bits wide.
* However, not all architectures support atomic operations on 8-bit
* destinations. In order that we can easily use a 32-bit operation, we
* require that the aflags field be 32-bit aligned.
*/
_Static_assert(offsetof(struct vm_page, astate.flags) % sizeof(uint32_t) == 0,
_Static_assert(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0,
"aflags field is not 32-bit aligned");

#define VM_PAGE_AFLAG_SHIFT __offsetof(vm_page_astate_t, flags)

/*
* Return the atomic flag set for the page.
* We want to be able to update the aflags and queue fields atomically in
* the same operation.
*/
static inline int
vm_page_aflags(vm_page_t m)
{
_Static_assert(offsetof(struct vm_page, aflags) / sizeof(uint32_t) ==
offsetof(struct vm_page, queue) / sizeof(uint32_t),
"aflags and queue fields do not belong to the same 32-bit word");
_Static_assert(offsetof(struct vm_page, queue) % sizeof(uint32_t) == 2,
"queue field is at an unexpected offset");
_Static_assert(sizeof(((struct vm_page *)NULL)->queue) == 1,
"queue field has an unexpected size");

return (m->astate.flags);
}
#if BYTE_ORDER == LITTLE_ENDIAN
#define VM_PAGE_AFLAG_SHIFT 0
#define VM_PAGE_QUEUE_SHIFT 16
#else
#define VM_PAGE_AFLAG_SHIFT 24
#define VM_PAGE_QUEUE_SHIFT 8
#endif
#define VM_PAGE_QUEUE_MASK (0xff << VM_PAGE_QUEUE_SHIFT)

/*
* Clear the given bits in the specified page.
*/
static inline void
vm_page_aflag_clear(vm_page_t m, uint16_t bits)
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
uint32_t *addr, val;

/*
* The PGA_REFERENCED flag can only be cleared if the page is locked.
*/
if ((bits & PGA_REFERENCED) != 0)
vm_page_assert_locked(m);

/*
* Access the whole 32-bit word containing the aflags field with an
* atomic update. Parallel non-atomic updates to the other fields
* within this word are handled properly by the atomic update.
*/
addr = (void *)&m->astate;
addr = (void *)&m->aflags;
val = bits << VM_PAGE_AFLAG_SHIFT;
atomic_clear_32(addr, val);
}
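The comment restored above describes updating the 8-bit aflags field with a 32-bit atomic on the aligned word that contains it. The following standalone sketch (not from the commit) illustrates the technique with GCC/Clang __atomic builtins and placeholder names; the kernel code instead uses the machine-dependent atomic_set_32()/atomic_clear_32() on struct vm_page.

#include <stddef.h>
#include <stdint.h>

/*
 * Placeholder layout: "aflags" starts a 32-bit aligned word that it shares
 * with other byte-sized fields, mirroring the restored struct vm_page.
 */
struct toy_page {
	uint32_t busy_lock;
	uint8_t  aflags;	/* updated via the containing 32-bit word */
	uint8_t  oflags;
	uint8_t  queue;
	int8_t   psind;
};

#define TOY_AFLAG_SHIFT	0	/* little-endian; big-endian would use 24 */

static void
toy_aflag_set(struct toy_page *p, uint8_t bits)
{
	uint32_t *addr = (uint32_t *)(void *)&p->aflags;

	/* The whole word is updated atomically; bytes belonging to the
	 * neighboring fields are passed through unchanged. */
	__atomic_fetch_or(addr, (uint32_t)bits << TOY_AFLAG_SHIFT,
	    __ATOMIC_RELAXED);
}

static void
toy_aflag_clear(struct toy_page *p, uint8_t bits)
{
	uint32_t *addr = (uint32_t *)(void *)&p->aflags;

	__atomic_fetch_and(addr, ~((uint32_t)bits << TOY_AFLAG_SHIFT),
	    __ATOMIC_RELAXED);
}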
@@ -750,7 +754,7 @@ vm_page_aflag_clear(vm_page_t m, uint16_t bits)
* Set the given bits in the specified page.
*/
static inline void
vm_page_aflag_set(vm_page_t m, uint16_t bits)
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
uint32_t *addr, val;

@@ -761,43 +765,44 @@ vm_page_aflag_set(vm_page_t m, uint16_t bits)
* atomic update. Parallel non-atomic updates to the other fields
* within this word are handled properly by the atomic update.
*/
addr = (void *)&m->astate;
addr = (void *)&m->aflags;
val = bits << VM_PAGE_AFLAG_SHIFT;
atomic_set_32(addr, val);
}

static inline vm_page_astate_t
vm_page_astate_load(vm_page_t m)
{
vm_page_astate_t astate;

astate._bits = atomic_load_32(&m->astate);
return (astate);
}

/*
* Atomically update the queue state of the page. The operation fails if
* any of the queue flags in "fflags" are set or if the "queue" field of
* the page does not match the expected value; if the operation is
* successful, the flags in "nflags" are set and all other queue state
* flags are cleared.
*/
static inline bool
vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old,
vm_page_astate_t new)
vm_page_pqstate_cmpset(vm_page_t m, uint32_t oldq, uint32_t newq,
uint32_t fflags, uint32_t nflags)
{
int ret;
uint32_t *addr, nval, oval, qsmask;

KASSERT(new.queue == PQ_INACTIVE || (new.flags & PGA_REQUEUE_HEAD) == 0,
("vm_page_astate_fcmpset: unexecpted head requeue for page %p",
m));
KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
("vm_page_astate_fcmpset: setting PGA_ENQUEUED without a queue"));
KASSERT(new._bits != old->_bits,
("vm_page_astate_fcmpset: bits are not changing"));
vm_page_assert_locked(m);

ret = atomic_fcmpset_32(&m->astate._bits, &old->_bits, new._bits);
if (ret != 0) {
if (old->queue != PQ_NONE && old->queue != new.queue)
VM_PAGE_PAGEQUEUE_LOCK_ASSERT(m, old->queue);
KASSERT((new.flags & PGA_ENQUEUED) == 0 || old->queue == new.queue,
("vm_page_astate_fcmpset: PGA_ENQUEUED set after queue change for page %p", m));
}
fflags <<= VM_PAGE_AFLAG_SHIFT;
nflags <<= VM_PAGE_AFLAG_SHIFT;
newq <<= VM_PAGE_QUEUE_SHIFT;
oldq <<= VM_PAGE_QUEUE_SHIFT;
qsmask = ((PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD) <<
VM_PAGE_AFLAG_SHIFT) | VM_PAGE_QUEUE_MASK;

return (ret != 0);
addr = (void *)&m->aflags;
oval = atomic_load_32(addr);
do {
if ((oval & fflags) != 0)
return (false);
if ((oval & VM_PAGE_QUEUE_MASK) != oldq)
return (false);
nval = (oval & ~qsmask) | nflags | newq;
} while (!atomic_fcmpset_32(addr, &oval, nval));

return (true);
}
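For context, the restored vm_page_pqstate_cmpset() fails when any flag in fflags is set or the queue field differs from oldq, and otherwise installs newq and nflags while clearing the queue-state flags. A hypothetical caller (illustrative only; the helper name below is invented, the constants come from the header above) might look like:

/*
 * Try to move a page from the active queue to the inactive queue unless a
 * dequeue is already pending; PGA_REQUEUE asks the queue machinery to
 * physically requeue the page later.
 */
static bool
toy_move_to_inactive(vm_page_t m)
{

	return (vm_page_pqstate_cmpset(m, PQ_ACTIVE, PQ_INACTIVE,
	    PGA_DEQUEUE, PGA_REQUEUE));
}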
/*
@@ -853,17 +858,19 @@ vm_page_replace_checked(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
/*
* vm_page_queue:
*
* Return the index of the queue containing m.
* Return the index of the queue containing m. This index is guaranteed
* not to change while the page lock is held.
*/
static inline uint8_t
vm_page_queue(vm_page_t m)
{
vm_page_astate_t as;

as = vm_page_astate_load(m);
if ((as.flags & PGA_DEQUEUE) != 0)
vm_page_assert_locked(m);

if ((m->aflags & PGA_DEQUEUE) != 0)
return (PQ_NONE);
return (as.queue);
atomic_thread_fence_acq();
return (m->queue);
}
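The restored vm_page_queue() asserts the page lock, which keeps the returned index stable per the comment above. A minimal hypothetical caller (only the helper name is invented):

static bool
toy_page_is_active(vm_page_t m)
{
	bool active;

	vm_page_lock(m);
	active = (vm_page_queue(m) == PQ_ACTIVE);
	vm_page_unlock(m);
	return (active);
}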
|
||||
static inline bool
|
||||
|
@@ -218,7 +218,7 @@ vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
{

vm_pagequeue_assert_locked(pq);
KASSERT((vm_page_aflags(marker) & PGA_ENQUEUED) == 0,
KASSERT((marker->aflags & PGA_ENQUEUED) == 0,
("marker %p already enqueued", marker));

if (after == NULL)
@@ -242,7 +242,7 @@ vm_pageout_end_scan(struct scan_state *ss)

pq = ss->pq;
vm_pagequeue_assert_locked(pq);
KASSERT((vm_page_aflags(ss->marker) & PGA_ENQUEUED) != 0,
KASSERT((ss->marker->aflags & PGA_ENQUEUED) != 0,
("marker %p not enqueued", ss->marker));

TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
@@ -271,7 +271,7 @@ vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
marker = ss->marker;
pq = ss->pq;

KASSERT((marker->astate.flags & PGA_ENQUEUED) != 0,
KASSERT((marker->aflags & PGA_ENQUEUED) != 0,
("marker %p not enqueued", ss->marker));

vm_pagequeue_lock(pq);
@@ -280,7 +280,7 @@ vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
m = n, ss->scanned++) {
n = TAILQ_NEXT(m, plinks.q);
if ((m->flags & PG_MARKER) == 0) {
KASSERT((m->astate.flags & PGA_ENQUEUED) != 0,
KASSERT((m->aflags & PGA_ENQUEUED) != 0,
("page %p not enqueued", m));
KASSERT((m->flags & PG_FICTITIOUS) == 0,
("Fictitious page %p cannot be in page queue", m));
@ -370,10 +370,13 @@ vm_pageout_cluster(vm_page_t m)
|
||||
ib = 0;
|
||||
break;
|
||||
}
|
||||
vm_page_lock(p);
|
||||
if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
|
||||
vm_page_unlock(p);
|
||||
ib = 0;
|
||||
break;
|
||||
}
|
||||
vm_page_unlock(p);
|
||||
mc[--page_base] = pb = p;
|
||||
++pageout_count;
|
||||
++ib;
|
||||
@ -393,8 +396,12 @@ vm_pageout_cluster(vm_page_t m)
|
||||
vm_page_test_dirty(p);
|
||||
if (p->dirty == 0)
|
||||
break;
|
||||
if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p))
|
||||
vm_page_lock(p);
|
||||
if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
|
||||
vm_page_unlock(p);
|
||||
break;
|
||||
}
|
||||
vm_page_unlock(p);
|
||||
mc[page_base + pageout_count] = ps = p;
|
||||
++pageout_count;
|
||||
++is;
|
||||
@ -451,7 +458,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
|
||||
KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
|
||||
("vm_pageout_flush: partially invalid page %p index %d/%d",
|
||||
mc[i], i, count));
|
||||
KASSERT((vm_page_aflags(mc[i]) & PGA_WRITEABLE) == 0,
|
||||
KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
|
||||
("vm_pageout_flush: writeable page %p", mc[i]));
|
||||
vm_page_sbusy(mc[i]);
|
||||
}
|
||||
@ -570,6 +577,7 @@ vm_pageout_clean(vm_page_t m, int *numpagedout)
|
||||
vm_pindex_t pindex;
|
||||
int error, lockmode;
|
||||
|
||||
vm_page_assert_locked(m);
|
||||
object = m->object;
|
||||
VM_OBJECT_ASSERT_WLOCKED(object);
|
||||
error = 0;
|
||||
@ -589,6 +597,7 @@ vm_pageout_clean(vm_page_t m, int *numpagedout)
|
||||
* of time.
|
||||
*/
|
||||
if (object->type == OBJT_VNODE) {
|
||||
vm_page_unlock(m);
|
||||
vp = object->handle;
|
||||
if (vp->v_type == VREG &&
|
||||
vn_start_write(vp, &mp, V_NOWAIT) != 0) {
|
||||
@ -618,6 +627,7 @@ vm_pageout_clean(vm_page_t m, int *numpagedout)
|
||||
error = ENOENT;
|
||||
goto unlock_all;
|
||||
}
|
||||
vm_page_lock(m);
|
||||
|
||||
/*
|
||||
* While the object and page were unlocked, the page
|
||||
@ -653,6 +663,7 @@ vm_pageout_clean(vm_page_t m, int *numpagedout)
|
||||
error = EBUSY;
|
||||
goto unlock_all;
|
||||
}
|
||||
vm_page_unlock(m);
|
||||
|
||||
/*
|
||||
* If a page is dirty, then it is either being washed
|
||||
@ -688,13 +699,14 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
|
||||
{
|
||||
struct scan_state ss;
|
||||
struct vm_pagequeue *pq;
|
||||
struct mtx *mtx;
|
||||
vm_object_t object;
|
||||
vm_page_t m, marker;
|
||||
vm_page_astate_t old, new;
|
||||
int act_delta, error, numpagedout, queue, refs, starting_target;
|
||||
int act_delta, error, numpagedout, queue, starting_target;
|
||||
int vnodes_skipped;
|
||||
bool pageout_ok;
|
||||
|
||||
mtx = NULL;
|
||||
object = NULL;
|
||||
starting_target = launder;
|
||||
vnodes_skipped = 0;
|
||||
@ -722,45 +734,77 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
|
||||
if (__predict_false((m->flags & PG_MARKER) != 0))
|
||||
continue;
|
||||
|
||||
vm_page_change_lock(m, &mtx);
|
||||
|
||||
recheck:
|
||||
/*
|
||||
* Perform some quick and racy checks of the page's queue state.
|
||||
* Bail if things are not as we expect.
|
||||
* The page may have been disassociated from the queue
|
||||
* or even freed while locks were dropped. We thus must be
|
||||
* careful whenever modifying page state. Once the object lock
|
||||
* has been acquired, we have a stable reference to the page.
|
||||
*/
|
||||
old = vm_page_astate_load(m);
|
||||
if (old.queue != PQ_LAUNDRY || (old.flags & PGA_ENQUEUED) == 0)
|
||||
if (vm_page_queue(m) != queue)
|
||||
continue;
|
||||
if ((old.flags & PGA_QUEUE_OP_MASK) != 0) {
|
||||
|
||||
/*
|
||||
* A requeue was requested, so this page gets a second
|
||||
* chance.
|
||||
*/
|
||||
if ((m->aflags & PGA_REQUEUE) != 0) {
|
||||
vm_page_pqbatch_submit(m, queue);
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Wired pages may not be freed. Complete their removal
|
||||
* from the queue now to avoid needless revisits during
|
||||
* future scans. This check is racy and must be reverified once
|
||||
* we hold the object lock and have verified that the page
|
||||
* is not busy.
|
||||
*/
|
||||
if (vm_page_wired(m)) {
|
||||
vm_page_dequeue_deferred(m);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (object != m->object) {
|
||||
if (object != NULL)
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
|
||||
/*
|
||||
* A page's object pointer may be set to NULL before
|
||||
* the object lock is acquired.
|
||||
*/
|
||||
object = (vm_object_t)atomic_load_ptr(&m->object);
|
||||
if (object == NULL)
|
||||
continue;
|
||||
VM_OBJECT_WLOCK(object);
|
||||
if (m->object != object) {
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
object = NULL;
|
||||
continue;
|
||||
if (object != NULL && !VM_OBJECT_TRYWLOCK(object)) {
|
||||
mtx_unlock(mtx);
|
||||
/* Depends on type-stability. */
|
||||
VM_OBJECT_WLOCK(object);
|
||||
mtx_lock(mtx);
|
||||
goto recheck;
|
||||
}
|
||||
}
|
||||
if (__predict_false(m->object == NULL))
|
||||
/*
|
||||
* The page has been removed from its object.
|
||||
*/
|
||||
continue;
|
||||
KASSERT(m->object == object, ("page %p does not belong to %p",
|
||||
m, object));
|
||||
|
||||
if (vm_page_busied(m))
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Check for wirings now that we hold the object lock and have
|
||||
* verified that the page is unbusied. If the page is mapped,
|
||||
* it may still be wired by pmap lookups. The call to
|
||||
* Re-check for wirings now that we hold the object lock and
|
||||
* have verified that the page is unbusied. If the page is
|
||||
* mapped, it may still be wired by pmap lookups. The call to
|
||||
* vm_page_try_remove_all() below atomically checks for such
|
||||
* wirings and removes mappings. If the page is unmapped, the
|
||||
* wire count is guaranteed not to increase.
|
||||
*/
|
||||
if (__predict_false(vm_page_wired(m))) {
|
||||
vm_page_pqbatch_submit(m, queue);
|
||||
vm_page_dequeue_deferred(m);
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -780,64 +824,46 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
|
||||
* that a reference from a concurrently destroyed mapping is
|
||||
* observed here and now.
|
||||
*/
|
||||
refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
|
||||
if (object->ref_count != 0)
|
||||
act_delta = pmap_ts_referenced(m);
|
||||
else {
|
||||
KASSERT(!pmap_page_is_mapped(m),
|
||||
("page %p is mapped", m));
|
||||
act_delta = 0;
|
||||
}
|
||||
if ((m->aflags & PGA_REFERENCED) != 0) {
|
||||
vm_page_aflag_clear(m, PGA_REFERENCED);
|
||||
act_delta++;
|
||||
}
|
||||
if (act_delta != 0) {
|
||||
if (object->ref_count != 0) {
|
||||
VM_CNT_INC(v_reactivated);
|
||||
vm_page_activate(m);
|
||||
|
||||
for (old = vm_page_astate_load(m);;) {
|
||||
if (old.queue != queue ||
|
||||
(old.flags & PGA_ENQUEUED) == 0)
|
||||
goto next_page;
|
||||
/*
|
||||
* Increase the activation count if the page
|
||||
* was referenced while in the laundry queue.
|
||||
* This makes it less likely that the page will
|
||||
* be returned prematurely to the inactive
|
||||
* queue.
|
||||
*/
|
||||
m->act_count += act_delta + ACT_ADVANCE;
|
||||
|
||||
if ((old.flags & PGA_QUEUE_OP_MASK) != 0) {
|
||||
vm_page_pqbatch_submit(m, queue);
|
||||
goto next_page;
|
||||
/*
|
||||
* If this was a background laundering, count
|
||||
* activated pages towards our target. The
|
||||
* purpose of background laundering is to ensure
|
||||
* that pages are eventually cycled through the
|
||||
* laundry queue, and an activation is a valid
|
||||
* way out.
|
||||
*/
|
||||
if (!in_shortfall)
|
||||
launder--;
|
||||
continue;
|
||||
} else if ((object->flags & OBJ_DEAD) == 0) {
|
||||
vm_page_requeue(m);
|
||||
continue;
|
||||
}
|
||||
|
||||
new = old;
|
||||
act_delta = refs;
|
||||
if ((old.flags & PGA_REFERENCED) != 0) {
|
||||
new.flags &= ~PGA_REFERENCED;
|
||||
act_delta++;
|
||||
}
|
||||
if (act_delta != 0) {
|
||||
if (object->ref_count != 0) {
|
||||
/*
|
||||
* Increase the activation count if the
|
||||
* page was referenced while in the
|
||||
* laundry queue. This makes it less
|
||||
* likely that the page will be returned
|
||||
* prematurely to the inactive queue.
|
||||
*/
|
||||
new.act_count += ACT_ADVANCE +
|
||||
act_delta;
|
||||
if (new.act_count > ACT_MAX)
|
||||
new.act_count = ACT_MAX;
|
||||
|
||||
new.flags |= PGA_REQUEUE;
|
||||
new.queue = PQ_ACTIVE;
|
||||
if (!vm_page_pqstate_commit(m, &old,
|
||||
new))
|
||||
continue;
|
||||
|
||||
VM_CNT_INC(v_reactivated);
|
||||
|
||||
/*
|
||||
* If this was a background laundering,
|
||||
* count activated pages towards our
|
||||
* target. The purpose of background
|
||||
* laundering is to ensure that pages
|
||||
* are eventually cycled through the
|
||||
* laundry queue, and an activation is a
|
||||
* valid way out.
|
||||
*/
|
||||
if (!in_shortfall)
|
||||
launder--;
|
||||
goto next_page;
|
||||
} else if ((object->flags & OBJ_DEAD) == 0) {
|
||||
vm_page_launder(m);
|
||||
goto next_page;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -850,7 +876,7 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
|
||||
if (object->ref_count != 0) {
|
||||
vm_page_test_dirty(m);
|
||||
if (m->dirty == 0 && !vm_page_try_remove_all(m)) {
|
||||
vm_page_pqbatch_submit(m, queue);
|
||||
vm_page_dequeue_deferred(m);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@ -874,7 +900,7 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
|
||||
else
|
||||
pageout_ok = true;
|
||||
if (!pageout_ok) {
|
||||
vm_page_launder(m);
|
||||
vm_page_requeue(m);
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -899,9 +925,13 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
|
||||
pageout_lock_miss++;
|
||||
vnodes_skipped++;
|
||||
}
|
||||
mtx = NULL;
|
||||
object = NULL;
|
||||
}
|
||||
next_page:;
|
||||
}
|
||||
if (mtx != NULL) {
|
||||
mtx_unlock(mtx);
|
||||
mtx = NULL;
|
||||
}
|
||||
if (object != NULL) {
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
@ -1139,13 +1169,12 @@ static void
|
||||
vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
|
||||
{
|
||||
struct scan_state ss;
|
||||
struct mtx *mtx;
|
||||
vm_object_t object;
|
||||
vm_page_t m, marker;
|
||||
vm_page_astate_t old, new;
|
||||
struct vm_pagequeue *pq;
|
||||
long min_scan;
|
||||
int act_delta, max_scan, ps_delta, refs, scan_tick;
|
||||
uint8_t nqueue;
|
||||
int act_delta, max_scan, scan_tick;
|
||||
|
||||
marker = &vmd->vmd_markers[PQ_ACTIVE];
|
||||
pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
|
||||
@ -1179,6 +1208,7 @@ vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
|
||||
* and scanning resumes.
|
||||
*/
|
||||
max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
|
||||
mtx = NULL;
|
||||
act_scan:
|
||||
vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
|
||||
while ((m = vm_pageout_next(&ss, false)) != NULL) {
|
||||
@ -1197,6 +1227,29 @@ vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
|
||||
if (__predict_false((m->flags & PG_MARKER) != 0))
|
||||
continue;
|
||||
|
||||
vm_page_change_lock(m, &mtx);
|
||||
|
||||
/*
|
||||
* The page may have been disassociated from the queue
|
||||
* or even freed while locks were dropped. We thus must be
|
||||
* careful whenever modifying page state. Once the object lock
|
||||
* has been acquired, we have a stable reference to the page.
|
||||
*/
|
||||
if (vm_page_queue(m) != PQ_ACTIVE)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Wired pages are dequeued lazily.
|
||||
*/
|
||||
if (vm_page_wired(m)) {
|
||||
vm_page_dequeue_deferred(m);
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* A page's object pointer may be set to NULL before
|
||||
* the object lock is acquired.
|
||||
*/
|
||||
object = (vm_object_t)atomic_load_ptr(&m->object);
|
||||
if (__predict_false(object == NULL))
|
||||
/*
|
||||
@ -1211,104 +1264,80 @@ vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
|
||||
* that a reference from a concurrently destroyed mapping is
|
||||
* observed here and now.
|
||||
*
|
||||
* Perform an unsynchronized object ref count check. While the
|
||||
* page lock ensures that the page is not reallocated to another
|
||||
* object, in particular, one with unmanaged mappings that
|
||||
* cannot support pmap_ts_referenced(), two races are,
|
||||
* Perform an unsynchronized object ref count check. While
|
||||
* the page lock ensures that the page is not reallocated to
|
||||
* another object, in particular, one with unmanaged mappings
|
||||
* that cannot support pmap_ts_referenced(), two races are,
|
||||
* nonetheless, possible:
|
||||
*
|
||||
* 1) The count was transitioning to zero, but we saw a non-
|
||||
* zero value. pmap_ts_referenced() will return zero because
|
||||
* the page is not mapped.
|
||||
* 2) The count was transitioning to one, but we saw zero. This
|
||||
* race delays the detection of a new reference. At worst,
|
||||
* we will deactivate and reactivate the page.
|
||||
* zero value. pmap_ts_referenced() will return zero
|
||||
* because the page is not mapped.
|
||||
* 2) The count was transitioning to one, but we saw zero.
|
||||
* This race delays the detection of a new reference. At
|
||||
* worst, we will deactivate and reactivate the page.
|
||||
*/
|
||||
refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
|
||||
if (object->ref_count != 0)
|
||||
act_delta = pmap_ts_referenced(m);
|
||||
else
|
||||
act_delta = 0;
|
||||
if ((m->aflags & PGA_REFERENCED) != 0) {
|
||||
vm_page_aflag_clear(m, PGA_REFERENCED);
|
||||
act_delta++;
|
||||
}
|
||||
|
||||
for (old = vm_page_astate_load(m);;) {
|
||||
if (old.queue != PQ_ACTIVE ||
|
||||
(old.flags & PGA_ENQUEUED) == 0)
|
||||
/*
|
||||
* Something has moved the page out of the
|
||||
* active queue. Don't touch it.
|
||||
*/
|
||||
break;
|
||||
if ((old.flags & PGA_DEQUEUE) != 0) {
|
||||
vm_page_pqbatch_submit(m, PQ_ACTIVE);
|
||||
break;
|
||||
}
|
||||
|
||||
new = old;
|
||||
act_delta = refs;
|
||||
if ((old.flags & PGA_REFERENCED) != 0) {
|
||||
new.flags &= ~PGA_REFERENCED;
|
||||
act_delta++;
|
||||
}
|
||||
/*
|
||||
* Advance or decay the act_count based on recent usage.
|
||||
*/
|
||||
if (act_delta != 0) {
|
||||
m->act_count += ACT_ADVANCE + act_delta;
|
||||
if (m->act_count > ACT_MAX)
|
||||
m->act_count = ACT_MAX;
|
||||
} else
|
||||
m->act_count -= min(m->act_count, ACT_DECLINE);
|
||||
|
||||
if (m->act_count == 0) {
|
||||
/*
|
||||
* Advance or decay the act_count based on recent usage.
|
||||
* When not short for inactive pages, let dirty pages go
|
||||
* through the inactive queue before moving to the
|
||||
* laundry queues. This gives them some extra time to
|
||||
* be reactivated, potentially avoiding an expensive
|
||||
* pageout. However, during a page shortage, the
|
||||
* inactive queue is necessarily small, and so dirty
|
||||
* pages would only spend a trivial amount of time in
|
||||
* the inactive queue. Therefore, we might as well
|
||||
* place them directly in the laundry queue to reduce
|
||||
* queuing overhead.
|
||||
*/
|
||||
if (act_delta != 0) {
|
||||
new.act_count += ACT_ADVANCE + act_delta;
|
||||
if (new.act_count > ACT_MAX)
|
||||
new.act_count = ACT_MAX;
|
||||
} else {
|
||||
new.act_count -= min(new.act_count, ACT_DECLINE);
|
||||
}
|
||||
|
||||
if (new.act_count > 0) {
|
||||
/*
|
||||
* Adjust the activation count and keep the page
|
||||
* in the active queue. The count might be left
|
||||
* unchanged if it is saturated.
|
||||
*/
|
||||
if (new.act_count == old.act_count ||
|
||||
vm_page_astate_fcmpset(m, &old, new))
|
||||
break;
|
||||
if (page_shortage <= 0) {
|
||||
vm_page_swapqueue(m, PQ_ACTIVE, PQ_INACTIVE);
|
||||
} else {
|
||||
/*
|
||||
* When not short for inactive pages, let dirty
|
||||
* pages go through the inactive queue before
|
||||
* moving to the laundry queues. This gives
|
||||
* them some extra time to be reactivated,
|
||||
* potentially avoiding an expensive pageout.
|
||||
* However, during a page shortage, the inactive
|
||||
* queue is necessarily small, and so dirty
|
||||
* pages would only spend a trivial amount of
|
||||
* time in the inactive queue. Therefore, we
|
||||
* might as well place them directly in the
|
||||
* laundry queue to reduce queuing overhead.
|
||||
*
|
||||
* Calling vm_page_test_dirty() here would
|
||||
* require acquisition of the object's write
|
||||
* lock. However, during a page shortage,
|
||||
* directing dirty pages into the laundry queue
|
||||
* is only an optimization and not a
|
||||
* directing dirty pages into the laundry
|
||||
* queue is only an optimization and not a
|
||||
* requirement. Therefore, we simply rely on
|
||||
* the opportunistic updates to the page's dirty
|
||||
* field by the pmap.
|
||||
* the opportunistic updates to the page's
|
||||
* dirty field by the pmap.
|
||||
*/
|
||||
if (page_shortage <= 0) {
|
||||
nqueue = PQ_INACTIVE;
|
||||
ps_delta = 0;
|
||||
} else if (m->dirty == 0) {
|
||||
nqueue = PQ_INACTIVE;
|
||||
ps_delta = act_scan_laundry_weight;
|
||||
if (m->dirty == 0) {
|
||||
vm_page_swapqueue(m, PQ_ACTIVE,
|
||||
PQ_INACTIVE);
|
||||
page_shortage -=
|
||||
act_scan_laundry_weight;
|
||||
} else {
|
||||
nqueue = PQ_LAUNDRY;
|
||||
ps_delta = 1;
|
||||
}
|
||||
|
||||
new.flags |= PGA_REQUEUE;
|
||||
new.queue = nqueue;
|
||||
if (vm_page_pqstate_commit(m, &old, new)) {
|
||||
page_shortage -= ps_delta;
|
||||
break;
|
||||
vm_page_swapqueue(m, PQ_ACTIVE,
|
||||
PQ_LAUNDRY);
|
||||
page_shortage--;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (mtx != NULL) {
|
||||
mtx_unlock(mtx);
|
||||
mtx = NULL;
|
||||
}
|
||||
vm_pagequeue_lock(pq);
|
||||
TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
|
||||
TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
|
||||
@ -1320,30 +1349,20 @@ static int
|
||||
vm_pageout_reinsert_inactive_page(struct scan_state *ss, vm_page_t m)
|
||||
{
|
||||
struct vm_domain *vmd;
|
||||
vm_page_astate_t old, new;
|
||||
|
||||
for (old = vm_page_astate_load(m);;) {
|
||||
if (old.queue != PQ_INACTIVE ||
|
||||
(old.flags & (PGA_DEQUEUE | PGA_ENQUEUED)) != 0)
|
||||
break;
|
||||
|
||||
new = old;
|
||||
new.flags |= PGA_ENQUEUED;
|
||||
new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD);
|
||||
if (!vm_page_astate_fcmpset(m, &old, new))
|
||||
continue;
|
||||
|
||||
if ((old.flags & PGA_REQUEUE_HEAD) != 0) {
|
||||
vmd = vm_pagequeue_domain(m);
|
||||
TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
|
||||
} else if ((old.flags & PGA_REQUEUE) != 0) {
|
||||
TAILQ_INSERT_TAIL(&ss->pq->pq_pl, m, plinks.q);
|
||||
} else {
|
||||
TAILQ_INSERT_BEFORE(ss->marker, m, plinks.q);
|
||||
}
|
||||
return (1);
|
||||
}
|
||||
return (0);
|
||||
if (m->queue != PQ_INACTIVE || (m->aflags & PGA_ENQUEUED) != 0)
|
||||
return (0);
|
||||
vm_page_aflag_set(m, PGA_ENQUEUED);
|
||||
if ((m->aflags & PGA_REQUEUE_HEAD) != 0) {
|
||||
vmd = vm_pagequeue_domain(m);
|
||||
TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
|
||||
vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
|
||||
} else if ((m->aflags & PGA_REQUEUE) != 0) {
|
||||
TAILQ_INSERT_TAIL(&ss->pq->pq_pl, m, plinks.q);
|
||||
vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
|
||||
} else
|
||||
TAILQ_INSERT_BEFORE(ss->marker, m, plinks.q);
|
||||
return (1);
|
||||
}
|
||||
|
||||
/*
@ -1386,11 +1405,11 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
{
struct scan_state ss;
struct vm_batchqueue rq;
struct mtx *mtx;
vm_page_t m, marker;
vm_page_astate_t old, new;
struct vm_pagequeue *pq;
vm_object_t object;
int act_delta, addl_page_shortage, deficit, page_shortage, refs;
int act_delta, addl_page_shortage, deficit, page_shortage;
int starting_page_shortage;

/*
@ -1410,6 +1429,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
starting_page_shortage = page_shortage = shortage + deficit;

mtx = NULL;
object = NULL;
vm_batchqueue_init(&rq);

@ -1427,31 +1447,65 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
KASSERT((m->flags & PG_MARKER) == 0,
("marker page %p was dequeued", m));

vm_page_change_lock(m, &mtx);

recheck:
/*
* Perform some quick and racy checks of the page's queue state.
* Bail if things are not as we expect.
* The page may have been disassociated from the queue
* or even freed while locks were dropped. We thus must be
* careful whenever modifying page state. Once the object lock
* has been acquired, we have a stable reference to the page.
*/
old = vm_page_astate_load(m);
if (old.queue != PQ_INACTIVE || (old.flags & PGA_ENQUEUED) != 0)
if (vm_page_queue(m) != PQ_INACTIVE) {
addl_page_shortage++;
continue;
if ((old.flags & PGA_QUEUE_OP_MASK) != 0) {
vm_page_pqbatch_submit(m, PQ_INACTIVE);
}

/*
* The page was re-enqueued after the page queue lock was
* dropped, or a requeue was requested. This page gets a second
* chance.
*/
if ((m->aflags & (PGA_ENQUEUED | PGA_REQUEUE |
PGA_REQUEUE_HEAD)) != 0)
goto reinsert;

/*
* Wired pages may not be freed. Complete their removal
* from the queue now to avoid needless revisits during
* future scans. This check is racy and must be reverified once
* we hold the object lock and have verified that the page
* is not busy.
*/
if (vm_page_wired(m)) {
vm_page_dequeue_deferred(m);
continue;
}

if (object != m->object) {
if (object != NULL)
VM_OBJECT_WUNLOCK(object);

/*
* A page's object pointer may be set to NULL before
* the object lock is acquired.
*/
object = (vm_object_t)atomic_load_ptr(&m->object);
if (object == NULL)
continue;
VM_OBJECT_WLOCK(object);
if (m->object != object) {
VM_OBJECT_WUNLOCK(object);
object = NULL;
goto reinsert;
if (object != NULL && !VM_OBJECT_TRYWLOCK(object)) {
mtx_unlock(mtx);
/* Depends on type-stability. */
VM_OBJECT_WLOCK(object);
mtx_lock(mtx);
goto recheck;
}
}
if (__predict_false(m->object == NULL))
/*
* The page has been removed from its object.
*/
continue;
KASSERT(m->object == object, ("page %p does not belong to %p",
m, object));

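The hunk above swaps two object-locking strategies: the code being removed loads the page's object pointer, takes the object lock unconditionally, and then revalidates that the page still belongs to that object, while the restored code try-locks and falls back to dropping and retaking the page lock. A userspace sketch of the lock-then-revalidate idiom follows (compile with -pthread); the structures, field names, and main() are illustrative assumptions.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct object {
	pthread_mutex_t lock;
	int id;
};

struct page {
	struct object *_Atomic object;	/* may be cleared concurrently */
};

/*
 * Lock the object a page claims to belong to, then revalidate the
 * association; return NULL if the page lost its object in the meantime.
 */
static struct object *
lock_page_object(struct page *p)
{
	struct object *obj;

	obj = atomic_load(&p->object);
	if (obj == NULL)
		return (NULL);
	pthread_mutex_lock(&obj->lock);
	if (atomic_load(&p->object) != obj) {
		/* The page was moved or freed; back out. */
		pthread_mutex_unlock(&obj->lock);
		return (NULL);
	}
	return (obj);
}

int
main(void)
{
	struct object obj = { PTHREAD_MUTEX_INITIALIZER, 42 };
	struct page pg;
	struct object *locked;

	atomic_init(&pg.object, &obj);
	locked = lock_page_object(&pg);
	printf("locked object id: %d\n", locked != NULL ? locked->id : -1);
	if (locked != NULL)
		pthread_mutex_unlock(&locked->lock);
	return (0);
}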
if (vm_page_busied(m)) {
/*
@ -1467,15 +1521,15 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
}

/*
* Check for wirings now that we hold the object lock and have
* verified that the page is unbusied. If the page is mapped,
* it may still be wired by pmap lookups. The call to
* Re-check for wirings now that we hold the object lock and
* have verified that the page is unbusied. If the page is
* mapped, it may still be wired by pmap lookups. The call to
* vm_page_try_remove_all() below atomically checks for such
* wirings and removes mappings. If the page is unmapped, the
* wire count is guaranteed not to increase.
*/
if (__predict_false(vm_page_wired(m))) {
vm_page_pqbatch_submit(m, PQ_INACTIVE);
vm_page_dequeue_deferred(m);
continue;
}

@ -1495,52 +1549,35 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
* that a reference from a concurrently destroyed mapping is
* observed here and now.
*/
refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
if (object->ref_count != 0)
act_delta = pmap_ts_referenced(m);
else {
KASSERT(!pmap_page_is_mapped(m),
("page %p is mapped", m));
act_delta = 0;
}
if ((m->aflags & PGA_REFERENCED) != 0) {
vm_page_aflag_clear(m, PGA_REFERENCED);
act_delta++;
}
if (act_delta != 0) {
if (object->ref_count != 0) {
VM_CNT_INC(v_reactivated);
vm_page_activate(m);

for (old = vm_page_astate_load(m);;) {
if (old.queue != PQ_INACTIVE ||
(old.flags & PGA_ENQUEUED) != 0)
goto next_page;

if ((old.flags & PGA_QUEUE_OP_MASK) != 0) {
vm_page_pqbatch_submit(m, PQ_INACTIVE);
goto next_page;
/*
* Increase the activation count if the page
* was referenced while in the inactive queue.
* This makes it less likely that the page will
* be returned prematurely to the inactive
* queue.
*/
m->act_count += act_delta + ACT_ADVANCE;
continue;
} else if ((object->flags & OBJ_DEAD) == 0) {
vm_page_aflag_set(m, PGA_REQUEUE);
goto reinsert;
}

new = old;
act_delta = refs;
if ((old.flags & PGA_REFERENCED) != 0) {
new.flags &= ~PGA_REFERENCED;
act_delta++;
}
if (act_delta != 0) {
if (object->ref_count != 0) {
/*
* Increase the activation count if the
* page was referenced while in the
* inactive queue. This makes it less
* likely that the page will be returned
* prematurely to the inactive queue.
*/
new.act_count += ACT_ADVANCE +
act_delta;
if (new.act_count > ACT_MAX)
new.act_count = ACT_MAX;

new.flags |= PGA_REQUEUE;
new.queue = PQ_ACTIVE;
if (!vm_page_pqstate_commit(m, &old,
new))
continue;

VM_CNT_INC(v_reactivated);
goto next_page;
} else if ((object->flags & OBJ_DEAD) == 0) {
vm_page_aflag_set(m, PGA_REQUEUE);
goto reinsert;
}
}
break;
}

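In both variants above, a reference observed while the page sat in the inactive queue boosts its activation count by act_delta plus ACT_ADVANCE before the page is reactivated; the astate-based side also clamps the sum at ACT_MAX. A tiny sketch of that arithmetic is below, assuming ACT_ADVANCE = 3 and ACT_MAX = 64 as in vm_page.h; treat those values and the helper as illustrative.

#include <stdio.h>

#define ACT_ADVANCE	3	/* assumed to match vm_page.h */
#define ACT_MAX		64	/* assumed to match vm_page.h */

/*
 * New activation count for a page pulled back from the inactive queue,
 * given the references harvested from the pmap plus PGA_REFERENCED.
 */
static int
bump_act_count(int act_count, int act_delta)
{
	act_count += ACT_ADVANCE + act_delta;
	if (act_count > ACT_MAX)
		act_count = ACT_MAX;
	return (act_count);
}

int
main(void)
{
	printf("referenced once:    %d\n", bump_act_count(0, 1));
	printf("heavily referenced: %d\n", bump_act_count(60, 5));
	return (0);
}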
/*
@ -1553,7 +1590,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
if (object->ref_count != 0) {
vm_page_test_dirty(m);
if (m->dirty == 0 && !vm_page_try_remove_all(m)) {
vm_page_pqbatch_submit(m, PQ_INACTIVE);
vm_page_dequeue_deferred(m);
continue;
}
}
@ -1567,30 +1604,25 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
*/
if (m->dirty == 0) {
free_page:
/* XXX comment */
old = vm_page_astate_load(m);
if (old.queue != PQ_INACTIVE ||
(old.flags & PGA_QUEUE_STATE_MASK) != 0) {
vm_page_pqbatch_submit(m, PQ_INACTIVE);
goto next_page;
}

/*
* Because we dequeued the page and have already
* checked for concurrent dequeue and enqueue
* requests, we can safely disassociate the page
* from the inactive queue.
*/
m->astate.queue = PQ_NONE;
KASSERT((m->aflags & PGA_QUEUE_STATE_MASK) == 0,
("page %p has queue state", m));
m->queue = PQ_NONE;
vm_page_free(m);
page_shortage--;
} else if ((object->flags & OBJ_DEAD) == 0)
vm_page_launder(m);
next_page:
continue;
reinsert:
vm_pageout_reinsert_inactive(&ss, &rq, m);
}
if (mtx != NULL)
mtx_unlock(mtx);
if (object != NULL)
VM_OBJECT_WUNLOCK(object);
vm_pageout_reinsert_inactive(&ss, &rq, NULL);

@ -202,8 +202,6 @@ static inline void
vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
{

vm_pagequeue_assert_locked(pq);

TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
vm_pagequeue_cnt_dec(pq);
}
@ -251,22 +249,6 @@ vm_pagequeue_domain(vm_page_t m)
return (VM_DOMAIN(vm_phys_domain(m)));
}

static inline struct vm_pagequeue *
_vm_page_pagequeue(vm_page_t m, uint8_t queue)
{

if (queue == PQ_NONE)
return (NULL);
return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
}

static inline struct vm_pagequeue *
vm_page_pagequeue(vm_page_t m)
{

return (_vm_page_pagequeue(m, atomic_load_8(&m->astate.queue)));
}

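The removed helpers above resolve a page's queue index into the matching queue of its NUMA domain, with PQ_NONE meaning the page is not enqueued. The userspace model below mirrors that shape; the constants, structures, and main() are assumptions for illustration only.

#include <stddef.h>
#include <stdio.h>

/* Illustrative queue indices; not the kernel's definitions. */
#define PQ_INACTIVE	0
#define PQ_ACTIVE	1
#define PQ_LAUNDRY	2
#define PQ_COUNT	3
#define PQ_NONE		255

struct fake_pagequeue {
	const char *name;
	int cnt;
};

struct fake_domain {
	struct fake_pagequeue queues[PQ_COUNT];
};

/* PQ_NONE means "not on any queue"; anything else indexes the domain. */
static struct fake_pagequeue *
domain_pagequeue(struct fake_domain *dom, unsigned char queue)
{
	if (queue == PQ_NONE)
		return (NULL);
	return (&dom->queues[queue]);
}

int
main(void)
{
	struct fake_domain dom = { .queues = {
		[PQ_INACTIVE]	= { "inactive", 0 },
		[PQ_ACTIVE]	= { "active", 0 },
		[PQ_LAUNDRY]	= { "laundry", 0 },
	} };
	struct fake_pagequeue *pq;

	pq = domain_pagequeue(&dom, PQ_ACTIVE);
	printf("queue: %s\n", pq != NULL ? pq->name : "(none)");
	pq = domain_pagequeue(&dom, PQ_NONE);
	printf("detached page: %s\n", pq != NULL ? pq->name : "(none)");
	return (0);
}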
/*
* Return the number of pages we need to free-up or cache
* A positive number indicates that we do not have enough free pages.

@ -108,9 +108,8 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
@ -171,56 +170,6 @@ static void swapout_procs(int action);
static void vm_req_vmdaemon(int req);
static void vm_thread_swapout(struct thread *td);

static void
vm_swapout_object_deactivate_page(vm_page_t m, int remove_mode)
{
vm_page_astate_t old, new;
int act_delta, refs;

refs = pmap_ts_referenced(m);

for (old = vm_page_astate_load(m);;) {
if ((old.flags & PGA_DEQUEUE) != 0)
break;

act_delta = refs;
if ((old.flags & PGA_REFERENCED) != 0) {
new.flags &= ~PGA_REFERENCED;
act_delta++;
}

if (old.queue != PQ_ACTIVE && act_delta != 0) {
if (new.act_count == ACT_MAX)
break;
new.act_count += act_delta;
new.flags |= PGA_REQUEUE;
new.queue = PQ_ACTIVE;
if (vm_page_pqstate_commit(m, &old, new))
break;
} else if (old.queue == PQ_ACTIVE) {
if (act_delta == 0) {
new.act_count -= min(new.act_count,
ACT_DECLINE);
if (!remove_mode && new.act_count == 0) {
(void)vm_page_try_remove_all(m);

new.flags |= PGA_REQUEUE;
new.queue = PQ_INACTIVE;
}
if (vm_page_pqstate_commit(m, &old, new))
break;
} else {
if (new.act_count < ACT_MAX - ACT_ADVANCE)
new.act_count += ACT_ADVANCE;
if (vm_page_astate_fcmpset(m, &old, new))
break;
}
} else {
(void)vm_page_try_remove_all(m);
}
}
}

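The function removed above ages active pages: a pass that finds no references decrements act_count by ACT_DECLINE and deactivates the page once the count reaches zero, while a referenced page gains ACT_ADVANCE up to ACT_MAX. The sketch below models only that aging arithmetic, assuming ACT_DECLINE = 1, ACT_ADVANCE = 3, and ACT_MAX = 64; the helper and main() are illustrative, not the kernel routine.

#include <stdbool.h>
#include <stdio.h>

#define ACT_DECLINE	1	/* assumed to match vm_page.h */
#define ACT_ADVANCE	3	/* assumed to match vm_page.h */
#define ACT_MAX		64	/* assumed to match vm_page.h */

/*
 * One aging pass over an active page.  Returns true when an unreferenced
 * page has decayed to zero and should be deactivated.
 */
static bool
age_active_page(int *act_count, int act_delta)
{
	if (act_delta == 0) {
		*act_count -= (*act_count < ACT_DECLINE) ?
		    *act_count : ACT_DECLINE;
		return (*act_count == 0);
	}
	if (*act_count < ACT_MAX - ACT_ADVANCE)
		*act_count += ACT_ADVANCE;
	return (false);
}

int
main(void)
{
	int act = 2, pass;

	for (pass = 1; pass <= 3; pass++) {
		bool deact = age_active_page(&act, 0);
		printf("pass %d: act_count=%d deactivate=%s\n", pass,
		    act, deact ? "yes" : "no");
	}
	return (0);
}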
/*
* vm_swapout_object_deactivate_pages
*
@ -235,7 +184,7 @@ vm_swapout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
{
vm_object_t backing_object, object;
vm_page_t p;
int remove_mode;
int act_delta, remove_mode;

VM_OBJECT_ASSERT_LOCKED(first_object);
if ((first_object->flags & OBJ_FICTITIOUS) != 0)
@ -271,8 +220,37 @@ vm_swapout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
VM_CNT_INC(v_pdpages);
if (!pmap_page_exists_quick(pmap, p))
continue;

vm_swapout_object_deactivate_page(p, remove_mode);
act_delta = pmap_ts_referenced(p);
vm_page_lock(p);
if ((p->aflags & PGA_REFERENCED) != 0) {
if (act_delta == 0)
act_delta = 1;
vm_page_aflag_clear(p, PGA_REFERENCED);
}
if (!vm_page_active(p) && act_delta != 0) {
vm_page_activate(p);
p->act_count += act_delta;
} else if (vm_page_active(p)) {
/*
* The page daemon does not requeue pages
* after modifying their activation count.
*/
if (act_delta == 0) {
p->act_count -= min(p->act_count,
ACT_DECLINE);
if (!remove_mode && p->act_count == 0) {
(void)vm_page_try_remove_all(p);
vm_page_deactivate(p);
}
} else {
vm_page_activate(p);
if (p->act_count < ACT_MAX -
ACT_ADVANCE)
p->act_count += ACT_ADVANCE;
}
} else if (vm_page_inactive(p))
(void)vm_page_try_remove_all(p);
vm_page_unlock(p);
}
if ((backing_object = object->backing_object) == NULL)
goto unlock_return;