Split the vm_page flags PG_WRITEABLE and PG_REFERENCED into a new atomic
aflags field.  Updates to the atomic flags are performed using atomic ops
on the containing word, do not require any vm lock to be held, and are
non-blocking.  The vm_page_aflag_set(9) and vm_page_aflag_clear(9)
functions are provided to modify the aflags.
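
A minimal usage sketch (a hypothetical pmap-style fragment, not part of this
commit) of the new KPI follows.  The remaining constraints, asserted by
vm_page_aflag_set() and vm_page_aflag_clear() in the vm/vm_page.c hunk further
down, are that PGA_WRITEABLE may only be set on a managed, VPO_BUSY page and
that PGA_REFERENCED may only be cleared with the containing object locked.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Illustration only: "accessed" and "last_mapping" stand in for
 * pmap-specific state (e.g. a hardware-maintained accessed bit and an
 * emptied pv list), mirroring the pmap hunks in this change.
 */
static void
demote_mapping_sketch(vm_page_t m, int accessed, int last_mapping)
{

	if (accessed)
		vm_page_aflag_set(m, PGA_REFERENCED);	/* no vm lock needed */
	if (last_mapping)
		vm_page_aflag_clear(m, PGA_WRITEABLE);	/* no longer mapped writeable */
}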

Document that modifications of the flags field now only require the
page lock.

Introduce the vm_page_reference(9) function to provide a stable KPI and
KBI for filesystems like tmpfs and zfs that need to mark a page as
referenced.
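
A minimal sketch of the consumer pattern that vm_page_reference(9) stabilizes
(compare the zfs and tmpfs hunks below); the helper name is hypothetical and
the caller is assumed to hold the VM object lock, as in those hunks.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical wrapper: mark a busy page referenced before sleeping on
 * it, so that the page daemon is less likely to reclaim it meanwhile.
 */
static void
sleep_on_busy_page_sketch(vm_page_t m, const char *wmesg)
{

	vm_page_reference(m);	/* sets PGA_REFERENCED without queue locks */
	vm_page_sleep(m, wmesg);
}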

Reviewed by:    alc, attilio
Tested by:      marius, flo (sparc64); andreast (powerpc, powerpc64)
Approved by:	re (bz)
Konstantin Belousov 2011-09-06 10:30:11 +00:00
parent b236731716
commit 3407fefef6
21 changed files with 289 additions and 276 deletions

View File

@ -2123,7 +2123,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
KASSERT((tpte & PG_W) == 0,
("pmap_collect: wired pte %#lx", tpte));
if (tpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
free = NULL;
@ -2137,7 +2137,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
}
if (TAILQ_EMPTY(&m->md.pv_list) &&
TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
@ -2391,7 +2391,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
if (TAILQ_EMPTY(&m->md.pv_list)) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
if (TAILQ_EMPTY(&pvh->pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
@ -2615,10 +2615,10 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
if (oldpde & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
if (TAILQ_EMPTY(&m->md.pv_list) &&
TAILQ_EMPTY(&pvh->pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
if (pmap == kernel_pmap) {
@ -2659,7 +2659,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
if (oldpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
pmap_remove_entry(pmap, m, va);
}
return (pmap_unuse_pt(pmap, va, ptepde, free));
@ -2872,7 +2872,7 @@ pmap_remove_all(vm_page_t m)
if (tpte & PG_W)
pmap->pm_stats.wired_count--;
if (tpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
/*
* Update the vm_page_t clean and reference bits.
@ -2885,7 +2885,7 @@ pmap_remove_all(vm_page_t m)
free_pv_entry(pmap, pv);
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
pmap_free_zero_pages(free);
}
@ -3301,7 +3301,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
if ((prot & VM_PROT_WRITE) != 0) {
newpte |= PG_RW;
if ((newpte & PG_MANAGED) != 0)
vm_page_flag_set(m, PG_WRITEABLE);
vm_page_aflag_set(m, PGA_WRITEABLE);
}
if ((prot & VM_PROT_EXECUTE) == 0)
newpte |= pg_nx;
@ -3325,7 +3325,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
origpte = pte_load_store(pte, newpte);
if (origpte & PG_A) {
if (origpte & PG_MANAGED)
vm_page_flag_set(om, PG_REFERENCED);
vm_page_aflag_set(om, PGA_REFERENCED);
if (opa != VM_PAGE_TO_PHYS(m) || ((origpte &
PG_NX) == 0 && (newpte & PG_NX)))
invlva = TRUE;
@ -3339,7 +3339,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
if ((origpte & PG_MANAGED) != 0 &&
TAILQ_EMPTY(&om->md.pv_list) &&
TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
vm_page_flag_clear(om, PG_WRITEABLE);
vm_page_aflag_clear(om, PGA_WRITEABLE);
if (invlva)
pmap_invalidate_page(pmap, va);
} else
@ -4147,7 +4147,7 @@ pmap_remove_pages(pmap_t pmap)
if (TAILQ_EMPTY(&pvh->pv_list)) {
for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
if (TAILQ_EMPTY(&mt->md.pv_list))
vm_page_flag_clear(mt, PG_WRITEABLE);
vm_page_aflag_clear(mt, PGA_WRITEABLE);
}
mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
if (mpte != NULL) {
@ -4165,7 +4165,7 @@ pmap_remove_pages(pmap_t pmap)
if (TAILQ_EMPTY(&m->md.pv_list)) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
if (TAILQ_EMPTY(&pvh->pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
@ -4203,13 +4203,13 @@ pmap_is_modified(vm_page_t m)
("pmap_is_modified: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
vm_page_lock_queues();
rv = pmap_is_modified_pvh(&m->md) ||
@ -4332,13 +4332,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@ -4370,7 +4370,7 @@ pmap_remove_write(vm_page_t m)
}
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@ -4478,11 +4478,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
* If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
* VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));

View File

@ -1402,7 +1402,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
if ((kwritable == 0) && (writable == 0)) {
pg->md.pvh_attrs &= ~PVF_MOD;
vm_page_flag_clear(pg, PG_WRITEABLE);
vm_page_aflag_clear(pg, PGA_WRITEABLE);
return;
}
}
@ -1568,7 +1568,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
}
if (maskbits & PVF_WRITE)
vm_page_flag_clear(pg, PG_WRITEABLE);
vm_page_aflag_clear(pg, PGA_WRITEABLE);
vm_page_unlock_queues();
return (count);
}
@ -1630,7 +1630,7 @@ pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
if (pve->pv_flags & PVF_WIRED)
++pm->pm_stats.wired_count;
vm_page_flag_set(pg, PG_REFERENCED);
vm_page_aflag_set(pg, PGA_REFERENCED);
}
/*
@ -1699,7 +1699,7 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
pg->md.pvh_attrs &= ~PVF_REF;
else
vm_page_flag_set(pg, PG_REFERENCED);
vm_page_aflag_set(pg, PGA_REFERENCED);
if ((pve->pv_flags & PVF_NC) && ((pm == pmap_kernel()) ||
(pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC)))
pmap_fix_cache(pg, pm, 0);
@ -1709,7 +1709,7 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
break;
if (!pve) {
pg->md.pvh_attrs &= ~PVF_MOD;
vm_page_flag_clear(pg, PG_WRITEABLE);
vm_page_aflag_clear(pg, PGA_WRITEABLE);
}
}
pv = TAILQ_FIRST(&pg->md.pv_list);
@ -1724,7 +1724,7 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
--pm->pm_stats.wired_count;
pg->md.pvh_attrs &= ~PVF_REF;
pg->md.pvh_attrs &= ~PVF_MOD;
vm_page_flag_clear(pg, PG_WRITEABLE);
vm_page_aflag_clear(pg, PGA_WRITEABLE);
pmap_free_pv_entry(pv);
}
}
@ -2695,7 +2695,7 @@ pmap_remove_pages(pmap_t pmap)
npv = TAILQ_NEXT(pv, pv_plist);
pmap_nuke_pv(m, pmap, pv);
if (TAILQ_EMPTY(&m->md.pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
pmap_free_pv_entry(pv);
pmap_free_l2_bucket(pmap, l2b, 1);
}
@ -3172,7 +3172,7 @@ pmap_remove_all(vm_page_t m)
else
pmap_tlb_flushD(curpm);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@ -3406,7 +3406,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
vm_page_dirty(m);
}
if (m && opte)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
} else {
/*
* Need to do page referenced emulation.
@ -3418,7 +3418,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
npte |= L2_S_PROT_W;
if (m != NULL &&
(m->oflags & VPO_UNMANAGED) == 0)
vm_page_flag_set(m, PG_WRITEABLE);
vm_page_aflag_set(m, PGA_WRITEABLE);
}
npte |= pte_l2_s_cache_mode;
if (m && m == opg) {
@ -4505,11 +4505,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no mappings can be modified.
* If the page is not PGA_WRITEABLE, then no mappings can be modified.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
* VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
if (m->md.pvh_attrs & PVF_MOD)
pmap_clearbit(m, PVF_MOD);
@ -4558,13 +4558,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) != 0 ||
(m->flags & PG_WRITEABLE) != 0)
(m->aflags & PGA_WRITEABLE) != 0)
pmap_clearbit(m, PVF_WRITE);
}

View File

@ -331,8 +331,7 @@ page_lookup(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_lock_queues();
vm_page_flag_set(pp, PG_REFERENCED);
vm_page_reference(pp);
vm_page_sleep(pp, "zfsmwb");
continue;
}

View File

@ -518,8 +518,7 @@ tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio
* Reference the page before unlocking and sleeping so
* that the page daemon is less likely to reclaim it.
*/
vm_page_lock_queues();
vm_page_flag_set(m, PG_REFERENCED);
vm_page_reference(m);
vm_page_sleep(m, "tmfsmr");
goto lookupvpg;
}
@ -538,8 +537,7 @@ tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio
* Reference the page before unlocking and sleeping so
* that the page daemon is less likely to reclaim it.
*/
vm_page_lock_queues();
vm_page_flag_set(m, PG_REFERENCED);
vm_page_reference(m);
vm_page_sleep(m, "tmfsmr");
goto lookupvpg;
}
@ -650,8 +648,7 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
* Reference the page before unlocking and sleeping so
* that the page daemon is less likely to reclaim it.
*/
vm_page_lock_queues();
vm_page_flag_set(vpg, PG_REFERENCED);
vm_page_reference(vpg);
vm_page_sleep(vpg, "tmfsmw");
goto lookupvpg;
}

View File

@ -2207,7 +2207,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
KASSERT((tpte & PG_W) == 0,
("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
if (tpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
free = NULL;
@ -2221,7 +2221,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
}
if (TAILQ_EMPTY(&m->md.pv_list) &&
TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
sched_unpin();
}
@ -2461,7 +2461,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
if (TAILQ_EMPTY(&m->md.pv_list)) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
if (TAILQ_EMPTY(&pvh->pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
@ -2714,10 +2714,10 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
if (oldpde & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
if (TAILQ_EMPTY(&m->md.pv_list) &&
TAILQ_EMPTY(&pvh->pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
if (pmap == kernel_pmap) {
@ -2763,7 +2763,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
if (oldpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
pmap_remove_entry(pmap, m, va);
}
return (pmap_unuse_pt(pmap, va, free));
@ -2953,7 +2953,7 @@ pmap_remove_all(vm_page_t m)
if (tpte & PG_W)
pmap->pm_stats.wired_count--;
if (tpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
/*
* Update the vm_page_t clean and reference bits.
@ -2966,7 +2966,7 @@ pmap_remove_all(vm_page_t m)
free_pv_entry(pmap, pv);
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
sched_unpin();
vm_page_unlock_queues();
pmap_free_zero_pages(free);
@ -3413,7 +3413,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
if ((prot & VM_PROT_WRITE) != 0) {
newpte |= PG_RW;
if ((newpte & PG_MANAGED) != 0)
vm_page_flag_set(m, PG_WRITEABLE);
vm_page_aflag_set(m, PGA_WRITEABLE);
}
#ifdef PAE
if ((prot & VM_PROT_EXECUTE) == 0)
@ -3439,7 +3439,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
origpte = pte_load_store(pte, newpte);
if (origpte & PG_A) {
if (origpte & PG_MANAGED)
vm_page_flag_set(om, PG_REFERENCED);
vm_page_aflag_set(om, PGA_REFERENCED);
if (opa != VM_PAGE_TO_PHYS(m))
invlva = TRUE;
#ifdef PAE
@ -3457,7 +3457,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
if ((origpte & PG_MANAGED) != 0 &&
TAILQ_EMPTY(&om->md.pv_list) &&
TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
vm_page_flag_clear(om, PG_WRITEABLE);
vm_page_aflag_clear(om, PGA_WRITEABLE);
if (invlva)
pmap_invalidate_page(pmap, va);
} else
@ -4287,7 +4287,7 @@ pmap_remove_pages(pmap_t pmap)
if (TAILQ_EMPTY(&pvh->pv_list)) {
for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
if (TAILQ_EMPTY(&mt->md.pv_list))
vm_page_flag_clear(mt, PG_WRITEABLE);
vm_page_aflag_clear(mt, PGA_WRITEABLE);
}
mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
if (mpte != NULL) {
@ -4305,7 +4305,7 @@ pmap_remove_pages(pmap_t pmap)
if (TAILQ_EMPTY(&m->md.pv_list)) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
if (TAILQ_EMPTY(&pvh->pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
pmap_unuse_pt(pmap, pv->pv_va, &free);
}
@ -4345,13 +4345,13 @@ pmap_is_modified(vm_page_t m)
("pmap_is_modified: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
vm_page_lock_queues();
rv = pmap_is_modified_pvh(&m->md) ||
@ -4478,13 +4478,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
sched_pin();
@ -4522,7 +4522,7 @@ pmap_remove_write(vm_page_t m)
}
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
sched_unpin();
vm_page_unlock_queues();
}
@ -4633,11 +4633,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
* If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
* VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
sched_pin();

View File

@ -2037,7 +2037,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
KASSERT((tpte & PG_W) == 0,
("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
if (tpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
free = NULL;
@ -2050,7 +2050,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
PMAP_UNLOCK(pmap);
}
if (TAILQ_EMPTY(&m->md.pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
sched_unpin();
}
@ -2222,7 +2222,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
pmap_pvh_free(&m->md, pmap, va);
if (TAILQ_EMPTY(&m->md.pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
/*
@ -2274,7 +2274,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
vm_page_dirty(m);
if (oldpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
pmap_remove_entry(pmap, m, va);
}
return (pmap_unuse_pt(pmap, va, free));
@ -2446,7 +2446,7 @@ pmap_remove_all(vm_page_t m)
if (tpte & PG_W)
pmap->pm_stats.wired_count--;
if (tpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
/*
* Update the vm_page_t clean and reference bits.
@ -2459,7 +2459,7 @@ pmap_remove_all(vm_page_t m)
free_pv_entry(pmap, pv);
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
PT_UPDATES_FLUSH();
if (*PMAP1)
PT_SET_MA(PADDR1, 0);
@ -2739,7 +2739,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
if ((prot & VM_PROT_WRITE) != 0) {
newpte |= PG_RW;
if ((newpte & PG_MANAGED) != 0)
vm_page_flag_set(m, PG_WRITEABLE);
vm_page_aflag_set(m, PGA_WRITEABLE);
}
#ifdef PAE
if ((prot & VM_PROT_EXECUTE) == 0)
@ -2764,7 +2764,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
PT_SET_VA(pte, newpte | PG_A, FALSE);
if (origpte & PG_A) {
if (origpte & PG_MANAGED)
vm_page_flag_set(om, PG_REFERENCED);
vm_page_aflag_set(om, PGA_REFERENCED);
if (opa != VM_PAGE_TO_PHYS(m))
invlva = TRUE;
#ifdef PAE
@ -2781,7 +2781,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
}
if ((origpte & PG_MANAGED) != 0 &&
TAILQ_EMPTY(&om->md.pv_list))
vm_page_flag_clear(om, PG_WRITEABLE);
vm_page_aflag_clear(om, PGA_WRITEABLE);
if (invlva)
pmap_invalidate_page(pmap, va);
} else{
@ -3549,7 +3549,7 @@ pmap_remove_pages(pmap_t pmap)
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
if (TAILQ_EMPTY(&m->md.pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
pmap_unuse_pt(pmap, pv->pv_va, &free);
@ -3604,13 +3604,13 @@ pmap_is_modified(vm_page_t m)
rv = FALSE;
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
sched_pin();
@ -3735,13 +3735,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
sched_pin();
@ -3769,7 +3769,7 @@ pmap_remove_write(vm_page_t m)
}
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
PT_UPDATES_FLUSH();
if (*PMAP1)
PT_SET_MA(PADDR1, 0);
@ -3846,11 +3846,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
* If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
* VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
sched_pin();

View File

@ -804,7 +804,7 @@ get_pv_entry(pmap_t locked_pmap)
pmap_invalidate_page(va);
pmap_switch(oldpmap);
if (pmap_accessed(pte))
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
if (pmap_dirty(pte))
vm_page_dirty(m);
pmap_free_pte(pte, va);
@ -819,7 +819,7 @@ get_pv_entry(pmap_t locked_pmap)
free_pv_entry(pv);
}
if (TAILQ_EMPTY(&m->md.pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
if (allocated_pv == NULL) {
if (vpq == &vm_page_queues[PQ_INACTIVE]) {
@ -972,7 +972,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va, pv_entry_t pv)
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count--;
if (TAILQ_FIRST(&m->md.pv_list) == NULL)
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
free_pv_entry(pv);
@ -1198,7 +1198,7 @@ pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va,
if (pmap_dirty(pte))
vm_page_dirty(m);
if (pmap_accessed(pte))
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
error = pmap_remove_entry(pmap, m, va, pv);
}
@ -1460,7 +1460,7 @@ pmap_remove_all(vm_page_t m)
pmap_switch(oldpmap);
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@ -1647,7 +1647,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
ia64_sync_icache(va, PAGE_SIZE);
if ((prot & VM_PROT_WRITE) != 0 && managed)
vm_page_flag_set(m, PG_WRITEABLE);
vm_page_aflag_set(m, PGA_WRITEABLE);
vm_page_unlock_queues();
pmap_switch(oldpmap);
PMAP_UNLOCK(pmap);
@ -2048,13 +2048,13 @@ pmap_is_modified(vm_page_t m)
rv = FALSE;
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can be dirty.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@ -2139,11 +2139,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can be modified.
* If the page is not PGA_WRITEABLE, then no PTEs can be modified.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
* VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@ -2206,13 +2206,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@ -2235,7 +2235,7 @@ pmap_remove_write(vm_page_t m)
pmap_switch(oldpmap);
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}

View File

@ -1432,7 +1432,7 @@ get_pv_entry(pmap_t locked_pmap)
KASSERT(!pte_test(&oldpte, PTE_W),
("wired pte for unwired page"));
if (m->md.pv_flags & PV_TABLE_REF)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
if (pte_test(&oldpte, PTE_D))
vm_page_dirty(m);
pmap_invalidate_page(pmap, va);
@ -1448,7 +1448,7 @@ get_pv_entry(pmap_t locked_pmap)
free_pv_entry(pv);
}
if (TAILQ_EMPTY(&m->md.pv_list)) {
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
}
}
@ -1527,7 +1527,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
pmap_pvh_free(&m->md, pmap, va);
if (TAILQ_EMPTY(&m->md.pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
/*
@ -1589,7 +1589,7 @@ pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va)
vm_page_dirty(m);
}
if (m->md.pv_flags & PV_TABLE_REF)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
pmap_remove_entry(pmap, m, va);
@ -1713,7 +1713,7 @@ pmap_remove_all(vm_page_t m)
vm_page_lock_queues();
if (m->md.pv_flags & PV_TABLE_REF)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
PMAP_LOCK(pv->pv_pmap);
@ -1757,7 +1757,7 @@ pmap_remove_all(vm_page_t m)
free_pv_entry(pv);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
vm_page_unlock_queues();
}
@ -2004,7 +2004,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
*pte = newpte;
if (page_is_managed(opa) && (opa != pa)) {
if (om->md.pv_flags & PV_TABLE_REF)
vm_page_flag_set(om, PG_REFERENCED);
vm_page_aflag_set(om, PGA_REFERENCED);
om->md.pv_flags &=
~(PV_TABLE_REF | PV_TABLE_MOD);
}
@ -2017,7 +2017,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
}
if (page_is_managed(opa) &&
TAILQ_EMPTY(&om->md.pv_list))
vm_page_flag_clear(om, PG_WRITEABLE);
vm_page_aflag_clear(om, PGA_WRITEABLE);
} else {
*pte = newpte;
}
@ -2535,7 +2535,7 @@ pmap_remove_pages(pmap_t pmap)
m->md.pv_list_count--;
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
free_pv_entry(pv);
@ -2615,7 +2615,7 @@ pmap_changebit(vm_page_t m, int bit, boolean_t setem)
PMAP_UNLOCK(pv->pv_pmap);
}
if (!setem && bit == PTE_D)
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
/*
@ -2662,13 +2662,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return;
/*
@ -2685,7 +2685,7 @@ pmap_remove_write(vm_page_t m)
pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
VM_PROT_READ | VM_PROT_EXECUTE);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@ -2724,13 +2724,13 @@ pmap_is_modified(vm_page_t m)
("pmap_is_modified: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PTE_D set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
vm_page_lock_queues();
if (m->md.pv_flags & PV_TABLE_MOD)
@ -2781,11 +2781,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can have PTE_D set.
* If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
* VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
if (m->md.pv_flags & PV_TABLE_MOD) {
@ -2929,7 +2929,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
* determine if the address is MINCORE_REFERENCED.
*/
m = PHYS_TO_VM_PAGE(pa);
if ((m->flags & PG_REFERENCED) != 0)
if ((m->aflags & PGA_REFERENCED) != 0)
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
}
if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
@ -3185,7 +3185,7 @@ init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
rw = PTE_V | PTE_D | PTE_C_CACHE;
else
rw = PTE_V | PTE_C_CACHE;
vm_page_flag_set(m, PG_WRITEABLE);
vm_page_aflag_set(m, PGA_WRITEABLE);
} else
/* Needn't emulate a modified bit for unmanaged pages. */
rw = PTE_V | PTE_D | PTE_C_CACHE;

View File

@ -1102,7 +1102,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pte_lo |= PTE_BW;
if (pmap_bootstrapped &&
(m->oflags & VPO_UNMANAGED) == 0)
vm_page_flag_set(m, PG_WRITEABLE);
vm_page_aflag_set(m, PGA_WRITEABLE);
} else
pte_lo |= PTE_BR;
@ -1255,13 +1255,13 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
("moea_is_modified: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PTE_CHG set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
return (moea_query_bit(m, PTE_CHG));
}
@ -1299,11 +1299,11 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
("moea_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can have PTE_CHG
* If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
* set. If the object containing the page is locked and the page is
* not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
* not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
moea_clear_bit(m, PTE_CHG);
}
@ -1323,13 +1323,13 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
("moea_remove_write: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
lo = moea_attr_fetch(m);
@ -1356,7 +1356,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
moea_attr_clear(m, PTE_CHG);
vm_page_dirty(m);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@ -1794,11 +1794,11 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
moea_pvo_remove(pvo, -1);
PMAP_UNLOCK(pmap);
}
if ((m->flags & PG_WRITEABLE) && moea_is_modified(mmu, m)) {
if ((m->aflags & PGA_WRITEABLE) && moea_is_modified(mmu, m)) {
moea_attr_clear(m, PTE_CHG);
vm_page_dirty(m);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}

View File

@ -1239,7 +1239,7 @@ moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
pte_lo |= LPTE_BW;
if (pmap_bootstrapped &&
(m->oflags & VPO_UNMANAGED) == 0)
vm_page_flag_set(m, PG_WRITEABLE);
vm_page_aflag_set(m, PGA_WRITEABLE);
} else
pte_lo |= LPTE_BR;
@ -1484,13 +1484,13 @@ moea64_is_modified(mmu_t mmu, vm_page_t m)
("moea64_is_modified: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have LPTE_CHG set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
return (moea64_query_bit(mmu, m, LPTE_CHG));
}
@ -1528,11 +1528,11 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m)
("moea64_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can have LPTE_CHG
* If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
* set. If the object containing the page is locked and the page is
* not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
* not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
moea64_clear_bit(mmu, m, LPTE_CHG);
}
@ -1552,13 +1552,13 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
("moea64_remove_write: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
lo = moea64_attr_fetch(m);
@ -1588,7 +1588,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
moea64_attr_clear(m, LPTE_CHG);
vm_page_dirty(m);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@ -2064,11 +2064,11 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
moea64_pvo_remove(mmu, pvo);
PMAP_UNLOCK(pmap);
}
if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) {
if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m)) {
moea64_attr_clear(m, LPTE_CHG);
vm_page_dirty(m);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}

View File

@ -771,7 +771,7 @@ pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
/* remove from pv_list */
TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
if (TAILQ_EMPTY(&m->md.pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
/* free pv entry struct */
pv_free(pve);
@ -820,7 +820,7 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
vm_page_dirty(m);
if (PTE_ISREFERENCED(pte))
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
pv_remove(pmap, va, m);
}
@ -1600,7 +1600,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
flags |= PTE_UW;
if ((flags & PTE_MANAGED) != 0)
vm_page_flag_set(m, PG_WRITEABLE);
vm_page_aflag_set(m, PGA_WRITEABLE);
} else {
/* Handle modified pages, sense modify status. */
@ -1667,7 +1667,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
flags |= PTE_UW;
if ((m->oflags & VPO_UNMANAGED) == 0)
vm_page_flag_set(m, PG_WRITEABLE);
vm_page_aflag_set(m, PGA_WRITEABLE);
}
if (prot & VM_PROT_EXECUTE) {
@ -1804,7 +1804,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@ -1957,13 +1957,13 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
("mmu_booke_remove_write: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
@ -1988,7 +1988,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
}
PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@ -2172,13 +2172,13 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
rv = FALSE;
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can be modified.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
@ -2253,11 +2253,11 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
("mmu_booke_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no PTEs can be modified.
* If the page is not PGA_WRITEABLE, then no PTEs can be modified.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
* VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {

View File

@ -584,7 +584,7 @@ METHOD void remove {
/**
* @brief Traverse the reverse-map list off the given physical page and
* remove all mappings. Clear the PG_WRITEABLE attribute from the page.
* remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
*
* @param _pg physical page
*/

View File

@ -1340,9 +1340,9 @@ pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
if ((data & TD_W) != 0)
vm_page_dirty(m);
if ((data & TD_REF) != 0)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
if (TAILQ_EMPTY(&m->md.tte_list))
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
pm->pm_stats.resident_count--;
}
pmap_cache_remove(m, va);
@ -1403,7 +1403,7 @@ pmap_remove_all(vm_page_t m)
if ((tp->tte_data & TD_WIRED) != 0)
pm->pm_stats.wired_count--;
if ((tp->tte_data & TD_REF) != 0)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
if ((tp->tte_data & TD_W) != 0)
vm_page_dirty(m);
tp->tte_data &= ~TD_V;
@ -1414,7 +1414,7 @@ pmap_remove_all(vm_page_t m)
TTE_ZERO(tp);
PMAP_UNLOCK(pm);
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}
@ -1560,7 +1560,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (wired)
tp->tte_data |= TD_W;
if ((m->oflags & VPO_UNMANAGED) == 0)
vm_page_flag_set(m, PG_WRITEABLE);
vm_page_aflag_set(m, PGA_WRITEABLE);
} else if ((data & TD_W) != 0)
vm_page_dirty(m);
@ -1601,7 +1601,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if ((prot & VM_PROT_WRITE) != 0) {
data |= TD_SW;
if ((m->oflags & VPO_UNMANAGED) == 0)
vm_page_flag_set(m, PG_WRITEABLE);
vm_page_aflag_set(m, PGA_WRITEABLE);
}
if (prot & VM_PROT_EXECUTE) {
data |= TD_EXEC;
@ -2066,13 +2066,13 @@ pmap_is_modified(vm_page_t m)
rv = FALSE;
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no TTEs can have TD_W set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
vm_page_lock_queues();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
@ -2143,11 +2143,11 @@ pmap_clear_modify(vm_page_t m)
("pmap_clear_modify: page %p is busy", m));
/*
* If the page is not PG_WRITEABLE, then no TTEs can have TD_W set.
* If the page is not PGA_WRITEABLE, then no TTEs can have TD_W set.
* If the object containing the page is locked and the page is not
* VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
* VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
*/
if ((m->flags & PG_WRITEABLE) == 0)
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
@ -2189,13 +2189,13 @@ pmap_remove_write(vm_page_t m)
("pmap_remove_write: page %p is not managed", m));
/*
* If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PG_WRITEABLE
* If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->flags & PG_WRITEABLE) == 0)
(m->aflags & PGA_WRITEABLE) == 0)
return;
vm_page_lock_queues();
TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
@ -2207,7 +2207,7 @@ pmap_remove_write(vm_page_t m)
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
}
vm_page_flag_clear(m, PG_WRITEABLE);
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_unlock_queues();
}

View File

@ -1593,7 +1593,7 @@ swp_pager_async_iodone(struct buf *bp)
* status, then finish the I/O ( which decrements the
* busy count and possibly wakes waiter's up ).
*/
KASSERT((m->flags & PG_WRITEABLE) == 0,
KASSERT((m->aflags & PGA_WRITEABLE) == 0,
("swp_pager_async_iodone: page %p is not write"
" protected", m));
vm_page_undirty(m);

View File

@ -345,9 +345,7 @@ RetryFault:;
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_lock_queues();
vm_page_flag_set(fs.m, PG_REFERENCED);
vm_page_unlock_queues();
vm_page_aflag_set(fs.m, PGA_REFERENCED);
vm_page_unlock(fs.m);
if (fs.object != fs.first_object) {
if (!VM_OBJECT_TRYLOCK(

View File

@ -901,16 +901,16 @@ mincore(td, uap)
if (m->dirty != 0)
mincoreinfo |= MINCORE_MODIFIED_OTHER;
/*
* The first test for PG_REFERENCED is an
* The first test for PGA_REFERENCED is an
* optimization. The second test is
* required because a concurrent pmap
* operation could clear the last reference
* and set PG_REFERENCED before the call to
* and set PGA_REFERENCED before the call to
* pmap_is_referenced().
*/
if ((m->flags & PG_REFERENCED) != 0 ||
if ((m->aflags & PGA_REFERENCED) != 0 ||
pmap_is_referenced(m) ||
(m->flags & PG_REFERENCED) != 0)
(m->aflags & PGA_REFERENCED) != 0)
mincoreinfo |= MINCORE_REFERENCED_OTHER;
}
if (object != NULL)

View File

@ -1098,9 +1098,7 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_lock_queues();
vm_page_flag_set(m, PG_REFERENCED);
vm_page_unlock_queues();
vm_page_aflag_set(m, PGA_REFERENCED);
}
vm_page_unlock(m);
if (object != tobject)

View File

@ -67,30 +67,9 @@
* page queue (vm_page_queue[]), regardless of other mutexes or the
* busy state of a page.
*
* - a hash chain mutex is required when associating or disassociating
* a page from the VM PAGE CACHE hash table (vm_page_buckets),
* regardless of other mutexes or the busy state of a page.
* - The object mutex is held when inserting or removing
* pages from an object (vm_page_insert() or vm_page_remove()).
*
* - either a hash chain mutex OR a busied page is required in order
* to modify the page flags. A hash chain mutex must be obtained in
* order to busy a page. A page's flags cannot be modified by a
* hash chain mutex if the page is marked busy.
*
* - The object memq mutex is held when inserting or removing
* pages from an object (vm_page_insert() or vm_page_remove()). This
* is different from the object's main mutex.
*
* Generally speaking, you have to be aware of side effects when running
* vm_page ops. A vm_page_lookup() will return with the hash chain
* locked, whether it was able to lookup the page or not. vm_page_free(),
* vm_page_cache(), vm_page_activate(), and a number of other routines
* will release the hash chain mutex for you. Intermediate manipulation
* routines such as vm_page_flag_set() expect the hash chain to be held
* on entry and the hash chain will remain held on return.
*
* pageq scanning can only occur with the pageq in question locked.
* We have a known bottleneck with the active queue, but the cache
* and free queues are actually arrays already.
*/
/*
@ -473,33 +452,68 @@ vm_page_startup(vm_offset_t vaddr)
return (vaddr);
}
void
vm_page_flag_set(vm_page_t m, unsigned short bits)
{
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);
void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
uint32_t *addr, val;
/*
* The PG_WRITEABLE flag can only be set if the page is managed and
* The PGA_WRITEABLE flag can only be set if the page is managed and
* VPO_BUSY. Currently, this flag is only set by pmap_enter().
*/
KASSERT((bits & PG_WRITEABLE) == 0 ||
KASSERT((bits & PGA_WRITEABLE) == 0 ||
(m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == VPO_BUSY,
("PG_WRITEABLE and !VPO_BUSY"));
m->flags |= bits;
("PGA_WRITEABLE and !VPO_BUSY"));
/*
* We want to use atomic updates for m->aflags, which is a
* byte wide. Not all architectures provide atomic operations
* on the single-byte destination. Punt and access the whole
* 4-byte word with an atomic update. Parallel non-atomic
* updates to the fields included in the update by proximity
* are handled properly by atomics.
*/
addr = (void *)&m->aflags;
MPASS(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0);
val = bits;
#if BYTE_ORDER == BIG_ENDIAN
val <<= 24;
#endif
atomic_set_32(addr, val);
}
void
vm_page_flag_clear(vm_page_t m, unsigned short bits)
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
uint32_t *addr, val;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
/*
* The PG_REFERENCED flag can only be cleared if the object
* The PGA_REFERENCED flag can only be cleared if the object
* containing the page is locked.
*/
KASSERT((bits & PG_REFERENCED) == 0 || VM_OBJECT_LOCKED(m->object),
("PG_REFERENCED and !VM_OBJECT_LOCKED"));
m->flags &= ~bits;
KASSERT((bits & PGA_REFERENCED) == 0 || VM_OBJECT_LOCKED(m->object),
("PGA_REFERENCED and !VM_OBJECT_LOCKED"));
/*
* See the comment in vm_page_aflag_set().
*/
addr = (void *)&m->aflags;
MPASS(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0);
val = bits;
#if BYTE_ORDER == BIG_ENDIAN
val <<= 24;
#endif
atomic_clear_32(addr, val);
}
void
vm_page_reference(vm_page_t m)
{
vm_page_aflag_set(m, PGA_REFERENCED);
}
void
@ -874,7 +888,7 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
* Since we are inserting a new and possibly dirty page,
* update the object's OBJ_MIGHTBEDIRTY flag.
*/
if (m->flags & PG_WRITEABLE)
if (m->aflags & PGA_WRITEABLE)
vm_object_set_writeable_dirty(object);
}
@ -1390,6 +1404,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
}
m->flags = flags;
mtx_unlock(&vm_page_queue_free_mtx);
m->aflags = 0;
if (object == NULL || object->type == OBJT_PHYS)
m->oflags = VPO_UNMANAGED;
else
@ -1480,6 +1495,7 @@ vm_page_alloc_init(vm_page_t m)
vm_page_zero_count--;
/* Don't clear the PG_ZERO flag; we'll need it later. */
m->flags &= PG_ZERO;
m->aflags = 0;
m->oflags = VPO_UNMANAGED;
/* Unmanaged pages don't use "act_count". */
return (drop);
@ -1880,7 +1896,7 @@ vm_page_unwire(vm_page_t m, int activate)
if (activate)
vm_page_enqueue(PQ_ACTIVE, m);
else {
vm_page_flag_clear(m, PG_WINATCFLS);
m->flags &= ~PG_WINATCFLS;
vm_page_enqueue(PQ_INACTIVE, m);
}
vm_page_unlock_queues();
@ -1923,7 +1939,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
return;
if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
vm_page_lock_queues();
vm_page_flag_clear(m, PG_WINATCFLS);
m->flags &= ~PG_WINATCFLS;
if (queue != PQ_NONE)
vm_page_queue_remove(queue, m);
if (athead)
@ -2156,15 +2172,13 @@ vm_page_dontneed(vm_page_t m)
*
* Perform the pmap_clear_reference() first. Otherwise, a concurrent
* pmap operation, such as pmap_remove(), could clear a reference in
* the pmap and set PG_REFERENCED on the page before the
* the pmap and set PGA_REFERENCED on the page before the
* pmap_clear_reference() had completed. Consequently, the page would
* appear referenced based upon an old reference that occurred before
* this function ran.
*/
pmap_clear_reference(m);
vm_page_lock_queues();
vm_page_flag_clear(m, PG_REFERENCED);
vm_page_unlock_queues();
vm_page_aflag_clear(m, PGA_REFERENCED);
if (m->dirty == 0 && pmap_is_modified(m))
vm_page_dirty(m);
@ -2213,8 +2227,7 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_lock_queues();
vm_page_flag_set(m, PG_REFERENCED);
vm_page_aflag_set(m, PGA_REFERENCED);
vm_page_sleep(m, "pgrbwt");
goto retrylookup;
} else {
@ -2329,11 +2342,11 @@ vm_page_clear_dirty_mask(vm_page_t m, int pagebits)
/*
* If the object is locked and the page is neither VPO_BUSY nor
* PG_WRITEABLE, then the page's dirty field cannot possibly be
* PGA_WRITEABLE, then the page's dirty field cannot possibly be
* set by a concurrent pmap operation.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 && (m->flags & PG_WRITEABLE) == 0)
if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0)
m->dirty &= ~pagebits;
else {
#if defined(__amd64__) || defined(__i386__) || defined(__ia64__)

View File

@ -125,12 +125,13 @@ struct vm_page {
struct md_page md; /* machine dependant stuff */
uint8_t queue; /* page queue index (P,Q) */
int8_t segind;
u_short flags; /* see below */
short hold_count; /* page hold count (P) */
uint8_t order; /* index of the buddy queue */
uint8_t pool;
u_short cow; /* page cow mapping count (P) */
u_int wire_count; /* wired down maps refs (P) */
short hold_count; /* page hold count (P) */
uint8_t aflags; /* access is atomic */
uint8_t flags; /* see below, often immutable after alloc */
u_short oflags; /* page flags (O) */
u_char act_count; /* page usage count (O) */
u_char busy; /* page busy count (O) */
@ -225,21 +226,29 @@ extern struct vpglocks pa_lock[];
/*
* These are the flags defined for vm_page.
*
* PG_REFERENCED may be cleared only if the object containing the page is
* aflags are updated by atomic accesses. Use the vm_page_aflag_set()
* and vm_page_aflag_clear() functions to set and clear the flags.
*
* PGA_REFERENCED may be cleared only if the object containing the page is
* locked.
*
* PG_WRITEABLE is set exclusively on managed pages by pmap_enter(). When it
* PGA_WRITEABLE is set exclusively on managed pages by pmap_enter(). When it
* does so, the page must be VPO_BUSY.
*/
#define PG_CACHED 0x0001 /* page is cached */
#define PG_FREE 0x0002 /* page is free */
#define PG_WINATCFLS 0x0004 /* flush dirty page on inactive q */
#define PG_FICTITIOUS 0x0008 /* physical page doesn't exist (O) */
#define PG_WRITEABLE 0x0010 /* page is mapped writeable */
#define PG_ZERO 0x0040 /* page is zeroed */
#define PG_REFERENCED 0x0080 /* page has been referenced */
#define PG_MARKER 0x1000 /* special queue marker page */
#define PG_SLAB 0x2000 /* object pointer is actually a slab */
#define PGA_WRITEABLE 0x01 /* page may be mapped writeable */
#define PGA_REFERENCED 0x02 /* page has been referenced */
/*
* Page flags. If changed at any other time than page allocation or
* freeing, the modification must be protected by the vm_page lock.
*/
#define PG_CACHED 0x01 /* page is cached */
#define PG_FREE 0x02 /* page is free */
#define PG_FICTITIOUS 0x04 /* physical page doesn't exist (O) */
#define PG_ZERO 0x08 /* page is zeroed */
#define PG_MARKER 0x10 /* special queue marker page */
#define PG_SLAB 0x20 /* object pointer is actually a slab */
#define PG_WINATCFLS 0x40 /* flush dirty page on inactive q */
/*
* Misc constants.
@ -341,8 +350,8 @@ extern struct vpglocks vm_page_queue_lock;
#define VM_ALLOC_COUNT_SHIFT 16
#define VM_ALLOC_COUNT(count) ((count) << VM_ALLOC_COUNT_SHIFT)
void vm_page_flag_set(vm_page_t m, unsigned short bits);
void vm_page_flag_clear(vm_page_t m, unsigned short bits);
void vm_page_aflag_set(vm_page_t m, uint8_t bits);
void vm_page_aflag_clear(vm_page_t m, uint8_t bits);
void vm_page_busy(vm_page_t m);
void vm_page_flash(vm_page_t m);
void vm_page_io_start(vm_page_t m);
@ -377,6 +386,7 @@ vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
vm_page_t vm_page_prev(vm_page_t m);
void vm_page_putfake(vm_page_t m);
void vm_page_reference(vm_page_t m);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_requeue(vm_page_t m);

View File

@ -497,7 +497,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen)
vm_page_t mt = mc[i];
KASSERT(pageout_status[i] == VM_PAGER_PEND ||
(mt->flags & PG_WRITEABLE) == 0,
(mt->aflags & PGA_WRITEABLE) == 0,
("vm_pageout_flush: page %p is not write protected", mt));
switch (pageout_status[i]) {
case VM_PAGER_OK:
@ -597,12 +597,10 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
continue;
}
actcount = pmap_ts_referenced(p);
if ((p->flags & PG_REFERENCED) != 0) {
if ((p->aflags & PGA_REFERENCED) != 0) {
if (actcount == 0)
actcount = 1;
vm_page_lock_queues();
vm_page_flag_clear(p, PG_REFERENCED);
vm_page_unlock_queues();
vm_page_aflag_clear(p, PGA_REFERENCED);
}
if (p->queue != PQ_ACTIVE && actcount != 0) {
vm_page_activate(p);
@ -846,7 +844,7 @@ vm_pageout_scan(int pass)
* references.
*/
if (object->ref_count == 0) {
vm_page_flag_clear(m, PG_REFERENCED);
vm_page_aflag_clear(m, PGA_REFERENCED);
KASSERT(!pmap_page_is_mapped(m),
("vm_pageout_scan: page %p is mapped", m));
@ -859,7 +857,7 @@ vm_pageout_scan(int pass)
* level VM system not knowing anything about existing
* references.
*/
} else if (((m->flags & PG_REFERENCED) == 0) &&
} else if (((m->aflags & PGA_REFERENCED) == 0) &&
(actcount = pmap_ts_referenced(m))) {
vm_page_activate(m);
vm_page_unlock(m);
@ -874,8 +872,8 @@ vm_pageout_scan(int pass)
* "activation count" higher than normal so that we will less
* likely place pages back onto the inactive queue again.
*/
if ((m->flags & PG_REFERENCED) != 0) {
vm_page_flag_clear(m, PG_REFERENCED);
if ((m->aflags & PGA_REFERENCED) != 0) {
vm_page_aflag_clear(m, PGA_REFERENCED);
actcount = pmap_ts_referenced(m);
vm_page_activate(m);
vm_page_unlock(m);
@ -891,7 +889,7 @@ vm_pageout_scan(int pass)
* be updated.
*/
if (m->dirty != VM_PAGE_BITS_ALL &&
(m->flags & PG_WRITEABLE) != 0) {
(m->aflags & PGA_WRITEABLE) != 0) {
/*
* Avoid a race condition: Unless write access is
* removed from the page, another processor could
@ -938,7 +936,7 @@ vm_pageout_scan(int pass)
* before being freed. This significantly extends
* the thrash point for a heavily loaded machine.
*/
vm_page_flag_set(m, PG_WINATCFLS);
m->flags |= PG_WINATCFLS;
vm_page_requeue(m);
} else if (maxlaunder > 0) {
/*
@ -1178,7 +1176,7 @@ vm_pageout_scan(int pass)
*/
actcount = 0;
if (object->ref_count != 0) {
if (m->flags & PG_REFERENCED) {
if (m->aflags & PGA_REFERENCED) {
actcount += 1;
}
actcount += pmap_ts_referenced(m);
@ -1192,7 +1190,7 @@ vm_pageout_scan(int pass)
/*
* Since we have "tested" this bit, we need to clear it now.
*/
vm_page_flag_clear(m, PG_REFERENCED);
vm_page_aflag_clear(m, PGA_REFERENCED);
/*
* Only if an object is currently being used, do we use the
@ -1435,8 +1433,8 @@ vm_pageout_page_stats()
}
actcount = 0;
if (m->flags & PG_REFERENCED) {
vm_page_flag_clear(m, PG_REFERENCED);
if (m->aflags & PGA_REFERENCED) {
vm_page_aflag_clear(m, PGA_REFERENCED);
actcount += 1;
}

View File

@ -1132,7 +1132,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
m = ma[ncount - 1];
KASSERT(m->busy > 0,
("vnode_pager_generic_putpages: page %p is not busy", m));
KASSERT((m->flags & PG_WRITEABLE) == 0,
KASSERT((m->aflags & PGA_WRITEABLE) == 0,
("vnode_pager_generic_putpages: page %p is not read-only", m));
vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
pgoff);