Introduce vm_page_astate.

This is a 32-bit structure embedded in each vm_page, consisting mostly
of page queue state.  The use of a structure makes it easy to store a
snapshot of a page's queue state in a stack variable and use cmpset
loops to update that state without requiring the page lock.

This change merely adds the structure and updates references to the
atomic state fields (aflags, queue, and act_count).  No functional
change intended.

Reviewed by:	alc, jeff, kib
Sponsored by:	Netflix, Intel
Differential Revision:	https://reviews.freebsd.org/D22650
commit 5cff1f4dc3 (parent be35d6745b)
Author: Mark Johnston
Date:   2019-12-10 18:14:50 +00:00
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=355586

24 changed files with 131 additions and 121 deletions
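
To illustrate the pattern the commit message describes, here is a minimal,
untested sketch of a cmpset loop over the new 32-bit astate word.  This is
not code from this commit: the helper name vm_page_astate_tryclear_referenced
is hypothetical, and the tree's own helpers (for example vm_page_pqstate_cmpset
in the vm_page.h hunk below) take a different form.

/* Illustrative sketch only -- not part of this commit. */
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/atomic.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

static bool
vm_page_astate_tryclear_referenced(vm_page_t m)	/* hypothetical helper */
{
	vm_page_astate_t old, new;

	old._bits = atomic_load_32(&m->a._bits);
	do {
		if ((old.flags & PGA_REFERENCED) == 0)
			return (false);		/* nothing to clear */
		new = old;			/* modify a stack snapshot */
		new.flags &= ~PGA_REFERENCED;
	} while (atomic_fcmpset_32(&m->a._bits, &old._bits, new._bits) == 0);
	return (true);
}

The point of the union is that its _bits member lets the whole
{ flags, queue, act_count } triple be loaded into a stack variable and
committed back as a single 32-bit compare-and-set, with no page lock held.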

View File

@ -6104,7 +6104,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
("pmap_enter: no PV entry for %#lx", va));
if ((newpte & PG_MANAGED) == 0)
free_pv_entry(pmap, pv);
if ((om->aflags & PGA_WRITEABLE) != 0 &&
if ((om->a.flags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&om->md.pv_list) &&
((om->flags & PG_FICTITIOUS) != 0 ||
TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
@ -7297,7 +7297,7 @@ pmap_remove_pages(pmap_t pmap)
pvh->pv_gen++;
if (TAILQ_EMPTY(&pvh->pv_list)) {
for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
if ((mt->aflags & PGA_WRITEABLE) != 0 &&
if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&mt->md.pv_list))
vm_page_aflag_clear(mt, PGA_WRITEABLE);
}
@ -7315,7 +7315,7 @@ pmap_remove_pages(pmap_t pmap)
pmap_resident_count_dec(pmap, 1);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
m->md.pv_gen++;
if ((m->aflags & PGA_WRITEABLE) != 0 &&
if ((m->a.flags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&m->md.pv_list) &&
(m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));

View File

@ -415,8 +415,8 @@ extern int pmap_pcid_enabled;
extern int invpcid_works;
#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
#define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0)
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
struct thread;

View File

@ -47,7 +47,7 @@ extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
void *pmap_kenter_temporary(vm_paddr_t, int);
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0)
void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);

View File

@ -3415,7 +3415,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pv = pmap_pvh_remove(&om->md, pmap, va);
if ((m->oflags & VPO_UNMANAGED) != 0)
free_pv_entry(pmap, pv);
if ((om->aflags & PGA_WRITEABLE) != 0 &&
if ((om->a.flags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&om->md.pv_list) &&
((om->flags & PG_FICTITIOUS) != 0 ||
TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
@ -4486,7 +4486,7 @@ pmap_remove_pages(pmap_t pmap)
pvh->pv_gen++;
if (TAILQ_EMPTY(&pvh->pv_list)) {
for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
if ((mt->aflags & PGA_WRITEABLE) != 0 &&
if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&mt->md.pv_list))
vm_page_aflag_clear(mt, PGA_WRITEABLE);
}
@ -4508,7 +4508,7 @@ pmap_remove_pages(pmap_t pmap)
TAILQ_REMOVE(&m->md.pv_list, pv,
pv_next);
m->md.pv_gen++;
if ((m->aflags & PGA_WRITEABLE) != 0 &&
if ((m->a.flags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&m->md.pv_list) &&
(m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(

View File

@ -53,7 +53,7 @@
#endif
#define pmap_page_get_memattr(m) ((m)->md.pv_memattr)
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0)
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
/*

View File

@ -332,7 +332,7 @@ vtballoon_inflate(struct vtballoon_softc *sc, int npages)
sc->vtballoon_page_frames[i] =
VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT;
KASSERT(m->queue == PQ_NONE,
KASSERT(m->a.queue == PQ_NONE,
("%s: allocated page %p on queue", __func__, m));
TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, plinks.q);
}

View File

@ -3783,7 +3783,7 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m,
("pmap_enter: no PV entry for %#x", va));
if ((newpte & PG_MANAGED) == 0)
free_pv_entry(pmap, pv);
if ((om->aflags & PGA_WRITEABLE) != 0 &&
if ((om->a.flags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&om->md.pv_list) &&
((om->flags & PG_FICTITIOUS) != 0 ||
TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))

View File

@ -239,7 +239,7 @@ extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0)
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
static inline int

View File

@ -164,7 +164,7 @@ extern vm_offset_t virtual_end;
#define pmap_page_get_memattr(m) (((m)->md.pv_flags & PV_MEMATTR_MASK) >> PV_MEMATTR_SHIFT)
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0)
void pmap_bootstrap(void);
void *pmap_mapdev(vm_paddr_t, vm_size_t);

View File

@ -2158,7 +2158,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pv = pmap_pvh_remove(&om->md, pmap, va);
if (!pte_test(&newpte, PTE_MANAGED))
free_pv_entry(pmap, pv);
if ((om->aflags & PGA_WRITEABLE) != 0 &&
if ((om->a.flags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&om->md.pv_list))
vm_page_aflag_clear(om, PGA_WRITEABLE);
}
@ -3223,7 +3223,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
* determine if the address is MINCORE_REFERENCED.
*/
m = PHYS_TO_VM_PAGE(pa);
if ((m->aflags & PGA_REFERENCED) != 0)
if ((m->a.flags & PGA_REFERENCED) != 0)
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
}
if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=

View File

@ -1906,7 +1906,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
moea_pvo_remove(pvo, -1);
PMAP_UNLOCK(pmap);
}
if ((m->aflags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
if ((m->a.flags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
moea_attr_clear(m, PTE_CHG);
vm_page_dirty(m);
}

View File

@ -1493,7 +1493,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
* Flush the page from the instruction cache if this page is
* mapped executable and cacheable.
*/
if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
if (pmap != kernel_pmap && (m->a.flags & PGA_EXECUTABLE) == 0 &&
(pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
vm_page_aflag_set(m, PGA_EXECUTABLE);
moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
@ -2254,7 +2254,8 @@ moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
if (refchg < 0)
refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
if (pm != kernel_pmap && pg != NULL &&
(pg->a.flags & PGA_EXECUTABLE) == 0 &&
(pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
if ((pg->oflags & VPO_UNMANAGED) == 0)
vm_page_aflag_set(pg, PGA_EXECUTABLE);
@ -2468,7 +2469,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
}
KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable"));
KASSERT((m->a.flags & PGA_WRITEABLE) == 0, ("Page still writable"));
PV_PAGE_UNLOCK(m);
/* Clean up UMA allocations */

View File

@ -249,7 +249,7 @@ extern struct pmap kernel_pmap_store;
#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0)
void pmap_bootstrap(vm_offset_t, vm_offset_t);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);

View File

@ -54,7 +54,7 @@
#endif
#define pmap_page_get_memattr(m) ((m)->md.pv_memattr)
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0)
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
/*

View File

@ -2832,7 +2832,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
("pmap_enter: no PV entry for %#lx", va));
if ((new_l3 & PTE_SW_MANAGED) == 0)
free_pv_entry(pmap, pv);
if ((om->aflags & PGA_WRITEABLE) != 0 &&
if ((om->a.flags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&om->md.pv_list) &&
((om->flags & PG_FICTITIOUS) != 0 ||
TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
@ -3586,7 +3586,7 @@ pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entry_t pv,
if (TAILQ_EMPTY(&pvh->pv_list)) {
for (mt = m; mt < &m[Ln_ENTRIES]; mt++)
if (TAILQ_EMPTY(&mt->md.pv_list) &&
(mt->aflags & PGA_WRITEABLE) != 0)
(mt->a.flags & PGA_WRITEABLE) != 0)
vm_page_aflag_clear(mt, PGA_WRITEABLE);
}
mpte = pmap_remove_pt_page(pmap, pv->pv_va);
@ -3604,7 +3604,7 @@ pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entry_t pv,
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
m->md.pv_gen++;
if (TAILQ_EMPTY(&m->md.pv_list) &&
(m->aflags & PGA_WRITEABLE) != 0) {
(m->a.flags & PGA_WRITEABLE) != 0) {
pvh = pa_to_pvh(m->phys_addr);
if (TAILQ_EMPTY(&pvh->pv_list))
vm_page_aflag_clear(m, PGA_WRITEABLE);
@ -4138,7 +4138,7 @@ pmap_clear_modify(vm_page_t m)
* If the object containing the page is locked and the page is not
* exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
*/
if ((m->aflags & PGA_WRITEABLE) == 0)
if ((m->a.flags & PGA_WRITEABLE) == 0)
return;
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
pa_to_pvh(VM_PAGE_TO_PHYS(m));

View File

@ -82,7 +82,7 @@ struct pmap {
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0)
#define pmap_page_set_memattr(m, ma) (void)0
void pmap_bootstrap(u_int cpu_impl);

View File

@ -262,7 +262,7 @@ v2sizep(vm_offset_t va)
if (pa == 0)
panic("MemGuard detected double-free of %p", (void *)va);
p = PHYS_TO_VM_PAGE(pa);
KASSERT(vm_page_wired(p) && p->queue == PQ_NONE,
KASSERT(vm_page_wired(p) && p->a.queue == PQ_NONE,
("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
return (&p->plinks.memguard.p);
}
@ -277,7 +277,7 @@ v2sizev(vm_offset_t va)
if (pa == 0)
panic("MemGuard detected double-free of %p", (void *)va);
p = PHYS_TO_VM_PAGE(pa);
KASSERT(vm_page_wired(p) && p->queue == PQ_NONE,
KASSERT(vm_page_wired(p) && p->a.queue == PQ_NONE,
("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
return (&p->plinks.memguard.v);
}

View File

@ -1669,7 +1669,7 @@ swp_pager_force_dirty(vm_page_t m)
vm_page_dirty(m);
#ifdef INVARIANTS
vm_page_lock(m);
if (!vm_page_wired(m) && m->queue == PQ_NONE)
if (!vm_page_wired(m) && m->a.queue == PQ_NONE)
panic("page %p is neither wired nor queued", m);
vm_page_unlock(m);
#endif

View File

@ -931,9 +931,9 @@ kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
* and set PGA_REFERENCED before the call to
* pmap_is_referenced().
*/
if ((m->aflags & PGA_REFERENCED) != 0 ||
if ((m->a.flags & PGA_REFERENCED) != 0 ||
pmap_is_referenced(m) ||
(m->aflags & PGA_REFERENCED) != 0)
(m->a.flags & PGA_REFERENCED) != 0)
mincoreinfo |= MINCORE_REFERENCED_OTHER;
}
if (object != NULL)

View File

@ -897,7 +897,7 @@ vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *allclean)
* nosync page, skip it. Note that the object flags were not
* cleared in this case so we do not have to set them.
*/
if ((flags & OBJPC_NOSYNC) != 0 && (p->aflags & PGA_NOSYNC) != 0) {
if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) {
*allclean = FALSE;
return (FALSE);
} else {
@ -2472,9 +2472,9 @@ sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
* sysctl is only meant to give an
* approximation of the system anyway.
*/
if (m->queue == PQ_ACTIVE)
if (m->a.queue == PQ_ACTIVE)
kvo->kvo_active++;
else if (m->queue == PQ_INACTIVE)
else if (m->a.queue == PQ_INACTIVE)
kvo->kvo_inactive++;
}

View File

@ -436,9 +436,9 @@ vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
bzero(marker, sizeof(*marker));
marker->flags = PG_MARKER;
marker->aflags = aflags;
marker->a.flags = aflags;
marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
marker->queue = queue;
marker->a.queue = queue;
}
static void
@ -508,9 +508,9 @@ vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
m->object = NULL;
m->ref_count = 0;
m->busy_lock = VPB_UNBUSIED;
m->flags = m->aflags = 0;
m->flags = m->a.flags = 0;
m->phys_addr = pa;
m->queue = PQ_NONE;
m->a.queue = PQ_NONE;
m->psind = 0;
m->segind = segind;
m->order = VM_NFREEORDER;
@ -1265,7 +1265,7 @@ vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
goto memattr;
}
m->phys_addr = paddr;
m->queue = PQ_NONE;
m->a.queue = PQ_NONE;
/* Fictitious pages don't use "segind". */
m->flags = PG_FICTITIOUS;
/* Fictitious pages don't use "order" or "pool". */
@ -2002,7 +2002,7 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
if ((req & VM_ALLOC_NODUMP) != 0)
flags |= PG_NODUMP;
m->flags = flags;
m->aflags = 0;
m->a.flags = 0;
m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
VPO_UNMANAGED : 0;
m->busy_lock = VPB_UNBUSIED;
@ -2018,7 +2018,7 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
vm_wire_add(1);
m->ref_count = 1;
}
m->act_count = 0;
m->a.act_count = 0;
if (object != NULL) {
if (vm_page_insert_after(m, object, pindex, mpred)) {
@ -2212,12 +2212,12 @@ vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
memattr = object->memattr;
}
for (m = m_ret; m < &m_ret[npages]; m++) {
m->aflags = 0;
m->a.flags = 0;
m->flags = (m->flags | PG_NODUMP) & flags;
m->busy_lock = busy_lock;
if ((req & VM_ALLOC_WIRED) != 0)
m->ref_count = 1;
m->act_count = 0;
m->a.act_count = 0;
m->oflags = oflags;
if (object != NULL) {
if (vm_page_insert_after(m, object, pindex, mpred)) {
@ -2260,9 +2260,10 @@ vm_page_alloc_check(vm_page_t m)
{
KASSERT(m->object == NULL, ("page %p has object", m));
KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
KASSERT(m->a.queue == PQ_NONE &&
(m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
("page %p has unexpected queue %d, flags %#x",
m, m->queue, (m->aflags & PGA_QUEUE_STATE_MASK)));
m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK)));
KASSERT(m->ref_count == 0, ("page %p has references", m));
KASSERT(!vm_page_busied(m), ("page %p is busy", m));
KASSERT(m->dirty == 0, ("page %p is dirty", m));
@ -2336,7 +2337,7 @@ vm_page_alloc_freelist_domain(int domain, int freelist, int req)
/*
* Initialize the page. Only the PG_ZERO flag is inherited.
*/
m->aflags = 0;
m->a.flags = 0;
flags = 0;
if ((req & VM_ALLOC_ZERO) != 0)
flags = PG_ZERO;
@ -2744,7 +2745,7 @@ vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
* and dequeued. Finally, change "m"
* as if vm_page_free() was called.
*/
m_new->aflags = m->aflags &
m_new->a.flags = m->a.flags &
~PGA_QUEUE_STATE_MASK;
KASSERT(m_new->oflags == VPO_UNMANAGED,
("page %p is managed", m_new));
@ -3216,7 +3217,7 @@ vm_page_pagequeue(vm_page_t m)
uint8_t queue;
if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
if ((queue = atomic_load_8(&m->a.queue)) == PQ_NONE)
return (NULL);
return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
}
@ -3231,11 +3232,11 @@ vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m)
vm_pagequeue_assert_locked(pq);
/*
* The page daemon is allowed to set m->queue = PQ_NONE without
* The page daemon is allowed to set m->a.queue = PQ_NONE without
* the page queue lock held. In this case it is about to free the page,
* which must not have any queue state.
*/
qflags = atomic_load_16(&m->aflags);
qflags = atomic_load_16(&m->a.flags);
KASSERT(pq == vm_page_pagequeue(m) ||
(qflags & PGA_QUEUE_STATE_MASK) == 0,
("page %p doesn't belong to queue %p but has aflags %#x",
@ -3261,7 +3262,7 @@ vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m)
* first.
*/
if ((qflags & PGA_REQUEUE_HEAD) != 0) {
KASSERT(m->queue == PQ_INACTIVE,
KASSERT(m->a.queue == PQ_INACTIVE,
("head enqueue not supported for page %p", m));
vmd = vm_pagequeue_domain(m);
TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
@ -3285,7 +3286,7 @@ vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
for (i = 0; i < bq->bq_cnt; i++) {
m = bq->bq_pa[i];
if (__predict_false(m->queue != queue))
if (__predict_false(m->a.queue != queue))
continue;
vm_pqbatch_process_page(pq, m);
}
@ -3297,7 +3298,7 @@ vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
*
* Enqueue a page in the specified page queue's batched work queue.
* The caller must have encoded the requested operation in the page
* structure's aflags field.
* structure's a.flags field.
*/
void
vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
@ -3333,12 +3334,12 @@ vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
* or the page is being freed, a different thread cannot be concurrently
* enqueuing the page.
*/
if (__predict_true(m->queue == queue))
if (__predict_true(m->a.queue == queue))
vm_pqbatch_process_page(pq, m);
else {
KASSERT(m->queue == PQ_NONE,
KASSERT(m->a.queue == PQ_NONE,
("invalid queue transition for page %p", m));
KASSERT((m->aflags & PGA_ENQUEUED) == 0,
KASSERT((m->a.flags & PGA_ENQUEUED) == 0,
("page %p is enqueued with invalid queue index", m));
}
vm_pagequeue_unlock(pq);
@ -3394,7 +3395,7 @@ static void
vm_page_dequeue_complete(vm_page_t m)
{
m->queue = PQ_NONE;
m->a.queue = PQ_NONE;
atomic_thread_fence_rel();
vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK);
}
@ -3446,10 +3447,10 @@ vm_page_dequeue_deferred_free(vm_page_t m)
KASSERT(m->ref_count == 0, ("page %p has references", m));
for (;;) {
if ((m->aflags & PGA_DEQUEUE) != 0)
if ((m->a.flags & PGA_DEQUEUE) != 0)
return;
atomic_thread_fence_acq();
if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
if ((queue = atomic_load_8(&m->a.queue)) == PQ_NONE)
return;
if (vm_page_pqstate_cmpset(m, queue, queue, PGA_DEQUEUE,
PGA_DEQUEUE)) {
@ -3483,7 +3484,7 @@ vm_page_dequeue(vm_page_t m)
* vm_page_dequeue_complete(). Ensure that all queue
* state is cleared before we return.
*/
aflags = atomic_load_16(&m->aflags);
aflags = atomic_load_16(&m->a.flags);
if ((aflags & PGA_QUEUE_STATE_MASK) == 0)
return;
KASSERT((aflags & PGA_DEQUEUE) != 0,
@ -3506,11 +3507,11 @@ vm_page_dequeue(vm_page_t m)
}
KASSERT(pq == vm_page_pagequeue(m),
("%s: page %p migrated directly between queues", __func__, m));
KASSERT((m->aflags & PGA_DEQUEUE) != 0 ||
KASSERT((m->a.flags & PGA_DEQUEUE) != 0 ||
mtx_owned(vm_page_lockptr(m)),
("%s: queued unlocked page %p", __func__, m));
if ((m->aflags & PGA_ENQUEUED) != 0)
if ((m->a.flags & PGA_ENQUEUED) != 0)
vm_pagequeue_remove(pq, m);
vm_page_dequeue_complete(m);
vm_pagequeue_unlock(pq);
@ -3525,13 +3526,14 @@ vm_page_enqueue(vm_page_t m, uint8_t queue)
{
vm_page_assert_locked(m);
KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
KASSERT(m->a.queue == PQ_NONE &&
(m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
("%s: page %p is already enqueued", __func__, m));
KASSERT(m->ref_count > 0,
("%s: page %p does not carry any references", __func__, m));
m->queue = queue;
if ((m->aflags & PGA_REQUEUE) == 0)
m->a.queue = queue;
if ((m->a.flags & PGA_REQUEUE) == 0)
vm_page_aflag_set(m, PGA_REQUEUE);
vm_page_pqbatch_submit(m, queue);
}
@ -3553,9 +3555,9 @@ vm_page_requeue(vm_page_t m)
KASSERT(m->ref_count > 0,
("%s: page %p does not carry any references", __func__, m));
if ((m->aflags & PGA_REQUEUE) == 0)
if ((m->a.flags & PGA_REQUEUE) == 0)
vm_page_aflag_set(m, PGA_REQUEUE);
vm_page_pqbatch_submit(m, atomic_load_8(&m->queue));
vm_page_pqbatch_submit(m, atomic_load_8(&m->a.queue));
}
/*
@ -3584,7 +3586,7 @@ vm_page_swapqueue(vm_page_t m, uint8_t oldq, uint8_t newq)
* queue lock is acquired, so we must verify that we hold the correct
* lock before proceeding.
*/
if (__predict_false(m->queue != oldq)) {
if (__predict_false(m->a.queue != oldq)) {
vm_pagequeue_unlock(pq);
return;
}
@ -3595,7 +3597,7 @@ vm_page_swapqueue(vm_page_t m, uint8_t oldq, uint8_t newq)
* Therefore we must remove the page from the queue now in anticipation
* of a successful commit, and be prepared to roll back.
*/
if (__predict_true((m->aflags & PGA_ENQUEUED) != 0)) {
if (__predict_true((m->a.flags & PGA_ENQUEUED) != 0)) {
next = TAILQ_NEXT(m, plinks.q);
TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
vm_page_aflag_clear(m, PGA_ENQUEUED);
@ -3658,10 +3660,10 @@ vm_page_free_prep(vm_page_t m)
if ((m->oflags & VPO_UNMANAGED) == 0) {
KASSERT(!pmap_page_is_mapped(m),
("vm_page_free_prep: freeing mapped page %p", m));
KASSERT((m->aflags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0,
KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0,
("vm_page_free_prep: mapping flags set in page %p", m));
} else {
KASSERT(m->queue == PQ_NONE,
KASSERT(m->a.queue == PQ_NONE,
("vm_page_free_prep: unmanaged page %p is queued", m));
}
VM_CNT_INC(v_tfree);
@ -3694,7 +3696,7 @@ vm_page_free_prep(vm_page_t m)
if ((m->flags & PG_FICTITIOUS) != 0) {
KASSERT(m->ref_count == 1,
("fictitious page %p is referenced", m));
KASSERT(m->queue == PQ_NONE,
KASSERT(m->a.queue == PQ_NONE,
("fictitious page %p is queued", m));
return (false);
}
@ -3955,8 +3957,8 @@ vm_page_mvqueue(vm_page_t m, const uint8_t nqueue)
vm_page_requeue(m);
}
if (nqueue == PQ_ACTIVE && m->act_count < ACT_INIT)
m->act_count = ACT_INIT;
if (nqueue == PQ_ACTIVE && m->a.act_count < ACT_INIT)
m->a.act_count = ACT_INIT;
}
/*
@ -3998,9 +4000,9 @@ _vm_page_deactivate_noreuse(vm_page_t m)
if (!vm_page_inactive(m)) {
vm_page_dequeue(m);
m->queue = PQ_INACTIVE;
m->a.queue = PQ_INACTIVE;
}
if ((m->aflags & PGA_REQUEUE_HEAD) == 0)
if ((m->a.flags & PGA_REQUEUE_HEAD) == 0)
vm_page_aflag_set(m, PGA_REQUEUE_HEAD);
vm_page_pqbatch_submit(m, PQ_INACTIVE);
}
@ -5102,7 +5104,7 @@ DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
"page %p obj %p pidx 0x%jx phys 0x%jx q %d ref %u\n"
" af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
m->queue, m->ref_count, m->aflags, m->oflags,
m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);
m->a.queue, m->ref_count, m->a.flags, m->oflags,
m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty);
}
#endif /* DDB */

View File

@ -215,6 +215,15 @@ typedef uint32_t vm_page_bits_t;
typedef uint64_t vm_page_bits_t;
#endif
typedef union vm_page_astate {
struct {
uint16_t flags;
uint8_t queue;
uint8_t act_count;
};
uint32_t _bits;
} vm_page_astate_t;
struct vm_page {
union {
TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
@ -237,9 +246,7 @@ struct vm_page {
struct md_page md; /* machine dependent stuff */
u_int ref_count; /* page references (A) */
volatile u_int busy_lock; /* busy owners lock */
uint16_t aflags; /* atomic flags (A) */
uint8_t queue; /* page queue index (Q) */
uint8_t act_count; /* page usage count (P) */
union vm_page_astate a; /* state accessed atomically */
uint8_t order; /* index of the buddy queue (F) */
uint8_t pool; /* vm_phys freepool index (F) */
uint8_t flags; /* page PG_* flags (P) */
@ -755,19 +762,19 @@ void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
* destinations. In order that we can easily use a 32-bit operation, we
* require that the aflags field be 32-bit aligned.
*/
_Static_assert(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0,
_Static_assert(offsetof(struct vm_page, a.flags) % sizeof(uint32_t) == 0,
"aflags field is not 32-bit aligned");
/*
* We want to be able to update the aflags and queue fields atomically in
* the same operation.
*/
_Static_assert(offsetof(struct vm_page, aflags) / sizeof(uint32_t) ==
offsetof(struct vm_page, queue) / sizeof(uint32_t),
_Static_assert(offsetof(struct vm_page, a.flags) / sizeof(uint32_t) ==
offsetof(struct vm_page, a.queue) / sizeof(uint32_t),
"aflags and queue fields do not belong to the same 32-bit word");
_Static_assert(offsetof(struct vm_page, queue) % sizeof(uint32_t) == 2,
_Static_assert(offsetof(struct vm_page, a.queue) % sizeof(uint32_t) == 2,
"queue field is at an unexpected offset");
_Static_assert(sizeof(((struct vm_page *)NULL)->queue) == 1,
_Static_assert(sizeof(((struct vm_page *)NULL)->a.queue) == 1,
"queue field has an unexpected size");
#if BYTE_ORDER == LITTLE_ENDIAN
@ -798,7 +805,7 @@ vm_page_aflag_clear(vm_page_t m, uint16_t bits)
* atomic update. Parallel non-atomic updates to the other fields
* within this word are handled properly by the atomic update.
*/
addr = (void *)&m->aflags;
addr = (void *)&m->a.flags;
val = bits << VM_PAGE_AFLAG_SHIFT;
atomic_clear_32(addr, val);
}
@ -818,7 +825,7 @@ vm_page_aflag_set(vm_page_t m, uint16_t bits)
* atomic update. Parallel non-atomic updates to the other fields
* within this word are handled properly by the atomic update.
*/
addr = (void *)&m->aflags;
addr = (void *)&m->a.flags;
val = bits << VM_PAGE_AFLAG_SHIFT;
atomic_set_32(addr, val);
}
@ -843,7 +850,7 @@ vm_page_pqstate_cmpset(vm_page_t m, uint32_t oldq, uint32_t newq,
qsmask = ((PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD) <<
VM_PAGE_AFLAG_SHIFT) | VM_PAGE_QUEUE_MASK;
addr = (void *)&m->aflags;
addr = (void *)&m->a.flags;
oval = atomic_load_32(addr);
do {
if ((oval & fflags) != 0)
@ -918,10 +925,10 @@ vm_page_queue(vm_page_t m)
vm_page_assert_locked(m);
if ((m->aflags & PGA_DEQUEUE) != 0)
if ((m->a.flags & PGA_DEQUEUE) != 0)
return (PQ_NONE);
atomic_thread_fence_acq();
return (m->queue);
return (m->a.queue);
}
static inline bool

View File

@ -218,7 +218,7 @@ vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
{
vm_pagequeue_assert_locked(pq);
KASSERT((marker->aflags & PGA_ENQUEUED) == 0,
KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
("marker %p already enqueued", marker));
if (after == NULL)
@ -242,7 +242,7 @@ vm_pageout_end_scan(struct scan_state *ss)
pq = ss->pq;
vm_pagequeue_assert_locked(pq);
KASSERT((ss->marker->aflags & PGA_ENQUEUED) != 0,
KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
("marker %p not enqueued", ss->marker));
TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
@ -271,7 +271,7 @@ vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
marker = ss->marker;
pq = ss->pq;
KASSERT((marker->aflags & PGA_ENQUEUED) != 0,
KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
("marker %p not enqueued", ss->marker));
vm_pagequeue_lock(pq);
@ -280,7 +280,7 @@ vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
m = n, ss->scanned++) {
n = TAILQ_NEXT(m, plinks.q);
if ((m->flags & PG_MARKER) == 0) {
KASSERT((m->aflags & PGA_ENQUEUED) != 0,
KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
("page %p not enqueued", m));
KASSERT((m->flags & PG_FICTITIOUS) == 0,
("Fictitious page %p cannot be in page queue", m));
@ -472,7 +472,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
KASSERT(vm_page_all_valid(mc[i]),
("vm_pageout_flush: partially invalid page %p index %d/%d",
mc[i], i, count));
KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
("vm_pageout_flush: writeable page %p", mc[i]));
vm_page_busy_downgrade(mc[i]);
}
@ -766,7 +766,7 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
* A requeue was requested, so this page gets a second
* chance.
*/
if ((m->aflags & PGA_REQUEUE) != 0) {
if ((m->a.flags & PGA_REQUEUE) != 0) {
vm_page_pqbatch_submit(m, queue);
continue;
}
@ -848,7 +848,7 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
("page %p is mapped", m));
act_delta = 0;
}
if ((m->aflags & PGA_REFERENCED) != 0) {
if ((m->a.flags & PGA_REFERENCED) != 0) {
vm_page_aflag_clear(m, PGA_REFERENCED);
act_delta++;
}
@ -865,7 +865,7 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
* be returned prematurely to the inactive
* queue.
*/
m->act_count += act_delta + ACT_ADVANCE;
m->a.act_count += act_delta + ACT_ADVANCE;
/*
* If this was a background laundering, count
@ -1302,7 +1302,7 @@ vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
act_delta = pmap_ts_referenced(m);
else
act_delta = 0;
if ((m->aflags & PGA_REFERENCED) != 0) {
if ((m->a.flags & PGA_REFERENCED) != 0) {
vm_page_aflag_clear(m, PGA_REFERENCED);
act_delta++;
}
@ -1311,13 +1311,13 @@ vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
* Advance or decay the act_count based on recent usage.
*/
if (act_delta != 0) {
m->act_count += ACT_ADVANCE + act_delta;
if (m->act_count > ACT_MAX)
m->act_count = ACT_MAX;
m->a.act_count += ACT_ADVANCE + act_delta;
if (m->a.act_count > ACT_MAX)
m->a.act_count = ACT_MAX;
} else
m->act_count -= min(m->act_count, ACT_DECLINE);
m->a.act_count -= min(m->a.act_count, ACT_DECLINE);
if (m->act_count == 0) {
if (m->a.act_count == 0) {
/*
* When not short for inactive pages, let dirty pages go
* through the inactive queue before moving to the
@ -1372,14 +1372,14 @@ vm_pageout_reinsert_inactive_page(struct scan_state *ss, vm_page_t m)
{
struct vm_domain *vmd;
if (m->queue != PQ_INACTIVE || (m->aflags & PGA_ENQUEUED) != 0)
if (m->a.queue != PQ_INACTIVE || (m->a.flags & PGA_ENQUEUED) != 0)
return (0);
vm_page_aflag_set(m, PGA_ENQUEUED);
if ((m->aflags & PGA_REQUEUE_HEAD) != 0) {
if ((m->a.flags & PGA_REQUEUE_HEAD) != 0) {
vmd = vm_pagequeue_domain(m);
TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
} else if ((m->aflags & PGA_REQUEUE) != 0) {
} else if ((m->a.flags & PGA_REQUEUE) != 0) {
TAILQ_INSERT_TAIL(&ss->pq->pq_pl, m, plinks.q);
vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
} else
@ -1458,7 +1458,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
/*
* Start scanning the inactive queue for pages that we can free. The
* scan will stop when we reach the target or we have scanned the
* entire queue. (Note that m->act_count is not used to make
* entire queue. (Note that m->a.act_count is not used to make
* decisions for the inactive queue, only for the active queue.)
*/
marker = &vmd->vmd_markers[PQ_INACTIVE];
@ -1488,7 +1488,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
* dropped, or a requeue was requested. This page gets a second
* chance.
*/
if ((m->aflags & (PGA_ENQUEUED | PGA_REQUEUE |
if ((m->a.flags & (PGA_ENQUEUED | PGA_REQUEUE |
PGA_REQUEUE_HEAD)) != 0)
goto reinsert;
@ -1579,7 +1579,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
("page %p is mapped", m));
act_delta = 0;
}
if ((m->aflags & PGA_REFERENCED) != 0) {
if ((m->a.flags & PGA_REFERENCED) != 0) {
vm_page_aflag_clear(m, PGA_REFERENCED);
act_delta++;
}
@ -1596,7 +1596,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
* be returned prematurely to the inactive
* queue.
*/
m->act_count += act_delta + ACT_ADVANCE;
m->a.act_count += act_delta + ACT_ADVANCE;
continue;
} else if ((object->flags & OBJ_DEAD) == 0) {
vm_page_xunbusy(m);
@ -1636,9 +1636,9 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
* requests, we can safely disassociate the page
* from the inactive queue.
*/
KASSERT((m->aflags & PGA_QUEUE_STATE_MASK) == 0,
KASSERT((m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
("page %p has queue state", m));
m->queue = PQ_NONE;
m->a.queue = PQ_NONE;
vm_page_free(m);
page_shortage--;
continue;

View File

@ -224,31 +224,31 @@ vm_swapout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
}
act_delta = pmap_ts_referenced(p);
vm_page_lock(p);
if ((p->aflags & PGA_REFERENCED) != 0) {
if ((p->a.flags & PGA_REFERENCED) != 0) {
if (act_delta == 0)
act_delta = 1;
vm_page_aflag_clear(p, PGA_REFERENCED);
}
if (!vm_page_active(p) && act_delta != 0) {
vm_page_activate(p);
p->act_count += act_delta;
p->a.act_count += act_delta;
} else if (vm_page_active(p)) {
/*
* The page daemon does not requeue pages
* after modifying their activation count.
*/
if (act_delta == 0) {
p->act_count -= min(p->act_count,
p->a.act_count -= min(p->a.act_count,
ACT_DECLINE);
if (!remove_mode && p->act_count == 0) {
if (!remove_mode && p->a.act_count == 0) {
(void)vm_page_try_remove_all(p);
vm_page_deactivate(p);
}
} else {
vm_page_activate(p);
if (p->act_count < ACT_MAX -
if (p->a.act_count < ACT_MAX -
ACT_ADVANCE)
p->act_count += ACT_ADVANCE;
p->a.act_count += ACT_ADVANCE;
}
} else if (vm_page_inactive(p))
(void)vm_page_try_remove_all(p);