Different consumers of struct vm_page abuse the pageq member to keep
additional information when the page is guaranteed not to belong to a
paging queue.  Usually this results in a lot of type casts, which
makes reasoning about the correctness of the code harder.

Sometimes m->object is used instead of pageq, which could cause real
and confusing bugs if a non-NULL m->object is leaked.  See r141955 and
r253140 for examples.
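
For contrast, the pattern being removed looks roughly like the condensed
excerpt below (taken from the old pmap_add_delayed_free_list() and
pmap_free_zero_pages() code in the x86 pmap diffs that follow): the
caller-private free list is threaded through m->object with casts.

    /* Push a page onto a caller-private free list. */
    m->object = (void *)*free;
    *free = m;

    /* Later, pop every page off the list and release it. */
    while (free != NULL) {
        m = free;
        free = (void *)m->object;
        m->object = NULL;
        vm_page_free_toq(m);
    }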

Change the pageq member into a union containing explicitly-typed
members.  Use them instead of type-punning or abusing m->object in the
x86 pmaps, UMA, and vm_page_alloc_contig().
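
To make the new layout concrete, here is a minimal userspace sketch of
the pattern this change introduces.  The union and field names mirror
the vm_page.h hunk below (the memguard member is omitted); the
miniature page structure and the main() driver are illustrative only,
not kernel code.

    /* Sketch only: mimics the plinks union and spglist usage in the pmaps. */
    #include <sys/queue.h>
    #include <stdio.h>

    struct page;
    SLIST_HEAD(spglist, page);      /* counterpart of SLIST_HEAD(spglist, vm_page) */

    struct page {
        union {
            TAILQ_ENTRY(page) q;        /* page queue or free list */
            struct {
                SLIST_ENTRY(page) ss;   /* private slists */
                void *pv;               /* private pointer */
            } s;
        } plinks;
        int id;
    };

    int
    main(void)
    {
        struct spglist free;
        struct page pages[3], *m;
        int i;

        SLIST_INIT(&free);
        for (i = 0; i < 3; i++) {
            pages[i].id = i;
            /* Analogue of pmap_add_delayed_free_list(). */
            SLIST_INSERT_HEAD(&free, &pages[i], plinks.s.ss);
        }
        /* Analogue of pmap_free_zero_pages(): pop each page and release it. */
        while ((m = SLIST_FIRST(&free)) != NULL) {
            SLIST_REMOVE_HEAD(&free, plinks.s.ss);
            printf("releasing page %d\n", m->id);
        }
        return (0);
    }

The SLIST_ENTRY shares storage with the page queue linkage, which is
safe precisely because a page placed on such a private list is
guaranteed not to be on a paging queue at the same time.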

Requested and reviewed by:	alc
Sponsored by:	The FreeBSD Foundation
Konstantin Belousov 2013-08-10 17:36:42 +00:00
parent 7bc9877d20
commit c325e866f4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=254182
13 changed files with 196 additions and 196 deletions


@ -295,13 +295,12 @@ static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
vm_prot_t prot);
static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
vm_page_t *free, struct rwlock **lockp);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
vm_offset_t sva, pd_entry_t ptepde, vm_page_t *free,
struct rwlock **lockp);
struct spglist *free, struct rwlock **lockp);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
vm_page_t *free);
struct spglist *free);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m, struct rwlock **lockp);
static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
@ -316,8 +315,8 @@ static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
struct rwlock **lockp);
static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_page_t *free);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, vm_page_t *);
struct spglist *free);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
/*
@ -1490,14 +1489,12 @@ pmap_qremove(vm_offset_t sva, int count)
* Page table page management routines.....
***************************************************/
static __inline void
pmap_free_zero_pages(vm_page_t free)
pmap_free_zero_pages(struct spglist *free)
{
vm_page_t m;
while (free != NULL) {
m = free;
free = (void *)m->object;
m->object = NULL;
while ((m = SLIST_FIRST(free)) != NULL) {
SLIST_REMOVE_HEAD(free, plinks.s.ss);
/* Preserve the page's PG_ZERO setting. */
vm_page_free_toq(m);
}
@ -1509,15 +1506,15 @@ pmap_free_zero_pages(vm_page_t free)
* physical memory manager after the TLB has been updated.
*/
static __inline void
pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
boolean_t set_PG_ZERO)
{
if (set_PG_ZERO)
m->flags |= PG_ZERO;
else
m->flags &= ~PG_ZERO;
m->object = (void *)*free;
*free = m;
SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}
/*
@ -1567,7 +1564,7 @@ pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
* page table page was unmapped and FALSE otherwise.
*/
static inline boolean_t
pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
--m->wire_count;
@ -1579,7 +1576,7 @@ pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
}
static void
_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@ -1637,7 +1634,8 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
* conditionally free the page, and manage the hold/wire counts.
*/
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, vm_page_t *free)
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
struct spglist *free)
{
vm_page_t mpte;
@ -2123,7 +2121,8 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
pt_entry_t *pte, tpte;
pv_entry_t pv;
vm_offset_t va;
vm_page_t free, m, m_pc;
vm_page_t m, m_pc;
struct spglist free;
uint64_t inuse;
int bit, field, freed;
@ -2131,10 +2130,11 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
pmap = NULL;
free = m_pc = NULL;
m_pc = NULL;
SLIST_INIT(&free);
TAILQ_INIT(&new_tail);
mtx_lock(&pv_chunks_mutex);
while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && free == NULL) {
while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && SLIST_EMPTY(&free)) {
TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
mtx_unlock(&pv_chunks_mutex);
if (pmap != pc->pc_pmap) {
@ -2193,7 +2193,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
}
}
pc->pc_map[field] |= 1UL << bit;
pmap_unuse_pt(pmap, va, *pde, &free);
pmap_unuse_pt(pmap, va, *pde, &free);
freed++;
}
}
@ -2233,15 +2233,14 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
if (pmap != locked_pmap)
PMAP_UNLOCK(pmap);
}
if (m_pc == NULL && free != NULL) {
m_pc = free;
free = (void *)m_pc->object;
m_pc->object = NULL;
if (m_pc == NULL && !SLIST_EMPTY(&free)) {
m_pc = SLIST_FIRST(&free);
SLIST_REMOVE_HEAD(&free, plinks.s.ss);
/* Recycle a freed page table page. */
m_pc->wire_count = 1;
atomic_add_int(&cnt.v_wire_count, 1);
}
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
return (m_pc);
}
@ -2690,7 +2689,8 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
pd_entry_t newpde, oldpde;
pt_entry_t *firstpte, newpte;
vm_paddr_t mptepa;
vm_page_t free, mpte;
vm_page_t mpte;
struct spglist free;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
oldpde = *pde;
@ -2720,11 +2720,11 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
free = NULL;
SLIST_INIT(&free);
pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free,
lockp);
pmap_invalidate_page(pmap, trunc_2mpage(va));
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
" in pmap %p", va, pmap);
return (FALSE);
@ -2845,7 +2845,7 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
*/
static int
pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
vm_page_t *free, struct rwlock **lockp)
struct spglist *free, struct rwlock **lockp)
{
struct md_page *pvh;
pd_entry_t oldpde;
@ -2904,7 +2904,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
*/
static int
pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
pd_entry_t ptepde, vm_page_t *free, struct rwlock **lockp)
pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
{
struct md_page *pvh;
pt_entry_t oldpte;
@ -2937,7 +2937,8 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
* Remove a single page from a process address space
*/
static void
pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, vm_page_t *free)
pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
struct spglist *free)
{
struct rwlock *lock;
pt_entry_t *pte;
@ -2970,7 +2971,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
pdp_entry_t *pdpe;
pd_entry_t ptpaddr, *pde;
pt_entry_t *pte;
vm_page_t free = NULL;
struct spglist free;
int anyvalid;
/*
@ -2980,6 +2981,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
return;
anyvalid = 0;
SLIST_INIT(&free);
rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
@ -3098,7 +3100,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
pmap_invalidate_all(pmap);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
}
/*
@ -3123,11 +3125,11 @@ pmap_remove_all(vm_page_t m)
pt_entry_t *pte, tpte;
pd_entry_t *pde;
vm_offset_t va;
vm_page_t free;
struct spglist free;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_remove_all: page %p is not managed", m));
free = NULL;
SLIST_INIT(&free);
rw_wlock(&pvh_global_lock);
if ((m->flags & PG_FICTITIOUS) != 0)
goto small_mappings;
@ -3169,7 +3171,7 @@ pmap_remove_all(vm_page_t m)
}
vm_page_aflag_clear(m, PGA_WRITEABLE);
rw_wunlock(&pvh_global_lock);
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
}
/*
@ -3692,7 +3694,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
struct rwlock **lockp)
{
pd_entry_t *pde, newpde;
vm_page_t free, mpde;
vm_page_t mpde;
struct spglist free;
rw_assert(&pvh_global_lock, RA_LOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@ -3721,10 +3724,10 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m),
lockp)) {
free = NULL;
SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, va, mpde, &free)) {
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
}
CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
" in pmap %p", va, pmap);
@ -3827,7 +3830,7 @@ static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
{
vm_page_t free;
struct spglist free;
pt_entry_t *pte;
vm_paddr_t pa;
@ -3898,10 +3901,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if ((m->oflags & VPO_UNMANAGED) == 0 &&
!pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
if (mpte != NULL) {
free = NULL;
SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
}
mpte = NULL;
}
@ -4096,7 +4099,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
vm_offset_t src_addr)
{
struct rwlock *lock;
vm_page_t free;
struct spglist free;
vm_offset_t addr;
vm_offset_t end_addr = src_addr + len;
vm_offset_t va_next;
@ -4204,13 +4207,13 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
*dst_pte = ptetemp & ~(PG_W | PG_M |
PG_A);
pmap_resident_count_inc(dst_pmap, 1);
} else {
free = NULL;
} else {
SLIST_INIT(&free);
if (pmap_unwire_ptp(dst_pmap, addr,
dstmpte, &free)) {
pmap_invalidate_page(dst_pmap,
addr);
pmap_free_zero_pages(free);
pmap_invalidate_page(dst_pmap,
addr);
pmap_free_zero_pages(&free);
}
goto out;
}
@ -4227,10 +4230,10 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(src_pmap);
PMAP_UNLOCK(dst_pmap);
}
}
/*
* pmap_zero_page zeros the specified hardware page by mapping
* pmap_zero_page zeros the specified hardware page by mapping
* the page into KVM and using bzero to clear its contents.
*/
void
@ -4445,7 +4448,7 @@ pmap_remove_pages(pmap_t pmap)
{
pd_entry_t ptepde;
pt_entry_t *pte, tpte;
vm_page_t free = NULL;
struct spglist free;
vm_page_t m, mpte, mt;
pv_entry_t pv;
struct md_page *pvh;
@ -4462,6 +4465,7 @@ pmap_remove_pages(pmap_t pmap)
return;
}
lock = NULL;
SLIST_INIT(&free);
rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
@ -4597,7 +4601,7 @@ pmap_remove_pages(pmap_t pmap)
pmap_invalidate_all(pmap);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
}
/*


@ -330,7 +330,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
restart:
mtx_lock(&pool->lock);
TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, pageq, p1) {
TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) {
if (freed_pages >= npages_to_free)
break;
@ -338,7 +338,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
if (freed_pages >= NUM_PAGES_TO_ALLOC) {
/* remove range of pages from the pool */
TAILQ_REMOVE(&pool->list, p, pageq);
TAILQ_REMOVE(&pool->list, p, plinks.q);
ttm_pool_update_free_locked(pool, freed_pages);
/**
@ -373,7 +373,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
/* remove range of pages from the pool */
if (freed_pages) {
TAILQ_REMOVE(&pool->list, p, pageq);
TAILQ_REMOVE(&pool->list, p, plinks.q);
ttm_pool_update_free_locked(pool, freed_pages);
nr_free -= freed_pages;
@ -470,7 +470,7 @@ static void ttm_handle_caching_state_failure(struct pglist *pages,
unsigned i;
/* Failed pages have to be freed */
for (i = 0; i < cpages; ++i) {
TAILQ_REMOVE(pages, failed_pages[i], pageq);
TAILQ_REMOVE(pages, failed_pages[i], plinks.q);
ttm_vm_page_free(failed_pages[i]);
}
}
@ -545,7 +545,7 @@ static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
}
}
TAILQ_INSERT_HEAD(pages, p, pageq);
TAILQ_INSERT_HEAD(pages, p, plinks.q);
}
if (cpages) {
@ -600,16 +600,16 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
mtx_lock(&pool->lock);
if (!r) {
TAILQ_CONCAT(&pool->list, &new_pages, pageq);
TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
++pool->nrefills;
pool->npages += alloc_size;
} else {
printf("[TTM] Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
TAILQ_FOREACH(p, &pool->list, pageq) {
TAILQ_FOREACH(p, &pool->list, plinks.q) {
++cpages;
}
TAILQ_CONCAT(&pool->list, &new_pages, pageq);
TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
pool->npages += cpages;
}
@ -636,15 +636,15 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
if (count >= pool->npages) {
/* take all pages from the pool */
TAILQ_CONCAT(pages, &pool->list, pageq);
TAILQ_CONCAT(pages, &pool->list, plinks.q);
count -= pool->npages;
pool->npages = 0;
goto out;
}
for (i = 0; i < count; i++) {
p = TAILQ_FIRST(&pool->list);
TAILQ_REMOVE(&pool->list, p, pageq);
TAILQ_INSERT_TAIL(pages, p, pageq);
TAILQ_REMOVE(&pool->list, p, plinks.q);
TAILQ_INSERT_TAIL(pages, p, plinks.q);
}
pool->npages -= count;
count = 0;
@ -674,7 +674,7 @@ static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
mtx_lock(&pool->lock);
for (i = 0; i < npages; i++) {
if (pages[i]) {
TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
TAILQ_INSERT_TAIL(&pool->list, pages[i], plinks.q);
pages[i] = NULL;
pool->npages++;
}
@ -735,13 +735,13 @@ static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
TAILQ_INIT(&plist);
npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
count = 0;
TAILQ_FOREACH(p, &plist, pageq) {
TAILQ_FOREACH(p, &plist, plinks.q) {
pages[count++] = p;
}
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
TAILQ_FOREACH(p, &plist, pageq) {
TAILQ_FOREACH(p, &plist, plinks.q) {
pmap_zero_page(p);
}
}
@ -754,7 +754,7 @@ static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
TAILQ_INIT(&plist);
r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
npages);
TAILQ_FOREACH(p, &plist, pageq) {
TAILQ_FOREACH(p, &plist, plinks.q) {
pages[count++] = p;
}
if (r) {


@ -334,7 +334,7 @@ vtballoon_inflate(struct vtballoon_softc *sc, int npages)
KASSERT(m->queue == PQ_NONE,
("%s: allocated page %p on queue", __func__, m));
TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, pageq);
TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, plinks.q);
}
if (i > 0)
@ -362,8 +362,8 @@ vtballoon_deflate(struct vtballoon_softc *sc, int npages)
sc->vtballoon_page_frames[i] =
VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT;
TAILQ_REMOVE(&sc->vtballoon_pages, m, pageq);
TAILQ_INSERT_TAIL(&free_pages, m, pageq);
TAILQ_REMOVE(&sc->vtballoon_pages, m, plinks.q);
TAILQ_INSERT_TAIL(&free_pages, m, plinks.q);
}
if (i > 0) {
@ -371,7 +371,7 @@ vtballoon_deflate(struct vtballoon_softc *sc, int npages)
vtballoon_send_page_frames(sc, vq, i);
while ((m = TAILQ_FIRST(&free_pages)) != NULL) {
TAILQ_REMOVE(&free_pages, m, pageq);
TAILQ_REMOVE(&free_pages, m, plinks.q);
vtballoon_free_page(sc, m);
}
}


@ -317,12 +317,12 @@ static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
vm_prot_t prot);
static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
vm_page_t *free);
struct spglist *free);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
vm_page_t *free);
struct spglist *free);
static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
vm_page_t *free);
struct spglist *free);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
vm_offset_t va);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
@ -335,10 +335,10 @@ static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *);
#ifdef PAE
static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
#endif
@ -1568,14 +1568,12 @@ pmap_qremove(vm_offset_t sva, int count)
* Page table page management routines.....
***************************************************/
static __inline void
pmap_free_zero_pages(vm_page_t free)
pmap_free_zero_pages(struct spglist *free)
{
vm_page_t m;
while (free != NULL) {
m = free;
free = (void *)m->object;
m->object = NULL;
while ((m = SLIST_FIRST(free)) != NULL) {
SLIST_REMOVE_HEAD(free, plinks.s.ss);
/* Preserve the page's PG_ZERO setting. */
vm_page_free_toq(m);
}
@ -1587,15 +1585,15 @@ pmap_free_zero_pages(vm_page_t free)
* physical memory manager after the TLB has been updated.
*/
static __inline void
pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
boolean_t set_PG_ZERO)
{
if (set_PG_ZERO)
m->flags |= PG_ZERO;
else
m->flags &= ~PG_ZERO;
m->object = (void *)*free;
*free = m;
SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}
/*
@ -1645,7 +1643,7 @@ pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
* page table page was unmapped and FALSE otherwise.
*/
static inline boolean_t
pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
{
--m->wire_count;
@ -1657,7 +1655,7 @@ pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
}
static void
_pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
_pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
{
vm_offset_t pteva;
@ -1693,7 +1691,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
* conditionally free the page, and manage the hold/wire counts.
*/
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free)
{
pd_entry_t ptepde;
vm_page_t mpte;
@ -2193,16 +2191,18 @@ pmap_pv_reclaim(pmap_t locked_pmap)
pt_entry_t *pte, tpte;
pv_entry_t pv;
vm_offset_t va;
vm_page_t free, m, m_pc;
vm_page_t m, m_pc;
struct spglist free;
uint32_t inuse;
int bit, field, freed;
PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
pmap = NULL;
free = m_pc = NULL;
m_pc = NULL;
SLIST_INIT(&free);
TAILQ_INIT(&newtail);
while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 ||
free == NULL)) {
SLIST_EMPTY(&free))) {
TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
if (pmap != pc->pc_pmap) {
if (pmap != NULL) {
@ -2307,15 +2307,14 @@ pmap_pv_reclaim(pmap_t locked_pmap)
if (pmap != locked_pmap)
PMAP_UNLOCK(pmap);
}
if (m_pc == NULL && pv_vafree != 0 && free != NULL) {
m_pc = free;
free = (void *)m_pc->object;
m_pc->object = NULL;
if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) {
m_pc = SLIST_FIRST(&free);
SLIST_REMOVE_HEAD(&free, plinks.s.ss);
/* Recycle a freed page table page. */
m_pc->wire_count = 1;
atomic_add_int(&cnt.v_wire_count, 1);
}
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
return (m_pc);
}
@ -2636,7 +2635,8 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
pd_entry_t newpde, oldpde;
pt_entry_t *firstpte, newpte;
vm_paddr_t mptepa;
vm_page_t free, mpte;
vm_page_t mpte;
struct spglist free;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
oldpde = *pde;
@ -2658,10 +2658,10 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
VM_ALLOC_WIRED)) == NULL) {
free = NULL;
SLIST_INIT(&free);
pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free);
pmap_invalidate_page(pmap, trunc_4mpage(va));
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
" in pmap %p", va, pmap);
return (FALSE);
@ -2814,7 +2814,7 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
*/
static void
pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
vm_page_t *free)
struct spglist *free)
{
struct md_page *pvh;
pd_entry_t oldpde;
@ -2870,7 +2870,8 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
* pmap_remove_pte: do the things to unmap a page in a process
*/
static int
pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
struct spglist *free)
{
pt_entry_t oldpte;
vm_page_t m;
@ -2904,7 +2905,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
* Remove a single page from a process address space
*/
static void
pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free)
{
pt_entry_t *pte;
@ -2929,7 +2930,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
vm_offset_t pdnxt;
pd_entry_t ptpaddr;
pt_entry_t *pte;
vm_page_t free = NULL;
struct spglist free;
int anyvalid;
/*
@ -2939,6 +2940,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
return;
anyvalid = 0;
SLIST_INIT(&free);
rw_wlock(&pvh_global_lock);
sched_pin();
@ -3031,7 +3033,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
pmap_invalidate_all(pmap);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
}
/*
@ -3056,11 +3058,11 @@ pmap_remove_all(vm_page_t m)
pt_entry_t *pte, tpte;
pd_entry_t *pde;
vm_offset_t va;
vm_page_t free;
struct spglist free;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_remove_all: page %p is not managed", m));
free = NULL;
SLIST_INIT(&free);
rw_wlock(&pvh_global_lock);
sched_pin();
if ((m->flags & PG_FICTITIOUS) != 0)
@ -3105,7 +3107,7 @@ pmap_remove_all(vm_page_t m)
vm_page_aflag_clear(m, PGA_WRITEABLE);
sched_unpin();
rw_wunlock(&pvh_global_lock);
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
}
/*
@ -3769,7 +3771,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
{
pt_entry_t *pte;
vm_paddr_t pa;
vm_page_t free;
struct spglist free;
KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
(m->oflags & VPO_UNMANAGED) != 0,
@ -3838,10 +3840,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if ((m->oflags & VPO_UNMANAGED) == 0 &&
!pmap_try_insert_pv_entry(pmap, va, m)) {
if (mpte != NULL) {
free = NULL;
SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, mpte, &free)) {
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
}
mpte = NULL;
@ -4024,7 +4026,7 @@ void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
vm_offset_t src_addr)
{
vm_page_t free;
struct spglist free;
vm_offset_t addr;
vm_offset_t end_addr = src_addr + len;
vm_offset_t pdnxt;
@ -4107,12 +4109,12 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
PG_A);
dst_pmap->pm_stats.resident_count++;
} else {
free = NULL;
SLIST_INIT(&free);
if (pmap_unwire_ptp(dst_pmap, dstmpte,
&free)) {
pmap_invalidate_page(dst_pmap,
addr);
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
}
goto out;
}
@ -4419,11 +4421,11 @@ void
pmap_remove_pages(pmap_t pmap)
{
pt_entry_t *pte, tpte;
vm_page_t free = NULL;
vm_page_t m, mpte, mt;
pv_entry_t pv;
struct md_page *pvh;
struct pv_chunk *pc, *npc;
struct spglist free;
int field, idx;
int32_t bit;
uint32_t inuse, bitmask;
@ -4433,6 +4435,7 @@ pmap_remove_pages(pmap_t pmap)
printf("warning: pmap_remove_pages called with non-current pmap\n");
return;
}
SLIST_INIT(&free);
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
sched_pin();
@ -4541,7 +4544,7 @@ pmap_remove_pages(pmap_t pmap)
pmap_invalidate_all(pmap);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(free);
pmap_free_zero_pages(&free);
}
/*

View File

@ -226,7 +226,7 @@ dev_pager_free_page(vm_object_t object, vm_page_t m)
KASSERT((object->type == OBJT_DEVICE &&
(m->oflags & VPO_UNMANAGED) != 0),
("Managed device or page obj %p m %p", object, m));
TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, plinks.q);
vm_page_putfake(m);
}
@ -281,7 +281,7 @@ dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int reqpage)
("Wrong page type %p %p", ma[reqpage], object));
if (object->type == OBJT_DEVICE) {
TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
ma[reqpage], pageq);
ma[reqpage], plinks.q);
}
}


@ -261,7 +261,7 @@ v2sizep(vm_offset_t va)
p = PHYS_TO_VM_PAGE(pa);
KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
return ((u_long *)&p->pageq.tqe_next);
return (&p->plinks.memguard.p);
}
static u_long *
@ -276,7 +276,7 @@ v2sizev(vm_offset_t va)
p = PHYS_TO_VM_PAGE(pa);
KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
return ((u_long *)&p->pageq.tqe_prev);
return (&p->plinks.memguard.v);
}
/*


@ -124,7 +124,7 @@ sg_pager_dealloc(vm_object_t object)
* Free up our fake pages.
*/
while ((m = TAILQ_FIRST(&object->un_pager.sgp.sgp_pglist)) != 0) {
TAILQ_REMOVE(&object->un_pager.sgp.sgp_pglist, m, pageq);
TAILQ_REMOVE(&object->un_pager.sgp.sgp_pglist, m, plinks.q);
vm_page_putfake(m);
}
@ -182,7 +182,7 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
/* Construct a new fake page. */
page = vm_page_getfake(paddr, memattr);
VM_OBJECT_WLOCK(object);
TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, pageq);
TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, plinks.q);
/* Free the original pages and insert this fake page into the object. */
for (i = 0; i < count; i++) {


@ -718,18 +718,6 @@ keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
keg->uk_size);
}
if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
vm_object_t obj;
if (flags & UMA_SLAB_KMEM)
obj = kmem_object;
else if (flags & UMA_SLAB_KERNEL)
obj = kernel_object;
else
obj = NULL;
for (i = 0; i < keg->uk_ppera; i++)
vsetobj((vm_offset_t)mem + (i * PAGE_SIZE), obj);
}
if (keg->uk_flags & UMA_ZONE_OFFPAGE)
zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
#ifdef UMA_DEBUG
@ -3112,7 +3100,7 @@ uma_large_malloc(int size, int wait)
void
uma_large_free(uma_slab_t slab)
{
vsetobj((vm_offset_t)slab->us_data, kmem_object);
page_free(slab->us_data, slab->us_size, slab->us_flags);
zone_free_item(slabzone, slab, NULL, SKIP_NONE);
}


@ -407,7 +407,7 @@ vtoslab(vm_offset_t va)
uma_slab_t slab;
p = PHYS_TO_VM_PAGE(pmap_kextract(va));
slab = (uma_slab_t )p->object;
slab = (uma_slab_t )p->plinks.s.pv;
if (p->flags & PG_SLAB)
return (slab);
@ -421,20 +421,10 @@ vsetslab(vm_offset_t va, uma_slab_t slab)
vm_page_t p;
p = PHYS_TO_VM_PAGE(pmap_kextract(va));
p->object = (vm_object_t)slab;
p->plinks.s.pv = slab;
p->flags |= PG_SLAB;
}
static __inline void
vsetobj(vm_offset_t va, vm_object_t obj)
{
vm_page_t p;
p = PHYS_TO_VM_PAGE(pmap_kextract(va));
p->object = obj;
p->flags &= ~PG_SLAB;
}
/*
* The following two functions may be defined by architecture specific code
* if they can provide more effecient allocation functions. This is useful


@ -1643,6 +1643,16 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
return (m);
}
static void
vm_page_alloc_contig_vdrop(struct spglist *lst)
{
while (!SLIST_EMPTY(lst)) {
vdrop((struct vnode *)SLIST_FIRST(lst)->plinks.s.pv);
SLIST_REMOVE_HEAD(lst, plinks.s.ss);
}
}
/*
* vm_page_alloc_contig:
*
@ -1687,7 +1697,8 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
vm_paddr_t boundary, vm_memattr_t memattr)
{
struct vnode *drop;
vm_page_t deferred_vdrop_list, m, m_tmp, m_ret;
struct spglist deferred_vdrop_list;
vm_page_t m, m_tmp, m_ret;
u_int flags, oflags;
int req_class;
@ -1712,7 +1723,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
req_class = VM_ALLOC_SYSTEM;
deferred_vdrop_list = NULL;
SLIST_INIT(&deferred_vdrop_list);
mtx_lock(&vm_page_queue_free_mtx);
if (cnt.v_free_count + cnt.v_cache_count >= npages +
cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
@ -1744,9 +1755,9 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
* page list, "pageq" can be safely abused to
* construct a short-lived list of vnodes.
*/
m->pageq.tqe_prev = (void *)drop;
m->pageq.tqe_next = deferred_vdrop_list;
deferred_vdrop_list = m;
m->plinks.s.pv = drop;
SLIST_INSERT_HEAD(&deferred_vdrop_list, m,
plinks.s.ss);
}
}
else {
@ -1792,11 +1803,8 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
m->oflags = oflags;
if (object != NULL) {
if (vm_page_insert(m, object, pindex)) {
while (deferred_vdrop_list != NULL) {
vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
deferred_vdrop_list =
deferred_vdrop_list->pageq.tqe_next;
}
vm_page_alloc_contig_vdrop(
&deferred_vdrop_list);
if (vm_paging_needed())
pagedaemon_wakeup();
for (m = m_ret, m_tmp = m_ret;
@ -1815,10 +1823,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
pmap_page_set_memattr(m, memattr);
pindex++;
}
while (deferred_vdrop_list != NULL) {
vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
}
vm_page_alloc_contig_vdrop(&deferred_vdrop_list);
if (vm_paging_needed())
pagedaemon_wakeup();
return (m_ret);
@ -2035,7 +2040,7 @@ vm_page_dequeue(vm_page_t m)
pq = vm_page_pagequeue(m);
vm_pagequeue_lock(pq);
m->queue = PQ_NONE;
TAILQ_REMOVE(&pq->pq_pl, m, pageq);
TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
vm_pagequeue_cnt_dec(pq);
vm_pagequeue_unlock(pq);
}
@ -2056,7 +2061,7 @@ vm_page_dequeue_locked(vm_page_t m)
pq = vm_page_pagequeue(m);
vm_pagequeue_assert_locked(pq);
m->queue = PQ_NONE;
TAILQ_REMOVE(&pq->pq_pl, m, pageq);
TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
vm_pagequeue_cnt_dec(pq);
}
@ -2076,7 +2081,7 @@ vm_page_enqueue(int queue, vm_page_t m)
pq = &vm_phys_domain(m)->vmd_pagequeues[queue];
vm_pagequeue_lock(pq);
m->queue = queue;
TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq);
TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
vm_pagequeue_cnt_inc(pq);
vm_pagequeue_unlock(pq);
}
@ -2098,8 +2103,8 @@ vm_page_requeue(vm_page_t m)
("vm_page_requeue: page %p is not queued", m));
pq = vm_page_pagequeue(m);
vm_pagequeue_lock(pq);
TAILQ_REMOVE(&pq->pq_pl, m, pageq);
TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq);
TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
vm_pagequeue_unlock(pq);
}
@ -2119,8 +2124,8 @@ vm_page_requeue_locked(vm_page_t m)
("vm_page_requeue_locked: page %p is not queued", m));
pq = vm_page_pagequeue(m);
vm_pagequeue_assert_locked(pq);
TAILQ_REMOVE(&pq->pq_pl, m, pageq);
TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq);
TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
}
/*
@ -2413,9 +2418,9 @@ _vm_page_deactivate(vm_page_t m, int athead)
vm_pagequeue_lock(pq);
m->queue = PQ_INACTIVE;
if (athead)
TAILQ_INSERT_HEAD(&pq->pq_pl, m, pageq);
TAILQ_INSERT_HEAD(&pq->pq_pl, m, plinks.q);
else
TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq);
TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
vm_pagequeue_cnt_inc(pq);
vm_pagequeue_unlock(pq);
}


@ -126,10 +126,19 @@ typedef uint64_t vm_page_bits_t;
#endif
struct vm_page {
TAILQ_ENTRY(vm_page) pageq; /* page queue or free list (Q) */
TAILQ_ENTRY(vm_page) listq; /* pages in same object (O) */
vm_object_t object; /* which object am I in (O,P)*/
union {
TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
struct {
SLIST_ENTRY(vm_page) ss; /* private slists */
void *pv;
} s;
struct {
u_long p;
u_long v;
} memguard;
} plinks;
TAILQ_ENTRY(vm_page) listq; /* pages in same object (O) */
vm_object_t object; /* which object am I in (O,P) */
vm_pindex_t pindex; /* offset into object (O,P) */
vm_paddr_t phys_addr; /* physical address of page */
struct md_page md; /* machine dependant stuff */
@ -145,7 +154,7 @@ struct vm_page {
uint16_t flags; /* page PG_* flags (P) */
u_char act_count; /* page usage count (P) */
u_char __pad0; /* unused padding */
/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
/* NOTE that these must support one bit per DEV_BSIZE in a page */
/* so, on normal X86 kernels, they must be at least 8 bits wide */
vm_page_bits_t valid; /* map of valid DEV_BSIZE chunks (O) */
vm_page_bits_t dirty; /* map of dirty DEV_BSIZE chunks (M) */
@ -201,6 +210,7 @@ struct vm_page {
#define PQ_COUNT 2
TAILQ_HEAD(pglist, vm_page);
SLIST_HEAD(spglist, vm_page);
struct vm_pagequeue {
struct mtx pq_mutex;


@ -274,7 +274,7 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
pq = vm_page_pagequeue(m);
object = m->object;
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
vm_pagequeue_unlock(pq);
vm_page_unlock(m);
VM_OBJECT_WLOCK(object);
@ -282,11 +282,11 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
vm_pagequeue_lock(pq);
/* Page queue might have changed. */
*next = TAILQ_NEXT(&marker, pageq);
*next = TAILQ_NEXT(&marker, plinks.q);
unchanged = (m->queue == queue &&
m->object == object &&
&marker == TAILQ_NEXT(m, pageq));
TAILQ_REMOVE(&pq->pq_pl, &marker, pageq);
&marker == TAILQ_NEXT(m, plinks.q));
TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
return (unchanged);
}
@ -315,15 +315,15 @@ vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
vm_pageout_init_marker(&marker, queue);
pq = vm_page_pagequeue(m);
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
vm_pagequeue_unlock(pq);
vm_page_lock(m);
vm_pagequeue_lock(pq);
/* Page queue might have changed. */
*next = TAILQ_NEXT(&marker, pageq);
unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
TAILQ_REMOVE(&pq->pq_pl, &marker, pageq);
*next = TAILQ_NEXT(&marker, plinks.q);
unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, plinks.q));
TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
return (unchanged);
}
@ -578,7 +578,7 @@ vm_pageout_launder(struct vm_pagequeue *pq, int tries, vm_paddr_t low,
vm_page_t m, m_tmp, next;
vm_pagequeue_lock(pq);
TAILQ_FOREACH_SAFE(m, &pq->pq_pl, pageq, next) {
TAILQ_FOREACH_SAFE(m, &pq->pq_pl, plinks.q, next) {
if ((m->flags & PG_MARKER) != 0)
continue;
pa = VM_PAGE_TO_PHYS(m);
@ -963,7 +963,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m));
PCPU_INC(cnt.v_pdpages);
next = TAILQ_NEXT(m, pageq);
next = TAILQ_NEXT(m, plinks.q);
/*
* skip marker pages
@ -1013,7 +1013,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
* 'next' pointer. Use our marker to remember our
* place.
*/
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, pageq);
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
vm_pagequeue_unlock(pq);
queues_locked = FALSE;
@ -1207,7 +1207,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
*/
if (m->queue != PQ_INACTIVE ||
m->object != object ||
TAILQ_NEXT(m, pageq) != &vmd->vmd_marker) {
TAILQ_NEXT(m, plinks.q) != &vmd->vmd_marker) {
vm_page_unlock(m);
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
@ -1277,8 +1277,8 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
vm_pagequeue_lock(pq);
queues_locked = TRUE;
}
next = TAILQ_NEXT(&vmd->vmd_marker, pageq);
TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, pageq);
next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
}
vm_pagequeue_unlock(pq);
@ -1304,7 +1304,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
KASSERT(m->queue == PQ_ACTIVE,
("vm_pageout_scan: page %p isn't active", m));
next = TAILQ_NEXT(m, pageq);
next = TAILQ_NEXT(m, plinks.q);
if ((m->flags & PG_MARKER) != 0) {
m = next;
continue;
@ -1612,7 +1612,7 @@ vm_pageout_page_stats(struct vm_domain *vmd)
KASSERT(m->queue == PQ_ACTIVE,
("vm_pageout_page_stats: page %p isn't active", m));
next = TAILQ_NEXT(m, pageq);
next = TAILQ_NEXT(m, plinks.q);
if ((m->flags & PG_MARKER) != 0) {
m = next;
continue;


@ -226,9 +226,9 @@ vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
m->order = order;
if (tail)
TAILQ_INSERT_TAIL(&fl[order].pl, m, pageq);
TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
else
TAILQ_INSERT_HEAD(&fl[order].pl, m, pageq);
TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
fl[order].lcnt++;
}
@ -236,7 +236,7 @@ static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{
TAILQ_REMOVE(&fl[order].pl, m, pageq);
TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
fl[order].lcnt--;
m->order = VM_NFREEORDER;
}
@ -819,7 +819,7 @@ vm_phys_zero_pages_idle(void)
fl = vm_phys_free_queues[domain][0][0];
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
for (;;) {
TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, pageq) {
TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
vm_phys_unfree_page(m_tmp);
@ -889,7 +889,7 @@ vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
for (pind = 0; pind < VM_NFREEPOOL; pind++) {
fl = &vm_phys_free_queues[domain][flind][pind][0];
TAILQ_FOREACH(m_ret, &fl[oind].pl, pageq) {
TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
/*
* A free list may contain physical pages
* from one or more segments.