Complete the removal of the "wire_count" field from struct vm_page.

Convert all remaining references to that field to "ref_count" and update
comments accordingly.  No functional change intended.

Reviewed by:	alc, kib
Sponsored by:	Intel, Netflix
Differential Revision:	https://reviews.freebsd.org/D21768
commit b119329d81 (parent a9d0e0071c)
Mark Johnston, 2019-09-25 16:11:35 +00:00
12 changed files with 193 additions and 193 deletions
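The net effect on struct vm_page itself appears in the second-to-last file below; condensed, the change is the following sketch (all other members elided, u_int spelled out for self-containment):

typedef unsigned int u_int;	/* as in <sys/types.h> */

/* Before: wire_count aliased ref_count through an anonymous union. */
struct vm_page_before {
	union {
		u_int wire_count;
		u_int ref_count;	/* page references */
	};
};

/* After: a single reference count, with the new "(A)" atomic annotation. */
struct vm_page_after {
	u_int ref_count;	/* page references (A) */
};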

View File

@@ -74,7 +74,7 @@ efi_destroy_1t1_map(void)
if (obj_1t1_pt != NULL) {
VM_OBJECT_RLOCK(obj_1t1_pt);
TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
-m->wire_count = VPRC_OBJREF;
+m->ref_count = VPRC_OBJREF;
vm_wire_sub(obj_1t1_pt->resident_page_count);
VM_OBJECT_RUNLOCK(obj_1t1_pt);
vm_object_deallocate(obj_1t1_pt);

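The hunk above leans on the encoding of ref_count: the wiring count lives in the low bits, while VPRC_OBJREF in the high bit records that the owning VM object still holds a reference. Storing VPRC_OBJREF therefore discards the wirings (accounted for by the single vm_wire_sub() call) while keeping the object reference, so vm_object_deallocate() can free the pages. A compilable sketch; the VPRC_* values are my reading of the contemporaneous vm_page.h and should be treated as assumptions:

#include <assert.h>

typedef unsigned int u_int;

#define	VPRC_BLOCKED	0x40000000u	/* assumed value */
#define	VPRC_OBJREF	0x80000000u	/* assumed value */
#define	VPRC_WIRE_COUNT(c)	((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))

int
main(void)
{
	u_int ref_count = VPRC_OBJREF | 3;	/* object reference + 3 wirings */

	assert(VPRC_WIRE_COUNT(ref_count) == 3);
	ref_count = VPRC_OBJREF;	/* what efi_destroy_1t1_map() stores */
	assert(VPRC_WIRE_COUNT(ref_count) == 0);
	return (0);
}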
View File

@@ -1856,7 +1856,7 @@ pmap_init(void)
("pmap_init: page table page is out of range"));
mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
-mpte->wire_count = 1;
+mpte->ref_count = 1;
/*
* Collect the page table pages that were replaced by a 2MB
@@ -3285,8 +3285,8 @@ pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
}
/*
-* Decrements a page table page's wire count, which is used to record the
-* number of valid page table entries within the page. If the wire count
+* Decrements a page table page's reference count, which is used to record the
+* number of valid page table entries within the page. If the reference count
* drops to zero, then the page table page is unmapped. Returns TRUE if the
* page table page was unmapped and FALSE otherwise.
*/
@@ -3294,8 +3294,8 @@ static inline boolean_t
pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
---m->wire_count;
-if (m->wire_count == 0) {
+--m->ref_count;
+if (m->ref_count == 0) {
_pmap_unwire_ptp(pmap, va, m, free);
return (TRUE);
} else
@@ -3355,7 +3355,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
/*
* After removing a page table entry, this routine is used to
-* conditionally free the page, and manage the hold/wire counts.
+* conditionally free the page, and manage the reference count.
*/
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
@@ -3615,7 +3615,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
} else {
/* Add reference to pdp page */
pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
-pdppg->wire_count++;
+pdppg->ref_count++;
}
pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
@@ -3660,7 +3660,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
} else {
/* Add reference to the pd page */
pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
-pdpg->wire_count++;
+pdpg->ref_count++;
}
}
pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
@@ -3689,7 +3689,7 @@ pmap_allocpde(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
if (pdpe != NULL && (*pdpe & PG_V) != 0) {
/* Add a reference to the pd page. */
pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
-pdpg->wire_count++;
+pdpg->ref_count++;
} else {
/* Allocate a pd page. */
ptepindex = pmap_pde_pindex(va);
@@ -3740,7 +3740,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
*/
if (pd != NULL && (*pd & PG_V) != 0) {
m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
-m->wire_count++;
+m->ref_count++;
} else {
/*
* Here if the pte page isn't mapped, or if it has been
@@ -4205,7 +4205,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
m_pc = SLIST_FIRST(&free);
SLIST_REMOVE_HEAD(&free, plinks.s.ss);
/* Recycle a freed page table page. */
-m_pc->wire_count = 1;
+m_pc->ref_count = 1;
}
vm_page_free_pages_toq(&free, true);
return (m_pc);
@@ -4785,7 +4785,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
}
if (!in_kernel) {
-mpte->wire_count = NPTEPG;
+mpte->ref_count = NPTEPG;
pmap_resident_count_inc(pmap, 1);
}
}
@@ -4946,9 +4946,9 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
("pmap_remove_pde: pte page not promoted"));
pmap_resident_count_dec(pmap, 1);
-KASSERT(mpte->wire_count == NPTEPG,
-("pmap_remove_pde: pte page wire count error"));
-mpte->wire_count = 0;
+KASSERT(mpte->ref_count == NPTEPG,
+("pmap_remove_pde: pte page ref count error"));
+mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, free, FALSE);
}
}
@@ -5709,7 +5709,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pte = pmap_pde_to_pte(pde, va);
if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
-mpte->wire_count++;
+mpte->ref_count++;
}
} else if (va < VM_MAXUSER_ADDRESS) {
/*
@@ -5751,8 +5751,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* Remove the extra PT page reference.
*/
if (mpte != NULL) {
-mpte->wire_count--;
-KASSERT(mpte->wire_count > 0,
+mpte->ref_count--;
+KASSERT(mpte->ref_count > 0,
("pmap_enter: missing reference to page table page,"
" va: 0x%lx", va));
}
@@ -5873,7 +5873,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* If both the page table page and the reservation are fully
* populated, then attempt promotion.
*/
-if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
+if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
pmap_ps_enabled(pmap) &&
(m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0)
@@ -5975,10 +5975,10 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
pde = &pde[pmap_pde_index(va)];
oldpde = *pde;
if ((oldpde & PG_V) != 0) {
-KASSERT(pdpg->wire_count > 1,
-("pmap_enter_pde: pdpg's wire count is too low"));
+KASSERT(pdpg->ref_count > 1,
+("pmap_enter_pde: pdpg's reference count is too low"));
if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
-pdpg->wire_count--;
+pdpg->ref_count--;
CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
" in pmap %p", va, pmap);
return (KERN_FAILURE);
@@ -6152,7 +6152,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
ptepindex = pmap_pde_pindex(va);
if (mpte && (mpte->pindex == ptepindex)) {
-mpte->wire_count++;
+mpte->ref_count++;
} else {
/*
* Get the page directory entry
@@ -6169,7 +6169,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if (*ptepa & PG_PS)
return (NULL);
mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
-mpte->wire_count++;
+mpte->ref_count++;
} else {
/*
* Pass NULL instead of the PV list lock
@@ -6188,7 +6188,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
}
if (*pte) {
if (mpte != NULL) {
-mpte->wire_count--;
+mpte->ref_count--;
mpte = NULL;
}
return (mpte);
@@ -6334,8 +6334,8 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
atomic_add_long(&pmap_pde_mappings, 1);
} else {
/* Continue on if the PDE is already valid. */
-pdpg->wire_count--;
-KASSERT(pdpg->wire_count > 0,
+pdpg->ref_count--;
+KASSERT(pdpg->ref_count > 0,
("pmap_object_init_pt: missing reference "
"to page directory page, va: 0x%lx", addr));
}
@@ -6525,13 +6525,13 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
PAGE_SIZE);
atomic_add_long(&pmap_pde_mappings, 1);
} else
-dst_pdpg->wire_count--;
+dst_pdpg->ref_count--;
continue;
}
srcptepaddr &= PG_FRAME;
srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
-KASSERT(srcmpte->wire_count > 0,
+KASSERT(srcmpte->ref_count > 0,
("pmap_copy: source page table page is unused"));
if (va_next > end_addr)
@@ -6553,7 +6553,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
KASSERT(dstmpte->pindex ==
pmap_pde_pindex(addr),
("dstmpte pindex/addr mismatch"));
-dstmpte->wire_count++;
+dstmpte->ref_count++;
} else if ((dstmpte = pmap_allocpte(dst_pmap, addr,
NULL)) == NULL)
goto out;
@@ -6586,7 +6586,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
goto out;
}
/* Have we copied all of the valid mappings? */
-if (dstmpte->wire_count >= srcmpte->wire_count)
+if (dstmpte->ref_count >= srcmpte->ref_count)
break;
}
}
@@ -6996,9 +6996,9 @@ pmap_remove_pages(pmap_t pmap)
KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
("pmap_remove_pages: pte page not promoted"));
pmap_resident_count_dec(pmap, 1);
-KASSERT(mpte->wire_count == NPTEPG,
-("pmap_remove_pages: pte page wire count error"));
-mpte->wire_count = 0;
+KASSERT(mpte->ref_count == NPTEPG,
+("pmap_remove_pages: pte page reference count error"));
+mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, &free, FALSE);
}
} else {
@@ -8728,7 +8728,7 @@ pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype)
m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
-if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
+if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
pmap_ps_enabled(pmap) &&
(m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0) {
@@ -8919,12 +8919,12 @@ pmap_quick_remove_page(vm_offset_t addr)
/*
* Pdp pages from the large map are managed differently from either
* kernel or user page table pages. They are permanently allocated at
-* initialization time, and their wire count is permanently set to
+* initialization time, and their reference count is permanently set to
* zero. The pml4 entries pointing to those pages are copied into
* each allocated pmap.
*
* In contrast, pd and pt pages are managed like user page table
-* pages. They are dynamically allocated, and their wire count
+* pages. They are dynamically allocated, and their reference count
* represents the number of valid entries within the page.
*/
static vm_page_t
@@ -9011,7 +9011,7 @@ pmap_large_map_pte(vm_offset_t va)
goto retry;
mphys = VM_PAGE_TO_PHYS(m);
*pde = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
-PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->wire_count++;
+PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->ref_count++;
} else {
MPASS((*pde & X86_PG_PS) == 0);
mphys = *pde & PG_FRAME;
@@ -9131,7 +9131,7 @@ pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr,
X86_PG_V | X86_PG_A | pg_nx |
pmap_cache_bits(kernel_pmap, mattr, TRUE);
PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->
-wire_count++;
+ref_count++;
inc = NBPDR;
} else {
pte = pmap_large_map_pte(va);
@@ -9140,7 +9140,7 @@ pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr,
X86_PG_A | pg_nx | pmap_cache_bits(kernel_pmap,
mattr, FALSE);
PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte))->
-wire_count++;
+ref_count++;
inc = PAGE_SIZE;
}
}
@@ -9209,8 +9209,8 @@ pmap_large_unmap(void *svaa, vm_size_t len)
pde_store(pde, 0);
inc = NBPDR;
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
-m->wire_count--;
-if (m->wire_count == 0) {
+m->ref_count--;
+if (m->ref_count == 0) {
*pdpe = 0;
SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
}
@@ -9223,13 +9223,13 @@ pmap_large_unmap(void *svaa, vm_size_t len)
pte_clear(pte);
inc = PAGE_SIZE;
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pte));
-m->wire_count--;
-if (m->wire_count == 0) {
+m->ref_count--;
+if (m->ref_count == 0) {
*pde = 0;
SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
-m->wire_count--;
-if (m->wire_count == 0) {
+m->ref_count--;
+if (m->ref_count == 0) {
*pdpe = 0;
SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
}
@@ -9436,7 +9436,7 @@ static bool
pmap_pti_free_page(vm_page_t m)
{
-KASSERT(m->wire_count > 0, ("page %p not wired", m));
+KASSERT(m->ref_count > 0, ("page %p not referenced", m));
if (!vm_page_unwire_noq(m))
return (false);
vm_page_free_zero(m);
@@ -9530,7 +9530,7 @@ pmap_pti_wire_pte(void *pte)
VM_OBJECT_ASSERT_WLOCKED(pti_obj);
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
-m->wire_count++;
+m->ref_count++;
}
static void
@@ -9540,8 +9540,8 @@ pmap_pti_unwire_pde(void *pde, bool only_ref)
VM_OBJECT_ASSERT_WLOCKED(pti_obj);
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde));
-MPASS(m->wire_count > 0);
-MPASS(only_ref || m->wire_count > 1);
+MPASS(m->ref_count > 0);
+MPASS(only_ref || m->ref_count > 1);
pmap_pti_free_page(m);
}
@@ -9553,7 +9553,7 @@ pmap_pti_unwire_pte(void *pte, vm_offset_t va)
VM_OBJECT_ASSERT_WLOCKED(pti_obj);
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
-MPASS(m->wire_count > 0);
+MPASS(m->ref_count > 0);
if (pmap_pti_free_page(m)) {
pde = pmap_pti_pde(va);
MPASS((*pde & (X86_PG_PS | X86_PG_V)) == X86_PG_V);

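Most of the pmap hunks above follow the invariant stated in the pmap_unwire_ptp() comment: a page table page's ref_count equals the number of valid page table entries it contains, and the page is reclaimed when the count reaches zero. A minimal user-space model of that pattern, with hypothetical types (fake_ptpage is not a kernel structure):

#include <stdbool.h>

typedef unsigned int u_int;

struct fake_ptpage {
	u_int ref_count;	/* number of valid PTEs in this PT page */
};

/* Models pmap_unwire_ptp(): drop one PTE's reference, reclaim on zero. */
static bool
fake_unwire_ptp(struct fake_ptpage *m)
{
	--m->ref_count;
	if (m->ref_count == 0) {
		/* Here _pmap_unwire_ptp() unmaps and frees the PT page. */
		return (true);
	}
	return (false);
}

int
main(void)
{
	struct fake_ptpage m = { .ref_count = 2 };

	return (fake_unwire_ptp(&m) ? 1 : 0);	/* 0: one PTE still valid */
}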
View File

@@ -2365,7 +2365,7 @@ pmap_release(pmap_t pmap)
* untouched, so the table (strictly speaking a page which holds it)
* is never freed if promoted.
*
-* If a page m->wire_count == 1 then no valid mappings exist in any L2 page
+* If a page m->ref_count == 1 then no valid mappings exist in any L2 page
* table in the page and the page itself is only mapped in PT2TAB.
*/
@@ -2376,7 +2376,7 @@ pt2_wirecount_init(vm_page_t m)
/*
* Note: A page m is allocated with VM_ALLOC_WIRED flag and
-* m->wire_count should be already set correctly.
+* m->ref_count should be already set correctly.
* So, there is no need to set it again herein.
*/
for (i = 0; i < NPT2_IN_PG; i++)
@@ -2396,10 +2396,10 @@ pt2_wirecount_inc(vm_page_t m, uint32_t pte1_idx)
*/
KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] < (NPTE2_IN_PT2 + 1),
("%s: PT2 is overflowing ...", __func__));
-KASSERT(m->wire_count <= (NPTE2_IN_PG + 1),
+KASSERT(m->ref_count <= (NPTE2_IN_PG + 1),
("%s: PT2PG is overflowing ...", __func__));
-m->wire_count++;
+m->ref_count++;
m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]++;
}
@@ -2409,10 +2409,10 @@ pt2_wirecount_dec(vm_page_t m, uint32_t pte1_idx)
KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] != 0,
("%s: PT2 is underflowing ...", __func__));
-KASSERT(m->wire_count > 1,
+KASSERT(m->ref_count > 1,
("%s: PT2PG is underflowing ...", __func__));
-m->wire_count--;
+m->ref_count--;
m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]--;
}
@@ -2422,16 +2422,16 @@ pt2_wirecount_set(vm_page_t m, uint32_t pte1_idx, uint16_t count)
KASSERT(count <= NPTE2_IN_PT2,
("%s: invalid count %u", __func__, count));
-KASSERT(m->wire_count > m->md.pt2_wirecount[pte1_idx & PT2PG_MASK],
-("%s: PT2PG corrupting (%u, %u) ...", __func__, m->wire_count,
+KASSERT(m->ref_count > m->md.pt2_wirecount[pte1_idx & PT2PG_MASK],
+("%s: PT2PG corrupting (%u, %u) ...", __func__, m->ref_count,
m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]));
-m->wire_count -= m->md.pt2_wirecount[pte1_idx & PT2PG_MASK];
-m->wire_count += count;
+m->ref_count -= m->md.pt2_wirecount[pte1_idx & PT2PG_MASK];
+m->ref_count += count;
m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] = count;
-KASSERT(m->wire_count <= (NPTE2_IN_PG + 1),
-("%s: PT2PG is overflowed (%u) ...", __func__, m->wire_count));
+KASSERT(m->ref_count <= (NPTE2_IN_PG + 1),
+("%s: PT2PG is overflowed (%u) ...", __func__, m->ref_count));
}
static __inline uint32_t
@@ -2460,7 +2460,7 @@ static __inline boolean_t
pt2pg_is_empty(vm_page_t m)
{
-return (m->wire_count == 1);
+return (m->ref_count == 1);
}
/*
@@ -2634,7 +2634,7 @@ pmap_unwire_pt2pg(pmap_t pmap, vm_offset_t va, vm_page_t m)
(void)pt2tab_load_clear(pte2p);
pmap_tlb_flush(pmap, pt2map_pt2pg(va));
-m->wire_count = 0;
+m->ref_count = 0;
pmap->pm_stats.resident_count--;
/*
@@ -2683,8 +2683,8 @@ pmap_unwire_pt2_all(pmap_t pmap, vm_offset_t va, vm_page_t m,
KASSERT(m->pindex == (pte1_idx & ~PT2PG_MASK),
("%s: PT2 page's pindex is wrong", __func__));
-KASSERT(m->wire_count > pt2_wirecount_get(m, pte1_idx),
-("%s: bad pt2 wire count %u > %u", __func__, m->wire_count,
+KASSERT(m->ref_count > pt2_wirecount_get(m, pte1_idx),
+("%s: bad pt2 wire count %u > %u", __func__, m->ref_count,
pt2_wirecount_get(m, pte1_idx)));
/*
@@ -2949,7 +2949,7 @@ pmap_pv_reclaim(pmap_t locked_pmap)
m_pc = SLIST_FIRST(&free);
SLIST_REMOVE_HEAD(&free, plinks.s.ss);
/* Recycle a freed page table page. */
-m_pc->wire_count = 1;
+m_pc->ref_count = 1;
vm_wire_add(1);
}
vm_page_free_pages_toq(&free, false);
@@ -6707,7 +6707,7 @@ pmap_pid_dump(int pid)
m = PHYS_TO_VM_PAGE(pa);
printf("va: 0x%x, pa: 0x%x, w: %d, "
"f: 0x%x", va, pa,
-m->wire_count, m->flags);
+m->ref_count, m->flags);
npte2++;
index++;
if (index >= 2) {
@@ -6818,7 +6818,7 @@ dump_link(pmap_t pmap, uint32_t pte1_idx, boolean_t invalid_ok)
pte2_class(pte2), !!(pte2 & PTE2_S), !(pte2 & PTE2_NG), m);
if (m != NULL) {
printf(" v:%d w:%d f:0x%04X\n", m->valid,
-m->wire_count, m->flags);
+m->ref_count, m->flags);
} else {
printf("\n");
}
@@ -6892,7 +6892,7 @@ DB_SHOW_COMMAND(pmap, pmap_pmap_print)
dump_link_ok = FALSE;
}
else if (m != NULL)
printf(" w:%d w2:%u", m->wire_count,
printf(" w:%d w2:%u", m->ref_count,
pt2_wirecount_get(m, pte1_index(va)));
if (pte2 == 0)
printf(" !!! pt2tab entry is ZERO");
@@ -6928,7 +6928,7 @@ dump_pt2tab(pmap_t pmap)
pte2_class(pte2), !!(pte2 & PTE2_S), m);
if (m != NULL)
printf(" , w: %d, f: 0x%04X pidx: %lld",
-m->wire_count, m->flags, m->pindex);
+m->ref_count, m->flags, m->pindex);
printf("\n");
}
}

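The arm v6 pmap is the one place where ref_count aggregates finer-grained counts: a 4 KB page holds several 1 KB L2 page tables, each tracked in md.pt2_wirecount[], with the page-level ref_count keeping the running total (hence the NPTE2_IN_PG + 1 assertions above). A sketch of the pt2_wirecount_set() arithmetic; the constants are assumptions matching my reading of the layout:

#include <stdint.h>

#define	NPT2_IN_PG	4	/* assumed: four 1 KB L2 tables per page */
#define	NPTE2_IN_PT2	256	/* assumed: entries per L2 table */

struct fake_pt2pg {
	unsigned int ref_count;			/* aggregate count for the page */
	uint16_t pt2_wirecount[NPT2_IN_PG];	/* per-table entry counts */
};

/* Models pt2_wirecount_set(): replace one slot's share of the aggregate. */
static void
fake_pt2_wirecount_set(struct fake_pt2pg *m, int slot, uint16_t count)
{
	m->ref_count -= m->pt2_wirecount[slot];
	m->ref_count += count;
	m->pt2_wirecount[slot] = count;
}

int
main(void)
{
	struct fake_pt2pg pg = { .ref_count = 1 };	/* 1: empty page, cf. pt2pg_is_empty() */

	fake_pt2_wirecount_set(&pg, 0, 100);
	return (pg.ref_count == 101 ? 0 : 1);
}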
View File

@@ -74,7 +74,7 @@ efi_destroy_1t1_map(void)
if (obj_1t1_pt != NULL) {
VM_OBJECT_RLOCK(obj_1t1_pt);
TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
-m->wire_count = VPRC_OBJREF;
+m->ref_count = VPRC_OBJREF;
vm_wire_sub(obj_1t1_pt->resident_page_count);
VM_OBJECT_RUNLOCK(obj_1t1_pt);
vm_object_deallocate(obj_1t1_pt);

View File

@@ -1348,8 +1348,8 @@ pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
}
/*
-* Decrements a page table page's wire count, which is used to record the
-* number of valid page table entries within the page. If the wire count
+* Decrements a page table page's reference count, which is used to record the
+* number of valid page table entries within the page. If the reference count
* drops to zero, then the page table page is unmapped. Returns TRUE if the
* page table page was unmapped and FALSE otherwise.
*/
@@ -1357,8 +1357,8 @@ static inline boolean_t
pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
---m->wire_count;
-if (m->wire_count == 0) {
+--m->ref_count;
+if (m->ref_count == 0) {
_pmap_unwire_l3(pmap, va, m, free);
return (TRUE);
} else
@@ -1423,7 +1423,7 @@ _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
/*
* After removing a page table entry, this routine is used to
-* conditionally free the page, and manage the hold/wire counts.
+* conditionally free the page, and manage the reference count.
*/
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
@@ -1554,7 +1554,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
}
} else {
l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
-l1pg->wire_count++;
+l1pg->ref_count++;
}
l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
@@ -1595,7 +1595,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
}
} else {
l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
-l2pg->wire_count++;
+l2pg->ref_count++;
}
}
@@ -1621,7 +1621,7 @@ pmap_alloc_l2(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
/* Add a reference to the L2 page. */
l2pg = PHYS_TO_VM_PAGE(pmap_load(l1) & ~ATTR_MASK);
-l2pg->wire_count++;
+l2pg->ref_count++;
} else {
/* Allocate a L2 page. */
l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
@@ -1679,7 +1679,7 @@ pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
tpde = pmap_load(pde);
if (tpde != 0) {
m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK);
-m->wire_count++;
+m->ref_count++;
return (m);
}
break;
@@ -2044,7 +2044,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
m_pc = SLIST_FIRST(&free);
SLIST_REMOVE_HEAD(&free, plinks.s.ss);
/* Recycle a freed page table page. */
-m_pc->wire_count = 1;
+m_pc->ref_count = 1;
}
vm_page_free_pages_toq(&free, true);
return (m_pc);
@@ -2482,9 +2482,9 @@ pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
("pmap_remove_l2: l3 page not promoted"));
pmap_resident_count_dec(pmap, 1);
-KASSERT(ml3->wire_count == NL3PG,
-("pmap_remove_l2: l3 page wire count error"));
-ml3->wire_count = 0;
+KASSERT(ml3->ref_count == NL3PG,
+("pmap_remove_l2: l3 page ref count error"));
+ml3->ref_count = 0;
pmap_add_delayed_free_list(ml3, free, FALSE);
}
}
@@ -3229,7 +3229,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
l3 = pmap_l2_to_l3(pde, va);
if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
mpte = PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
-mpte->wire_count++;
+mpte->ref_count++;
}
goto havel3;
} else if (pde != NULL && lvl == 1) {
@@ -3240,7 +3240,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (va < VM_MAXUSER_ADDRESS) {
mpte = PHYS_TO_VM_PAGE(
pmap_load(l2) & ~ATTR_MASK);
-mpte->wire_count++;
+mpte->ref_count++;
}
goto havel3;
}
@@ -3291,8 +3291,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* Remove the extra PT page reference.
*/
if (mpte != NULL) {
-mpte->wire_count--;
-KASSERT(mpte->wire_count > 0,
+mpte->ref_count--;
+KASSERT(mpte->ref_count > 0,
("pmap_enter: missing reference to page table page,"
" va: 0x%lx", va));
}
@@ -3417,7 +3417,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
#if VM_NRESERVLEVEL > 0
-if ((mpte == NULL || mpte->wire_count == NL3PG) &&
+if ((mpte == NULL || mpte->ref_count == NL3PG) &&
pmap_ps_enabled(pmap) &&
(m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0) {
@@ -3494,10 +3494,10 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
l2 = &l2[pmap_l2_index(va)];
if ((old_l2 = pmap_load(l2)) != 0) {
-KASSERT(l2pg->wire_count > 1,
-("pmap_enter_l2: l2pg's wire count is too low"));
+KASSERT(l2pg->ref_count > 1,
+("pmap_enter_l2: l2pg's ref count is too low"));
if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
-l2pg->wire_count--;
+l2pg->ref_count--;
CTR2(KTR_PMAP,
"pmap_enter_l2: failure for va %#lx in pmap %p",
va, pmap);
@@ -3671,7 +3671,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
l2pindex = pmap_l2_pindex(va);
if (mpte && (mpte->pindex == l2pindex)) {
-mpte->wire_count++;
+mpte->ref_count++;
} else {
/*
* Get the l2 entry
@@ -3693,7 +3693,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if (lvl == 2 && pmap_load(pde) != 0) {
mpte =
PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
-mpte->wire_count++;
+mpte->ref_count++;
} else {
/*
* Pass NULL instead of the PV list lock
@@ -3722,7 +3722,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
if (pmap_load(l3) != 0) {
if (mpte != NULL) {
-mpte->wire_count--;
+mpte->ref_count--;
mpte = NULL;
}
return (mpte);
@@ -3952,14 +3952,14 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
PAGE_SIZE);
atomic_add_long(&pmap_l2_mappings, 1);
} else
-dst_l2pg->wire_count--;
+dst_l2pg->ref_count--;
continue;
}
KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
("pmap_copy: invalid L2 entry"));
srcptepaddr &= ~ATTR_MASK;
srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
-KASSERT(srcmpte->wire_count > 0,
+KASSERT(srcmpte->ref_count > 0,
("pmap_copy: source page table page is unused"));
if (va_next > end_addr)
va_next = end_addr;
@@ -3978,7 +3978,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
if (dstmpte != NULL) {
KASSERT(dstmpte->pindex == pmap_l2_pindex(addr),
("dstmpte pindex/addr mismatch"));
-dstmpte->wire_count++;
+dstmpte->ref_count++;
} else if ((dstmpte = pmap_alloc_l3(dst_pmap, addr,
NULL)) == NULL)
goto out;
@@ -4017,7 +4017,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
goto out;
}
/* Have we copied all of the valid mappings? */
-if (dstmpte->wire_count >= srcmpte->wire_count)
+if (dstmpte->ref_count >= srcmpte->ref_count)
break;
}
}
@@ -4382,9 +4382,9 @@ pmap_remove_pages(pmap_t pmap)
KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
("pmap_remove_pages: l3 page not promoted"));
pmap_resident_count_dec(pmap,1);
-KASSERT(ml3->wire_count == NL3PG,
-("pmap_remove_pages: l3 page wire count error"));
-ml3->wire_count = 0;
+KASSERT(ml3->ref_count == NL3PG,
+("pmap_remove_pages: l3 page ref count error"));
+ml3->ref_count = 0;
pmap_add_delayed_free_list(ml3,
&free, FALSE);
}
@@ -5521,7 +5521,7 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
}
if (va < VM_MAXUSER_ADDRESS) {
-ml3->wire_count = NL3PG;
+ml3->ref_count = NL3PG;
pmap_resident_count_inc(pmap, 1);
}
}

View File

@@ -1954,7 +1954,7 @@ agp_intel_gtt_insert_pages(device_t dev, u_int first_entry, u_int num_entries,
sc = device_get_softc(dev);
for (i = 0; i < num_entries; i++) {
MPASS(pages[i]->valid == VM_PAGE_BITS_ALL);
-MPASS(pages[i]->wire_count > 0);
+MPASS(pages[i]->ref_count > 0);
sc->match->driver->install_gtt_pte(dev, first_entry + i,
VM_PAGE_TO_PHYS(pages[i]), flags);
}

View File

@@ -992,7 +992,7 @@ __CONCAT(PMTYPE, init)(void)
("pmap_init: page table page is out of range"));
mpte->pindex = i + KPTDI;
mpte->phys_addr = KPTphys + ptoa(i);
-mpte->wire_count = 1;
+mpte->ref_count = 1;
/*
* Collect the page table pages that were replaced by a 2/4MB
@@ -1952,8 +1952,8 @@ pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
}
/*
-* Decrements a page table page's wire count, which is used to record the
-* number of valid page table entries within the page. If the wire count
+* Decrements a page table page's reference count, which is used to record the
+* number of valid page table entries within the page. If the reference count
* drops to zero, then the page table page is unmapped. Returns TRUE if the
* page table page was unmapped and FALSE otherwise.
*/
@@ -1961,8 +1961,8 @@ static inline boolean_t
pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
{
---m->wire_count;
-if (m->wire_count == 0) {
+--m->ref_count;
+if (m->ref_count == 0) {
_pmap_unwire_ptp(pmap, m, free);
return (TRUE);
} else
@@ -1992,7 +1992,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
/*
* After removing a page table entry, this routine is used to
-* conditionally free the page, and manage the hold/wire counts.
+* conditionally free the page, and manage the reference count.
*/
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free)
@@ -2171,7 +2171,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
*/
if (ptepa) {
m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
-m->wire_count++;
+m->ref_count++;
} else {
/*
* Here if the pte page isn't mapped, or if it has
@@ -2438,7 +2438,7 @@ pmap_pv_reclaim(pmap_t locked_pmap)
m_pc = SLIST_FIRST(&free);
SLIST_REMOVE_HEAD(&free, plinks.s.ss);
/* Recycle a freed page table page. */
-m_pc->wire_count = 1;
+m_pc->ref_count = 1;
}
vm_page_free_pages_toq(&free, true);
return (m_pc);
@@ -2797,7 +2797,7 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
return (FALSE);
}
if (pmap != kernel_pmap) {
-mpte->wire_count = NPTEPG;
+mpte->ref_count = NPTEPG;
pmap->pm_stats.resident_count++;
}
}
@@ -2993,9 +2993,9 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
("pmap_remove_pde: pte page not promoted"));
pmap->pm_stats.resident_count--;
-KASSERT(mpte->wire_count == NPTEPG,
-("pmap_remove_pde: pte page wire count error"));
-mpte->wire_count = 0;
+KASSERT(mpte->ref_count == NPTEPG,
+("pmap_remove_pde: pte page ref count error"));
+mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, free, FALSE);
}
}
@@ -3731,8 +3731,8 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m,
* Remove the extra PT page reference.
*/
if (mpte != NULL) {
-mpte->wire_count--;
-KASSERT(mpte->wire_count > 0,
+mpte->ref_count--;
+KASSERT(mpte->ref_count > 0,
("pmap_enter: missing reference to page table page,"
" va: 0x%x", va));
}
@@ -3853,7 +3853,7 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m,
* If both the page table page and the reservation are fully
* populated, then attempt promotion.
*/
-if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
+if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0)
pmap_promote_pde(pmap, pde, va);
@@ -4076,7 +4076,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
ptepindex = va >> PDRSHIFT;
if (mpte && (mpte->pindex == ptepindex)) {
-mpte->wire_count++;
+mpte->ref_count++;
} else {
/*
* Get the page directory entry
@@ -4091,7 +4091,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if (ptepa & PG_PS)
return (NULL);
mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
-mpte->wire_count++;
+mpte->ref_count++;
} else {
mpte = _pmap_allocpte(pmap, ptepindex,
PMAP_ENTER_NOSLEEP);
@@ -4107,7 +4107,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
pte = pmap_pte_quick(pmap, va);
if (*pte) {
if (mpte != NULL) {
-mpte->wire_count--;
+mpte->ref_count--;
mpte = NULL;
}
sched_unpin();
@@ -4402,7 +4402,7 @@ __CONCAT(PMTYPE, copy)(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
}
srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
-KASSERT(srcmpte->wire_count > 0,
+KASSERT(srcmpte->ref_count > 0,
("pmap_copy: source page table page is unused"));
if (pdnxt > end_addr)
@@ -4442,7 +4442,7 @@ __CONCAT(PMTYPE, copy)(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
}
goto out;
}
-if (dstmpte->wire_count >= srcmpte->wire_count)
+if (dstmpte->ref_count >= srcmpte->ref_count)
break;
}
addr += PAGE_SIZE;
@@ -4829,9 +4829,9 @@ __CONCAT(PMTYPE, remove_pages)(pmap_t pmap)
KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
("pmap_remove_pages: pte page not promoted"));
pmap->pm_stats.resident_count--;
-KASSERT(mpte->wire_count == NPTEPG,
-("pmap_remove_pages: pte page wire count error"));
-mpte->wire_count = 0;
+KASSERT(mpte->ref_count == NPTEPG,
+("pmap_remove_pages: pte page ref count error"));
+mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, &free, FALSE);
}
} else {

View File

@@ -983,8 +983,8 @@ pmap_qremove(vm_offset_t va, int count)
***************************************************/
/*
-* Decrements a page table page's wire count, which is used to record the
-* number of valid page table entries within the page. If the wire count
+* Decrements a page table page's reference count, which is used to record the
+* number of valid page table entries within the page. If the reference count
* drops to zero, then the page table page is unmapped. Returns TRUE if the
* page table page was unmapped and FALSE otherwise.
*/
@@ -992,8 +992,8 @@ static PMAP_INLINE boolean_t
pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
---m->wire_count;
-if (m->wire_count == 0) {
+--m->ref_count;
+if (m->ref_count == 0) {
_pmap_unwire_ptp(pmap, va, m);
return (TRUE);
} else
@@ -1043,7 +1043,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
/*
* After removing a page table entry, this routine is used to
-* conditionally free the page, and manage the hold/wire counts.
+* conditionally free the page, and manage the reference count.
*/
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
@@ -1194,7 +1194,7 @@ _pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags)
}
} else {
pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
-pg->wire_count++;
+pg->ref_count++;
}
/* Next level entry */
pde = (pd_entry_t *)*pdep;
@@ -1230,7 +1230,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
*/
if (pde != NULL && *pde != NULL) {
m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
-m->wire_count++;
+m->ref_count++;
} else {
/*
* Here if the pte page isn't mapped, or if it has been
@@ -2124,7 +2124,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* Remove extra pte reference
*/
if (mpte)
-mpte->wire_count--;
+mpte->ref_count--;
if (pte_test(&origpte, PTE_MANAGED)) {
m->md.pv_flags |= PV_TABLE_REF;
@@ -2165,8 +2165,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pmap_invalidate_page(pmap, va);
origpte = 0;
if (mpte != NULL) {
-mpte->wire_count--;
-KASSERT(mpte->wire_count > 0,
+mpte->ref_count--;
+KASSERT(mpte->ref_count > 0,
("pmap_enter: missing reference to page table page,"
" va: %p", (void *)va));
}
@@ -2276,7 +2276,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
ptepindex = pmap_pde_pindex(va);
if (mpte && (mpte->pindex == ptepindex)) {
-mpte->wire_count++;
+mpte->ref_count++;
} else {
/*
* Get the page directory entry
@@ -2290,7 +2290,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if (pde && *pde != 0) {
mpte = PHYS_TO_VM_PAGE(
MIPS_DIRECT_TO_PHYS(*pde));
-mpte->wire_count++;
+mpte->ref_count++;
} else {
mpte = _pmap_allocpte(pmap, ptepindex,
PMAP_ENTER_NOSLEEP);
@@ -2305,7 +2305,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
pte = pmap_pte(pmap, va);
if (pte_test(pte, PTE_V)) {
if (mpte != NULL) {
-mpte->wire_count--;
+mpte->ref_count--;
mpte = NULL;
}
return (mpte);

View File

@@ -663,8 +663,8 @@ pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
/*
* Free pdir page if there are no dir entries in this pdir.
*/
-m->wire_count--;
-if (m->wire_count == 0) {
+m->ref_count--;
+if (m->ref_count == 0) {
pdir_free(mmu, pmap, pp2d_idx, m);
return (1);
}
@@ -686,7 +686,7 @@ pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pdir));
-m->wire_count++;
+m->ref_count++;
}
/* Allocate page table. */
@@ -765,11 +765,11 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
/*
* Free ptbl pages if there are no pte entries in this ptbl.
-* wire_count has the same value for all ptbl pages, so check the
+* ref_count has the same value for all ptbl pages, so check the
* last page.
*/
-m->wire_count--;
-if (m->wire_count == 0) {
+m->ref_count--;
+if (m->ref_count == 0) {
ptbl_free(mmu, pmap, pdir, pdir_idx, m);
pdir_unhold(mmu, pmap, pp2d_idx);
return (1);
@@ -795,7 +795,7 @@ ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
-m->wire_count++;
+m->ref_count++;
}
#else
@@ -1010,15 +1010,15 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
pa = pte_vatopa(mmu, kernel_pmap,
(vm_offset_t)ptbl + (i * PAGE_SIZE));
m = PHYS_TO_VM_PAGE(pa);
-m->wire_count--;
+m->ref_count--;
}
/*
* Free ptbl pages if there are no pte etries in this ptbl.
-* wire_count has the same value for all ptbl pages, so check the last
+* ref_count has the same value for all ptbl pages, so check the last
* page.
*/
-if (m->wire_count == 0) {
+if (m->ref_count == 0) {
ptbl_free(mmu, pmap, pdir_idx);
//debugf("ptbl_unhold: e (freed ptbl)\n");
@@ -1056,7 +1056,7 @@ ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
pa = pte_vatopa(mmu, kernel_pmap,
(vm_offset_t)ptbl + (i * PAGE_SIZE));
m = PHYS_TO_VM_PAGE(pa);
-m->wire_count++;
+m->ref_count++;
}
}
#endif

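The booke pmap adds one more wrinkle visible above: a page table spans multiple pages, and every one of them carries the same ref_count, so ptbl_unhold() decrements them all and then tests only the last. A toy model under that assumption (PTBL_PAGES and the types are hypothetical):

#include <stdbool.h>

#define	PTBL_PAGES	2	/* hypothetical size of one ptbl */

struct fake_page {
	unsigned int ref_count;
};

/* Models ptbl_unhold(): all pages move in lockstep; the last one decides. */
static bool
fake_ptbl_unhold(struct fake_page pages[PTBL_PAGES])
{
	struct fake_page *m = NULL;
	int i;

	for (i = 0; i < PTBL_PAGES; i++) {
		m = &pages[i];
		m->ref_count--;
	}
	return (m->ref_count == 0);	/* true: caller frees the whole ptbl */
}

int
main(void)
{
	struct fake_page pages[PTBL_PAGES] = { { 1 }, { 1 } };

	return (fake_ptbl_unhold(pages) ? 0 : 1);
}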
View File

@@ -1127,8 +1127,8 @@ pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
}
/*
-* Decrements a page table page's wire count, which is used to record the
-* number of valid page table entries within the page. If the wire count
+* Decrements a page table page's reference count, which is used to record the
+* number of valid page table entries within the page. If the reference count
* drops to zero, then the page table page is unmapped. Returns TRUE if the
* page table page was unmapped and FALSE otherwise.
*/
@@ -1136,8 +1136,8 @@ static inline boolean_t
pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
---m->wire_count;
-if (m->wire_count == 0) {
+--m->ref_count;
+if (m->ref_count == 0) {
_pmap_unwire_ptp(pmap, va, m, free);
return (TRUE);
} else {
@@ -1184,7 +1184,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
/*
* After removing a page table entry, this routine is used to
-* conditionally free the page, and manage the hold/wire counts.
+* conditionally free the page, and manage the reference count.
*/
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
@@ -1327,7 +1327,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
} else {
phys = PTE_TO_PHYS(pmap_load(l1));
pdpg = PHYS_TO_VM_PAGE(phys);
-pdpg->wire_count++;
+pdpg->ref_count++;
}
phys = PTE_TO_PHYS(pmap_load(l1));
@@ -1357,7 +1357,7 @@ pmap_alloc_l2(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
if (l1 != NULL && (pmap_load(l1) & PTE_RWX) == 0) {
/* Add a reference to the L2 page. */
l2pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l1)));
-l2pg->wire_count++;
+l2pg->ref_count++;
} else {
/* Allocate a L2 page. */
l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
@@ -1393,7 +1393,7 @@ pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
if (l2 != NULL && pmap_load(l2) != 0) {
phys = PTE_TO_PHYS(pmap_load(l2));
m = PHYS_TO_VM_PAGE(phys);
-m->wire_count++;
+m->ref_count++;
} else {
/*
* Here if the pte page isn't mapped, or if it has been
@@ -2068,9 +2068,9 @@ pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
("pmap_remove_l2: l3 page not promoted"));
pmap_resident_count_dec(pmap, 1);
-KASSERT(ml3->wire_count == Ln_ENTRIES,
-("pmap_remove_l2: l3 page wire count error"));
-ml3->wire_count = 1;
+KASSERT(ml3->ref_count == Ln_ENTRIES,
+("pmap_remove_l2: l3 page ref count error"));
+ml3->ref_count = 1;
vm_page_unwire_noq(ml3);
pmap_add_delayed_free_list(ml3, free, FALSE);
}
@@ -2487,7 +2487,7 @@ pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
return (false);
}
if (va < VM_MAXUSER_ADDRESS) {
-mpte->wire_count = Ln_ENTRIES;
+mpte->ref_count = Ln_ENTRIES;
pmap_resident_count_inc(pmap, 1);
}
}
@@ -2695,7 +2695,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
l3 = pmap_l2_to_l3(l2, va);
if (va < VM_MAXUSER_ADDRESS) {
mpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2)));
-mpte->wire_count++;
+mpte->ref_count++;
}
} else if (va < VM_MAXUSER_ADDRESS) {
nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
@@ -2775,8 +2775,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* Remove the extra PT page reference.
*/
if (mpte != NULL) {
-mpte->wire_count--;
-KASSERT(mpte->wire_count > 0,
+mpte->ref_count--;
+KASSERT(mpte->ref_count > 0,
("pmap_enter: missing reference to page table page,"
" va: 0x%lx", va));
}
@@ -2878,7 +2878,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
#if VM_NRESERVLEVEL > 0
-if (mpte != NULL && mpte->wire_count == Ln_ENTRIES &&
+if (mpte != NULL && mpte->ref_count == Ln_ENTRIES &&
pmap_ps_enabled(pmap) &&
(m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0)
@@ -2955,10 +2955,10 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
l2 = &l2[pmap_l2_index(va)];
if ((oldl2 = pmap_load(l2)) != 0) {
-KASSERT(l2pg->wire_count > 1,
-("pmap_enter_l2: l2pg's wire count is too low"));
+KASSERT(l2pg->ref_count > 1,
+("pmap_enter_l2: l2pg's ref count is too low"));
if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
-l2pg->wire_count--;
+l2pg->ref_count--;
CTR2(KTR_PMAP,
"pmap_enter_l2: failure for va %#lx in pmap %p",
va, pmap);
@@ -3133,7 +3133,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
l2pindex = pmap_l2_pindex(va);
if (mpte && (mpte->pindex == l2pindex)) {
-mpte->wire_count++;
+mpte->ref_count++;
} else {
/*
* Get the l2 entry
@@ -3149,7 +3149,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if (l2 != NULL && pmap_load(l2) != 0) {
phys = PTE_TO_PHYS(pmap_load(l2));
mpte = PHYS_TO_VM_PAGE(phys);
-mpte->wire_count++;
+mpte->ref_count++;
} else {
/*
* Pass NULL instead of the PV list lock
@@ -3170,7 +3170,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
panic("pmap_enter_quick_locked: No l3");
if (pmap_load(l3) != 0) {
if (mpte != NULL) {
-mpte->wire_count--;
+mpte->ref_count--;
mpte = NULL;
}
return (mpte);
@@ -3564,9 +3564,9 @@ pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entry_t pv,
KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
("pmap_remove_pages: pte page not promoted"));
pmap_resident_count_dec(pmap, 1);
-KASSERT(mpte->wire_count == Ln_ENTRIES,
-("pmap_remove_pages: pte page wire count error"));
-mpte->wire_count = 0;
+KASSERT(mpte->ref_count == Ln_ENTRIES,
+("pmap_remove_pages: pte page ref count error"));
+mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, free, FALSE);
}
} else {

View File

@@ -101,6 +101,9 @@
* annotated below with two of these locks, then holding either lock is
* sufficient for read access, but both locks are required for write
* access. An annotation of (C) indicates that the field is immutable.
+* An annotation of (A) indicates that modifications to the field must
+* be atomic. Accesses to such fields may require additional
+* synchronization depending on the context.
*
* In contrast, the synchronization of accesses to the page's
* dirty field is machine dependent (M). In the
@@ -207,10 +210,7 @@ struct vm_page {
vm_pindex_t pindex; /* offset into object (O,P) */
vm_paddr_t phys_addr; /* physical address of page (C) */
struct md_page md; /* machine dependent stuff */
-union {
-u_int wire_count;
-u_int ref_count; /* page references */
-};
+u_int ref_count; /* page references (A) */
volatile u_int busy_lock; /* busy owners lock */
uint16_t flags; /* page PG_* flags (P) */
uint8_t order; /* index of the buddy queue (F) */

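The new (A) annotation in this hunk is what the whole rename prepares for: ref_count is to be modified with atomic operations (in the kernel, the atomic(9) primitives such as atomic_fetchadd_int()). A rough user-space analogue with C11 atomics and a hypothetical page type:

#include <stdatomic.h>
#include <stdbool.h>

struct fake_page {
	_Atomic unsigned int ref_count;	/* page references (A) */
};

/* Take one reference. */
static void
fake_page_ref(struct fake_page *m)
{
	atomic_fetch_add_explicit(&m->ref_count, 1, memory_order_acq_rel);
}

/* Drop one reference; true when this was the last one. */
static bool
fake_page_unref(struct fake_page *m)
{
	return (atomic_fetch_sub_explicit(&m->ref_count, 1,
	    memory_order_acq_rel) == 1);
}

int
main(void)
{
	struct fake_page m = { 1 };

	fake_page_ref(&m);
	(void)fake_page_unref(&m);
	return (fake_page_unref(&m) ? 0 : 1);
}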
View File

@@ -392,7 +392,7 @@ domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
* pte write and clean while the lock is
* dropped.
*/
-m->wire_count++;
+m->ref_count++;
sfp = NULL;
ptep = domain_pgtbl_map_pte(domain, base, lvl - 1,
@@ -400,7 +400,7 @@ domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
if (ptep == NULL) {
KASSERT(m->pindex != 0,
("loosing root page %p", domain));
-m->wire_count--;
+m->ref_count--;
dmar_pgfree(domain->pgtbl_obj, m->pindex,
flags);
return (NULL);
@@ -408,8 +408,8 @@ domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
VM_PAGE_TO_PHYS(m));
dmar_flush_pte_to_ram(domain->dmar, ptep);
-sf_buf_page(sfp)->wire_count += 1;
-m->wire_count--;
+sf_buf_page(sfp)->ref_count += 1;
+m->ref_count--;
dmar_unmap_pgtbl(sfp);
/* Only executed once. */
goto retry;
@@ -489,7 +489,7 @@ domain_map_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
(superpage ? DMAR_PTE_SP : 0));
dmar_flush_pte_to_ram(domain->dmar, pte);
-sf_buf_page(sf)->wire_count += 1;
+sf_buf_page(sf)->ref_count += 1;
}
if (sf != NULL)
dmar_unmap_pgtbl(sf);
@@ -587,8 +587,8 @@ domain_unmap_clear_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
dmar_unmap_pgtbl(*sf);
*sf = NULL;
}
-m->wire_count--;
-if (m->wire_count != 0)
+m->ref_count--;
+if (m->ref_count != 0)
return;
KASSERT(lvl != 0,
("lost reference (lvl) on root pg domain %p base %jx lvl %d",
@@ -701,7 +701,7 @@ domain_alloc_pgtbl(struct dmar_domain *domain)
m = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_WAITOK |
DMAR_PGF_ZERO | DMAR_PGF_OBJL);
/* No implicit free of the top level page table page. */
-m->wire_count = 1;
+m->ref_count = 1;
DMAR_DOMAIN_PGUNLOCK(domain);
DMAR_LOCK(domain->dmar);
domain->flags |= DMAR_DOMAIN_PGTBL_INITED;
@@ -731,10 +731,10 @@ domain_free_pgtbl(struct dmar_domain *domain)
return;
}
-/* Obliterate wire_counts */
+/* Obliterate ref_counts */
VM_OBJECT_ASSERT_WLOCKED(obj);
for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m))
-m->wire_count = 0;
+m->ref_count = 0;
VM_OBJECT_WUNLOCK(obj);
vm_object_deallocate(obj);
}
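In the DMAR hunks ref_count does double duty, which domain_pgtbl_map_pte() shows: a transient reference pins a freshly allocated page-table page across a step that can drop the lock, and each PTE written into a page adds a durable reference to that page. A condensed sketch of that hand-off; every name here is hypothetical:

typedef unsigned int u_int;

struct fake_page {
	u_int ref_count;
};

/*
 * Models the flow in domain_pgtbl_map_pte(): pin the new page 'm',
 * map its parent (which may drop the lock), then either install the
 * PTE, crediting the parent page, or back out entirely.
 */
static int
fake_install_pgtbl_page(struct fake_page *m, struct fake_page *parent,
    int parent_mapped)
{
	m->ref_count++;			/* transient pin on the new page */
	if (!parent_mapped) {
		m->ref_count--;		/* back out; caller frees 'm' */
		return (-1);
	}
	parent->ref_count++;		/* a valid PTE was written into
					   the parent page table page */
	m->ref_count--;			/* drop the transient pin */
	return (0);
}

int
main(void)
{
	struct fake_page m = { 0 }, parent = { 1 };

	return (fake_install_pgtbl_page(&m, &parent, 1) == 0 ? 0 : 1);
}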