- Move the PG_UNMANAGED flag from m->flags to m->oflags, renaming the flag
  to VPO_UNMANAGED (and also making the flag protected by the vm object
  lock, instead of vm page queue lock).
- Mark the fake pages with both PG_FICTITIOUS (as it is now) and
  VPO_UNMANAGED. As a consequence, pmap code now can use just
  VPO_UNMANAGED to decide whether the page is unmanaged.

Reviewed by:    alc
Tested by:      pho (x86, previous version), marius (sparc64),
                marcel (arm, ia64, powerpc), ray (mips)
Sponsored by:   The FreeBSD Foundation
Approved by:    re (bz)
parent 2d49ef5934
commit d98d0ce27a
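The pmap-visible effect is easiest to see side by side. Below is a minimal, self-contained C sketch of the old and new unmanaged tests; the flag values and the tiny struct are hypothetical stand-ins for illustration, not the kernel's actual definitions:

    #include <stdio.h>

    /* Hypothetical stand-in values; the real bits live in vm_page.h. */
    #define PG_FICTITIOUS 0x0004  /* m->flags: physically fake page */
    #define PG_UNMANAGED  0x0800  /* m->flags: old home of the bit */
    #define VPO_UNMANAGED 0x0004  /* m->oflags: new home of the bit */

    struct vm_page { int flags; int oflags; };

    /* Old test: two bits in m->flags, under the page queue lock. */
    static int old_is_unmanaged(const struct vm_page *m)
    {
        return ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0);
    }

    /*
     * New test: a single bit in m->oflags, under the VM object lock.
     * Fake pages now carry both PG_FICTITIOUS and VPO_UNMANAGED, so
     * this one check suffices in pmap code.
     */
    static int new_is_unmanaged(const struct vm_page *m)
    {
        return ((m->oflags & VPO_UNMANAGED) != 0);
    }

    int main(void)
    {
        struct vm_page fake = { PG_FICTITIOUS, VPO_UNMANAGED };

        /* Both predicates agree on a fake page: prints "1 1". */
        printf("%d %d\n", old_is_unmanaged(&fake), new_is_unmanaged(&fake));
        return (0);
    }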
@@ -2320,7 +2320,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
     va_last = va + NBPDR - PAGE_SIZE;
     do {
         m++;
-        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
             ("pmap_pv_demote_pde: page %p is not managed", m));
         va += PAGE_SIZE;
         pmap_insert_entry(pmap, va, m);
@@ -2847,7 +2847,7 @@ pmap_remove_all(vm_page_t m)
     vm_offset_t va;
     vm_page_t free;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_all: page %p is not managed", m));
     free = NULL;
     vm_page_lock_queues();
@@ -3194,8 +3194,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
         ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
         va));
-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-        (m->oflags & VPO_BUSY) != 0,
+    KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
+        VM_OBJECT_LOCKED(m->object),
         ("pmap_enter: page %p is not busy", m));

     mpte = NULL;
@@ -3276,7 +3276,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     /*
      * Enter on the PV list if part of our managed memory.
      */
-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+    if ((m->oflags & VPO_UNMANAGED) == 0) {
         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
             ("pmap_enter: managed mapping within the clean submap"));
         if (pv == NULL)
@@ -3389,7 +3389,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
     }
     newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
         PG_PS | PG_V;
-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+    if ((m->oflags & VPO_UNMANAGED) == 0) {
         newpde |= PG_MANAGED;

         /*
@@ -3498,7 +3498,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_paddr_t pa;

     KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
-        (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+        (m->oflags & VPO_UNMANAGED) != 0,
         ("pmap_enter_quick_locked: managed mapping within the clean submap"));
     mtx_assert(&vm_page_queue_mtx, MA_OWNED);
     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -3556,7 +3556,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     /*
      * Enter on the PV list if part of our managed memory.
      */
-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+    if ((m->oflags & VPO_UNMANAGED) == 0 &&
         !pmap_try_insert_pv_entry(pmap, va, m)) {
         if (mpte != NULL) {
             free = NULL;
@@ -3581,7 +3581,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     /*
      * Now validate mapping with RO protection
      */
-    if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         pte_store(pte, pa | PG_V | PG_U);
     else
         pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
@@ -3958,7 +3958,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
     int loops = 0;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_page_exists_quick: page %p is not managed", m));
     rv = FALSE;
     vm_page_lock_queues();
@@ -3999,7 +3999,7 @@ pmap_page_wired_mappings(vm_page_t m)
     int count;

     count = 0;
-    if ((m->flags & PG_FICTITIOUS) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (count);
     vm_page_lock_queues();
     count = pmap_pvh_wired_mappings(&m->md, count);
@@ -4041,7 +4041,7 @@ pmap_page_is_mapped(vm_page_t m)
 {
     boolean_t rv;

-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (FALSE);
     vm_page_lock_queues();
     rv = !TAILQ_EMPTY(&m->md.pv_list) ||
@@ -4199,7 +4199,7 @@ pmap_is_modified(vm_page_t m)
 {
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_modified: page %p is not managed", m));

     /*
@@ -4280,7 +4280,7 @@ pmap_is_referenced(vm_page_t m)
 {
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_referenced: page %p is not managed", m));
     vm_page_lock_queues();
     rv = pmap_is_referenced_pvh(&m->md) ||
@@ -4328,7 +4328,7 @@ pmap_remove_write(vm_page_t m)
     pt_entry_t oldpte, *pte;
     vm_offset_t va;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_write: page %p is not managed", m));

     /*
@@ -4397,7 +4397,7 @@ pmap_ts_referenced(vm_page_t m)
     vm_offset_t va;
     int rtval = 0;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_ts_referenced: page %p is not managed", m));
     pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
     vm_page_lock_queues();
@@ -4471,7 +4471,7 @@ pmap_clear_modify(vm_page_t m)
     pt_entry_t oldpte, *pte;
     vm_offset_t va;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_modify: page %p is not managed", m));
     VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -4548,7 +4548,7 @@ pmap_clear_reference(vm_page_t m)
     pt_entry_t *pte;
     vm_offset_t va;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_reference: page %p is not managed", m));
     vm_page_lock_queues();
     pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -3120,7 +3120,7 @@ pmap_remove_all(vm_page_t m)
     pmap_t curpm;
     int flags = 0;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_all: page %p is not managed", m));
     if (TAILQ_EMPTY(&m->md.pv_list))
         return;
@@ -3242,7 +3242,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
             PTE_SYNC(ptep);

             if (pg != NULL) {
-                if (!(pg->flags & PG_UNMANAGED)) {
+                if (!(pg->oflags & VPO_UNMANAGED)) {
                     f = pmap_modify_pv(pg, pm, sva,
                         PVF_WRITE, 0);
                     vm_page_dirty(pg);
@@ -3327,8 +3327,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         pa = systempage.pv_pa;
         m = NULL;
     } else {
-        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-            (m->oflags & VPO_BUSY) != 0 || (flags & M_NOWAIT) != 0,
+        KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
+            (flags & M_NOWAIT) != 0,
             ("pmap_enter_locked: page %p is not busy", m));
         pa = VM_PAGE_TO_PHYS(m);
     }
@@ -3417,7 +3417,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     if (prot & VM_PROT_WRITE) {
         npte |= L2_S_PROT_W;
         if (m != NULL &&
-            (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+            (m->oflags & VPO_UNMANAGED) == 0)
             vm_page_flag_set(m, PG_WRITEABLE);
     }
     npte |= pte_l2_s_cache_mode;
@@ -3480,36 +3480,36 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
              * this physical page is not/is already mapped.
              */

-            if (m && ((m->flags & PG_FICTITIOUS) ||
-                ((m->flags & PG_UNMANAGED) &&
+            if (m && (m->oflags & VPO_UNMANAGED) &&
                 !m->md.pv_kva &&
-                TAILQ_EMPTY(&m->md.pv_list)))) {
+                TAILQ_EMPTY(&m->md.pv_list)) {
                 pmap_free_pv_entry(pve);
                 pve = NULL;
             }
-        } else if (m && !(m->flags & PG_FICTITIOUS) &&
-            (!(m->flags & PG_UNMANAGED) || m->md.pv_kva ||
+        } else if (m &&
+            (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva ||
             !TAILQ_EMPTY(&m->md.pv_list)))
             pve = pmap_get_pv_entry();
-    } else if (m && !(m->flags & PG_FICTITIOUS) &&
-        (!(m->flags & PG_UNMANAGED) || m->md.pv_kva ||
+    } else if (m &&
+        (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva ||
         !TAILQ_EMPTY(&m->md.pv_list)))
         pve = pmap_get_pv_entry();

-    if (m && !(m->flags & PG_FICTITIOUS)) {
-        KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
-            ("pmap_enter: managed mapping within the clean submap"));
-        if (m->flags & PG_UNMANAGED) {
+    if (m) {
+        if ((m->oflags & VPO_UNMANAGED)) {
             if (!TAILQ_EMPTY(&m->md.pv_list) ||
-                m->md.pv_kva) {
+                m->md.pv_kva) {
                 KASSERT(pve != NULL, ("No pv"));
                 nflags |= PVF_UNMAN;
                 pmap_enter_pv(m, pve, pmap, va, nflags);
             } else
                 m->md.pv_kva = va;
         } else {
-            KASSERT(pve != NULL, ("No pv"));
-            pmap_enter_pv(m, pve, pmap, va, nflags);
+            KASSERT(va < kmi.clean_sva ||
+                va >= kmi.clean_eva,
+                ("pmap_enter: managed mapping within the clean submap"));
+            KASSERT(pve != NULL, ("No pv"));
+            pmap_enter_pv(m, pve, pmap, va, nflags);
         }
     }
 }
@@ -4423,7 +4423,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
     int loops = 0;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_page_exists_quick: page %p is not managed", m));
     rv = FALSE;
     vm_page_lock_queues();
@@ -4453,7 +4453,7 @@ pmap_page_wired_mappings(vm_page_t m)
     int count;

     count = 0;
-    if ((m->flags & PG_FICTITIOUS) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (count);
     vm_page_lock_queues();
     TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
@@ -4472,7 +4472,7 @@ int
 pmap_ts_referenced(vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_ts_referenced: page %p is not managed", m));
     return (pmap_clearbit(m, PVF_REF));
 }
@@ -4482,7 +4482,7 @@ boolean_t
 pmap_is_modified(vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_modified: page %p is not managed", m));
     if (m->md.pvh_attrs & PVF_MOD)
         return (TRUE);
@@ -4498,7 +4498,7 @@ void
 pmap_clear_modify(vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_modify: page %p is not managed", m));
     VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -4526,7 +4526,7 @@ boolean_t
 pmap_is_referenced(vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_referenced: page %p is not managed", m));
     return ((m->md.pvh_attrs & PVF_REF) != 0);
 }
@@ -4540,7 +4540,7 @@ void
 pmap_clear_reference(vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_reference: page %p is not managed", m));
     if (m->md.pvh_attrs & PVF_REF)
         pmap_clearbit(m, PVF_REF);
@@ -4554,7 +4554,7 @@ void
 pmap_remove_write(vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_write: page %p is not managed", m));

     /*
@@ -2400,7 +2400,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
     va_last = va + NBPDR - PAGE_SIZE;
     do {
         m++;
-        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
             ("pmap_pv_demote_pde: page %p is not managed", m));
         va += PAGE_SIZE;
         pmap_insert_entry(pmap, va, m);
@@ -2927,7 +2927,7 @@ pmap_remove_all(vm_page_t m)
     vm_offset_t va;
     vm_page_t free;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_all: page %p is not managed", m));
     free = NULL;
     vm_page_lock_queues();
@@ -3299,8 +3299,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
         ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
         va));
-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-        (m->oflags & VPO_BUSY) != 0,
+    KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
+        VM_OBJECT_LOCKED(m->object),
         ("pmap_enter: page %p is not busy", m));

     mpte = NULL;
@@ -3388,7 +3388,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     /*
      * Enter on the PV list if part of our managed memory.
      */
-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+    if ((m->oflags & VPO_UNMANAGED) == 0) {
         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
             ("pmap_enter: managed mapping within the clean submap"));
         if (pv == NULL)
@@ -3498,7 +3498,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
     }
     newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
         PG_PS | PG_V;
-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+    if ((m->oflags & VPO_UNMANAGED) == 0) {
         newpde |= PG_MANAGED;

         /*
@@ -3604,7 +3604,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_page_t free;

     KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
-        (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+        (m->oflags & VPO_UNMANAGED) != 0,
         ("pmap_enter_quick_locked: managed mapping within the clean submap"));
     mtx_assert(&vm_page_queue_mtx, MA_OWNED);
     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -3667,7 +3667,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     /*
      * Enter on the PV list if part of our managed memory.
      */
-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+    if ((m->oflags & VPO_UNMANAGED) == 0 &&
         !pmap_try_insert_pv_entry(pmap, va, m)) {
         if (mpte != NULL) {
             free = NULL;
@@ -3695,7 +3695,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     /*
      * Now validate mapping with RO protection
      */
-    if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         pte_store(pte, pa | PG_V | PG_U);
     else
         pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
@@ -4096,7 +4096,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
     int loops = 0;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_page_exists_quick: page %p is not managed", m));
     rv = FALSE;
     vm_page_lock_queues();
@@ -4137,7 +4137,7 @@ pmap_page_wired_mappings(vm_page_t m)
     int count;

     count = 0;
-    if ((m->flags & PG_FICTITIOUS) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (count);
     vm_page_lock_queues();
     count = pmap_pvh_wired_mappings(&m->md, count);
@@ -4181,7 +4181,7 @@ pmap_page_is_mapped(vm_page_t m)
 {
     boolean_t rv;

-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (FALSE);
     vm_page_lock_queues();
     rv = !TAILQ_EMPTY(&m->md.pv_list) ||
@@ -4341,7 +4341,7 @@ pmap_is_modified(vm_page_t m)
 {
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_modified: page %p is not managed", m));

     /*
@@ -4424,7 +4424,7 @@ pmap_is_referenced(vm_page_t m)
 {
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_referenced: page %p is not managed", m));
     vm_page_lock_queues();
     rv = pmap_is_referenced_pvh(&m->md) ||
@@ -4474,7 +4474,7 @@ pmap_remove_write(vm_page_t m)
     pt_entry_t oldpte, *pte;
     vm_offset_t va;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_write: page %p is not managed", m));

     /*
@@ -4550,7 +4550,7 @@ pmap_ts_referenced(vm_page_t m)
     vm_offset_t va;
     int rtval = 0;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_ts_referenced: page %p is not managed", m));
     pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
     vm_page_lock_queues();
@@ -4626,7 +4626,7 @@ pmap_clear_modify(vm_page_t m)
     pt_entry_t oldpte, *pte;
     vm_offset_t va;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_modify: page %p is not managed", m));
     VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -4715,7 +4715,7 @@ pmap_clear_reference(vm_page_t m)
     pt_entry_t *pte;
     vm_offset_t va;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_reference: page %p is not managed", m));
     vm_page_lock_queues();
     sched_pin();
@@ -2430,7 +2430,7 @@ pmap_remove_all(vm_page_t m)
     pt_entry_t *pte, tpte;
     vm_page_t free;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_all: page %p is not managed", m));
     free = NULL;
     vm_page_lock_queues();
@@ -2616,8 +2616,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
         ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
         va));
-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-        (m->oflags & VPO_BUSY) != 0,
+    KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
         ("pmap_enter: page %p is not busy", m));

     mpte = NULL;
@@ -2715,7 +2714,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     /*
      * Enter on the PV list if part of our managed memory.
      */
-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+    if ((m->oflags & VPO_UNMANAGED) == 0) {
         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
             ("pmap_enter: managed mapping within the clean submap"));
         if (pv == NULL)
@@ -2915,7 +2914,7 @@ pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_o
     multicall_entry_t *mcl = *mclpp;

     KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
-        (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+        (m->oflags & VPO_UNMANAGED) != 0,
         ("pmap_enter_quick_locked: managed mapping within the clean submap"));
     mtx_assert(&vm_page_queue_mtx, MA_OWNED);
     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -2979,7 +2978,7 @@ pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_o
     /*
      * Enter on the PV list if part of our managed memory.
      */
-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+    if ((m->oflags & VPO_UNMANAGED) == 0 &&
         !pmap_try_insert_pv_entry(pmap, va, m)) {
         if (mpte != NULL) {
             free = NULL;
@@ -3008,7 +3007,7 @@ pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_o
     /*
      * Now validate mapping with RO protection
      */
-    if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         pte_store(pte, pa | PG_V | PG_U);
     else
         pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
@@ -3016,7 +3015,7 @@ pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_o
     /*
      * Now validate mapping with RO protection
      */
-    if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         pa = xpmap_ptom(pa | PG_V | PG_U);
     else
         pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED);
@@ -3403,7 +3402,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
     int loops = 0;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_page_exists_quick: page %p is not managed", m));
     rv = FALSE;
     vm_page_lock_queues();
@@ -3435,7 +3434,7 @@ pmap_page_wired_mappings(vm_page_t m)
     int count;

     count = 0;
-    if ((m->flags & PG_FICTITIOUS) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (count);
     vm_page_lock_queues();
     sched_pin();
@@ -3461,7 +3460,7 @@ pmap_page_is_mapped(vm_page_t m)
 {
     boolean_t rv;

-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (FALSE);
     vm_page_lock_queues();
     rv = !TAILQ_EMPTY(&m->md.pv_list) ||
@@ -3600,7 +3599,7 @@ pmap_is_modified(vm_page_t m)
     pmap_t pmap;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_modified: page %p is not managed", m));
     rv = FALSE;

@@ -3671,7 +3670,7 @@ pmap_is_referenced(vm_page_t m)
     pmap_t pmap;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_referenced: page %p is not managed", m));
     rv = FALSE;
     vm_page_lock_queues();
@@ -3732,7 +3731,7 @@ pmap_remove_write(vm_page_t m)
     pmap_t pmap;
     pt_entry_t oldpte, *pte;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_write: page %p is not managed", m));

     /*
@@ -3798,7 +3797,7 @@ pmap_ts_referenced(vm_page_t m)
     pt_entry_t *pte;
     int rtval = 0;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_ts_referenced: page %p is not managed", m));
     vm_page_lock_queues();
     sched_pin();
@@ -3840,7 +3839,7 @@ pmap_clear_modify(vm_page_t m)
     pmap_t pmap;
     pt_entry_t *pte;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_modify: page %p is not managed", m));
     VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -3886,7 +3885,7 @@ pmap_clear_reference(vm_page_t m)
     pmap_t pmap;
     pt_entry_t *pte;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_reference: page %p is not managed", m));
     vm_page_lock_queues();
     sched_pin();
@@ -1442,7 +1442,7 @@ pmap_remove_all(vm_page_t m)
     pmap_t oldpmap;
     pv_entry_t pv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_all: page %p is not managed", m));
     vm_page_lock_queues();
     while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
@@ -1548,8 +1548,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,

     va &= ~PAGE_MASK;
     KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-        (m->oflags & VPO_BUSY) != 0,
+    KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
         ("pmap_enter: page %p is not busy", m));

     /*
@@ -1619,7 +1618,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     /*
      * Enter on the PV list if part of our managed memory.
      */
-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+    if ((m->oflags & VPO_UNMANAGED) == 0) {
         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
             ("pmap_enter: managed mapping within the clean submap"));
         pmap_insert_entry(pmap, va, m);
@@ -1720,7 +1719,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     boolean_t managed;

     KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
-        (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+        (m->oflags & VPO_UNMANAGED) != 0,
         ("pmap_enter_quick_locked: managed mapping within the clean submap"));
     mtx_assert(&vm_page_queue_mtx, MA_OWNED);
     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1730,7 +1729,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,

     if (!pmap_present(pte)) {
         /* Enter on the PV list if the page is managed. */
-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+        if ((m->oflags & VPO_UNMANAGED) == 0) {
             if (!pmap_try_insert_pv_entry(pmap, va, m)) {
                 pmap_free_pte(pte, va);
                 return;
@@ -1900,7 +1899,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
     int loops = 0;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_page_exists_quick: page %p is not managed", m));
     rv = FALSE;
     vm_page_lock_queues();
@@ -1932,7 +1931,7 @@ pmap_page_wired_mappings(vm_page_t m)
     int count;

     count = 0;
-    if ((m->flags & PG_FICTITIOUS) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (count);
     vm_page_lock_queues();
     TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2010,7 +2009,7 @@ pmap_ts_referenced(vm_page_t m)
     pv_entry_t pv;
     int count = 0;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_ts_referenced: page %p is not managed", m));
     vm_page_lock_queues();
     TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2044,7 +2043,7 @@ pmap_is_modified(vm_page_t m)
     pv_entry_t pv;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_modified: page %p is not managed", m));
     rv = FALSE;

@@ -2104,7 +2103,7 @@ pmap_is_referenced(vm_page_t m)
     pv_entry_t pv;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_referenced: page %p is not managed", m));
     rv = FALSE;
     vm_page_lock_queues();
@@ -2133,7 +2132,7 @@ pmap_clear_modify(vm_page_t m)
     pmap_t oldpmap;
     pv_entry_t pv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_modify: page %p is not managed", m));
     VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -2174,7 +2173,7 @@ pmap_clear_reference(vm_page_t m)
     pmap_t oldpmap;
     pv_entry_t pv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_reference: page %p is not managed", m));
     vm_page_lock_queues();
     TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2203,7 +2202,7 @@ pmap_remove_write(vm_page_t m)
     pv_entry_t pv;
     vm_prot_t prot;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_write: page %p is not managed", m));

     /*
@@ -1708,7 +1708,7 @@ pmap_remove_all(vm_page_t m)
     pv_entry_t pv;
     pt_entry_t *pte, tpte;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_all: page %p is not managed", m));
     vm_page_lock_queues();

@@ -1863,8 +1863,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,

     va &= ~PAGE_MASK;
     KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-        (m->oflags & VPO_BUSY) != 0,
+    KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
         ("pmap_enter: page %p is not busy", m));

     mpte = NULL;
@@ -1952,7 +1951,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
      * raise IPL while manipulating pv_table since pmap_enter can be
      * called at interrupt time.
      */
-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+    if ((m->oflags & VPO_UNMANAGED) == 0) {
         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
             ("pmap_enter: managed mapping within the clean submap"));
         if (pv == NULL)
@@ -2067,7 +2066,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_paddr_t pa;

     KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
-        (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+        (m->oflags & VPO_UNMANAGED) != 0,
         ("pmap_enter_quick_locked: managed mapping within the clean submap"));
     mtx_assert(&vm_page_queue_mtx, MA_OWNED);
     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -2129,7 +2128,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     /*
      * Enter on the PV list if part of our managed memory.
      */
-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+    if ((m->oflags & VPO_UNMANAGED) == 0 &&
         !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
         if (mpte != NULL) {
             pmap_unwire_pte_hold(pmap, va, mpte);
@@ -2464,7 +2463,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
     int loops = 0;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_page_exists_quick: page %p is not managed", m));
     rv = FALSE;
     vm_page_lock_queues();
@@ -2558,7 +2557,7 @@ pmap_testbit(vm_page_t m, int bit)
     pt_entry_t *pte;
     boolean_t rv = FALSE;

-    if (m->flags & PG_FICTITIOUS)
+    if (m->oflags & VPO_UNMANAGED)
         return (rv);

     if (TAILQ_FIRST(&m->md.pv_list) == NULL)
@@ -2585,7 +2584,7 @@ pmap_changebit(vm_page_t m, int bit, boolean_t setem)
     pv_entry_t pv;
     pt_entry_t *pte;

-    if (m->flags & PG_FICTITIOUS)
+    if (m->oflags & VPO_UNMANAGED)
         return;

     mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -2634,7 +2633,7 @@ pmap_page_wired_mappings(vm_page_t m)
     int count;

     count = 0;
-    if ((m->flags & PG_FICTITIOUS) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (count);
     vm_page_lock_queues();
     TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2659,7 +2658,7 @@ pmap_remove_write(vm_page_t m)
     vm_offset_t va;
     pt_entry_t *pte;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_write: page %p is not managed", m));

     /*
@@ -2699,7 +2698,7 @@ int
 pmap_ts_referenced(vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_ts_referenced: page %p is not managed", m));
     if (m->md.pv_flags & PV_TABLE_REF) {
         vm_page_lock_queues();
@@ -2721,7 +2720,7 @@ pmap_is_modified(vm_page_t m)
 {
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_modified: page %p is not managed", m));

     /*
@@ -2775,7 +2774,7 @@ void
 pmap_clear_modify(vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_modify: page %p is not managed", m));
     VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -2806,7 +2805,7 @@ boolean_t
 pmap_is_referenced(vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_referenced: page %p is not managed", m));
     return ((m->md.pv_flags & PV_TABLE_REF) != 0);
 }
@@ -2820,7 +2819,7 @@ void
 pmap_clear_reference(vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_reference: page %p is not managed", m));
     vm_page_lock_queues();
     if (m->md.pv_flags & PV_TABLE_REF) {
@@ -3168,7 +3167,7 @@ page_is_managed(vm_paddr_t pa)
         m = PHYS_TO_VM_PAGE(pa);
         if (m == NULL)
             return (0);
-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+        if ((m->oflags & VPO_UNMANAGED) == 0)
             return (1);
     }
     return (0);
@@ -3181,7 +3180,7 @@ init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)

     if (!(prot & VM_PROT_WRITE))
         rw = PTE_V | PTE_RO | PTE_C_CACHE;
-    else if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+    else if ((m->oflags & VPO_UNMANAGED) == 0) {
         if ((m->md.pv_flags & PV_TABLE_MOD) != 0)
             rw = PTE_V | PTE_D | PTE_C_CACHE;
         else
@@ -1073,12 +1073,12 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     if (pmap_bootstrapped)
         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-        (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+    KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
+        VM_OBJECT_LOCKED(m->object),
         ("moea_enter_locked: page %p is not busy", m));

     /* XXX change the pvo head for fake pages */
-    if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) {
+    if ((m->oflags & VPO_UNMANAGED) != 0) {
         pvo_flags &= ~PVO_MANAGED;
         pvo_head = &moea_pvo_kunmanaged;
         zone = moea_upvo_zone;
@@ -1088,7 +1088,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
      * If this is a managed page, and it's the first reference to the page,
      * clear the execness of the page.  Otherwise fetch the execness.
      */
-    if ((pg != NULL) && ((m->flags & PG_FICTITIOUS) == 0)) {
+    if ((pg != NULL) && ((m->oflags & VPO_UNMANAGED) == 0)) {
         if (LIST_EMPTY(pvo_head)) {
             moea_attr_clear(pg, PTE_EXEC);
         } else {
@@ -1101,7 +1101,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     if (prot & VM_PROT_WRITE) {
         pte_lo |= PTE_BW;
         if (pmap_bootstrapped &&
-            (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+            (m->oflags & VPO_UNMANAGED) == 0)
             vm_page_flag_set(m, PG_WRITEABLE);
     } else
         pte_lo |= PTE_BR;
@@ -1112,9 +1112,6 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     if (wired)
         pvo_flags |= PVO_WIRED;

-    if ((m->flags & PG_FICTITIOUS) != 0)
-        pvo_flags |= PVO_FAKE;
-
     error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
         pte_lo, pvo_flags);

@@ -1245,7 +1242,7 @@ boolean_t
 moea_is_referenced(mmu_t mmu, vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea_is_referenced: page %p is not managed", m));
     return (moea_query_bit(m, PTE_REF));
 }
@@ -1254,7 +1251,7 @@ boolean_t
 moea_is_modified(mmu_t mmu, vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea_is_modified: page %p is not managed", m));

     /*
@@ -1286,7 +1283,7 @@ void
 moea_clear_reference(mmu_t mmu, vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea_clear_reference: page %p is not managed", m));
     moea_clear_bit(m, PTE_REF);
 }
@@ -1295,7 +1292,7 @@ void
 moea_clear_modify(mmu_t mmu, vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea_clear_modify: page %p is not managed", m));
     VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -1322,7 +1319,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
     pmap_t pmap;
     u_int lo;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea_remove_write: page %p is not managed", m));

     /*
@@ -1379,7 +1376,7 @@ boolean_t
 moea_ts_referenced(mmu_t mmu, vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea_ts_referenced: page %p is not managed", m));
     return (moea_clear_bit(m, PTE_REF));
 }
@@ -1396,7 +1393,7 @@ moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
     pmap_t pmap;
     u_int lo;

-    if (m->flags & PG_FICTITIOUS) {
+    if ((m->oflags & VPO_UNMANAGED) != 0) {
         m->md.mdpg_cache_attrs = ma;
         return;
     }
@@ -1537,7 +1534,7 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
     struct pvo_entry *pvo;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea_page_exists_quick: page %p is not managed", m));
     loops = 0;
     rv = FALSE;
@@ -1565,7 +1562,7 @@ moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
     int count;

     count = 0;
-    if ((m->flags & PG_FICTITIOUS) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (count);
     vm_page_lock_queues();
     LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
@@ -1928,8 +1925,6 @@ moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
         pvo->pvo_vaddr |= PVO_MANAGED;
     if (bootstrap)
         pvo->pvo_vaddr |= PVO_BOOTSTRAP;
-    if (flags & PVO_FAKE)
-        pvo->pvo_vaddr |= PVO_FAKE;

     moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo);

@@ -1988,7 +1983,7 @@ moea_pvo_remove(struct pvo_entry *pvo, int pteidx)
     /*
      * Save the REF/CHG bits into their cache if the page is managed.
      */
-    if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
+    if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
         struct vm_page *pg;

         pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
@@ -1222,12 +1222,12 @@ moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
     if (pmap_bootstrapped)
         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-        (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+    KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
+        VM_OBJECT_LOCKED(m->object),
         ("moea64_enter_locked: page %p is not busy", m));

     /* XXX change the pvo head for fake pages */
-    if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) {
+    if ((m->oflags & VPO_UNMANAGED) != 0) {
         pvo_flags &= ~PVO_MANAGED;
         pvo_head = &moea64_pvo_kunmanaged;
         zone = moea64_upvo_zone;
@@ -1238,7 +1238,7 @@ moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
     if (prot & VM_PROT_WRITE) {
         pte_lo |= LPTE_BW;
         if (pmap_bootstrapped &&
-            (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+            (m->oflags & VPO_UNMANAGED) == 0)
             vm_page_flag_set(m, PG_WRITEABLE);
     } else
         pte_lo |= LPTE_BR;
@@ -1249,9 +1249,6 @@ moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
     if (wired)
         pvo_flags |= PVO_WIRED;

-    if ((m->flags & PG_FICTITIOUS) != 0)
-        pvo_flags |= PVO_FAKE;
-
     error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
         VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);

@@ -1474,7 +1471,7 @@ boolean_t
 moea64_is_referenced(mmu_t mmu, vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea64_is_referenced: page %p is not managed", m));
     return (moea64_query_bit(mmu, m, PTE_REF));
 }
@@ -1483,7 +1480,7 @@ boolean_t
 moea64_is_modified(mmu_t mmu, vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea64_is_modified: page %p is not managed", m));

     /*
@@ -1515,7 +1512,7 @@ void
 moea64_clear_reference(mmu_t mmu, vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea64_clear_reference: page %p is not managed", m));
     moea64_clear_bit(mmu, m, LPTE_REF);
 }
@@ -1524,7 +1521,7 @@ void
 moea64_clear_modify(mmu_t mmu, vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea64_clear_modify: page %p is not managed", m));
     VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -1551,7 +1548,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
     pmap_t pmap;
     uint64_t lo;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea64_remove_write: page %p is not managed", m));

     /*
@@ -1611,7 +1608,7 @@ boolean_t
 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
 {

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea64_ts_referenced: page %p is not managed", m));
     return (moea64_clear_bit(mmu, m, LPTE_REF));
 }
@@ -1628,7 +1625,7 @@ moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
     pmap_t pmap;
     uint64_t lo;

-    if (m->flags & PG_FICTITIOUS) {
+    if ((m->oflags & VPO_UNMANAGED) != 0) {
         m->md.mdpg_cache_attrs = ma;
         return;
     }
@@ -1763,7 +1760,7 @@ moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
     struct pvo_entry *pvo;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("moea64_page_exists_quick: page %p is not managed", m));
     loops = 0;
     rv = FALSE;
@@ -1791,7 +1788,7 @@ moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
     int count;

     count = 0;
-    if ((m->flags & PG_FICTITIOUS) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (count);
     vm_page_lock_queues();
     LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
@@ -2227,8 +2224,6 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
         pvo->pvo_vaddr |= PVO_MANAGED;
     if (bootstrap)
         pvo->pvo_vaddr |= PVO_BOOTSTRAP;
-    if (flags & PVO_FAKE)
-        pvo->pvo_vaddr |= PVO_FAKE;
     if (flags & PVO_LARGE)
         pvo->pvo_vaddr |= PVO_LARGE;

@@ -2305,7 +2300,7 @@ moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
     /*
      * Save the REF/CHG bits into their cache if the page is managed.
      */
-    if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
+    if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
         struct vm_page *pg;

         pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
@@ -887,13 +887,11 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
      * Insert pv_entry into pv_list for mapped page if part of managed
      * memory.
      */
-    if ((m->flags & PG_FICTITIOUS) == 0) {
-        if ((m->flags & PG_UNMANAGED) == 0) {
-            flags |= PTE_MANAGED;
+    if ((m->oflags & VPO_UNMANAGED) == 0) {
+        flags |= PTE_MANAGED;

-            /* Create and insert pv entry. */
-            pv_insert(pmap, va, m);
-        }
+        /* Create and insert pv entry. */
+        pv_insert(pmap, va, m);
     }

     pmap->pm_stats.resident_count++;
@@ -1562,8 +1560,8 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
         KASSERT((va <= VM_MAXUSER_ADDRESS),
             ("mmu_booke_enter_locked: user pmap, non user va"));
     }
-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-        (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+    KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
+        VM_OBJECT_LOCKED(m->object),
         ("mmu_booke_enter_locked: page %p is not busy", m));

     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1668,7 +1666,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
             if (!su)
                 flags |= PTE_UW;

-            if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+            if ((m->oflags & VPO_UNMANAGED) == 0)
                 vm_page_flag_set(m, PG_WRITEABLE);
         }

@@ -1955,7 +1953,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
     pv_entry_t pv;
     pte_t *pte;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("mmu_booke_remove_write: page %p is not managed", m));

     /*
@@ -2169,7 +2167,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
     pv_entry_t pv;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("mmu_booke_is_modified: page %p is not managed", m));
     rv = FALSE;

@@ -2220,7 +2218,7 @@ mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
     pv_entry_t pv;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("mmu_booke_is_referenced: page %p is not managed", m));
     rv = FALSE;
     vm_page_lock_queues();
@@ -2248,7 +2246,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
     pte_t *pte;
     pv_entry_t pv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("mmu_booke_clear_modify: page %p is not managed", m));
     VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -2300,7 +2298,7 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
     pv_entry_t pv;
     int count;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("mmu_booke_ts_referenced: page %p is not managed", m));
     count = 0;
     vm_page_lock_queues();
@@ -2339,7 +2337,7 @@ mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
     pte_t *pte;
     pv_entry_t pv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("mmu_booke_clear_reference: page %p is not managed", m));
     vm_page_lock_queues();
     TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
@@ -2400,7 +2398,7 @@ mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
     int loops;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("mmu_booke_page_exists_quick: page %p is not managed", m));
     loops = 0;
     rv = FALSE;
@@ -2428,7 +2426,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
     pte_t *pte;
     int count = 0;

-    if ((m->flags & PG_FICTITIOUS) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (count);
     vm_page_lock_queues();
     TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
@@ -127,10 +127,8 @@ LIST_HEAD(pvo_head, pvo_entry);
 #define PVO_EXECUTABLE  0x040UL     /* PVO entry is executable */
 #define PVO_BOOTSTRAP   0x080UL     /* PVO entry allocated during
                                        bootstrap */
-#define PVO_FAKE        0x100UL     /* fictitious phys page */
 #define PVO_LARGE       0x200UL     /* large page */
 #define PVO_VADDR(pvo)          ((pvo)->pvo_vaddr & ~ADDR_POFF)
-#define PVO_ISFAKE(pvo)         ((pvo)->pvo_vaddr & PVO_FAKE)
 #define PVO_PTEGIDX_GET(pvo)    ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
 #define PVO_PTEGIDX_ISSET(pvo)  ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
 #define PVO_PTEGIDX_CLR(pvo)    \
@@ -1390,7 +1390,7 @@ pmap_remove_all(vm_page_t m)
     struct tte *tp;
     vm_offset_t va;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_all: page %p is not managed", m));
     vm_page_lock_queues();
     for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
@@ -1498,13 +1498,13 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 {
     struct tte *tp;
     vm_paddr_t pa;
+    vm_page_t real;
     u_long data;
-    int i;

     mtx_assert(&vm_page_queue_mtx, MA_OWNED);
     PMAP_LOCK_ASSERT(pm, MA_OWNED);
-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-        (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+    KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
+        VM_OBJECT_LOCKED(m->object),
         ("pmap_enter_locked: page %p is not busy", m));
     PMAP_STATS_INC(pmap_nenter);
     pa = VM_PAGE_TO_PHYS(m);
@@ -1514,12 +1514,9 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
      * physical memory, convert to the real backing page.
      */
     if ((m->flags & PG_FICTITIOUS) != 0) {
-        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
-            if (pa >= phys_avail[i] && pa <= phys_avail[i + 1]) {
-                m = PHYS_TO_VM_PAGE(pa);
-                break;
-            }
-        }
+        real = vm_phys_paddr_to_vm_page(pa);
+        if (real != NULL)
+            m = real;
     }

     CTR6(KTR_PMAP,
@@ -1562,7 +1559,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
             tp->tte_data |= TD_SW;
             if (wired)
                 tp->tte_data |= TD_W;
-            if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+            if ((m->oflags & VPO_UNMANAGED) == 0)
                 vm_page_flag_set(m, PG_WRITEABLE);
         } else if ((data & TD_W) != 0)
             vm_page_dirty(m);
@@ -1603,7 +1600,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         data |= TD_P;
         if ((prot & VM_PROT_WRITE) != 0) {
             data |= TD_SW;
-            if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+            if ((m->oflags & VPO_UNMANAGED) == 0)
                 vm_page_flag_set(m, PG_WRITEABLE);
         }
         if (prot & VM_PROT_EXECUTE) {
@@ -1945,7 +1942,7 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
     int loops;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_page_exists_quick: page %p is not managed", m));
     loops = 0;
     rv = FALSE;
@@ -1975,7 +1972,7 @@ pmap_page_wired_mappings(vm_page_t m)
     int count;

     count = 0;
-    if ((m->flags & PG_FICTITIOUS) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (count);
     vm_page_lock_queues();
     TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
@@ -2006,7 +2003,7 @@ pmap_page_is_mapped(vm_page_t m)
     boolean_t rv;

     rv = FALSE;
-    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+    if ((m->oflags & VPO_UNMANAGED) != 0)
         return (rv);
     vm_page_lock_queues();
     TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
@@ -2037,7 +2034,7 @@ pmap_ts_referenced(vm_page_t m)
     u_long data;
     int count;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_ts_referenced: page %p is not managed", m));
     count = 0;
     vm_page_lock_queues();
@@ -2064,7 +2061,7 @@ pmap_is_modified(vm_page_t m)
     struct tte *tp;
     boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_modified: page %p is not managed", m));
     rv = FALSE;

@@ -2117,7 +2114,7 @@ pmap_is_referenced(vm_page_t m)
     struct tte *tp;
    boolean_t rv;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_is_referenced: page %p is not managed", m));
     rv = FALSE;
     vm_page_lock_queues();
@@ -2139,7 +2136,7 @@ pmap_clear_modify(vm_page_t m)
     struct tte *tp;
     u_long data;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_modify: page %p is not managed", m));
     VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -2169,7 +2166,7 @@ pmap_clear_reference(vm_page_t m)
     struct tte *tp;
     u_long data;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_clear_reference: page %p is not managed", m));
     vm_page_lock_queues();
     TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
@@ -2188,7 +2185,7 @@ pmap_remove_write(vm_page_t m)
     struct tte *tp;
     u_long data;

-    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
         ("pmap_remove_write: page %p is not managed", m));

     /*
@@ -173,7 +173,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, u_long sz, u_long data)
 enter:
 	if ((m->flags & PG_FICTITIOUS) == 0) {
 		data |= TD_CP;
-		if ((m->flags & PG_UNMANAGED) == 0) {
+		if ((m->oflags & VPO_UNMANAGED) == 0) {
 			pm->pm_stats.resident_count++;
 			data |= TD_PV;
 		}
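The tsb.c hunk above keeps a separate PG_FICTITIOUS test for cacheability (TD_CP), while the PV decision now needs only VPO_UNMANAGED, because fake pages carry both flags after this change. A minimal sketch of that invariant (page_is_pv_managed() is an illustrative name, not part of the patch or a real KPI):

	static __inline int
	page_is_pv_managed(vm_page_t m)
	{

		/* PG_FICTITIOUS now implies VPO_UNMANAGED, so one test suffices. */
		return ((m->oflags & VPO_UNMANAGED) == 0);
	}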
@@ -210,7 +210,7 @@ kmem_alloc(map, size)
 		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
 		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 		mem->valid = VM_PAGE_BITS_ALL;
-		KASSERT((mem->flags & PG_UNMANAGED) != 0,
+		KASSERT((mem->oflags & VPO_UNMANAGED) != 0,
 		    ("kmem_alloc: page %p is managed", mem));
 	}
 	VM_OBJECT_UNLOCK(kernel_object);
@@ -428,7 +428,7 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
 		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		m->valid = VM_PAGE_BITS_ALL;
-		KASSERT((m->flags & PG_UNMANAGED) != 0,
+		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
 		    ("kmem_malloc: page %p is managed", m));
 	}
 	VM_OBJECT_UNLOCK(kmem_object);
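Both kmem assertions flip from m->flags to m->oflags: unmanaged-ness is now decided from the object type when the page is allocated, so pages grabbed from the kernel objects are expected to be born unmanaged. A condensed sketch, under the assumption that kernel_object is OBJT_PHYS in this tree (kmem_grab_check() is a hypothetical wrapper):

	static void
	kmem_grab_check(vm_pindex_t idx)
	{
		vm_page_t m;

		VM_OBJECT_LOCK(kernel_object);
		m = vm_page_grab(kernel_object, idx,
		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		/* OBJT_PHYS pages now come back with VPO_UNMANAGED set. */
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_grab_check: page %p is managed", m));
		VM_OBJECT_UNLOCK(kernel_object);
	}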
@@ -1087,7 +1087,9 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
 			vm_page_unlock(m);
 			goto unlock_tobject;
 		}
-		KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+		KASSERT((m->flags & PG_FICTITIOUS) == 0,
+		    ("vm_object_madvise: page %p is fictitious", m));
+		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 		    ("vm_object_madvise: page %p is not managed", m));
 		if ((m->oflags & VPO_BUSY) || m->busy) {
 			if (advise == MADV_WILLNEED) {
@@ -483,8 +483,8 @@ vm_page_flag_set(vm_page_t m, unsigned short bits)
 	 * VPO_BUSY.  Currently, this flag is only set by pmap_enter().
 	 */
 	KASSERT((bits & PG_WRITEABLE) == 0 ||
-	    ((m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) == 0 &&
-	    (m->oflags & VPO_BUSY) != 0), ("PG_WRITEABLE and !VPO_BUSY"));
+	    (m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == VPO_BUSY,
+	    ("PG_WRITEABLE and !VPO_BUSY"));
 	m->flags |= bits;
 }
 
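The tightened assertion uses a single mask compare: (m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == VPO_BUSY holds exactly when VPO_UNMANAGED is clear and VPO_BUSY is set, replacing the old test that had to consult both flag words. A standalone illustration (hypothetical helper name):

	static __inline int
	page_is_managed_and_busy(vm_page_t m)
	{

		/* One compare checks both bits: UNMANAGED off, BUSY on. */
		return ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == VPO_BUSY);
	}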
@@ -636,7 +636,7 @@ vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
 	/* Fictitious pages don't use "segind". */
 	m->flags = PG_FICTITIOUS;
 	/* Fictitious pages don't use "order" or "pool". */
-	m->oflags = VPO_BUSY;
+	m->oflags = VPO_BUSY | VPO_UNMANAGED;
 	m->wire_count = 1;
 	pmap_page_set_memattr(m, memattr);
 	return (m);
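After this hunk a fake page carries PG_FICTITIOUS in flags and both VPO_BUSY and VPO_UNMANAGED in oflags. A sketch of a device-pager-style consumer; devobj_fake_page() and the choice of VM_MEMATTR_UNCACHEABLE are illustrative assumptions, not from the patch:

	static vm_page_t
	devobj_fake_page(vm_paddr_t paddr)
	{
		vm_page_t m;

		m = vm_page_getfake(paddr, VM_MEMATTR_UNCACHEABLE);
		/* Pmaps key off VPO_UNMANAGED alone to skip PV tracking. */
		KASSERT((m->flags & PG_FICTITIOUS) != 0 &&
		    (m->oflags & VPO_UNMANAGED) != 0,
		    ("fake page %p lacks fictitious/unmanaged marking", m));
		return (m);
	}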
@@ -896,7 +896,7 @@ vm_page_remove(vm_page_t m)
 	vm_object_t object;
 	vm_page_t root;
 
-	if ((m->flags & PG_UNMANAGED) == 0)
+	if ((m->oflags & VPO_UNMANAGED) == 0)
 		vm_page_lock_assert(m, MA_OWNED);
 	if ((object = m->object) == NULL)
 		return;
@@ -1388,14 +1388,14 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 		if (req & VM_ALLOC_ZERO)
 			flags = PG_ZERO;
 	}
-	if (object == NULL || object->type == OBJT_PHYS)
-		flags |= PG_UNMANAGED;
 	m->flags = flags;
 	mtx_unlock(&vm_page_queue_free_mtx);
-	if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ))
-		m->oflags = 0;
+	if (object == NULL || object->type == OBJT_PHYS)
+		m->oflags = VPO_UNMANAGED;
 	else
-		m->oflags = VPO_BUSY;
+		m->oflags = 0;
+	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0)
+		m->oflags |= VPO_BUSY;
 	if (req & VM_ALLOC_WIRED) {
 		/*
 		 * The page lock is not required for wiring a page until that
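The rewritten block computes the page's initial oflags instead of folding PG_UNMANAGED into flags: objectless and OBJT_PHYS allocations start as VPO_UNMANAGED, everything else as 0, and VPO_BUSY is then OR-ed in unless the caller passed VM_ALLOC_NOBUSY or VM_ALLOC_NOOBJ. The same decision table as a sketch (page_initial_oflags() is a hypothetical helper, not in the patch):

	static __inline u_short
	page_initial_oflags(vm_object_t object, int req)
	{
		u_short oflags;

		/* No object, or a physical-memory object: skip PV management. */
		oflags = (object == NULL || object->type == OBJT_PHYS) ?
		    VPO_UNMANAGED : 0;
		/* New pages start busied unless the caller opted out. */
		if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0)
			oflags |= VPO_BUSY;
		return (oflags);
	}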
@@ -1479,8 +1479,8 @@ vm_page_alloc_init(vm_page_t m)
 	if (m->flags & PG_ZERO)
 		vm_page_zero_count--;
 	/* Don't clear the PG_ZERO flag; we'll need it later. */
-	m->flags = PG_UNMANAGED | (m->flags & PG_ZERO);
-	m->oflags = 0;
+	m->flags &= PG_ZERO;
+	m->oflags = VPO_UNMANAGED;
 	/* Unmanaged pages don't use "act_count". */
 	return (drop);
 }
@@ -1670,7 +1670,7 @@ vm_page_activate(vm_page_t m)
 	vm_page_lock_assert(m, MA_OWNED);
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((queue = m->queue) != PQ_ACTIVE) {
-		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
+		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
 			if (m->act_count < ACT_INIT)
 				m->act_count = ACT_INIT;
 			vm_page_lock_queues();
@@ -1736,7 +1736,7 @@ void
 vm_page_free_toq(vm_page_t m)
 {
 
-	if ((m->flags & PG_UNMANAGED) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		vm_page_lock_assert(m, MA_OWNED);
 		KASSERT(!pmap_page_is_mapped(m),
 		    ("vm_page_free_toq: freeing mapped page %p", m));
@@ -1754,7 +1754,7 @@ vm_page_free_toq(vm_page_t m)
 	 * callback routine until after we've put the page on the
 	 * appropriate free queue.
 	 */
-	if ((m->flags & PG_UNMANAGED) == 0)
+	if ((m->oflags & VPO_UNMANAGED) == 0)
 		vm_pageq_remove(m);
 	vm_page_remove(m);
 
@@ -1834,7 +1834,7 @@ vm_page_wire(vm_page_t m)
 		return;
 	}
 	if (m->wire_count == 0) {
-		if ((m->flags & PG_UNMANAGED) == 0)
+		if ((m->oflags & VPO_UNMANAGED) == 0)
 			vm_pageq_remove(m);
 		atomic_add_int(&cnt.v_wire_count, 1);
 	}
@@ -1862,7 +1862,7 @@ void
 vm_page_unwire(vm_page_t m, int activate)
 {
 
-	if ((m->flags & PG_UNMANAGED) == 0)
+	if ((m->oflags & VPO_UNMANAGED) == 0)
 		vm_page_lock_assert(m, MA_OWNED);
 	if ((m->flags & PG_FICTITIOUS) != 0) {
 		KASSERT(m->wire_count == 1,
@@ -1873,7 +1873,7 @@ vm_page_unwire(vm_page_t m, int activate)
 	m->wire_count--;
 	if (m->wire_count == 0) {
 		atomic_subtract_int(&cnt.v_wire_count, 1);
-		if ((m->flags & PG_UNMANAGED) != 0 ||
+		if ((m->oflags & VPO_UNMANAGED) != 0 ||
 		    m->object == NULL)
 			return;
 		vm_page_lock_queues();
@@ -1921,7 +1921,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
 	 */
 	if ((queue = m->queue) == PQ_INACTIVE)
 		return;
-	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
+	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
 		vm_page_lock_queues();
 		vm_page_flag_clear(m, PG_WINATCFLS);
 		if (queue != PQ_NONE)
@@ -1962,7 +1962,7 @@ vm_page_try_to_cache(vm_page_t m)
 	vm_page_lock_assert(m, MA_OWNED);
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
-	    (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED))
+	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
 		return (0);
 	pmap_remove_all(m);
 	if (m->dirty)
@@ -1985,7 +1985,7 @@ vm_page_try_to_free(vm_page_t m)
 	if (m->object != NULL)
 		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
-	    (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED))
+	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
 		return (0);
 	pmap_remove_all(m);
 	if (m->dirty)
@@ -2010,7 +2010,7 @@ vm_page_cache(vm_page_t m)
 	vm_page_lock_assert(m, MA_OWNED);
 	object = m->object;
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	if ((m->flags & PG_UNMANAGED) || (m->oflags & VPO_BUSY) || m->busy ||
+	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
 	    m->hold_count || m->wire_count)
 		panic("vm_page_cache: attempting to cache busy page");
 	pmap_remove_all(m);
@@ -2657,7 +2657,8 @@ vm_page_cowsetup(vm_page_t m)
 {
 
 	vm_page_lock_assert(m, MA_OWNED);
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+	if ((m->flags & PG_FICTITIOUS) != 0 ||
+	    (m->oflags & VPO_UNMANAGED) != 0 ||
 	    m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
 		return (EBUSY);
 	m->cow++;
@@ -157,9 +157,18 @@ struct vm_page {
  *
  * Access to these page flags is synchronized by the lock on the object
  * containing the page (O).
+ *
+ * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
+ *	 indicates that the page is not under PV management but
+ *	 otherwise should be treated as a normal page.  Pages not
+ *	 under PV management cannot be paged out via the
+ *	 object/vm_page_t because there is no knowledge of their pte
+ *	 mappings, and such pages are also not on any PQ queue.
+ *
  */
 #define	VPO_BUSY	0x0001	/* page is in transit */
 #define	VPO_WANTED	0x0002	/* someone is waiting for page */
+#define	VPO_UNMANAGED	0x0004	/* No PV management for page */
 #define	VPO_SWAPINPROG	0x0200	/* swap I/O in progress on page */
 #define	VPO_NOSYNC	0x0400	/* do not collect for syncer */
 
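VPO_UNMANAGED now sits in the object-synchronized ("O") oflags class documented above; it is established when the page is initialized (vm_page_alloc(), vm_page_alloc_init(), vm_page_getfake() in the hunks above) rather than toggled under the page queues lock. A minimal usage sketch under the object lock (hypothetical function; assumes m->object is non-NULL):

	static int
	page_requires_pv_entries(vm_page_t m)
	{

		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
		return ((m->oflags & VPO_UNMANAGED) == 0);
	}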
@@ -216,13 +225,6 @@ extern struct vpglocks pa_lock[];
 /*
  * These are the flags defined for vm_page.
  *
- * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
- *	 not under PV management but otherwise should be treated as a
- *	 normal page.  Pages not under PV management cannot be paged out
- *	 via the object/vm_page_t because there is no knowledge of their
- *	 pte mappings, nor can they be removed from their objects via
- *	 the object, and such pages are also not on any PQ queue.
- *
  * PG_REFERENCED may be cleared only if the object containing the page is
  * locked.
  *
@@ -236,7 +238,6 @@ extern struct vpglocks pa_lock[];
 #define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
 #define	PG_ZERO		0x0040		/* page is zeroed */
 #define	PG_REFERENCED	0x0080		/* page has been referenced */
-#define	PG_UNMANAGED	0x0800		/* No PV management for page */
 #define	PG_MARKER	0x1000		/* special queue marker page */
 #define	PG_SLAB		0x2000		/* object pointer is actually a slab */
 