If we ever allow for managed fictitious pages, such pages shall be
excluded from superpage promotions. At least one of the reasons is that
pv_table is sized for non-fictitious pages only.

Consistently check that the page is non-fictitious before accessing the
superpage pv list.

Sponsored by:	The FreeBSD Foundation
Reviewed by:	alc
MFC after:	2 weeks
parent 004cb95bae
commit fc6e32fb62
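To make the rationale concrete, here is a small standalone C sketch (illustrative only; the table size, MODEL_* names, and flag value are simplified stand-ins, not the kernel definitions): pa_to_pvh() indexes a pv head table by physical superpage frame number, and that table covers only real, non-fictitious memory, so a fictitious page, whose physical address may point at a device region, has no slot in it. Every superpage pv-list access must therefore first test PG_FICTITIOUS, which is the guard this change adds throughout.

/*
 * Illustrative model only (not kernel code): why pa_to_pvh() must not be
 * used on fictitious pages.  The pv head table covers real RAM frames
 * only; a fictitious page's physical address (e.g. a device BAR) has no
 * entry, so it must be skipped with a PG_FICTITIOUS-style check first.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_PDRSHIFT		21			/* 2 MB superpage frames */
#define MODEL_RAM_TOP		(1ULL << 32)		/* pretend 4 GB of real RAM */
#define MODEL_NPVH		(MODEL_RAM_TOP >> MODEL_PDRSHIFT)
#define MODEL_PG_FICTITIOUS	0x0008			/* stand-in flag value */

struct md_page { int pv_count; };			/* stand-in pv list head */
struct vm_page { uint64_t phys_addr; int flags; };

static struct md_page pv_table[MODEL_NPVH];		/* sized for RAM only */

/* Indexing helper; only valid for physical addresses backed by real RAM. */
static struct md_page *
pa_to_pvh(uint64_t pa)
{
	assert(pa < MODEL_RAM_TOP);	/* a fictitious page would trip this */
	return (&pv_table[pa >> MODEL_PDRSHIFT]);
}

int
main(void)
{
	struct vm_page ram_page = { 2 * 1024 * 1024, 0 };
	struct vm_page dev_page = { 0xf00000000ULL, MODEL_PG_FICTITIOUS };
	struct vm_page *pages[] = { &ram_page, &dev_page };

	for (int i = 0; i < 2; i++) {
		struct vm_page *m = pages[i];
		/* The guard added throughout the diff below. */
		if ((m->flags & MODEL_PG_FICTITIOUS) == 0) {
			struct md_page *pvh = pa_to_pvh(m->phys_addr);
			pvh->pv_count++;	/* stand-in for pv-list work */
			printf("pa %#jx: superpage pv head index %ju\n",
			    (uintmax_t)m->phys_addr,
			    (uintmax_t)(m->phys_addr >> MODEL_PDRSHIFT));
		} else {
			printf("pa %#jx: fictitious, skip superpage pv list\n",
			    (uintmax_t)m->phys_addr);
		}
	}
	return (0);
}

In the real pmap the same reasoning applies to every pa_to_pvh()/pv_list access touched below, which is why each site either tests PG_FICTITIOUS directly or jumps to the small_mappings label.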
@@ -2386,7 +2386,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pmap_pvh_free(&m->md, pmap, va);
-	if (TAILQ_EMPTY(&m->md.pv_list)) {
+	if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 		if (TAILQ_EMPTY(&pvh->pv_list))
 			vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -2849,6 +2849,8 @@ pmap_remove_all(vm_page_t m)
 	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
 	vm_page_lock_queues();
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
 		pmap = PV_PMAP(pv);
@@ -2858,6 +2860,7 @@ pmap_remove_all(vm_page_t m)
 		(void)pmap_demote_pde(pmap, pde, va);
 		PMAP_UNLOCK(pmap);
 	}
+small_mappings:
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -2939,7 +2942,8 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	pdp_entry_t *pdpe;
 	pd_entry_t ptpaddr, *pde;
 	pt_entry_t *pte;
-	boolean_t anychanged, pv_lists_locked;
+	int anychanged;
+	boolean_t pv_lists_locked;
 
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 		pmap_remove(pmap, sva, eva);
@@ -2952,7 +2956,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 
 	pv_lists_locked = FALSE;
 resume:
-	anychanged = FALSE;
+	anychanged = 0;
 
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = va_next) {
@@ -3000,7 +3004,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 				 * invalidated by pmap_protect_pde().
 				 */
 				if (pmap_protect_pde(pmap, pde, sva, prot))
-					anychanged = TRUE;
+					anychanged = 1;
 				continue;
 			} else {
 				if (!pv_lists_locked) {
@@ -3054,7 +3058,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 				if (obits & PG_G)
 					pmap_invalidate_page(pmap, sva);
 				else
-					anychanged = TRUE;
+					anychanged = 1;
 			}
 		}
 	}
@@ -3354,7 +3358,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 			}
 			if ((origpte & PG_MANAGED) != 0 &&
 			    TAILQ_EMPTY(&om->md.pv_list) &&
-			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
+			    ((om->flags & PG_FICTITIOUS) != 0 ||
+			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
 				vm_page_aflag_clear(om, PGA_WRITEABLE);
 			if (invlva)
 				pmap_invalidate_page(pmap, va);
@@ -3367,7 +3372,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	 * populated, then attempt promotion.
 	 */
 	if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
-	    pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
+	    pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
+	    vm_reserv_level_iffullpop(m) == 0)
 		pmap_promote_pde(pmap, pde, va);
 
 	vm_page_unlock_queues();
@@ -3987,7 +3993,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 		if (loops >= 16)
 			break;
 	}
-	if (!rv && loops < 16) {
+	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 			if (PV_PMAP(pv) == pmap) {
@@ -4019,7 +4025,10 @@ pmap_page_wired_mappings(vm_page_t m)
 		return (count);
 	vm_page_lock_queues();
 	count = pmap_pvh_wired_mappings(&m->md, count);
-	count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count);
+	if ((m->flags & PG_FICTITIOUS) == 0) {
+		count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
+		    count);
+	}
 	vm_page_unlock_queues();
 	return (count);
 }
@@ -4061,7 +4070,8 @@ pmap_page_is_mapped(vm_page_t m)
 		return (FALSE);
 	vm_page_lock_queues();
 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
-	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
+	    ((m->flags & PG_FICTITIOUS) == 0 &&
+	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
 	vm_page_unlock_queues();
 	return (rv);
 }
@@ -4134,9 +4144,10 @@ pmap_remove_pages(pmap_t pmap)
 				    m, (uintmax_t)m->phys_addr,
 				    (uintmax_t)tpte));
 
-				KASSERT(m < &vm_page_array[vm_page_array_size],
-				    ("pmap_remove_pages: bad tpte %#jx",
-				    (uintmax_t)tpte));
+				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
+				    m < &vm_page_array[vm_page_array_size],
+				    ("pmap_remove_pages: bad tpte %#jx",
+				    (uintmax_t)tpte));
 
 				pte_clear(pte);
 
@@ -4178,7 +4189,8 @@ pmap_remove_pages(pmap_t pmap)
 				} else {
 					pmap_resident_count_dec(pmap, 1);
 					TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-					if (TAILQ_EMPTY(&m->md.pv_list)) {
+					if (TAILQ_EMPTY(&m->md.pv_list) &&
+					    (m->flags & PG_FICTITIOUS) == 0) {
 						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 						if (TAILQ_EMPTY(&pvh->pv_list))
 							vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -4229,7 +4241,8 @@ pmap_is_modified(vm_page_t m)
 		return (FALSE);
 	vm_page_lock_queues();
 	rv = pmap_is_modified_pvh(&m->md) ||
-	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+	    ((m->flags & PG_FICTITIOUS) == 0 &&
+	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 	vm_page_unlock_queues();
 	return (rv);
 }
@@ -4300,7 +4313,8 @@ pmap_is_referenced(vm_page_t m)
 	    ("pmap_is_referenced: page %p is not managed", m));
 	vm_page_lock_queues();
 	rv = pmap_is_referenced_pvh(&m->md) ||
-	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+	    ((m->flags & PG_FICTITIOUS) == 0 &&
+	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 	vm_page_unlock_queues();
 	return (rv);
 }
@@ -4357,6 +4371,8 @@ pmap_remove_write(vm_page_t m)
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 		pmap = PV_PMAP(pv);
@@ -4367,6 +4383,7 @@ pmap_remove_write(vm_page_t m)
 			(void)pmap_demote_pde(pmap, pde, va);
 		PMAP_UNLOCK(pmap);
 	}
+small_mappings:
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4415,8 +4432,10 @@ pmap_ts_referenced(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
-	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	vm_page_lock_queues();
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		goto small_mappings;
+	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4447,6 +4466,7 @@ pmap_ts_referenced(vm_page_t m)
 		}
 		PMAP_UNLOCK(pmap);
 	}
+small_mappings:
 	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pvf = pv;
 		do {
@@ -4501,6 +4521,8 @@ pmap_clear_modify(vm_page_t m)
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 		pmap = PV_PMAP(pv);
@@ -4533,6 +4555,7 @@ pmap_clear_modify(vm_page_t m)
 		}
 		PMAP_UNLOCK(pmap);
 	}
+small_mappings:
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4567,6 +4590,8 @@ pmap_clear_reference(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
 	vm_page_lock_queues();
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 		pmap = PV_PMAP(pv);
@@ -4590,6 +4615,7 @@ pmap_clear_reference(vm_page_t m)
 		}
 		PMAP_UNLOCK(pmap);
 	}
+small_mappings:
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);

@@ -2483,7 +2483,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pmap_pvh_free(&m->md, pmap, va);
-	if (TAILQ_EMPTY(&m->md.pv_list)) {
+	if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 		if (TAILQ_EMPTY(&pvh->pv_list))
 			vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -2957,6 +2957,8 @@ pmap_remove_all(vm_page_t m)
 	free = NULL;
 	vm_page_lock_queues();
 	sched_pin();
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
 		va = pv->pv_va;
@@ -2966,6 +2968,7 @@ pmap_remove_all(vm_page_t m)
 		(void)pmap_demote_pde(pmap, pde, va);
 		PMAP_UNLOCK(pmap);
 	}
+small_mappings:
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -3481,7 +3484,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 			}
 			if ((origpte & PG_MANAGED) != 0 &&
 			    TAILQ_EMPTY(&om->md.pv_list) &&
-			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
+			    ((om->flags & PG_FICTITIOUS) != 0 ||
+			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
 				vm_page_aflag_clear(om, PGA_WRITEABLE);
 			if (invlva)
 				pmap_invalidate_page(pmap, va);
@@ -3494,7 +3498,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	 * populated, then attempt promotion.
 	 */
 	if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
-	    pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
+	    pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
+	    vm_reserv_level_iffullpop(m) == 0)
 		pmap_promote_pde(pmap, pde, va);
 
 	sched_unpin();
@@ -4134,7 +4139,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 		if (loops >= 16)
 			break;
 	}
-	if (!rv && loops < 16) {
+	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 			if (PV_PMAP(pv) == pmap) {
@@ -4166,7 +4171,10 @@ pmap_page_wired_mappings(vm_page_t m)
 		return (count);
 	vm_page_lock_queues();
 	count = pmap_pvh_wired_mappings(&m->md, count);
-	count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count);
+	if ((m->flags & PG_FICTITIOUS) == 0) {
+		count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
+		    count);
+	}
 	vm_page_unlock_queues();
 	return (count);
 }
@@ -4210,7 +4218,8 @@ pmap_page_is_mapped(vm_page_t m)
 		return (FALSE);
 	vm_page_lock_queues();
 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
-	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
+	    ((m->flags & PG_FICTITIOUS) == 0 &&
+	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
 	vm_page_unlock_queues();
 	return (rv);
 }
@@ -4283,9 +4292,10 @@ pmap_remove_pages(pmap_t pmap)
 				    m, (uintmax_t)m->phys_addr,
 				    (uintmax_t)tpte));
 
-				KASSERT(m < &vm_page_array[vm_page_array_size],
-				    ("pmap_remove_pages: bad tpte %#jx",
-				    (uintmax_t)tpte));
+				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
+				    m < &vm_page_array[vm_page_array_size],
+				    ("pmap_remove_pages: bad tpte %#jx",
+				    (uintmax_t)tpte));
 
 				pte_clear(pte);
 
@@ -4327,7 +4337,8 @@ pmap_remove_pages(pmap_t pmap)
 				} else {
 					pmap->pm_stats.resident_count--;
 					TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-					if (TAILQ_EMPTY(&m->md.pv_list)) {
+					if (TAILQ_EMPTY(&m->md.pv_list) &&
+					    (m->flags & PG_FICTITIOUS) == 0) {
 						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 						if (TAILQ_EMPTY(&pvh->pv_list))
 							vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -4380,7 +4391,8 @@ pmap_is_modified(vm_page_t m)
 		return (FALSE);
 	vm_page_lock_queues();
 	rv = pmap_is_modified_pvh(&m->md) ||
-	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+	    ((m->flags & PG_FICTITIOUS) == 0 &&
+	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 	vm_page_unlock_queues();
 	return (rv);
 }
@@ -4453,7 +4465,8 @@ pmap_is_referenced(vm_page_t m)
 	    ("pmap_is_referenced: page %p is not managed", m));
 	vm_page_lock_queues();
 	rv = pmap_is_referenced_pvh(&m->md) ||
-	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+	    ((m->flags & PG_FICTITIOUS) == 0 &&
+	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 	vm_page_unlock_queues();
 	return (rv);
 }
@@ -4513,6 +4526,8 @@ pmap_remove_write(vm_page_t m)
 		return;
 	vm_page_lock_queues();
 	sched_pin();
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 		va = pv->pv_va;
@@ -4523,6 +4538,7 @@ pmap_remove_write(vm_page_t m)
 			(void)pmap_demote_pde(pmap, pde, va);
 		PMAP_UNLOCK(pmap);
 	}
+small_mappings:
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4580,6 +4596,8 @@ pmap_ts_referenced(vm_page_t m)
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	vm_page_lock_queues();
 	sched_pin();
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		goto small_mappings;
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
 		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
@@ -4610,6 +4628,7 @@ pmap_ts_referenced(vm_page_t m)
 		}
 		PMAP_UNLOCK(pmap);
 	}
+small_mappings:
 	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pvf = pv;
 		do {
@@ -4666,6 +4685,8 @@ pmap_clear_modify(vm_page_t m)
 		return;
 	vm_page_lock_queues();
 	sched_pin();
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 		va = pv->pv_va;
@@ -4703,6 +4724,7 @@ pmap_clear_modify(vm_page_t m)
 		}
 		PMAP_UNLOCK(pmap);
 	}
+small_mappings:
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4744,6 +4766,8 @@ pmap_clear_reference(vm_page_t m)
 	    ("pmap_clear_reference: page %p is not managed", m));
 	vm_page_lock_queues();
 	sched_pin();
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		goto small_mappings;
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 		va = pv->pv_va;
@@ -4767,6 +4791,7 @@ pmap_clear_reference(vm_page_t m)
 		}
 		PMAP_UNLOCK(pmap);
 	}
+small_mappings:
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);