The pmap function pmap_clear_reference() is no longer used. Remove it.
pmap_clear_reference() has had exactly one caller in the kernel for several years, more precisely, since FreeBSD 8. Now, that call no longer exists.

Approved by:    re (kib)
Sponsored by:   EMC / Isilon Storage Division
parent 2789c11117
commit deb179bb4c
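For context (not part of this change): machine-independent code can still observe the reference bit through pmap_is_referenced() and pmap_ts_referenced(), both of which appear in the hunks below. The sketch that follows is purely illustrative, assumes the usual kernel headers, and the helper name page_recently_used() is invented for the example.

/*
 * Illustrative sketch only, not from this commit: test the reference
 * state of a managed page via pmap_is_referenced().
 */
#include <sys/param.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

static boolean_t
page_recently_used(vm_page_t m)
{

        /* Unmanaged pages have no pv entries, so no tracked reference bit. */
        if ((m->oflags & VPO_UNMANAGED) != 0)
                return (FALSE);
        return (pmap_is_referenced(m));
}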
@@ -5381,66 +5381,6 @@ pmap_clear_modify(vm_page_t m)
        rw_wunlock(&pvh_global_lock);
}

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
        struct md_page *pvh;
        pmap_t pmap;
        pv_entry_t next_pv, pv;
        pd_entry_t oldpde, *pde;
        pt_entry_t *pte;
        vm_offset_t va;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_reference: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
        if ((m->flags & PG_FICTITIOUS) != 0)
                goto small_mappings;
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                va = pv->pv_va;
                pde = pmap_pde(pmap, va);
                oldpde = *pde;
                if ((oldpde & PG_A) != 0) {
                        if (pmap_demote_pde(pmap, pde, va)) {
                                /*
                                 * Remove the mapping to a single page so
                                 * that a subsequent access may repromote.
                                 * Since the underlying page table page is
                                 * fully populated, this removal never frees
                                 * a page table page.
                                 */
                                va += VM_PAGE_TO_PHYS(m) - (oldpde &
                                    PG_PS_FRAME);
                                pmap_remove_page(pmap, va, pde, NULL);
                        }
                }
                PMAP_UNLOCK(pmap);
        }
small_mappings:
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                pde = pmap_pde(pmap, pv->pv_va);
                KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
                    " a 2mpage in page %p's pv list", m));
                pte = pmap_pde_to_pte(pde, pv->pv_va);
                if (*pte & PG_A) {
                        atomic_clear_long(pte, PG_A);
                        pmap_invalidate_page(pmap, pv->pv_va);
                }
                PMAP_UNLOCK(pmap);
        }
        rw_wunlock(&pvh_global_lock);
}

/*
 * Miscellaneous support routines follow
 */

@@ -4904,22 +4904,6 @@ pmap_clear_modify(vm_page_t m)
}


/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_reference: page %p is not managed", m));
        if (pmap_is_referenced(m))
                pmap_clearbit(m, PVF_REF);
}


/*
 * Clear the write and modified bits in each of the given page's mappings.
 */

@@ -4591,21 +4591,6 @@ pmap_is_referenced(vm_page_t m)
        return ((m->md.pvh_attrs & PVF_REF) != 0);
}

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_reference: page %p is not managed", m));
        if (m->md.pvh_attrs & PVF_REF)
                pmap_clearbit(m, PVF_REF);
}


/*
 * Clear the write and modified bits in each of the given page's mappings.

@@ -5048,73 +5048,6 @@ pmap_clear_modify(vm_page_t m)
        rw_wunlock(&pvh_global_lock);
}

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
        struct md_page *pvh;
        pv_entry_t next_pv, pv;
        pmap_t pmap;
        pd_entry_t oldpde, *pde;
        pt_entry_t *pte;
        vm_offset_t va;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_reference: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
        sched_pin();
        if ((m->flags & PG_FICTITIOUS) != 0)
                goto small_mappings;
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
                va = pv->pv_va;
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                pde = pmap_pde(pmap, va);
                oldpde = *pde;
                if ((oldpde & PG_A) != 0) {
                        if (pmap_demote_pde(pmap, pde, va)) {
                                /*
                                 * Remove the mapping to a single page so
                                 * that a subsequent access may repromote.
                                 * Since the underlying page table page is
                                 * fully populated, this removal never frees
                                 * a page table page.
                                 */
                                va += VM_PAGE_TO_PHYS(m) - (oldpde &
                                    PG_PS_FRAME);
                                pmap_remove_page(pmap, va, NULL);
                        }
                }
                PMAP_UNLOCK(pmap);
        }
small_mappings:
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                pde = pmap_pde(pmap, pv->pv_va);
                KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
                    " a 4mpage in page %p's pv list", m));
                pte = pmap_pte_quick(pmap, pv->pv_va);
                if ((*pte & PG_A) != 0) {
                        /*
                         * Regardless of whether a pte is 32 or 64 bits
                         * in size, PG_A is among the least significant
                         * 32 bits.
                         */
                        atomic_clear_int((u_int *)pte, PG_A);
                        pmap_invalidate_page(pmap, pv->pv_va);
                }
                PMAP_UNLOCK(pmap);
        }
        sched_unpin();
        rw_wunlock(&pvh_global_lock);
}

/*
 * Miscellaneous support routines follow
 */

@@ -4023,41 +4023,6 @@ pmap_clear_modify(vm_page_t m)
        rw_wunlock(&pvh_global_lock);
}

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
        pv_entry_t pv;
        pmap_t pmap;
        pt_entry_t *pte;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_reference: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
        sched_pin();
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                pte = pmap_pte_quick(pmap, pv->pv_va);
                if ((*pte & PG_A) != 0) {
                        /*
                         * Regardless of whether a pte is 32 or 64 bits
                         * in size, PG_A is among the least significant
                         * 32 bits.
                         */
                        PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE);
                        pmap_invalidate_page(pmap, pv->pv_va);
                }
                PMAP_UNLOCK(pmap);
        }
        sched_unpin();
        rw_wunlock(&pvh_global_lock);
}

/*
 * Miscellaneous support routines follow
 */

@@ -2393,37 +2393,6 @@ pmap_clear_modify(vm_page_t m)
        rw_wunlock(&pvh_global_lock);
}

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
        struct ia64_lpte *pte;
        pmap_t oldpmap, pmap;
        pv_entry_t pv;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_reference: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                oldpmap = pmap_switch(pmap);
                pte = pmap_find_vhpt(pv->pv_va);
                KASSERT(pte != NULL, ("pte"));
                if (pmap_accessed(pte)) {
                        pmap_clear_accessed(pte);
                        pmap_invalidate_page(pv->pv_va);
                }
                pmap_switch(oldpmap);
                PMAP_UNLOCK(pmap);
        }
        rw_wunlock(&pvh_global_lock);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */

@@ -3052,24 +3052,6 @@ pmap_is_referenced(vm_page_t m)
        return ((m->md.pv_flags & PV_TABLE_REF) != 0);
}

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_reference: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
        if (m->md.pv_flags & PV_TABLE_REF) {
                m->md.pv_flags &= ~PV_TABLE_REF;
        }
        rw_wunlock(&pvh_global_lock);
}

/*
 * Miscellaneous support routines follow
 */

@@ -278,7 +278,6 @@ int moea_pte_spill(vm_offset_t);
 */
void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_clear_reference(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);

@@ -328,7 +327,6 @@ struct pmap_md * moea_scan_md(mmu_t mmu, struct pmap_md *prev);
static mmu_method_t moea_methods[] = {
        MMUMETHOD(mmu_change_wiring, moea_change_wiring),
        MMUMETHOD(mmu_clear_modify, moea_clear_modify),
        MMUMETHOD(mmu_clear_reference, moea_clear_reference),
        MMUMETHOD(mmu_copy_page, moea_copy_page),
        MMUMETHOD(mmu_copy_pages, moea_copy_pages),
        MMUMETHOD(mmu_enter, moea_enter),

@@ -1352,17 +1350,6 @@ moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
        return (rv);
}

void
moea_clear_reference(mmu_t mmu, vm_page_t m)
{

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("moea_clear_reference: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
        moea_clear_bit(m, PTE_REF);
        rw_wunlock(&pvh_global_lock);
}

void
moea_clear_modify(mmu_t mmu, vm_page_t m)
{

@@ -288,7 +288,6 @@ static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
 */
void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_clear_reference(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);

@@ -334,7 +333,6 @@ static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
static mmu_method_t moea64_methods[] = {
        MMUMETHOD(mmu_change_wiring, moea64_change_wiring),
        MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
        MMUMETHOD(mmu_clear_reference, moea64_clear_reference),
        MMUMETHOD(mmu_copy_page, moea64_copy_page),
        MMUMETHOD(mmu_copy_pages, moea64_copy_pages),
        MMUMETHOD(mmu_enter, moea64_enter),

@@ -1542,15 +1540,6 @@ moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
        return (rv);
}

void
moea64_clear_reference(mmu_t mmu, vm_page_t m)
{

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("moea64_clear_reference: page %p is not managed", m));
        moea64_clear_bit(mmu, m, LPTE_REF);
}

void
moea64_clear_modify(mmu_t mmu, vm_page_t m)
{

@@ -270,7 +270,6 @@ void pmap_bootstrap_ap(volatile uint32_t *);
 */
static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void mmu_booke_clear_modify(mmu_t, vm_page_t);
static void mmu_booke_clear_reference(mmu_t, vm_page_t);
static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
    vm_size_t, vm_offset_t);
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);

@@ -333,7 +332,6 @@ static mmu_method_t mmu_booke_methods[] = {
        /* pmap dispatcher interface */
        MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring),
        MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
        MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference),
        MMUMETHOD(mmu_copy, mmu_booke_copy),
        MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
        MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),

@@ -2356,38 +2354,6 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
        return (count);
}

/*
 * Clear the reference bit on the specified physical page.
 */
static void
mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
{
        pte_t *pte;
        pv_entry_t pv;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("mmu_booke_clear_reference: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
                    PTE_ISVALID(pte)) {
                        if (PTE_ISREFERENCED(pte)) {
                                mtx_lock_spin(&tlbivax_mutex);
                                tlb_miss_lock();

                                tlb0_flush_entry(pv->pv_va);
                                pte->flags &= ~PTE_REFERENCED;

                                tlb_miss_unlock();
                                mtx_unlock_spin(&tlbivax_mutex);
                        }
                }
                PMAP_UNLOCK(pv->pv_pmap);
        }
        rw_wunlock(&pvh_global_lock);
}

/*
 * Change wiring attribute for a map/virtual-address pair.
 */

@@ -115,14 +115,6 @@ pmap_clear_modify(vm_page_t m)
        MMU_CLEAR_MODIFY(mmu_obj, m);
}

void
pmap_clear_reference(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        MMU_CLEAR_REFERENCE(mmu_obj, m);
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)

@@ -2164,25 +2164,6 @@ pmap_clear_modify(vm_page_t m)
        rw_wunlock(&tte_list_global_lock);
}

void
pmap_clear_reference(vm_page_t m)
{
        struct tte *tp;
        u_long data;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_reference: page %p is not managed", m));
        rw_wlock(&tte_list_global_lock);
        TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
                if ((tp->tte_data & TD_PV) == 0)
                        continue;
                data = atomic_clear_long(&tp->tte_data, TD_REF);
                if ((data & TD_REF) != 0)
                        tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
        }
        rw_wunlock(&tte_list_global_lock);
}

void
pmap_remove_write(vm_page_t m)
{

@@ -104,7 +104,6 @@ void pmap_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
    vm_size_t);
void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
void pmap_clear_modify(vm_page_t m);
void pmap_clear_reference(vm_page_t m);
void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
void pmap_copy_page(vm_page_t, vm_page_t);
void pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset,