diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c5ba84791837..f4e8d67c308a 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -5381,66 +5381,6 @@ pmap_clear_modify(vm_page_t m)
 	rw_wunlock(&pvh_global_lock);
 }
 
-/*
- * pmap_clear_reference:
- *
- *	Clear the reference bit on the specified physical page.
- */
-void
-pmap_clear_reference(vm_page_t m)
-{
-	struct md_page *pvh;
-	pmap_t pmap;
-	pv_entry_t next_pv, pv;
-	pd_entry_t oldpde, *pde;
-	pt_entry_t *pte;
-	vm_offset_t va;
-
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-	    ("pmap_clear_reference: page %p is not managed", m));
-	rw_wlock(&pvh_global_lock);
-	if ((m->flags & PG_FICTITIOUS) != 0)
-		goto small_mappings;
-	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
-		pmap = PV_PMAP(pv);
-		PMAP_LOCK(pmap);
-		va = pv->pv_va;
-		pde = pmap_pde(pmap, va);
-		oldpde = *pde;
-		if ((oldpde & PG_A) != 0) {
-			if (pmap_demote_pde(pmap, pde, va)) {
-				/*
-				 * Remove the mapping to a single page so
-				 * that a subsequent access may repromote.
-				 * Since the underlying page table page is
-				 * fully populated, this removal never frees
-				 * a page table page.
-				 */
-				va += VM_PAGE_TO_PHYS(m) - (oldpde &
-				    PG_PS_FRAME);
-				pmap_remove_page(pmap, va, pde, NULL);
-			}
-		}
-		PMAP_UNLOCK(pmap);
-	}
-small_mappings:
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
-		pmap = PV_PMAP(pv);
-		PMAP_LOCK(pmap);
-		pde = pmap_pde(pmap, pv->pv_va);
-		KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
-		    " a 2mpage in page %p's pv list", m));
-		pte = pmap_pde_to_pte(pde, pv->pv_va);
-		if (*pte & PG_A) {
-			atomic_clear_long(pte, PG_A);
-			pmap_invalidate_page(pmap, pv->pv_va);
-		}
-		PMAP_UNLOCK(pmap);
-	}
-	rw_wunlock(&pvh_global_lock);
-}
-
 /*
  * Miscellaneous support routines follow
  */
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index dface47e6e4a..14ba2e0076b0 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -4904,22 +4904,6 @@ pmap_clear_modify(vm_page_t m)
 }
 
 
-/*
- * pmap_clear_reference:
- *
- *	Clear the reference bit on the specified physical page.
- */
-void
-pmap_clear_reference(vm_page_t m)
-{
-
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-	    ("pmap_clear_reference: page %p is not managed", m));
-	if (pmap_is_referenced(m))
-		pmap_clearbit(m, PVF_REF);
-}
-
-
 /*
  * Clear the write and modified bits in each of the given page's mappings.
  */
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index bc7912dbb6d6..c72effd6c6ff 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -4591,21 +4591,6 @@ pmap_is_referenced(vm_page_t m)
 	return ((m->md.pvh_attrs & PVF_REF) != 0);
 }
 
-/*
- * pmap_clear_reference:
- *
- *	Clear the reference bit on the specified physical page.
- */
-void
-pmap_clear_reference(vm_page_t m)
-{
-
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-	    ("pmap_clear_reference: page %p is not managed", m));
-	if (m->md.pvh_attrs & PVF_REF)
-		pmap_clearbit(m, PVF_REF);
-}
-
 
 /*
  * Clear the write and modified bits in each of the given page's mappings.
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index c9f327c3fd1c..64bf1a3491a4 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -5048,73 +5048,6 @@ pmap_clear_modify(vm_page_t m)
 	rw_wunlock(&pvh_global_lock);
 }
 
-/*
- * pmap_clear_reference:
- *
- *	Clear the reference bit on the specified physical page.
- */
-void
-pmap_clear_reference(vm_page_t m)
-{
-	struct md_page *pvh;
-	pv_entry_t next_pv, pv;
-	pmap_t pmap;
-	pd_entry_t oldpde, *pde;
-	pt_entry_t *pte;
-	vm_offset_t va;
-
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-	    ("pmap_clear_reference: page %p is not managed", m));
-	rw_wlock(&pvh_global_lock);
-	sched_pin();
-	if ((m->flags & PG_FICTITIOUS) != 0)
-		goto small_mappings;
-	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
-		va = pv->pv_va;
-		pmap = PV_PMAP(pv);
-		PMAP_LOCK(pmap);
-		pde = pmap_pde(pmap, va);
-		oldpde = *pde;
-		if ((oldpde & PG_A) != 0) {
-			if (pmap_demote_pde(pmap, pde, va)) {
-				/*
-				 * Remove the mapping to a single page so
-				 * that a subsequent access may repromote.
-				 * Since the underlying page table page is
-				 * fully populated, this removal never frees
-				 * a page table page.
-				 */
-				va += VM_PAGE_TO_PHYS(m) - (oldpde &
-				    PG_PS_FRAME);
-				pmap_remove_page(pmap, va, NULL);
-			}
-		}
-		PMAP_UNLOCK(pmap);
-	}
-small_mappings:
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
-		pmap = PV_PMAP(pv);
-		PMAP_LOCK(pmap);
-		pde = pmap_pde(pmap, pv->pv_va);
-		KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
-		    " a 4mpage in page %p's pv list", m));
-		pte = pmap_pte_quick(pmap, pv->pv_va);
-		if ((*pte & PG_A) != 0) {
-			/*
-			 * Regardless of whether a pte is 32 or 64 bits
-			 * in size, PG_A is among the least significant
-			 * 32 bits.
-			 */
-			atomic_clear_int((u_int *)pte, PG_A);
-			pmap_invalidate_page(pmap, pv->pv_va);
-		}
-		PMAP_UNLOCK(pmap);
-	}
-	sched_unpin();
-	rw_wunlock(&pvh_global_lock);
-}
-
 /*
  * Miscellaneous support routines follow
  */
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 3abe7ef84e45..9f3e8bb888dc 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -4023,41 +4023,6 @@ pmap_clear_modify(vm_page_t m)
 	rw_wunlock(&pvh_global_lock);
 }
 
-/*
- * pmap_clear_reference:
- *
- *	Clear the reference bit on the specified physical page.
- */
-void
-pmap_clear_reference(vm_page_t m)
-{
-	pv_entry_t pv;
-	pmap_t pmap;
-	pt_entry_t *pte;
-
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-	    ("pmap_clear_reference: page %p is not managed", m));
-	rw_wlock(&pvh_global_lock);
-	sched_pin();
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
-		pmap = PV_PMAP(pv);
-		PMAP_LOCK(pmap);
-		pte = pmap_pte_quick(pmap, pv->pv_va);
-		if ((*pte & PG_A) != 0) {
-			/*
-			 * Regardless of whether a pte is 32 or 64 bits
-			 * in size, PG_A is among the least significant
-			 * 32 bits.
-			 */
-			PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE);
-			pmap_invalidate_page(pmap, pv->pv_va);
-		}
-		PMAP_UNLOCK(pmap);
-	}
-	sched_unpin();
-	rw_wunlock(&pvh_global_lock);
-}
-
 /*
  * Miscellaneous support routines follow
  */
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 442149f588ba..3e848399d9ea 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -2393,37 +2393,6 @@ pmap_clear_modify(vm_page_t m)
 	rw_wunlock(&pvh_global_lock);
 }
 
-/*
- * pmap_clear_reference:
- *
- *	Clear the reference bit on the specified physical page.
- */
-void
-pmap_clear_reference(vm_page_t m)
-{
-	struct ia64_lpte *pte;
-	pmap_t oldpmap, pmap;
-	pv_entry_t pv;
-
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-	    ("pmap_clear_reference: page %p is not managed", m));
-	rw_wlock(&pvh_global_lock);
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
-		pmap = PV_PMAP(pv);
-		PMAP_LOCK(pmap);
-		oldpmap = pmap_switch(pmap);
-		pte = pmap_find_vhpt(pv->pv_va);
-		KASSERT(pte != NULL, ("pte"));
-		if (pmap_accessed(pte)) {
-			pmap_clear_accessed(pte);
-			pmap_invalidate_page(pv->pv_va);
-		}
-		pmap_switch(oldpmap);
-		PMAP_UNLOCK(pmap);
-	}
-	rw_wunlock(&pvh_global_lock);
-}
-
 /*
  * Clear the write and modified bits in each of the given page's mappings.
  */
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index d1bf5ea00d58..6bc5ba9d2877 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -3052,24 +3052,6 @@ pmap_is_referenced(vm_page_t m)
 	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
 }
 
-/*
- * pmap_clear_reference:
- *
- *	Clear the reference bit on the specified physical page.
- */
-void
-pmap_clear_reference(vm_page_t m)
-{
-
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-	    ("pmap_clear_reference: page %p is not managed", m));
-	rw_wlock(&pvh_global_lock);
-	if (m->md.pv_flags & PV_TABLE_REF) {
-		m->md.pv_flags &= ~PV_TABLE_REF;
-	}
-	rw_wunlock(&pvh_global_lock);
-}
-
 /*
  * Miscellaneous support routines follow
  */
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index f4e9d9bbbe83..ac3e9fe8a59a 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -278,7 +278,6 @@ int moea_pte_spill(vm_offset_t);
  */
 void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
 void moea_clear_modify(mmu_t, vm_page_t);
-void moea_clear_reference(mmu_t, vm_page_t);
 void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
 void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
@@ -328,7 +327,6 @@ struct pmap_md * moea_scan_md(mmu_t mmu, struct pmap_md *prev);
 static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_change_wiring,	moea_change_wiring),
 	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
-	MMUMETHOD(mmu_clear_reference,	moea_clear_reference),
 	MMUMETHOD(mmu_copy_page,	moea_copy_page),
 	MMUMETHOD(mmu_copy_pages,	moea_copy_pages),
 	MMUMETHOD(mmu_enter,		moea_enter),
@@ -1352,17 +1350,6 @@ moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 	return (rv);
 }
 
-void
-moea_clear_reference(mmu_t mmu, vm_page_t m)
-{
-
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-	    ("moea_clear_reference: page %p is not managed", m));
-	rw_wlock(&pvh_global_lock);
-	moea_clear_bit(m, PTE_REF);
-	rw_wunlock(&pvh_global_lock);
-}
-
 void
 moea_clear_modify(mmu_t mmu, vm_page_t m)
 {
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 3913b4778ac4..4ed2606d0d44 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -288,7 +288,6 @@ static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
  */
 void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
 void moea64_clear_modify(mmu_t, vm_page_t);
-void moea64_clear_reference(mmu_t, vm_page_t);
 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
 void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
@@ -334,7 +333,6 @@ static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
 static mmu_method_t moea64_methods[] = {
 	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
 	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
-	MMUMETHOD(mmu_clear_reference,	moea64_clear_reference),
 	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
 	MMUMETHOD(mmu_copy_pages,	moea64_copy_pages),
 	MMUMETHOD(mmu_enter,		moea64_enter),
@@ -1542,15 +1540,6 @@ moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 	return (rv);
 }
 
-void
-moea64_clear_reference(mmu_t mmu, vm_page_t m)
-{
-
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-	    ("moea64_clear_reference: page %p is not managed", m));
-	moea64_clear_bit(mmu, m, LPTE_REF);
-}
-
 void
 moea64_clear_modify(mmu_t mmu, vm_page_t m)
 {
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index d5c64ed32907..7b9a5d313d02 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -270,7 +270,6 @@ void pmap_bootstrap_ap(volatile uint32_t *);
  */
 static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
 static void mmu_booke_clear_modify(mmu_t, vm_page_t);
-static void mmu_booke_clear_reference(mmu_t, vm_page_t);
 static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
     vm_size_t, vm_offset_t);
 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
@@ -333,7 +332,6 @@ static mmu_method_t mmu_booke_methods[] = {
 /* pmap dispatcher interface */
 	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
 	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
-	MMUMETHOD(mmu_clear_reference,	mmu_booke_clear_reference),
 	MMUMETHOD(mmu_copy,		mmu_booke_copy),
 	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
 	MMUMETHOD(mmu_copy_pages,	mmu_booke_copy_pages),
@@ -2356,38 +2354,6 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
 	return (count);
 }
 
-/*
- * Clear the reference bit on the specified physical page.
- */
-static void
-mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
-{
-	pte_t *pte;
-	pv_entry_t pv;
-
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-	    ("mmu_booke_clear_reference: page %p is not managed", m));
-	rw_wlock(&pvh_global_lock);
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
-		PMAP_LOCK(pv->pv_pmap);
-		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
-		    PTE_ISVALID(pte)) {
-			if (PTE_ISREFERENCED(pte)) {
-				mtx_lock_spin(&tlbivax_mutex);
-				tlb_miss_lock();
-
-				tlb0_flush_entry(pv->pv_va);
-				pte->flags &= ~PTE_REFERENCED;
-
-				tlb_miss_unlock();
-				mtx_unlock_spin(&tlbivax_mutex);
-			}
-		}
-		PMAP_UNLOCK(pv->pv_pmap);
-	}
-	rw_wunlock(&pvh_global_lock);
-}
-
 /*
  * Change wiring attribute for a map/virtual-address pair.
  */
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 24e6076b2aa1..773ede1cd0b0 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -115,14 +115,6 @@ pmap_clear_modify(vm_page_t m)
 	MMU_CLEAR_MODIFY(mmu_obj, m);
 }
 
-void
-pmap_clear_reference(vm_page_t m)
-{
-
-	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
-	MMU_CLEAR_REFERENCE(mmu_obj, m);
-}
-
 void
 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
     vm_size_t len, vm_offset_t src_addr)
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 7e63c6caa57b..180e5fd909b4 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -2164,25 +2164,6 @@ pmap_clear_modify(vm_page_t m)
 	rw_wunlock(&tte_list_global_lock);
 }
 
-void
-pmap_clear_reference(vm_page_t m)
-{
-	struct tte *tp;
-	u_long data;
-
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-	    ("pmap_clear_reference: page %p is not managed", m));
-	rw_wlock(&tte_list_global_lock);
-	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
-		if ((tp->tte_data & TD_PV) == 0)
-			continue;
-		data = atomic_clear_long(&tp->tte_data, TD_REF);
-		if ((data & TD_REF) != 0)
-			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
-	}
-	rw_wunlock(&tte_list_global_lock);
-}
-
 void
 pmap_remove_write(vm_page_t m)
 {
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 911298f9534c..0c45e338227f 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -104,7 +104,6 @@ void pmap_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
     vm_size_t);
 void	pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
 void	pmap_clear_modify(vm_page_t m);
-void	pmap_clear_reference(vm_page_t m);
 void	pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
 void	pmap_copy_page(vm_page_t, vm_page_t);
 void	pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset,