diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 49a632cedac1..799a27a1504f 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -4240,6 +4240,27 @@ pmap_page_wired_mappings(vm_page_t m)
 	return (count);
 }
 
+/*
+ * Returns true if the given page is mapped individually or as part of
+ * a 2mpage.  Otherwise, returns false.
+ */
+bool
+pmap_page_is_mapped(vm_page_t m)
+{
+	struct rwlock *lock;
+	bool rv;
+
+	if ((m->oflags & VPO_UNMANAGED) != 0)
+		return (false);
+	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+	rw_rlock(lock);
+	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+	    ((m->flags & PG_FICTITIOUS) == 0 &&
+	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
+	rw_runlock(lock);
+	return (rv);
+}
+
 /*
  * Destroy all managed, non-wired mappings in the given user-space
  * pmap.  This pmap cannot be active on any processor besides the
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
index 2b1446a5475e..d691d3de7b37 100644
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -150,6 +150,7 @@ vm_paddr_t pmap_kextract(vm_offset_t va);
 void	pmap_kremove(vm_offset_t);
 void	pmap_kremove_device(vm_offset_t, vm_size_t);
 void	*pmap_mapdev_attr(vm_offset_t pa, vm_size_t size, vm_memattr_t ma);
+bool	pmap_page_is_mapped(vm_page_t m);
 bool	pmap_ps_enabled(pmap_t pmap);
 
 void	*pmap_mapdev(vm_offset_t, vm_size_t);
@@ -167,8 +168,6 @@ int	pmap_fault(pmap_t, uint64_t, uint64_t);
 
 struct pcb *pmap_switch(struct thread *, struct thread *);
 
-#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
-
 static inline int
 pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
 {
diff --git a/sys/riscv/include/pmap.h b/sys/riscv/include/pmap.h
index 4acdb9490f51..4c7e6ce79183 100644
--- a/sys/riscv/include/pmap.h
+++ b/sys/riscv/include/pmap.h
@@ -146,6 +146,7 @@ void	pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
 vm_paddr_t pmap_kextract(vm_offset_t va);
 void	pmap_kremove(vm_offset_t);
 void	pmap_kremove_device(vm_offset_t, vm_size_t);
+bool	pmap_page_is_mapped(vm_page_t m);
 bool	pmap_ps_enabled(pmap_t);
 
 void	*pmap_mapdev(vm_offset_t, vm_size_t);
@@ -159,8 +160,6 @@ void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
 bool	pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
     pt_entry_t **);
 
-#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
-
 int	pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t);
 
 static inline int
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index b2bc6ea02ffd..4de5f48509c1 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -3541,6 +3541,27 @@ pmap_page_wired_mappings(vm_page_t m)
 	return (count);
 }
 
+/*
+ * Returns true if the given page is mapped individually or as part of
+ * a 2mpage.  Otherwise, returns false.
+ */
+bool
+pmap_page_is_mapped(vm_page_t m)
+{
+	struct rwlock *lock;
+	bool rv;
+
+	if ((m->oflags & VPO_UNMANAGED) != 0)
+		return (false);
+	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+	rw_rlock(lock);
+	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+	    ((m->flags & PG_FICTITIOUS) == 0 &&
+	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
+	rw_runlock(lock);
+	return (rv);
+}
+
 static void
 pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entry_t pv,
     struct spglist *free, bool superpage)
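
On arm64 and riscv, the removed pmap_page_is_mapped() macro tested only the
page's own pv_list.  A page mapped solely as part of a 2MB superpage keeps its
pv entry in the superpage's pv head (pa_to_pvh()), not in m->md.pv_list, so
such a page was wrongly reported as unmapped.  The replacement function takes
the page's pv list lock and checks both lists, mirroring the amd64
implementation.  A minimal sketch of a caller follows; the assertion site and
function name are hypothetical, and only pmap_page_is_mapped() itself comes
from this diff:

	/*
	 * Hypothetical MI-layer check: a managed page must have no
	 * remaining mappings before it is freed.  With the old macro,
	 * a page mapped only through a 2MB superpage would pass this
	 * assertion incorrectly, because its own pv_list is empty.
	 */
	static void
	example_assert_unmapped(vm_page_t m)
	{
		KASSERT(!pmap_page_is_mapped(m),
		    ("%s: page %p is still mapped", __func__, m));
	}

Note the two guards in the new function: unmanaged pages (VPO_UNMANAGED) carry
no pv entries at all, and fictitious pages (PG_FICTITIOUS) have no pa_to_pvh()
entry, so the superpage pv head is consulted only for ordinary managed pages.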