From 96f57c313dddffcf41ecda79d57fa221faa52c53 Mon Sep 17 00:00:00 2001
From: Brandon Bergren
Date: Sun, 13 Sep 2020 16:42:49 +0000
Subject: [PATCH] [PowerPC64] Implement pmap_mincore() for moea64

Implement pmap_mincore() for moea64.

This will need some slight tweaks when large page support in HPT lands.

Submitted by:	Fernando Eckhardt Valle
Reviewed by:	bdragon
Differential Revision:	https://reviews.freebsd.org/D26314
---
 sys/powerpc/aim/mmu_oea64.c | 49 +++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index af651f01671a..0ca704a6baf7 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -57,6 +57,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -72,6 +73,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 
+#include
 #include
 #include
 #include
@@ -315,6 +317,7 @@ static void *moea64_dump_pmap_init(unsigned blkpgs);
 #ifdef __powerpc64__
 static void moea64_page_array_startup(long);
 #endif
+static int moea64_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
 
 static struct pmap_funcs moea64_methods = {
 	.clear_modify = moea64_clear_modify,
@@ -331,6 +334,7 @@ static struct pmap_funcs moea64_methods = {
 	.is_referenced = moea64_is_referenced,
 	.ts_referenced = moea64_ts_referenced,
 	.map = moea64_map,
+	.mincore = moea64_mincore,
 	.page_exists_quick = moea64_page_exists_quick,
 	.page_init = moea64_page_init,
 	.page_wired_mappings = moea64_page_wired_mappings,
@@ -1221,6 +1225,51 @@ moea64_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 	PMAP_UNLOCK(pm);
 }
 
+static int
+moea64_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
+{
+	struct pvo_entry *pvo;
+	vm_paddr_t pa;
+	vm_page_t m;
+	int val;
+	bool managed;
+
+	PMAP_LOCK(pmap);
+
+	/* XXX Add support for superpages */
+	pvo = moea64_pvo_find_va(pmap, addr);
+	if (pvo != NULL) {
+		pa = PVO_PADDR(pvo);
+		m = PHYS_TO_VM_PAGE(pa);
+		managed = (pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED;
+		val = MINCORE_INCORE;
+	} else {
+		PMAP_UNLOCK(pmap);
+		return (0);
+	}
+
+	PMAP_UNLOCK(pmap);
+
+	if (m == NULL)
+		return (0);
+
+	if (managed) {
+		if (moea64_is_modified(m))
+			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+
+		if (moea64_is_referenced(m))
+			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+	}
+
+	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
+	    managed) {
+		*pap = pa;
+	}
+
+	return (val);
+}
+
 /*
  * This goes through and sets the physical address of our
  * special scratch PTE to the PA we want to zero or copy. Because
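
For anyone testing the change, below is a minimal userland sketch (not part of the patch; the program and its exact output format are illustrative only) that drives the new code through the mincore(2) system call. On a moea64 kernel with this patch applied, the per-page flags it prints are the ones assembled by moea64_mincore() above; the program itself is machine-independent.

/*
 * Usage sketch only: map one anonymous page, dirty it, and ask the
 * kernel about it with mincore(2).  The MINCORE_* values come from
 * <sys/mman.h>; everything else is standard libc.
 */
#include <sys/mman.h>

#include <err.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t pagesz = (size_t)getpagesize();
	char *p, vec;

	/* One anonymous page; writing to it creates a managed mapping. */
	p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	memset(p, 0xa5, pagesz);

	if (mincore(p, pagesz, &vec) != 0)
		err(1, "mincore");

	printf("incore:     %d\n", (vec & MINCORE_INCORE) != 0);
	printf("referenced: %d\n",
	    (vec & (MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER)) != 0);
	printf("modified:   %d\n",
	    (vec & (MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER)) != 0);

	munmap(p, pagesz);
	return (0);
}

Note the convention on *pap: as in the other pmap_mincore() implementations, the physical address is handed back only for managed mappings whose referenced/modified state is not already fully known, so the MI mincore code can finish the check against the page's other mappings.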