powerpc/mmu: Convert PowerPC pmap drivers to ifunc from kobj
With IFUNC support in the kernel, we can finally get rid of our
poor-man's ifunc for pmap, which was built on kobj. Since moea64 also
uses a second-tier kobj for its own private methods, this adds a second
pmap install function (pmap_mmu_init()) to perform pmap "post-install,
pre-bootstrap" initialization before the IFUNCs get initialized.

Reviewed by:	bdragon
parent 64cc3b0c28
commit 45b69dd63e
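For readers new to the pattern: the conversion replaces kobj's runtime
method lookup with a plain function-pointer table that the selected pmap
driver installs once during early boot, after which each pmap entry
point binds directly to the installed function through an ifunc
resolver, so steady-state calls pay no dispatch cost. Below is a minimal
userland sketch of the idea (GNU C ifunc attribute, ELF targets). The
names struct pmap_funcs and pmap_mmu_init() mirror this diff; everything
else (installed_mmu, the resolver, the oea_* stand-ins) is illustrative
scaffolding, not the literal sys/powerpc implementation. In particular,
the kernel installs the table before it processes its ifunc relocations,
which this sketch can only emulate with a static initializer, since
userland resolvers run before main().

/*
 * Sketch of the kobj -> ifunc conversion pattern; illustrative only.
 * Build: cc -o ifunc_demo ifunc_demo.c (GCC/clang on an ELF platform).
 */
#include <stdio.h>

struct pmap_funcs {
	void	(*kenter)(unsigned long va, unsigned long pa);
};

static void
oea_kenter(unsigned long va, unsigned long pa)
{
	printf("oea: map va %#lx -> pa %#lx\n", va, pa);
}

static const struct pmap_funcs oea_funcs = {
	.kenter = oea_kenter,
};

/*
 * In the kernel this pointer is set by the driver-install path
 * ("post-install, pre-bootstrap") before ifunc relocations are
 * processed; the static initializer stands in for that here.
 */
static const struct pmap_funcs *installed_mmu = &oea_funcs;

/* Resolver: runs once, returns the function pmap_kenter binds to. */
static void (*resolve_pmap_kenter(void))(unsigned long, unsigned long)
{
	return (installed_mmu->kenter);
}
void pmap_kenter(unsigned long, unsigned long)
    __attribute__((ifunc("resolve_pmap_kenter")));

int
main(void)
{
	pmap_kenter(0xc0000000UL, 0x2000UL);	/* direct call, no lookup */
	return (0);
}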
@@ -135,7 +135,6 @@ powerpc/aim/aim_machdep.c optional aim
 powerpc/aim/mmu_oea.c optional aim powerpc
 powerpc/aim/mmu_oea64.c optional aim
 powerpc/aim/mmu_radix.c optional aim powerpc64
-powerpc/aim/moea64_if.m optional aim
 powerpc/aim/moea64_native.c optional aim
 powerpc/aim/mp_cpudep.c optional aim
 powerpc/aim/slb.c optional aim powerpc64
@@ -260,7 +259,6 @@ powerpc/powerpc/iommu_if.m standard
 powerpc/powerpc/machdep.c standard
 powerpc/powerpc/mem.c optional mem
 powerpc/powerpc/minidump_machdep.c optional powerpc64
-powerpc/powerpc/mmu_if.m standard
 powerpc/powerpc/mp_machdep.c optional smp
 powerpc/powerpc/nexus.c standard
 powerpc/powerpc/openpic.c standard
@@ -149,8 +149,6 @@ __FBSDID("$FreeBSD$");
 #include <machine/mmuvar.h>
 #include <machine/trap.h>
 
-#include "mmu_if.h"
-
 #define MOEA_DEBUG
 
 #define TODO	panic("%s: not implemented", __func__);
@@ -267,125 +265,123 @@ static int moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
 static void	moea_syncicache(vm_paddr_t, vm_size_t);
 static boolean_t	moea_query_bit(vm_page_t, int);
 static u_int	moea_clear_bit(vm_page_t, int);
-static void	moea_kremove(mmu_t, vm_offset_t);
+static void	moea_kremove(vm_offset_t);
 int		moea_pte_spill(vm_offset_t);
 
 /*
  * Kernel MMU interface
  */
-void moea_clear_modify(mmu_t, vm_page_t);
-void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
-void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+void moea_clear_modify(vm_page_t);
+void moea_copy_page(vm_page_t, vm_page_t);
+void moea_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
+int moea_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
     int8_t);
-void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
+void moea_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);
-void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
-vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
-vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
-void moea_init(mmu_t);
-boolean_t moea_is_modified(mmu_t, vm_page_t);
-boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
-boolean_t moea_is_referenced(mmu_t, vm_page_t);
-int moea_ts_referenced(mmu_t, vm_page_t);
-vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
-boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
-void moea_page_init(mmu_t, vm_page_t);
-int moea_page_wired_mappings(mmu_t, vm_page_t);
-void moea_pinit(mmu_t, pmap_t);
-void moea_pinit0(mmu_t, pmap_t);
-void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
-void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
-void moea_qremove(mmu_t, vm_offset_t, int);
-void moea_release(mmu_t, pmap_t);
-void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void moea_remove_all(mmu_t, vm_page_t);
-void moea_remove_write(mmu_t, vm_page_t);
-void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void moea_zero_page(mmu_t, vm_page_t);
-void moea_zero_page_area(mmu_t, vm_page_t, int, int);
-void moea_activate(mmu_t, struct thread *);
-void moea_deactivate(mmu_t, struct thread *);
-void moea_cpu_bootstrap(mmu_t, int);
-void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
-void *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t);
-void *moea_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
-void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
-vm_paddr_t moea_kextract(mmu_t, vm_offset_t);
-void moea_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
-void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t);
-void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
-boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
-static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
-void moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va);
-void moea_scan_init(mmu_t mmu);
-vm_offset_t moea_quick_enter_page(mmu_t mmu, vm_page_t m);
-void moea_quick_remove_page(mmu_t mmu, vm_offset_t addr);
-boolean_t moea_page_is_mapped(mmu_t mmu, vm_page_t m);
-static int moea_map_user_ptr(mmu_t mmu, pmap_t pm,
+void moea_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+vm_paddr_t moea_extract(pmap_t, vm_offset_t);
+vm_page_t moea_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
+void moea_init(void);
+boolean_t moea_is_modified(vm_page_t);
+boolean_t moea_is_prefaultable(pmap_t, vm_offset_t);
+boolean_t moea_is_referenced(vm_page_t);
+int moea_ts_referenced(vm_page_t);
+vm_offset_t moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+boolean_t moea_page_exists_quick(pmap_t, vm_page_t);
+void moea_page_init(vm_page_t);
+int moea_page_wired_mappings(vm_page_t);
+int moea_pinit(pmap_t);
+void moea_pinit0(pmap_t);
+void moea_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
+void moea_qenter(vm_offset_t, vm_page_t *, int);
+void moea_qremove(vm_offset_t, int);
+void moea_release(pmap_t);
+void moea_remove(pmap_t, vm_offset_t, vm_offset_t);
+void moea_remove_all(vm_page_t);
+void moea_remove_write(vm_page_t);
+void moea_unwire(pmap_t, vm_offset_t, vm_offset_t);
+void moea_zero_page(vm_page_t);
+void moea_zero_page_area(vm_page_t, int, int);
+void moea_activate(struct thread *);
+void moea_deactivate(struct thread *);
+void moea_cpu_bootstrap(int);
+void moea_bootstrap(vm_offset_t, vm_offset_t);
+void *moea_mapdev(vm_paddr_t, vm_size_t);
+void *moea_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
+void moea_unmapdev(vm_offset_t, vm_size_t);
+vm_paddr_t moea_kextract(vm_offset_t);
+void moea_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t);
+void moea_kenter(vm_offset_t, vm_paddr_t);
+void moea_page_set_memattr(vm_page_t m, vm_memattr_t ma);
+boolean_t moea_dev_direct_mapped(vm_paddr_t, vm_size_t);
+static void moea_sync_icache(pmap_t, vm_offset_t, vm_size_t);
+void moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
+void moea_scan_init(void);
+vm_offset_t moea_quick_enter_page(vm_page_t m);
+void moea_quick_remove_page(vm_offset_t addr);
+boolean_t moea_page_is_mapped(vm_page_t m);
+static int moea_map_user_ptr(pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
-static int moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+static int moea_decode_kernel_ptr(vm_offset_t addr,
     int *is_user, vm_offset_t *decoded_addr);
 
 
-static mmu_method_t moea_methods[] = {
-	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
-	MMUMETHOD(mmu_copy_page,	moea_copy_page),
-	MMUMETHOD(mmu_copy_pages,	moea_copy_pages),
-	MMUMETHOD(mmu_enter,		moea_enter),
-	MMUMETHOD(mmu_enter_object,	moea_enter_object),
-	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
-	MMUMETHOD(mmu_extract,		moea_extract),
-	MMUMETHOD(mmu_extract_and_hold,	moea_extract_and_hold),
-	MMUMETHOD(mmu_init,		moea_init),
-	MMUMETHOD(mmu_is_modified,	moea_is_modified),
-	MMUMETHOD(mmu_is_prefaultable,	moea_is_prefaultable),
-	MMUMETHOD(mmu_is_referenced,	moea_is_referenced),
-	MMUMETHOD(mmu_ts_referenced,	moea_ts_referenced),
-	MMUMETHOD(mmu_map,		moea_map),
-	MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
-	MMUMETHOD(mmu_page_init,	moea_page_init),
-	MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
-	MMUMETHOD(mmu_pinit,		moea_pinit),
-	MMUMETHOD(mmu_pinit0,		moea_pinit0),
-	MMUMETHOD(mmu_protect,		moea_protect),
-	MMUMETHOD(mmu_qenter,		moea_qenter),
-	MMUMETHOD(mmu_qremove,		moea_qremove),
-	MMUMETHOD(mmu_release,		moea_release),
-	MMUMETHOD(mmu_remove,		moea_remove),
-	MMUMETHOD(mmu_remove_all,	moea_remove_all),
-	MMUMETHOD(mmu_remove_write,	moea_remove_write),
-	MMUMETHOD(mmu_sync_icache,	moea_sync_icache),
-	MMUMETHOD(mmu_unwire,		moea_unwire),
-	MMUMETHOD(mmu_zero_page,	moea_zero_page),
-	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
-	MMUMETHOD(mmu_activate,		moea_activate),
-	MMUMETHOD(mmu_deactivate,	moea_deactivate),
-	MMUMETHOD(mmu_page_set_memattr,	moea_page_set_memattr),
-	MMUMETHOD(mmu_quick_enter_page, moea_quick_enter_page),
-	MMUMETHOD(mmu_quick_remove_page, moea_quick_remove_page),
-	MMUMETHOD(mmu_page_is_mapped,	moea_page_is_mapped),
+static struct pmap_funcs moea_methods = {
+	.clear_modify = moea_clear_modify,
+	.copy_page = moea_copy_page,
+	.copy_pages = moea_copy_pages,
+	.enter = moea_enter,
+	.enter_object = moea_enter_object,
+	.enter_quick = moea_enter_quick,
+	.extract = moea_extract,
+	.extract_and_hold = moea_extract_and_hold,
+	.init = moea_init,
+	.is_modified = moea_is_modified,
+	.is_prefaultable = moea_is_prefaultable,
+	.is_referenced = moea_is_referenced,
+	.ts_referenced = moea_ts_referenced,
+	.map = moea_map,
+	.page_exists_quick = moea_page_exists_quick,
+	.page_init = moea_page_init,
+	.page_wired_mappings = moea_page_wired_mappings,
+	.pinit = moea_pinit,
+	.pinit0 = moea_pinit0,
+	.protect = moea_protect,
+	.qenter = moea_qenter,
+	.qremove = moea_qremove,
+	.release = moea_release,
+	.remove = moea_remove,
+	.remove_all = moea_remove_all,
+	.remove_write = moea_remove_write,
+	.sync_icache = moea_sync_icache,
+	.unwire = moea_unwire,
+	.zero_page = moea_zero_page,
+	.zero_page_area = moea_zero_page_area,
+	.activate = moea_activate,
+	.deactivate = moea_deactivate,
+	.page_set_memattr = moea_page_set_memattr,
+	.quick_enter_page = moea_quick_enter_page,
+	.quick_remove_page = moea_quick_remove_page,
+	.page_is_mapped = moea_page_is_mapped,
 
 	/* Internal interfaces */
-	MMUMETHOD(mmu_bootstrap,	moea_bootstrap),
-	MMUMETHOD(mmu_cpu_bootstrap,	moea_cpu_bootstrap),
-	MMUMETHOD(mmu_mapdev_attr,	moea_mapdev_attr),
-	MMUMETHOD(mmu_mapdev,		moea_mapdev),
-	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
-	MMUMETHOD(mmu_kextract,		moea_kextract),
-	MMUMETHOD(mmu_kenter,		moea_kenter),
-	MMUMETHOD(mmu_kenter_attr,	moea_kenter_attr),
-	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
-	MMUMETHOD(mmu_scan_init,	moea_scan_init),
-	MMUMETHOD(mmu_dumpsys_map,	moea_dumpsys_map),
-	MMUMETHOD(mmu_map_user_ptr,	moea_map_user_ptr),
-	MMUMETHOD(mmu_decode_kernel_ptr, moea_decode_kernel_ptr),
-
-	{ 0, 0 }
+	.bootstrap = moea_bootstrap,
+	.cpu_bootstrap = moea_cpu_bootstrap,
+	.mapdev_attr = moea_mapdev_attr,
+	.mapdev = moea_mapdev,
+	.unmapdev = moea_unmapdev,
+	.kextract = moea_kextract,
+	.kenter = moea_kenter,
+	.kenter_attr = moea_kenter_attr,
+	.dev_direct_mapped = moea_dev_direct_mapped,
+	.dumpsys_pa_init = moea_scan_init,
+	.dumpsys_map_chunk = moea_dumpsys_map,
+	.map_user_ptr = moea_map_user_ptr,
+	.decode_kernel_ptr = moea_decode_kernel_ptr,
 };
 
-MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
+MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods);
 
 static __inline uint32_t
 moea_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
@@ -627,7 +623,7 @@ om_cmp(const void *a, const void *b)
 }
 
 void
-moea_cpu_bootstrap(mmu_t mmup, int ap)
+moea_cpu_bootstrap(int ap)
 {
 	u_int sdr;
 	int i;
@@ -665,7 +661,7 @@ moea_cpu_bootstrap(mmu_t mmup, int ap)
 }
 
 void
-moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
+moea_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
 {
 	ihandle_t mmui;
 	phandle_t chosen, mmu;
@@ -921,7 +917,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 			/* Enter the pages */
 			for (off = 0; off < translations[i].om_len;
 			    off += PAGE_SIZE)
-				moea_kenter(mmup, translations[i].om_va + off,
+				moea_kenter(translations[i].om_va + off,
 				    translations[i].om_pa + off);
 		}
 	}
@@ -933,7 +929,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 		;
 	Maxmem = powerpc_btop(phys_avail[i + 1]);
 
-	moea_cpu_bootstrap(mmup,0);
+	moea_cpu_bootstrap(0);
 	mtmsr(mfmsr() | PSL_DR | PSL_IR);
 	pmap_bootstrapped++;
 
@@ -954,7 +950,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 	thread0.td_kstack = va;
 	thread0.td_kstack_pages = kstack_pages;
 	for (i = 0; i < kstack_pages; i++) {
-		moea_kenter(mmup, va, pa);
+		moea_kenter(va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
@@ -967,7 +963,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 	va = virtual_avail;
 	virtual_avail += round_page(msgbufsize);
 	while (va < virtual_avail) {
-		moea_kenter(mmup, va, pa);
+		moea_kenter(va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
@@ -980,7 +976,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 	va = virtual_avail;
 	virtual_avail += DPCPU_SIZE;
 	while (va < virtual_avail) {
-		moea_kenter(mmup, va, pa);
+		moea_kenter(va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
@@ -992,7 +988,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
  * space can be accessed in any way.
  */
 void
-moea_activate(mmu_t mmu, struct thread *td)
+moea_activate(struct thread *td)
 {
 	pmap_t	pm, pmr;
 
@@ -1010,7 +1006,7 @@ moea_activate(mmu_t mmu, struct thread *td)
 }
 
 void
-moea_deactivate(mmu_t mmu, struct thread *td)
+moea_deactivate(struct thread *td)
 {
 	pmap_t	pm;
 
@@ -1020,7 +1016,7 @@ moea_deactivate(mmu_t mmu, struct thread *td)
 }
 
 void
-moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+moea_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
 	struct pvo_entry key, *pvo;
 
@@ -1038,7 +1034,7 @@ moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 }
 
 void
-moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
+moea_copy_page(vm_page_t msrc, vm_page_t mdst)
 {
 	vm_offset_t	dst;
 	vm_offset_t	src;
@@ -1050,7 +1046,7 @@ moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
 }
 
 void
-moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+moea_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
 {
 	void *a_cp, *b_cp;
@@ -1077,7 +1073,7 @@ moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
  * Zero a page of physical memory by temporarily mapping it into the tlb.
  */
 void
-moea_zero_page(mmu_t mmu, vm_page_t m)
+moea_zero_page(vm_page_t m)
 {
 	vm_offset_t off, pa = VM_PAGE_TO_PHYS(m);
 
@@ -1086,7 +1082,7 @@ moea_zero_page(mmu_t mmu, vm_page_t m)
 }
 
 void
-moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
+moea_zero_page_area(vm_page_t m, int off, int size)
 {
 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
 	void *va = (void *)(pa + off);
@@ -1095,19 +1091,19 @@ moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
 }
 
 vm_offset_t
-moea_quick_enter_page(mmu_t mmu, vm_page_t m)
+moea_quick_enter_page(vm_page_t m)
 {
 
 	return (VM_PAGE_TO_PHYS(m));
 }
 
 void
-moea_quick_remove_page(mmu_t mmu, vm_offset_t addr)
+moea_quick_remove_page(vm_offset_t addr)
 {
 }
 
 boolean_t
-moea_page_is_mapped(mmu_t mmu, vm_page_t m)
+moea_page_is_mapped(vm_page_t m)
 {
 	return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
 }
@@ -1118,7 +1114,7 @@ moea_page_is_mapped(mmu_t mmu, vm_page_t m)
  * will be wired down.
  */
 int
-moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+moea_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     u_int flags, int8_t psind)
 {
 	int error;
@@ -1216,7 +1212,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
  * corresponding offset from m_start are mapped.
  */
 void
-moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
+moea_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
     vm_page_t m_start, vm_prot_t prot)
 {
 	vm_page_t m;
@@ -1239,7 +1235,7 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
 }
 
 void
-moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
+moea_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m,
     vm_prot_t prot)
 {
 
@@ -1252,7 +1248,7 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
 }
 
 vm_paddr_t
-moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
+moea_extract(pmap_t pm, vm_offset_t va)
 {
 	struct	pvo_entry *pvo;
 	vm_paddr_t pa;
@@ -1273,7 +1269,7 @@ moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
  * protection.
 */
 vm_page_t
-moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+moea_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
 	struct	pvo_entry *pvo;
 	vm_page_t m;
@@ -1293,7 +1289,7 @@ moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 }
 
 void
-moea_init(mmu_t mmu)
+moea_init()
 {
 
 	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
@@ -1306,7 +1302,7 @@ moea_init(mmu_t mmu)
 }
 
 boolean_t
-moea_is_referenced(mmu_t mmu, vm_page_t m)
+moea_is_referenced(vm_page_t m)
 {
 	boolean_t rv;
 
@@ -1319,7 +1315,7 @@ moea_is_referenced(mmu_t mmu, vm_page_t m)
 }
 
 boolean_t
-moea_is_modified(mmu_t mmu, vm_page_t m)
+moea_is_modified(vm_page_t m)
 {
 	boolean_t rv;
 
@@ -1339,7 +1335,7 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
 }
 
 boolean_t
-moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+moea_is_prefaultable(pmap_t pmap, vm_offset_t va)
 {
 	struct pvo_entry *pvo;
 	boolean_t rv;
@@ -1352,7 +1348,7 @@ moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 }
 
 void
-moea_clear_modify(mmu_t mmu, vm_page_t m)
+moea_clear_modify(vm_page_t m)
 {
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
@@ -1370,7 +1366,7 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
  * Clear the write and modified bits in each of the given page's mappings.
  */
 void
-moea_remove_write(mmu_t mmu, vm_page_t m)
+moea_remove_write(vm_page_t m)
 {
 	struct	pvo_entry *pvo;
 	struct	pte *pt;
@@ -1425,7 +1421,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
  * optimal aging of shared pages.
 */
 int
-moea_ts_referenced(mmu_t mmu, vm_page_t m)
+moea_ts_referenced(vm_page_t m)
 {
 	int count;
 
@@ -1441,7 +1437,7 @@ moea_ts_referenced(mmu_t mmu, vm_page_t m)
  * Modify the WIMG settings of all mappings for a page.
 */
 void
-moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
+moea_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
	struct	pvo_entry *pvo;
	struct	pvo_head *pvo_head;
@@ -1481,14 +1477,14 @@ moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
  * Map a wired page into kernel virtual address space.
  */
 void
-moea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
+moea_kenter(vm_offset_t va, vm_paddr_t pa)
 {
 
-	moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
+	moea_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
 }
 
 void
-moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
+moea_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 {
 	u_int		pte_lo;
 	int		error;
@@ -1517,7 +1513,7 @@ moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
  * address.
 */
 vm_paddr_t
-moea_kextract(mmu_t mmu, vm_offset_t va)
+moea_kextract(vm_offset_t va)
 {
 	struct	pvo_entry *pvo;
 	vm_paddr_t pa;
@@ -1541,10 +1537,10 @@ moea_kextract(mmu_t mmu, vm_offset_t va)
  * Remove a wired page from kernel virtual address space.
 */
 void
-moea_kremove(mmu_t mmu, vm_offset_t va)
+moea_kremove(vm_offset_t va)
 {
 
-	moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
+	moea_remove(kernel_pmap, va, va + PAGE_SIZE);
 }
 
 /*
@@ -1553,7 +1549,7 @@ moea_kremove(mmu_t mmu, vm_offset_t va)
  * called in this thread. This is used internally in copyin/copyout.
 */
 int
-moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
+moea_map_user_ptr(pmap_t pm, volatile const void *uaddr,
     void **kaddr, size_t ulen, size_t *klen)
 {
 	size_t l;
@@ -1592,7 +1588,7 @@ moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
  * address space.
 */
 static int
-moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+moea_decode_kernel_ptr(vm_offset_t addr, int *is_user,
     vm_offset_t *decoded_addr)
 {
 	vm_offset_t user_sr;
@@ -1621,7 +1617,7 @@ moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
  * first usable address after the mapped region.
 */
 vm_offset_t
-moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
+moea_map(vm_offset_t *virt, vm_paddr_t pa_start,
     vm_paddr_t pa_end, int prot)
 {
 	vm_offset_t	sva, va;
@@ -1629,7 +1625,7 @@ moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
 	sva = *virt;
 	va = sva;
 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
-		moea_kenter(mmu, va, pa_start);
+		moea_kenter(va, pa_start);
 	*virt = va;
 	return (sva);
 }
@@ -1642,7 +1638,7 @@ moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
  * subset of pmaps for proper page aging.
 */
 boolean_t
-moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
+moea_page_exists_quick(pmap_t pmap, vm_page_t m)
 {
 	int loops;
 	struct pvo_entry *pvo;
@@ -1666,7 +1662,7 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 }
 
 void
-moea_page_init(mmu_t mmu __unused, vm_page_t m)
+moea_page_init(vm_page_t m)
 {
 
 	m->md.mdpg_attrs = 0;
@@ -1679,7 +1675,7 @@ moea_page_init(mmu_t mmu __unused, vm_page_t m)
  * that are wired.
 */
 int
-moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
+moea_page_wired_mappings(vm_page_t m)
 {
 	struct	pvo_entry *pvo;
 	int count;
@@ -1697,8 +1693,8 @@ moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
 
 static u_int	moea_vsidcontext;
 
-void
-moea_pinit(mmu_t mmu, pmap_t pmap)
+int
+moea_pinit(pmap_t pmap)
 {
 	int	i, mask;
 	u_int	entropy;
@@ -1708,7 +1704,7 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
 	entropy = 0;
 	__asm __volatile("mftb %0" : "=r"(entropy));
 
-	if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap))
+	if ((pmap->pmap_phys = (pmap_t)moea_kextract((vm_offset_t)pmap))
 	    == NULL) {
 		pmap->pmap_phys = pmap;
 	}
@@ -1752,7 +1748,7 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
 		for (i = 0; i < 16; i++)
 			pmap->pm_sr[i] = VSID_MAKE(i, hash);
 		mtx_unlock(&moea_vsid_mutex);
-		return;
+		return (1);
 	}
 
 	mtx_unlock(&moea_vsid_mutex);
@@ -1763,11 +1759,11 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
  * Initialize the pmap associated with process 0.
 */
 void
-moea_pinit0(mmu_t mmu, pmap_t pm)
+moea_pinit0(pmap_t pm)
 {
 
 	PMAP_LOCK_INIT(pm);
-	moea_pinit(mmu, pm);
+	moea_pinit(pm);
 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
 }
 
@@ -1775,7 +1771,7 @@ moea_pinit0(mmu_t mmu, pmap_t pm)
  * Set the physical protection on the specified range of this map as requested.
 */
 void
-moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
+moea_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
     vm_prot_t prot)
 {
 	struct	pvo_entry *pvo, *tpvo, key;
@@ -1785,7 +1781,7 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
 	    ("moea_protect: non current pmap"));
 
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
-		moea_remove(mmu, pm, sva, eva);
+		moea_remove(pm, sva, eva);
 		return;
 	}
 
@@ -1825,13 +1821,13 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
  * references recorded. Existing mappings in the region are overwritten.
 */
 void
-moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
+moea_qenter(vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va;
 
 	va = sva;
 	while (count-- > 0) {
-		moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
+		moea_kenter(va, VM_PAGE_TO_PHYS(*m));
 		va += PAGE_SIZE;
 		m++;
 	}
@@ -1842,19 +1838,19 @@ moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
  * temporary mappings entered by moea_qenter.
 */
 void
-moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
+moea_qremove(vm_offset_t sva, int count)
 {
 	vm_offset_t va;
 
 	va = sva;
 	while (count-- > 0) {
-		moea_kremove(mmu, va);
+		moea_kremove(va);
 		va += PAGE_SIZE;
 	}
 }
 
 void
-moea_release(mmu_t mmu, pmap_t pmap)
+moea_release(pmap_t pmap)
 {
 	int idx, mask;
 
@@ -1876,7 +1872,7 @@ moea_release(mmu_t mmu, pmap_t pmap)
  * Remove the given range of addresses from the specified map.
 */
 void
-moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+moea_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
 	struct	pvo_entry *pvo, *tpvo, key;
 
@@ -1897,7 +1893,7 @@ moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
  * will reflect changes in pte's back to the vm_page.
 */
 void
-moea_remove_all(mmu_t mmu, vm_page_t m)
+moea_remove_all(vm_page_t m)
 {
 	struct pvo_head *pvo_head;
 	struct pvo_entry *pvo, *next_pvo;
@@ -2600,7 +2596,7 @@ moea_bat_mapped(int idx, vm_paddr_t pa, vm_size_t size)
 }
 
 boolean_t
-moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
+moea_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
 {
 	int i;
 
@@ -2623,14 +2619,14 @@ moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
  * NOT real memory.
 */
 void *
-moea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
+moea_mapdev(vm_paddr_t pa, vm_size_t size)
 {
 
-	return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
+	return (moea_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
 }
 
 void *
-moea_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
+moea_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 {
 	vm_offset_t va, tmpva, ppa, offset;
 	int i;
@@ -2654,7 +2650,7 @@ moea_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 		panic("moea_mapdev: Couldn't alloc kernel virtual memory");
 
 	for (tmpva = va; size > 0;) {
-		moea_kenter_attr(mmu, tmpva, ppa, ma);
+		moea_kenter_attr(tmpva, ppa, ma);
 		tlbie(tmpva);
 		size -= PAGE_SIZE;
 		tmpva += PAGE_SIZE;
@@ -2665,7 +2661,7 @@ moea_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 }
 
 void
-moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
+moea_unmapdev(vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t base, offset;
 
@@ -2682,7 +2678,7 @@ moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
 }
 
 static void
-moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+moea_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
 	struct pvo_entry *pvo;
 	vm_offset_t lim;
@@ -2706,7 +2702,7 @@ moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 }
 
 void
-moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
+moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
 {
 
 	*va = (void *)pa;
@@ -2715,7 +2711,7 @@ moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
 
 void
-moea_scan_init(mmu_t mmu)
+moea_scan_init()
 {
 	struct pvo_entry *pvo;
 	vm_offset_t va;
 
[File diff suppressed because it is too large]
@@ -32,6 +32,7 @@
 
 #include "opt_pmap.h"
 
+#include <vm/vm_extern.h>
 #include <machine/mmuvar.h>
 
 struct dump_context {
@@ -40,7 +41,7 @@ struct dump_context {
 	size_t blksz;
 };
 
-extern mmu_def_t oea64_mmu;
+extern const struct mmu_kobj oea64_mmu;
 
 /*
  * Helper routines
@@ -69,13 +70,36 @@ void moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte);
  * moea64_late_bootstrap();
 */
 
-void moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
+void moea64_early_bootstrap(vm_offset_t kernelstart,
     vm_offset_t kernelend);
-void moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
+void moea64_mid_bootstrap(vm_offset_t kernelstart,
     vm_offset_t kernelend);
-void moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
+void moea64_late_bootstrap(vm_offset_t kernelstart,
     vm_offset_t kernelend);
 
+int64_t moea64_pte_replace(struct pvo_entry *, int);
+int64_t moea64_pte_insert(struct pvo_entry *);
+int64_t moea64_pte_unset(struct pvo_entry *);
+int64_t moea64_pte_clear(struct pvo_entry *, uint64_t);
+int64_t moea64_pte_synch(struct pvo_entry *);
+
+
+typedef int64_t (*moea64_pte_replace_t)(struct pvo_entry *, int);
+typedef int64_t (*moea64_pte_insert_t)(struct pvo_entry *);
+typedef int64_t (*moea64_pte_unset_t)(struct pvo_entry *);
+typedef int64_t (*moea64_pte_clear_t)(struct pvo_entry *, uint64_t);
+typedef int64_t (*moea64_pte_synch_t)(struct pvo_entry *);
+
+struct moea64_funcs {
+	moea64_pte_replace_t	pte_replace;
+	moea64_pte_insert_t	pte_insert;
+	moea64_pte_unset_t	pte_unset;
+	moea64_pte_clear_t	pte_clear;
+	moea64_pte_synch_t	pte_synch;
+};
+
+extern struct moea64_funcs *moea64_ops;
+
 static inline uint64_t
 moea64_pte_vpn_from_pvo_vpn(const struct pvo_entry *pvo)
 {
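The second-tier dispatch the commit message mentions is visible in the
hunk above: moea64's private pte_* methods move from the moea64_if.m
kobj interface to the plain moea64_funcs table, with moea64_ops pointing
at whichever backend is installed. Below is a hedged sketch of how such
a table would be filled and consumed; only the type and variable names
come from this diff, while the trimmed struct, the "native" stand-in,
and the install/dispatch bodies are illustrative guesses at the pattern,
not the verbatim implementation.

/*
 * Illustrative use of a moea64_funcs-style table; stand-in code only.
 */
#include <stddef.h>
#include <stdint.h>

struct pvo_entry;			/* opaque in this sketch */

typedef int64_t (*moea64_pte_insert_t)(struct pvo_entry *);

struct moea64_funcs {			/* trimmed to one slot */
	moea64_pte_insert_t	pte_insert;
};

/* Backend-specific implementation (stand-in for a native backend). */
static int64_t
moea64_native_pte_insert(struct pvo_entry *pvo)
{
	(void)pvo;			/* hardware page-table insert here */
	return (0);
}

static struct moea64_funcs moea64_native_funcs = {
	.pte_insert = moea64_native_pte_insert,
};

struct moea64_funcs *moea64_ops;

/* Backend install hook: record the ops table before bootstrap. */
static void
moea64_native_install(void)
{
	moea64_ops = &moea64_native_funcs;
}

/* Shared-layer wrapper: one indirect call replaces a kobj lookup. */
static int64_t
moea64_pte_insert_dispatch(struct pvo_entry *pvo)
{
	return (moea64_ops->pte_insert(pvo));
}

int
main(void)
{
	moea64_native_install();
	return ((int)moea64_pte_insert_dispatch(NULL));	/* returns 0 */
}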
@@ -406,152 +406,145 @@ static u_int64_t KPTphys;	/* phys addr of kernel level 1 */
 static vm_offset_t qframe = 0;
 static struct mtx qframe_mtx;
 
-void mmu_radix_activate(mmu_t mmu, struct thread *);
-void mmu_radix_advise(mmu_t mmu, pmap_t, vm_offset_t, vm_offset_t, int);
-void mmu_radix_align_superpage(mmu_t mmu, vm_object_t, vm_ooffset_t, vm_offset_t *,
+void mmu_radix_activate(struct thread *);
+void mmu_radix_advise(pmap_t, vm_offset_t, vm_offset_t, int);
+void mmu_radix_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
     vm_size_t);
-void mmu_radix_clear_modify(mmu_t, vm_page_t);
-void mmu_radix_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
-int mmu_radix_map_user_ptr(mmu_t mmu, pmap_t pm,
+void mmu_radix_clear_modify(vm_page_t);
+void mmu_radix_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
+int mmu_radix_map_user_ptr(pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
-int mmu_radix_decode_kernel_ptr(mmu_t, vm_offset_t, int *, vm_offset_t *);
-int mmu_radix_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t);
-void mmu_radix_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
+int mmu_radix_decode_kernel_ptr(vm_offset_t, int *, vm_offset_t *);
+int mmu_radix_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t);
+void mmu_radix_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);
-void mmu_radix_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
-vm_paddr_t mmu_radix_extract(mmu_t, pmap_t pmap, vm_offset_t va);
-vm_page_t mmu_radix_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
-void mmu_radix_kenter(mmu_t, vm_offset_t, vm_paddr_t);
-vm_paddr_t mmu_radix_kextract(mmu_t, vm_offset_t);
-void mmu_radix_kremove(mmu_t, vm_offset_t);
-boolean_t mmu_radix_is_modified(mmu_t, vm_page_t);
-boolean_t mmu_radix_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
-boolean_t mmu_radix_is_referenced(mmu_t, vm_page_t);
-void mmu_radix_object_init_pt(mmu_t, pmap_t, vm_offset_t, vm_object_t,
+void mmu_radix_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+vm_paddr_t mmu_radix_extract(pmap_t pmap, vm_offset_t va);
+vm_page_t mmu_radix_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
+void mmu_radix_kenter(vm_offset_t, vm_paddr_t);
+vm_paddr_t mmu_radix_kextract(vm_offset_t);
+void mmu_radix_kremove(vm_offset_t);
+boolean_t mmu_radix_is_modified(vm_page_t);
+boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t);
+boolean_t mmu_radix_is_referenced(vm_page_t);
+void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t,
     vm_pindex_t, vm_size_t);
-boolean_t mmu_radix_page_exists_quick(mmu_t, pmap_t, vm_page_t);
-void mmu_radix_page_init(mmu_t, vm_page_t);
-boolean_t mmu_radix_page_is_mapped(mmu_t, vm_page_t m);
-void mmu_radix_page_set_memattr(mmu_t, vm_page_t, vm_memattr_t);
-int mmu_radix_page_wired_mappings(mmu_t, vm_page_t);
-void mmu_radix_pinit(mmu_t, pmap_t);
-void mmu_radix_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
-boolean_t mmu_radix_ps_enabled(mmu_t, pmap_t);
-void mmu_radix_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
-void mmu_radix_qremove(mmu_t, vm_offset_t, int);
-vm_offset_t mmu_radix_quick_enter_page(mmu_t, vm_page_t);
-void mmu_radix_quick_remove_page(mmu_t, vm_offset_t);
-boolean_t mmu_radix_ts_referenced(mmu_t, vm_page_t);
-void mmu_radix_release(mmu_t, pmap_t);
-void mmu_radix_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void mmu_radix_remove_all(mmu_t, vm_page_t);
-void mmu_radix_remove_pages(mmu_t, pmap_t);
-void mmu_radix_remove_write(mmu_t, vm_page_t);
-void mmu_radix_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void mmu_radix_zero_page(mmu_t, vm_page_t);
-void mmu_radix_zero_page_area(mmu_t, vm_page_t, int, int);
-int mmu_radix_change_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
-void mmu_radix_page_array_startup(mmu_t mmu, long pages);
+boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t);
+void mmu_radix_page_init(vm_page_t);
+boolean_t mmu_radix_page_is_mapped(vm_page_t m);
+void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t);
+int mmu_radix_page_wired_mappings(vm_page_t);
+int mmu_radix_pinit(pmap_t);
+void mmu_radix_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
+bool mmu_radix_ps_enabled(pmap_t);
+void mmu_radix_qenter(vm_offset_t, vm_page_t *, int);
+void mmu_radix_qremove(vm_offset_t, int);
+vm_offset_t mmu_radix_quick_enter_page(vm_page_t);
+void mmu_radix_quick_remove_page(vm_offset_t);
+boolean_t mmu_radix_ts_referenced(vm_page_t);
+void mmu_radix_release(pmap_t);
+void mmu_radix_remove(pmap_t, vm_offset_t, vm_offset_t);
+void mmu_radix_remove_all(vm_page_t);
+void mmu_radix_remove_pages(pmap_t);
+void mmu_radix_remove_write(vm_page_t);
+void mmu_radix_unwire(pmap_t, vm_offset_t, vm_offset_t);
+void mmu_radix_zero_page(vm_page_t);
+void mmu_radix_zero_page_area(vm_page_t, int, int);
+int mmu_radix_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
+void mmu_radix_page_array_startup(long pages);
 
 #include "mmu_oea64.h"
-#include "mmu_if.h"
-#include "moea64_if.h"
 
 /*
  * Kernel MMU interface
  */
 
-static void	mmu_radix_bootstrap(mmu_t mmup,
-		    vm_offset_t kernelstart, vm_offset_t kernelend);
+static void	mmu_radix_bootstrap(vm_offset_t, vm_offset_t);
 
-static void mmu_radix_copy_page(mmu_t, vm_page_t, vm_page_t);
-static void mmu_radix_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+static void mmu_radix_copy_page(vm_page_t, vm_page_t);
+static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-static void mmu_radix_growkernel(mmu_t, vm_offset_t);
-static void mmu_radix_init(mmu_t);
-static int mmu_radix_mincore(mmu_t, pmap_t, vm_offset_t, vm_paddr_t *);
-static vm_offset_t mmu_radix_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
-static void mmu_radix_pinit0(mmu_t, pmap_t);
+static void mmu_radix_growkernel(vm_offset_t);
+static void mmu_radix_init(void);
+static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
+static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+static void mmu_radix_pinit0(pmap_t);
 
-static void *mmu_radix_mapdev(mmu_t, vm_paddr_t, vm_size_t);
-static void *mmu_radix_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
-static void mmu_radix_unmapdev(mmu_t, vm_offset_t, vm_size_t);
-static void mmu_radix_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
-static boolean_t mmu_radix_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
-static void mmu_radix_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
-    void **va);
-static void mmu_radix_scan_init(mmu_t mmu);
-static void mmu_radix_cpu_bootstrap(mmu_t, int ap);
-static void mmu_radix_tlbie_all(mmu_t);
+static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t);
+static void *mmu_radix_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
+static void mmu_radix_unmapdev(vm_offset_t, vm_size_t);
+static void mmu_radix_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma);
+static boolean_t mmu_radix_dev_direct_mapped(vm_paddr_t, vm_size_t);
+static void mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
+static void mmu_radix_scan_init(void);
+static void mmu_radix_cpu_bootstrap(int ap);
+static void mmu_radix_tlbie_all(void);
 
-static mmu_method_t mmu_radix_methods[] = {
-	MMUMETHOD(mmu_bootstrap,	mmu_radix_bootstrap),
-	MMUMETHOD(mmu_copy_page,	mmu_radix_copy_page),
-	MMUMETHOD(mmu_copy_pages,	mmu_radix_copy_pages),
-	MMUMETHOD(mmu_cpu_bootstrap,	mmu_radix_cpu_bootstrap),
-	MMUMETHOD(mmu_growkernel,	mmu_radix_growkernel),
-	MMUMETHOD(mmu_init,		mmu_radix_init),
-	MMUMETHOD(mmu_map,		mmu_radix_map),
-	MMUMETHOD(mmu_mincore,		mmu_radix_mincore),
-	MMUMETHOD(mmu_pinit,		mmu_radix_pinit),
-	MMUMETHOD(mmu_pinit0,		mmu_radix_pinit0),
+static struct pmap_funcs mmu_radix_methods = {
+	.bootstrap = mmu_radix_bootstrap,
+	.copy_page = mmu_radix_copy_page,
+	.copy_pages = mmu_radix_copy_pages,
+	.cpu_bootstrap = mmu_radix_cpu_bootstrap,
+	.growkernel = mmu_radix_growkernel,
+	.init = mmu_radix_init,
+	.map = mmu_radix_map,
+	.mincore = mmu_radix_mincore,
+	.pinit = mmu_radix_pinit,
+	.pinit0 = mmu_radix_pinit0,
 
-	MMUMETHOD(mmu_mapdev,		mmu_radix_mapdev),
-	MMUMETHOD(mmu_mapdev_attr,	mmu_radix_mapdev_attr),
-	MMUMETHOD(mmu_unmapdev,		mmu_radix_unmapdev),
-	MMUMETHOD(mmu_kenter_attr,	mmu_radix_kenter_attr),
-	MMUMETHOD(mmu_dev_direct_mapped,mmu_radix_dev_direct_mapped),
-	MMUMETHOD(mmu_scan_init,	mmu_radix_scan_init),
-	MMUMETHOD(mmu_dumpsys_map,	mmu_radix_dumpsys_map),
-	MMUMETHOD(mmu_page_is_mapped,	mmu_radix_page_is_mapped),
-	MMUMETHOD(mmu_ps_enabled,	mmu_radix_ps_enabled),
-	MMUMETHOD(mmu_object_init_pt,	mmu_radix_object_init_pt),
-	MMUMETHOD(mmu_protect,		mmu_radix_protect),
+	.mapdev = mmu_radix_mapdev,
+	.mapdev_attr = mmu_radix_mapdev_attr,
+	.unmapdev = mmu_radix_unmapdev,
+	.kenter_attr = mmu_radix_kenter_attr,
+	.dev_direct_mapped = mmu_radix_dev_direct_mapped,
+	.dumpsys_pa_init = mmu_radix_scan_init,
+	.dumpsys_map_chunk = mmu_radix_dumpsys_map,
+	.page_is_mapped = mmu_radix_page_is_mapped,
+	.ps_enabled = mmu_radix_ps_enabled,
+	.object_init_pt = mmu_radix_object_init_pt,
+	.protect = mmu_radix_protect,
 	/* pmap dispatcher interface */
-	MMUMETHOD(mmu_clear_modify,	mmu_radix_clear_modify),
-	MMUMETHOD(mmu_copy,		mmu_radix_copy),
-	MMUMETHOD(mmu_enter,		mmu_radix_enter),
-	MMUMETHOD(mmu_enter_object,	mmu_radix_enter_object),
-	MMUMETHOD(mmu_enter_quick,	mmu_radix_enter_quick),
-	MMUMETHOD(mmu_extract,		mmu_radix_extract),
-	MMUMETHOD(mmu_extract_and_hold,	mmu_radix_extract_and_hold),
-	MMUMETHOD(mmu_is_modified,	mmu_radix_is_modified),
-	MMUMETHOD(mmu_is_prefaultable,	mmu_radix_is_prefaultable),
-	MMUMETHOD(mmu_is_referenced,	mmu_radix_is_referenced),
-	MMUMETHOD(mmu_ts_referenced,	mmu_radix_ts_referenced),
-	MMUMETHOD(mmu_page_exists_quick,mmu_radix_page_exists_quick),
-	MMUMETHOD(mmu_page_init,	mmu_radix_page_init),
-	MMUMETHOD(mmu_page_wired_mappings, mmu_radix_page_wired_mappings),
-	MMUMETHOD(mmu_qenter,		mmu_radix_qenter),
-	MMUMETHOD(mmu_qremove,		mmu_radix_qremove),
-	MMUMETHOD(mmu_release,		mmu_radix_release),
-	MMUMETHOD(mmu_remove,		mmu_radix_remove),
-	MMUMETHOD(mmu_remove_all,	mmu_radix_remove_all),
-	MMUMETHOD(mmu_remove_write,	mmu_radix_remove_write),
-	MMUMETHOD(mmu_unwire,		mmu_radix_unwire),
-	MMUMETHOD(mmu_zero_page,	mmu_radix_zero_page),
-	MMUMETHOD(mmu_zero_page_area,	mmu_radix_zero_page_area),
-	MMUMETHOD(mmu_activate,		mmu_radix_activate),
-	MMUMETHOD(mmu_quick_enter_page, mmu_radix_quick_enter_page),
-	MMUMETHOD(mmu_quick_remove_page, mmu_radix_quick_remove_page),
-	MMUMETHOD(mmu_page_set_memattr,	mmu_radix_page_set_memattr),
-	MMUMETHOD(mmu_page_array_startup,	mmu_radix_page_array_startup),
+	.clear_modify = mmu_radix_clear_modify,
+	.copy = mmu_radix_copy,
+	.enter = mmu_radix_enter,
+	.enter_object = mmu_radix_enter_object,
+	.enter_quick = mmu_radix_enter_quick,
+	.extract = mmu_radix_extract,
+	.extract_and_hold = mmu_radix_extract_and_hold,
+	.is_modified = mmu_radix_is_modified,
+	.is_prefaultable = mmu_radix_is_prefaultable,
+	.is_referenced = mmu_radix_is_referenced,
+	.ts_referenced = mmu_radix_ts_referenced,
+	.page_exists_quick = mmu_radix_page_exists_quick,
+	.page_init = mmu_radix_page_init,
+	.page_wired_mappings = mmu_radix_page_wired_mappings,
+	.qenter = mmu_radix_qenter,
+	.qremove = mmu_radix_qremove,
+	.release = mmu_radix_release,
+	.remove = mmu_radix_remove,
+	.remove_all = mmu_radix_remove_all,
+	.remove_write = mmu_radix_remove_write,
+	.unwire = mmu_radix_unwire,
+	.zero_page = mmu_radix_zero_page,
+	.zero_page_area = mmu_radix_zero_page_area,
+	.activate = mmu_radix_activate,
+	.quick_enter_page = mmu_radix_quick_enter_page,
+	.quick_remove_page = mmu_radix_quick_remove_page,
+	.page_set_memattr = mmu_radix_page_set_memattr,
+	.page_array_startup = mmu_radix_page_array_startup,
 
 	/* Internal interfaces */
-	MMUMETHOD(mmu_kenter,		mmu_radix_kenter),
-	MMUMETHOD(mmu_kextract,		mmu_radix_kextract),
-	MMUMETHOD(mmu_kremove,		mmu_radix_kremove),
-	MMUMETHOD(mmu_change_attr,	mmu_radix_change_attr),
-	MMUMETHOD(mmu_map_user_ptr,	mmu_radix_map_user_ptr),
-	MMUMETHOD(mmu_decode_kernel_ptr, mmu_radix_decode_kernel_ptr),
+	.kenter = mmu_radix_kenter,
+	.kextract = mmu_radix_kextract,
+	.kremove = mmu_radix_kremove,
+	.change_attr = mmu_radix_change_attr,
+	.map_user_ptr = mmu_radix_map_user_ptr,
+	.decode_kernel_ptr = mmu_radix_decode_kernel_ptr,
 
-	MMUMETHOD(mmu_tlbie_all,	mmu_radix_tlbie_all),
-	{ 0, 0 }
+	.tlbie_all = mmu_radix_tlbie_all,
 };
 
-MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods, 0);
-
-#define METHODVOID(m) mmu_radix_ ## m(mmu_t mmup)
+MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods);
 
 static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
     struct rwlock **lockp);
@@ -778,7 +771,7 @@ mmu_radix_tlbiel_flush(int scope)
 }
 
 static void
-mmu_radix_tlbie_all(mmu_t __unused mmu)
+mmu_radix_tlbie_all()
 {
 	/* TODO: LPID invalidate */
 	mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
@@ -907,7 +900,7 @@ kvtopte(vm_offset_t va)
 }
 
 void
-mmu_radix_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
+mmu_radix_kenter(vm_offset_t va, vm_paddr_t pa)
 {
 	pt_entry_t *pte;
 
@@ -917,8 +910,8 @@ mmu_radix_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
 	    RPTE_EAA_P | PG_M | PG_A;
 }
 
-boolean_t
-mmu_radix_ps_enabled(mmu_t mmu, pmap_t pmap)
+bool
+mmu_radix_ps_enabled(pmap_t pmap)
 {
 	return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
 }
@@ -1025,7 +1018,7 @@ pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags)
  * a 2mpage. Otherwise, returns FALSE.
 */
 boolean_t
-mmu_radix_page_is_mapped(mmu_t mmu, vm_page_t m)
+mmu_radix_page_is_mapped(vm_page_t m)
 {
 	struct rwlock *lock;
 	boolean_t rv;
@@ -2036,7 +2029,7 @@ mmu_radix_early_bootstrap(vm_offset_t start, vm_offset_t end)
 }
 
 static void
-mmu_radix_late_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t end)
+mmu_radix_late_bootstrap(vm_offset_t start, vm_offset_t end)
 {
 	int		i;
 	vm_paddr_t	pa;
@@ -2079,7 +2072,7 @@ mmu_radix_late_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t end)
 	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
 	thread0.td_kstack = va;
 	for (i = 0; i < kstack_pages; i++) {
-		mmu_radix_kenter(mmu, va, pa);
+		mmu_radix_kenter(va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
 	}
@@ -2190,7 +2183,7 @@ mmu_radix_proctab_init(void)
 }
 
 void
-mmu_radix_advise(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
     int advice)
 {
 	struct rwlock *lock;
@@ -2304,7 +2297,7 @@ mmu_radix_advise(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
  * Routines used in machine-dependent code
 */
 static void
-mmu_radix_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t end)
+mmu_radix_bootstrap(vm_offset_t start, vm_offset_t end)
 {
 	uint64_t lpcr;
 
@@ -2328,7 +2321,7 @@ mmu_radix_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t end)
 	/* XXX assume CPU_FTR_HVMODE */
 	mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
 
-	mmu_radix_late_bootstrap(mmu, start, end);
+	mmu_radix_late_bootstrap(start, end);
 	numa_mem_regions(&numa_pregions, &numa_pregions_sz);
 	if (bootverbose)
 		printf("%s done\n", __func__);
@@ -2337,7 +2330,7 @@ mmu_radix_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t end)
 }
 
 static void
-mmu_radix_cpu_bootstrap(mmu_t mmu, int ap)
+mmu_radix_cpu_bootstrap(int ap)
 {
 	uint64_t lpcr;
 	uint64_t ptcr;
@@ -2382,7 +2375,7 @@ SYSCTL_ULONG(_vm_pmap_l2e, OID_AUTO, demotions, CTLFLAG_RD,
     &pmap_l2e_demotions, 0, "1GB page demotions");
 
 void
-mmu_radix_clear_modify(mmu_t mmu, vm_page_t m)
+mmu_radix_clear_modify(vm_page_t m)
 {
 	struct md_page *pvh;
 	pmap_t pmap;
@@ -2477,7 +2470,7 @@ mmu_radix_clear_modify(mmu_t mmu, vm_page_t m)
 }
 
 void
-mmu_radix_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
+mmu_radix_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
 {
 	struct rwlock *lock;
@@ -2634,7 +2627,7 @@ mmu_radix_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr
 }
 
 static void
-mmu_radix_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
+mmu_radix_copy_page(vm_page_t msrc, vm_page_t mdst)
 {
 	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
 	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
@@ -2647,7 +2640,7 @@ mmu_radix_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
 }
 
 static void
-mmu_radix_copy_pages(mmu_t mmu, vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+mmu_radix_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
 {
 
@@ -2767,7 +2760,7 @@ pmap_promote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va,
 #endif /* VM_NRESERVLEVEL > 0 */
 
 int
-mmu_radix_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
+mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, u_int flags, int8_t psind)
 {
 	struct rwlock *lock;
@@ -3058,7 +3051,7 @@ mmu_radix_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 	 * populated, then attempt promotion.
 	 */
 	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
-	    mmu_radix_ps_enabled(mmu, pmap) &&
+	    mmu_radix_ps_enabled(pmap) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0 &&
 	    pmap_promote_l3e(pmap, l3e, va, &lock) == 0)
@@ -3225,7 +3218,7 @@ pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags,
 }
 
 void
-mmu_radix_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
+mmu_radix_enter_object(pmap_t pmap, vm_offset_t start,
    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
 {
 
@@ -3248,7 +3241,7 @@ mmu_radix_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		va = start + ptoa(diff);
 		if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end &&
-		    m->psind == 1 && mmu_radix_ps_enabled(mmu, pmap) &&
+		    m->psind == 1 && mmu_radix_ps_enabled(pmap) &&
 		    pmap_enter_2mpage(pmap, va, m, prot, &lock))
 			m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1];
 		else
@@ -3372,7 +3365,7 @@ mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 }
 
 void
-mmu_radix_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
+mmu_radix_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
 {
 	struct rwlock *lock;
@@ -3392,7 +3385,7 @@ mmu_radix_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 }
 
 vm_paddr_t
-mmu_radix_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+mmu_radix_extract(pmap_t pmap, vm_offset_t va)
 {
 	pml3_entry_t *l3e;
 	pt_entry_t *pte;
@@ -3424,7 +3417,7 @@ mmu_radix_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 }
 
 vm_page_t
-mmu_radix_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+mmu_radix_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
 	pml3_entry_t l3e, *l3ep;
 	pt_entry_t pte;
@@ -3455,7 +3448,7 @@ mmu_radix_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t pro
 }
 
 static void
-mmu_radix_growkernel(mmu_t mmu, vm_offset_t addr)
+mmu_radix_growkernel(vm_offset_t addr)
 {
 	vm_paddr_t paddr;
 	vm_page_t nkpg;
@@ -3480,7 +3473,7 @@ mmu_radix_growkernel(mmu_t mmu, vm_offset_t addr)
 			if (nkpg == NULL)
 				panic("pmap_growkernel: no memory to grow kernel");
 			if ((nkpg->flags & PG_ZERO) == 0)
-				mmu_radix_zero_page(mmu, nkpg);
+				mmu_radix_zero_page(nkpg);
 			paddr = VM_PAGE_TO_PHYS(nkpg);
 			pde_store(l2e, paddr);
 			continue; /* try again */
@@ -3501,7 +3494,7 @@ mmu_radix_growkernel(mmu_t mmu, vm_offset_t addr)
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
 		if ((nkpg->flags & PG_ZERO) == 0)
-			mmu_radix_zero_page(mmu, nkpg);
+			mmu_radix_zero_page(nkpg);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
 		pde_store(l3e, paddr);
 
@@ -3559,7 +3552,7 @@ radix_pgd_release(void *arg __unused, void **store, int count)
 }
 
 static void
-mmu_radix_init(mmu_t mmu)
+mmu_radix_init()
 {
 	vm_page_t mpte;
 	vm_size_t s;
@@ -3726,7 +3719,7 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
  * in any physical maps.
 */
 boolean_t
-mmu_radix_is_modified(mmu_t mmu, vm_page_t m)
+mmu_radix_is_modified(vm_page_t m)
 {
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
@@ -3742,7 +3735,7 @@ mmu_radix_is_modified(mmu_t mmu, vm_page_t m)
 }
 
 boolean_t
-mmu_radix_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
+mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 {
 	pml3_entry_t *l3e;
 	pt_entry_t *pte;
@@ -3761,7 +3754,7 @@ mmu_radix_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
 }
 
 boolean_t
-mmu_radix_is_referenced(mmu_t mmu, vm_page_t m)
+mmu_radix_is_referenced(vm_page_t m)
 {
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
@@ -3790,7 +3783,7 @@ mmu_radix_is_referenced(mmu_t mmu, vm_page_t m)
  * released.
 */
 boolean_t
-mmu_radix_ts_referenced(mmu_t mmu, vm_page_t m)
+mmu_radix_ts_referenced(vm_page_t m)
 {
 	struct md_page *pvh;
 	pv_entry_t pv, pvf;
@@ -3928,7 +3921,7 @@ mmu_radix_ts_referenced(mmu_t mmu, vm_page_t m)
 }
 
 static vm_offset_t
-mmu_radix_map(mmu_t mmu, vm_offset_t *virt __unused, vm_paddr_t start,
+mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start,
    vm_paddr_t end, int prot __unused)
 {
 
@@ -3938,7 +3931,7 @@ mmu_radix_map(mmu_t mmu, vm_offset_t *virt __unused, vm_paddr_t start,
 }
 
 void
-mmu_radix_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
+mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
 {
 	pml3_entry_t *l3e;
@@ -3953,7 +3946,7 @@ mmu_radix_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
 	    ("pmap_object_init_pt: non-device object"));
 	/* NB: size can be logically ored with addr here */
 	if ((addr & L3_PAGE_MASK) == 0 && (size & L3_PAGE_MASK) == 0) {
-		if (!mmu_radix_ps_enabled(mmu, pmap))
+		if (!mmu_radix_ps_enabled(pmap))
 			return;
 		if (!vm_object_populate(object, pindex, pindex + atop(size)))
 			return;
@@ -4023,7 +4016,7 @@ mmu_radix_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
 }
 
 boolean_t
-mmu_radix_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
+mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m)
 {
 	struct md_page *pvh;
 	struct rwlock *lock;
@@ -4063,7 +4056,7 @@ mmu_radix_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 }
 
 void
-mmu_radix_page_init(mmu_t mmu, vm_page_t m)
+mmu_radix_page_init(vm_page_t m)
 {
 
 	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
@@ -4072,7 +4065,7 @@ mmu_radix_page_init(mmu_t mmu, vm_page_t m)
 }
 
 int
-mmu_radix_page_wired_mappings(mmu_t mmu, vm_page_t m)
+mmu_radix_page_wired_mappings(vm_page_t m)
 {
 	struct rwlock *lock;
 	struct md_page *pvh;
@@ -4137,8 +4130,8 @@ mmu_radix_update_proctab(int pid, pml1_entry_t l1pa)
 	isa3_proctab[pid].proctab0 = htobe64(RTS_SIZE | l1pa | RADIX_PGD_INDEX_SHIFT);
 }
 
-void
-mmu_radix_pinit(mmu_t mmu, pmap_t pmap)
+int
+mmu_radix_pinit(pmap_t pmap)
 {
 	vmem_addr_t pid;
 	vm_paddr_t l1pa;
@@ -4162,6 +4155,8 @@ mmu_radix_pinit(mmu_t mmu, pmap_t pmap)
 	l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1);
 	mmu_radix_update_proctab(pid, l1pa);
 	__asm __volatile("ptesync;isync" : : : "memory");
+
+	return (1);
 }
 
 /*
@@ -4200,7 +4195,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
 		return (NULL);
 	}
 	if ((m->flags & PG_ZERO) == 0)
-		mmu_radix_zero_page(NULL, m);
+		mmu_radix_zero_page(m);
|
||||
|
||||
/*
|
||||
* Map the pagetable page into the process address space, if
|
||||
@ -4371,7 +4366,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
|
||||
}
|
||||
|
||||
static void
|
||||
mmu_radix_pinit0(mmu_t mmu, pmap_t pmap)
|
||||
mmu_radix_pinit0(pmap_t pmap)
|
||||
{
|
||||
|
||||
CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
|
||||
@ -4429,7 +4424,7 @@ pmap_protect_l3e(pmap_t pmap, pt_entry_t *l3e, vm_offset_t sva, vm_prot_t prot)
|
||||
}
|
||||
|
||||
void
|
||||
mmu_radix_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
|
||||
mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
|
||||
vm_prot_t prot)
|
||||
{
|
||||
vm_offset_t va_next;
|
||||
@ -4444,7 +4439,7 @@ mmu_radix_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
|
||||
|
||||
KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
|
||||
if (prot == VM_PROT_NONE) {
|
||||
mmu_radix_remove(mmu, pmap, sva, eva);
|
||||
mmu_radix_remove(pmap, sva, eva);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -4556,7 +4551,7 @@ mmu_radix_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
|
||||
}
|
||||
|
||||
void
|
||||
mmu_radix_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *ma, int count)
|
||||
mmu_radix_qenter(vm_offset_t sva, vm_page_t *ma, int count)
|
||||
{
|
||||
|
||||
CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, sva, ma, count);
|
||||
@ -4598,7 +4593,7 @@ mmu_radix_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *ma, int count)
|
||||
}
|
||||
|
||||
void
|
||||
mmu_radix_qremove(mmu_t mmu, vm_offset_t sva, int count)
|
||||
mmu_radix_qremove(vm_offset_t sva, int count)
|
||||
{
|
||||
vm_offset_t va;
|
||||
pt_entry_t *pte;
|
||||
@ -4749,7 +4744,7 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pml3_entry_t ptepde,
|
||||
}
|
||||
|
||||
void
|
||||
mmu_radix_release(mmu_t mmu, pmap_t pmap)
|
||||
mmu_radix_release(pmap_t pmap)
|
||||
{
|
||||
|
||||
CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
|
||||
@ -5112,7 +5107,7 @@ pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
|
||||
|
||||
|
||||
void
|
||||
mmu_radix_remove(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
||||
mmu_radix_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
||||
{
|
||||
struct rwlock *lock;
|
||||
vm_offset_t va_next;
|
||||
@ -5229,7 +5224,7 @@ mmu_radix_remove(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
||||
}
|
||||
|
||||
void
|
||||
mmu_radix_remove_all(mmu_t mmu, vm_page_t m)
|
||||
mmu_radix_remove_all(vm_page_t m)
|
||||
{
|
||||
struct md_page *pvh;
|
||||
pv_entry_t pv;
|
||||
@ -5337,7 +5332,7 @@ mmu_radix_remove_all(mmu_t mmu, vm_page_t m)
|
||||
*/
|
||||
|
||||
void
|
||||
mmu_radix_remove_pages(mmu_t mmu, pmap_t pmap)
|
||||
mmu_radix_remove_pages(pmap_t pmap)
|
||||
{
|
||||
|
||||
CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
|
||||
@ -5506,7 +5501,7 @@ mmu_radix_remove_pages(mmu_t mmu, pmap_t pmap)
|
||||
}
|
||||
|
||||
void
|
||||
mmu_radix_remove_write(mmu_t mmu, vm_page_t m)
|
||||
mmu_radix_remove_write(vm_page_t m)
|
||||
{
|
||||
struct md_page *pvh;
|
||||
pmap_t pmap;
|
||||
@ -5598,7 +5593,7 @@ mmu_radix_remove_write(mmu_t mmu, vm_page_t m)
|
||||
* function are not needed.
|
||||
*/
|
||||
void
|
||||
mmu_radix_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
||||
mmu_radix_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
||||
{
|
||||
vm_offset_t va_next;
|
||||
pml1_entry_t *l1e;
|
||||
@ -5670,7 +5665,7 @@ mmu_radix_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
||||
}
|
||||
|
||||
void
|
||||
mmu_radix_zero_page(mmu_t mmu, vm_page_t m)
|
||||
mmu_radix_zero_page(vm_page_t m)
|
||||
{
|
||||
vm_offset_t addr;
|
||||
|
||||
@ -5680,7 +5675,7 @@ mmu_radix_zero_page(mmu_t mmu, vm_page_t m)
|
||||
}
|
||||
|
||||
void
|
||||
mmu_radix_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
|
||||
mmu_radix_zero_page_area(vm_page_t m, int off, int size)
|
||||
{
|
||||
caddr_t addr;
|
||||
|
||||
@ -5694,8 +5689,7 @@ mmu_radix_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
|
||||
|
||||
|
||||
static int
|
||||
mmu_radix_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
|
||||
vm_paddr_t *locked_pa)
|
||||
mmu_radix_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
|
||||
{
|
||||
pml3_entry_t *l3ep;
|
||||
pt_entry_t pte;
|
||||
@ -5740,7 +5734,7 @@ mmu_radix_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
|
||||
}
|
||||
|
||||
void
|
||||
mmu_radix_activate(mmu_t mmu, struct thread *td)
|
||||
mmu_radix_activate(struct thread *td)
|
||||
{
|
||||
pmap_t pmap;
|
||||
uint32_t curpid;
|
||||
@ -5761,7 +5755,7 @@ mmu_radix_activate(mmu_t mmu, struct thread *td)
|
||||
* different alignment might result in more superpage mappings.
|
||||
*/
|
||||
void
|
||||
mmu_radix_align_superpage(mmu_t mmu, vm_object_t object, vm_ooffset_t offset,
|
||||
mmu_radix_align_superpage(vm_object_t object, vm_ooffset_t offset,
|
||||
vm_offset_t *addr, vm_size_t size)
|
||||
{
|
||||
|
||||
@ -5784,7 +5778,7 @@ mmu_radix_align_superpage(mmu_t mmu, vm_object_t object, vm_ooffset_t offset,
|
||||
}
|
||||
|
||||
static void *
|
||||
mmu_radix_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
|
||||
mmu_radix_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
|
||||
{
|
||||
vm_offset_t va, tmpva, ppa, offset;
|
||||
|
||||
@ -5803,7 +5797,7 @@ mmu_radix_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t att
|
||||
panic("%s: Couldn't alloc kernel virtual memory", __func__);
|
||||
|
||||
for (tmpva = va; size > 0;) {
|
||||
mmu_radix_kenter_attr(mmu, tmpva, ppa, attr);
|
||||
mmu_radix_kenter_attr(tmpva, ppa, attr);
|
||||
size -= PAGE_SIZE;
|
||||
tmpva += PAGE_SIZE;
|
||||
ppa += PAGE_SIZE;
|
||||
@ -5814,16 +5808,16 @@ mmu_radix_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t att
|
||||
}
|
||||
|
||||
static void *
|
||||
mmu_radix_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
|
||||
mmu_radix_mapdev(vm_paddr_t pa, vm_size_t size)
|
||||
{
|
||||
|
||||
CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
|
||||
|
||||
return (mmu_radix_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
|
||||
return (mmu_radix_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
|
||||
}
|
||||
|
||||
void
|
||||
mmu_radix_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
|
||||
mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma)
|
||||
{
|
||||
|
||||
CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
|
||||
@ -5835,13 +5829,13 @@ mmu_radix_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
|
||||
* required for data coherence.
|
||||
*/
|
||||
if ((m->flags & PG_FICTITIOUS) == 0 &&
|
||||
mmu_radix_change_attr(mmu, PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
|
||||
mmu_radix_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
|
||||
PAGE_SIZE, m->md.mdpg_cache_attrs))
|
||||
panic("memory attribute change on the direct map failed");
|
||||
}
|
||||
|
||||
static void
|
||||
mmu_radix_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
|
||||
mmu_radix_unmapdev(vm_offset_t va, vm_size_t size)
|
||||
{
|
||||
vm_offset_t offset;
|
||||
|
||||
@ -5929,7 +5923,7 @@ pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
|
||||
}
|
||||
|
||||
vm_paddr_t
|
||||
mmu_radix_kextract(mmu_t mmu, vm_offset_t va)
|
||||
mmu_radix_kextract(vm_offset_t va)
|
||||
{
|
||||
pml3_entry_t l3e;
|
||||
vm_paddr_t pa;
|
||||
@ -5980,7 +5974,7 @@ mmu_radix_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
|
||||
}
|
||||
|
||||
static void
|
||||
mmu_radix_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
|
||||
mmu_radix_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
|
||||
{
|
||||
pt_entry_t *pte, pteval;
|
||||
uint64_t cache_bits;
|
||||
@ -5993,7 +5987,7 @@ mmu_radix_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
|
||||
}
|
||||
|
||||
void
|
||||
mmu_radix_kremove(mmu_t mmu, vm_offset_t va)
|
||||
mmu_radix_kremove(vm_offset_t va)
|
||||
{
|
||||
pt_entry_t *pte;
|
||||
|
||||
@ -6003,7 +5997,7 @@ mmu_radix_kremove(mmu_t mmu, vm_offset_t va)
|
||||
pte_clear(pte);
|
||||
}
|
||||
|
||||
int mmu_radix_map_user_ptr(mmu_t mmu, pmap_t pm,
|
||||
int mmu_radix_map_user_ptr(pmap_t pm,
|
||||
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen)
|
||||
{
|
||||
if ((uintptr_t)uaddr + ulen >= VM_MAXUSER_ADDRESS)
|
||||
@ -6017,7 +6011,7 @@ int mmu_radix_map_user_ptr(mmu_t mmu, pmap_t pm,
|
||||
}
|
||||
|
||||
int
|
||||
mmu_radix_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
|
||||
mmu_radix_decode_kernel_ptr(vm_offset_t addr,
|
||||
int *is_user, vm_offset_t *decoded)
|
||||
{
|
||||
|
||||
@ -6028,7 +6022,7 @@ mmu_radix_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
|
||||
}
|
||||
|
||||
static boolean_t
|
||||
mmu_radix_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
|
||||
mmu_radix_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
|
||||
{
|
||||
|
||||
CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
|
||||
@ -6036,7 +6030,7 @@ mmu_radix_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
|
||||
}
|
||||
|
||||
static void
|
||||
mmu_radix_scan_init(mmu_t mmup)
|
||||
mmu_radix_scan_init()
|
||||
{
|
||||
|
||||
CTR1(KTR_PMAP, "%s()", __func__);
|
||||
@ -6044,7 +6038,7 @@ mmu_radix_scan_init(mmu_t mmup)
|
||||
}
|
||||
|
||||
static void
|
||||
mmu_radix_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
|
||||
mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz,
|
||||
void **va)
|
||||
{
|
||||
CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
|
||||
@ -6052,7 +6046,7 @@ mmu_radix_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
|
||||
}
|
||||
|
||||
vm_offset_t
|
||||
mmu_radix_quick_enter_page(mmu_t mmu, vm_page_t m)
|
||||
mmu_radix_quick_enter_page(vm_page_t m)
|
||||
{
|
||||
vm_paddr_t paddr;
|
||||
|
||||
@ -6062,7 +6056,7 @@ mmu_radix_quick_enter_page(mmu_t mmu, vm_page_t m)
|
||||
}
|
||||
|
||||
void
|
||||
mmu_radix_quick_remove_page(mmu_t mmu, vm_offset_t addr __unused)
|
||||
mmu_radix_quick_remove_page(vm_offset_t addr __unused)
|
||||
{
|
||||
/* no work to do here */
|
||||
CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
|
||||
@ -6075,7 +6069,7 @@ pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
|
||||
}
|
||||
|
||||
int
|
||||
mmu_radix_change_attr(mmu_t mmu, vm_offset_t va, vm_size_t size,
|
||||
mmu_radix_change_attr(vm_offset_t va, vm_size_t size,
|
||||
vm_memattr_t mode)
|
||||
{
|
||||
int error;
|
||||
@ -6301,7 +6295,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
|
||||
* attempting to back the vm_pages with domain-local memory.
|
||||
*/
|
||||
void
|
||||
mmu_radix_page_array_startup(mmu_t mmu, long pages)
|
||||
mmu_radix_page_array_startup(long pages)
|
||||
{
|
||||
#ifdef notyet
|
||||
pml2_entry_t *l2e;
|
||||
@ -6321,7 +6315,7 @@ mmu_radix_page_array_startup(mmu_t mmu, long pages)
|
||||
|
||||
pa = vm_phys_early_alloc(0, end - start);
|
||||
|
||||
start = mmu_radix_map(mmu, &start, pa, end - start, VM_MEMATTR_DEFAULT);
|
||||
start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT);
|
||||
#ifdef notyet
|
||||
/* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */
|
||||
for (va = start; va < end; va += L3_PAGE_SIZE) {
|
||||
|
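Note on the pattern above: every mmu_radix_* method loses its mmu_t first argument because the kobj lookup that argument fed no longer exists; with kernel IFUNC support the pmap entry points bind directly to the chosen driver's functions when ifunc relocations are resolved at boot. A minimal sketch of that mechanism in plain GNU C, independent of FreeBSD's actual DEFINE_IFUNC wrapper (the pmap_funcs/installed_funcs names here are illustrative stand-ins, not the commit's):

/*
 * Illustrative sketch only: ifunc-based dispatch.  The resolver runs
 * once, at ifunc relocation time; afterwards the call site is a
 * direct call into the installed driver, with no per-call lookup.
 */
#include <sys/types.h>

struct pmap_funcs {
	vm_paddr_t (*kextract)(vm_offset_t);	/* one pointer per method */
};

static struct pmap_funcs *installed_funcs;	/* set by the driver's install hook */

static vm_paddr_t (*pmap_kextract_resolver(void))(vm_offset_t)
{
	/* Whatever table the selected driver installed wins. */
	return (installed_funcs->kextract);
}

vm_paddr_t pmap_kextract(vm_offset_t va)
    __attribute__((ifunc("pmap_kextract_resolver")));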
@ -1,122 +0,0 @@
#-
# Copyright (c) 2010,2015 Nathan Whitehorn
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#


#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
* MOEA64 kobj methods for 64-bit Book-S page table
* manipulation routines used, for example, by hypervisors.
*/

INTERFACE moea64;
SINGLETON;

CODE {
static moea64_pte_replace_t moea64_pte_replace_default;

static int64_t moea64_pte_replace_default(mmu_t mmu,
struct pvo_entry *pvo, int flags)
{
int64_t refchg;

refchg = MOEA64_PTE_UNSET(mmu, pvo);
MOEA64_PTE_INSERT(mmu, pvo);

return (refchg);
}
}

/**
* Return ref/changed bits from PTE referenced by _pvo if _pvo is currently in
* the page table. Returns -1 if _pvo not currently present in the page table.
*/
METHOD int64_t pte_synch {
mmu_t _mmu;
struct pvo_entry *_pvo;
};

/**
* Clear bits ptebit (a mask) from the low word of the PTE referenced by
* _pvo. Return previous values of ref/changed bits or -1 if _pvo is not
* currently in the page table.
*/
METHOD int64_t pte_clear {
mmu_t _mmu;
struct pvo_entry *_pvo;
uint64_t _ptebit;
};

/**
* Invalidate the PTE referenced by _pvo, returning its ref/changed bits.
* Returns -1 if PTE not currently present in page table.
*/
METHOD int64_t pte_unset {
mmu_t _mmu;
struct pvo_entry *_pvo;
};

/**
* Update the reference PTE to correspond to the contents of _pvo. Has the
* same ref/changed semantics as pte_unset() (and should clear R/C bits). May
* change the PVO's location in the page table or return with it unmapped if
* PVO_WIRED is not set. By default, does unset() followed by insert().
*
* _flags is a bitmask describing what level of page invalidation should occur:
* 0 means no invalidation is required
* MOEA64_PTE_PROT_UPDATE signifies that the page protection bits are changing
* MOEA64_PTE_INVALIDATE requires an invalidation of the same strength as
* pte_unset() followed by pte_insert()
*/
METHOD int64_t pte_replace {
mmu_t _mmu;
struct pvo_entry *_pvo;
int _flags;
} DEFAULT moea64_pte_replace_default;

/**
* Insert a PTE corresponding to _pvo into the page table, returning any errors
* encountered and (optionally) setting the PVO slot value to some
* representation of where the entry was placed.
*
* Must not replace PTEs marked LPTE_WIRED. If an existing valid PTE is spilled,
* must synchronize ref/changed bits as in pte_unset().
*/
METHOD int pte_insert {
mmu_t _mmu;
struct pvo_entry *_pvo;
};
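The kobj interface deleted above supported per-method DEFAULT fallbacks (pte_replace falls back to unset-plus-insert). With a plain function-pointer struct, the equivalent idiom is a NULL check in the wrapper; a sketch under that assumption, reusing the struct moea64_funcs and moea64_ops names that appear later in this diff (the wrapper body itself is illustrative, not the commit's code):

/* Second-tier dispatch: moea64 common code calls the installed backend. */
struct moea64_funcs {
	int64_t	(*pte_replace)(struct pvo_entry *, int);
	int64_t	(*pte_unset)(struct pvo_entry *);
	int64_t	(*pte_insert)(struct pvo_entry *);
};

extern struct moea64_funcs *moea64_ops;

static int64_t
moea64_pte_replace(struct pvo_entry *pvo, int flags)
{
	int64_t refchg;

	/* A NULL member means no override: mirror the old kobj DEFAULT
	 * method by doing unset() followed by insert(). */
	if (moea64_ops->pte_replace != NULL)
		return (moea64_ops->pte_replace(pvo, flags));
	refchg = moea64_ops->pte_unset(pvo);
	moea64_ops->pte_insert(pvo);
	return (refchg);
}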
@ -121,8 +121,6 @@ __FBSDID("$FreeBSD$");
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

#define PTESYNC() __asm __volatile("ptesync");
#define TLBSYNC() __asm __volatile("tlbsync; ptesync");
@ -215,46 +213,56 @@ static volatile struct pate *moea64_part_table;
/*
* Dump function.
*/
static void *moea64_dump_pmap_native(mmu_t mmu, void *ctx, void *buf,
static void *moea64_dump_pmap_native(void *ctx, void *buf,
u_long *nbytes);

/*
* PTE calls.
*/
static int moea64_pte_insert_native(mmu_t, struct pvo_entry *);
static int64_t moea64_pte_synch_native(mmu_t, struct pvo_entry *);
static int64_t moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t);
static int64_t moea64_pte_replace_native(mmu_t, struct pvo_entry *, int);
static int64_t moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *);
static int64_t moea64_pte_insert_native(struct pvo_entry *);
static int64_t moea64_pte_synch_native(struct pvo_entry *);
static int64_t moea64_pte_clear_native(struct pvo_entry *, uint64_t);
static int64_t moea64_pte_replace_native(struct pvo_entry *, int);
static int64_t moea64_pte_unset_native(struct pvo_entry *);

/*
* Utility routines.
*/
static void moea64_bootstrap_native(mmu_t mmup,
static void moea64_bootstrap_native(
vm_offset_t kernelstart, vm_offset_t kernelend);
static void moea64_cpu_bootstrap_native(mmu_t, int ap);
static void moea64_cpu_bootstrap_native(int ap);
static void tlbia(void);
static void moea64_install_native(void);

static struct pmap_funcs moea64_native_methods = {
.install = moea64_install_native,

static mmu_method_t moea64_native_methods[] = {
/* Internal interfaces */
MMUMETHOD(mmu_bootstrap, moea64_bootstrap_native),
MMUMETHOD(mmu_cpu_bootstrap, moea64_cpu_bootstrap_native),
MMUMETHOD(mmu_dump_pmap, moea64_dump_pmap_native),

MMUMETHOD(moea64_pte_synch, moea64_pte_synch_native),
MMUMETHOD(moea64_pte_clear, moea64_pte_clear_native),
MMUMETHOD(moea64_pte_unset, moea64_pte_unset_native),
MMUMETHOD(moea64_pte_replace, moea64_pte_replace_native),
MMUMETHOD(moea64_pte_insert, moea64_pte_insert_native),

{ 0, 0 }
.bootstrap = moea64_bootstrap_native,
.cpu_bootstrap = moea64_cpu_bootstrap_native,
.dumpsys_dump_pmap = moea64_dump_pmap_native,
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
0, oea64_mmu);
static struct moea64_funcs moea64_native_funcs = {
.pte_synch = moea64_pte_synch_native,
.pte_clear = moea64_pte_clear_native,
.pte_unset = moea64_pte_unset_native,
.pte_replace = moea64_pte_replace_native,
.pte_insert = moea64_pte_insert_native,
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods, oea64_mmu);

static void
moea64_install_native()
{

/* Install the MOEA64 ops. */
moea64_ops = &moea64_native_funcs;
}

static int64_t
moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
moea64_pte_synch_native(struct pvo_entry *pvo)
{
volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
uint64_t ptelo, pvo_ptevpn;
@ -279,7 +287,7 @@ moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
}

static int64_t
moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
moea64_pte_clear_native(struct pvo_entry *pvo, uint64_t ptebit)
{
volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
struct lpte properpt;
@ -317,15 +325,15 @@ moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
critical_exit();
} else {
rw_runlock(&moea64_eviction_lock);
ptelo = moea64_pte_unset_native(mmu, pvo);
moea64_pte_insert_native(mmu, pvo);
ptelo = moea64_pte_unset_native(pvo);
moea64_pte_insert_native(pvo);
}

return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
moea64_pte_unset_native(struct pvo_entry *pvo)
{
volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
uint64_t ptelo, pvo_ptevpn;
@ -361,7 +369,7 @@ moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
}

static int64_t
moea64_pte_replace_inval_native(mmu_t mmu, struct pvo_entry *pvo,
moea64_pte_replace_inval_native(struct pvo_entry *pvo,
volatile struct lpte *pt)
{
struct lpte properpt;
@ -400,7 +408,7 @@ moea64_pte_replace_inval_native(mmu_t mmu, struct pvo_entry *pvo,
}

static int64_t
moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
moea64_pte_replace_native(struct pvo_entry *pvo, int flags)
{
volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
struct lpte properpt;
@ -421,14 +429,14 @@ moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
rw_runlock(&moea64_eviction_lock);
} else {
/* Otherwise, need reinsertion and deletion */
ptelo = moea64_pte_replace_inval_native(mmu, pvo, pt);
ptelo = moea64_pte_replace_inval_native(pvo, pt);
}

return (ptelo);
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
moea64_cpu_bootstrap_native(int ap)
{
int i = 0;
#ifdef __powerpc64__
@ -485,15 +493,14 @@ moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
}

static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
vm_offset_t kernelend)
moea64_bootstrap_native(vm_offset_t kernelstart, vm_offset_t kernelend)
{
vm_size_t size;
vm_offset_t off;
vm_paddr_t pa;
register_t msr;

moea64_early_bootstrap(mmup, kernelstart, kernelend);
moea64_early_bootstrap(kernelstart, kernelend);

switch (mfpvr() >> 16) {
case IBMPOWER9:
@ -557,7 +564,7 @@ moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,

CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

moea64_mid_bootstrap(mmup, kernelstart, kernelend);
moea64_mid_bootstrap(kernelstart, kernelend);

/*
* Add a mapping for the page table itself if there is no direct map.
@ -572,7 +579,7 @@ moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
}

/* Bring up virtual memory */
moea64_late_bootstrap(mmup, kernelstart, kernelend);
moea64_late_bootstrap(kernelstart, kernelend);
}

static void
@ -715,8 +722,8 @@ moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
return (k);
}

static int
moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
static int64_t
moea64_pte_insert_native(struct pvo_entry *pvo)
{
struct lpte insertpt;
uintptr_t slot;
@ -790,7 +797,7 @@ moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
}

static void *
moea64_dump_pmap_native(mmu_t mmu, void *ctx, void *buf, u_long *nbytes)
moea64_dump_pmap_native(void *ctx, void *buf, u_long *nbytes)
{
struct dump_context *dctx;
u_long ptex, ptex_end;
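moea64_install_native() above is the driver's half of the new two-step bring-up: per the commit message, pmap_mmu_init() runs the selected driver's install hook after the driver is chosen but before the pmap IFUNCs are resolved, so every resolver already sees moea64_ops pointing at the right table. A sketch of the assumed ordering (only the pmap_mmu_init() name comes from the commit message; the body and the selected_funcs pointer are illustrative):

/*
 * Assumed shape of the 'post-install pre-bootstrap' step: let the
 * driver wire up its private ops tables before any ifunc resolver
 * reads them.
 */
extern struct pmap_funcs *selected_funcs;	/* the chosen driver's table */

void
pmap_mmu_init(void)
{
	if (selected_funcs->install != NULL)	/* e.g. moea64_install_native */
		selected_funcs->install();
}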
@ -115,6 +115,7 @@ __FBSDID("$FreeBSD$");

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
@ -125,8 +125,6 @@ __FBSDID("$FreeBSD$");

#include <ddb/ddb.h>

#include "mmu_if.h"

#define SPARSE_MAPDEV

/* Use power-of-two mappings in mmu_booke_mapdev(), to save entries. */
@ -182,7 +180,7 @@ static struct mtx tlbivax_mutex;
/* PMAP */
/**************************************************************************/

static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
static int mmu_booke_enter_locked(pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, u_int flags, int8_t psind);

unsigned int kptbl_min; /* Index of the first kernel ptbl. */
@ -263,10 +261,10 @@ static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
#define PMAP_SHPGPERPROC 200
#endif

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t);

static pv_entry_t pv_alloc(void);
@ -287,143 +285,141 @@ void pmap_bootstrap_ap(volatile uint32_t *);
/*
* Kernel MMU interface
*/
static void mmu_booke_clear_modify(mmu_t, vm_page_t);
static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
static void mmu_booke_clear_modify(vm_page_t);
static void mmu_booke_copy(pmap_t, pmap_t, vm_offset_t,
vm_size_t, vm_offset_t);
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
static void mmu_booke_copy_page(vm_page_t, vm_page_t);
static void mmu_booke_copy_pages(vm_page_t *,
vm_offset_t, vm_page_t *, vm_offset_t, int);
static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
static int mmu_booke_enter(pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, u_int flags, int8_t psind);
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
static void mmu_booke_enter_object(pmap_t, vm_offset_t, vm_offset_t,
vm_page_t, vm_prot_t);
static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
static void mmu_booke_enter_quick(pmap_t, vm_offset_t, vm_page_t,
vm_prot_t);
static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
static vm_paddr_t mmu_booke_extract(pmap_t, vm_offset_t);
static vm_page_t mmu_booke_extract_and_hold(pmap_t, vm_offset_t,
vm_prot_t);
static void mmu_booke_init(mmu_t);
static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
static void mmu_booke_init(void);
static boolean_t mmu_booke_is_modified(vm_page_t);
static boolean_t mmu_booke_is_prefaultable(pmap_t, vm_offset_t);
static boolean_t mmu_booke_is_referenced(vm_page_t);
static int mmu_booke_ts_referenced(vm_page_t);
static vm_offset_t mmu_booke_map(vm_offset_t *, vm_paddr_t, vm_paddr_t,
int);
static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
static int mmu_booke_mincore(pmap_t, vm_offset_t,
vm_paddr_t *);
static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
static void mmu_booke_object_init_pt(pmap_t, vm_offset_t,
vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void mmu_booke_page_init(mmu_t, vm_page_t);
static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void mmu_booke_pinit(mmu_t, pmap_t);
static void mmu_booke_pinit0(mmu_t, pmap_t);
static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
static boolean_t mmu_booke_page_exists_quick(pmap_t, vm_page_t);
static void mmu_booke_page_init(vm_page_t);
static int mmu_booke_page_wired_mappings(vm_page_t);
static int mmu_booke_pinit(pmap_t);
static void mmu_booke_pinit0(pmap_t);
static void mmu_booke_protect(pmap_t, vm_offset_t, vm_offset_t,
vm_prot_t);
static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void mmu_booke_release(mmu_t, pmap_t);
static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_remove_all(mmu_t, vm_page_t);
static void mmu_booke_remove_write(mmu_t, vm_page_t);
static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_zero_page(mmu_t, vm_page_t);
static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void mmu_booke_activate(mmu_t, struct thread *);
static void mmu_booke_deactivate(mmu_t, struct thread *);
static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
static void mmu_booke_qenter(vm_offset_t, vm_page_t *, int);
static void mmu_booke_qremove(vm_offset_t, int);
static void mmu_booke_release(pmap_t);
static void mmu_booke_remove(pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_remove_all(vm_page_t);
static void mmu_booke_remove_write(vm_page_t);
static void mmu_booke_unwire(pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_zero_page(vm_page_t);
static void mmu_booke_zero_page_area(vm_page_t, int, int);
static void mmu_booke_activate(struct thread *);
static void mmu_booke_deactivate(struct thread *);
static void mmu_booke_bootstrap(vm_offset_t, vm_offset_t);
static void *mmu_booke_mapdev(vm_paddr_t, vm_size_t);
static void *mmu_booke_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
static void mmu_booke_unmapdev(vm_offset_t, vm_size_t);
static vm_paddr_t mmu_booke_kextract(vm_offset_t);
static void mmu_booke_kenter(vm_offset_t, vm_paddr_t);
static void mmu_booke_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t);
static void mmu_booke_kremove(vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(vm_paddr_t, vm_size_t);
static void mmu_booke_sync_icache(pmap_t, vm_offset_t,
vm_size_t);
static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
static void mmu_booke_dumpsys_map(vm_paddr_t pa, size_t,
void **);
static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
static void mmu_booke_dumpsys_unmap(vm_paddr_t pa, size_t,
void *);
static void mmu_booke_scan_init(mmu_t);
static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
static void mmu_booke_scan_init(void);
static vm_offset_t mmu_booke_quick_enter_page(vm_page_t m);
static void mmu_booke_quick_remove_page(vm_offset_t addr);
static int mmu_booke_change_attr(vm_offset_t addr,
vm_size_t sz, vm_memattr_t mode);
static int mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
static int mmu_booke_map_user_ptr(pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
static int mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
static int mmu_booke_decode_kernel_ptr(vm_offset_t addr,
int *is_user, vm_offset_t *decoded_addr);
static void mmu_booke_page_array_startup(mmu_t , long);
static boolean_t mmu_booke_page_is_mapped(mmu_t mmu, vm_page_t m);
static void mmu_booke_page_array_startup(long);
static boolean_t mmu_booke_page_is_mapped(vm_page_t m);


static mmu_method_t mmu_booke_methods[] = {
static struct pmap_funcs mmu_booke_methods = {
/* pmap dispatcher interface */
MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
MMUMETHOD(mmu_copy, mmu_booke_copy),
MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
MMUMETHOD(mmu_enter, mmu_booke_enter),
MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
MMUMETHOD(mmu_extract, mmu_booke_extract),
MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
MMUMETHOD(mmu_init, mmu_booke_init),
MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
MMUMETHOD(mmu_map, mmu_booke_map),
MMUMETHOD(mmu_mincore, mmu_booke_mincore),
MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
MMUMETHOD(mmu_page_init, mmu_booke_page_init),
MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
MMUMETHOD(mmu_pinit, mmu_booke_pinit),
MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
MMUMETHOD(mmu_protect, mmu_booke_protect),
MMUMETHOD(mmu_qenter, mmu_booke_qenter),
MMUMETHOD(mmu_qremove, mmu_booke_qremove),
MMUMETHOD(mmu_release, mmu_booke_release),
MMUMETHOD(mmu_remove, mmu_booke_remove),
MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
MMUMETHOD(mmu_unwire, mmu_booke_unwire),
MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
MMUMETHOD(mmu_activate, mmu_booke_activate),
MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
MMUMETHOD(mmu_page_array_startup, mmu_booke_page_array_startup),
MMUMETHOD(mmu_page_is_mapped, mmu_booke_page_is_mapped),
.clear_modify = mmu_booke_clear_modify,
.copy = mmu_booke_copy,
.copy_page = mmu_booke_copy_page,
.copy_pages = mmu_booke_copy_pages,
.enter = mmu_booke_enter,
.enter_object = mmu_booke_enter_object,
.enter_quick = mmu_booke_enter_quick,
.extract = mmu_booke_extract,
.extract_and_hold = mmu_booke_extract_and_hold,
.init = mmu_booke_init,
.is_modified = mmu_booke_is_modified,
.is_prefaultable = mmu_booke_is_prefaultable,
.is_referenced = mmu_booke_is_referenced,
.ts_referenced = mmu_booke_ts_referenced,
.map = mmu_booke_map,
.mincore = mmu_booke_mincore,
.object_init_pt = mmu_booke_object_init_pt,
.page_exists_quick = mmu_booke_page_exists_quick,
.page_init = mmu_booke_page_init,
.page_wired_mappings = mmu_booke_page_wired_mappings,
.pinit = mmu_booke_pinit,
.pinit0 = mmu_booke_pinit0,
.protect = mmu_booke_protect,
.qenter = mmu_booke_qenter,
.qremove = mmu_booke_qremove,
.release = mmu_booke_release,
.remove = mmu_booke_remove,
.remove_all = mmu_booke_remove_all,
.remove_write = mmu_booke_remove_write,
.sync_icache = mmu_booke_sync_icache,
.unwire = mmu_booke_unwire,
.zero_page = mmu_booke_zero_page,
.zero_page_area = mmu_booke_zero_page_area,
.activate = mmu_booke_activate,
.deactivate = mmu_booke_deactivate,
.quick_enter_page = mmu_booke_quick_enter_page,
.quick_remove_page = mmu_booke_quick_remove_page,
.page_array_startup = mmu_booke_page_array_startup,
.page_is_mapped = mmu_booke_page_is_mapped,

/* Internal interfaces */
MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
MMUMETHOD(mmu_kenter, mmu_booke_kenter),
MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
MMUMETHOD(mmu_kextract, mmu_booke_kextract),
MMUMETHOD(mmu_kremove, mmu_booke_kremove),
MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
MMUMETHOD(mmu_change_attr, mmu_booke_change_attr),
MMUMETHOD(mmu_map_user_ptr, mmu_booke_map_user_ptr),
MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr),
.bootstrap = mmu_booke_bootstrap,
.dev_direct_mapped = mmu_booke_dev_direct_mapped,
.mapdev = mmu_booke_mapdev,
.mapdev_attr = mmu_booke_mapdev_attr,
.kenter = mmu_booke_kenter,
.kenter_attr = mmu_booke_kenter_attr,
.kextract = mmu_booke_kextract,
.kremove = mmu_booke_kremove,
.unmapdev = mmu_booke_unmapdev,
.change_attr = mmu_booke_change_attr,
.map_user_ptr = mmu_booke_map_user_ptr,
.decode_kernel_ptr = mmu_booke_decode_kernel_ptr,

/* dumpsys() support */
MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
MMUMETHOD(mmu_scan_init, mmu_booke_scan_init),

{ 0, 0 }
.dumpsys_map_chunk = mmu_booke_dumpsys_map,
.dumpsys_unmap_chunk = mmu_booke_dumpsys_unmap,
.dumpsys_pa_init = mmu_booke_scan_init,
};

MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods);
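The booke conversion above shows the practical win of the table form: designated initializers are type-checked against the struct members, so a stale prototype now fails at compile time instead of surviving behind a kobj cast, and the MMUMETHOD() boilerplate plus the { 0, 0 } terminator disappear. A reduced sketch of the idiom (example_* names invented; .kextract and .kenter are real pmap_funcs members per the table above):

static vm_paddr_t example_kextract(vm_offset_t va);
static void example_kenter(vm_offset_t va, vm_paddr_t pa);

static struct pmap_funcs example_methods = {
	/* Each initializer must match the member's prototype exactly. */
	.kextract = example_kextract,
	.kenter = example_kenter,
	/* Members left out stay NULL, which the dispatcher can treat
	 * as "no override". */
};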

#ifdef __powerpc64__
#include "pmap_64.c"
@ -632,7 +628,7 @@ pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
* This is called during booke_init, before the system is really initialized.
*/
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
mmu_booke_bootstrap(vm_offset_t start, vm_offset_t kernelend)
{
vm_paddr_t phys_kernelend;
struct mem_region *mp, *mp1;
@ -940,7 +936,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)

virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
for (i = 0; i < kstack_pages; i++) {
mmu_booke_kenter(mmu, kstack0, kstack0_phys);
mmu_booke_kenter(kstack0, kstack0_phys);
kstack0 += PAGE_SIZE;
kstack0_phys += PAGE_SIZE;
}
@ -1012,12 +1008,12 @@ SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
* Get the physical page address for the given pmap/virtual address.
*/
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
mmu_booke_extract(pmap_t pmap, vm_offset_t va)
{
vm_paddr_t pa;

PMAP_LOCK(pmap);
pa = pte_vatopa(mmu, pmap, va);
pa = pte_vatopa(pmap, va);
PMAP_UNLOCK(pmap);

return (pa);
@ -1028,7 +1024,7 @@ mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
* kernel virtual address.
*/
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
mmu_booke_kextract(vm_offset_t va)
{
tlb_entry_t e;
vm_paddr_t p = 0;
@ -1040,7 +1036,7 @@ mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
#endif

if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS)
p = pte_vatopa(mmu, kernel_pmap, va);
p = pte_vatopa(kernel_pmap, va);

if (p == 0) {
/* Check TLB1 mappings */
@ -1062,7 +1058,7 @@ mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
* system needs to map virtual memory.
*/
static void
mmu_booke_init(mmu_t mmu)
mmu_booke_init()
{
int shpgperproc = PMAP_SHPGPERPROC;

@ -1099,13 +1095,13 @@ mmu_booke_init(mmu_t mmu)
* references recorded. Existing mappings in the region are overwritten.
*/
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
mmu_booke_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
vm_offset_t va;

va = sva;
while (count-- > 0) {
mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
mmu_booke_kenter(va, VM_PAGE_TO_PHYS(*m));
va += PAGE_SIZE;
m++;
}
@ -1116,13 +1112,13 @@ mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
* temporary mappings entered by mmu_booke_qenter.
*/
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
mmu_booke_qremove(vm_offset_t sva, int count)
{
vm_offset_t va;

va = sva;
while (count-- > 0) {
mmu_booke_kremove(mmu, va);
mmu_booke_kremove(va);
va += PAGE_SIZE;
}
}
@ -1131,14 +1127,14 @@ mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
* Map a wired page into kernel virtual address space.
*/
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
mmu_booke_kenter(vm_offset_t va, vm_paddr_t pa)
{

mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
mmu_booke_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
}

static void
mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
mmu_booke_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{
uint32_t flags;
pte_t *pte;
@ -1150,7 +1146,7 @@ mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
flags |= PTE_PS_4KB;

pte = pte_find(mmu, kernel_pmap, va);
pte = pte_find(kernel_pmap, va);
KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE"));

mtx_lock_spin(&tlbivax_mutex);
@ -1182,7 +1178,7 @@ mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
* Remove a page from kernel page table.
*/
static void
mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
mmu_booke_kremove(vm_offset_t va)
{
pte_t *pte;

@ -1192,7 +1188,7 @@ mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
(va <= VM_MAX_KERNEL_ADDRESS)),
("mmu_booke_kremove: invalid va"));

pte = pte_find(mmu, kernel_pmap, va);
pte = pte_find(kernel_pmap, va);

if (!PTE_ISVALID(pte)) {

@ -1218,7 +1214,7 @@ mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
* called in this thread. This is used internally in copyin/copyout.
*/
int
mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
mmu_booke_map_user_ptr(pmap_t pm, volatile const void *uaddr,
void **kaddr, size_t ulen, size_t *klen)
{

@ -1238,7 +1234,7 @@ mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
* address space.
*/
static int
mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
mmu_booke_decode_kernel_ptr(vm_offset_t addr, int *is_user,
vm_offset_t *decoded_addr)
{

@ -1252,7 +1248,7 @@ mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
}

static boolean_t
mmu_booke_page_is_mapped(mmu_t mmu, vm_page_t m)
mmu_booke_page_is_mapped(vm_page_t m)
{

return (!TAILQ_EMPTY(&(m)->md.pv_list));
@ -1262,11 +1258,11 @@ mmu_booke_page_is_mapped(mmu_t mmu, vm_page_t m)
* Initialize pmap associated with process 0.
*/
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
mmu_booke_pinit0(pmap_t pmap)
{

PMAP_LOCK_INIT(pmap);
mmu_booke_pinit(mmu, pmap);
mmu_booke_pinit(pmap);
PCPU_SET(curpmap, pmap);
}

@ -1276,21 +1272,21 @@ mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
* will be wired down.
*/
static int
mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
mmu_booke_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, u_int flags, int8_t psind)
{
int error;

rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
error = mmu_booke_enter_locked(pmap, va, m, prot, flags, psind);
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
return (error);
}

static int
mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
mmu_booke_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
{
pte_t *pte;
@ -1328,7 +1324,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
* If there is an existing mapping, and the physical address has not
* changed, must be protection or wiring change.
*/
if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
if (((pte = pte_find(pmap, va)) != NULL) &&
(PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {

/*
@ -1439,7 +1435,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
flags |= PTE_WIRED;

error = pte_enter(mmu, pmap, m, va, flags,
error = pte_enter(pmap, m, va, flags,
(pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
if (error != 0)
return (KERN_RESOURCE_SHORTAGE);
@ -1473,7 +1469,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
* corresponding offset from m_start are mapped.
*/
static void
mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
mmu_booke_enter_object(pmap_t pmap, vm_offset_t start,
vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
vm_page_t m;
@ -1486,7 +1482,7 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
mmu_booke_enter_locked(pmap, start + ptoa(diff), m,
prot & (VM_PROT_READ | VM_PROT_EXECUTE),
PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
m = TAILQ_NEXT(m, listq);
@ -1496,13 +1492,13 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
}

static void
mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
mmu_booke_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot)
{

rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
mmu_booke_enter_locked(mmu, pmap, va, m,
mmu_booke_enter_locked(pmap, va, m,
prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP |
PMAP_ENTER_QUICK_LOCKED, 0);
PMAP_UNLOCK(pmap);
@ -1515,7 +1511,7 @@ mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
* It is assumed that the start and end are properly rounded to the page size.
*/
static void
mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
mmu_booke_remove(pmap_t pmap, vm_offset_t va, vm_offset_t endva)
{
pte_t *pte;
uint8_t hold_flag;
@ -1545,12 +1541,12 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
for (; va < endva; va += PAGE_SIZE) {
pte = pte_find_next(mmu, pmap, &va);
pte = pte_find_next(pmap, &va);
if ((pte == NULL) || !PTE_ISVALID(pte))
break;
if (va >= endva)
break;
pte_remove(mmu, pmap, va, hold_flag);
pte_remove(pmap, va, hold_flag);
}
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
@ -1562,7 +1558,7 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
* Remove physical page from all pmaps in which it resides.
*/
static void
mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
mmu_booke_remove_all(vm_page_t m)
{
pv_entry_t pv, pvn;
uint8_t hold_flag;
@ -1571,7 +1567,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_link, pvn) {
PMAP_LOCK(pv->pv_pmap);
hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
pte_remove(pv->pv_pmap, pv->pv_va, hold_flag);
PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_aflag_clear(m, PGA_WRITEABLE);
@ -1582,7 +1578,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
* Map a range of physical addresses into kernel virtual address space.
*/
static vm_offset_t
mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
mmu_booke_map(vm_offset_t *virt, vm_paddr_t pa_start,
vm_paddr_t pa_end, int prot)
{
vm_offset_t sva = *virt;
@ -1595,7 +1591,7 @@ mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
#endif

while (pa_start < pa_end) {
mmu_booke_kenter(mmu, va, pa_start);
mmu_booke_kenter(va, pa_start);
va += PAGE_SIZE;
pa_start += PAGE_SIZE;
}
@ -1609,7 +1605,7 @@ mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
* way.
*/
static void
mmu_booke_activate(mmu_t mmu, struct thread *td)
mmu_booke_activate(struct thread *td)
{
pmap_t pmap;
u_int cpuid;
@ -1646,7 +1642,7 @@ mmu_booke_activate(mmu_t mmu, struct thread *td)
* Deactivate the specified process's address space.
*/
static void
mmu_booke_deactivate(mmu_t mmu, struct thread *td)
mmu_booke_deactivate(struct thread *td)
{
pmap_t pmap;

@ -1669,7 +1665,7 @@ mmu_booke_deactivate(mmu_t mmu, struct thread *td)
* This routine is only advisory and need not do anything.
*/
static void
mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
mmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap,
vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
{

@ -1679,7 +1675,7 @@ mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
* Set the physical protection on the specified range of this map as requested.
*/
static void
mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
mmu_booke_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
vm_prot_t prot)
{
vm_offset_t va;
@ -1687,7 +1683,7 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
pte_t *pte;

if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
mmu_booke_remove(mmu, pmap, sva, eva);
mmu_booke_remove(pmap, sva, eva);
return;
}

@ -1696,7 +1692,7 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,

PMAP_LOCK(pmap);
for (va = sva; va < eva; va += PAGE_SIZE) {
if ((pte = pte_find(mmu, pmap, va)) != NULL) {
if ((pte = pte_find(pmap, va)) != NULL) {
if (PTE_ISVALID(pte)) {
m = PHYS_TO_VM_PAGE(PTE_PA(pte));

@ -1722,7 +1718,7 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
* Clear the write and modified bits in each of the given page's mappings.
*/
static void
mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
mmu_booke_remove_write(vm_page_t m)
{
pv_entry_t pv;
pte_t *pte;
@ -1736,7 +1732,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL) {
if (PTE_ISVALID(pte)) {
m = PHYS_TO_VM_PAGE(PTE_PA(pte));

@ -1766,7 +1762,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
* protection.
*/
static vm_page_t
mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
mmu_booke_extract_and_hold(pmap_t pmap, vm_offset_t va,
vm_prot_t prot)
{
pte_t *pte;
@ -1775,7 +1771,7 @@ mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,

m = NULL;
PMAP_LOCK(pmap);
pte = pte_find(mmu, pmap, va);
pte = pte_find(pmap, va);
if ((pte != NULL) && PTE_ISVALID(pte)) {
if (pmap == kernel_pmap)
pte_wbit = PTE_SW;
@ -1796,7 +1792,7 @@ mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
* Initialize a vm_page's machine-dependent fields.
*/
static void
mmu_booke_page_init(mmu_t mmu, vm_page_t m)
mmu_booke_page_init(vm_page_t m)
{

m->md.pv_tracked = 0;
@ -1808,7 +1804,7 @@ mmu_booke_page_init(mmu_t mmu, vm_page_t m)
* in any of physical maps.
*/
static boolean_t
mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
mmu_booke_is_modified(vm_page_t m)
{
pte_t *pte;
pv_entry_t pv;
@ -1827,7 +1823,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
PTE_ISVALID(pte)) {
if (PTE_ISMODIFIED(pte))
rv = TRUE;
@ -1845,7 +1841,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
* for prefault.
*/
static boolean_t
mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
mmu_booke_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

return (FALSE);
@ -1856,7 +1852,7 @@ mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
* in any physical maps.
*/
static boolean_t
mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
mmu_booke_is_referenced(vm_page_t m)
{
pte_t *pte;
pv_entry_t pv;
@ -1868,7 +1864,7 @@ mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
PTE_ISVALID(pte)) {
if (PTE_ISREFERENCED(pte))
rv = TRUE;
@ -1885,7 +1881,7 @@ mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
* Clear the modify bits on the specified physical page.
*/
static void
mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
mmu_booke_clear_modify(vm_page_t m)
|
||||
{
|
||||
pte_t *pte;
|
||||
pv_entry_t pv;
|
||||
@ -1900,7 +1896,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
|
||||
rw_wlock(&pvh_global_lock);
|
||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
|
||||
PMAP_LOCK(pv->pv_pmap);
|
||||
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
|
||||
if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
|
||||
PTE_ISVALID(pte)) {
|
||||
mtx_lock_spin(&tlbivax_mutex);
|
||||
tlb_miss_lock();
|
||||
@ -1934,7 +1930,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
|
||||
* to pmap_is_modified().
|
||||
*/
|
||||
static int
|
||||
mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
|
||||
mmu_booke_ts_referenced(vm_page_t m)
|
||||
{
|
||||
pte_t *pte;
|
||||
pv_entry_t pv;
|
||||
@ -1946,7 +1942,7 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
|
||||
rw_wlock(&pvh_global_lock);
|
||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
|
||||
PMAP_LOCK(pv->pv_pmap);
|
||||
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
|
||||
if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
|
||||
PTE_ISVALID(pte)) {
|
||||
if (PTE_ISMODIFIED(pte))
|
||||
vm_page_dirty(m);
|
||||
@ -1982,14 +1978,14 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
|
||||
* there is no need to invalidate any TLB entries.
|
||||
*/
|
||||
static void
|
||||
mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
||||
mmu_booke_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
||||
{
|
||||
vm_offset_t va;
|
||||
pte_t *pte;
|
||||
|
||||
PMAP_LOCK(pmap);
|
||||
for (va = sva; va < eva; va += PAGE_SIZE) {
|
||||
if ((pte = pte_find(mmu, pmap, va)) != NULL &&
|
||||
if ((pte = pte_find(pmap, va)) != NULL &&
|
||||
PTE_ISVALID(pte)) {
|
||||
if (!PTE_ISWIRED(pte))
|
||||
panic("mmu_booke_unwire: pte %p isn't wired",
|
||||
@ -2009,7 +2005,7 @@ mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
||||
* page aging.
|
||||
*/
|
||||
static boolean_t
|
||||
mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
|
||||
mmu_booke_page_exists_quick(pmap_t pmap, vm_page_t m)
|
||||
{
|
||||
pv_entry_t pv;
|
||||
int loops;
|
||||
@ -2037,7 +2033,7 @@ mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
|
||||
* wired.
|
||||
*/
|
||||
static int
|
||||
mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
|
||||
mmu_booke_page_wired_mappings(vm_page_t m)
|
||||
{
|
||||
pv_entry_t pv;
|
||||
pte_t *pte;
|
||||
@ -2048,7 +2044,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
|
||||
rw_wlock(&pvh_global_lock);
|
||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
|
||||
PMAP_LOCK(pv->pv_pmap);
|
||||
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
|
||||
if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL)
|
||||
if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
|
||||
count++;
|
||||
PMAP_UNLOCK(pv->pv_pmap);
|
||||
@ -2058,7 +2054,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
|
||||
}
|
||||
|
||||
static int
|
||||
mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
|
||||
mmu_booke_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
|
||||
{
|
||||
int i;
|
||||
vm_offset_t va;
|
||||
@ -2076,7 +2072,7 @@ mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
|
||||
}
|
||||
|
||||
void
|
||||
mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
|
||||
mmu_booke_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
|
||||
{
|
||||
vm_paddr_t ppa;
|
||||
vm_offset_t ofs;
|
||||
@ -2102,7 +2098,7 @@ mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
|
||||
}
|
||||
|
||||
void
|
||||
mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
|
||||
mmu_booke_dumpsys_unmap(vm_paddr_t pa, size_t sz, void *va)
|
||||
{
|
||||
vm_paddr_t ppa;
|
||||
vm_offset_t ofs;
|
||||
@ -2143,7 +2139,7 @@ mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
|
||||
extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
|
||||
|
||||
void
|
||||
mmu_booke_scan_init(mmu_t mmu)
|
||||
mmu_booke_scan_init()
|
||||
{
|
||||
vm_offset_t va;
|
||||
pte_t *pte;
|
||||
@ -2182,7 +2178,7 @@ mmu_booke_scan_init(mmu_t mmu)
|
||||
va = kmi.buffer_eva;
|
||||
continue;
|
||||
}
|
||||
pte = pte_find(mmu, kernel_pmap, va);
|
||||
pte = pte_find(kernel_pmap, va);
|
||||
if (pte != NULL && PTE_ISVALID(pte))
|
||||
break;
|
||||
va += PAGE_SIZE;
|
||||
@ -2195,7 +2191,7 @@ mmu_booke_scan_init(mmu_t mmu)
|
||||
/* Don't run into the buffer cache. */
|
||||
if (va == kmi.buffer_sva)
|
||||
break;
|
||||
pte = pte_find(mmu, kernel_pmap, va);
|
||||
pte = pte_find(kernel_pmap, va);
|
||||
if (pte == NULL || !PTE_ISVALID(pte))
|
||||
break;
|
||||
va += PAGE_SIZE;
|
||||
@ -2210,10 +2206,10 @@ mmu_booke_scan_init(mmu_t mmu)
|
||||
* for mapping device memory, NOT real memory.
|
||||
*/
|
||||
static void *
|
||||
mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
|
||||
mmu_booke_mapdev(vm_paddr_t pa, vm_size_t size)
|
||||
{
|
||||
|
||||
return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
|
||||
return (mmu_booke_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
|
||||
}
|
||||
|
||||
static int
|
||||
@ -2232,7 +2228,7 @@ tlb1_find_pa(vm_paddr_t pa, tlb_entry_t *e)
|
||||
}
|
||||
|
||||
static void *
|
||||
mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
|
||||
mmu_booke_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
|
||||
{
|
||||
tlb_entry_t e;
|
||||
vm_paddr_t tmppa;
|
||||
@ -2337,7 +2333,7 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
|
||||
* 'Unmap' a range mapped by mmu_booke_mapdev().
|
||||
*/
|
||||
static void
|
||||
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
|
||||
mmu_booke_unmapdev(vm_offset_t va, vm_size_t size)
|
||||
{
|
||||
#ifdef SUPPORTS_SHRINKING_TLB1
|
||||
vm_offset_t base, offset;
|
||||
@ -2360,7 +2356,7 @@ mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
|
||||
* and immediately after an mmap.
|
||||
*/
|
||||
static void
|
||||
mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
|
||||
mmu_booke_object_init_pt(pmap_t pmap, vm_offset_t addr,
|
||||
vm_object_t object, vm_pindex_t pindex, vm_size_t size)
|
||||
{
|
||||
|
||||
@ -2373,8 +2369,7 @@ mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
|
||||
* Perform the pmap work for mincore.
|
||||
*/
|
||||
static int
|
||||
mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
|
||||
vm_paddr_t *pap)
|
||||
mmu_booke_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
|
||||
{
|
||||
|
||||
/* XXX: this should be implemented at some point */
|
||||
@ -2382,8 +2377,7 @@ mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
|
||||
}
|
||||
|
||||
static int
|
||||
mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
|
||||
vm_memattr_t mode)
|
||||
mmu_booke_change_attr(vm_offset_t addr, vm_size_t sz, vm_memattr_t mode)
|
||||
{
|
||||
vm_offset_t va;
|
||||
pte_t *pte;
|
||||
@ -2440,7 +2434,7 @@ mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
|
||||
/* Not in TLB1, try through pmap */
|
||||
/* First validate the range. */
|
||||
for (va = addr; va < addr + sz; va += PAGE_SIZE) {
|
||||
pte = pte_find(mmu, kernel_pmap, va);
|
||||
pte = pte_find(kernel_pmap, va);
|
||||
if (pte == NULL || !PTE_ISVALID(pte))
|
||||
return (EINVAL);
|
||||
}
|
||||
@ -2448,7 +2442,7 @@ mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
|
||||
mtx_lock_spin(&tlbivax_mutex);
|
||||
tlb_miss_lock();
|
||||
for (va = addr; va < addr + sz; va += PAGE_SIZE) {
|
||||
pte = pte_find(mmu, kernel_pmap, va);
|
||||
pte = pte_find(kernel_pmap, va);
|
||||
*pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
|
||||
*pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
|
||||
tlb0_flush_entry(va);
|
||||
@ -2460,7 +2454,7 @@ mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
|
||||
}
|
||||
|
||||
static void
|
||||
mmu_booke_page_array_startup(mmu_t mmu, long pages)
|
||||
mmu_booke_page_array_startup(long pages)
|
||||
{
|
||||
vm_page_array_size = pages;
|
||||
}
|
||||
|
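The mechanical change running through this file is the loss of the mmu_t first parameter. Under kobj, every pmap method carried an mmu_t cookie so the dispatcher could locate the method table; with IFUNC dispatch there is exactly one live pmap driver per kernel, so the cookie carries no information. A minimal before/after sketch of the calling convention seen in the hunks above:

/* kobj: the mmu_t cookie threads through every method and helper. */
pte = pte_find(mmu, pmap, va);

/* ifunc: a single driver is selected at boot, so the cookie is gone
 * and internal helpers become ordinary static calls. */
pte = pte_find(pmap, va);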
@ -97,8 +97,6 @@ __FBSDID("$FreeBSD$");

#include <ddb/ddb.h>

#include "mmu_if.h"

#define PRI0ptrX "08x"

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
@ -132,15 +130,15 @@ static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
static pte_t *ptbl_alloc(pmap_t, unsigned int, boolean_t);
static void ptbl_free(pmap_t, unsigned int);
static void ptbl_hold(pmap_t, unsigned int);
static int ptbl_unhold(pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);

struct ptbl_buf {
TAILQ_ENTRY(ptbl_buf) link; /* list link */
@ -240,7 +238,7 @@ ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
ptbl_alloc(pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
{
vm_page_t mtbl[PTBL_PAGES];
vm_page_t m;
@ -286,7 +284,7 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
}

/* Map allocated pages into kernel_pmap. */
mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
mmu_booke_qenter((vm_offset_t)ptbl, mtbl, PTBL_PAGES);

/* Zero whole ptbl. */
bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
@ -299,7 +297,7 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
ptbl_free(pmap_t pmap, unsigned int pdir_idx)
{
pte_t *ptbl;
vm_paddr_t pa;
@ -333,11 +331,11 @@ ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)

for (i = 0; i < PTBL_PAGES; i++) {
va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
pa = pte_vatopa(mmu, kernel_pmap, va);
pa = pte_vatopa(kernel_pmap, va);
m = PHYS_TO_VM_PAGE(pa);
vm_page_free_zero(m);
vm_wire_sub(1);
mmu_booke_kremove(mmu, va);
mmu_booke_kremove(va);
}

ptbl_free_pmap_ptbl(pmap, ptbl);
@ -350,7 +348,7 @@ ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
ptbl_unhold(pmap_t pmap, unsigned int pdir_idx)
{
pte_t *ptbl;
vm_paddr_t pa;
@ -373,7 +371,7 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)

/* decrement hold count */
for (i = 0; i < PTBL_PAGES; i++) {
pa = pte_vatopa(mmu, kernel_pmap,
pa = pte_vatopa(kernel_pmap,
    (vm_offset_t)ptbl + (i * PAGE_SIZE));
m = PHYS_TO_VM_PAGE(pa);
m->ref_count--;
@ -385,7 +383,7 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
 * page.
 */
if (m->ref_count == 0) {
ptbl_free(mmu, pmap, pdir_idx);
ptbl_free(pmap, pdir_idx);

//debugf("ptbl_unhold: e (freed ptbl)\n");
return (1);
@ -399,7 +397,7 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
ptbl_hold(pmap_t pmap, unsigned int pdir_idx)
{
vm_paddr_t pa;
pte_t *ptbl;
@ -419,7 +417,7 @@ ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

for (i = 0; i < PTBL_PAGES; i++) {
pa = pte_vatopa(mmu, kernel_pmap,
pa = pte_vatopa(kernel_pmap,
    (vm_offset_t)ptbl + (i * PAGE_SIZE));
m = PHYS_TO_VM_PAGE(pa);
m->ref_count++;
@ -432,7 +430,7 @@ ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
pte_remove(pmap_t pmap, vm_offset_t va, uint8_t flags)
{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
@ -492,7 +490,7 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)

if (flags & PTBL_UNHOLD) {
//debugf("pte_remove: e (unhold)\n");
return (ptbl_unhold(mmu, pmap, pdir_idx));
return (ptbl_unhold(pmap, pdir_idx));
}

//debugf("pte_remove: e\n");
@ -503,7 +501,7 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
unsigned int pdir_idx = PDIR_IDX(va);
@ -518,7 +516,7 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,

if (ptbl == NULL) {
/* Allocate page table pages. */
ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
ptbl = ptbl_alloc(pmap, pdir_idx, nosleep);
if (ptbl == NULL) {
KASSERT(nosleep, ("nosleep and NULL ptbl"));
return (ENOMEM);
@ -532,14 +530,14 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
 */
pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
if (PTE_ISVALID(pte)) {
pte_remove(mmu, pmap, va, PTBL_HOLD);
pte_remove(pmap, va, PTBL_HOLD);
} else {
/*
 * pte is not used, increment hold count
 * for ptbl pages.
 */
if (pmap != kernel_pmap)
ptbl_hold(mmu, pmap, pdir_idx);
ptbl_hold(pmap, pdir_idx);
}
}

@ -572,12 +570,12 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
pte_vatopa(pmap_t pmap, vm_offset_t va)
{
vm_paddr_t pa = 0;
pte_t *pte;

pte = pte_find(mmu, pmap, va);
pte = pte_find(pmap, va);
if ((pte != NULL) && PTE_ISVALID(pte))
pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
return (pa);
@ -585,7 +583,7 @@ pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
pte_find(pmap_t pmap, vm_offset_t va)
{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
@ -600,7 +598,7 @@ pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)

/* Get a pointer to a PTE in a page table, or the next closest (greater) one. */
static __inline pte_t *
pte_find_next(mmu_t mmu, pmap_t pmap, vm_offset_t *pva)
pte_find_next(pmap_t pmap, vm_offset_t *pva)
{
vm_offset_t va;
pte_t **pdir;
@ -691,8 +689,8 @@ mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
static int
mmu_booke_pinit(pmap_t pmap)
{
int i;

@ -708,6 +706,8 @@ mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
pmap->pm_pdir = uma_zalloc(ptbl_root_zone, M_WAITOK);
bzero(pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
TAILQ_INIT(&pmap->pm_ptbl_list);

return (1);
}

/*
@ -716,7 +716,7 @@ mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
mmu_booke_release(pmap_t pmap)
{

KASSERT(pmap->pm_stats.resident_count == 0,
@ -726,7 +726,7 @@ mmu_booke_release(mmu_t mmu, pmap_t pmap)
}

static void
mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
pte_t *pte;
vm_paddr_t pa = 0;
@ -741,7 +741,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
while (sz > 0) {
PMAP_LOCK(pm);
pte = pte_find(mmu, pm, va);
pte = pte_find(pm, va);
valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
if (valid)
pa = PTE_PA(pte);
@ -754,11 +754,11 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
addr = 0;
m = PHYS_TO_VM_PAGE(pa);
PMAP_LOCK(pmap);
pte_enter(mmu, pmap, m, addr,
pte_enter(pmap, m, addr,
    PTE_SR | PTE_VALID, FALSE);
addr += (va & PAGE_MASK);
__syncicache((void *)addr, sync_sz);
pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
pte_remove(pmap, addr, PTBL_UNHOLD);
PMAP_UNLOCK(pmap);
} else
__syncicache((void *)va, sync_sz);
@ -777,7 +777,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
mmu_booke_zero_page_area(vm_page_t m, int off, int size)
{
vm_offset_t va;

@ -786,9 +786,9 @@ mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
mtx_lock(&zero_page_mutex);
va = zero_page_va;

mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
mmu_booke_kenter(va, VM_PAGE_TO_PHYS(m));
bzero((caddr_t)va + off, size);
mmu_booke_kremove(mmu, va);
mmu_booke_kremove(va);

mtx_unlock(&zero_page_mutex);
}
@ -797,19 +797,19 @@ mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
mmu_booke_zero_page(vm_page_t m)
{
vm_offset_t off, va;

va = zero_page_va;
mtx_lock(&zero_page_mutex);

mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
mmu_booke_kenter(va, VM_PAGE_TO_PHYS(m));

for (off = 0; off < PAGE_SIZE; off += cacheline_size)
__asm __volatile("dcbz 0,%0" :: "r"(va + off));

mmu_booke_kremove(mmu, va);
mmu_booke_kremove(va);

mtx_unlock(&zero_page_mutex);
}
@ -820,7 +820,7 @@ mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
mmu_booke_copy_page(vm_page_t sm, vm_page_t dm)
{
vm_offset_t sva, dva;

@ -828,18 +828,18 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
dva = copy_page_dst_va;

mtx_lock(&copy_page_mutex);
mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
mmu_booke_kenter(sva, VM_PAGE_TO_PHYS(sm));
mmu_booke_kenter(dva, VM_PAGE_TO_PHYS(dm));

memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);

mmu_booke_kremove(mmu, dva);
mmu_booke_kremove(mmu, sva);
mmu_booke_kremove(dva);
mmu_booke_kremove(sva);
mtx_unlock(&copy_page_mutex);
}

static inline void
mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
void *a_cp, *b_cp;
@ -850,17 +850,17 @@ mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
while (xfersize > 0) {
a_pg_offset = a_offset & PAGE_MASK;
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
mmu_booke_kenter(mmu, copy_page_src_va,
mmu_booke_kenter(copy_page_src_va,
    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
a_cp = (char *)copy_page_src_va + a_pg_offset;
b_pg_offset = b_offset & PAGE_MASK;
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
mmu_booke_kenter(mmu, copy_page_dst_va,
mmu_booke_kenter(copy_page_dst_va,
    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
b_cp = (char *)copy_page_dst_va + b_pg_offset;
bcopy(a_cp, b_cp, cnt);
mmu_booke_kremove(mmu, copy_page_dst_va);
mmu_booke_kremove(mmu, copy_page_src_va);
mmu_booke_kremove(copy_page_dst_va);
mmu_booke_kremove(copy_page_src_va);
a_offset += cnt;
b_offset += cnt;
xfersize -= cnt;
@ -869,7 +869,7 @@ mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
}

static vm_offset_t
mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
mmu_booke_quick_enter_page(vm_page_t m)
{
vm_paddr_t paddr;
vm_offset_t qaddr;
@ -885,7 +885,7 @@ mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
critical_enter();
qaddr = PCPU_GET(qmap_addr);

pte = pte_find(mmu, kernel_pmap, qaddr);
pte = pte_find(kernel_pmap, qaddr);

KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));

@ -907,11 +907,11 @@ mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
}

static void
mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
mmu_booke_quick_remove_page(vm_offset_t addr)
{
pte_t *pte;

pte = pte_find(mmu, kernel_pmap, addr);
pte = pte_find(kernel_pmap, addr);

KASSERT(PCPU_GET(qmap_addr) == addr,
    ("mmu_booke_quick_remove_page: invalid address"));
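For orientation, this is roughly how an IFUNC-backed pmap entry point replaces a kobj method lookup. A hedged sketch, not the commit's literal code: mmu_obj stands in for whatever record pmap_mmu_init() keeps of the selected driver, and DEFINE_IFUNC comes from the <machine/ifunc.h> header this change starts including:

#include <machine/ifunc.h>

/* The resolver runs once, when the kernel's ifuncs are bound; every
 * later pmap_zero_page() call jumps straight to the chosen driver. */
DEFINE_IFUNC(, void, pmap_zero_page, (vm_page_t m))
{
	return (mmu_obj->funcs->zero_page);
}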
@ -99,8 +99,6 @@ __FBSDID("$FreeBSD$");

#include <ddb/ddb.h>

#include "mmu_if.h"

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
@ -136,16 +134,16 @@ static unsigned long ilog2(unsigned long);
/**************************************************************************/

#define PMAP_ROOT_SIZE (sizeof(pte_t****) * PG_ROOT_NENTRIES)
static pte_t *ptbl_alloc(mmu_t mmu, pmap_t pmap, vm_offset_t va,
static pte_t *ptbl_alloc(pmap_t pmap, vm_offset_t va,
    bool nosleep, bool *is_new);
static void ptbl_hold(mmu_t, pmap_t, pte_t *);
static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
static void ptbl_hold(pmap_t, pte_t *);
static int ptbl_unhold(pmap_t, vm_offset_t);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find_next(mmu_t, pmap_t, vm_offset_t *);
static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);
static pte_t *pte_find_next(pmap_t, vm_offset_t *);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t);

/**************************************************************************/
@ -154,7 +152,7 @@ static void kernel_pte_alloc(vm_offset_t, vm_offset_t);

/* Allocate a page, to be used in a page table. */
static vm_offset_t
mmu_booke_alloc_page(mmu_t mmu, pmap_t pmap, unsigned int idx, bool nosleep)
mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
{
vm_page_t m;
int req;
@ -173,7 +171,7 @@ mmu_booke_alloc_page(mmu_t mmu, pmap_t pmap, unsigned int idx, bool nosleep)

if (!(m->flags & PG_ZERO))
/* Zero whole ptbl. */
mmu_booke_zero_page(mmu, m);
mmu_booke_zero_page(m);

return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
@ -186,7 +184,7 @@ ptbl_init(void)

/* Get a pointer to a PTE in a page table. */
static __inline pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
pte_find(pmap_t pmap, vm_offset_t va)
{
pte_t ***pdir_l1;
pte_t **pdir;
@ -207,7 +205,7 @@ pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)

/* Get a pointer to a PTE in a page table, or the next closest (greater) one. */
static __inline pte_t *
pte_find_next(mmu_t mmu, pmap_t pmap, vm_offset_t *pva)
pte_find_next(pmap_t pmap, vm_offset_t *pva)
{
vm_offset_t va;
pte_t ****pm_root;
@ -250,7 +248,7 @@ pte_find_next(mmu_t mmu, pmap_t pmap, vm_offset_t *pva)
}

static bool
unhold_free_page(mmu_t mmu, pmap_t pmap, vm_page_t m)
unhold_free_page(pmap_t pmap, vm_page_t m)
{

m->ref_count--;
@ -264,7 +262,7 @@ unhold_free_page(mmu_t mmu, pmap_t pmap, vm_page_t m)
}

static vm_offset_t
alloc_or_hold_page(mmu_t mmu, pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
alloc_or_hold_page(pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
    bool nosleep, bool hold, bool *isnew)
{
vm_offset_t page;
@ -274,7 +272,7 @@ alloc_or_hold_page(mmu_t mmu, pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
KASSERT(page != 0 || pmap != kernel_pmap,
    ("NULL page table page found in kernel pmap!"));
if (page == 0) {
page = mmu_booke_alloc_page(mmu, pmap, index, nosleep);
page = mmu_booke_alloc_page(pmap, index, nosleep);
if (ptr_tbl[index] == 0) {
*isnew = true;
ptr_tbl[index] = page;
@ -297,7 +295,7 @@ alloc_or_hold_page(mmu_t mmu, pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,

/* Allocate page table. */
static pte_t*
ptbl_alloc(mmu_t mmu, pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
ptbl_alloc(pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
{
unsigned int pg_root_idx = PG_ROOT_IDX(va);
unsigned int pdir_l1_idx = PDIR_L1_IDX(va);
@ -306,15 +304,15 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
bool hold_page;

hold_page = (pmap != kernel_pmap);
pdir_l1 = alloc_or_hold_page(mmu, pmap, (vm_offset_t *)pmap->pm_root,
pdir_l1 = alloc_or_hold_page(pmap, (vm_offset_t *)pmap->pm_root,
    pg_root_idx, nosleep, hold_page, is_new);
if (pdir_l1 == 0)
return (NULL);
pdir = alloc_or_hold_page(mmu, pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx,
pdir = alloc_or_hold_page(pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx,
    nosleep, hold_page, is_new);
if (pdir == 0)
return (NULL);
ptbl = alloc_or_hold_page(mmu, pmap, (vm_offset_t *)pdir, pdir_idx,
ptbl = alloc_or_hold_page(pmap, (vm_offset_t *)pdir, pdir_idx,
    nosleep, false, is_new);

return ((pte_t *)ptbl);
@ -327,7 +325,7 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
ptbl_unhold(pmap_t pmap, vm_offset_t va)
{
pte_t *ptbl;
vm_page_t m;
@ -351,19 +349,19 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
/* decrement hold count */
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));

if (!unhold_free_page(mmu, pmap, m))
if (!unhold_free_page(pmap, m))
return (0);

pdir[pdir_idx] = NULL;
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir));

if (!unhold_free_page(mmu, pmap, m))
if (!unhold_free_page(pmap, m))
return (1);

pdir_l1[pdir_l1_idx] = NULL;
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir_l1));

if (!unhold_free_page(mmu, pmap, m))
if (!unhold_free_page(pmap, m))
return (1);
pmap->pm_root[pg_root_idx] = NULL;

@ -375,7 +373,7 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 * entry is being inserted into ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t *ptbl)
ptbl_hold(pmap_t pmap, pte_t *ptbl)
{
vm_page_t m;

@ -392,12 +390,12 @@ ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t *ptbl)
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
pte_remove(pmap_t pmap, vm_offset_t va, u_int8_t flags)
{
vm_page_t m;
pte_t *pte;

pte = pte_find(mmu, pmap, va);
pte = pte_find(pmap, va);
KASSERT(pte != NULL, ("%s: NULL pte for va %#jx, pmap %p",
    __func__, (uintmax_t)va, pmap));

@ -440,7 +438,7 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
pmap->pm_stats.resident_count--;

if (flags & PTBL_UNHOLD) {
return (ptbl_unhold(mmu, pmap, va));
return (ptbl_unhold(pmap, va));
}
return (0);
}
@ -449,7 +447,7 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
unsigned int ptbl_idx = PTBL_IDX(va);
@ -457,7 +455,7 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
bool is_new;

/* Get the page directory pointer. */
ptbl = ptbl_alloc(mmu, pmap, va, nosleep, &is_new);
ptbl = ptbl_alloc(pmap, va, nosleep, &is_new);
if (ptbl == NULL) {
KASSERT(nosleep, ("nosleep and NULL ptbl"));
return (ENOMEM);
@ -471,14 +469,14 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
 */
pte = &ptbl[ptbl_idx];
if (PTE_ISVALID(pte)) {
pte_remove(mmu, pmap, va, PTBL_HOLD);
pte_remove(pmap, va, PTBL_HOLD);
} else {
/*
 * pte is not used, increment hold count for ptbl
 * pages.
 */
if (pmap != kernel_pmap)
ptbl_hold(mmu, pmap, ptbl);
ptbl_hold(pmap, ptbl);
}
}

@ -512,12 +510,12 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
pte_vatopa(pmap_t pmap, vm_offset_t va)
{
vm_paddr_t pa = 0;
pte_t *pte;

pte = pte_find(mmu, pmap, va);
pte = pte_find(pmap, va);
if ((pte != NULL) && PTE_ISVALID(pte))
pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
return (pa);
@ -599,8 +597,8 @@ mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
static int
mmu_booke_pinit(pmap_t pmap)
{
int i;

@ -615,6 +613,8 @@ mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
pmap->pm_root = uma_zalloc(ptbl_root_zone, M_WAITOK);
bzero(pmap->pm_root, sizeof(pte_t **) * PG_ROOT_NENTRIES);

return (1);
}

/*
@ -623,7 +623,7 @@ mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
mmu_booke_release(pmap_t pmap)
{

KASSERT(pmap->pm_stats.resident_count == 0,
@ -633,7 +633,7 @@ mmu_booke_release(mmu_t mmu, pmap_t pmap)
}

static void
mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
pte_t *pte;
vm_paddr_t pa = 0;
@ -641,7 +641,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)

while (sz > 0) {
PMAP_LOCK(pm);
pte = pte_find(mmu, pm, va);
pte = pte_find(pm, va);
valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
if (valid)
pa = PTE_PA(pte);
@ -665,7 +665,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
mmu_booke_zero_page_area(vm_page_t m, int off, int size)
{
vm_offset_t va;

@ -679,7 +679,7 @@ mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
mmu_booke_zero_page(vm_page_t m)
{
vm_offset_t off, va;

@ -695,7 +695,7 @@ mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
mmu_booke_copy_page(vm_page_t sm, vm_page_t dm)
{
vm_offset_t sva, dva;

@ -705,7 +705,7 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
}

static inline void
mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
void *a_cp, *b_cp;
@ -733,13 +733,13 @@ mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
}

static vm_offset_t
mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
mmu_booke_quick_enter_page(vm_page_t m)
{
return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

static void
mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
mmu_booke_quick_remove_page(vm_offset_t addr)
{
}
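Note that both the 32-bit and 64-bit conversions of mmu_booke_pinit() above also switch the return type from void to int and return 1. Previously the dispatch layer's pmap_pinit() called MMU_PINIT() and then returned 1 unconditionally (visible in the dispatch hunks further down); with direct dispatch the driver reports its own status. A hedged sketch of the resulting contract, with the forwarding written as a plain call for clarity (in the real tree it goes through an IFUNC):

int
pmap_pinit(pmap_t pmap)
{
	/* Forward the driver's status instead of fabricating 1 here. */
	return (mmu_booke_pinit(pmap));
}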
@ -31,85 +31,171 @@
#ifndef _MACHINE_MMUVAR_H_
#define _MACHINE_MMUVAR_H_

/*
 * A PowerPC MMU implementation is declared with a kernel object and
 * an associated method table. The MMU_DEF macro is used to declare
 * the class, and also links it to the global MMU class list.
 *
 * e.g.
 *
 * static mmu_method_t ppc8xx_methods[] = {
 *	MMUMETHOD(mmu_change_wiring, ppc8xx_mmu_change_wiring),
 *	MMUMETHOD(mmu_clear_modify, ppc8xx_mmu_clear_modify),
 *	MMUMETHOD(mmu_clear_reference, ppc8xx_mmu_clear_reference),
 * ...
 *	MMUMETHOD(mmu_dev_direct_mapped, ppc8xx_mmu_dev_direct_mapped),
 *	{ 0, 0 }
 * };
 *
 * MMU_DEF(ppc8xx, MMU_TYPE_8xx, ppc8xx_methods, sizeof(ppc8xx_mmu_softc));
 *
 * A single level of inheritance is supported in a similar fashion to
 * kobj inheritance e.g.
 *
 * MMU_DEF_1(ppc860c, MMU_TYPE_860c, ppc860c_methods, 0, ppc8xx);
 */
typedef void (*pmap_bootstrap_t)(vm_offset_t, vm_offset_t);
typedef void (*pmap_cpu_bootstrap_t)(int);
typedef void (*pmap_kenter_t)(vm_offset_t, vm_paddr_t pa);
typedef void (*pmap_kenter_attr_t)(vm_offset_t, vm_paddr_t, vm_memattr_t);
typedef void (*pmap_kremove_t)(vm_offset_t);
typedef void *(*pmap_mapdev_t)(vm_paddr_t, vm_size_t);
typedef void *(*pmap_mapdev_attr_t)(vm_paddr_t, vm_size_t, vm_memattr_t);
typedef void (*pmap_unmapdev_t)(vm_offset_t, vm_size_t);
typedef void (*pmap_page_set_memattr_t)(vm_page_t, vm_memattr_t);
typedef int (*pmap_change_attr_t)(vm_offset_t, vm_size_t, vm_memattr_t);
typedef int (*pmap_map_user_ptr_t)(pmap_t, volatile const void *,
    void **, size_t, size_t *);
typedef int (*pmap_decode_kernel_ptr_t)(vm_offset_t, int *, vm_offset_t *);
typedef vm_paddr_t (*pmap_kextract_t)(vm_offset_t);
typedef int (*pmap_dev_direct_mapped_t)(vm_paddr_t, vm_size_t);

#include <sys/kobj.h>
typedef void (*pmap_page_array_startup_t)(long);
typedef void (*pmap_advise_t)(pmap_t, vm_offset_t, vm_offset_t, int);
typedef void (*pmap_clear_modify_t)(vm_page_t);
typedef void (*pmap_remove_write_t)(vm_page_t);
typedef void (*pmap_copy_t)(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
typedef void (*pmap_copy_page_t)(vm_page_t, vm_page_t);
typedef void (*pmap_copy_pages_t)(vm_page_t *, vm_offset_t,
    vm_page_t *, vm_offset_t, int);
typedef int (*pmap_enter_t)(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
    u_int, int8_t);
typedef void (*pmap_enter_object_t)(pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
typedef void (*pmap_enter_quick_t)(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
typedef vm_paddr_t (*pmap_extract_t)(pmap_t, vm_offset_t);
typedef vm_page_t (*pmap_extract_and_hold_t)(pmap_t, vm_offset_t, vm_prot_t);
typedef void (*pmap_growkernel_t)(vm_offset_t);
typedef void (*pmap_init_t)(void);
typedef boolean_t (*pmap_is_modified_t)(vm_page_t);
typedef boolean_t (*pmap_is_prefaultable_t)(pmap_t, vm_offset_t);
typedef boolean_t (*pmap_is_referenced_t)(vm_page_t);
typedef int (*pmap_ts_referenced_t)(vm_page_t);
typedef vm_offset_t (*pmap_map_t)(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
typedef void (*pmap_object_init_pt_t)(pmap_t, vm_offset_t, vm_object_t,
    vm_pindex_t, vm_size_t);
typedef boolean_t (*pmap_page_exists_quick_t)(pmap_t, vm_page_t);
typedef boolean_t (*pmap_page_is_mapped_t)(vm_page_t);
typedef void (*pmap_page_init_t)(vm_page_t);
typedef int (*pmap_page_wired_mappings_t)(vm_page_t);
typedef void (*pmap_pinit0_t)(pmap_t);
typedef void (*pmap_protect_t)(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
typedef void (*pmap_qenter_t)(vm_offset_t, vm_page_t *, int);
typedef void (*pmap_qremove_t)(vm_offset_t, int);
typedef void (*pmap_release_t)(pmap_t);
typedef void (*pmap_remove_t)(pmap_t, vm_offset_t, vm_offset_t);
typedef void (*pmap_remove_all_t)(vm_page_t);
typedef void (*pmap_remove_pages_t)(pmap_t);
typedef void (*pmap_unwire_t)(pmap_t, vm_offset_t, vm_offset_t);
typedef void (*pmap_zero_page_t)(vm_page_t);
typedef void (*pmap_zero_page_area_t)(vm_page_t, int, int);
typedef int (*pmap_mincore_t)(pmap_t, vm_offset_t, vm_paddr_t *);
typedef void (*pmap_activate_t)(struct thread *);
typedef void (*pmap_deactivate_t)(struct thread *);
typedef void (*pmap_align_superpage_t)(vm_object_t, vm_ooffset_t,
    vm_offset_t *, vm_size_t);

typedef void (*pmap_sync_icache_t)(pmap_t, vm_offset_t, vm_size_t);
typedef void (*pmap_dumpsys_map_chunk_t)(vm_paddr_t, size_t, void **);
typedef void (*pmap_dumpsys_unmap_chunk_t)(vm_paddr_t, size_t, void *);
typedef void (*pmap_dumpsys_pa_init_t)(void);
typedef size_t (*pmap_dumpsys_scan_pmap_t)(void);
typedef void *(*pmap_dumpsys_dump_pmap_init_t)(unsigned);
typedef void *(*pmap_dumpsys_dump_pmap_t)(void *, void *, u_long *);
typedef vm_offset_t (*pmap_quick_enter_page_t)(vm_page_t);
typedef void (*pmap_quick_remove_page_t)(vm_offset_t);
typedef bool (*pmap_ps_enabled_t)(pmap_t);
typedef void (*pmap_tlbie_all_t)(void);
typedef void (*pmap_installer_t)(void);

struct pmap_funcs {
	pmap_installer_t install;
	pmap_bootstrap_t bootstrap;
	pmap_cpu_bootstrap_t cpu_bootstrap;
	pmap_kenter_t kenter;
	pmap_kenter_attr_t kenter_attr;
	pmap_kremove_t kremove;
	pmap_mapdev_t mapdev;
	pmap_mapdev_attr_t mapdev_attr;
	pmap_unmapdev_t unmapdev;
	pmap_page_set_memattr_t page_set_memattr;
	pmap_change_attr_t change_attr;
	pmap_map_user_ptr_t map_user_ptr;
	pmap_decode_kernel_ptr_t decode_kernel_ptr;
	pmap_kextract_t kextract;
	pmap_dev_direct_mapped_t dev_direct_mapped;
	pmap_advise_t advise;
	pmap_clear_modify_t clear_modify;
	pmap_remove_write_t remove_write;
	pmap_copy_t copy;
	pmap_copy_page_t copy_page;
	pmap_copy_pages_t copy_pages;
	pmap_enter_t enter;
	pmap_enter_object_t enter_object;
	pmap_enter_quick_t enter_quick;
	pmap_extract_t extract;
	pmap_extract_and_hold_t extract_and_hold;
	pmap_growkernel_t growkernel;
	pmap_init_t init;
	pmap_is_modified_t is_modified;
	pmap_is_prefaultable_t is_prefaultable;
	pmap_is_referenced_t is_referenced;
	pmap_ts_referenced_t ts_referenced;
	pmap_page_is_mapped_t page_is_mapped;
	pmap_ps_enabled_t ps_enabled;
	pmap_map_t map;
	pmap_object_init_pt_t object_init_pt;
	pmap_page_exists_quick_t page_exists_quick;
	pmap_page_init_t page_init;
	pmap_page_wired_mappings_t page_wired_mappings;
	pmap_pinit_t pinit;
	pmap_pinit0_t pinit0;
	pmap_protect_t protect;
	pmap_qenter_t qenter;
	pmap_qremove_t qremove;
	pmap_release_t release;
	pmap_remove_t remove;
	pmap_remove_all_t remove_all;
	pmap_remove_pages_t remove_pages;
	pmap_unwire_t unwire;
	pmap_zero_page_t zero_page;
	pmap_zero_page_area_t zero_page_area;
	pmap_mincore_t mincore;
	pmap_activate_t activate;
	pmap_deactivate_t deactivate;
	pmap_align_superpage_t align_superpage;
	pmap_sync_icache_t sync_icache;
	pmap_quick_enter_page_t quick_enter_page;
	pmap_quick_remove_page_t quick_remove_page;
	pmap_page_array_startup_t page_array_startup;
	pmap_dumpsys_map_chunk_t dumpsys_map_chunk;
	pmap_dumpsys_unmap_chunk_t dumpsys_unmap_chunk;
	pmap_dumpsys_pa_init_t dumpsys_pa_init;
	pmap_dumpsys_scan_pmap_t dumpsys_scan_pmap;
	pmap_dumpsys_dump_pmap_init_t dumpsys_dump_pmap_init;
	pmap_dumpsys_dump_pmap_t dumpsys_dump_pmap;
	pmap_tlbie_all_t tlbie_all;

};
struct mmu_kobj {
	/*
	 * An MMU instance is a kernel object
	 */
	KOBJ_FIELDS;

	/*
	 * Utility elements that an instance may use
	 */
	struct mtx mmu_mtx;	/* available for instance use */
	void *mmu_iptr;		/* instance data pointer */

	/*
	 * Opaque data that can be overlaid with an instance-private
	 * structure. MMU code can test that this is large enough at
	 * compile time with a sizeof() test againt it's softc. There
	 * is also a run-time test when the MMU kernel object is
	 * registered.
	 */
#define MMU_OPAQUESZ 64
	u_int mmu_opaque[MMU_OPAQUESZ];
	const char *name;
	const struct mmu_kobj *base;
	const struct pmap_funcs *funcs;
};

typedef struct mmu_kobj *mmu_t;
typedef struct kobj_class mmu_def_t;
#define mmu_method_t kobj_method_t

#define MMUMETHOD KOBJMETHOD

#define MMU_DEF(name, ident, methods, size) \
#define MMU_DEF(name, ident, methods) \
\
mmu_def_t name = { \
	ident, methods, size, NULL \
const struct mmu_kobj name = { \
	ident, NULL, &methods \
}; \
DATA_SET(mmu_set, name)

#define MMU_DEF_INHERIT(name, ident, methods, size, base1) \
#define MMU_DEF_INHERIT(name, ident, methods, base1) \
\
static kobj_class_t name ## _baseclasses[] = \
	{ &base1, NULL }; \
mmu_def_t name = { \
	ident, methods, size, name ## _baseclasses \
}; \
const struct mmu_kobj name = { \
	ident, &base1, &methods, \
}; \
DATA_SET(mmu_set, name)


#if 0
mmu_def_t name = { \
	ident, methods, size, name ## _baseclasses \
};
DATA_SET(mmu_set, name)
#endif

/*
 * Known MMU names
 */
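Under the definitions above, a pmap driver now registers by filling a struct pmap_funcs and handing it to the slimmed-down MMU_DEF, instead of building a kobj method table. A hedged example; the driver-side identifiers here are illustrative, not lifted from the commit:

static const struct pmap_funcs mmu_booke_methods = {
	.install = mmu_booke_install,	/* 'post-install pre-bootstrap' hook */
	.bootstrap = mmu_booke_bootstrap,
	.kenter = mmu_booke_kenter,
	.enter = mmu_booke_enter,
	.remove = mmu_booke_remove,
	.zero_page = mmu_booke_zero_page,
	/* ... and so on for the remaining members ... */
};

MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods);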
@ -319,6 +319,7 @@ void pmap_deactivate(struct thread *);
vm_paddr_t pmap_kextract(vm_offset_t);
int pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
boolean_t pmap_mmu_install(char *name, int prio);
void pmap_mmu_init(void);
const char *pmap_mmu_name(void);
bool pmap_ps_enabled(pmap_t pmap);
int pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags);
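pmap_mmu_init(), added above, is the hook described in the commit message: it performs the selected driver's 'post-install pre-bootstrap' initialization before the kernel's IFUNCs are resolved, which is what lets moea64 swap in its private second-tier methods in time. A hedged sketch of what the call amounts to, inferred from the install member of struct pmap_funcs rather than taken from the commit:

void
pmap_mmu_init(void)
{
	/* Give the chosen driver a chance to patch its function table
	 * (e.g. selecting a moea64 backend) before ifunc resolution. */
	if (mmu_obj->funcs->install != NULL)
		mmu_obj->funcs->install();
}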
@ -466,9 +466,10 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
/*
 * Bring up MMU
 */
pmap_mmu_init();
link_elf_ireloc(kmdp);
pmap_bootstrap(startkernel, endkernel);
mtmsr(psl_kernset & ~PSL_EE);
link_elf_ireloc(kmdp);

/*
 * Initialize params/tunables that are derived from memsize
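The reordering in this hunk is the heart of the boot-time contract: the pmap driver must be selected before link_elf_ireloc() binds the kernel's IFUNCs, because the pmap resolvers consult the chosen driver, and pmap_bootstrap() then runs through already-resolved entry points. Reduced to its skeleton:

pmap_mmu_init();			/* choose the pmap driver, run its install hook */
link_elf_ireloc(kmdp);			/* bind kernel IFUNCs against that choice */
pmap_bootstrap(startkernel, endkernel);	/* direct dispatch from here on */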
File diff suppressed because it is too large
@ -53,19 +53,16 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#include <machine/dump.h>
#include <machine/ifunc.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/smp.h>

#include "mmu_if.h"

static mmu_def_t *mmu_def_impl;
static mmu_t mmu_obj;
static struct mmu_kobj mmu_kernel_obj;
static struct kobj_ops mmu_kernel_kops;

/*
 * pmap globals
@ -93,574 +90,136 @@ pvo_vaddr_compare(struct pvo_entry *a, struct pvo_entry *b)
}
RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
#endif


void
pmap_advise(pmap_t pmap, vm_offset_t start, vm_offset_t end, int advice)
{

CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %d)", __func__, pmap, start, end,
    advice);
MMU_ADVISE(mmu_obj, pmap, start, end, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, m);
MMU_CLEAR_MODIFY(mmu_obj, m);
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
    src_pmap, dst_addr, len, src_addr);
MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
MMU_COPY_PAGE(mmu_obj, src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
    a_offset, mb, b_offset, xfersize);
MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
    u_int flags, int8_t psind)
{

CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %#x, %d)", pmap, va,
    p, prot, flags, psind);
return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
    end, m_start, prot);
MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
return (MMU_EXTRACT(mmu_obj, pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}

void
pmap_growkernel(vm_offset_t va)
{

CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
MMU_GROWKERNEL(mmu_obj, va);
}

void
pmap_init(void)
{

CTR1(KTR_PMAP, "%s()", __func__);
MMU_INIT(mmu_obj);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, m);
return (MMU_IS_MODIFIED(mmu_obj, m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{

CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, m);
return (MMU_IS_REFERENCED(mmu_obj, m));
}

boolean_t
pmap_ts_referenced(vm_page_t m)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, m);
return (MMU_TS_REFERENCED(mmu_obj, m));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
    prot);
return (MMU_MAP(mmu_obj, virt, start, end, prot));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
    object, pindex, size);
MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}

void
pmap_page_init(vm_page_t m)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, m);
MMU_PAGE_INIT(mmu_obj, m);
}

int
pmap_page_wired_mappings(vm_page_t m)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, m);
return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}

int
pmap_pinit(pmap_t pmap)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
MMU_PINIT(mmu_obj, pmap);
return (1);
}

void
pmap_pinit0(pmap_t pmap)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
MMU_PINIT0(mmu_obj, pmap);
}

void
pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{

CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
    prot);
MMU_PROTECT(mmu_obj, pmap, start, end, prot);
}

void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{

CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
MMU_QENTER(mmu_obj, start, m, count);
}

void
pmap_qremove(vm_offset_t start, int count)
{

CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
MMU_QREMOVE(mmu_obj, start, count);
}

void
pmap_release(pmap_t pmap)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
MMU_RELEASE(mmu_obj, pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
MMU_REMOVE(mmu_obj, pmap, start, end);
}

void
pmap_remove_all(vm_page_t m)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, m);
MMU_REMOVE_ALL(mmu_obj, m);
}

void
pmap_remove_pages(pmap_t pmap)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
MMU_REMOVE_PAGES(mmu_obj, pmap);
}

void
pmap_remove_write(vm_page_t m)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, m);
MMU_REMOVE_WRITE(mmu_obj, m);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
MMU_UNWIRE(mmu_obj, pmap, start, end);
}

void
pmap_zero_page(vm_page_t m)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, m);
MMU_ZERO_PAGE(mmu_obj, m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
{

CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
return (MMU_MINCORE(mmu_obj, pmap, addr, pap));
}

void
pmap_activate(struct thread *td)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, td);
MMU_ACTIVATE(mmu_obj, td);
}

void
pmap_deactivate(struct thread *td)
{

CTR2(KTR_PMAP, "%s(%p)", __func__, td);
MMU_DEACTIVATE(mmu_obj, td);
}

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
    size);
MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
}

/*
 * Routines used in machine-dependent code
 */
void
pmap_bootstrap(vm_offset_t start, vm_offset_t end)
{
mmu_obj = &mmu_kernel_obj;

/*
 * Take care of compiling the selected class, and
 * then statically initialise the MMU object
 */
kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
kobj_init_static((kobj_t)mmu_obj, mmu_def_impl);

MMU_BOOTSTRAP(mmu_obj, start, end);
}

void
|
||||
pmap_cpu_bootstrap(int ap)
|
||||
{
|
||||
/*
|
||||
* No KTR here because our console probably doesn't work yet
|
||||
*/
|
||||
|
||||
return (MMU_CPU_BOOTSTRAP(mmu_obj, ap));
|
||||
}
|
||||
|
||||
void *
|
||||
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
|
||||
{
|
||||
|
||||
CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
|
||||
return (MMU_MAPDEV(mmu_obj, pa, size));
|
||||
}
|
||||
|
||||
void *
|
||||
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
|
||||
{
|
||||
|
||||
CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
|
||||
return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
|
||||
}
|
||||
|
||||
void
|
||||
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
|
||||
{
|
||||
|
||||
CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
|
||||
return (MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma));
|
||||
}
|
||||
|
||||
void
|
||||
pmap_unmapdev(vm_offset_t va, vm_size_t size)
|
||||
{
|
||||
|
||||
CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
|
||||
MMU_UNMAPDEV(mmu_obj, va, size);
|
||||
}
|
||||
|
||||
vm_paddr_t
|
||||
pmap_kextract(vm_offset_t va)
|
||||
{
|
||||
|
||||
CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
|
||||
return (MMU_KEXTRACT(mmu_obj, va));
|
||||
}
|
||||
|
||||
void
|
||||
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
|
||||
{
|
||||
|
||||
CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
|
||||
MMU_KENTER(mmu_obj, va, pa);
|
||||
}
|
||||
|
||||
void
|
||||
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
|
||||
{
|
||||
|
||||
CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
|
||||
MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
|
||||
}
|
||||
|
||||
void
|
||||
pmap_kremove(vm_offset_t va)
|
||||
{
|
||||
|
||||
CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
|
||||
return (MMU_KREMOVE(mmu_obj, va));
|
||||
}
|
||||
|
||||
int
|
||||
pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr,
|
||||
size_t ulen, size_t *klen)
|
||||
{
|
||||
|
||||
CTR2(KTR_PMAP, "%s(%p)", __func__, uaddr);
|
||||
return (MMU_MAP_USER_PTR(mmu_obj, pm, uaddr, kaddr, ulen, klen));
|
||||
}
|
||||
|
||||
int
|
||||
pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user, vm_offset_t *decoded)
|
||||
{
|
||||
|
||||
CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
|
||||
return (MMU_DECODE_KERNEL_PTR(mmu_obj, addr, is_user, decoded));
|
||||
}
|
||||
|
||||
boolean_t
|
||||
pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
|
||||
{
|
||||
|
||||
CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
|
||||
return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
|
||||
}
|
||||
|
||||
void
|
||||
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
|
||||
{
|
||||
|
||||
CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
|
||||
return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
|
||||
}
|
||||
|
||||
void
|
||||
dumpsys_map_chunk(vm_paddr_t pa, size_t sz, void **va)
|
||||
{
|
||||
|
||||
CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
|
||||
return (MMU_DUMPSYS_MAP(mmu_obj, pa, sz, va));
|
||||
}
|
||||
|
||||
void
|
||||
dumpsys_unmap_chunk(vm_paddr_t pa, size_t sz, void *va)
|
||||
{
|
||||
|
||||
CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
|
||||
return (MMU_DUMPSYS_UNMAP(mmu_obj, pa, sz, va));
|
||||
}
|
||||
|
||||
void
|
||||
dumpsys_pa_init(void)
|
||||
{
|
||||
|
||||
CTR1(KTR_PMAP, "%s()", __func__);
|
||||
return (MMU_SCAN_INIT(mmu_obj));
|
||||
}
|
||||
|
||||
size_t
|
||||
dumpsys_scan_pmap(void)
|
||||
{
|
||||
CTR1(KTR_PMAP, "%s()", __func__);
|
||||
return (MMU_SCAN_PMAP(mmu_obj));
|
||||
}
|
||||
|
||||
void *
|
||||
dumpsys_dump_pmap_init(unsigned blkpgs)
|
||||
{
|
||||
CTR1(KTR_PMAP, "%s()", __func__);
|
||||
return (MMU_DUMP_PMAP_INIT(mmu_obj, blkpgs));
|
||||
}
|
||||
|
||||
void *
|
||||
dumpsys_dump_pmap(void *ctx, void *buf, u_long *nbytes)
|
||||
{
|
||||
CTR1(KTR_PMAP, "%s()", __func__);
|
||||
return (MMU_DUMP_PMAP(mmu_obj, ctx, buf, nbytes));
|
||||
}
|
||||
|
||||
vm_offset_t
|
||||
pmap_quick_enter_page(vm_page_t m)
|
||||
{
|
||||
CTR2(KTR_PMAP, "%s(%p)", __func__, m);
|
||||
return (MMU_QUICK_ENTER_PAGE(mmu_obj, m));
|
||||
}
|
||||
|
||||
void
|
||||
pmap_quick_remove_page(vm_offset_t addr)
|
||||
{
|
||||
CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
|
||||
MMU_QUICK_REMOVE_PAGE(mmu_obj, addr);
|
||||
}
|
||||
|
||||
int
|
||||
pmap_change_attr(vm_offset_t addr, vm_size_t size, vm_memattr_t mode)
|
||||
{
|
||||
CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, addr, size, mode);
|
||||
return (MMU_CHANGE_ATTR(mmu_obj, addr, size, mode));
|
||||
}
|
||||
|
||||
void
|
||||
pmap_page_array_startup(long pages)
|
||||
{
|
||||
CTR2(KTR_PMAP, "%s(%ld)", __func__, pages);
|
||||
MMU_PAGE_ARRAY_STARTUP(mmu_obj, pages);
|
||||
}
|
||||
|
||||
boolean_t
|
||||
pmap_page_is_mapped(vm_page_t m)
|
||||
{
|
||||
CTR2(KTR_PMAP, "%s(%p)", __func__, m);
|
||||
return (MMU_PAGE_IS_MAPPED(mmu_obj, m));
|
||||
}
|
||||
|
||||
bool
|
||||
pmap_ps_enabled(pmap_t pmap)
|
||||
{
|
||||
CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
|
||||
return (MMU_PS_ENABLED(mmu_obj, pmap));
|
||||
}
|
||||
|
||||
void
|
||||
pmap_tlbie_all(void)
|
||||
{
|
||||
CTR1(KTR_PMAP, "%s()", __func__);
|
||||
return (MMU_TLBIE_ALL(mmu_obj));
|
||||
}
|
||||
static int
|
||||
pmap_nomethod(void)
|
||||
{
|
||||
return (0);
|
||||
}

#define DEFINE_PMAP_IFUNC(ret, func, args) \
	DEFINE_IFUNC(, ret, pmap_##func, args) { \
		const struct mmu_kobj *mmu = mmu_obj; \
		pmap_##func##_t f; \
		do { \
			f = mmu->funcs->func; \
			if (f != NULL) break; \
			mmu = mmu->base; \
		} while (mmu != NULL); \
		return (f != NULL ? f : (pmap_##func##_t)pmap_nomethod);\
	}
#define DEFINE_DUMPSYS_IFUNC(ret, func, args) \
	DEFINE_IFUNC(, ret, dumpsys_##func, args) { \
		const struct mmu_kobj *mmu = mmu_obj; \
		pmap_dumpsys_##func##_t f; \
		do { \
			f = mmu->funcs->dumpsys_##func; \
			if (f != NULL) break; \
			mmu = mmu->base; \
		} while (mmu != NULL); \
		return (f != NULL ? f : (pmap_dumpsys_##func##_t)pmap_nomethod);\
	}
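
/*
 * Editor's note: below is a minimal, self-contained userland model of
 * the resolver these macros generate, for illustration only. The chain
 * walk and the member names funcs/base mirror the macros above; the
 * zero_page example, the struct layouts, and main() are assumed
 * scaffolding, not kernel code. Each IFUNC resolver runs exactly once,
 * at resolution time; afterwards callers jump straight to the chosen
 * implementation, with pmap_nomethod as the fallback.
 */
#include <stdio.h>

typedef void (*zero_page_t)(void *);

struct pmap_funcs {
	zero_page_t zero_page;		/* NULL means "inherit from base" */
};

struct mmu_kobj {
	const struct mmu_kobj *base;	/* parent implementation */
	const struct pmap_funcs *funcs;
};

static void
generic_zero_page(void *p)
{
	printf("generic zero_page(%p)\n", p);
}

/* Stand-in for pmap_nomethod: a harmless no-op fallback. */
static void
nomethod(void *p)
{
}

static const struct pmap_funcs base_funcs = { generic_zero_page };
static const struct mmu_kobj base_mmu = { NULL, &base_funcs };

/* A child that overrides nothing, so lookup falls through to the base. */
static const struct pmap_funcs child_funcs = { NULL };
static const struct mmu_kobj child_mmu = { &base_mmu, &child_funcs };

/* What DEFINE_PMAP_IFUNC(void, zero_page, ...) boils down to. */
static zero_page_t
resolve_zero_page(const struct mmu_kobj *mmu)
{
	zero_page_t f;

	do {
		f = mmu->funcs->zero_page;
		if (f != NULL)
			break;
		mmu = mmu->base;
	} while (mmu != NULL);
	return (f != NULL ? f : nomethod);
}

int
main(void)
{
	zero_page_t zero_page = resolve_zero_page(&child_mmu);

	zero_page(NULL);	/* dispatches to the inherited base method */
	return (0);
}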

DEFINE_PMAP_IFUNC(void, activate, (struct thread *));
DEFINE_PMAP_IFUNC(void, advise, (pmap_t, vm_offset_t, vm_offset_t, int));
DEFINE_PMAP_IFUNC(void, align_superpage, (vm_object_t, vm_ooffset_t,
    vm_offset_t *, vm_size_t));
DEFINE_PMAP_IFUNC(void, clear_modify, (vm_page_t));
DEFINE_PMAP_IFUNC(void, copy, (pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t));
DEFINE_PMAP_IFUNC(int, enter, (pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t));
DEFINE_PMAP_IFUNC(void, enter_quick, (pmap_t, vm_offset_t, vm_page_t, vm_prot_t));
DEFINE_PMAP_IFUNC(void, enter_object, (pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t));
DEFINE_PMAP_IFUNC(vm_paddr_t, extract, (pmap_t, vm_offset_t));
DEFINE_PMAP_IFUNC(vm_page_t, extract_and_hold, (pmap_t, vm_offset_t, vm_prot_t));
DEFINE_PMAP_IFUNC(void, kenter, (vm_offset_t, vm_paddr_t));
DEFINE_PMAP_IFUNC(void, kenter_attr, (vm_offset_t, vm_paddr_t, vm_memattr_t));
DEFINE_PMAP_IFUNC(vm_paddr_t, kextract, (vm_offset_t));
DEFINE_PMAP_IFUNC(void, kremove, (vm_offset_t));
DEFINE_PMAP_IFUNC(void, object_init_pt, (pmap_t, vm_offset_t, vm_object_t, vm_pindex_t,
    vm_size_t));
DEFINE_PMAP_IFUNC(boolean_t, is_modified, (vm_page_t));
DEFINE_PMAP_IFUNC(boolean_t, is_prefaultable, (pmap_t, vm_offset_t));
DEFINE_PMAP_IFUNC(boolean_t, is_referenced, (vm_page_t));
DEFINE_PMAP_IFUNC(boolean_t, page_exists_quick, (pmap_t, vm_page_t));
DEFINE_PMAP_IFUNC(void, page_init, (vm_page_t));
DEFINE_PMAP_IFUNC(boolean_t, page_is_mapped, (vm_page_t));
DEFINE_PMAP_IFUNC(int, page_wired_mappings, (vm_page_t));
DEFINE_PMAP_IFUNC(void, protect, (pmap_t, vm_offset_t, vm_offset_t, vm_prot_t));
DEFINE_PMAP_IFUNC(bool, ps_enabled, (pmap_t));
DEFINE_PMAP_IFUNC(void, qenter, (vm_offset_t, vm_page_t *, int));
DEFINE_PMAP_IFUNC(void, qremove, (vm_offset_t, int));
DEFINE_PMAP_IFUNC(vm_offset_t, quick_enter_page, (vm_page_t));
DEFINE_PMAP_IFUNC(void, quick_remove_page, (vm_offset_t));
DEFINE_PMAP_IFUNC(boolean_t, ts_referenced, (vm_page_t));
DEFINE_PMAP_IFUNC(void, release, (pmap_t));
DEFINE_PMAP_IFUNC(void, remove, (pmap_t, vm_offset_t, vm_offset_t));
DEFINE_PMAP_IFUNC(void, remove_all, (vm_page_t));
DEFINE_PMAP_IFUNC(void, remove_pages, (pmap_t));
DEFINE_PMAP_IFUNC(void, remove_write, (vm_page_t));
DEFINE_PMAP_IFUNC(void, unwire, (pmap_t, vm_offset_t, vm_offset_t));
DEFINE_PMAP_IFUNC(void, zero_page, (vm_page_t));
DEFINE_PMAP_IFUNC(void, zero_page_area, (vm_page_t, int, int));
DEFINE_PMAP_IFUNC(void, copy_page, (vm_page_t, vm_page_t));
DEFINE_PMAP_IFUNC(void, copy_pages,
    (vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize));
DEFINE_PMAP_IFUNC(void, growkernel, (vm_offset_t));
DEFINE_PMAP_IFUNC(void, init, (void));
DEFINE_PMAP_IFUNC(vm_offset_t, map, (vm_offset_t *, vm_paddr_t, vm_paddr_t, int));
DEFINE_PMAP_IFUNC(int, pinit, (pmap_t));
DEFINE_PMAP_IFUNC(void, pinit0, (pmap_t));
DEFINE_PMAP_IFUNC(int, mincore, (pmap_t, vm_offset_t, vm_paddr_t *));
DEFINE_PMAP_IFUNC(void, deactivate, (struct thread *));
DEFINE_PMAP_IFUNC(void, bootstrap, (vm_offset_t, vm_offset_t));
DEFINE_PMAP_IFUNC(void, cpu_bootstrap, (int));
DEFINE_PMAP_IFUNC(void *, mapdev, (vm_paddr_t, vm_size_t));
DEFINE_PMAP_IFUNC(void *, mapdev_attr, (vm_paddr_t, vm_size_t, vm_memattr_t));
DEFINE_PMAP_IFUNC(void, page_set_memattr, (vm_page_t, vm_memattr_t));
DEFINE_PMAP_IFUNC(void, unmapdev, (vm_offset_t, vm_size_t));
DEFINE_PMAP_IFUNC(int, map_user_ptr,
    (pmap_t, volatile const void *, void **, size_t, size_t *));
DEFINE_PMAP_IFUNC(int, decode_kernel_ptr, (vm_offset_t, int *, vm_offset_t *));
DEFINE_PMAP_IFUNC(boolean_t, dev_direct_mapped, (vm_paddr_t, vm_size_t));
DEFINE_PMAP_IFUNC(void, sync_icache, (pmap_t, vm_offset_t, vm_size_t));
DEFINE_PMAP_IFUNC(int, change_attr, (vm_offset_t, vm_size_t, vm_memattr_t));
DEFINE_PMAP_IFUNC(void, page_array_startup, (long));
DEFINE_PMAP_IFUNC(void, tlbie_all, (void));

DEFINE_DUMPSYS_IFUNC(void, map_chunk, (vm_paddr_t, size_t, void **));
DEFINE_DUMPSYS_IFUNC(void, unmap_chunk, (vm_paddr_t, size_t, void *));
DEFINE_DUMPSYS_IFUNC(void, pa_init, (void));
DEFINE_DUMPSYS_IFUNC(size_t, scan_pmap, (void));
DEFINE_DUMPSYS_IFUNC(void *, dump_pmap_init, (unsigned));
DEFINE_DUMPSYS_IFUNC(void *, dump_pmap, (void *, void *, u_long *));

/*
 * MMU install routines. Highest priority wins; equal priority also
 * overrides, allowing last-set to win.
 */
SET_DECLARE(mmu_set, mmu_def_t);
SET_DECLARE(mmu_set, struct mmu_kobj);

boolean_t
pmap_mmu_install(char *name, int prio)
{
	mmu_def_t **mmupp, *mmup;
	mmu_t *mmupp, mmup;
	static int curr_prio = 0;

	printf("Trying to install pmap %s\n", name);

	/*
	 * Try to locate the MMU kobj corresponding to the name
	 */
	SET_FOREACH(mmupp, mmu_set) {
		mmup = *mmupp;

		printf("Checking %s(%p)\n", mmup->name, mmup->name);
		if (mmup->name &&
		    !strcmp(mmup->name, name) &&
		    (prio >= curr_prio || mmu_def_impl == NULL)) {
		    (prio >= curr_prio || mmu_obj == NULL)) {
			printf("match found: %p\n", mmup);
			curr_prio = prio;
			mmu_def_impl = mmup;
			mmu_obj = mmup;
			return (TRUE);
		}
	}
@ -668,10 +227,18 @@ pmap_mmu_install(char *name, int prio)
	return (FALSE);
}

/* MMU "pre-bootstrap" init, used to install extra resolvers, etc. */
void
pmap_mmu_init()
{
	if (mmu_obj->funcs->install != NULL)
		(mmu_obj->funcs->install)();
}
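
/*
 * Editor's note: the install()/IFUNC split above matters for ordering.
 * The IFUNC resolvers latch their targets the first time they run, so
 * any secondary dispatch table (such as moea64_ops, which the install()
 * hooks further down set) must be filled in before the first pmap_*
 * call. A sketch of the implied early-boot sequence follows; the
 * wrapper function, the "mmu_phyp" choice, and the priority value are
 * assumptions for illustration, not part of this change.
 */
static void __unused
example_boot_order(vm_offset_t start, vm_offset_t end)
{
	pmap_mmu_install("mmu_phyp", 0);	/* pick an implementation */
	pmap_mmu_init();	/* its install() hook runs, sets moea64_ops */
	pmap_bootstrap(start, end);	/* pmap_* IFUNCs resolve from here */
}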

const char *
pmap_mmu_name(void)
{
	return (mmu_obj->ops->cls->name);
	return (mmu_obj->name);
}

int unmapped_buf_allowed;

@ -51,8 +51,6 @@ __FBSDID("$FreeBSD$");

#include <powerpc/aim/mmu_oea64.h>

#include "mmu_if.h"
#include "moea64_if.h"
#include "ps3-hvcall.h"

#define VSID_HASH_MASK 0x0000007fffffffffUL
@ -66,39 +64,47 @@ static uint64_t mps3_vas_id;
 * Kernel MMU interface
 */

static void mps3_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
static void mps3_install(void);
static void mps3_bootstrap(vm_offset_t kernelstart,
    vm_offset_t kernelend);
static void mps3_cpu_bootstrap(mmu_t mmup, int ap);
static int64_t mps3_pte_synch(mmu_t, struct pvo_entry *);
static int64_t mps3_pte_clear(mmu_t, struct pvo_entry *, uint64_t ptebit);
static int64_t mps3_pte_unset(mmu_t, struct pvo_entry *);
static int mps3_pte_insert(mmu_t, struct pvo_entry *);
static void mps3_cpu_bootstrap(int ap);
static int64_t mps3_pte_synch(struct pvo_entry *);
static int64_t mps3_pte_clear(struct pvo_entry *, uint64_t ptebit);
static int64_t mps3_pte_unset(struct pvo_entry *);
static int64_t mps3_pte_insert(struct pvo_entry *);


static mmu_method_t mps3_methods[] = {
	MMUMETHOD(mmu_bootstrap, mps3_bootstrap),
	MMUMETHOD(mmu_cpu_bootstrap, mps3_cpu_bootstrap),

	MMUMETHOD(moea64_pte_synch, mps3_pte_synch),
	MMUMETHOD(moea64_pte_clear, mps3_pte_clear),
	MMUMETHOD(moea64_pte_unset, mps3_pte_unset),
	MMUMETHOD(moea64_pte_insert, mps3_pte_insert),

	{ 0, 0 }
static struct pmap_funcs mps3_methods = {
	.install = mps3_install,
	.bootstrap = mps3_bootstrap,
	.cpu_bootstrap = mps3_cpu_bootstrap,
};

MMU_DEF_INHERIT(ps3_mmu, "mmu_ps3", mps3_methods, 0, oea64_mmu);
static struct moea64_funcs mps3_funcs = {
	.pte_synch = mps3_pte_synch,
	.pte_clear = mps3_pte_clear,
	.pte_unset = mps3_pte_unset,
	.pte_insert = mps3_pte_insert,
};

MMU_DEF_INHERIT(ps3_mmu, "mmu_ps3", mps3_methods, oea64_mmu);
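
/*
 * Editor's note: MMU_DEF_INHERIT loses its old priority argument (the
 * "0" in the removed line above), and moea64's private second-tier
 * methods now dispatch through the plain moea64_ops pointer that
 * mps3_install() (below) sets, instead of a private kobj interface. A
 * self-contained userland model of that pattern follows; the wrapper
 * name, the stub, and main() are assumed for illustration.
 */
#include <stdint.h>
#include <stddef.h>

struct pvo_entry;			/* opaque for this model */

struct moea64_funcs {
	int64_t (*pte_insert)(struct pvo_entry *);
};

static struct moea64_funcs *moea64_ops;	/* set by an install() hook */

static int64_t
stub_pte_insert(struct pvo_entry *pvo)
{
	(void)pvo;
	return (0);
}

static struct moea64_funcs stub_funcs = { stub_pte_insert };

/* How a moea64 call site reaches the platform's PTE code. */
static int64_t
moea64_pte_insert_call(struct pvo_entry *pvo)
{
	return (moea64_ops->pte_insert(pvo));
}

int
main(void)
{
	moea64_ops = &stub_funcs;		/* the "install" step */
	return ((int)moea64_pte_insert_call(NULL));
}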

static struct mtx mps3_table_lock;

static void
mps3_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
mps3_install()
{
	moea64_ops = &mps3_funcs;
}

static void
mps3_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	uint64_t final_pteg_count;

	mtx_init(&mps3_table_lock, "page table", NULL, MTX_DEF);

	moea64_early_bootstrap(mmup, kernelstart, kernelend);
	moea64_early_bootstrap(kernelstart, kernelend);

	/* In case we had a page table already */
	lv1_destruct_virtual_address_space(0);
@ -114,12 +120,12 @@ mps3_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)

	moea64_pteg_count = final_pteg_count / sizeof(struct lpteg);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
	moea64_mid_bootstrap(kernelstart, kernelend);
	moea64_late_bootstrap(kernelstart, kernelend);
}

static void
mps3_cpu_bootstrap(mmu_t mmup, int ap)
mps3_cpu_bootstrap(int ap)
{
	struct slb *slb = PCPU_GET(aim.slb);
	register_t seg0;
@ -179,7 +185,7 @@ mps3_pte_synch_locked(struct pvo_entry *pvo)
}

static int64_t
mps3_pte_synch(mmu_t mmu, struct pvo_entry *pvo)
mps3_pte_synch(struct pvo_entry *pvo)
{
	int64_t retval;

@ -191,7 +197,7 @@ mps3_pte_synch(mmu_t mmu, struct pvo_entry *pvo)
}

static int64_t
mps3_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
mps3_pte_clear(struct pvo_entry *pvo, uint64_t ptebit)
{
	int64_t refchg;
	struct lpte pte;
@ -217,7 +223,7 @@ mps3_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
}

static int64_t
mps3_pte_unset(mmu_t mmu, struct pvo_entry *pvo)
mps3_pte_unset(struct pvo_entry *pvo)
{
	int64_t refchg;

@ -236,8 +242,8 @@ mps3_pte_unset(mmu_t mmu, struct pvo_entry *pvo)
	return (refchg & (LPTE_REF | LPTE_CHG));
}

static int
mps3_pte_insert(mmu_t mmu, struct pvo_entry *pvo)
static int64_t
mps3_pte_insert(struct pvo_entry *pvo)
{
	int result;
	struct lpte pte, evicted;

@ -54,9 +54,6 @@ __FBSDID("$FreeBSD$");

#include <powerpc/aim/mmu_oea64.h>

#include "mmu_if.h"
#include "moea64_if.h"

#include "phyp-hvcall.h"

#define MMU_PHYP_DEBUG 0
@ -75,32 +72,32 @@ static struct rmlock mphyp_eviction_lock;
 * Kernel MMU interface
 */

static void mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
static void mphyp_install(void);
static void mphyp_bootstrap(vm_offset_t kernelstart,
    vm_offset_t kernelend);
static void mphyp_cpu_bootstrap(mmu_t mmup, int ap);
static void *mphyp_dump_pmap(mmu_t mmu, void *ctx, void *buf,
static void mphyp_cpu_bootstrap(int ap);
static void *mphyp_dump_pmap(void *ctx, void *buf,
    u_long *nbytes);
static int64_t mphyp_pte_synch(mmu_t, struct pvo_entry *pvo);
static int64_t mphyp_pte_clear(mmu_t, struct pvo_entry *pvo, uint64_t ptebit);
static int64_t mphyp_pte_unset(mmu_t, struct pvo_entry *pvo);
static int mphyp_pte_insert(mmu_t, struct pvo_entry *pvo);
static int64_t mphyp_pte_synch(struct pvo_entry *pvo);
static int64_t mphyp_pte_clear(struct pvo_entry *pvo, uint64_t ptebit);
static int64_t mphyp_pte_unset(struct pvo_entry *pvo);
static int64_t mphyp_pte_insert(struct pvo_entry *pvo);

static mmu_method_t mphyp_methods[] = {
	MMUMETHOD(mmu_bootstrap, mphyp_bootstrap),
	MMUMETHOD(mmu_cpu_bootstrap, mphyp_cpu_bootstrap),
	MMUMETHOD(mmu_dump_pmap, mphyp_dump_pmap),

	MMUMETHOD(moea64_pte_synch, mphyp_pte_synch),
	MMUMETHOD(moea64_pte_clear, mphyp_pte_clear),
	MMUMETHOD(moea64_pte_unset, mphyp_pte_unset),
	MMUMETHOD(moea64_pte_insert, mphyp_pte_insert),

	/* XXX: pmap_copy_page, pmap_init_page with H_PAGE_INIT */

	{ 0, 0 }
static struct pmap_funcs mphyp_methods = {
	.install = mphyp_install,
	.bootstrap = mphyp_bootstrap,
	.cpu_bootstrap = mphyp_cpu_bootstrap,
	.dumpsys_dump_pmap = mphyp_dump_pmap,
};

MMU_DEF_INHERIT(pseries_mmu, "mmu_phyp", mphyp_methods, 0, oea64_mmu);
static struct moea64_funcs mmu_phyp_funcs = {
	.pte_synch = mphyp_pte_synch,
	.pte_clear = mphyp_pte_clear,
	.pte_unset = mphyp_pte_unset,
	.pte_insert = mphyp_pte_insert,
};

MMU_DEF_INHERIT(pseries_mmu, "mmu_phyp", mphyp_methods, oea64_mmu);

static int brokenkvm = 0;

@ -120,7 +117,14 @@ SYSINIT(kvmbugwarn2, SI_SUB_LAST, SI_ORDER_THIRD + 1, print_kvm_bug_warning,
    NULL);

static void
mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
mphyp_install()
{

	moea64_ops = &mmu_phyp_funcs;
}

static void
mphyp_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	uint64_t final_pteg_count = 0;
	char buf[8];
@ -134,7 +138,7 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)

	rm_init(&mphyp_eviction_lock, "pte eviction");

	moea64_early_bootstrap(mmup, kernelstart, kernelend);
	moea64_early_bootstrap(kernelstart, kernelend);

	root = OF_peer(0);

@ -246,8 +250,8 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
		}
	}

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
	moea64_mid_bootstrap(kernelstart, kernelend);
	moea64_late_bootstrap(kernelstart, kernelend);

	/* Test for broken versions of KVM that don't conform to the spec */
	if (phyp_hcall(H_CLEAR_MOD, 0, 0) == H_FUNCTION)
@ -255,7 +259,7 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
}

static void
mphyp_cpu_bootstrap(mmu_t mmup, int ap)
mphyp_cpu_bootstrap(int ap)
{
	struct slb *slb = PCPU_GET(aim.slb);
	register_t seg0;
@ -277,7 +281,7 @@ mphyp_cpu_bootstrap(mmu_t mmup, int ap)
}

static int64_t
mphyp_pte_synch(mmu_t mmu, struct pvo_entry *pvo)
mphyp_pte_synch(struct pvo_entry *pvo)
{
	struct lpte pte;
	uint64_t junk;
@ -296,7 +300,7 @@ mphyp_pte_synch(mmu_t mmu, struct pvo_entry *pvo)
}

static int64_t
mphyp_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
mphyp_pte_clear(struct pvo_entry *pvo, uint64_t ptebit)
{
	struct rm_priotracker track;
	int64_t refchg;
@ -313,7 +317,7 @@ mphyp_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	rm_rlock(&mphyp_eviction_lock, &track);

	refchg = mphyp_pte_synch(mmu, pvo);
	refchg = mphyp_pte_synch(pvo);
	if (refchg < 0) {
		rm_runlock(&mphyp_eviction_lock, &track);
		return (refchg);
@ -350,7 +354,7 @@ mphyp_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
}

static int64_t
mphyp_pte_unset(mmu_t mmu, struct pvo_entry *pvo)
mphyp_pte_unset(struct pvo_entry *pvo)
{
	struct lpte pte;
	uint64_t junk;
@ -410,8 +414,8 @@ mphyp_pte_spillable_ident(uintptr_t ptegbase, struct lpte *to_evict)
	return (k);
}

static int
mphyp_pte_insert(mmu_t mmu, struct pvo_entry *pvo)
static int64_t
mphyp_pte_insert(struct pvo_entry *pvo)
{
	struct rm_priotracker track;
	int64_t result;
@ -509,7 +513,7 @@ mphyp_pte_insert(mmu_t mmu, struct pvo_entry *pvo)
}

static void *
mphyp_dump_pmap(mmu_t mmu, void *ctx, void *buf, u_long *nbytes)
mphyp_dump_pmap(void *ctx, void *buf, u_long *nbytes)
{
	struct dump_context *dctx;
	struct lpte p, *pbuf;