o Introduce vm_sync_icache() for making the I-cache coherent with
  the memory or D-cache, depending on the semantics of the platform.
  vm_sync_icache() is basically a wrapper around pmap_sync_icache(),
  that translates the vm_map_t argument to pmap_t.
o Introduce pmap_sync_icache() to all PMAP implementations. For powerpc
  it replaces the pmap_page_executable() function, added to solve
  the I-cache problem in uiomove_fromphys().
o In proc_rwmem() call vm_sync_icache() when writing to a page that
  has execute permissions. This assures that when breakpoints are
  written, the I-cache will be coherent and the process will actually
  hit the breakpoint.
o This also fixes the Book-E PMAP implementation that was missing
  necessary locking while trying to deal with the I-cache coherency
  in pmap_enter() (read: mmu_booke_enter_locked).

The key property of this change is that the I-cache is made coherent
*after* writes have been done. Doing it in the PMAP layer when adding
or changing a mapping means that the I-cache is made coherent *before*
any writes happen. The difference is key when the I-cache prefetches.
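In short, the new call path is a thin VM-to-PMAP wrapper that runs only after the write has landed. The fragment below is condensed from the vm_sync_icache() and proc_rwmem() hunks later in this diff; the local names (error, writing, out_prot, map, uva, len) are the ones already used in the proc_rwmem() hunk:

	void
	vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
	{
		/* Translate the vm_map_t argument to pmap_t and hand off. */
		pmap_sync_icache(map->pmap, va, sz);
	}

	/* In proc_rwmem(): sync only after writing to an executable mapping,
	 * i.e. after the breakpoint bytes are already in memory. */
	if (!error && writing && (out_prot & VM_PROT_EXECUTE))
		vm_sync_icache(map, uva, len);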
commit 1a4fcaebe3
parent a7b5ad271c

Changed directories (under sys/): amd64/amd64, arm, i386, ia64/ia64, kern, mips/mips, powerpc, sparc64/sparc64, sun4v/sun4v, vm
@@ -4810,6 +4810,11 @@ if (oldpmap)	/* XXX FIXME */
 	critical_exit();
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.
@@ -2863,14 +2863,14 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
 	if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa))) {
 		vm_page_lock_queues();
 		if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva) {
-		/* release vm_page lock for pv_entry UMA */
+			/* release vm_page lock for pv_entry UMA */
 			vm_page_unlock_queues();
 			if ((pve = pmap_get_pv_entry()) == NULL)
 				panic("pmap_kenter_internal: no pv entries");
 			vm_page_lock_queues();
 			PMAP_LOCK(pmap_kernel());
 			pmap_enter_pv(m, pve, pmap_kernel(), va,
-			PVF_WRITE | PVF_UNMAN);
+			    PVF_WRITE | PVF_UNMAN);
 			pmap_fix_cache(m, pmap_kernel(), va);
 			PMAP_UNLOCK(pmap_kernel());
 		} else {
@@ -4567,6 +4567,12 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
 }
 
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.
@@ -408,7 +408,7 @@ initarm(void *mdp, void *unused __unused)
 		availmem_regions_sz = i;
 	} else {
 		/* Fall back to hardcoded boothowto flags and metadata. */
-		boothowto = RB_VERBOSE | RB_SINGLE;
+		boothowto = 0; // RB_VERBOSE | RB_SINGLE;
 		lastaddr = fake_preload_metadata();
 
 		/*
@@ -4859,6 +4859,11 @@ pmap_activate(struct thread *td)
 	critical_exit();
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.
@@ -4175,6 +4175,11 @@ pmap_activate(struct thread *td)
 	critical_exit();
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.
@@ -2276,6 +2276,33 @@ out:
 	return (prevpm);
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+	pmap_t oldpm;
+	struct ia64_lpte *pte;
+	vm_offset_t lim;
+	vm_size_t len;
+
+	sz += va & 31;
+	va &= ~31;
+	sz = (sz + 31) & ~31;
+
+	PMAP_LOCK(pm);
+	oldpm = pmap_switch(pm);
+	while (sz > 0) {
+		lim = round_page(va);
+		len = MIN(lim - va, sz);
+		pte = pmap_find_vhpt(va);
+		if (pte != NULL && pmap_present(pte))
+			ia64_sync_icache(va, len);
+		va += len;
+		sz -= len;
+	}
+	pmap_switch(oldpm);
+	PMAP_UNLOCK(pm);
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.
@@ -327,6 +327,10 @@ proc_rwmem(struct proc *p, struct uio *uio)
 		 */
 		error = uiomove_fromphys(&m, page_offset, len, uio);
 
+		/* Make the I-cache coherent for breakpoints. */
+		if (!error && writing && (out_prot & VM_PROT_EXECUTE))
+			vm_sync_icache(map, uva, len);
+
 		/*
 		 * Release the page.
 		 */
@@ -2903,6 +2903,11 @@ pmap_activate(struct thread *td)
 	critical_exit();
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.
@@ -330,7 +330,7 @@ void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
 vm_offset_t moea_kextract(mmu_t, vm_offset_t);
 void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
 boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-boolean_t moea_page_executable(mmu_t, vm_page_t);
+static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
 
 static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_change_wiring,	moea_change_wiring),
@@ -357,6 +357,7 @@ static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_remove,		moea_remove),
 	MMUMETHOD(mmu_remove_all,	moea_remove_all),
 	MMUMETHOD(mmu_remove_write,	moea_remove_write),
+	MMUMETHOD(mmu_sync_icache,	moea_sync_icache),
 	MMUMETHOD(mmu_zero_page,	moea_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
 	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
@@ -371,7 +372,6 @@ static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_kextract,		moea_kextract),
 	MMUMETHOD(mmu_kenter,		moea_kenter),
 	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
-	MMUMETHOD(mmu_page_executable,	moea_page_executable),
 
 	{ 0, 0 }
 };
@@ -2359,12 +2359,6 @@ moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
 	return (EFAULT);
 }
 
-boolean_t
-moea_page_executable(mmu_t mmu, vm_page_t pg)
-{
-	return ((moea_attr_fetch(pg) & PTE_EXEC) == PTE_EXEC);
-}
-
 /*
  * Map a set of physical memory pages into the kernel virtual
  * address space. Return a pointer to where it is mapped. This
@@ -2424,3 +2418,27 @@ moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
 		kmem_free(kernel_map, base, size);
 	}
 }
+
+static void
+moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+	struct pvo_entry *pvo;
+	vm_offset_t lim;
+	vm_paddr_t pa;
+	vm_size_t len;
+
+	PMAP_LOCK(pm);
+	while (sz > 0) {
+		lim = round_page(va);
+		len = MIN(lim - va, sz);
+		pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
+		if (pvo != NULL) {
+			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
+			    (va & ADDR_POFF);
+			moea_syncicache(pa, len);
+		}
+		va += len;
+		sz -= len;
+	}
+	PMAP_UNLOCK(pm);
+}
@@ -369,7 +369,7 @@ static boolean_t moea64_query_bit(vm_page_t, u_int64_t);
 static u_int	moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *);
 static void	moea64_kremove(mmu_t, vm_offset_t);
 static void	moea64_syncicache(pmap_t pmap, vm_offset_t va,
-		    vm_offset_t pa);
+		    vm_offset_t pa, vm_size_t sz);
 static void	tlbia(void);
 
 /*
@@ -410,7 +410,7 @@ void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
 vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
 void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
 boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-boolean_t moea64_page_executable(mmu_t, vm_page_t);
+static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
 
 static mmu_method_t moea64_bridge_methods[] = {
 	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
@@ -437,6 +437,7 @@ static mmu_method_t moea64_bridge_methods[] = {
 	MMUMETHOD(mmu_remove,		moea64_remove),
 	MMUMETHOD(mmu_remove_all,	moea64_remove_all),
 	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
+	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
 	MMUMETHOD(mmu_zero_page,	moea64_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
 	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
@@ -451,7 +452,6 @@ static mmu_method_t moea64_bridge_methods[] = {
 	MMUMETHOD(mmu_kextract,		moea64_kextract),
 	MMUMETHOD(mmu_kenter,		moea64_kenter),
 	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
-	MMUMETHOD(mmu_page_executable,	moea64_page_executable),
 
 	{ 0, 0 }
 };
@@ -1264,12 +1264,12 @@ moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	 * mapped executable and cacheable.
 	 */
 	if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
-		moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m));
+		moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
 	}
 }
 
 static void
-moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa)
+moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz)
 {
 	/*
 	 * This is much trickier than on older systems because
@@ -1285,16 +1285,16 @@ moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa)
 		 * If PMAP is not bootstrapped, we are likely to be
 		 * in real mode.
 		 */
-		__syncicache((void *)pa,PAGE_SIZE);
+		__syncicache((void *)pa, sz);
 	} else if (pmap == kernel_pmap) {
-		__syncicache((void *)va,PAGE_SIZE);
+		__syncicache((void *)va, sz);
 	} else {
 		/* Use the scratch page to set up a temp mapping */
 
 		mtx_lock(&moea64_scratchpage_mtx);
 
 		moea64_set_scratchpage_pa(1,pa);
-		__syncicache((void *)moea64_scratchpage_va[1],PAGE_SIZE);
+		__syncicache((void *)moea64_scratchpage_va[1], sz);
 
 		mtx_unlock(&moea64_scratchpage_mtx);
 	}
@@ -1817,8 +1817,9 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
 			    pvo->pvo_pmap, pvo->pvo_vaddr);
 			if ((pvo->pvo_pte.lpte.pte_lo &
 			    (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
-				moea64_syncicache(pm, sva,
-				    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
+				moea64_syncicache(pm, sva,
+				    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
+				    PAGE_SIZE);
 			}
 		}
 		UNLOCK_TABLE();
@@ -2406,12 +2407,6 @@ moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
 	return (EFAULT);
 }
 
-boolean_t
-moea64_page_executable(mmu_t mmu, vm_page_t pg)
-{
-	return (!moea64_query_bit(pg, LPTE_NOEXEC));
-}
-
 /*
  * Map a set of physical memory pages into the kernel virtual
  * address space. Return a pointer to where it is mapped. This
@@ -2454,3 +2449,26 @@ moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
 	kmem_free(kernel_map, base, size);
 }
 
+static void
+moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+	struct pvo_entry *pvo;
+	vm_offset_t lim;
+	vm_paddr_t pa;
+	vm_size_t len;
+
+	PMAP_LOCK(pm);
+	while (sz > 0) {
+		lim = round_page(va);
+		len = MIN(lim - va, sz);
+		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
+		if (pvo != NULL) {
+			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
+			    (va & ADDR_POFF);
+			moea64_syncicache(pm, va, pa, len);
+		}
+		va += len;
+		sz -= len;
+	}
+	PMAP_UNLOCK(pm);
+}
@@ -319,7 +319,8 @@ static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t);
 static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
 static void		mmu_booke_kremove(mmu_t, vm_offset_t);
 static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-static boolean_t	mmu_booke_page_executable(mmu_t, vm_page_t);
+static void		mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
+    vm_size_t);
 static vm_offset_t	mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
     vm_size_t, vm_size_t *);
 static void		mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
@@ -357,6 +358,7 @@ static mmu_method_t mmu_booke_methods[] = {
 	MMUMETHOD(mmu_remove,		mmu_booke_remove),
 	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
 	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
+	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
 	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
 	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
@@ -370,7 +372,6 @@ static mmu_method_t mmu_booke_methods[] = {
 	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
 	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
 /*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
-	MMUMETHOD(mmu_page_executable,	mmu_booke_page_executable),
 	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),
 
 	/* dumpsys() support */
@@ -1682,21 +1683,6 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 			__syncicache((void *)va, PAGE_SIZE);
 			sync = 0;
 		}
-
-		if (sync) {
-			/* Create a temporary mapping. */
-			pmap = PCPU_GET(curpmap);
-
-			va = 0;
-			pte = pte_find(mmu, pmap, va);
-			KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));
-
-			flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M;
-
-			pte_enter(mmu, pmap, m, va, flags);
-			__syncicache((void *)va, PAGE_SIZE);
-			pte_remove(mmu, pmap, va, PTBL_UNHOLD);
-		}
 	}
 
 	/*
@@ -1991,25 +1977,47 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
 	vm_page_flag_clear(m, PG_WRITEABLE);
 }
 
-static boolean_t
-mmu_booke_page_executable(mmu_t mmu, vm_page_t m)
+static void
+mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
-	pv_entry_t pv;
 	pte_t *pte;
-	boolean_t executable;
+	pmap_t pmap;
+	vm_page_t m;
+	vm_offset_t addr;
+	vm_paddr_t pa;
+	int active, valid;
+
+	va = trunc_page(va);
+	sz = round_page(sz);
 
-	executable = FALSE;
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
-		PMAP_LOCK(pv->pv_pmap);
-		pte = pte_find(mmu, pv->pv_pmap, pv->pv_va);
-		if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX))
-			executable = TRUE;
-		PMAP_UNLOCK(pv->pv_pmap);
-		if (executable)
-			break;
+	vm_page_lock_queues();
+	pmap = PCPU_GET(curpmap);
+	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
+	while (sz > 0) {
+		PMAP_LOCK(pm);
+		pte = pte_find(mmu, pm, va);
+		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
+		if (valid)
+			pa = PTE_PA(pte);
+		PMAP_UNLOCK(pm);
+		if (valid) {
+			if (!active) {
+				/* Create a mapping in the active pmap. */
+				addr = 0;
+				m = PHYS_TO_VM_PAGE(pa);
+				PMAP_LOCK(pmap);
+				pte_enter(mmu, pmap, m, addr,
+				    PTE_SR | PTE_VALID | PTE_UR);
+				__syncicache((void *)addr, PAGE_SIZE);
+				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
+				PMAP_UNLOCK(pmap);
+			} else
+				__syncicache((void *)va, PAGE_SIZE);
+		}
+		va += PAGE_SIZE;
+		sz -= PAGE_SIZE;
 	}
 
-	return (executable);
+	vm_page_unlock_queues();
 }
 
 /*
@@ -171,7 +171,6 @@ void pmap_bootstrap(vm_offset_t, vm_offset_t);
 void		pmap_kenter(vm_offset_t va, vm_offset_t pa);
 void		pmap_kremove(vm_offset_t);
 void		*pmap_mapdev(vm_offset_t, vm_size_t);
-boolean_t	pmap_page_executable(vm_page_t);
 void		pmap_unmapdev(vm_offset_t, vm_size_t);
 void		pmap_deactivate(struct thread *);
 vm_offset_t	pmap_kextract(vm_offset_t);
@@ -789,15 +789,21 @@ METHOD boolean_t dev_direct_mapped {
 
 
 /**
- * @brief Evaluate if a physical page has an executable mapping
+ * @brief Enforce instruction cache coherency. Typically called after a
+ * region of memory has been modified and before execution of or within
+ * that region is attempted. Setting breakpoints in a process through
+ * ptrace(2) is one example of when the instruction cache needs to be
+ * made coherent.
  *
- * @param _pg	physical page
- *
- * @retval bool	TRUE if a physical mapping exists for the given page.
+ * @param _pm	the physical map of the virtual address
+ * @param _va	the virtual address of the modified region
+ * @param _sz	the size of the modified region
  */
-METHOD boolean_t page_executable {
+METHOD void sync_icache {
 	mmu_t		_mmu;
-	vm_page_t	_pg;
+	pmap_t		_pm;
+	vm_offset_t	_va;
+	vm_size_t	_sz;
 };


@@ -457,12 +457,12 @@ pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
 	return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
 }
 
-boolean_t
-pmap_page_executable(vm_page_t pg)
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
 
-	CTR2(KTR_PMAP, "%s(%p)", __func__, pg);
-	return (MMU_PAGE_EXECUTABLE(mmu_obj, pg));
+	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
+	return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
 }
 
 vm_offset_t
@@ -107,9 +107,6 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 				sf_buf_free(sf);
 				goto out;
 			}
-			if (uio->uio_rw == UIO_WRITE &&
-			    pmap_page_executable(m))
-				__syncicache(cp, cnt);
 			break;
 		case UIO_SYSSPACE:
 			if (uio->uio_rw == UIO_READ)
@@ -2001,6 +2001,11 @@ pmap_activate(struct thread *td)
 	mtx_unlock_spin(&sched_lock);
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.
@@ -424,6 +424,11 @@ pmap_activate(struct thread *td)
 	critical_exit();
 }
 
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
 /*
  * Increase the starting virtual address of the given mapping if a
  * different alignment might result in more superpage mappings.
@@ -133,6 +133,7 @@ void pmap_remove(pmap_t, vm_offset_t, vm_offset_t);
 void		 pmap_remove_all(vm_page_t m);
 void		 pmap_remove_pages(pmap_t);
 void		 pmap_remove_write(vm_page_t m);
+void		 pmap_sync_icache(pmap_t, vm_offset_t, vm_size_t);
 void		 pmap_zero_page(vm_page_t);
 void		 pmap_zero_page_area(vm_page_t, int off, int size);
 void		 pmap_zero_page_idle(vm_page_t);
@@ -63,6 +63,7 @@ int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace
 void vm_waitproc(struct proc *);
 int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
 void vm_set_page_size(void);
+void vm_sync_icache(vm_map_t, vm_offset_t, vm_size_t);
 struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t);
 struct vmspace *vmspace_fork(struct vmspace *, vm_ooffset_t *);
 int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
@@ -309,6 +309,13 @@ vm_imgact_unmap_page(struct sf_buf *sf)
 	vm_page_unlock_queues();
 }
 
+void
+vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
+{
+
+	pmap_sync_icache(map->pmap, va, sz);
+}
+
 struct kstack_cache_entry {
 	vm_object_t ksobj;
 	struct kstack_cache_entry *next_ks_entry;