Add two new pmap functions:

vm_offset_t pmap_quick_enter_page(vm_page_t m)
void pmap_quick_remove_page(vm_offset_t kva)

These will create and destroy a temporary, CPU-local KVA mapping of the specified page.

Guarantees:
--Will not sleep and will not fail.
--Safe to call under a non-sleepable lock or from an ithread.

Restrictions:
--Not guaranteed to be safe to call from an interrupt filter or under a spin mutex on all platforms.
--The current implementation does not guarantee more than one page of mapping space across all platforms. MI code should not make nested calls to pmap_quick_enter_page.
--MI code should not perform locking while holding onto a mapping created by pmap_quick_enter_page.

The idea is to use this in busdma, for bounce buffer copies as well as virtually-indexed cache maintenance on mips and arm.
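
For illustration, a minimal sketch of such a bounce buffer copy under the rules above (the bounce_copy helper and its signature are hypothetical; only the two pmap functions come from this change):

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

/*
 * Hypothetical helper: copy part of one page into a bounce buffer.
 * It follows the contract above: the quick mapping is not nested, no
 * locks are taken while the mapping is live, and the mapping is torn
 * down with the exact KVA that was returned.
 */
static void
bounce_copy(vm_page_t m, vm_offset_t off, void *bounce, size_t len)
{
	vm_offset_t kva;

	KASSERT(off + len <= PAGE_SIZE, ("bounce_copy: crosses page"));
	kva = pmap_quick_enter_page(m);	/* will not sleep or fail */
	bcopy((void *)(kva + off), bounce, len);
	pmap_quick_remove_page(kva);
}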

NOTE: the non-i386, non-amd64 implementations of these functions still need review and testing.

Reviewed by:	kib
Approved by:	kib (mentor)
Differential Revision:	http://reviews.freebsd.org/D3013
Jason A. Harmening	2015-08-04 19:46:13 +00:00
commit 713841afb2 (parent 7b80d5ad13)
18 changed files with 575 additions and 8 deletions

@@ -6940,6 +6940,18 @@ pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
}
}
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
void
pmap_quick_remove_page(vm_offset_t addr)
{
}
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

@@ -1156,6 +1156,22 @@ pmap_bootstrap(vm_offset_t firstaddr)
virtual_end = vm_max_kernel_address;
}
static void
pmap_init_qpages(void)
{
struct pcpu *pc;
int i;
CPU_FOREACH(i) {
pc = pcpu_find(i);
pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
if (pc->pc_qmap_addr == 0)
panic("pmap_init_qpages: unable to allocate KVA");
}
}
SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_qpages, NULL);
/*
* The function can already be used in second initialization stage.
* As such, the function DOES NOT call pmap_growkernel() where PT2
@@ -5709,6 +5725,42 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
mtx_unlock(&sysmaps->lock);
}
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
pt2_entry_t *pte;
vm_offset_t qmap_addr;
critical_enter();
qmap_addr = PCPU_GET(qmap_addr);
pte = pt2map_entry(qmap_addr);
KASSERT(*pte == 0, ("pmap_quick_enter_page: PTE busy"));
pte2_store(pte, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m),
PTE2_AP_KRW, pmap_page_get_memattr(m)));
tlb_flush_local(qmap_addr);
return (qmap_addr);
}
void
pmap_quick_remove_page(vm_offset_t addr)
{
pt2_entry_t *pte;
vm_offset_t qmap_addr;
qmap_addr = PCPU_GET(qmap_addr);
pte = pt2map_entry(qmap_addr);
KASSERT(addr == qmap_addr, ("pmap_quick_remove_page: invalid address"));
KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use"));
pte2_clear(pte);
critical_exit();
}
/*
* Copy the range specified by src_addr/len
* from the source map to the range dst_addr/len

@@ -1979,6 +1979,7 @@ pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb);
}
/***************************************************
* Pmap allocation/deallocation routines.
***************************************************/
@@ -2306,6 +2307,31 @@ pmap_remove_pages(pmap_t pmap)
PMAP_UNLOCK(pmap);
}
static void
pmap_init_qpages(void)
{
struct pcpu *pc;
struct l2_bucket *l2b;
int i;
CPU_FOREACH(i) {
pc = pcpu_find(i);
pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
if (pc->pc_qmap_addr == 0)
panic("pmap_init_qpages: unable to allocate KVA");
l2b = pmap_get_l2_bucket(pmap_kernel(), pc->pc_qmap_addr);
if (l2b == NULL)
l2b = pmap_grow_l2_bucket(pmap_kernel(),
pc->pc_qmap_addr);
if (l2b == NULL)
panic("pmap_alloc_specials: no l2b for 0x%x",
pc->pc_qmap_addr);
pc->pc_qmap_pte = &l2b->l2b_kva[l2pte_index(pc->pc_qmap_addr)];
}
}
SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_qpages, NULL);
/***************************************************
* Low level mapping routines.....
@@ -4678,6 +4704,49 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
pmap_copy_page_generic(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
}
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
pt_entry_t *qmap_pte;
vm_offset_t qmap_addr;
critical_enter();
qmap_addr = PCPU_GET(qmap_addr);
qmap_pte = PCPU_GET(qmap_pte);
KASSERT(*qmap_pte == 0, ("pmap_quick_enter_page: PTE busy"));
*qmap_pte = L2_S_PROTO | VM_PAGE_TO_PHYS(m) | L2_S_REF;
if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
*qmap_pte |= pte_l2_s_cache_mode;
pmap_set_prot(qmap_pte, VM_PROT_READ | VM_PROT_WRITE, 0);
PTE_SYNC(qmap_pte);
cpu_tlb_flushD_SE(qmap_addr);
cpu_cpwait();
return (qmap_addr);
}
void
pmap_quick_remove_page(vm_offset_t addr)
{
pt_entry_t *qmap_pte;
qmap_pte = PCPU_GET(qmap_pte);
KASSERT(addr == PCPU_GET(qmap_addr),
("pmap_quick_remove_page: invalid address"));
KASSERT(*qmap_pte != 0,
("pmap_quick_remove_page: PTE not in use"));
cpu_idcache_wbinv_range(addr, PAGE_SIZE);
pmap_l2cache_wbinv_range(addr, *qmap_pte & L2_S_FRAME, PAGE_SIZE);
*qmap_pte = 0;
PTE_SYNC(qmap_pte);
critical_exit();
}
/*
* this routine returns true if a physical page resides
* in the given pmap.

@@ -227,8 +227,8 @@ vm_offset_t vm_max_kernel_address;
struct pmap kernel_pmap_store;
static pt_entry_t *csrc_pte, *cdst_pte;
static vm_offset_t csrcp, cdstp;
static struct mtx cmtx;
static vm_offset_t csrcp, cdstp, qmap_addr;
static struct mtx cmtx, qmap_mtx;
static void pmap_init_l1(struct l1_ttable *, pd_entry_t *);
/*
@@ -2155,6 +2155,7 @@ pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
pd_entry_t pde;
pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va;
pt_entry_t *ptep;
pt_entry_t *qmap_pte;
vm_paddr_t pa;
vm_offset_t va;
vm_size_t size;
@@ -2276,6 +2277,8 @@ pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte);
pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte);
pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte);
pmap_alloc_specials(&virtual_avail, 1, &qmap_addr, &qmap_pte);
pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)qmap_pte);
size = ((vm_max_kernel_address - pmap_curmaxkvaddr) + L1_S_OFFSET) /
L1_S_SIZE;
pmap_alloc_specials(&virtual_avail,
@@ -2302,6 +2305,7 @@ pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
virtual_end = vm_max_kernel_address;
kernel_vm_end = pmap_curmaxkvaddr;
mtx_init(&cmtx, "TMP mappings mtx", NULL, MTX_DEF);
mtx_init(&qmap_mtx, "quick mapping mtx", NULL, MTX_DEF);
pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb);
}
@@ -4343,6 +4347,34 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
}
}
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
/*
* Don't bother with a PCPU pageframe, since we don't support
* SMP for anything pre-armv7. Use pmap_kenter() to ensure
* caching is handled correctly for multiple mappings of the
* same physical page.
*/
mtx_assert(&qmap_mtx, MA_NOTOWNED);
mtx_lock(&qmap_mtx);
pmap_kenter(qmap_addr, VM_PAGE_TO_PHYS(m));
return (qmap_addr);
}
void
pmap_quick_remove_page(vm_offset_t addr)
{
KASSERT(addr == qmap_addr,
("pmap_quick_remove_page: invalid address"));
mtx_assert(&qmap_mtx, MA_OWNED);
pmap_kremove(addr);
mtx_unlock(&qmap_mtx);
}
/*
* this routine returns true if a physical page resides
* in the given pmap.

@@ -47,10 +47,14 @@ struct vmspace;
unsigned int pc_vfpmvfr0; \
unsigned int pc_vfpmvfr1; \
struct pmap *pc_curpmap; \
char __pad[141]
vm_offset_t pc_qmap_addr; \
void *pc_qmap_pte; \
char __pad[133]
#else
#define PCPU_MD_FIELDS \
char __pad[157]
vm_offset_t pc_qmap_addr; \
void *pc_qmap_pte; \
char __pad[149]
#endif
#ifdef _KERNEL

@@ -2441,6 +2441,18 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
}
}
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
void
pmap_quick_remove_page(vm_offset_t addr)
{
}
/*
* Returns true if the pmap's pv is one of the first
* 16 pvs linked to from this page. This count may

@@ -512,6 +512,22 @@ pmap_bootstrap(vm_paddr_t firstaddr)
pmap_set_pg();
}
static void
pmap_init_qpages(void)
{
struct pcpu *pc;
int i;
CPU_FOREACH(i) {
pc = pcpu_find(i);
pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
if (pc->pc_qmap_addr == 0)
panic("pmap_init_qpages: unable to allocate KVA");
}
}
SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_qpages, NULL);
/*
* Setup the PAT MSR.
*/
@@ -5400,6 +5416,39 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
}
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
vm_offset_t qaddr;
pt_entry_t *pte;
critical_enter();
qaddr = PCPU_GET(qmap_addr);
pte = vtopte(qaddr);
KASSERT(*pte == 0, ("pmap_quick_enter_page: PTE busy"));
*pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
pmap_cache_bits(pmap_page_get_memattr(m), 0);
invlpg(qaddr);
return (qaddr);
}
void
pmap_quick_remove_page(vm_offset_t addr)
{
vm_offset_t qaddr;
pt_entry_t *pte;
qaddr = PCPU_GET(qmap_addr);
pte = vtopte(qaddr);
KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use"));
KASSERT(addr == qaddr, ("pmap_quick_remove_page: invalid address"));
*pte = 0;
critical_exit();
}
#if defined(PMAP_DEBUG)
pmap_pid_dump(int pid)

@@ -58,7 +58,8 @@
int pc_private_tss; /* Flag indicating private tss*/\
u_int pc_cmci_mask; /* MCx banks for CMCI */ \
u_int pc_vcpu_id; /* Xen vCPU ID */ \
char __pad[233]
vm_offset_t pc_qmap_addr; /* KVA for temporary mappings */\
char __pad[229]
#ifdef _KERNEL

@@ -2638,6 +2638,62 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
}
}
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
#if defined(__mips_n64)
return MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
#else
vm_paddr_t pa;
struct local_sysmaps *sysm;
pt_entry_t *pte;
pa = VM_PAGE_TO_PHYS(m);
if (MIPS_DIRECT_MAPPABLE(pa))
return (MIPS_PHYS_TO_DIRECT(pa));
critical_enter();
sysm = &sysmap_lmem[PCPU_GET(cpuid)];
KASSERT(sysm->valid1 == 0, ("pmap_quick_enter_page: PTE busy"));
pte = pmap_pte(kernel_pmap, sysm->base);
*pte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G |
(is_cacheable_mem(pa) ? PTE_C_CACHE : PTE_C_UNCACHED);
sysm->valid1 = 1;
return (sysm->base);
#endif
}
void
pmap_quick_remove_page(vm_offset_t addr)
{
mips_dcache_wbinv_range(addr, PAGE_SIZE);
#if !defined(__mips_n64)
struct local_sysmaps *sysm;
pt_entry_t *pte;
if (addr >= MIPS_KSEG0_START && addr < MIPS_KSEG0_END)
return;
sysm = &sysmap_lmem[PCPU_GET(cpuid)];
KASSERT(sysm->valid1 != 0,
("pmap_quick_remove_page: PTE not in use"));
KASSERT(sysm->base == addr,
("pmap_quick_remove_page: invalid address"));
pte = pmap_pte(kernel_pmap, addr);
*pte = PTE_G;
tlb_invalidate_address(kernel_pmap, addr);
sysm->valid1 = 0;
critical_exit();
#endif
}
/*
* Returns true if the pmap's pv is one of the first
* 16 pvs linked to from this page. This count may

@@ -316,6 +316,8 @@ boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
void moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va);
void moea_scan_init(mmu_t mmu);
vm_offset_t moea_quick_enter_page(mmu_t mmu, vm_page_t m);
void moea_quick_remove_page(mmu_t mmu, vm_offset_t addr);
static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_clear_modify, moea_clear_modify),
@@ -351,6 +353,8 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_activate, moea_activate),
MMUMETHOD(mmu_deactivate, moea_deactivate),
MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr),
MMUMETHOD(mmu_quick_enter_page, moea_quick_enter_page),
MMUMETHOD(mmu_quick_remove_page, moea_quick_remove_page),
/* Internal interfaces */
MMUMETHOD(mmu_bootstrap, moea_bootstrap),
@@ -1082,6 +1086,18 @@ moea_zero_page_idle(mmu_t mmu, vm_page_t m)
moea_zero_page(mmu, m);
}
vm_offset_t
moea_quick_enter_page(mmu_t mmu, vm_page_t m)
{
return (VM_PAGE_TO_PHYS(m));
}
void
moea_quick_remove_page(mmu_t mmu, vm_offset_t addr)
{
}
/*
* Map the given physical page at the specified virtual address in the
* target pmap with the protection requested. If specified the page

@@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>
#include <sys/kdb.h>
@@ -227,6 +228,7 @@ static u_int moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
static void moea64_kremove(mmu_t, vm_offset_t);
static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
vm_paddr_t pa, vm_size_t sz);
static void moea64_pmap_init_qpages(void);
/*
* Kernel MMU interface
@@ -278,6 +280,8 @@ static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
void **va);
void moea64_scan_init(mmu_t mmu);
vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);
static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
@@ -314,6 +318,8 @@ static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_activate, moea64_activate),
MMUMETHOD(mmu_deactivate, moea64_deactivate),
MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr),
MMUMETHOD(mmu_quick_enter_page, moea64_quick_enter_page),
MMUMETHOD(mmu_quick_remove_page, moea64_quick_remove_page),
/* Internal interfaces */
MMUMETHOD(mmu_mapdev, moea64_mapdev),
@@ -974,6 +980,29 @@ moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend
}
}
static void
moea64_pmap_init_qpages(void)
{
struct pcpu *pc;
int i;
if (hw_direct_map)
return;
CPU_FOREACH(i) {
pc = pcpu_find(i);
pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
if (pc->pc_qmap_addr == 0)
panic("pmap_init_qpages: unable to allocate KVA");
PMAP_LOCK(kernel_pmap);
pc->pc_qmap_pvo = moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr);
PMAP_UNLOCK(kernel_pmap);
mtx_init(&pc->pc_qmap_lock, "qmap lock", NULL, MTX_DEF);
}
}
SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL);
/*
* Activate a user pmap. This mostly involves setting some non-CPU
* state.
@@ -1206,6 +1235,48 @@ moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
moea64_zero_page(mmu, m);
}
vm_offset_t
moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
{
struct pvo_entry *pvo;
vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
if (hw_direct_map)
return (pa);
/*
* MOEA64_PTE_REPLACE does some locking, so we can't just grab
* a critical section and access the PCPU data like on i386.
* Instead, pin the thread and grab the PCPU lock to prevent
* a preempting thread from using the same PCPU data.
*/
sched_pin();
mtx_assert(PCPU_PTR(qmap_lock), MA_NOTOWNED);
pvo = PCPU_GET(qmap_pvo);
mtx_lock(PCPU_PTR(qmap_lock));
pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
(uint64_t)pa;
MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE);
isync();
return (PCPU_GET(qmap_addr));
}
void
moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr)
{
if (hw_direct_map)
return;
mtx_assert(PCPU_PTR(qmap_lock), MA_OWNED);
KASSERT(PCPU_GET(qmap_addr) == addr,
("moea64_quick_remove_page: invalid address"));
mtx_unlock(PCPU_PTR(qmap_lock));
sched_unpin();
}
/*
* Map the given physical page at the specified virtual address in the
* target pmap with the protection requested. If specified the page

@@ -246,6 +246,8 @@ static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
static void booke_pmap_init_qpages(void);
/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS (128 * 16)
@@ -332,6 +334,8 @@ static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
void *);
static void mmu_booke_scan_init(mmu_t);
static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
static mmu_method_t mmu_booke_methods[] = {
/* pmap dispatcher interface */
@@ -371,6 +375,8 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle),
MMUMETHOD(mmu_activate, mmu_booke_activate),
MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
/* Internal interfaces */
MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
@@ -1351,6 +1357,22 @@ pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
}
#endif
static void
booke_pmap_init_qpages(void)
{
struct pcpu *pc;
int i;
CPU_FOREACH(i) {
pc = pcpu_find(i);
pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
if (pc->pc_qmap_addr == 0)
panic("pmap_init_qpages: unable to allocate KVA");
}
}
SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
/*
* Get the physical page address for the given pmap/virtual address.
*/
@@ -2272,6 +2294,61 @@ mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
mmu_booke_kremove(mmu, va);
}
static vm_offset_t
mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
{
vm_paddr_t paddr;
vm_offset_t qaddr;
uint32_t flags;
pte_t *pte;
paddr = VM_PAGE_TO_PHYS(m);
flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m));
critical_enter();
qaddr = PCPU_GET(qmap_addr);
pte = &(kernel_pmap->pm_pdir[PDIR_IDX(qaddr)][PTBL_IDX(qaddr)]);
KASSERT(pte->flags == 0, ("mmu_booke_quick_enter_page: PTE busy"));
/*
* XXX: tlbivax is broadcast to other cores, but qaddr should
* not be present in other TLBs. Is there a better instruction
* sequence to use? Or just forget it & use mmu_booke_kenter()...
*/
__asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
__asm __volatile("isync; msync");
pte->rpn = paddr & ~PTE_PA_MASK;
pte->flags = flags;
/* Flush the real memory from the instruction cache. */
if ((flags & (PTE_I | PTE_G)) == 0)
__syncicache((void *)qaddr, PAGE_SIZE);
return (qaddr);
}
static void
mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
{
pte_t *pte;
pte = &(kernel_pmap->pm_pdir[PDIR_IDX(addr)][PTBL_IDX(addr)]);
KASSERT(PCPU_GET(qmap_addr) == addr,
("mmu_booke_quick_remove_page: invalid address"));
KASSERT(pte->flags != 0,
("mmu_booke_quick_remove_page: PTE not in use"));
pte->flags = 0;
pte->rpn = 0;
critical_exit();
}
/*
* Return whether or not the specified physical page was modified
* in any of physical maps.

@@ -35,6 +35,7 @@
#include <machine/tlb.h>
struct pmap;
struct pvo_entry;
#define CPUSAVE_LEN 9
#define PCPU_MD_COMMON_FIELDS \
@@ -53,6 +54,9 @@ struct pmap;
void *pc_restore;
#define PCPU_MD_AIM32_FIELDS \
vm_offset_t pc_qmap_addr; \
struct pvo_entry *pc_qmap_pvo; \
struct mtx pc_qmap_lock; \
/* char __pad[0] */
#define PCPU_MD_AIM64_FIELDS \
@@ -60,7 +64,10 @@ struct pmap;
struct slb **pc_userslb; \
register_t pc_slbsave[18]; \
uint8_t pc_slbstack[1024]; \
char __pad[1137]
vm_offset_t pc_qmap_addr; \
struct pvo_entry *pc_qmap_pvo; \
struct mtx pc_qmap_lock; \
char __pad[1121 - sizeof(struct mtx)]
#ifdef __powerpc64__
#define PCPU_MD_AIM_FIELDS PCPU_MD_AIM64_FIELDS
@@ -78,9 +85,10 @@ struct pmap;
register_t pc_booke_mchksave[CPUSAVE_LEN]; \
register_t pc_booke_tlbsave[BOOKE_TLBSAVE_LEN]; \
register_t pc_booke_tlb_level; \
vm_offset_t pc_qmap_addr; \
uint32_t *pc_booke_tlb_lock; \
int pc_tid_next; \
char __pad[173]
char __pad[165]
/* Definitions for register offsets within the exception tmp save areas */
#define CPUSAVE_R27 0 /* where r27 gets saved */

@@ -933,3 +933,26 @@ METHOD void dumpsys_unmap {
METHOD void scan_init {
mmu_t _mmu;
};
/**
* @brief Create a temporary thread-local KVA mapping of a single page.
*
* @param _pg The physical page to map
*
* @retval addr The temporary KVA
*/
METHOD vm_offset_t quick_enter_page {
mmu_t _mmu;
vm_page_t _pg;
};
/**
* @brief Undo a mapping created by quick_enter_page
*
* @param _va The mapped KVA
*/
METHOD void quick_remove_page {
mmu_t _mmu;
vm_offset_t _va;
};

@@ -550,6 +550,20 @@ dumpsys_pa_init(void)
return (MMU_SCAN_INIT(mmu_obj));
}
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
CTR2(KTR_PMAP, "%s(%p)", __func__, m);
return (MMU_QUICK_ENTER_PAGE(mmu_obj, m));
}
void
pmap_quick_remove_page(vm_offset_t addr)
{
CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
MMU_QUICK_REMOVE_PAGE(mmu_obj, addr);
}
/*
* MMU install routines. Highest priority wins, equal priority also
* overrides allowing last-set to win.

@@ -51,6 +51,7 @@ struct pmap;
struct intr_request *pc_irfree; \
struct pmap *pc_pmap; \
vm_offset_t pc_addr; \
vm_offset_t pc_qmap_addr; \
u_long pc_tickref; \
u_long pc_tickadj; \
u_long pc_tickincrement; \
@@ -61,7 +62,7 @@ struct pmap;
u_int pc_tlb_ctx; \
u_int pc_tlb_ctx_max; \
u_int pc_tlb_ctx_min; \
char __pad[405]
char __pad[397]
#ifdef _KERNEL

@@ -143,6 +143,7 @@ static int pmap_protect_tte(struct pmap *pm1, struct pmap *pm2,
struct tte *tp, vm_offset_t va);
static int pmap_unwire_tte(pmap_t pm, pmap_t pm2, struct tte *tp,
vm_offset_t va);
static void pmap_init_qpages(void);
/*
* Map the given physical page at the specified virtual address in the
@@ -680,6 +681,25 @@ pmap_bootstrap(u_int cpu_impl)
tlb_flush_nonlocked();
}
static void
pmap_init_qpages(void)
{
struct pcpu *pc;
int i;
if (dcache_color_ignore != 0)
return;
CPU_FOREACH(i) {
pc = pcpu_find(i);
pc->pc_qmap_addr = kva_alloc(PAGE_SIZE * DCACHE_COLORS);
if (pc->pc_qmap_addr == 0)
panic("pmap_init_qpages: unable to allocate KVA");
}
}
SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_qpages, NULL);
/*
* Map the 4MB kernel TSB pages.
*/
@@ -1934,6 +1954,54 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
}
}
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
vm_paddr_t pa;
vm_offset_t qaddr;
struct tte *tp;
pa = VM_PAGE_TO_PHYS(m);
if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa))
return (TLB_PHYS_TO_DIRECT(pa));
critical_enter();
qaddr = PCPU_GET(qmap_addr);
qaddr += (PAGE_SIZE * ((DCACHE_COLORS + DCACHE_COLOR(pa) -
DCACHE_COLOR(qaddr)) % DCACHE_COLORS));
tp = tsb_kvtotte(qaddr);
KASSERT(tp->tte_data == 0, ("pmap_quick_enter_page: PTE busy"));
tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
tp->tte_vpn = TV_VPN(qaddr, TS_8K);
return (qaddr);
}
void
pmap_quick_remove_page(vm_offset_t addr)
{
vm_offset_t qaddr;
struct tte *tp;
if (addr >= VM_MIN_DIRECT_ADDRESS)
return;
tp = tsb_kvtotte(addr);
qaddr = PCPU_GET(qmap_addr);
KASSERT((addr >= qaddr) && (addr < (qaddr + (PAGE_SIZE * DCACHE_COLORS))),
("pmap_quick_remove_page: invalid address"));
KASSERT(tp->tte_data != 0, ("pmap_quick_remove_page: PTE not in use"));
stxa(TLB_DEMAP_VA(addr) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, ASI_DMMU_DEMAP, 0);
stxa(TLB_DEMAP_VA(addr) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, ASI_IMMU_DEMAP, 0);
flush(KERNBASE);
TTE_ZERO(tp);
critical_exit();
}
int unmapped_buf_allowed;
void

@@ -152,6 +152,8 @@ void pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end);
void pmap_zero_page(vm_page_t);
void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_zero_page_idle(vm_page_t);
vm_offset_t pmap_quick_enter_page(vm_page_t);
void pmap_quick_remove_page(vm_offset_t);
#define pmap_resident_count(pm) ((pm)->pm_stats.resident_count)
#define pmap_wired_count(pm) ((pm)->pm_stats.wired_count)