Add pmap function pmap_copy_pages(), which copies the content of the
pages around, taking arrays of vm_page_t for both source and destination. Starting offsets and the total transfer size are specified. The function implements an optimal copying algorithm using platform-specific optimizations. For instance, on architectures where the direct map is available, no transient mappings are created; for i386 the per-CPU ephemeral page frame is used. The code was typically borrowed from pmap_copy_page() for the same architecture. Only the i386/amd64, powerpc aim and arm/arm-v6 implementations were tested at the time of commit. High-level code, not yet committed to the tree, ensures that use of the function is only allowed after explicit enablement. For sparc64, the existing code has known issues and a stub is added instead, to allow the kernel to link. Sponsored by: The FreeBSD Foundation Tested by: pho (i386, amd64), scottl (amd64), ian (arm and arm-v6) MFC after: 2 weeks
This commit is contained in:
parent
4824f82537
commit
63efc821c3
@ -4272,6 +4272,30 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
|
||||
pagecopy((void *)src, (void *)dst);
|
||||
}
|
||||
|
||||
void
|
||||
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
|
||||
vm_offset_t b_offset, int xfersize)
|
||||
{
|
||||
void *a_cp, *b_cp;
|
||||
vm_offset_t a_pg_offset, b_pg_offset;
|
||||
int cnt;
|
||||
|
||||
while (xfersize > 0) {
|
||||
a_pg_offset = a_offset & PAGE_MASK;
|
||||
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
|
||||
a_cp = (char *)PHYS_TO_DMAP(ma[a_offset >> PAGE_SHIFT]->
|
||||
phys_addr) + a_pg_offset;
|
||||
b_pg_offset = b_offset & PAGE_MASK;
|
||||
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
|
||||
b_cp = (char *)PHYS_TO_DMAP(mb[b_offset >> PAGE_SHIFT]->
|
||||
phys_addr) + b_pg_offset;
|
||||
bcopy(a_cp, b_cp, cnt);
|
||||
a_offset += cnt;
|
||||
b_offset += cnt;
|
||||
xfersize -= cnt;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if the pmap's pv is one of the first
|
||||
* 16 pvs linked to from this page. This count may
|
||||
|
@ -3312,6 +3312,45 @@ pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
|
||||
mtx_unlock(&cmtx);
|
||||
}
|
||||
|
||||
/*
 * Copy xfersize bytes between two page arrays: from ma[] starting at
 * byte offset a_offset into mb[] starting at b_offset.  Each chunk is
 * bounded so it does not cross a page boundary on either side.  The
 * pages are reached through the csrcp/cdstp ephemeral mappings, whose
 * PTEs (csrc_pte/cdst_pte) are serialized by cmtx.
 */
void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{
	vm_page_t a_pg, b_pg;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&cmtx);
	while (xfersize > 0) {
		/* Select the source page and clamp the chunk to it. */
		a_pg = ma[a_offset >> PAGE_SHIFT];
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		/* Likewise for the destination page. */
		b_pg = mb[b_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		/* Map the source page read-only at csrcp. */
		*csrc_pte = L2_S_PROTO | VM_PAGE_TO_PHYS(a_pg) |
		    pte_l2_s_cache_mode;
		pmap_set_prot(csrc_pte, VM_PROT_READ, 0);
		PTE_SYNC(csrc_pte);
		/* Map the destination page read/write at cdstp. */
		*cdst_pte = L2_S_PROTO | VM_PAGE_TO_PHYS(b_pg) |
		    pte_l2_s_cache_mode;
		pmap_set_prot(cdst_pte, VM_PROT_READ | VM_PROT_WRITE, 0);
		PTE_SYNC(cdst_pte);
		/* Discard stale TLB entries for the two windows. */
		cpu_tlb_flushD_SE(csrcp);
		cpu_tlb_flushD_SE(cdstp);
		cpu_cpwait();
		bcopy((char *)csrcp + a_pg_offset, (char *)cdstp + b_pg_offset,
		    cnt);
		/*
		 * Write back and invalidate the copied range (L1 I/D
		 * caches, then the L2 cache for the destination's
		 * physical range) so the data reaches memory.
		 */
		cpu_idcache_wbinv_range(cdstp + b_pg_offset, cnt);
		pmap_l2cache_wbinv_range(cdstp + b_pg_offset,
		    VM_PAGE_TO_PHYS(b_pg) + b_pg_offset, cnt);
		xfersize -= cnt;
		a_offset += cnt;
		b_offset += cnt;
	}
	mtx_unlock(&cmtx);
}
|
||||
|
||||
void
|
||||
pmap_copy_page(vm_page_t src, vm_page_t dst)
|
||||
{
|
||||
|
@ -258,6 +258,9 @@ pt_entry_t pte_l1_c_proto;
|
||||
pt_entry_t pte_l2_s_proto;
|
||||
|
||||
void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
|
||||
void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
|
||||
vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs,
|
||||
int cnt);
|
||||
void (*pmap_zero_page_func)(vm_paddr_t, int, int);
|
||||
|
||||
struct msgbuf *msgbufp = 0;
|
||||
@ -400,6 +403,13 @@ static vm_paddr_t pmap_kernel_l2ptp_phys;
|
||||
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
|
||||
static struct rwlock pvh_global_lock;
|
||||
|
||||
void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
|
||||
vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
|
||||
#if ARM_MMU_XSCALE == 1
|
||||
void pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
|
||||
vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This list exists for the benefit of pmap_map_chunk(). It keeps track
|
||||
* of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
|
||||
@ -484,6 +494,7 @@ pmap_pte_init_generic(void)
|
||||
pte_l2_s_proto = L2_S_PROTO_generic;
|
||||
|
||||
pmap_copy_page_func = pmap_copy_page_generic;
|
||||
pmap_copy_page_offs_func = pmap_copy_page_offs_generic;
|
||||
pmap_zero_page_func = pmap_zero_page_generic;
|
||||
}
|
||||
|
||||
@ -660,6 +671,7 @@ pmap_pte_init_xscale(void)
|
||||
|
||||
#ifdef CPU_XSCALE_CORE3
|
||||
pmap_copy_page_func = pmap_copy_page_generic;
|
||||
pmap_copy_page_offs_func = pmap_copy_page_offs_generic;
|
||||
pmap_zero_page_func = pmap_zero_page_generic;
|
||||
xscale_use_minidata = 0;
|
||||
/* Make sure it is L2-cachable */
|
||||
@ -672,6 +684,7 @@ pmap_pte_init_xscale(void)
|
||||
|
||||
#else
|
||||
pmap_copy_page_func = pmap_copy_page_xscale;
|
||||
pmap_copy_page_offs_func = pmap_copy_page_offs_xscale;
|
||||
pmap_zero_page_func = pmap_zero_page_xscale;
|
||||
#endif
|
||||
|
||||
@ -4300,6 +4313,29 @@ pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
|
||||
cpu_l2cache_inv_range(csrcp, PAGE_SIZE);
|
||||
cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE);
|
||||
}
|
||||
|
||||
/*
 * Copy cnt bytes from physical page a_phys (at byte offset a_offs)
 * into physical page b_phys (at b_offs), through the csrcp/cdstp
 * ephemeral mappings.  Generic (ARM_MMU_GENERIC/SA1) backend used via
 * pmap_copy_page_offs_func.
 */
void
pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
{

	mtx_lock(&cmtx);
	/* Map the source read-only and the destination writable. */
	*csrc_pte = L2_S_PROTO | a_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | b_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
	PTE_SYNC(cdst_pte);
	/* Flush stale TLB entries for both windows before copying. */
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
	mtx_unlock(&cmtx);
	/*
	 * Purge only the touched byte ranges from L1 and L2: invalidate
	 * the source range, write back and invalidate the destination.
	 * NOTE(review): the cache maintenance runs after cmtx has been
	 * dropped, so it assumes the window PTEs are not remapped
	 * concurrently — confirm against the callers' locking.
	 */
	cpu_dcache_inv_range(csrcp + a_offs, cnt);
	cpu_dcache_wbinv_range(cdstp + b_offs, cnt);
	cpu_l2cache_inv_range(csrcp + a_offs, cnt);
	cpu_l2cache_wbinv_range(cdstp + b_offs, cnt);
}
|
||||
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
|
||||
|
||||
#if ARM_MMU_XSCALE == 1
|
||||
@ -4344,6 +4380,28 @@ pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
|
||||
mtx_unlock(&cmtx);
|
||||
xscale_cache_clean_minidata();
|
||||
}
|
||||
|
||||
/*
 * XScale variant of pmap_copy_page_offs_generic(): copy cnt bytes from
 * physical page a_phys (at byte offset a_offs) into physical page
 * b_phys (at b_offs) through the csrcp/cdstp ephemeral mappings.  The
 * windows are mapped with the XScale TEX cache attributes, and the
 * mini-data cache is cleaned afterwards instead of per-range
 * writeback/invalidate.
 */
void
pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
{

	mtx_lock(&cmtx);
	/* Source window: kernel read-only, XScale X-bit TEX caching. */
	*csrc_pte = L2_S_PROTO | a_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	PTE_SYNC(csrc_pte);
	/* Destination window: kernel writable, same caching. */
	*cdst_pte = L2_S_PROTO | b_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	PTE_SYNC(cdst_pte);
	/* Drop stale TLB entries for both windows before copying. */
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
	mtx_unlock(&cmtx);
	xscale_cache_clean_minidata();
}
|
||||
#endif /* ARM_MMU_XSCALE == 1 */
|
||||
|
||||
void
|
||||
@ -4370,8 +4428,41 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
 * Copy xfersize bytes from the pages ma[] (starting at byte offset
 * a_offset) into the pages mb[] (starting at b_offset).  Each chunk is
 * limited so it stays within one page on both sides.  With
 * ARM_USE_SMALL_ALLOC the pages are reached through arm_ptovirt();
 * otherwise each chunk goes through the per-MMU-type
 * pmap_copy_page_offs_func backend.
 */
void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{
	vm_page_t a_pg, b_pg;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;
#ifdef ARM_USE_SMALL_ALLOC
	vm_offset_t a_va, b_va;
#endif

	/* Clean the whole D-cache and L2 up front, as in
	 * pmap_copy_page(), before touching the pages. */
	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	while (xfersize > 0) {
		/* Source page and the chunk size it permits. */
		a_pg = ma[a_offset >> PAGE_SHIFT];
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		/* Destination page; clamp the chunk further. */
		b_pg = mb[b_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
#ifdef ARM_USE_SMALL_ALLOC
		a_va = arm_ptovirt(VM_PAGE_TO_PHYS(a_pg)) + a_pg_offset;
		b_va = arm_ptovirt(VM_PAGE_TO_PHYS(b_pg)) + b_pg_offset;
		bcopy((char *)a_va, (char *)b_va, cnt);
		/* Push the copied bytes out to memory. */
		cpu_dcache_wbinv_range(b_va, cnt);
		cpu_l2cache_wbinv_range(b_va, cnt);
#else
		pmap_copy_page_offs_func(VM_PAGE_TO_PHYS(a_pg), a_pg_offset,
		    VM_PAGE_TO_PHYS(b_pg), b_pg_offset, cnt);
#endif
		xfersize -= cnt;
		a_offset += cnt;
		b_offset += cnt;
	}
}
|
||||
|
||||
/*
|
||||
* this routine returns true if a physical page resides
|
||||
|
@ -533,6 +533,8 @@ extern pt_entry_t pte_l1_c_proto;
|
||||
extern pt_entry_t pte_l2_s_proto;
|
||||
|
||||
extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
|
||||
extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
|
||||
vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
|
||||
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);
|
||||
|
||||
#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7 + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)
|
||||
|
@ -4238,6 +4238,49 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
|
||||
mtx_unlock(&sysmaps->lock);
|
||||
}
|
||||
|
||||
void
|
||||
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
|
||||
vm_offset_t b_offset, int xfersize)
|
||||
{
|
||||
struct sysmaps *sysmaps;
|
||||
vm_page_t a_pg, b_pg;
|
||||
char *a_cp, *b_cp;
|
||||
vm_offset_t a_pg_offset, b_pg_offset;
|
||||
int cnt;
|
||||
|
||||
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
|
||||
mtx_lock(&sysmaps->lock);
|
||||
if (*sysmaps->CMAP1 != 0)
|
||||
panic("pmap_copy_pages: CMAP1 busy");
|
||||
if (*sysmaps->CMAP2 != 0)
|
||||
panic("pmap_copy_pages: CMAP2 busy");
|
||||
sched_pin();
|
||||
while (xfersize > 0) {
|
||||
invlpg((u_int)sysmaps->CADDR1);
|
||||
invlpg((u_int)sysmaps->CADDR2);
|
||||
a_pg = ma[a_offset >> PAGE_SHIFT];
|
||||
a_pg_offset = a_offset & PAGE_MASK;
|
||||
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
|
||||
b_pg = mb[b_offset >> PAGE_SHIFT];
|
||||
b_pg_offset = b_offset & PAGE_MASK;
|
||||
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
|
||||
*sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A |
|
||||
pmap_cache_bits(b_pg->md.pat_mode, 0);
|
||||
*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A |
|
||||
PG_M | pmap_cache_bits(b_pg->md.pat_mode, 0);
|
||||
a_cp = sysmaps->CADDR1 + a_pg_offset;
|
||||
b_cp = sysmaps->CADDR2 + b_pg_offset;
|
||||
bcopy(a_cp, b_cp, cnt);
|
||||
a_offset += cnt;
|
||||
b_offset += cnt;
|
||||
xfersize -= cnt;
|
||||
}
|
||||
*sysmaps->CMAP1 = 0;
|
||||
*sysmaps->CMAP2 = 0;
|
||||
sched_unpin();
|
||||
mtx_unlock(&sysmaps->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if the pmap's pv is one of the first
|
||||
* 16 pvs linked to from this page. This count may
|
||||
|
@ -3447,6 +3447,46 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
|
||||
mtx_unlock(&sysmaps->lock);
|
||||
}
|
||||
|
||||
/*
 * Xen/i386 variant: copy xfersize bytes from the pages ma[] (starting
 * at byte offset a_offset) into the pages mb[] (starting at b_offset).
 * Chunks are copied through this CPU's CADDR1/CADDR2 windows; under
 * Xen the window PTEs are updated with PT_SET_MA using machine
 * addresses (VM_PAGE_TO_MACH) instead of direct PTE stores.
 */
void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{
	struct sysmaps *sysmaps;
	vm_page_t a_pg, b_pg;
	char *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
	mtx_lock(&sysmaps->lock);
	if (*sysmaps->CMAP1 != 0)
		panic("pmap_copy_pages: CMAP1 busy");
	if (*sysmaps->CMAP2 != 0)
		panic("pmap_copy_pages: CMAP2 busy");
	/* Pin so the per-CPU sysmaps slot remains this thread's. */
	sched_pin();
	while (xfersize > 0) {
		/* Source page and the chunk it allows. */
		a_pg = ma[a_offset >> PAGE_SHIFT];
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		/* Destination page; clamp the chunk further. */
		b_pg = mb[b_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		/* Source read-only, destination writable + dirty. */
		PT_SET_MA(sysmaps->CADDR1, PG_V | VM_PAGE_TO_MACH(a_pg) | PG_A);
		PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
		    VM_PAGE_TO_MACH(b_pg) | PG_A | PG_M);
		a_cp = sysmaps->CADDR1 + a_pg_offset;
		b_cp = sysmaps->CADDR2 + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	/* Tear down the transient windows. */
	PT_SET_MA(sysmaps->CADDR1, 0);
	PT_SET_MA(sysmaps->CADDR2, 0);
	sched_unpin();
	mtx_unlock(&sysmaps->lock);
}
|
||||
|
||||
/*
|
||||
* Returns true if the pmap's pv is one of the first
|
||||
* 16 pvs linked to from this page. This count may
|
||||
|
@ -2014,6 +2014,30 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
|
||||
bcopy(src, dst, PAGE_SIZE);
|
||||
}
|
||||
|
||||
void
|
||||
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
|
||||
vm_offset_t b_offset, int xfersize)
|
||||
{
|
||||
void *a_cp, *b_cp;
|
||||
vm_offset_t a_pg_offset, b_pg_offset;
|
||||
int cnt;
|
||||
|
||||
while (xfersize > 0) {
|
||||
a_pg_offset = a_offset & PAGE_MASK;
|
||||
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
|
||||
a_cp = (char *)pmap_page_to_va(ma[a_offset >> PAGE_SHIFT]) +
|
||||
a_pg_offset;
|
||||
b_pg_offset = b_offset & PAGE_MASK;
|
||||
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
|
||||
b_cp = (char *)pmap_page_to_va(mb[b_offset >> PAGE_SHIFT]) +
|
||||
b_pg_offset;
|
||||
bcopy(a_cp, b_cp, cnt);
|
||||
a_offset += cnt;
|
||||
b_offset += cnt;
|
||||
xfersize -= cnt;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if the pmap's pv is one of the first
|
||||
* 16 pvs linked to from this page. This count may
|
||||
|
@ -2576,6 +2576,51 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * MIPS variant: copy xfersize bytes from the pages ma[] (starting at
 * byte offset a_offset) into the pages mb[] (starting at b_offset).
 * When both pages fall in the directly mappable range, the copy goes
 * through the direct map with explicit D-cache maintenance; otherwise
 * the pair is temporarily mapped with pmap_lmem_map2().
 */
void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{
	char *a_cp, *b_cp;
	vm_page_t a_m, b_m;
	vm_offset_t a_pg_offset, b_pg_offset;
	vm_paddr_t a_phys, b_phys;
	int cnt;

	while (xfersize > 0) {
		/* Source page and the chunk size it permits. */
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_m = ma[a_offset >> PAGE_SHIFT];
		a_phys = VM_PAGE_TO_PHYS(a_m);
		/* Destination page; clamp the chunk further. */
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_m = mb[b_offset >> PAGE_SHIFT];
		b_phys = VM_PAGE_TO_PHYS(b_m);
		if (MIPS_DIRECT_MAPPABLE(a_phys) &&
		    MIPS_DIRECT_MAPPABLE(b_phys)) {
			/*
			 * Flush any dirty user-mapped lines of the
			 * source, and purge the destination's direct-
			 * map alias before writing through it.
			 */
			pmap_flush_pvcache(a_m);
			mips_dcache_wbinv_range_index(
			    MIPS_PHYS_TO_DIRECT(b_phys), PAGE_SIZE);
			a_cp = (char *)MIPS_PHYS_TO_DIRECT(a_phys) +
			    a_pg_offset;
			b_cp = (char *)MIPS_PHYS_TO_DIRECT(b_phys) +
			    b_pg_offset;
			bcopy(a_cp, b_cp, cnt);
			/* Push the copied bytes out to memory. */
			mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
		} else {
			/* lmem_map2 maps the two pages back to back. */
			a_cp = (char *)pmap_lmem_map2(a_phys, b_phys);
			b_cp = (char *)a_cp + PAGE_SIZE;
			a_cp += a_pg_offset;
			b_cp += b_pg_offset;
			bcopy(a_cp, b_cp, cnt);
			mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
			pmap_lmem_unmap();
		}
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}
|
||||
|
||||
/*
|
||||
* Returns true if the pmap's pv is one of the first
|
||||
* 16 pvs linked to from this page. This count may
|
||||
|
@ -275,6 +275,8 @@ void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
|
||||
void moea_clear_modify(mmu_t, vm_page_t);
|
||||
void moea_clear_reference(mmu_t, vm_page_t);
|
||||
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
|
||||
void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
|
||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize);
|
||||
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
|
||||
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
|
||||
vm_prot_t);
|
||||
@ -320,6 +322,7 @@ static mmu_method_t moea_methods[] = {
|
||||
MMUMETHOD(mmu_clear_modify, moea_clear_modify),
|
||||
MMUMETHOD(mmu_clear_reference, moea_clear_reference),
|
||||
MMUMETHOD(mmu_copy_page, moea_copy_page),
|
||||
MMUMETHOD(mmu_copy_pages, moea_copy_pages),
|
||||
MMUMETHOD(mmu_enter, moea_enter),
|
||||
MMUMETHOD(mmu_enter_object, moea_enter_object),
|
||||
MMUMETHOD(mmu_enter_quick, moea_enter_quick),
|
||||
@ -1043,6 +1046,30 @@ moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
|
||||
bcopy((void *)src, (void *)dst, PAGE_SIZE);
|
||||
}
|
||||
|
||||
void
|
||||
moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
|
||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize)
|
||||
{
|
||||
void *a_cp, *b_cp;
|
||||
vm_offset_t a_pg_offset, b_pg_offset;
|
||||
int cnt;
|
||||
|
||||
while (xfersize > 0) {
|
||||
a_pg_offset = a_offset & PAGE_MASK;
|
||||
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
|
||||
a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
|
||||
a_pg_offset;
|
||||
b_pg_offset = b_offset & PAGE_MASK;
|
||||
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
|
||||
b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
|
||||
b_pg_offset;
|
||||
bcopy(a_cp, b_cp, cnt);
|
||||
a_offset += cnt;
|
||||
b_offset += cnt;
|
||||
xfersize -= cnt;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Zero a page of physical memory by temporarily mapping it into the tlb.
|
||||
*/
|
||||
|
@ -290,6 +290,8 @@ void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
|
||||
void moea64_clear_modify(mmu_t, vm_page_t);
|
||||
void moea64_clear_reference(mmu_t, vm_page_t);
|
||||
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
|
||||
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
|
||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize);
|
||||
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
|
||||
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
|
||||
vm_prot_t);
|
||||
@ -334,6 +336,7 @@ static mmu_method_t moea64_methods[] = {
|
||||
MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
|
||||
MMUMETHOD(mmu_clear_reference, moea64_clear_reference),
|
||||
MMUMETHOD(mmu_copy_page, moea64_copy_page),
|
||||
MMUMETHOD(mmu_copy_pages, moea64_copy_pages),
|
||||
MMUMETHOD(mmu_enter, moea64_enter),
|
||||
MMUMETHOD(mmu_enter_object, moea64_enter_object),
|
||||
MMUMETHOD(mmu_enter_quick, moea64_enter_quick),
|
||||
@ -1104,6 +1107,72 @@ moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
|
||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize)
|
||||
{
|
||||
void *a_cp, *b_cp;
|
||||
vm_offset_t a_pg_offset, b_pg_offset;
|
||||
int cnt;
|
||||
|
||||
while (xfersize > 0) {
|
||||
a_pg_offset = a_offset & PAGE_MASK;
|
||||
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
|
||||
a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
|
||||
a_pg_offset;
|
||||
b_pg_offset = b_offset & PAGE_MASK;
|
||||
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
|
||||
b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
|
||||
b_pg_offset;
|
||||
bcopy(a_cp, b_cp, cnt);
|
||||
a_offset += cnt;
|
||||
b_offset += cnt;
|
||||
xfersize -= cnt;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Scratch-page path for moea64_copy_pages(): without a hardware direct
 * map, each chunk's source and destination pages are installed in the
 * two moea64 scratch-page slots, serialized by moea64_scratchpage_mtx.
 */
static inline void
moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&moea64_scratchpage_mtx);
	while (xfersize > 0) {
		/* Map the source page into scratch slot 0. */
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		moea64_set_scratchpage_pa(mmu, 0,
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
		a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
		/* Map the destination page into scratch slot 1. */
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		moea64_set_scratchpage_pa(mmu, 1,
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
		b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	mtx_unlock(&moea64_scratchpage_mtx);
}
|
||||
|
||||
void
|
||||
moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
|
||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize)
|
||||
{
|
||||
|
||||
if (hw_direct_map) {
|
||||
moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
|
||||
xfersize);
|
||||
} else {
|
||||
moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
|
||||
xfersize);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
|
||||
{
|
||||
|
@ -274,6 +274,8 @@ static void mmu_booke_clear_reference(mmu_t, vm_page_t);
|
||||
static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
|
||||
vm_size_t, vm_offset_t);
|
||||
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
|
||||
static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
|
||||
vm_offset_t, vm_page_t *, vm_offset_t, int);
|
||||
static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
|
||||
vm_prot_t, boolean_t);
|
||||
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
|
||||
@ -334,6 +336,7 @@ static mmu_method_t mmu_booke_methods[] = {
|
||||
MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference),
|
||||
MMUMETHOD(mmu_copy, mmu_booke_copy),
|
||||
MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
|
||||
MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
|
||||
MMUMETHOD(mmu_enter, mmu_booke_enter),
|
||||
MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
|
||||
MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
|
||||
@ -2136,6 +2139,36 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
|
||||
mtx_unlock(©_page_mutex);
|
||||
}
|
||||
|
||||
static inline void
|
||||
mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
|
||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize)
|
||||
{
|
||||
void *a_cp, *b_cp;
|
||||
vm_offset_t a_pg_offset, b_pg_offset;
|
||||
int cnt;
|
||||
|
||||
mtx_lock(©_page_mutex);
|
||||
while (xfersize > 0) {
|
||||
a_pg_offset = a_offset & PAGE_MASK;
|
||||
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
|
||||
mmu_booke_kenter(mmu, copy_page_src_va,
|
||||
VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
|
||||
a_cp = (char *)copy_page_src_va + a_pg_offset;
|
||||
b_pg_offset = b_offset & PAGE_MASK;
|
||||
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
|
||||
mmu_booke_kenter(mmu, copy_page_dst_va,
|
||||
VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
|
||||
b_cp = (char *)copy_page_dst_va + b_pg_offset;
|
||||
bcopy(a_cp, b_cp, cnt);
|
||||
mmu_booke_kremove(mmu, copy_page_dst_va);
|
||||
mmu_booke_kremove(mmu, copy_page_src_va);
|
||||
a_offset += cnt;
|
||||
b_offset += cnt;
|
||||
xfersize -= cnt;
|
||||
}
|
||||
mtx_unlock(©_page_mutex);
|
||||
}
|
||||
|
||||
/*
|
||||
* mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
|
||||
* into virtual memory and using bzero to clear its contents. This is intended
|
||||
|
@ -215,6 +215,14 @@ METHOD void copy_page {
|
||||
vm_page_t _dst;
|
||||
};
|
||||
|
||||
/**
 * @brief Copy _xfersize bytes from the pages _ma[], starting at byte
 * offset _a_offset, into the pages _mb[], starting at byte offset
 * _b_offset.
 */
METHOD void copy_pages {
	mmu_t		_mmu;
	vm_page_t	*_ma;
	vm_offset_t	_a_offset;
	vm_page_t	*_mb;
	vm_offset_t	_b_offset;
	int		_xfersize;
};
|
||||
|
||||
/**
|
||||
* @brief Create a mapping between a virtual/physical address pair in the
|
||||
|
@ -132,6 +132,16 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
|
||||
MMU_COPY_PAGE(mmu_obj, src, dst);
|
||||
}
|
||||
|
||||
/*
 * pmap interface entry point: trace the call with KTR, then forward it
 * to the active MMU implementation's copy_pages method.
 */
void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
	    a_offset, mb, b_offset, xfersize);
	MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
}
|
||||
|
||||
void
|
||||
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
|
||||
vm_prot_t prot, boolean_t wired)
|
||||
|
@ -1918,6 +1918,14 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Stub: no sparc64 implementation is provided (the existing copy code
 * has known issues); this exists only so the kernel links, and panics
 * if ever reached.
 */
void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	panic("pmap_copy_pages: not implemented");
}
|
||||
|
||||
/*
|
||||
* Returns true if the pmap's pv is one of the first
|
||||
* 16 pvs linked to from this page. This count may
|
||||
|
@ -108,6 +108,8 @@ void pmap_clear_modify(vm_page_t m);
|
||||
void pmap_clear_reference(vm_page_t m);
|
||||
void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
|
||||
void pmap_copy_page(vm_page_t, vm_page_t);
|
||||
void pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset,
|
||||
vm_page_t mb[], vm_offset_t b_offset, int xfersize);
|
||||
void pmap_enter(pmap_t, vm_offset_t, vm_prot_t, vm_page_t,
|
||||
vm_prot_t, boolean_t);
|
||||
void pmap_enter_object(pmap_t pmap, vm_offset_t start,
|
||||
|
Loading…
x
Reference in New Issue
Block a user