Enhance book-e pmap for 36-bit physaddr
Summary:
This is (probably) step 1 of enhancing the Book-E pmap to support the full 36-bit physical address space on Freescale e500 and e5500 cores. So far it has only been regression tested on one platform; my only other Book-E platform (e5500) needs work beyond this change, so I have not yet tested it there.

Test Plan: Regression tested on my RouterBoard RB800.

Reviewed By: marcel

Differential Revision: https://reviews.freebsd.org/D3027
parent 2693c49ecb
commit 83ce86ca54
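The heart of the change is carrying physical addresses above 4 GB: the upper bits of a 36-bit physical address are kept in the MAS7 register (and in a new mas7 field of the tlb1[] bookkeeping), the low 32 bits stay in MAS3, and PTE RPNs are now built with PTE_RPN_FROM_PA(). The stand-alone sketch below illustrates the split and recombination seen in tlb1_set_entry(), tlb1_init(), and tlb1_iomapped() in the diff; the mask values and the 64-bit vm_paddr_t typedef are illustrative assumptions, not the kernel's definitions.

/*
 * Illustrative sketch only (not kernel code): split a 36-bit physical
 * address across MAS3/MAS7 and recombine it, mirroring the diff below.
 * MAS3_RPN, MAS7_RPN and the vm_paddr_t typedef are assumed values.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vm_paddr_t;	/* wide enough for a 36-bit physical address */

#define	MAS3_RPN	0xfffff000u	/* assumed: RPN bits 31:12 held in MAS3 */
#define	MAS7_RPN	0x0000000fu	/* assumed: physical address bits 35:32 in MAS7 */

int
main(void)
{
	vm_paddr_t pa = 0x2abcde000ULL;	/* example page address above 4 GB */

	/* Split, as tlb1_set_entry() does for its mas3/mas7 fields. */
	uint32_t mas3 = (uint32_t)(pa & MAS3_RPN);
	uint32_t mas7 = (uint32_t)(pa >> 32) & MAS7_RPN;

	/* Recombine, as tlb1_init() and tlb1_iomapped() do. */
	vm_paddr_t phys = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) | (mas3 & MAS3_RPN);

	printf("pa   = 0x%09llx\n", (unsigned long long)pa);
	printf("mas3 = 0x%08x  mas7 = 0x%08x\n", mas3, mas7);
	printf("phys = 0x%09llx\n", (unsigned long long)phys);
	return (0);
}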
@@ -966,7 +966,7 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
 		pmap->pm_pdir[pdir_idx] = ptbl;
 	}
 	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
-	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
+	pte->rpn = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
 	pte->flags |= (PTE_VALID | flags);
 
 	tlb_miss_unlock();
@@ -1129,7 +1129,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	/* Calculate corresponding physical addresses for the kernel region. */
 	phys_kernelend = kernload + kernsize;
 	debugf("kernel image and allocated data:\n");
-	debugf(" kernload = 0x%08x\n", kernload);
+	debugf(" kernload = 0x%09llx\n", (uint64_t)kernload);
 	debugf(" kernstart = 0x%08x\n", kernstart);
 	debugf(" kernsize = 0x%08x\n", kernsize);
 
@@ -1282,7 +1282,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 		kernel_pmap->pm_tid[i] = TID_KERNEL;
 
 		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
-		tidbusy[i][0] = kernel_pmap;
+		tidbusy[i][TID_KERNEL] = kernel_pmap;
 	}
 
 	/*
@@ -1315,7 +1315,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	thread0.td_kstack_pages = kstack_pages;
 
 	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
-	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
+	debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
 	    kstack0_phys, kstack0_phys + kstack0_sz);
 	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);
 
@@ -1512,7 +1512,7 @@ mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 		tlb0_flush_entry(va);
 	}
 
-	pte->rpn = pa & ~PTE_PA_MASK;
+	pte->rpn = PTE_RPN_FROM_PA(pa);
 	pte->flags = flags;
 
 	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
@@ -2632,7 +2632,7 @@ mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
 
 	/* Minidumps are based on virtual memory addresses. */
 	if (do_minidump) {
-		*va = (void *)pa;
+		*va = (void *)(vm_offset_t)pa;
 		return;
 	}
 
@@ -2776,7 +2776,7 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 			if (pa >= tlb1[i].phys &&
 			    (pa + size) <= (tlb1[i].phys + tlb1[i].size))
 				return (void *)(tlb1[i].virt +
-				    (pa - tlb1[i].phys));
+				    (vm_offset_t)(pa - tlb1[i].phys));
 		}
 	}
 
@@ -2804,7 +2804,7 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 	do {
 		sz = 1 << (ilog2(size) & ~1);
 		if (bootverbose)
-			printf("Wiring VA=%x to PA=%x (size=%x), "
+			printf("Wiring VA=%x to PA=%llx (size=%x), "
 			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
 		tlb1_set_entry(va, pa, sz, tlb_calc_wimg(pa, ma));
 		size -= sz;
@@ -3026,13 +3026,10 @@ tlb0_print_tlbentries(void)
 static void
 tlb1_write_entry(unsigned int idx)
 {
-	uint32_t mas0, mas7;
+	uint32_t mas0;
 
 	//debugf("tlb1_write_entry: s\n");
 
-	/* Clear high order RPN bits */
-	mas7 = 0;
-
 	/* Select entry */
 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
 	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
@@ -3045,7 +3042,7 @@ tlb1_write_entry(unsigned int idx)
 	__asm __volatile("isync");
 	mtspr(SPR_MAS3, tlb1[idx].mas3);
 	__asm __volatile("isync");
-	mtspr(SPR_MAS7, mas7);
+	mtspr(SPR_MAS7, tlb1[idx].mas7);
 	__asm __volatile("isync; tlbwe; isync; msync");
 
 	//debugf("tlb1_write_entry: e\n");
@@ -3128,6 +3125,7 @@ tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
 
 	/* Set supervisor RWX permission bits */
 	tlb1[index].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
+	tlb1[index].mas7 = (pa >> 32) & MAS7_RPN;
 
 	tlb1_write_entry(index);
 
@@ -3191,7 +3189,7 @@ tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
 
 	for (idx = 0; idx < nents; idx++) {
 		pgsz = pgs[idx];
-		debugf("%u: %x -> %x, size=%x\n", idx, pa, va, pgsz);
+		debugf("%u: %llx -> %x, size=%x\n", idx, pa, va, pgsz);
 		tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM);
 		pa += pgsz;
 		va += pgsz;
@@ -3210,7 +3208,7 @@ tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
 void
 tlb1_init()
 {
-	uint32_t mas0, mas1, mas2, mas3;
+	uint32_t mas0, mas1, mas2, mas3, mas7;
 	uint32_t tsz;
 	u_int i;
 
@@ -3231,12 +3229,15 @@ tlb1_init()
 
 		mas2 = mfspr(SPR_MAS2);
 		mas3 = mfspr(SPR_MAS3);
+		mas7 = mfspr(SPR_MAS7);
 
 		tlb1[i].mas1 = mas1;
 		tlb1[i].mas2 = mfspr(SPR_MAS2);
 		tlb1[i].mas3 = mas3;
+		tlb1[i].mas7 = mas7;
 		tlb1[i].virt = mas2 & MAS2_EPN_MASK;
-		tlb1[i].phys = mas3 & MAS3_RPN;
+		tlb1[i].phys = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
+		    (mas3 & MAS3_RPN);
 
 		if (i == 0)
 			kernload = tlb1[i].phys;
@@ -3350,7 +3351,8 @@ tlb1_print_entries(void)
 
 	debugf("tlb1[] table entries:\n");
 	for (i = 0; i < TLB1_ENTRIES; i++)
-		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
+		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3,
+		    tlb1[i].mas7);
 }
 
 /*
@@ -3389,8 +3391,9 @@ tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
 	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
 
 	entry_size = tsize2size(entry_tsize);
-	pa_start = tlb1[i].mas3 & MAS3_RPN;
-	pa_end = pa_start + entry_size - 1;
+	pa_start = (((vm_paddr_t)tlb1[i].mas7 & MAS7_RPN) << 32) |
+	    (tlb1[i].mas3 & MAS3_RPN);
+	pa_end = pa_start + entry_size;
 
 	if ((pa < pa_start) || ((pa + size) > pa_end))
 		return (ERANGE);