Hide most of the PTE initialization and management.
By confining page table management to a handful of functions, it will be easier to modify the page table scheme without affecting other functions. This will be necessary when 64-bit support is added and page tables become much larger.
parent 09fb369fc5
commit 730341be8a
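The common theme of the hunks below: call sites that open-coded the two-level lookup &kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)] now go through pte_find(), and the bootstrap-time kernel PTE setup moves into the new kernel_pte_alloc(). A minimal stand-alone sketch of that encapsulation pattern follows; the types, sizes, and index macros are simplified stand-ins (and the mmu_t argument is dropped), not the kernel's definitions:

/*
 * Illustrative sketch only: simplified stand-ins for the pmap
 * structures, not FreeBSD's definitions.
 */
#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define PTBL_NENTRIES   1024
#define PDIR_NENTRIES   1024
#define PTBL_IDX(va)    (((va) >> PAGE_SHIFT) & (PTBL_NENTRIES - 1))
#define PDIR_IDX(va)    (((va) >> (PAGE_SHIFT + 10)) & (PDIR_NENTRIES - 1))

typedef uintptr_t vm_offset_t;
typedef struct { uint32_t flags; uint32_t rpn; } pte_t;
typedef struct pmap { pte_t *pm_pdir[PDIR_NENTRIES]; } *pmap_t;

/*
 * The one function that knows the table layout.  Callers that used to
 * index pm_pdir inline now call this, so only this lookup has to change
 * when the scheme grows extra levels for 64-bit support.
 */
static pte_t *
pte_find(pmap_t pmap, vm_offset_t va)
{
        pte_t *ptbl = pmap->pm_pdir[PDIR_IDX(va)];

        if (ptbl == NULL)
                return (NULL);
        return (&ptbl[PTBL_IDX(va)]);
}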
@@ -242,6 +242,8 @@ static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
+static void kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr,
+    vm_offset_t pdir);
 
 static pv_entry_t pv_alloc(void);
 static void pv_free(pv_entry_t);
@@ -507,6 +509,10 @@ tlb1_get_tlbconf(void)
 	tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
 }
 
+/**************************************************************************/
+/* Page table related */
+/**************************************************************************/
+
 /* Initialize pool of kva ptbl buffers. */
 static void
 ptbl_init(void)
@@ -1014,6 +1020,33 @@ pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 	return (NULL);
 }
 
+/* Set up kernel page tables. */
+static void
+kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
+{
+	int i;
+	vm_offset_t va;
+	pte_t *pte;
+
+	/* Initialize kernel pdir */
+	for (i = 0; i < kernel_ptbls; i++)
+		kernel_pmap->pm_pdir[kptbl_min + i] =
+		    (pte_t *)(pdir + (i * PAGE_SIZE * PTBL_PAGES));
+
+	/*
+	 * Fill in PTEs covering kernel code and data. They are not required
+	 * for address translation, as this area is covered by static TLB1
+	 * entries, but for pte_vatopa() to work correctly with kernel area
+	 * addresses.
+	 */
+	for (va = addr; va < data_end; va += PAGE_SIZE) {
+		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
+		pte->rpn = kernload + (va - kernstart);
+		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
+		    PTE_VALID;
+	}
+}
+
 /**************************************************************************/
 /* PMAP related */
 /**************************************************************************/
@@ -1031,10 +1064,9 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	vm_paddr_t physsz, hwphyssz;
 	u_int phys_avail_count;
 	vm_size_t kstack0_sz;
-	vm_offset_t kernel_pdir, kstack0, va;
+	vm_offset_t kernel_pdir, kstack0;
 	vm_paddr_t kstack0_phys;
 	void *dpcpu;
-	pte_t *pte;
 
 	debugf("mmu_booke_bootstrap: entered\n");
 
@@ -1287,11 +1319,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
 	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);
 
-	/* Initialize kernel pdir */
-	for (i = 0; i < kernel_ptbls; i++)
-		kernel_pmap->pm_pdir[kptbl_min + i] =
-		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));
-
+	kernel_pte_alloc(data_end, kernstart, kernel_pdir);
 	for (i = 0; i < MAXCPU; i++) {
 		kernel_pmap->pm_tid[i] = TID_KERNEL;
 
@@ -1299,18 +1327,6 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 		tidbusy[i][TID_KERNEL] = kernel_pmap;
 	}
 
-	/*
-	 * Fill in PTEs covering kernel code and data. They are not required
-	 * for address translation, as this area is covered by static TLB1
-	 * entries, but for pte_vatopa() to work correctly with kernel area
-	 * addresses.
-	 */
-	for (va = kernstart; va < data_end; va += PAGE_SIZE) {
-		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
-		pte->rpn = kernload + (va - kernstart);
-		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
-		    PTE_VALID;
-	}
 	/* Mark kernel_pmap active on all CPUs */
 	CPU_FILL(&kernel_pmap->pm_active);
 
@@ -1502,8 +1518,6 @@ mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
 static void
 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 {
-	unsigned int pdir_idx = PDIR_IDX(va);
-	unsigned int ptbl_idx = PTBL_IDX(va);
 	uint32_t flags;
 	pte_t *pte;
 
@@ -1513,7 +1527,7 @@ mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
 	flags |= tlb_calc_wimg(pa, ma);
 
-	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
+	pte = pte_find(mmu, kernel_pmap, va);
 
 	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
@@ -1548,17 +1562,15 @@ mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 static void
 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
 {
-	unsigned int pdir_idx = PDIR_IDX(va);
-	unsigned int ptbl_idx = PTBL_IDX(va);
 	pte_t *pte;
 
-//	CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));
+	CTR2(KTR_PMAP,"%s: s (va = 0x%08x)\n", __func__, va);
 
 	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
 	    (va <= VM_MAX_KERNEL_ADDRESS)),
 	    ("mmu_booke_kremove: invalid va"));
 
-	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
+	pte = pte_find(mmu, kernel_pmap, va);
 
 	if (!PTE_ISVALID(pte)) {
 
@@ -2333,7 +2345,7 @@ mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
 	critical_enter();
 	qaddr = PCPU_GET(qmap_addr);
 
-	pte = &(kernel_pmap->pm_pdir[PDIR_IDX(qaddr)][PTBL_IDX(qaddr)]);
+	pte = pte_find(mmu, kernel_pmap, qaddr);
 
 	KASSERT(pte->flags == 0, ("mmu_booke_quick_enter_page: PTE busy"));
 
@@ -2360,7 +2372,7 @@ mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
 {
 	pte_t *pte;
 
-	pte = &(kernel_pmap->pm_pdir[PDIR_IDX(addr)][PTBL_IDX(addr)]);
+	pte = pte_find(mmu, kernel_pmap, addr);
 
 	KASSERT(PCPU_GET(qmap_addr) == addr,
 	    ("mmu_booke_quick_remove_page: invalid address"));
@@ -2832,8 +2844,8 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 		} while (va % sz != 0);
 	}
 	if (bootverbose)
-		printf("Wiring VA=%x to PA=%llx (size=%x), "
-		    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
+		printf("Wiring VA=%lx to PA=%jx (size=%lx), "
+		    "using TLB1[%d]\n", va, (uintmax_t)pa, sz, tlb1_idx);
 	tlb1_set_entry(va, pa, sz, tlb_calc_wimg(pa, ma));
 	size -= sz;
 	pa += sz;
@@ -3237,7 +3249,11 @@ tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
 	}
 
 	mapped = (va - base);
+#ifdef __powerpc64__
+	printf("mapped size 0x%016lx (wasted space 0x%16lx)\n",
+#else
 	printf("mapped size 0x%08x (wasted space 0x%08x)\n",
+#endif
 	    mapped, mapped - size);
 	return (mapped);
 }
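The last two hunks also widen debug printf formats for 64-bit targets. Printing the physical address via %jx with a (uintmax_t) cast is the standard C99 idiom for an integer type whose width varies by platform; a tiny stand-alone illustration with hypothetical values (the vm_paddr_t typedef here is a stand-in, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for vm_paddr_t, whose width differs across platforms. */
typedef uint64_t vm_paddr_t;

int
main(void)
{
        vm_paddr_t pa = 0xfff00000;

        /* Cast to uintmax_t so one format spec works at any width. */
        printf("Wiring PA=%jx\n", (uintmax_t)pa);
        return (0);
}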