Migrate the PTE format for book-e to standardize on the 'indirect PTE' format
Summary:
The revised Book-E spec, adding the specification for the MMUv2 and e6500,
includes a hardware PTE layout for indirect page tables. In order to support
this in the future, migrate the PTE format to match the MMUv2 hardware PTE
format.

Test Plan: Boot tested on a P5020 board. Booted to multiuser mode.

Differential Revision: https://reviews.freebsd.org/D5224
commit 323ead721e (parent 5d05778815)
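For orientation, this is my reading of the new 64-bit PTE layout, reconstructed from the pte.h constants further down in this diff (field extents are inferred from the shift values, not quoted from the MMUv2 spec):

    /*
     * pte_t (uint64_t) after this change:
     *
     *   bits 63..24  ARPN        PA << PTE_ARPN_SHIFT (12), so PA bit 12
     *                            lands at PTE bit 24
     *   bits 23..19  WIMGE       MAS2 attribute bits << PTE_MAS2_SHIFT (19)
     *   bit  18      REFERENCED  software bit, 0x00040000
     *   bit  13      WIRED       software bit, 0x00002000
     *   bit  12      MODIFIED    software bit, 0x00001000
     *   bits 11..8   PS          page size; PTE_PS_4KB = 2 << PTE_PS_SHIFT
     *   bits  7..2   permissions MAS3 UX/SX/UW/SW/UR/SR << PTE_MAS3_SHIFT (2)
     *   bit   1      MANAGED     software bit, 0x00000002
     *   bit   0      VALID       0x00000001
     */
    typedef uint64_t pte_t;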
sys/powerpc/booke/pmap.c
@@ -412,13 +412,13 @@ tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
 	if (ma != VM_MEMATTR_DEFAULT) {
 		switch (ma) {
 		case VM_MEMATTR_UNCACHEABLE:
-			return (PTE_I | PTE_G);
+			return (MAS2_I | MAS2_G);
 		case VM_MEMATTR_WRITE_COMBINING:
 		case VM_MEMATTR_WRITE_BACK:
 		case VM_MEMATTR_PREFETCHABLE:
-			return (PTE_I);
+			return (MAS2_I);
 		case VM_MEMATTR_WRITE_THROUGH:
-			return (PTE_W | PTE_M);
+			return (MAS2_W | MAS2_M);
 		}
 	}
@@ -900,8 +900,7 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
 	tlb_miss_lock();
 
 	tlb0_flush_entry(va);
-	pte->flags = 0;
-	pte->rpn = 0;
+	*pte = 0;
 
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
@@ -984,8 +983,8 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
 		pmap->pm_pdir[pdir_idx] = ptbl;
 	}
 	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
-	pte->rpn = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
-	pte->flags |= (PTE_VALID | flags);
+	*pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
+	*pte |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */
 
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
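A minimal userland sketch of the two-step composition pte_enter() now performs; the constants are copied from the pte.h hunks below, but the demo itself is mine and not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pte_t;

    #define PAGE_MASK            0xfffULL
    #define PTE_ARPN_SHIFT       12
    #define PTE_VALID            0x00000001u
    #define PTE_PS_SHIFT         8
    #define PTE_PS_4KB           (2u << PTE_PS_SHIFT)
    #define PTE_RPN_FROM_PA(pa)  (((pa) & ~PAGE_MASK) << PTE_ARPN_SHIFT)
    #define PTE_PA(pte)          (((*(pte)) >> PTE_ARPN_SHIFT) & ~PAGE_MASK)

    int
    main(void)
    {
            uint64_t pa = 0x200000ULL;      /* example physical address */
            pte_t pte;

            pte = PTE_RPN_FROM_PA(pa);      /* ARPN first... */
            pte |= PTE_VALID | PTE_PS_4KB;  /* ...then flags, 4KB page */

            /* prints: pte = 0x0000000200000201, pa = 0x200000 */
            printf("pte = 0x%016llx, pa = 0x%llx\n",
                (unsigned long long)pte, (unsigned long long)PTE_PA(&pte));
            return (0);
    }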
@@ -1041,9 +1040,9 @@ kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
 	 */
 	for (va = addr; va < data_end; va += PAGE_SIZE) {
 		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
-		pte->rpn = kernload + (va - kernstart);
-		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
-		    PTE_VALID;
+		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
+		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
+		    PTE_VALID | PTE_PS_4KB;
 	}
 }
 
@@ -1525,7 +1524,8 @@ mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
 
 	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
-	flags |= tlb_calc_wimg(pa, ma);
+	flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
+	flags |= PTE_PS_4KB;
 
 	pte = pte_find(mmu, kernel_pmap, va);
 
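The shift here is what keeps the WIMG tests working: tlb_calc_wimg() now returns raw MAS2 bits, and PTE_I and friends are redefined below as the same MAS2 bits shifted left by PTE_MAS2_SHIFT. A sketch of the identity, with the MAS2_* values assumed from the usual Book-E WIMGE encoding:

    #define MAS2_W          0x00000010
    #define MAS2_I          0x00000008
    #define MAS2_M          0x00000004
    #define MAS2_G          0x00000002
    #define PTE_MAS2_SHIFT  19
    #define PTE_I           (MAS2_I << PTE_MAS2_SHIFT)
    #define PTE_G           (MAS2_G << PTE_MAS2_SHIFT)

    /*
     * For uncacheable memory, tlb_calc_wimg() returns MAS2_I | MAS2_G;
     * after the shift, (flags & (PTE_I | PTE_G)) != 0 still holds, so
     * tests such as the icache-flush guard in the next hunk need no change.
     */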
@@ -1540,17 +1540,15 @@ mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 		tlb0_flush_entry(va);
 	}
 
-	pte->rpn = PTE_RPN_FROM_PA(pa);
-	pte->flags = flags;
+	*pte = PTE_RPN_FROM_PA(pa) | flags;
 
 	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
 	//    "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
 	//    pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
 
 	/* Flush the real memory from the instruction cache. */
-	if ((flags & (PTE_I | PTE_G)) == 0) {
+	if ((flags & (PTE_I | PTE_G)) == 0)
 		__syncicache((void *)va, PAGE_SIZE);
-	}
 
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
@@ -1584,8 +1582,7 @@ mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
 
 	/* Invalidate entry in TLB0, update PTE. */
 	tlb0_flush_entry(va);
-	pte->flags = 0;
-	pte->rpn = 0;
+	*pte = 0;
 
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
@@ -1700,7 +1697,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 		 * Before actually updating pte->flags we calculate and
 		 * prepare its new value in a helper var.
 		 */
-		flags = pte->flags;
+		flags = *pte;
 		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
 
 		/* Wiring change, just update stats. */
@@ -1748,7 +1745,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 			 * are turning execute permissions on, icache should
 			 * be flushed.
 			 */
-			if ((pte->flags & (PTE_UX | PTE_SX)) == 0)
+			if ((*pte & (PTE_UX | PTE_SX)) == 0)
 				sync++;
 		}
 
@@ -1762,7 +1759,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 	tlb_miss_lock();
 
 	tlb0_flush_entry(va);
-	pte->flags = flags;
+	*pte = flags;
 
 	tlb_miss_unlock();
 	mtx_unlock_spin(&tlbivax_mutex);
@@ -2069,7 +2066,7 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 				vm_page_dirty(m);
 
 			tlb0_flush_entry(va);
-			pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
+			*pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
 
 			tlb_miss_unlock();
 			mtx_unlock_spin(&tlbivax_mutex);
@@ -2114,7 +2111,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
 			vm_page_dirty(m);
 
 			/* Flush mapping from TLB0. */
-			pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
+			*pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
 
 			tlb_miss_unlock();
 			mtx_unlock_spin(&tlbivax_mutex);
@@ -2194,7 +2191,7 @@ retry:
 		else
 			pte_wbit = PTE_UW;
 
-		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
+		if ((*pte & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
 			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
 				goto retry;
 			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
@@ -2340,14 +2337,15 @@ mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
 	paddr = VM_PAGE_TO_PHYS(m);
 
 	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
-	flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m));
+	flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
+	flags |= PTE_PS_4KB;
 
 	critical_enter();
 	qaddr = PCPU_GET(qmap_addr);
 
 	pte = pte_find(mmu, kernel_pmap, qaddr);
 
-	KASSERT(pte->flags == 0, ("mmu_booke_quick_enter_page: PTE busy"));
+	KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));
 
 	/*
 	 * XXX: tlbivax is broadcast to other cores, but qaddr should
@@ -2357,8 +2355,7 @@ mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
 	__asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
 	__asm __volatile("isync; msync");
 
-	pte->rpn = paddr & ~PTE_PA_MASK;
-	pte->flags = flags;
+	*pte = PTE_RPN_FROM_PA(paddr) | flags;
 
 	/* Flush the real memory from the instruction cache. */
 	if ((flags & (PTE_I | PTE_G)) == 0)
@@ -2376,11 +2373,10 @@ mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
 
 	KASSERT(PCPU_GET(qmap_addr) == addr,
 	    ("mmu_booke_quick_remove_page: invalid address"));
-	KASSERT(pte->flags != 0,
+	KASSERT(*pte != 0,
 	    ("mmu_booke_quick_remove_page: PTE not in use"));
 
-	pte->flags = 0;
-	pte->rpn = 0;
+	*pte = 0;
 	critical_exit();
 }
 
@@ -2494,9 +2490,9 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
 		mtx_lock_spin(&tlbivax_mutex);
 		tlb_miss_lock();
 
-		if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
+		if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
 			tlb0_flush_entry(pv->pv_va);
-			pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
+			*pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
 			    PTE_REFERENCED);
 		}
 
@@ -2538,7 +2534,7 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
 			tlb_miss_lock();
 
 			tlb0_flush_entry(pv->pv_va);
-			pte->flags &= ~PTE_REFERENCED;
+			*pte &= ~PTE_REFERENCED;
 
 			tlb_miss_unlock();
 			mtx_unlock_spin(&tlbivax_mutex);
@@ -2577,7 +2573,7 @@ mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 			if (!PTE_ISWIRED(pte))
 				panic("mmu_booke_unwire: pte %p isn't wired",
 				    pte);
-			pte->flags &= ~PTE_WIRED;
+			*pte &= ~PTE_WIRED;
 			pmap->pm_stats.wired_count--;
 		}
 	}
sys/powerpc/booke/trap_subr.S
@@ -686,7 +686,7 @@ pte_lookup:
 	 * This load may cause a Data TLB miss for non-kernel pmap!
 	 */
 	lwz	%r21, PTE_FLAGS(%r25)
-	andis.	%r21, %r21, PTE_VALID@h
+	andi.	%r21, %r21, PTE_VALID@l
 	bne	2f
 1:
 	li	%r25, 0
@@ -721,20 +721,21 @@ tlb_fill_entry:
 
 	andi.	%r22, %r21, (PTE_SW | PTE_UW)@l	/* check if writable */
 	beq	2f
-	oris	%r21, %r21, PTE_MODIFIED@h	/* set modified bit */
+	ori	%r21, %r21, PTE_MODIFIED@l	/* set modified bit */
 2:
 	stwcx.	%r21, %r23, %r25		/* write it back */
 	bne-	1b
 
 	/* Update MAS2. */
-	rlwimi	%r27, %r21, 0, 27, 30		/* insert WIMG bits from pte */
+	rlwimi	%r27, %r21, 13, 27, 30		/* insert WIMG bits from pte */
 
 	/* Setup MAS3 value in r23. */
 	lwz	%r23, PTE_RPN(%r25)		/* get pte->rpn */
-	rlwinm	%r22, %r23, 12, 0, 20		/* extract MAS3 portion of RPN */
+	rlwinm	%r22, %r23, 20, 0, 11		/* extract MAS3 portion of RPN */
 
-	rlwimi	%r22, %r21, 24, 26, 31		/* insert protection bits from pte */
-	rlwinm	%r23, %r23, 12, 28, 31		/* MAS7 portion of RPN */
+	rlwimi	%r22, %r21, 30, 26, 31		/* insert protection bits from pte */
+	rlwimi	%r22, %r21, 20, 12, 19		/* insert lower 8 RPN bits to MAS3 */
+	rlwinm	%r23, %r23, 20, 24, 31		/* MAS7 portion of RPN */
 
 	/* Load MAS registers. */
 	mtspr	SPR_MAS0, %r29
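A C rendering of what the rewritten MAS setup computes; the helper is hypothetical, written only to make the rotate-and-mask sequences readable (the field masks are my reading of the new layout):

    #include <stdint.h>

    #define PTE_ARPN_SHIFT  12
    #define PTE_MAS2_SHIFT  19
    #define PTE_MAS3_SHIFT  2

    static inline void
    pte_to_mas(uint64_t pte, uint32_t *mas2, uint32_t *mas3, uint32_t *mas7)
    {
            uint64_t pa = (pte >> PTE_ARPN_SHIFT) & ~0xfffULL;
            uint32_t flags = (uint32_t)pte;

            /* rlwimi %r27, %r21, 13, 27, 30: WIMG back into MAS2. */
            *mas2 |= (flags >> PTE_MAS2_SHIFT) & 0x1e;
            /* MAS3: PA bits 31..12 plus the six permission bits. */
            *mas3 = ((uint32_t)pa & 0xfffff000) |
                ((flags >> PTE_MAS3_SHIFT) & 0x3f);
            /* MAS7: PA bits above 32 (hence the larger rotate amounts). */
            *mas7 = (uint32_t)(pa >> 32);
    }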
sys/powerpc/include/pte.h
@@ -212,11 +212,7 @@ typedef struct lpte lpte_t;
  * page size is 4k (12-bit mask), so RPN can really fit into 24 bits.
  */
 #ifndef	LOCORE
-struct pte {
-	vm_offset_t rpn;
-	uint32_t flags;
-};
-typedef struct pte pte_t;
+typedef uint64_t pte_t;
 #endif
 
 /* RPN mask, TLB0 4K pages */
@@ -225,13 +221,14 @@ typedef struct pte pte_t;
 #if defined(BOOKE_E500)
 
 /* PTE bits assigned to MAS2, MAS3 flags */
-#define	PTE_W		MAS2_W
-#define	PTE_I		MAS2_I
-#define	PTE_M		MAS2_M
-#define	PTE_G		MAS2_G
+#define	PTE_MAS2_SHIFT	19
+#define	PTE_W		(MAS2_W << PTE_MAS2_SHIFT)
+#define	PTE_I		(MAS2_I << PTE_MAS2_SHIFT)
+#define	PTE_M		(MAS2_M << PTE_MAS2_SHIFT)
+#define	PTE_G		(MAS2_G << PTE_MAS2_SHIFT)
 #define	PTE_MAS2_MASK	(MAS2_G | MAS2_M | MAS2_I | MAS2_W)
 
-#define	PTE_MAS3_SHIFT	8
+#define	PTE_MAS3_SHIFT	2
 #define	PTE_UX		(MAS3_UX << PTE_MAS3_SHIFT)
 #define	PTE_SX		(MAS3_SX << PTE_MAS3_SHIFT)
 #define	PTE_UW		(MAS3_UW << PTE_MAS3_SHIFT)
@@ -241,6 +238,9 @@ typedef struct pte pte_t;
 #define	PTE_MAS3_MASK	((MAS3_UX | MAS3_SX | MAS3_UW \
 			| MAS3_SW | MAS3_UR | MAS3_SR) << PTE_MAS3_SHIFT)
 
+#define	PTE_PS_SHIFT	8
+#define	PTE_PS_4KB	(2 << PTE_PS_SHIFT)
+
 #elif defined(BOOKE_PPC4XX)
 
 #define PTE_WL1		TLB_WL1
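PTE_PS_4KB encodes the page size as 2 << PTE_PS_SHIFT; assuming the MAV2-style TSIZE encoding (page size = 1 KB << TSIZE), the value 2 means 4 KB. A hedged decoding sketch (the 4-bit field width is my assumption, not taken from the commit):

    #include <stdint.h>

    #define PTE_PS_SHIFT  8
    #define PTE_PS_4KB    (2u << PTE_PS_SHIFT)

    /* Assumes page size = 1 KB << TSIZE, as in Book-E MAV2. */
    static inline uint64_t
    pte_page_size(uint32_t pte_flags)
    {
            uint32_t tsize = (pte_flags >> PTE_PS_SHIFT) & 0xf;

            return (1024ULL << tsize);      /* PTE_PS_4KB -> 4096 */
    }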
@@ -262,21 +262,21 @@ typedef struct pte pte_t;
 #endif
 
 /* Other PTE flags */
-#define	PTE_VALID	0x80000000	/* Valid */
-#define	PTE_MODIFIED	0x40000000	/* Modified */
-#define	PTE_WIRED	0x20000000	/* Wired */
-#define	PTE_MANAGED	0x10000000	/* Managed */
-#define	PTE_REFERENCED	0x04000000	/* Referenced */
+#define	PTE_VALID	0x00000001	/* Valid */
+#define	PTE_MODIFIED	0x00001000	/* Modified */
+#define	PTE_WIRED	0x00002000	/* Wired */
+#define	PTE_MANAGED	0x00000002	/* Managed */
+#define	PTE_REFERENCED	0x00040000	/* Referenced */
 
 /* Macro argument must of pte_t type. */
-#define	PTE_PA_SHIFT	12
-#define	PTE_RPN_FROM_PA(pa)	((pa) >> PTE_PA_SHIFT)
-#define	PTE_PA(pte)		((vm_paddr_t)((pte)->rpn) << PTE_PA_SHIFT)
-#define	PTE_ISVALID(pte)	((pte)->flags & PTE_VALID)
-#define	PTE_ISWIRED(pte)	((pte)->flags & PTE_WIRED)
-#define	PTE_ISMANAGED(pte)	((pte)->flags & PTE_MANAGED)
-#define	PTE_ISMODIFIED(pte)	((pte)->flags & PTE_MODIFIED)
-#define	PTE_ISREFERENCED(pte)	((pte)->flags & PTE_REFERENCED)
+#define	PTE_ARPN_SHIFT	12
+#define	PTE_RPN_FROM_PA(pa)	(((pa) & ~PAGE_MASK) << PTE_ARPN_SHIFT)
+#define	PTE_PA(pte)		((vm_paddr_t)(*pte >> PTE_ARPN_SHIFT) & ~PAGE_MASK)
+#define	PTE_ISVALID(pte)	((*pte) & PTE_VALID)
+#define	PTE_ISWIRED(pte)	((*pte) & PTE_WIRED)
+#define	PTE_ISMANAGED(pte)	((*pte) & PTE_MANAGED)
+#define	PTE_ISMODIFIED(pte)	((*pte) & PTE_MODIFIED)
+#define	PTE_ISREFERENCED(pte)	((*pte) & PTE_REFERENCED)
 
 #endif /* BOOKE */
 #endif /* _MACHINE_PTE_H_ */
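The flag bits move from the high end of the old flags word into the positions the hardware layout leaves free. A compile-time sanity sketch; the flag values are copied from the hunk above, while the hardware-field extents are my reading of the layout:

    /* Permissions at bits 2..7, PS at 8..11, WIMGE at 19..23. */
    #define HW_FIELDS  ((0x3fu << 2) | (0xfu << 8) | (0x1fu << 19))

    #define SW_FLAGS   (0x00000001u /* VALID */      | \
                        0x00000002u /* MANAGED */    | \
                        0x00001000u /* MODIFIED */   | \
                        0x00002000u /* WIRED */      | \
                        0x00040000u /* REFERENCED */)

    _Static_assert((SW_FLAGS & HW_FIELDS) == 0,
        "PTE flag bits stay clear of the hardware attribute fields");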
sys/powerpc/powerpc/genassym.c
@@ -119,8 +119,12 @@ ASSYM(USER_SR, USER_SR);
 #endif
 #elif defined(BOOKE)
 ASSYM(PM_PDIR, offsetof(struct pmap, pm_pdir));
-ASSYM(PTE_RPN, offsetof(struct pte, rpn));
-ASSYM(PTE_FLAGS, offsetof(struct pte, flags));
+/*
+ * With pte_t being a bitfield struct, these fields cannot be addressed via
+ * offsetof().
+ */
+ASSYM(PTE_RPN, 0);
+ASSYM(PTE_FLAGS, sizeof(uint32_t));
 #if defined(BOOKE_E500)
 ASSYM(TLB0_ENTRY_SIZE, sizeof(struct tlb_entry));
 #endif
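With pte_t now a plain 64-bit word, the two offsets follow from big-endian layout rather than offsetof(): on 32-bit big-endian PowerPC the most-significant word (the upper ARPN bits, loaded via PTE_RPN) sits at offset 0 and the low word (flags plus the low ARPN bits, loaded via PTE_FLAGS) at offset 4. A sketch of the equivalence the lwz instructions in trap_subr.S rely on (helper name hypothetical):

    #include <stdint.h>
    #include <string.h>

    #define PTE_RPN_OFF    0                 /* big-endian high word */
    #define PTE_FLAGS_OFF  sizeof(uint32_t)  /* big-endian low word */

    /* Mirrors lwz %rN, PTE_FLAGS(%rPTE) on a big-endian target. */
    static inline uint32_t
    pte_word(const uint64_t *pte, size_t off)
    {
            uint32_t w;

            memcpy(&w, (const char *)pte + off, sizeof(w));
            return (w);
    }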