Use the correct type for physical addresses.

On Book-E, physical addresses are actually 36 bits, not 32 bits.  This is
currently worked around by ignoring the top bits.  However, in some cases the
boot loader configures CCSR to a base above the 32-bit mark.  This is stage 1
in updating the pmap to handle 36-bit physical addresses.
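
To make the failure mode concrete, here is a minimal userland sketch (not
part of the commit; the typedefs and the CCSR value are illustrative
assumptions, with vm_paddr_t shown at the 64-bit width the series works
toward):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Stand-ins for the kernel types on 32-bit Book-E: vm_offset_t is
	 * 32 bits, while vm_paddr_t can hold a 36-bit physical address.
	 */
	typedef uint32_t vm_offset_t;
	typedef uint64_t vm_paddr_t;

	int
	main(void)
	{
		/* Hypothetical CCSR base placed above the 32-bit mark. */
		vm_paddr_t ccsr = 0xfe0000000ULL;	 /* 36-bit address */
		vm_offset_t clipped = (vm_offset_t)ccsr; /* old behavior */

		printf("as vm_paddr_t:  0x%09jx\n", (uintmax_t)ccsr);
		printf("as vm_offset_t: 0x%08jx (top bits lost)\n",
		    (uintmax_t)clipped);
		return (0);
	}

With these stand-in typedefs the second line prints 0xe0000000: exactly the
"ignoring the top bits" workaround the message describes.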
Author: Justin Hibbits
Date: 2015-07-04 19:00:38 +00:00
commit 0936003e3d (parent 8656f200dc)
5 changed files with 30 additions and 30 deletions

sys/powerpc/aim/mmu_oea.c

@@ -250,7 +250,7 @@ static int moea_pte_insert(u_int, struct pte *);
* PVO calls.
*/
static int moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
-vm_offset_t, vm_offset_t, u_int, int);
+vm_offset_t, vm_paddr_t, u_int, int);
static void moea_pvo_remove(struct pvo_entry *, int);
static struct pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int);
@@ -260,7 +260,7 @@ static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int);
*/
static int moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, u_int, int8_t);
-static void moea_syncicache(vm_offset_t, vm_size_t);
+static void moea_syncicache(vm_paddr_t, vm_size_t);
static boolean_t moea_query_bit(vm_page_t, int);
static u_int moea_clear_bit(vm_page_t, int);
static void moea_kremove(mmu_t, vm_offset_t);
@@ -306,10 +306,10 @@ void moea_deactivate(mmu_t, struct thread *);
void moea_cpu_bootstrap(mmu_t, int);
void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
void *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t);
-void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
+void *moea_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea_kextract(mmu_t, vm_offset_t);
-void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t);
+void moea_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t);
void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
@@ -371,7 +371,7 @@ static mmu_method_t moea_methods[] = {
MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
static __inline uint32_t
-moea_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
+moea_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
{
uint32_t pte_lo;
int i;
@@ -1472,7 +1472,7 @@ moea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
}
void
-moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
+moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{
u_int pte_lo;
int error;
@@ -1877,14 +1877,14 @@ moea_bootstrap_alloc(vm_size_t size, u_int align)
}
static void
-moea_syncicache(vm_offset_t pa, vm_size_t len)
+moea_syncicache(vm_paddr_t pa, vm_size_t len)
{
__syncicache((void *)pa, len);
}
static int
moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
-vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
+vm_offset_t va, vm_paddr_t pa, u_int pte_lo, int flags)
{
struct pvo_entry *pvo;
u_int sr;
@@ -2472,7 +2472,7 @@ moea_clear_bit(vm_page_t m, int ptebit)
* Return true if the physical range is encompassed by the battable[idx]
*/
static int
-moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
+moea_bat_mapped(int idx, vm_paddr_t pa, vm_size_t size)
{
u_int prot;
u_int32_t start;
@@ -2539,7 +2539,7 @@ moea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
}
void *
-moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
+moea_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
vm_offset_t va, tmpva, ppa, offset;
int i;

sys/powerpc/aim/mmu_oea64.c

@@ -226,7 +226,7 @@ static boolean_t moea64_query_bit(mmu_t, vm_page_t, uint64_t);
static u_int moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
static void moea64_kremove(mmu_t, vm_offset_t);
static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
-vm_offset_t pa, vm_size_t sz);
+vm_paddr_t pa, vm_size_t sz);
/*
* Kernel MMU interface
@@ -267,11 +267,11 @@ void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
-void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
+void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
-void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
+void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
@@ -419,7 +419,7 @@ moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte)
}
static __inline uint64_t
-moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
+moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
{
uint64_t pte_lo;
int i;
@@ -1054,7 +1054,7 @@ moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
*/
static __inline
-void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {
+void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa) {
KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
@@ -1159,7 +1159,7 @@ moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
-vm_offset_t pa = VM_PAGE_TO_PHYS(m);
+vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
if (size + off > PAGE_SIZE)
panic("moea64_zero_page: size + off > PAGE_SIZE");
@@ -1180,7 +1180,7 @@ moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
void
moea64_zero_page(mmu_t mmu, vm_page_t m)
{
-vm_offset_t pa = VM_PAGE_TO_PHYS(m);
+vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
vm_offset_t va, off;
if (!hw_direct_map) {
@@ -1310,7 +1310,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
}
static void
-moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
+moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
vm_size_t sz)
{
@@ -1692,7 +1692,7 @@ moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
* Map a wired page into kernel virtual address space.
*/
void
-moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
+moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{
int error;
struct pvo_entry *pvo, *oldpvo;
@@ -2517,7 +2517,7 @@ moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
* NOT real memory.
*/
void *
-moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
+moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
vm_offset_t va, tmpva, ppa, offset;

sys/powerpc/booke/pmap.c

@@ -194,7 +194,7 @@ static tlbtid_t tid_alloc(struct pmap *);
static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
-static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
+static int tlb1_set_entry(vm_offset_t, vm_paddr_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);
@@ -392,7 +392,7 @@ static mmu_method_t mmu_booke_methods[] = {
MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
static __inline uint32_t
-tlb_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
+tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
{
uint32_t attrib;
int i;
@@ -3016,7 +3016,7 @@ size2tsize(vm_size_t size)
* kept in tlb1_idx) and are not supposed to be invalidated.
*/
static int
-tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
+tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
uint32_t flags)
{
uint32_t ts, tid;
@@ -3160,7 +3160,7 @@ tlb1_init()
tlb1[i].phys = mas3 & MAS3_RPN;
if (i == 0)
-kernload = mas3 & MAS3_RPN;
+kernload = tlb1[i].phys;
tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
tlb1[i].size = (tsz > 0) ? tsize2size(tsz) : 0;

sys/powerpc/include/pte.h

@@ -210,7 +210,7 @@ typedef struct lpte lpte_t;
*/
#ifndef LOCORE
struct pte {
-vm_offset_t rpn;
+vm_paddr_t rpn;
uint32_t flags;
};
typedef struct pte pte_t;
@@ -273,5 +273,5 @@ typedef struct pte pte_t;
#define PTE_ISMODIFIED(pte) ((pte)->flags & PTE_MODIFIED)
#define PTE_ISREFERENCED(pte) ((pte)->flags & PTE_REFERENCED)
#endif /* BOOKE_PPC4XX */
#endif /* BOOKE */
#endif /* _MACHINE_PTE_H_ */

sys/powerpc/powerpc/mmu_if.m

@@ -107,14 +107,14 @@ CODE {
return;
}
-static void *mmu_null_mapdev_attr(mmu_t mmu, vm_offset_t pa,
+static void *mmu_null_mapdev_attr(mmu_t mmu, vm_paddr_t pa,
vm_size_t size, vm_memattr_t ma)
{
return MMU_MAPDEV(mmu, pa, size);
}
static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
-vm_offset_t pa, vm_memattr_t ma)
+vm_paddr_t pa, vm_memattr_t ma)
{
MMU_KENTER(mmu, va, pa);
}
@@ -792,7 +792,7 @@ METHOD void * mapdev {
*/
METHOD void * mapdev_attr {
mmu_t _mmu;
-vm_offset_t _pa;
+vm_paddr_t _pa;
vm_size_t _size;
vm_memattr_t _attr;
} DEFAULT mmu_null_mapdev_attr;
@@ -859,7 +859,7 @@ METHOD void kenter {
METHOD void kenter_attr {
mmu_t _mmu;
vm_offset_t _va;
-vm_offset_t _pa;
+vm_paddr_t _pa;
vm_memattr_t _ma;
} DEFAULT mmu_null_kenter_attr;