Fix physical address type to vm_paddr_t.
commit 20b7961267
parent 5704576a0a
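
vm_offset_t holds a virtual address and vm_paddr_t a physical one; on powerpc configurations where physical memory extends beyond the virtual address width (for example, a 32-bit Book-E kernel addressing RAM or devices above 4 GB), vm_paddr_t can be wider than vm_offset_t, and funnelling a physical address through a vm_offset_t parameter silently drops the high bits. The sketch below is illustrative only, standing in for the kernel types with fixed-width typedefs (an assumption; the real widths are configuration dependent). A consumer-side usage sketch for the corrected prototypes follows the diff.

#include <inttypes.h>
#include <stdio.h>

/*
 * Stand-ins for the kernel types, assuming a hypothetical 32-bit kernel
 * with more than 32 bits of physical address space; the real definitions
 * are machine dependent.
 */
typedef uint32_t vm_offset_t;	/* virtual address: 32 bits */
typedef uint64_t vm_paddr_t;	/* physical address: may be wider */

/* Old-style interface: physical address squeezed into vm_offset_t. */
static void
kenter_old(vm_offset_t va, vm_offset_t pa)
{
	printf("old: va=0x%" PRIx32 " pa=0x%" PRIx32 " (high bits lost)\n",
	    va, pa);
}

/* Corrected interface: physical address carried as vm_paddr_t. */
static void
kenter_new(vm_offset_t va, vm_paddr_t pa)
{
	printf("new: va=0x%" PRIx32 " pa=0x%" PRIx64 "\n", va, pa);
}

int
main(void)
{
	vm_paddr_t pa = 0x234567000ULL;	/* device memory above 4 GB */

	kenter_old(0xd0000000, pa);	/* argument truncated to 0x34567000 */
	kenter_new(0xd0000000, pa);	/* full physical address preserved */
	return (0);
}
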
@@ -289,7 +289,7 @@ boolean_t moea_is_modified(mmu_t, vm_page_t);
 boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
 boolean_t moea_is_referenced(mmu_t, vm_page_t);
 boolean_t moea_ts_referenced(mmu_t, vm_page_t);
-vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
+vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
 boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
 int moea_page_wired_mappings(mmu_t, vm_page_t);
 void moea_pinit(mmu_t, pmap_t);
@@ -308,14 +308,14 @@ void moea_activate(mmu_t, struct thread *);
 void moea_deactivate(mmu_t, struct thread *);
 void moea_cpu_bootstrap(mmu_t, int);
 void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
-void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t);
+void *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t);
 void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
 void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
-vm_offset_t moea_kextract(mmu_t, vm_offset_t);
+vm_paddr_t moea_kextract(mmu_t, vm_offset_t);
 void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t);
-void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
+void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t);
 void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
-boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
+boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
 static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
 
 static mmu_method_t moea_methods[] = {
@@ -1436,7 +1436,7 @@ moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
  * Map a wired page into kernel virtual address space.
  */
 void
-moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
+moea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
 {
 
 	moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
@@ -1471,7 +1471,7 @@ moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
  * Extract the physical page address associated with the given kernel virtual
  * address.
  */
-vm_offset_t
+vm_paddr_t
 moea_kextract(mmu_t mmu, vm_offset_t va)
 {
 	struct pvo_entry *pvo;
@@ -1512,8 +1512,8 @@ moea_kremove(mmu_t mmu, vm_offset_t va)
  * first usable address after the mapped region.
  */
 vm_offset_t
-moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
-    vm_offset_t pa_end, int prot)
+moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
+    vm_paddr_t pa_end, int prot)
 {
 	vm_offset_t sva, va;
 
@@ -2408,7 +2408,7 @@ moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
 }
 
 boolean_t
-moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
+moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
 {
 	int i;
 
@@ -2431,7 +2431,7 @@ moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
  * NOT real memory.
  */
 void *
-moea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
+moea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
 {
 
 	return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));

@@ -291,7 +291,7 @@ static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
 static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
 static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t);
-static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
+static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
     int);
 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
     vm_paddr_t *);
@@ -316,12 +316,12 @@ static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
 static void mmu_booke_activate(mmu_t, struct thread *);
 static void mmu_booke_deactivate(mmu_t, struct thread *);
 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
-static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
+static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
-static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t);
-static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
+static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
+static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
 static void mmu_booke_kremove(mmu_t, vm_offset_t);
-static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
+static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
 static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
     vm_size_t);
 static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
@@ -1391,7 +1391,7 @@ mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
  * Map a wired page into kernel virtual address space.
  */
 static void
-mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
+mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
 {
 	unsigned int pdir_idx = PDIR_IDX(va);
 	unsigned int ptbl_idx = PTBL_IDX(va);
@@ -1814,8 +1814,8 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
  * Map a range of physical addresses into kernel virtual address space.
  */
 static vm_offset_t
-mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
-    vm_offset_t pa_end, int prot)
+mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
+    vm_paddr_t pa_end, int prot)
 {
 	vm_offset_t sva = *virt;
 	vm_offset_t va = sva;
@@ -2441,7 +2441,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
 }
 
 static int
-mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
+mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
 {
 	int i;
 	vm_offset_t va;
@@ -2599,7 +2599,7 @@ mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
  * for mapping device memory, NOT real memory.
  */
 static void *
-mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
+mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
 {
 	void *res;
 	uintptr_t va;

@@ -224,16 +224,16 @@ extern struct pmap kernel_pmap_store;
 #define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
 
 void pmap_bootstrap(vm_offset_t, vm_offset_t);
-void pmap_kenter(vm_offset_t va, vm_offset_t pa);
+void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
 void pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t);
 void pmap_kremove(vm_offset_t);
-void *pmap_mapdev(vm_offset_t, vm_size_t);
+void *pmap_mapdev(vm_paddr_t, vm_size_t);
 void *pmap_mapdev_attr(vm_offset_t, vm_size_t, vm_memattr_t);
 void pmap_unmapdev(vm_offset_t, vm_size_t);
 void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
 void pmap_deactivate(struct thread *);
-vm_offset_t pmap_kextract(vm_offset_t);
-int pmap_dev_direct_mapped(vm_offset_t, vm_size_t);
+vm_paddr_t pmap_kextract(vm_offset_t);
+int pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
 boolean_t pmap_mmu_install(char *name, int prio);
 
 #define vtophys(va) pmap_kextract((vm_offset_t)(va))

@@ -92,7 +92,7 @@ mem_valid(vm_offset_t addr, int len)
 	    &aregions, &naregions);
 
 	for (i = 0; i < npregions; i++)
-		if ((addr >= pregions[i].mr_start)
+		if ((addr >= pregions[i].mr_start)
 		   && (addr + len <= pregions[i].mr_start + pregions[i].mr_size))
 			return (0);
 
@@ -116,7 +116,7 @@ platform_timebase_freq(struct cpuref *cpu)
 {
 	return (PLATFORM_TIMEBASE_FREQ(plat_obj, cpu));
 }
-
+
 int
 platform_smp_first_cpu(struct cpuref *cpu)
 {

@@ -440,7 +440,7 @@ pmap_cpu_bootstrap(int ap)
 }
 
 void *
-pmap_mapdev(vm_offset_t pa, vm_size_t size)
+pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 {
 
 	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
@@ -471,7 +471,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
 	MMU_UNMAPDEV(mmu_obj, va, size);
 }
 
-vm_offset_t
+vm_paddr_t
 pmap_kextract(vm_offset_t va)
 {
 
@@ -480,7 +480,7 @@ pmap_kextract(vm_offset_t va)
 }
 
 void
-pmap_kenter(vm_offset_t va, vm_offset_t pa)
+pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 {
 
 	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
@@ -496,7 +496,7 @@ pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
 }
 
 boolean_t
-pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
+pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
 {
 
 	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
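
With the pmap.h prototypes above taking and returning vm_paddr_t, callers see full-width physical addresses end to end. A rough consumer-side sketch of the pattern (the register-window address is hypothetical, and this compiles only inside a kernel build; it is not part of this commit):

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/pmap.h>

/*
 * Hypothetical consumer of the corrected KPI; the register-window
 * address is made up for the example.
 */
static void *
map_regs_example(void)
{
	vm_paddr_t regs_pa = 0x0f10000000ULL;	/* may not fit in vm_offset_t */

	/* pmap_mapdev() now takes a vm_paddr_t, so nothing is truncated. */
	return (pmap_mapdev(regs_pa, PAGE_SIZE));
}

static vm_paddr_t
buffer_paddr_example(void *buf)
{
	/* vtophys()/pmap_kextract() now return a vm_paddr_t. */
	return (vtophys(buf));
}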