Use vm_paddr_t for physical addresses.
commit 58d7ebfa7c
parent 2590e88477
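For context, the distinction the commit enforces can be shown with a small standalone sketch. The typedefs and the dcache_page_inval_* helpers below are illustrative assumptions, not the kernel's own definitions; on sparc64 both types are integers wide enough to hold an address, so the change is about documenting in the prototypes which address space a value belongs to, not about changing layout.

/*
 * Illustrative sketch only -- not part of the commit.  The typedef
 * widths here are assumptions; in the tree both types come from the
 * sparc64 machine headers.
 */
#include <stdio.h>

typedef unsigned long vm_offset_t;	/* virtual address (assumed width) */
typedef unsigned long vm_paddr_t;	/* physical address (assumed width) */

/* Old style: a physical argument hidden behind the virtual-address type. */
static void dcache_page_inval_old(vm_offset_t pa)
{
	printf("invalidate D-cache lines for PA %#lx\n", (unsigned long)pa);
}

/* New style: the prototype documents that the argument is physical. */
static void dcache_page_inval_new(vm_paddr_t pa)
{
	printf("invalidate D-cache lines for PA %#lx\n", (unsigned long)pa);
}

int
main(void)
{
	vm_paddr_t pa = 0x40000000UL;	/* hypothetical page-aligned PA */

	dcache_page_inval_old((vm_offset_t)pa);	/* compiles, but the intent is lost */
	dcache_page_inval_new(pa);		/* the intent is explicit */
	return (0);
}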
@@ -95,15 +95,15 @@ struct cacheinfo {
 u_int ec_l2linesize;
 };
 
-typedef void dcache_page_inval_t(vm_offset_t pa);
-typedef void icache_page_inval_t(vm_offset_t pa);
+typedef void dcache_page_inval_t(vm_paddr_t pa);
+typedef void icache_page_inval_t(vm_paddr_t pa);
 
 void cache_init(phandle_t node);
 
-void cheetah_dcache_page_inval(vm_offset_t pa);
-void cheetah_icache_page_inval(vm_offset_t pa);
-void spitfire_dcache_page_inval(vm_offset_t pa);
-void spitfire_icache_page_inval(vm_offset_t pa);
+dcache_page_inval_t cheetah_dcache_page_inval;
+icache_page_inval_t cheetah_icache_page_inval;
+dcache_page_inval_t spitfire_dcache_page_inval;
+icache_page_inval_t spitfire_icache_page_inval;
 
 extern dcache_page_inval_t *dcache_page_inval;
 extern icache_page_inval_t *icache_page_inval;
@@ -47,7 +47,7 @@ struct iommu_state {
 u_int64_t is_dvmabase;
 int64_t is_cr; /* IOMMU control register value */
 
-vm_offset_t is_flushpa[2];
+vm_paddr_t is_flushpa[2];
 volatile int64_t *is_flushva[2];
 /*
 * When a flush is completed, 64 bytes will be stored at the given
@@ -77,7 +77,7 @@ struct iommu_state {
 /* interfaces for PCI/SBUS code */
 void iommu_init(char *, struct iommu_state *, int, u_int32_t, int);
 void iommu_reset(struct iommu_state *);
-void iommu_enter(struct iommu_state *, vm_offset_t, vm_offset_t, int);
+void iommu_enter(struct iommu_state *, vm_offset_t, vm_paddr_t, int);
 void iommu_remove(struct iommu_state *, vm_offset_t, size_t);
 void iommu_decode_fault(struct iommu_state *, vm_offset_t);
 
@@ -29,7 +29,7 @@
 #define _MACHINE_KERNELDUMP_H_
 
 struct sparc64_dump_reg {
-vm_offset_t dr_pa;
+vm_paddr_t dr_pa;
 vm_offset_t dr_size;
 vm_offset_t dr_offs;
 };
@@ -41,7 +41,7 @@ struct sparc64_dump_reg {
 */
 struct sparc64_dump_hdr {
 vm_offset_t dh_hdr_size;
-vm_offset_t dh_tsb_pa;
+vm_paddr_t dh_tsb_pa;
 vm_size_t dh_tsb_size;
 vm_size_t dh_tsb_mask;
 int dh_nregions;
@@ -42,7 +42,7 @@ extern char _end[];
 extern long Maxmem;
 
 extern vm_offset_t kstack0;
-extern vm_offset_t kstack0_phys;
+extern vm_paddr_t kstack0_phys;
 
 struct pcpu;
 
@@ -50,7 +50,7 @@ void cpu_halt(void);
 void cpu_identify(u_long vers, u_int clock, u_int id);
 void cpu_reset(void);
 void cpu_setregs(struct pcpu *pc);
-int is_physical_memory(vm_offset_t addr);
+int is_physical_memory(vm_paddr_t addr);
 void swi_vm(void *v);
 
 cpu_block_copy_t spitfire_block_copy;
@@ -30,13 +30,13 @@
 #define _MACHINE_OFW_MEM_H_
 
 struct ofw_mem_region {
-vm_offset_t mr_start;
-vm_offset_t mr_size;
+vm_paddr_t mr_start;
+vm_size_t mr_size;
 };
 
 struct ofw_map {
 vm_offset_t om_start;
-vm_offset_t om_size;
+vm_size_t om_size;
 u_long om_tte;
 };
 
@@ -72,10 +72,10 @@ struct pmap {
 
 void pmap_bootstrap(vm_offset_t ekva);
 void pmap_context_rollover(void);
-vm_offset_t pmap_kextract(vm_offset_t va);
-void pmap_kenter(vm_offset_t va, vm_offset_t pa);
+vm_paddr_t pmap_kextract(vm_offset_t va);
+void pmap_kenter(vm_offset_t va, vm_page_t m);
 void pmap_kremove(vm_offset_t);
-void pmap_kenter_flags(vm_offset_t va, vm_offset_t pa, u_long flags);
+void pmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags);
 void pmap_kremove_flags(vm_offset_t va);
 
 int pmap_cache_enter(vm_page_t m, vm_offset_t va);
@@ -92,15 +92,15 @@ void pmap_clear_write(vm_page_t m);
 
 #define vtophys(va) pmap_kextract(((vm_offset_t)(va)))
 
-extern vm_offset_t avail_start;
-extern vm_offset_t avail_end;
+extern vm_paddr_t avail_start;
+extern vm_paddr_t avail_end;
 extern struct pmap kernel_pmap_store;
 #define kernel_pmap (&kernel_pmap_store)
-extern vm_offset_t phys_avail[];
+extern vm_paddr_t phys_avail[];
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
-extern vm_offset_t msgbuf_phys;
+extern vm_paddr_t msgbuf_phys;
 
 static __inline int
 pmap_track_modified(pmap_t pm, vm_offset_t va)
@@ -59,7 +59,7 @@ struct cpu_start_args {
 
 struct ipi_cache_args {
 u_int ica_mask;
-u_long ica_pa;
+vm_paddr_t ica_pa;
 };
 
 struct ipi_tlb_args {
@@ -110,7 +110,7 @@ extern char tl_ipi_tlb_range_demap[];
 #if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
 
 static __inline void *
-ipi_dcache_page_inval(void *func, vm_offset_t pa)
+ipi_dcache_page_inval(void *func, vm_paddr_t pa)
 {
 struct ipi_cache_args *ica;
 
@@ -125,7 +125,7 @@ ipi_dcache_page_inval(void *func, vm_offset_t pa)
 }
 
 static __inline void *
-ipi_icache_page_inval(void *func, vm_offset_t pa)
+ipi_icache_page_inval(void *func, vm_paddr_t pa)
 {
 struct ipi_cache_args *ica;
 
@@ -215,13 +215,13 @@ ipi_wait(void *cookie)
 #else
 
 static __inline void *
-ipi_dcache_page_inval(void *func, vm_offset_t pa)
+ipi_dcache_page_inval(void *func, vm_paddr_t pa)
 {
 return (NULL);
 }
 
 static __inline void *
-ipi_icache_page_inval(void *func, vm_offset_t pa)
+ipi_icache_page_inval(void *func, vm_paddr_t pa)
 {
 return (NULL);
 }
@@ -46,7 +46,7 @@
 extern struct tte *tsb_kernel;
 extern vm_size_t tsb_kernel_mask;
 extern vm_size_t tsb_kernel_size;
-extern vm_offset_t tsb_kernel_phys;
+extern vm_paddr_t tsb_kernel_phys;
 
 static __inline struct tte *
 tsb_vpntobucket(pmap_t pm, vm_offset_t vpn)
@@ -27,9 +27,9 @@
 #ifndef _MACHINE_WATCH_H_
 #define _MACHINE_WATCH_H_
 
-int watch_phys_set_mask(vm_offset_t pa, u_long mask);
-int watch_phys_set(vm_offset_t pa, int sz);
-vm_offset_t watch_phys_get(int *bm);
+int watch_phys_set_mask(vm_paddr_t pa, u_long mask);
+int watch_phys_set(vm_paddr_t pa, int sz);
+vm_paddr_t watch_phys_get(int *bm);
 void watch_phys_clear(void);
 int watch_phys_active(void);
 int watch_virt_set_mask(vm_offset_t va, u_long mask);
@@ -340,9 +340,10 @@ psycho_attach(device_t dev)
 struct upa_regs *reg;
 struct ofw_pci_bdesc obd;
 struct psycho_desc *desc;
+vm_paddr_t pcictl_offs;
 phandle_t node;
 u_int64_t csr;
-u_long pcictl_offs, mlen;
+u_long mlen;
 int psycho_br[2];
 int n, i, nreg, rid;
 #if defined(PSYCHO_DEBUG) || defined(PSYCHO_STRAY)
@@ -375,13 +376,13 @@ psycho_attach(device_t dev)
 if (sc->sc_mode == PSYCHO_MODE_PSYCHO) {
 if (nreg <= 2)
 panic("psycho_attach: %d not enough registers", nreg);
-sc->sc_basepaddr = (vm_offset_t)UPA_REG_PHYS(&reg[2]);
+sc->sc_basepaddr = (vm_paddr_t)UPA_REG_PHYS(&reg[2]);
 mlen = UPA_REG_SIZE(&reg[2]);
 pcictl_offs = UPA_REG_PHYS(&reg[0]);
 } else {
 if (nreg <= 0)
 panic("psycho_attach: %d not enough registers", nreg);
-sc->sc_basepaddr = (vm_offset_t)UPA_REG_PHYS(&reg[0]);
+sc->sc_basepaddr = (vm_paddr_t)UPA_REG_PHYS(&reg[0]);
 mlen = UPA_REG_SIZE(reg);
 pcictl_offs = sc->sc_basepaddr + PSR_PCICTL0;
 }
@@ -43,7 +43,7 @@ struct psycho_softc {
 * PSYCHO register. we record the base physical address of these
 * also as it is the base of the entire PSYCHO
 */
-vm_offset_t sc_basepaddr;
+vm_paddr_t sc_basepaddr;
 
 /* Interrupt Group Number for this device */
 int sc_ign;
@@ -319,7 +319,7 @@ nexus_dmamap_destroy(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
 static int
 _nexus_dmamap_load_buffer(bus_dma_tag_t ddmat, bus_dma_segment_t segs[],
 void *buf, bus_size_t buflen, struct thread *td, int flags,
-vm_offset_t *lastaddrp, int *segp, int first)
+bus_addr_t *lastaddrp, int *segp, int first)
 {
 bus_size_t sgsize;
 bus_addr_t curaddr, lastaddr, baddr, bmask;
@@ -416,7 +416,7 @@ nexus_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
 #else
 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
 #endif
-vm_offset_t lastaddr;
+bus_addr_t lastaddr;
 int error, nsegs;
 
 error = _nexus_dmamap_load_buffer(ddmat, dm_segments, buf, buflen,
@@ -453,7 +453,7 @@ nexus_dmamap_load_mbuf(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
 error = 0;
 if (m0->m_pkthdr.len <= ddmat->dt_maxsize) {
 int first = 1;
-vm_offset_t lastaddr = 0;
+bus_addr_t lastaddr = 0;
 struct mbuf *m;
 
 for (m = m0; m != NULL && error == 0; m = m->m_next) {
@@ -487,7 +487,7 @@ nexus_dmamap_load_uio(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
 bus_dmamap_t map, struct uio *uio, bus_dmamap_callback2_t *callback,
 void *callback_arg, int flags)
 {
-vm_offset_t lastaddr;
+bus_addr_t lastaddr;
 #ifdef __GNUC__
 bus_dma_segment_t dm_segments[ddmat->dt_nsegments];
 #else
@@ -721,7 +721,7 @@ sparc64_bus_mem_map(bus_space_tag_t tag, bus_space_handle_t handle,
 vm_offset_t addr;
 vm_offset_t sva;
 vm_offset_t va;
-vm_offset_t pa;
+vm_paddr_t pa;
 vm_size_t vsz;
 u_long pm_flags;
 
@@ -48,9 +48,9 @@
 * Flush a physical page from the data cache.
 */
 void
-cheetah_dcache_page_inval(vm_offset_t spa)
+cheetah_dcache_page_inval(vm_paddr_t spa)
 {
-vm_offset_t pa;
+vm_paddr_t pa;
 void *cookie;
 
 KASSERT((spa & PAGE_MASK) == 0,
@@ -66,6 +66,6 @@ cheetah_dcache_page_inval(vm_offset_t spa)
 * consistency is maintained by hardware.
 */
 void
-cheetah_icache_page_inval(vm_offset_t pa)
+cheetah_icache_page_inval(vm_paddr_t pa)
 {
 }
@@ -49,7 +49,7 @@ void db_md_list_watchpoints(void);
 static void db_watch_print(vm_offset_t wp, int bm);
 
 int
-watch_phys_set_mask(vm_offset_t pa, u_long mask)
+watch_phys_set_mask(vm_paddr_t pa, u_long mask)
 {
 u_long lsucr;
 
@@ -62,7 +62,7 @@ watch_phys_set_mask(vm_offset_t pa, u_long mask)
 }
 
 int
-watch_phys_set(vm_offset_t pa, int sz)
+watch_phys_set(vm_paddr_t pa, int sz)
 {
 u_long off;
 
@@ -73,10 +73,10 @@ watch_phys_set(vm_offset_t pa, int sz)
 return (watch_phys_set_mask(pa, ((1 << sz) - 1) << off));
 }
 
-vm_offset_t
+vm_paddr_t
 watch_phys_get(int *bm)
 {
-u_long pa;
+vm_paddr_t pa;
 u_long lsucr;
 
 if (!watch_phys_active())
@@ -86,7 +86,7 @@ watch_phys_get(int *bm)
 lsucr = ldxa(0, ASI_LSU_CTL_REG);
 *bm = (lsucr & LSU_PM_MASK) >> LSU_PM_SHIFT;
 
-return ((vm_offset_t)pa);
+return (pa);
 }
 
 void
@@ -196,19 +196,20 @@ db_watch_print(vm_offset_t wp, int bm)
 void
 db_md_list_watchpoints(void)
 {
-vm_offset_t wp;
+vm_offset_t va;
+vm_paddr_t pa;
 int bm;
 
 db_printf("Physical address watchpoint:\n");
 if (watch_phys_active()) {
-wp = watch_phys_get(&bm);
-db_watch_print(wp, bm);
+pa = watch_phys_get(&bm);
+db_watch_print(pa, bm);
 } else
 db_printf("\tnot active.\n");
 db_printf("Virtual address watchpoint:\n");
 if (watch_virt_active()) {
-wp = watch_virt_get(&bm);
-db_watch_print(wp, bm);
+va = watch_virt_get(&bm);
+db_watch_print(va, bm);
 } else
 db_printf("\tnot active.\n");
 }
@@ -120,7 +120,7 @@ buf_flush(struct dumperinfo *di)
 }
 
 static int
-reg_write(struct dumperinfo *di, vm_offset_t pa, vm_size_t size)
+reg_write(struct dumperinfo *di, vm_paddr_t pa, vm_size_t size)
 {
 struct sparc64_dump_reg r;
 
@@ -132,7 +132,7 @@ reg_write(struct dumperinfo *di, vm_offset_t pa, vm_size_t size)
 }
 
 static int
-blk_dump(struct dumperinfo *di, vm_offset_t pa, vm_size_t size)
+blk_dump(struct dumperinfo *di, vm_paddr_t pa, vm_size_t size)
 {
 vm_size_t pos, rsz;
 vm_offset_t va;
@@ -402,7 +402,7 @@ iommu_reset(struct iommu_state *is)
 * Here are the iommu control routines.
 */
 void
-iommu_enter(struct iommu_state *is, vm_offset_t va, vm_offset_t pa, int flags)
+iommu_enter(struct iommu_state *is, vm_offset_t va, vm_paddr_t pa, int flags)
 {
 int64_t tte;
 
@@ -784,9 +784,10 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
 bus_dmamap_t map, bus_dma_segment_t sgs[], void *buf,
 bus_size_t buflen, struct thread *td, int flags, int *segp, int align)
 {
-bus_size_t sgsize;
-vm_offset_t curaddr, vaddr, voffs;
 bus_addr_t amask, dvmaddr;
+bus_size_t sgsize;
+vm_offset_t vaddr, voffs;
+vm_paddr_t curaddr;
 int error, sgcnt, firstpg;
 pmap_t pmap = NULL;
 
@@ -121,7 +121,7 @@ char uarea0[UAREA_PAGES * PAGE_SIZE];
 struct trapframe frame0;
 
 vm_offset_t kstack0;
-vm_offset_t kstack0_phys;
+vm_paddr_t kstack0_phys;
 
 struct kva_md_info kmi;
 
@@ -127,9 +127,9 @@ mmrw(dev_t dev, struct uio *uio, int flags)
 vm_offset_t eva;
 vm_offset_t off;
 vm_offset_t ova;
-vm_offset_t pa;
 vm_offset_t va;
 vm_prot_t prot;
+vm_paddr_t pa;
 vm_size_t cnt;
 vm_page_t m;
 int color;
@@ -284,8 +284,8 @@ cpu_mp_unleash(void *v)
 {
 volatile struct cpu_start_args *csa;
 struct pcpu *pc;
-vm_offset_t pa;
 vm_offset_t va;
+vm_paddr_t pa;
 u_int ctx_min;
 u_int ctx_inc;
 u_long s;
@@ -111,20 +111,20 @@
 * Virtual and physical address of message buffer.
 */
 struct msgbuf *msgbufp;
-vm_offset_t msgbuf_phys;
+vm_paddr_t msgbuf_phys;
 
 /*
 * Physical addresses of first and last available physical page.
 */
-vm_offset_t avail_start;
-vm_offset_t avail_end;
+vm_paddr_t avail_start;
+vm_paddr_t avail_end;
 
 int pmap_pagedaemon_waken;
 
 /*
 * Map of physical memory reagions.
 */
-vm_offset_t phys_avail[128];
+vm_paddr_t phys_avail[128];
 static struct ofw_mem_region mra[128];
 struct ofw_mem_region sparc64_memreg[128];
 int sparc64_nmemreg;
@@ -152,7 +152,7 @@ struct pmap kernel_pmap_store;
 /*
 * Allocate physical memory for use in pmap_bootstrap.
 */
-static vm_offset_t pmap_bootstrap_alloc(vm_size_t size);
+static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size);
 
 extern int tl1_immu_miss_patch_1[];
 extern int tl1_immu_miss_patch_2[];
@@ -269,8 +269,8 @@ pmap_bootstrap(vm_offset_t ekva)
 struct pmap *pm;
 struct tte *tp;
 vm_offset_t off;
-vm_offset_t pa;
 vm_offset_t va;
+vm_paddr_t pa;
 vm_size_t physsz;
 vm_size_t virtsz;
 ihandle_t pmem;
@@ -505,7 +505,7 @@ void
 pmap_map_tsb(void)
 {
 vm_offset_t va;
-vm_offset_t pa;
+vm_paddr_t pa;
 u_long data;
 u_long s;
 int i;
@@ -541,10 +541,10 @@ pmap_map_tsb(void)
 * Can only be called from pmap_bootstrap before avail start and end are
 * calculated.
 */
-static vm_offset_t
+static vm_paddr_t
 pmap_bootstrap_alloc(vm_size_t size)
 {
-vm_offset_t pa;
+vm_paddr_t pa;
 int i;
 
 size = round_page(size);
@@ -601,7 +601,7 @@ pmap_context_alloc(void)
 * Initialize the pmap module.
 */
 void
-pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
+pmap_init(vm_paddr_t phys_start, vm_paddr_t phys_end)
 {
 vm_offset_t addr;
 vm_size_t size;
@@ -644,7 +644,7 @@ pmap_init2(void)
 * Extract the physical page address associated with the given
 * map/virtual_address pair.
 */
-vm_offset_t
+vm_paddr_t
 pmap_extract(pmap_t pm, vm_offset_t va)
 {
 struct tte *tp;
@@ -662,7 +662,7 @@ pmap_extract(pmap_t pm, vm_offset_t va)
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
-vm_offset_t
+vm_paddr_t
 pmap_kextract(vm_offset_t va)
 {
 struct tte *tp;
@@ -815,43 +815,40 @@ pmap_cache_remove(vm_page_t m, vm_offset_t va)
 * Map a wired page into kernel virtual address space.
 */
 void
-pmap_kenter(vm_offset_t va, vm_offset_t pa)
+pmap_kenter(vm_offset_t va, vm_page_t m)
 {
-vm_offset_t opa;
 vm_offset_t ova;
 struct tte *tp;
 vm_page_t om;
-vm_page_t m;
 u_long data;
 
 PMAP_STATS_INC(pmap_nkenter);
 tp = tsb_kvtotte(va);
-m = PHYS_TO_VM_PAGE(pa);
 CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
-va, pa, tp, tp->tte_data);
+va, VM_PAGE_TO_PHYS(m), tp, tp->tte_data);
 if (m->pc != DCACHE_COLOR(va)) {
 CTR6(KTR_CT2,
 "pmap_kenter: off colour va=%#lx pa=%#lx o=%p oc=%#lx ot=%d pi=%#lx",
-va, pa, m->object,
+va, VM_PAGE_TO_PHYS(m), m->object,
 m->object ? m->object->pg_color : -1,
 m->object ? m->object->type : -1,
 m->pindex);
 PMAP_STATS_INC(pmap_nkenter_oc);
 }
 if ((tp->tte_data & TD_V) != 0) {
-opa = TTE_GET_PA(tp);
+om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
 ova = TTE_GET_VA(tp);
-if (pa == opa && va == ova) {
+if (m == om && va == ova) {
 PMAP_STATS_INC(pmap_nkenter_stupid);
 return;
 }
-om = PHYS_TO_VM_PAGE(opa);
 TAILQ_REMOVE(&om->md.tte_list, tp, tte_link);
 pmap_cache_remove(om, ova);
 if (va != ova)
 tlb_page_demap(kernel_pmap, ova);
 }
-data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP | TD_P | TD_W;
+data = TD_V | TD_8K | VM_PAGE_TO_PHYS(m) | TD_REF | TD_SW | TD_CP |
+TD_P | TD_W;
 if (pmap_cache_enter(m, va) != 0)
 data |= TD_CV;
 tp->tte_vpn = TV_VPN(va, TS_8K);
@@ -867,7 +864,7 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
 * to flush entries that might still be in the cache, if applicable.
 */
 void
-pmap_kenter_flags(vm_offset_t va, vm_offset_t pa, u_long flags)
+pmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags)
 {
 struct tte *tp;
 
@@ -922,7 +919,7 @@ pmap_kremove_flags(vm_offset_t va)
 * unchanged.
 */
 vm_offset_t
-pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
+pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 {
 
 return (TLB_PHYS_TO_DIRECT(start));
@@ -941,7 +938,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 PMAP_STATS_INC(pmap_nqenter);
 va = sva;
 while (count-- > 0) {
-pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
+pmap_kenter(va, *m);
 va += PAGE_SIZE;
 m++;
 }
@@ -1050,8 +1047,8 @@ pmap_dispose_thread(struct thread *td)
 vm_object_t ksobj;
 vm_offset_t ks;
 vm_page_t m;
-int i;
 int pages;
+int i;
 
 pages = td->td_kstack_pages;
 ksobj = td->td_kstack_obj;
@@ -1109,8 +1106,8 @@ pmap_swapout_thread(struct thread *td)
 vm_object_t ksobj;
 vm_offset_t ks;
 vm_page_t m;
-int i;
 int pages;
+int i;
 
 pages = td->td_kstack_pages;
 ksobj = td->td_kstack_obj;
@@ -1427,7 +1424,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 boolean_t wired)
 {
 struct tte *tp;
-vm_offset_t pa;
+vm_paddr_t pa;
 u_long data;
 int i;
 
@@ -1628,9 +1625,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 void
 pmap_zero_page(vm_page_t m)
 {
-vm_offset_t pa;
-vm_offset_t va;
 struct tte *tp;
+vm_offset_t va;
+vm_paddr_t pa;
 
 KASSERT((m->flags & PG_FICTITIOUS) == 0,
 ("pmap_zero_page: fake page"));
@@ -1657,9 +1654,9 @@ pmap_zero_page(vm_page_t m)
 void
 pmap_zero_page_area(vm_page_t m, int off, int size)
 {
-vm_offset_t pa;
-vm_offset_t va;
 struct tte *tp;
+vm_offset_t va;
+vm_paddr_t pa;
 
 KASSERT((m->flags & PG_FICTITIOUS) == 0,
 ("pmap_zero_page_area: fake page"));
@@ -1687,9 +1684,9 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 void
 pmap_zero_page_idle(vm_page_t m)
 {
-vm_offset_t pa;
-vm_offset_t va;
 struct tte *tp;
+vm_offset_t va;
+vm_paddr_t pa;
 
 KASSERT((m->flags & PG_FICTITIOUS) == 0,
 ("pmap_zero_page_idle: fake page"));
@@ -1716,10 +1713,10 @@ pmap_zero_page_idle(vm_page_t m)
 void
 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 {
-vm_offset_t pdst;
-vm_offset_t psrc;
 vm_offset_t vdst;
 vm_offset_t vsrc;
+vm_paddr_t pdst;
+vm_paddr_t psrc;
 struct tte *tp;
 
 KASSERT((mdst->flags & PG_FICTITIOUS) == 0,
@@ -53,7 +53,7 @@ PMAP_STATS_VAR(spitfire_icache_npage_inval_match);
 * Flush a physical page from the data cache.
 */
 void
-spitfire_dcache_page_inval(vm_offset_t pa)
+spitfire_dcache_page_inval(vm_paddr_t pa)
 {
 u_long target;
 void *cookie;
@@ -82,7 +82,7 @@ spitfire_dcache_page_inval(vm_offset_t pa)
 * Flush a physical page from the instruction cache.
 */
 void
-spitfire_icache_page_inval(vm_offset_t pa)
+spitfire_icache_page_inval(vm_paddr_t pa)
 {
 register u_long tag __asm("%g1");
 u_long target;
@@ -77,7 +77,7 @@ PMAP_STATS_VAR(tsb_nforeach);
 struct tte *tsb_kernel;
 vm_size_t tsb_kernel_mask;
 vm_size_t tsb_kernel_size;
-vm_offset_t tsb_kernel_phys;
+vm_paddr_t tsb_kernel_phys;
 
 struct tte *
 tsb_tte_lookup(pmap_t pm, vm_offset_t va)
@@ -315,7 +315,7 @@ cpu_wait(struct proc *p)
 }
 
 int
-is_physical_memory(vm_offset_t addr)
+is_physical_memory(vm_paddr_t addr)
 {
 struct ofw_mem_region *mr;
 
@@ -339,7 +339,7 @@ void *
 uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 {
 static vm_pindex_t color;
-vm_offset_t pa;
+vm_paddr_t pa;
 vm_page_t m;
 int pflags;
 void *va;