MIPS n64 support - continued...
1. On n64, use XKPHYS to map page table pages instead of KSEG0, and
   maintain just one freepages list on n64. The changes mainly introduce
   MIPS_PHYS_TO_DIRECT(pa) and MIPS_DIRECT_TO_PHYS(va), which use KSEG0
   in 32-bit compilation and XKPHYS in 64-bit compilation.
2. Change the macro-based PMAP_LMEM_MAP1(), PMAP_LMEM_MAP2() and
   PMAP_LMEM_UNMAP() to inline functions.
3. Introduce MIPS_DIRECT_MAPPABLE(pa), which further reduces the cases
   that need special handling for 64-bit compilation.
4. Update CP0 hazard definitions for CPU_RMI - the CPU does not need any
   nops.

Reviewed by:	neel
parent 28df1f238a
commit 8eec5e8f9c
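To make points 1 and 3 concrete, here is a minimal, standalone C sketch (not the kernel header itself) of the direct-map selection this commit introduces. The XKPHYS encoding mirrors the MIPS_PHYS_TO_XKPHYS_CACHED formula in the diff below; the CCA value of 3 and the main() driver are illustrative assumptions for a host-side demo, not part of the commit.

	/*
	 * Hedged sketch of the direct-map macros from the diff below.
	 * Assumption: CCA 3 (cacheable) for the XKPHYS cached window.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define MIPS_KSEG0_START          0xffffffff80000000ULL
	#define MIPS_KSEG0_LARGEST_PHYS   0x20000000ULL
	#define MIPS_CCA_CACHED           3ULL   /* assumed value, per-CPU in reality */

	#ifdef __mips_n64
	/* XKPHYS: quadrant 0x2, CCA in bits 61:59, PA in the low bits */
	#define MIPS_PHYS_TO_DIRECT(pa)  ((0x2ULL << 62) | (MIPS_CCA_CACHED << 59) | (pa))
	#define MIPS_DIRECT_MAPPABLE(pa) 1          /* all of RAM is direct-mapped */
	#else
	/* KSEG0: only the first 512MB of physical memory is visible */
	#define MIPS_PHYS_TO_DIRECT(pa)  ((pa) | MIPS_KSEG0_START)
	#define MIPS_DIRECT_MAPPABLE(pa) ((pa) < MIPS_KSEG0_LARGEST_PHYS)
	#endif

	int main(void)
	{
		uint64_t pa = 0x40000000ULL;    /* 1GB: above the KSEG0 window */

		if (MIPS_DIRECT_MAPPABLE(pa))
			printf("direct va = 0x%llx\n",
			    (unsigned long long)MIPS_PHYS_TO_DIRECT(pa));
		else
			printf("needs a temporary mapping (pmap_lmem_map1)\n");
		return (0);
	}

Because MIPS_DIRECT_MAPPABLE() folds the "is this physical address reachable?" test into one macro, most pmap code below no longer needs separate 32-bit and 64-bit function bodies.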
@@ -76,8 +76,10 @@
  * Caching of mapped addresses is controlled by bits in the TLB entry.
  */
 
-#define MIPS_KSEG0_LARGEST_PHYS (0x20000000)
-#define MIPS_PHYS_MASK (0x1fffffff)
+#define MIPS_KSEG0_LARGEST_PHYS (0x20000000)
+#define MIPS_KSEG0_PHYS_MASK (0x1fffffff)
+#define MIPS_XKPHYS_LARGEST_PHYS (0x10000000000) /* 40 bit PA */
+#define MIPS_XKPHYS_PHYS_MASK (0x0ffffffffff)
 
 #ifndef LOCORE
 #define MIPS_KUSEG_START 0x00000000
@@ -95,8 +97,8 @@
 
 #define MIPS_PHYS_TO_KSEG0(x) ((uintptr_t)(x) | MIPS_KSEG0_START)
 #define MIPS_PHYS_TO_KSEG1(x) ((uintptr_t)(x) | MIPS_KSEG1_START)
-#define MIPS_KSEG0_TO_PHYS(x) ((uintptr_t)(x) & MIPS_PHYS_MASK)
-#define MIPS_KSEG1_TO_PHYS(x) ((uintptr_t)(x) & MIPS_PHYS_MASK)
+#define MIPS_KSEG0_TO_PHYS(x) ((uintptr_t)(x) & MIPS_KSEG0_PHYS_MASK)
+#define MIPS_KSEG1_TO_PHYS(x) ((uintptr_t)(x) & MIPS_KSEG0_PHYS_MASK)
 
 #define MIPS_IS_KSEG0_ADDR(x) \
 	(((vm_offset_t)(x) >= MIPS_KSEG0_START) && \
@@ -107,9 +109,6 @@
 #define MIPS_IS_VALID_PTR(x) (MIPS_IS_KSEG0_ADDR(x) || \
 	MIPS_IS_KSEG1_ADDR(x))
 
-#define MIPS_XKPHYS_START 0x8000000000000000
-#define MIPS_XKPHYS_END 0xbfffffffffffffff
-
 /*
  * Cache Coherency Attributes:
  * UC: Uncached.
@@ -180,19 +179,34 @@
 #define MIPS_PHYS_TO_XKPHYS_UNCACHED(x) \
 	((0x2ULL << 62) | ((unsigned long long)(MIPS_CCA_UNCACHED) << 59) | (x))
 
-#define MIPS_XKPHYS_TO_PHYS(x) ((x) & 0x07ffffffffffffffULL)
+#define MIPS_XKPHYS_TO_PHYS(x) ((uintptr_t)(x) & MIPS_XKPHYS_PHYS_MASK)
 
+#define MIPS_XKPHYS_START 0x8000000000000000
+#define MIPS_XKPHYS_END 0xbfffffffffffffff
 #define MIPS_XUSEG_START 0x0000000000000000
 #define MIPS_XUSEG_END 0x0000010000000000
 
 #define MIPS_XKSEG_START 0xc000000000000000
 #define MIPS_XKSEG_END 0xc00000ff80000000
 
+#ifdef __mips_n64
+#define MIPS_DIRECT_MAPPABLE(pa) 1
+#define MIPS_PHYS_TO_DIRECT(pa) MIPS_PHYS_TO_XKPHYS_CACHED(pa)
+#define MIPS_PHYS_TO_DIRECT_UNCACHED(pa) MIPS_PHYS_TO_XKPHYS_UNCACHED(pa)
+#define MIPS_DIRECT_TO_PHYS(va) MIPS_XKPHYS_TO_PHYS(va)
+#else
+#define MIPS_DIRECT_MAPPABLE(pa) ((pa) < MIPS_KSEG0_LARGEST_PHYS)
+#define MIPS_PHYS_TO_DIRECT(pa) MIPS_PHYS_TO_KSEG0(pa)
+#define MIPS_PHYS_TO_DIRECT_UNCACHED(pa) MIPS_PHYS_TO_KSEG1(pa)
+#define MIPS_DIRECT_TO_PHYS(va) MIPS_KSEG0_TO_PHYS(va)
+#endif
+
 /* CPU dependent mtc0 hazard hook */
 #ifdef CPU_CNMIPS
 #define COP0_SYNC nop; nop; nop; nop; nop;
 #elif defined(CPU_SB1)
 #define COP0_SYNC ssnop; ssnop; ssnop; ssnop; ssnop; ssnop; ssnop; ssnop; ssnop
+#elif defined(CPU_RMI)
+#define COP0_SYNC
 #else
 /*
  * Pick a reasonable default based on the "typical" spacing described in the
@@ -96,7 +96,7 @@
 #define VM_MINUSER_ADDRESS ((vm_offset_t)0x00000000)
 #define VM_MAX_MMAP_ADDR VM_MAXUSER_ADDRESS
 
-#if defined(__mips_n64)
+#ifdef __mips_n64
 #define VM_MAXUSER_ADDRESS (VM_MINUSER_ADDRESS + (NPDEPG * NBSEG))
 #define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)0xc000000000000000)
 #define VM_MAX_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + (NPDEPG * NBSEG))
@@ -185,7 +185,7 @@
  * allocations use HIGHMEM if available, and then DEFAULT.
  * - HIGHMEM for other pages
  */
-#if 0 /* Not yet, change n64 to use xkphys */
+#ifdef __mips_n64
 #define VM_NFREELIST 1
 #define VM_FREELIST_DEFAULT 0
 #define VM_FREELIST_DIRECT VM_FREELIST_DEFAULT
@@ -1167,7 +1167,7 @@ eintrcnt:
 	.text
 VECTOR(MipsCache, unknown)
 	PTR_LA	k0, _C_LABEL(MipsCacheException)
-	li	k1, MIPS_PHYS_MASK
+	li	k1, MIPS_KSEG0_PHYS_MASK
 	and	k0, k1
 	PTR_LI	k1, MIPS_KSEG1_START
 	or	k0, k1
@@ -198,68 +198,135 @@ static void pmap_invalidate_all_action(void *arg);
 static void pmap_update_page_action(void *arg);
 #endif
 
-#if !defined(__mips_n64)
-struct local_sysmaps {
-	vm_offset_t base;
-	uint16_t valid1, valid2;
-};
-
-/* This structure is for large memory
- * above 512Meg. We can't (in 32 bit mode)
- * just use the direct mapped MIPS_KSEG0_TO_PHYS()
- * macros since we can't see the memory and must
- * map it in when we need to access it. In 64
- * bit mode this goes away.
+#ifndef __mips_n64
+/*
+ * This structure is for high memory (memory above 512Meg in 32 bit).
+ * This memory area does not have direct mapping, so we need a mechanism
+ * to do temporary per-CPU mapping to access these addresses.
+ *
+ * At bootup we reserve 2 virtual pages per CPU for mapping highmem pages; to
+ * access a highmem physical address on a CPU, we will disable interrupts and
+ * add the mapping from the reserved virtual address for the CPU to the physical
+ * address in the kernel pagetable.
  */
+struct local_sysmaps {
+	vm_offset_t base;
+	uint32_t saved_intr;
+	uint16_t valid1, valid2;
+};
 static struct local_sysmaps sysmap_lmem[MAXCPU];
 
-#define PMAP_LMEM_MAP1(va, phys) \
-	int cpu; \
-	struct local_sysmaps *sysm; \
-	pt_entry_t *pte, npte; \
-	\
-	intr = intr_disable(); \
-	cpu = PCPU_GET(cpuid); \
-	sysm = &sysmap_lmem[cpu]; \
-	va = sysm->base; \
-	npte = TLBLO_PA_TO_PFN(phys) | \
-	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE; \
-	pte = pmap_pte(kernel_pmap, va); \
-	*pte = npte; \
-	sysm->valid1 = 1
+static __inline void
+pmap_alloc_lmem_map(void)
+{
+	int i;
 
-#define PMAP_LMEM_MAP2(va1, phys1, va2, phys2) \
-	int cpu; \
-	struct local_sysmaps *sysm; \
-	pt_entry_t *pte, npte; \
-	\
-	intr = intr_disable(); \
-	cpu = PCPU_GET(cpuid); \
-	sysm = &sysmap_lmem[cpu]; \
-	va1 = sysm->base; \
-	va2 = sysm->base + PAGE_SIZE; \
-	npte = TLBLO_PA_TO_PFN(phys1) | \
-	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE; \
-	pte = pmap_pte(kernel_pmap, va1); \
-	*pte = npte; \
-	npte = TLBLO_PA_TO_PFN(phys2) | \
-	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE; \
-	pte = pmap_pte(kernel_pmap, va2); \
-	*pte = npte; \
-	sysm->valid1 = 1; \
+	for (i = 0; i < MAXCPU; i++) {
+		sysmap_lmem[i].base = virtual_avail;
+		virtual_avail += PAGE_SIZE * 2;
+		sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
+	}
+}
+
+static __inline vm_offset_t
+pmap_lmem_map1(vm_paddr_t phys)
+{
+	struct local_sysmaps *sysm;
+	pt_entry_t *pte, npte;
+	vm_offset_t va;
+	uint32_t intr;
+	int cpu;
+
+	intr = intr_disable();
+	cpu = PCPU_GET(cpuid);
+	sysm = &sysmap_lmem[cpu];
+	sysm->saved_intr = intr;
+	va = sysm->base;
+	npte = TLBLO_PA_TO_PFN(phys) |
+	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
+	pte = pmap_pte(kernel_pmap, va);
+	*pte = npte;
+	sysm->valid1 = 1;
+	return (va);
+}
+
+static __inline vm_offset_t
+pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
+{
+	struct local_sysmaps *sysm;
+	pt_entry_t *pte, npte;
+	vm_offset_t va1, va2;
+	uint32_t intr;
+	int cpu;
+
+	intr = intr_disable();
+	cpu = PCPU_GET(cpuid);
+	sysm = &sysmap_lmem[cpu];
+	sysm->saved_intr = intr;
+	va1 = sysm->base;
+	va2 = sysm->base + PAGE_SIZE;
+	npte = TLBLO_PA_TO_PFN(phys1) |
+	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
+	pte = pmap_pte(kernel_pmap, va1);
+	*pte = npte;
+	npte = TLBLO_PA_TO_PFN(phys2) |
+	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
+	pte = pmap_pte(kernel_pmap, va2);
+	*pte = npte;
+	sysm->valid1 = 1;
+	sysm->valid2 = 1;
+	return (va1);
+}
 
-#define PMAP_LMEM_UNMAP() \
-	pte = pmap_pte(kernel_pmap, sysm->base); \
-	*pte = PTE_G; \
-	tlb_invalidate_address(kernel_pmap, sysm->base); \
-	sysm->valid1 = 0; \
-	pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE); \
-	*pte = PTE_G; \
-	tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE); \
-	sysm->valid2 = 0; \
-	intr_restore(intr)
-#endif
+static __inline void
+pmap_lmem_unmap(void)
+{
+	struct local_sysmaps *sysm;
+	pt_entry_t *pte;
+	int cpu;
+
+	cpu = PCPU_GET(cpuid);
+	sysm = &sysmap_lmem[cpu];
+	pte = pmap_pte(kernel_pmap, sysm->base);
+	*pte = PTE_G;
+	tlb_invalidate_address(kernel_pmap, sysm->base);
+	sysm->valid1 = 0;
+	if (sysm->valid2) {
+		pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
+		*pte = PTE_G;
+		tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
+		sysm->valid2 = 0;
+	}
+	intr_restore(sysm->saved_intr);
+}
+#else /* __mips_n64 */
+
+static __inline void
+pmap_alloc_lmem_map(void)
+{
+}
+
+static __inline vm_offset_t
+pmap_lmem_map1(vm_paddr_t phys)
+{
+
+	return (0);
+}
+
+static __inline vm_offset_t
+pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
+{
+
+	return (0);
+}
+
+static __inline vm_offset_t
+pmap_lmem_unmap(void)
+{
+
+	return (0);
+}
+#endif /* !__mips_n64 */
 
 /*
  * Page table entry lookup routines.
@@ -354,10 +421,9 @@ pmap_steal_memory(vm_size_t size)
 
 	pa = phys_avail[0];
 	phys_avail[0] += size;
-	if (pa >= MIPS_KSEG0_LARGEST_PHYS) {
+	if (MIPS_DIRECT_MAPPABLE(pa) == 0)
 		panic("Out of memory below 512Meg?");
-	}
-	va = MIPS_PHYS_TO_KSEG0(pa);
+	va = MIPS_PHYS_TO_DIRECT(pa);
 	bzero((caddr_t)va, size);
 	return (va);
 }
@@ -429,9 +495,7 @@ void
 pmap_bootstrap(void)
 {
 	int i;
-#if !defined(__mips_n64)
-	int memory_larger_than_512meg = 0;
-#endif
+	int need_local_mappings = 0;
 
 	/* Sort. */
again:
@@ -459,10 +523,13 @@ pmap_bootstrap(void)
 		}
 	}
 
-#if !defined(__mips_n64)
-	if (phys_avail[i - 1] >= MIPS_KSEG0_LARGEST_PHYS)
-		memory_larger_than_512meg = 1;
-#endif
+	/*
+	 * In 32 bit, we may have memory which cannot be mapped directly.
+	 * This memory will need temporary mapping before it can be
+	 * accessed.
+	 */
+	if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1]))
+		need_local_mappings = 1;
 
 	/*
 	 * Copy the phys_avail[] array before we start stealing memory from it.
@@ -521,22 +588,8 @@ pmap_bootstrap(void)
 	printf("pcpu is available at virtual address %p.\n", pcpup);
 #endif
 
-#if !defined(__mips_n64)
-	/*
-	 * Steal some virtual space that will not be in kernel_segmap. This
-	 * va memory space will be used to map in kernel pages that are
-	 * outside the 512Meg region. Note that we only do this steal when
-	 * we do have memory in this region, that way for systems with
-	 * smaller memory we don't "steal" any va ranges :-)
-	 */
-	if (memory_larger_than_512meg) {
-		for (i = 0; i < MAXCPU; i++) {
-			sysmap_lmem[i].base = virtual_avail;
-			virtual_avail += PAGE_SIZE * 2;
-			sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
-		}
-	}
-#endif
+	if (need_local_mappings)
+		pmap_alloc_lmem_map();
 	pmap_create_kernel_pagetable();
 	pmap_max_asid = VMNUM_PIDS;
 	mips_wr_entryhi(0);
@@ -850,20 +903,13 @@ pmap_kremove(vm_offset_t va)
  *
  * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
  */
-#if defined(__mips_n64)
-vm_offset_t
-pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
-{
-	return (MIPS_PHYS_TO_XKPHYS_CACHED(start));
-}
-#else
 vm_offset_t
 pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 {
 	vm_offset_t va, sva;
 
-	if (end <= MIPS_KSEG0_LARGEST_PHYS)
-		return (MIPS_PHYS_TO_KSEG0(start));
+	if (MIPS_DIRECT_MAPPABLE(end))
+		return (MIPS_PHYS_TO_DIRECT(start));
 
 	va = sva = *virt;
 	while (start < end) {
@@ -874,7 +920,6 @@ pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 	*virt = va;
 	return (sva);
 }
-#endif
 
 /*
  * Add a list of wired pages to the kva
@@ -971,7 +1016,7 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
 	 * Recursively decrement next level pagetable refcount
 	 */
 	pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
-	pdpg = PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(pdp));
+	pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
 	pmap_unwire_pte_hold(pmap, va, pdpg);
 }
 #endif
@@ -1006,7 +1051,7 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
 		mpte = pmap->pm_ptphint;
 	} else {
 		pteva = *pmap_pde(pmap, va);
-		mpte = PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(pteva));
+		mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pteva));
 		pmap->pm_ptphint = mpte;
 	}
 }
@@ -1035,7 +1080,11 @@ static void
 pmap_grow_pte_page_cache()
 {
 
+#ifdef __mips_n64
+	vm_contig_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
+#else
 	vm_contig_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
+#endif
 }
 
 static vm_page_t
@@ -1075,7 +1124,7 @@ pmap_pinit(pmap_t pmap)
 	while ((ptdpg = pmap_alloc_pte_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL)
 		pmap_grow_pte_page_cache();
 
-	ptdva = MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(ptdpg));
+	ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg));
 	pmap->pm_segtab = (pd_entry_t *)ptdva;
 	pmap->pm_active = 0;
 	pmap->pm_ptphint = NULL;
@@ -1126,7 +1175,7 @@ _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
 	 * Map the pagetable page into the process address space, if it
	 * isn't already there.
	 */
-	pageva = MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(m));
+	pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
 
 #ifdef __mips_n64
 	if (ptepindex >= NUPDE) {
@@ -1149,7 +1198,7 @@ _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
 			return (NULL);
 		}
 	} else {
-		pg = PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(*pdep));
+		pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
 		pg->wire_count++;
 	}
 	/* Next level entry */
@@ -1202,7 +1251,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
 	    (pmap->pm_ptphint->pindex == ptepindex)) {
 		m = pmap->pm_ptphint;
 	} else {
-		m = PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(*pde));
+		m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
 		pmap->pm_ptphint = m;
 	}
 	m->wire_count++;
@@ -1250,7 +1299,7 @@ pmap_release(pmap_t pmap)
 	    pmap->pm_stats.resident_count));
 
 	ptdva = (vm_offset_t)pmap->pm_segtab;
-	ptdpg = PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(ptdva));
+	ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));
 
 	ptdpg->wire_count--;
 	atomic_subtract_int(&cnt.v_wire_count, 1);
@@ -1281,7 +1330,7 @@ pmap_growkernel(vm_offset_t addr)
 		nkpg = pmap_alloc_pte_page(nkpt, VM_ALLOC_INTERRUPT);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		*pdpe = (pd_entry_t)MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(nkpg));
+		*pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
 		continue; /* try again */
 	}
 #endif
@@ -1302,7 +1351,7 @@ pmap_growkernel(vm_offset_t addr)
 		if (!nkpg)
 			panic("pmap_growkernel: no memory to grow kernel");
 		nkpt++;
-		*pde = (pd_entry_t)MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(nkpg));
+		*pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
 
 		/*
 		 * The R[4-7]?00 stores only one copy of the Global bit in
@@ -2080,7 +2129,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 			mpte = pmap->pm_ptphint;
 		} else {
 			mpte = PHYS_TO_VM_PAGE(
-			    MIPS_KSEG0_TO_PHYS(*pde));
+			    MIPS_DIRECT_TO_PHYS(*pde));
 			pmap->pm_ptphint = mpte;
 		}
 		mpte->wire_count++;
@@ -2156,30 +2205,21 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
  *
  * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
  */
-#if defined(__mips_n64)
-void *
-pmap_kenter_temporary(vm_paddr_t pa, int i)
-{
-	return ((void *)MIPS_PHYS_TO_XKPHYS_CACHED(pa));
-}
-void
-pmap_kenter_temporary_free(vm_paddr_t pa)
-{
-}
-#else
 void *
 pmap_kenter_temporary(vm_paddr_t pa, int i)
 {
 	vm_offset_t va;
-	register_t intr;
 
 	if (i != 0)
 		printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
 		    __func__);
 
-	if (pa < MIPS_KSEG0_LARGEST_PHYS) {
-		va = MIPS_PHYS_TO_KSEG0(pa);
+	if (MIPS_DIRECT_MAPPABLE(pa)) {
+		va = MIPS_PHYS_TO_DIRECT(pa);
 	} else {
+#ifndef __mips_n64 /* XXX : to be converted to new style */
 		int cpu;
+		register_t intr;
 		struct local_sysmaps *sysm;
 		pt_entry_t *pte, npte;
 
@@ -2199,6 +2239,7 @@ pmap_kenter_temporary(vm_paddr_t pa, int i)
 		pmap_update_page(kernel_pmap, sysm->base, npte);
 		va = sysm->base;
 		intr_restore(intr);
+#endif
 	}
 	return ((void *)va);
 }
@@ -2206,14 +2247,17 @@ pmap_kenter_temporary(vm_paddr_t pa, int i)
 void
 pmap_kenter_temporary_free(vm_paddr_t pa)
 {
+#ifndef __mips_n64 /* XXX : to be converted to new style */
 	int cpu;
 	register_t intr;
 	struct local_sysmaps *sysm;
+#endif
 
-	if (pa < MIPS_KSEG0_LARGEST_PHYS) {
+	if (MIPS_DIRECT_MAPPABLE(pa)) {
 		/* nothing to do for this case */
 		return;
 	}
+#ifndef __mips_n64 /* XXX : to be converted to new style */
 	cpu = PCPU_GET(cpuid);
 	sysm = &sysmap_lmem[cpu];
 	if (sysm->valid1) {
@@ -2226,8 +2270,8 @@ pmap_kenter_temporary_free(vm_paddr_t pa)
 		intr_restore(intr);
 		sysm->valid1 = 0;
 	}
-	}
+#endif
 }
 
 /*
 * Moved the code to Machine Independent
@@ -2336,113 +2380,65 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 *
 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
-#if defined (__mips_n64)
-void
-pmap_zero_page(vm_page_t m)
-{
-	vm_offset_t va;
-	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-
-	va = MIPS_PHYS_TO_XKPHYS_CACHED(phys);
-	bzero((caddr_t)va, PAGE_SIZE);
-	mips_dcache_wbinv_range(va, PAGE_SIZE);
-}
-#else
 void
 pmap_zero_page(vm_page_t m)
 {
 	vm_offset_t va;
 	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-	register_t intr;
 
-	if (phys < MIPS_KSEG0_LARGEST_PHYS) {
-		va = MIPS_PHYS_TO_KSEG0(phys);
-
+	if (MIPS_DIRECT_MAPPABLE(phys)) {
+		va = MIPS_PHYS_TO_DIRECT(phys);
 		bzero((caddr_t)va, PAGE_SIZE);
 		mips_dcache_wbinv_range(va, PAGE_SIZE);
 	} else {
-		PMAP_LMEM_MAP1(va, phys);
-
+		va = pmap_lmem_map1(phys);
 		bzero((caddr_t)va, PAGE_SIZE);
 		mips_dcache_wbinv_range(va, PAGE_SIZE);
-
-		PMAP_LMEM_UNMAP();
+		pmap_lmem_unmap();
 	}
 }
-#endif
 
 /*
 * pmap_zero_page_area zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 *
 * off and size may not cover an area beyond a single hardware page.
 */
-#if defined (__mips_n64)
-void
-pmap_zero_page_area(vm_page_t m, int off, int size)
-{
-	vm_offset_t va;
-	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-
-	va = MIPS_PHYS_TO_XKPHYS_CACHED(phys);
-	bzero((char *)(caddr_t)va + off, size);
-	mips_dcache_wbinv_range(va + off, size);
-}
-#else
 void
 pmap_zero_page_area(vm_page_t m, int off, int size)
 {
 	vm_offset_t va;
 	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-	register_t intr;
 
-	if (phys < MIPS_KSEG0_LARGEST_PHYS) {
-		va = MIPS_PHYS_TO_KSEG0(phys);
+	if (MIPS_DIRECT_MAPPABLE(phys)) {
+		va = MIPS_PHYS_TO_DIRECT(phys);
 		bzero((char *)(caddr_t)va + off, size);
 		mips_dcache_wbinv_range(va + off, size);
 	} else {
-		PMAP_LMEM_MAP1(va, phys);
-
+		va = pmap_lmem_map1(phys);
 		bzero((char *)va + off, size);
 		mips_dcache_wbinv_range(va + off, size);
-
-		PMAP_LMEM_UNMAP();
+		pmap_lmem_unmap();
 	}
 }
-#endif
 
-#if defined (__mips_n64)
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-	vm_offset_t va;
-	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-
-	va = MIPS_PHYS_TO_XKPHYS_CACHED(phys);
-	bzero((caddr_t)va, PAGE_SIZE);
-	mips_dcache_wbinv_range(va, PAGE_SIZE);
-}
-#else
 void
 pmap_zero_page_idle(vm_page_t m)
 {
 	vm_offset_t va;
 	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-	register_t intr;
 
-	if (phys < MIPS_KSEG0_LARGEST_PHYS) {
-		va = MIPS_PHYS_TO_KSEG0(phys);
+	if (MIPS_DIRECT_MAPPABLE(phys)) {
+		va = MIPS_PHYS_TO_DIRECT(phys);
 		bzero((caddr_t)va, PAGE_SIZE);
 		mips_dcache_wbinv_range(va, PAGE_SIZE);
 	} else {
-		PMAP_LMEM_MAP1(va, phys);
-
+		va = pmap_lmem_map1(phys);
 		bzero((caddr_t)va, PAGE_SIZE);
 		mips_dcache_wbinv_range(va, PAGE_SIZE);
-
-		PMAP_LMEM_UNMAP();
+		pmap_lmem_unmap();
 	}
 }
-#endif
 
 /*
 * pmap_copy_page copies the specified (machine independent)
@@ -2452,31 +2448,14 @@ pmap_zero_page_idle(vm_page_t m)
 *
 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
-#if defined (__mips_n64)
-void
-pmap_copy_page(vm_page_t src, vm_page_t dst)
-{
-	vm_offset_t va_src, va_dst;
-	vm_paddr_t phy_src = VM_PAGE_TO_PHYS(src);
-	vm_paddr_t phy_dst = VM_PAGE_TO_PHYS(dst);
-
-	pmap_flush_pvcache(src);
-	mips_dcache_wbinv_range_index(MIPS_PHYS_TO_XKPHYS_CACHED(phy_dst), PAGE_SIZE);
-	va_src = MIPS_PHYS_TO_XKPHYS_CACHED(phy_src);
-	va_dst = MIPS_PHYS_TO_XKPHYS_CACHED(phy_dst);
-	bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
-	mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
-}
-#else
 void
 pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
 	vm_offset_t va_src, va_dst;
-	vm_paddr_t phy_src = VM_PAGE_TO_PHYS(src);
-	vm_paddr_t phy_dst = VM_PAGE_TO_PHYS(dst);
-	register_t intr;
+	vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src);
+	vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst);
 
-	if ((phy_src < MIPS_KSEG0_LARGEST_PHYS) && (phy_dst < MIPS_KSEG0_LARGEST_PHYS)) {
+	if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) {
 		/* easy case, all can be accessed via KSEG0 */
 		/*
 		 * Flush all caches for VA that are mapped to this page
@@ -2484,21 +2463,19 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
 		 */
 		pmap_flush_pvcache(src);
 		mips_dcache_wbinv_range_index(
-		    MIPS_PHYS_TO_KSEG0(phy_dst), PAGE_SIZE);
-		va_src = MIPS_PHYS_TO_KSEG0(phy_src);
-		va_dst = MIPS_PHYS_TO_KSEG0(phy_dst);
+		    MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE);
+		va_src = MIPS_PHYS_TO_DIRECT(phys_src);
+		va_dst = MIPS_PHYS_TO_DIRECT(phys_dst);
 		bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
 		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
 	} else {
-		PMAP_LMEM_MAP2(va_src, phy_src, va_dst, phy_dst);
-
+		va_src = pmap_lmem_map2(phys_src, phys_dst);
+		va_dst = va_src + PAGE_SIZE;
 		bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
 		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
-
-		PMAP_LMEM_UNMAP();
+		pmap_lmem_unmap();
 	}
 }
-#endif
 
 /*
 * Returns true if the pmap's pv is one of the first
@@ -2914,18 +2891,6 @@ pmap_clear_reference(vm_page_t m)
 *
 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
 */
-#if defined(__mips_n64)
-void *
-pmap_mapdev(vm_offset_t pa, vm_size_t size)
-{
-	return ((void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(pa));
-}
-
-void
-pmap_unmapdev(vm_offset_t va, vm_size_t size)
-{
-}
-#else
 void *
 pmap_mapdev(vm_offset_t pa, vm_size_t size)
 {
@@ -2935,8 +2900,8 @@ pmap_mapdev(vm_offset_t pa, vm_size_t size)
 	 * KSEG1 maps only first 512M of phys address space. For
	 * pa > 0x20000000 we should make proper mapping * using pmap_kenter.
	 */
-	if ((pa + size - 1) < MIPS_KSEG0_LARGEST_PHYS)
-		return ((void *)MIPS_PHYS_TO_KSEG1(pa));
+	if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
+		return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
 	else {
 		offset = pa & PAGE_MASK;
 		size = roundup(size + offset, PAGE_SIZE);
@@ -2959,6 +2924,7 @@ pmap_mapdev(vm_offset_t pa, vm_size_t size)
 void
 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 {
+#ifndef __mips_n64
 	vm_offset_t base, offset, tmpva;
 
 	/* If the address is within KSEG1 then there is nothing to do */
@@ -2971,8 +2937,8 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
 	for (tmpva = base; tmpva < base + size; tmpva += PAGE_SIZE)
 		pmap_kremove(tmpva);
 	kmem_free(kernel_map, base, size);
-}
+#endif
+}
 
 /*
 * perform the pmap work for mincore