o) Remove code related to VM_ALLOC_WIRED_TLB_PG_POOL, VM_KERNEL_ALLOC_OFFSET
and floating pages. They are unused and unsupported.
parent 1bf63ac57f
commit 37d2dea4b8
@@ -160,14 +160,8 @@ typedef struct pv_entry {
extern vm_offset_t phys_avail[PHYS_AVAIL_ENTRIES + 2];
extern vm_offset_t physmem_desc[PHYS_AVAIL_ENTRIES + 2];

extern char *ptvmmap; /* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern pd_entry_t *segbase;

extern vm_paddr_t mips_wired_tlb_physmem_start;
extern vm_paddr_t mips_wired_tlb_physmem_end;
extern u_int need_wired_tlb_page_pool;

#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
@@ -188,37 +182,6 @@ int pmap_compute_pages_to_dump(void);
void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
void pmap_flush_pvcache(vm_page_t m);

/*
 * floating virtual pages (FPAGES)
 *
 * These are the reserved virtual memory areas which can be
 * mapped to any physical memory.
 */
#define FPAGES 2
#define FPAGES_SHARED 2
#define FSPACE ((FPAGES * MAXCPU + FPAGES_SHARED) * PAGE_SIZE)
#define PMAP_FPAGE1 0x00 /* Used by pmap_zero_page &
                          * pmap_copy_page */
#define PMAP_FPAGE2 0x01 /* Used by pmap_copy_page */

#define PMAP_FPAGE3 0x00 /* Used by pmap_zero_page_idle */
#define PMAP_FPAGE_KENTER_TEMP 0x01 /* Used by coredump */

struct fpage {
	vm_offset_t kva;
	u_int state;
};

struct sysmaps {
	struct mtx lock;
	struct fpage fp[FPAGES];
};

vm_offset_t
pmap_map_fpage(vm_paddr_t pa, struct fpage *fp,
    boolean_t check_unmaped);
void pmap_unmap_fpage(vm_paddr_t pa, struct fpage *fp);

/*
 * Function to save TLB contents so that they may be inspected in the debugger.
 */
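For scale, here is a minimal standalone sketch of the reservation the removed FSPACE macro described. PAGE_SIZE and MAXCPU are assumed values for illustration only (the real ones come from the MIPS headers and the kernel configuration); the program just prints the arithmetic.

    /* Sketch of the removed per-CPU/shared fpage reservation size. */
    #include <stdio.h>

    #define PAGE_SIZE     4096  /* assumed 4 KB pages */
    #define MAXCPU        32    /* assumed value, for illustration only */
    #define FPAGES        2     /* per-CPU fpages: pmap_zero_page/pmap_copy_page */
    #define FPAGES_SHARED 2     /* shared fpages: idle zeroing and coredump */
    #define FSPACE ((FPAGES * MAXCPU + FPAGES_SHARED) * PAGE_SIZE)

    int
    main(void)
    {
        /* With these assumptions: (2 * 32 + 2) * 4096 = 270336 bytes. */
        printf("FSPACE = %d bytes (%d pages)\n", FSPACE, FSPACE / PAGE_SIZE);
        return (0);
    }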
@@ -101,12 +101,8 @@
#define VM_MAX_MMAP_ADDR VM_MAXUSER_ADDRESS
#define VM_MAX_ADDRESS ((vm_offset_t)0x80000000)

#ifndef VM_KERNEL_ALLOC_OFFSET
#define VM_KERNEL_ALLOC_OFFSET ((vm_offset_t)0x00000000)
#endif

#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)0xC0000000)
#define VM_KERNEL_WIRED_ADDR_END (VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET)
#define VM_KERNEL_WIRED_ADDR_END (VM_MIN_KERNEL_ADDRESS)
#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)0xFFFFC000)

/*
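Because VM_KERNEL_ALLOC_OFFSET defaulted to 0, and the commit message describes the feature as unused, dropping the offset from VM_KERNEL_WIRED_ADDR_END leaves the resulting address unchanged for such configurations. A hedged sketch of that arithmetic, with the macro values copied from the hunk above:

    #include <assert.h>
    #include <stdint.h>

    #define VM_MIN_KERNEL_ADDRESS  ((uint32_t)0xC0000000)
    #define VM_KERNEL_ALLOC_OFFSET ((uint32_t)0x00000000)  /* old default */

    int
    main(void)
    {
        /* Old and new definitions of VM_KERNEL_WIRED_ADDR_END coincide. */
        assert(VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET ==
            VM_MIN_KERNEL_ADDRESS);
        return (0);
    }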
@@ -87,7 +87,6 @@ ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap));

ASSYM(VM_MAX_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS);
ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
ASSYM(VM_KERNEL_ALLOC_OFFSET, VM_KERNEL_ALLOC_OFFSET);
ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc));
ASSYM(SIGFPE, SIGFPE);
ASSYM(PAGE_SHIFT, PAGE_SHIFT);
@@ -142,10 +142,6 @@ vm_offset_t physmem_desc[PHYS_AVAIL_ENTRIES + 2];
struct platform platform;
#endif

vm_paddr_t mips_wired_tlb_physmem_start;
vm_paddr_t mips_wired_tlb_physmem_end;
u_int need_wired_tlb_page_pool;

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
@@ -161,10 +161,6 @@ static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

struct fpage fpages_shared[FPAGES_SHARED];

struct sysmaps sysmaps_pcpu[MAXCPU];

static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap);
static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
@@ -188,7 +184,6 @@ static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
static int init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);
static void pmap_TLB_invalidate_kernel(vm_offset_t);
static void pmap_TLB_update_kernel(vm_offset_t, pt_entry_t);
static void pmap_init_fpage(void);

#ifdef SMP
static void pmap_invalidate_page_action(void *arg);
@@ -355,7 +350,7 @@ pmap_bootstrap(void)
	kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);


	virtual_avail = VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET;
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

#ifdef SMP
@@ -480,8 +475,6 @@ void
pmap_init(void)
{

	if (need_wired_tlb_page_pool)
		pmap_init_fpage();
	/*
	 * Initialize the address space (zone) for the pv entries. Set a
	 * high water mark so that the system can recover from excessive
@@ -805,136 +798,6 @@ pmap_qremove(vm_offset_t va, int count)
 * Page table page management routines.....
 ***************************************************/

/*
 * floating pages (FPAGES) management routines
 *
 * FPAGES are the reserved virtual memory areas which can be
 * mapped to any physical memory. This gets used typically
 * in the following functions:
 *
 * pmap_zero_page
 * pmap_copy_page
 */

/*
 * Create the floating pages, aka FPAGES!
 */
static void
pmap_init_fpage()
{
	vm_offset_t kva;
	int i, j;
	struct sysmaps *sysmaps;

	/*
	 * We allocate a total of (FPAGES*MAXCPU + FPAGES_SHARED + 1) pages
	 * at first. FPAGES & FPAGES_SHARED should be EVEN Then we'll adjust
	 * 'kva' to be even-page aligned so that the fpage area can be wired
	 * in the TLB with a single TLB entry.
	 */
	kva = kmem_alloc_nofault(kernel_map,
	    (FPAGES * MAXCPU + 1 + FPAGES_SHARED) * PAGE_SIZE);
	if ((void *)kva == NULL)
		panic("pmap_init_fpage: fpage allocation failed");

	/*
	 * Make up start at an even page number so we can wire down the
	 * fpage area in the tlb with a single tlb entry.
	 */
	if ((((vm_offset_t)kva) >> PGSHIFT) & 1) {
		/*
		 * 'kva' is not even-page aligned. Adjust it and free the
		 * first page which is unused.
		 */
		kmem_free(kernel_map, (vm_offset_t)kva, NBPG);
		kva = ((vm_offset_t)kva) + NBPG;
	} else {
		/*
		 * 'kva' is even page aligned. We don't need the last page,
		 * free it.
		 */
		kmem_free(kernel_map, ((vm_offset_t)kva) + FSPACE, NBPG);
	}

	for (i = 0; i < MAXCPU; i++) {
		sysmaps = &sysmaps_pcpu[i];
		mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);

		/* Assign FPAGES pages to the CPU */
		for (j = 0; j < FPAGES; j++)
			sysmaps->fp[j].kva = kva + (j) * PAGE_SIZE;
		kva = ((vm_offset_t)kva) + (FPAGES * PAGE_SIZE);
	}

	/*
	 * An additional 2 pages are needed, one for pmap_zero_page_idle()
	 * and one for coredump. These pages are shared by all cpu's
	 */
	fpages_shared[PMAP_FPAGE3].kva = kva;
	fpages_shared[PMAP_FPAGE_KENTER_TEMP].kva = kva + PAGE_SIZE;
}

/*
 * Map the page to the fpage virtual address as specified thru' fpage id
 */
vm_offset_t
pmap_map_fpage(vm_paddr_t pa, struct fpage *fp, boolean_t check_unmaped)
{
	vm_offset_t kva;
	register pt_entry_t *pte;
	pt_entry_t npte;

	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
	/*
	 * Check if the fpage is free
	 */
	if (fp->state) {
		if (check_unmaped == TRUE)
			pmap_unmap_fpage(pa, fp);
		else
			panic("pmap_map_fpage: fpage is busy");
	}
	fp->state = TRUE;
	kva = fp->kva;

	npte = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
	pte = pmap_pte(kernel_pmap, kva);
	*pte = npte;

	pmap_TLB_update_kernel(kva, npte);

	return (kva);
}

/*
 * Unmap the page from the fpage virtual address as specified thru' fpage id
 */
void
pmap_unmap_fpage(vm_paddr_t pa, struct fpage *fp)
{
	vm_offset_t kva;
	register pt_entry_t *pte;

	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
	/*
	 * Check if the fpage is busy
	 */
	if (!(fp->state)) {
		panic("pmap_unmap_fpage: fpage is free");
	}
	kva = fp->kva;

	pte = pmap_pte(kernel_pmap, kva);
	*pte = PTE_G;
	pmap_TLB_invalidate_kernel(kva);

	fp->state = FALSE;

	/*
	 * Should there be any flush operation at the end?
	 */
}

/* Revision 1.507
 *
 * Simplify the reference counting of page table pages. Specifically, use
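The even-page alignment that pmap_init_fpage() enforced exists because a MIPS TLB entry maps an even/odd pair of virtual pages, so the fpage area had to begin on an even page number to be wired with a single entry. A minimal standalone sketch of the ((kva >> PGSHIFT) & 1) test, assuming 4 KB pages (PGSHIFT == 12):

    #include <stdio.h>

    #define PGSHIFT 12  /* assumed 4 KB pages */

    /* Mirrors the alignment test used by the removed function. */
    static int
    starts_on_odd_page(unsigned long kva)
    {
        return ((kva >> PGSHIFT) & 1);
    }

    int
    main(void)
    {
        printf("%d\n", starts_on_odd_page(0xc0002000UL)); /* 0: even page, usable as-is */
        printf("%d\n", starts_on_odd_page(0xc0003000UL)); /* 1: odd page, skip one page */
        return (0);
    }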
@@ -1051,10 +914,6 @@ pmap_pinit(pmap_t pmap)
	req = VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO;

#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool)
		req |= VM_ALLOC_WIRED_TLB_PG_POOL;
#endif
	/*
	 * allocate the page directory page
	 */
@@ -1105,10 +964,6 @@ _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

	req = VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ;
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool)
		req |= VM_ALLOC_WIRED_TLB_PG_POOL;
#endif
	/*
	 * Find or fabricate a new pagetable page
	 */
@@ -1279,7 +1134,7 @@ pmap_growkernel(vm_offset_t addr)

	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
	if (kernel_vm_end == 0) {
		kernel_vm_end = VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET;
		kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
		nkpt = 0;
		while (segtab_pde(kernel_segmap, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
@@ -1308,10 +1163,6 @@ pmap_growkernel(vm_offset_t addr)
		 * This index is bogus, but out of the way
		 */
		req = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
		if (need_wired_tlb_page_pool)
			req |= VM_ALLOC_WIRED_TLB_PG_POOL;
#endif
		nkpg = vm_page_alloc(NULL, nkpt, req);
		if (!nkpg)
			panic("pmap_growkernel: no memory to grow kernel");
@@ -2155,12 +2006,6 @@ pmap_kenter_temporary(vm_paddr_t pa, int i)
		printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
		    __func__);

#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool) {
		va = pmap_map_fpage(pa, &fpages_shared[PMAP_FPAGE_KENTER_TEMP],
		    TRUE);
	} else
#endif
	if (pa < MIPS_KSEG0_LARGEST_PHYS) {
		va = MIPS_PHYS_TO_KSEG0(pa);
	} else {
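The surviving branch here, and in the zero/copy functions below, relies on the MIPS KSEG0 direct map: physical addresses below MIPS_KSEG0_LARGEST_PHYS are reachable at a fixed virtual offset with no page-table or fpage mapping at all. A hedged sketch of that translation; the constants are assumed values for illustration (the kernel takes them from its MIPS machine headers):

    #include <stdio.h>
    #include <stdint.h>

    #define MIPS_KSEG0_START        0x80000000u
    #define MIPS_KSEG0_LARGEST_PHYS 0x20000000u   /* assumed 512 MB direct-map window */
    #define MIPS_PHYS_TO_KSEG0(pa)  ((uint32_t)(pa) | MIPS_KSEG0_START)

    int
    main(void)
    {
        uint32_t pa = 0x01000000;  /* 16 MB physical */

        if (pa < MIPS_KSEG0_LARGEST_PHYS)
            printf("va = 0x%08x\n", MIPS_PHYS_TO_KSEG0(pa)); /* 0x81000000 */
        return (0);
    }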
@@ -2312,26 +2157,7 @@ pmap_zero_page(vm_page_t m)
	vm_offset_t va;
	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
	int int_level;
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool) {
		struct fpage *fp1;
		struct sysmaps *sysmaps;

		sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
		mtx_lock(&sysmaps->lock);
		sched_pin();

		fp1 = &sysmaps->fp[PMAP_FPAGE1];
		va = pmap_map_fpage(phys, fp1, FALSE);
		bzero((caddr_t)va, PAGE_SIZE);
		pmap_unmap_fpage(phys, fp1);
		sched_unpin();
		mtx_unlock(&sysmaps->lock);
		/*
		 * Should you do cache flush?
		 */
	} else
#endif
	if (phys < MIPS_KSEG0_LARGEST_PHYS) {

		va = MIPS_PHYS_TO_KSEG0(phys);
@@ -2358,7 +2184,6 @@ pmap_zero_page(vm_page_t m)
		sched_unpin();
		PMAP_LGMEM_UNLOCK(sysm);
	}

}

/*
@@ -2373,24 +2198,7 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
	vm_offset_t va;
	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
	int int_level;
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool) {
		struct fpage *fp1;
		struct sysmaps *sysmaps;

		sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
		mtx_lock(&sysmaps->lock);
		sched_pin();

		fp1 = &sysmaps->fp[PMAP_FPAGE1];
		va = pmap_map_fpage(phys, fp1, FALSE);
		bzero((caddr_t)va + off, size);
		pmap_unmap_fpage(phys, fp1);

		sched_unpin();
		mtx_unlock(&sysmaps->lock);
	} else
#endif
	if (phys < MIPS_KSEG0_LARGEST_PHYS) {
		va = MIPS_PHYS_TO_KSEG0(phys);
		bzero((char *)(caddr_t)va + off, size);
@@ -2423,15 +2231,7 @@ pmap_zero_page_idle(vm_page_t m)
	vm_offset_t va;
	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
	int int_level;
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool) {
		sched_pin();
		va = pmap_map_fpage(phys, &fpages_shared[PMAP_FPAGE3], FALSE);
		bzero((caddr_t)va, PAGE_SIZE);
		pmap_unmap_fpage(phys, &fpages_shared[PMAP_FPAGE3]);
		sched_unpin();
	} else
#endif
	if (phys < MIPS_KSEG0_LARGEST_PHYS) {
		va = MIPS_PHYS_TO_KSEG0(phys);
		bzero((caddr_t)va, PAGE_SIZE);
@@ -2472,95 +2272,67 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
vm_paddr_t phy_src = VM_PAGE_TO_PHYS(src);
vm_paddr_t phy_dst = VM_PAGE_TO_PHYS(dst);
int int_level;
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
if (need_wired_tlb_page_pool) {
struct fpage *fp1, *fp2;
struct sysmaps *sysmaps;

sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
mtx_lock(&sysmaps->lock);
sched_pin();

fp1 = &sysmaps->fp[PMAP_FPAGE1];
fp2 = &sysmaps->fp[PMAP_FPAGE2];

va_src = pmap_map_fpage(phy_src, fp1, FALSE);
va_dst = pmap_map_fpage(phy_dst, fp2, FALSE);

bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);

pmap_unmap_fpage(phy_src, fp1);
pmap_unmap_fpage(phy_dst, fp2);
sched_unpin();
mtx_unlock(&sysmaps->lock);

if ((phy_src < MIPS_KSEG0_LARGEST_PHYS) && (phy_dst < MIPS_KSEG0_LARGEST_PHYS)) {
/* easy case, all can be accessed via KSEG0 */
/*
 * Should you flush the cache?
 * Flush all caches for VA that are mapped to this page
 * to make sure that data in SDRAM is up to date
 */
} else
#endif
{
if ((phy_src < MIPS_KSEG0_LARGEST_PHYS) && (phy_dst < MIPS_KSEG0_LARGEST_PHYS)) {
/* easy case, all can be accessed via KSEG0 */
/*
 * Flush all caches for VA that are mapped to this page
 * to make sure that data in SDRAM is up to date
 */
pmap_flush_pvcache(src);
mips_dcache_wbinv_range_index(
MIPS_PHYS_TO_KSEG0(phy_dst), NBPG);
va_src = MIPS_PHYS_TO_KSEG0(phy_src);
va_dst = MIPS_PHYS_TO_KSEG0(phy_dst);
bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
} else {
int cpu;
struct local_sysmaps *sysm;
pmap_flush_pvcache(src);
mips_dcache_wbinv_range_index(
MIPS_PHYS_TO_KSEG0(phy_dst), NBPG);
va_src = MIPS_PHYS_TO_KSEG0(phy_src);
va_dst = MIPS_PHYS_TO_KSEG0(phy_dst);
bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
} else {
int cpu;
struct local_sysmaps *sysm;

cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
PMAP_LGMEM_LOCK(sysm);
sched_pin();
int_level = disableintr();
if (phy_src < MIPS_KSEG0_LARGEST_PHYS) {
/* one side needs mapping - dest */
va_src = MIPS_PHYS_TO_KSEG0(phy_src);
sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
sysm->valid2 = 1;
va_dst = (vm_offset_t)sysm->CADDR2;
} else if (phy_dst < MIPS_KSEG0_LARGEST_PHYS) {
/* one side needs mapping - src */
va_dst = MIPS_PHYS_TO_KSEG0(phy_dst);
sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
va_src = (vm_offset_t)sysm->CADDR1;
sysm->valid1 = 1;
} else {
/* all need mapping */
sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
sysm->valid1 = sysm->valid2 = 1;
va_src = (vm_offset_t)sysm->CADDR1;
va_dst = (vm_offset_t)sysm->CADDR2;
}
bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
if (sysm->valid1) {
pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
sysm->CMAP1 = 0;
sysm->valid1 = 0;
}
if (sysm->valid2) {
pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR2);
sysm->CMAP2 = 0;
sysm->valid2 = 0;
}
restoreintr(int_level);
sched_unpin();
PMAP_LGMEM_UNLOCK(sysm);
cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
PMAP_LGMEM_LOCK(sysm);
sched_pin();
int_level = disableintr();
if (phy_src < MIPS_KSEG0_LARGEST_PHYS) {
/* one side needs mapping - dest */
va_src = MIPS_PHYS_TO_KSEG0(phy_src);
sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
sysm->valid2 = 1;
va_dst = (vm_offset_t)sysm->CADDR2;
} else if (phy_dst < MIPS_KSEG0_LARGEST_PHYS) {
/* one side needs mapping - src */
va_dst = MIPS_PHYS_TO_KSEG0(phy_dst);
sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
va_src = (vm_offset_t)sysm->CADDR1;
sysm->valid1 = 1;
} else {
/* all need mapping */
sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
sysm->valid1 = sysm->valid2 = 1;
va_src = (vm_offset_t)sysm->CADDR1;
va_dst = (vm_offset_t)sysm->CADDR2;
}
bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
if (sysm->valid1) {
pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
sysm->CMAP1 = 0;
sysm->valid1 = 0;
}
if (sysm->valid2) {
pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR2);
sysm->CMAP2 = 0;
sysm->valid2 = 0;
}
restoreintr(int_level);
sched_unpin();
PMAP_LGMEM_UNLOCK(sysm);
}
}
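With the fpage path gone, pmap_copy_page keeps only two cases: copy directly through KSEG0 when both pages sit in the direct-map window, or set up temporary CMAP1/CMAP2 kernel mappings for whichever side lies above it. A standalone sketch of that case split; the constant and all names below are illustrative, not kernel definitions:

    #include <stdio.h>
    #include <stdint.h>

    #define MIPS_KSEG0_LARGEST_PHYS 0x20000000u   /* assumed 512 MB window */

    enum map_need { MAP_NONE, MAP_DST_ONLY, MAP_SRC_ONLY, MAP_BOTH };

    /* Decide which side of the copy needs a temporary kernel mapping. */
    static enum map_need
    copy_mapping_needed(uint32_t phy_src, uint32_t phy_dst)
    {
        int src_direct = phy_src < MIPS_KSEG0_LARGEST_PHYS;
        int dst_direct = phy_dst < MIPS_KSEG0_LARGEST_PHYS;

        if (src_direct && dst_direct)
            return (MAP_NONE);        /* both reachable via KSEG0 */
        if (src_direct)
            return (MAP_DST_ONLY);    /* map destination (CMAP2 in the code above) */
        if (dst_direct)
            return (MAP_SRC_ONLY);    /* map source (CMAP1 in the code above) */
        return (MAP_BOTH);
    }

    int
    main(void)
    {
        /* 16 MB source is direct-mapped, 768 MB destination is not: prints 1. */
        printf("%d\n", copy_mapping_needed(0x01000000, 0x30000000));
        return (0);
    }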
@@ -3308,11 +3080,6 @@ pmap_kextract(vm_offset_t va)
	else if (va >= MIPS_KSEG1_START &&
	    va < MIPS_KSEG2_START)
		pa = MIPS_KSEG1_TO_PHYS(va);
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	else if (need_wired_tlb_page_pool && ((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va < (VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET))))
		pa = MIPS_KSEG0_TO_PHYS(va);
#endif
	else if (va >= MIPS_KSEG2_START && va < VM_MAX_KERNEL_ADDRESS) {
		pt_entry_t *ptep;
@@ -340,7 +340,7 @@ blocked_loop:
	lw a2, TD_PCB(a1)
	sw a2, PC_CURPCB(a3)
	lw v0, TD_REALKSTACK(a1)
	li s0, (MIPS_KSEG2_START+VM_KERNEL_ALLOC_OFFSET) # If Uarea addr is below kseg2,
	li s0, MIPS_KSEG2_START # If Uarea addr is below kseg2,
	bltu v0, s0, sw2 # no need to insert in TLB.
	lw a1, TD_UPTE+0(s7) # t0 = first u. pte
	lw a2, TD_UPTE+4(s7) # t1 = 2nd u. pte