o) Use inline functions to access coprocessor 0 registers rather than external
   ones implemented using assembly (see the sketch after this list).
o) Use TRAPF_USERMODE() consistently rather than USERMODE().  Eliminate
   <machine/psl.h> as a result.
o) Use intr_*() rather than *intr(), consistently.
o) Use register_t instead of u_int in some trap code.
o) Merge some more endian-related macros to machine/asm.h from NetBSD.
o) Add PTR_LI macro, which loads an address with the correct sign-extension for
   a pointer.
o) Restore interrupts when bailing out due to an out-of-range IRQ in
   nexus_setup_intr().
o) Remove unused functions from psraccess.S.
o) Enter temporary virtual entries for large memory access into the page tables
   rather than simply hoping they stay resident in the TLB and we don't need to
   do a refill for them.
o) Abstract out large memory mapping setup/teardown using some macros.
o) Do mips_dcache_wbinv_range() when using temporary virtual addresses just
   like we do when we can use the direct map.
jmallett 2010-04-17 01:17:31 +00:00
parent 37d2dea4b8
commit 5605409291
16 changed files with 214 additions and 417 deletions

View File

@@ -14,6 +14,10 @@
# The file is partitioned: OLD_FILES first, then OLD_LIBS and OLD_DIRS last.
#
# 20100416: [mips] removed <machine/psl.h>
.if ${TARGET_ARCH} == "mips"
OLD_FILES+=usr/include/machine/psl.h
.endif
# 20100415: [mips] removed unused headers
.if ${TARGET_ARCH} == "mips"
OLD_FILES+=usr/include/machine/archtype.h

View File

@@ -98,23 +98,6 @@
#define _C_LABEL(x) x
/*
* Endian-independent assembly-code aliases for unaligned memory accesses.
*/
#if BYTE_ORDER == LITTLE_ENDIAN
#define LWLO lwl
#define LWHI lwr
#define SWLO swl
#define SWHI swr
#endif
#if BYTE_ORDER == BIG_ENDIAN
#define LWLO lwr
#define LWHI lwl
#define SWLO swr
#define SWHI swl
#endif
#ifdef USE_AENT
#define AENT(x) \
.aent x, 0
@@ -306,28 +289,32 @@ _C_LABEL(x):
/*
* Call ast if required
*
* XXX Do we really need to disable interrupts?
*/
#define DO_AST \
44: \
PTR_LA s0, _C_LABEL(disableintr) ;\
jalr s0 ;\
nop ;\
move a0, v0 ;\
mfc0 t0, MIPS_COP_0_STATUS ;\
and a0, t0, MIPS_SR_INT_IE ;\
xor t0, a0, t0 ;\
mtc0 t0, MIPS_COP_0_STATUS ;\
COP0_SYNC ;\
GET_CPU_PCPU(s1) ;\
lw s3, PC_CURPCB(s1) ;\
lw s1, PC_CURTHREAD(s1) ;\
PTR_L s3, PC_CURPCB(s1) ;\
PTR_L s1, PC_CURTHREAD(s1) ;\
lw s2, TD_FLAGS(s1) ;\
li s0, TDF_ASTPENDING | TDF_NEEDRESCHED;\
and s2, s0 ;\
PTR_LA s0, _C_LABEL(restoreintr) ;\
jalr s0 ;\
nop ;\
mfc0 t0, MIPS_COP_0_STATUS ;\
or t0, a0, t0 ;\
mtc0 t0, MIPS_COP_0_STATUS ;\
COP0_SYNC ;\
beq s2, zero, 4f ;\
nop ;\
PTR_LA s0, _C_LABEL(ast) ;\
jalr s0 ;\
PTR_ADDU a0, s3, U_PCB_REGS ;\
j 44b ;\
j 44b ;\
nop ;\
4:
@@ -382,6 +369,45 @@ _C_LABEL(x):
#define CALLFRAME_SP (CALLFRAME_SIZ - 2 * SZREG)
#define CALLFRAME_RA (CALLFRAME_SIZ - 1 * SZREG)
/*
* Endian-independent assembly-code aliases for unaligned memory accesses.
*/
#if _BYTE_ORDER == _LITTLE_ENDIAN
# define LWHI lwr
# define LWLO lwl
# define SWHI swr
# define SWLO swl
# if SZREG == 4
# define REG_LHI lwr
# define REG_LLO lwl
# define REG_SHI swr
# define REG_SLO swl
# else
# define REG_LHI ldr
# define REG_LLO ldl
# define REG_SHI sdr
# define REG_SLO sdl
# endif
#endif
#if _BYTE_ORDER == _BIG_ENDIAN
# define LWHI lwl
# define LWLO lwr
# define SWHI swl
# define SWLO swr
# if SZREG == 4
# define REG_LHI lwl
# define REG_LLO lwr
# define REG_SHI swl
# define REG_SLO swr
# else
# define REG_LHI ldl
# define REG_LLO ldr
# define REG_SHI sdl
# define REG_SLO sdr
# endif
#endif
/*
* While it would be nice to be compatible with the SGI
* REG_L and REG_S macros, because they do not take parameters, it
@@ -402,6 +428,7 @@ _C_LABEL(x):
#define PTR_SUBIU subu
#define PTR_L lw
#define PTR_LA la
#define PTR_LI li
#define PTR_S sw
#define PTR_SLL sll
#define PTR_SLLV sllv
@@ -424,6 +451,7 @@ _C_LABEL(x):
#define PTR_SUBIU dsubu
#define PTR_L ld
#define PTR_LA dla
#define PTR_LI dli
#define PTR_S sd
#define PTR_SLL dsll
#define PTR_SLLV dsllv
@@ -765,7 +793,7 @@ _C_LABEL(x):
#endif
#define GET_CPU_PCPU(reg) \
lw reg, _C_LABEL(pcpup);
PTR_L reg, _C_LABEL(pcpup);
/*
* Description of the setjmp buffer

View File

@@ -47,7 +47,6 @@
#ifndef _MACHINE_CPU_H_
#define _MACHINE_CPU_H_
#include <machine/psl.h>
#include <machine/endian.h>
#define MIPS_KSEG0_LARGEST_PHYS 0x20000000
@@ -334,6 +333,7 @@
#define cpu_swapout(p) panic("cpu_swapout: can't get here");
#ifndef _LOCORE
#include <machine/cpufunc.h>
#include <machine/frame.h>
/*
* Arguments to hardclock and gatherstats encapsulate the previous
@@ -342,7 +342,6 @@
#define clockframe trapframe /* Use normal trap frame */
#define CLKF_USERMODE(framep) ((framep)->sr & SR_KSU_USER)
#define CLKF_BASEPRI(framep) ((framep)->cpl == 0)
#define CLKF_PC(framep) ((framep)->pc)
#define CLKF_INTR(framep) (0)
#define MIPS_CLKF_INTR() (intr_nesting_level >= 1)
@@ -350,6 +349,11 @@
#define TRAPF_PC(framep) ((framep)->pc)
#define cpu_getstack(td) ((td)->td_frame->sp)
/*
* A machine-independent interface to the CPU's counter.
*/
#define get_cyclecount() mips_rd_count()
/*
* CPU identification, from PRID register.
*/
@@ -542,18 +546,6 @@ extern int intr_nesting_level;
* Low level access routines to CPU registers
*/
void setsoftintr0(void);
void clearsoftintr0(void);
void setsoftintr1(void);
void clearsoftintr1(void);
u_int32_t mips_cp0_status_read(void);
void mips_cp0_status_write(u_int32_t);
int disableintr(void);
void restoreintr(int);
int enableintr(void);
int Mips_TLBGetPID(void);
void swi_vm(void *);
@@ -562,7 +554,6 @@ void cpu_reset(void);
u_int32_t set_intr_mask(u_int32_t);
u_int32_t get_intr_mask(void);
u_int32_t get_cyclecount(void);
#define cpu_spinwait() /* nothing */

View File

@@ -38,7 +38,6 @@
#define _MIPS_DB_MACHDEP_H_
#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/endian.h>

View File

@@ -46,9 +46,7 @@
#include <sys/cdefs.h>
#ifdef _KERNEL
#ifdef _LOCORE
#include <machine/psl.h>
#else
#ifndef _LOCORE
#include <machine/cpu.h>
#endif
#endif

View File

@@ -84,17 +84,17 @@
#ifdef SMP
extern int mcount_lock;
#define MCOUNT_ENTER(s) { \
s = disable_intr(); \
s = intr_disable(); \
while (!atomic_cmpset_acq_int(&mcount_lock, 0, 1)) \
/* nothing */ ; \
}
#define MCOUNT_EXIT(s) { \
atomic_store_rel_int(&mcount_lock, 0); \
enableintr(s); \
intr_restore(s); \
}
#else
#define MCOUNT_ENTER(s) { s = disable_intr(); }
#define MCOUNT_EXIT(s) (enableintr(s))
#define MCOUNT_ENTER(s) { s = intr_disable(); }
#define MCOUNT_EXIT(s) (intr_restore(s))
#endif
/* REVISIT for mips */

View File

@@ -1,50 +0,0 @@
/* $OpenBSD: psl.h,v 1.2 1998/01/28 13:46:25 pefo Exp $ */
/*-
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Ralph Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)psl.h 8.1 (Berkeley) 6/10/93
* JNPR: psl.h,v 1.1 2006/08/07 05:38:57 katta
* $FreeBSD$
*/
#ifndef _MACHINE_PSL_H_
#define _MACHINE_PSL_H_
#include <machine/cpu.h>
/*
* Macros to decode processor status word.
*/
#define USERMODE(ps) (((ps) & SR_KSU_MASK) == SR_KSU_USER)
#define BASEPRI(ps) (((ps) & (INT_MASK | SR_INT_ENA_PREV)) \
== (INT_MASK | SR_INT_ENA_PREV))
#endif /* _MACHINE_PSL_H_ */

View File

@@ -74,17 +74,17 @@
#if !defined(SMP) && (defined(DDB) || defined(DEBUG))
struct trapdebug { /* trap history buffer for debugging */
u_int status;
u_int cause;
u_int vadr;
u_int pc;
u_int ra;
u_int sp;
u_int code;
register_t status;
register_t cause;
register_t vadr;
register_t pc;
register_t ra;
register_t sp;
register_t code;
};
#define trapdebug_enter(x, cd) { \
intrmask_t s = disableintr(); \
register_t s = intr_disable(); \
trp->status = x->sr; \
trp->cause = x->cause; \
trp->vadr = x->badvaddr; \
@@ -94,7 +94,7 @@ struct trapdebug { /* trap history buffer for debugging */
trp->code = cd; \
if (++trp == &trapdebug[TRAPSIZE]) \
trp = trapdebug; \
restoreintr(s); \
intr_restore(s); \
}
#define TRAPSIZE 10 /* Trap log buffer length */
@@ -116,7 +116,7 @@ void MipsTLBMissException(void);
void MipsUserGenException(void);
void MipsUserIntr(void);
u_int trap(struct trapframe *);
register_t trap(struct trapframe *);
#ifndef LOCORE /* XXX */
int check_address(void *);

View File

@@ -370,11 +370,10 @@ mips_vector_init(void)
* when handler is installed for it
*/
set_intr_mask(ALL_INT_MASK);
enableintr();
intr_enable();
/* Clear BEV in SR so we start handling our own exceptions */
mips_cp0_status_write(mips_cp0_status_read() & ~SR_BOOT_EXC_VEC);
mips_wr_status(mips_rd_status() & ~SR_BOOT_EXC_VEC);
}
/*
@@ -471,7 +470,7 @@ spinlock_enter(void)
td = curthread;
if (td->td_md.md_spinlock_count == 0)
td->td_md.md_saved_intr = disableintr();
td->td_md.md_saved_intr = intr_disable();
td->td_md.md_spinlock_count++;
critical_enter();
}
@@ -485,16 +484,7 @@ spinlock_exit(void)
critical_exit();
td->td_md.md_spinlock_count--;
if (td->td_md.md_spinlock_count == 0)
restoreintr(td->td_md.md_saved_intr);
}
u_int32_t
get_cyclecount(void)
{
u_int32_t count;
mfc0_macro(count, 9);
return (count);
intr_restore(td->td_md.md_saved_intr);
}
/*
@@ -503,7 +493,7 @@ get_cyclecount(void)
void
cpu_idle(int busy)
{
if (mips_cp0_status_read() & SR_INT_ENAB)
if (mips_rd_status() & SR_INT_ENAB)
__asm __volatile ("wait");
else
panic("ints disabled in idleproc!");

View File

@@ -296,7 +296,7 @@ smp_init_secondary(u_int32_t cpuid)
*/
mips_wr_compare(mips_rd_count() + counter_freq / hz);
enableintr();
intr_enable();
/* enter the scheduler */
sched_throw(NULL);

View File

@@ -166,16 +166,19 @@ static int
nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags,
driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep)
{
register_t s;
int irq;
intrmask_t s = disableintr();
s = intr_disable();
irq = rman_get_start(res);
if (irq >= NUM_MIPS_IRQS)
if (irq >= NUM_MIPS_IRQS) {
intr_restore(s);
return (0);
}
cpu_establish_hardintr(device_get_nameunit(child), filt, intr, arg,
irq, flags, cookiep);
restoreintr(s);
intr_restore(s);
return (0);
}

View File

@@ -194,10 +194,7 @@ static void pmap_update_page_action(void *arg);
struct local_sysmaps {
struct mtx lock;
pt_entry_t CMAP1;
pt_entry_t CMAP2;
caddr_t CADDR1;
caddr_t CADDR2;
vm_offset_t base;
uint16_t valid1, valid2;
};
@@ -211,6 +208,59 @@ struct local_sysmaps {
static struct local_sysmaps sysmap_lmem[MAXCPU];
caddr_t virtual_sys_start = (caddr_t)0;
#define PMAP_LMEM_MAP1(va, phys) \
int cpu; \
struct local_sysmaps *sysm; \
pt_entry_t *pte, npte; \
\
cpu = PCPU_GET(cpuid); \
sysm = &sysmap_lmem[cpu]; \
PMAP_LGMEM_LOCK(sysm); \
intr = intr_disable(); \
sched_pin(); \
va = sysm->base; \
npte = mips_paddr_to_tlbpfn(phys) | \
PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE; \
pte = pmap_pte(kernel_pmap, va); \
*pte = npte; \
sysm->valid1 = 1;
#define PMAP_LMEM_MAP2(va1, phys1, va2, phys2) \
int cpu; \
struct local_sysmaps *sysm; \
pt_entry_t *pte, npte; \
\
cpu = PCPU_GET(cpuid); \
sysm = &sysmap_lmem[cpu]; \
PMAP_LGMEM_LOCK(sysm); \
intr = intr_disable(); \
sched_pin(); \
va1 = sysm->base; \
va2 = sysm->base + PAGE_SIZE; \
npte = mips_paddr_to_tlbpfn(phys1) | \
PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE; \
pte = pmap_pte(kernel_pmap, va1); \
*pte = npte; \
npte = mips_paddr_to_tlbpfn(phys2) | \
PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE; \
pte = pmap_pte(kernel_pmap, va2); \
*pte = npte; \
sysm->valid1 = 1; \
sysm->valid2 = 1;
#define PMAP_LMEM_UNMAP() \
pte = pmap_pte(kernel_pmap, sysm->base); \
*pte = PTE_G; \
pmap_invalidate_page(kernel_pmap, sysm->base); \
sysm->valid1 = 0; \
pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE); \
*pte = PTE_G; \
pmap_invalidate_page(kernel_pmap, sysm->base + PAGE_SIZE); \
sysm->valid2 = 0; \
sched_unpin(); \
intr_restore(intr); \
PMAP_LGMEM_UNLOCK(sysm);
pd_entry_t
pmap_segmap(pmap_t pmap, vm_offset_t va)
{
@@ -382,12 +432,8 @@ pmap_bootstrap(void)
*/
if (memory_larger_than_512meg) {
for (i = 0; i < MAXCPU; i++) {
sysmap_lmem[i].CMAP1 = PTE_G;
sysmap_lmem[i].CMAP2 = PTE_G;
sysmap_lmem[i].CADDR1 = (caddr_t)virtual_avail;
virtual_avail += PAGE_SIZE;
sysmap_lmem[i].CADDR2 = (caddr_t)virtual_avail;
virtual_avail += PAGE_SIZE;
sysmap_lmem[i].base = virtual_avail;
virtual_avail += PAGE_SIZE * 2;
sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
PMAP_LGMEM_LOCK_INIT(&sysmap_lmem[i]);
}
@@ -2001,7 +2047,7 @@ void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
vm_offset_t va;
int int_level;
register_t intr;
if (i != 0)
printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
__func__);
@@ -2011,20 +2057,24 @@ pmap_kenter_temporary(vm_paddr_t pa, int i)
} else {
int cpu;
struct local_sysmaps *sysm;
pt_entry_t *pte, npte;
/* If this is used other than for dumps, we may need to leave
* interrupts disabled on return. If crash dumps don't work when
* we get to this point, we might want to consider this (leaving things
* disabled as a starting point ;-)
*/
int_level = disableintr();
intr = intr_disable();
cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
/* Since this is for the debugger, no locks or any other fun */
sysm->CMAP1 = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
npte = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
pte = pmap_pte(kernel_pmap, sysm->base);
*pte = npte;
sysm->valid1 = 1;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
va = (vm_offset_t)sysm->CADDR1;
restoreintr(int_level);
pmap_update_page(kernel_pmap, sysm->base, npte);
va = sysm->base;
intr_restore(intr);
}
return ((void *)va);
}
@@ -2033,7 +2083,7 @@ void
pmap_kenter_temporary_free(vm_paddr_t pa)
{
int cpu;
int int_level;
register_t intr;
struct local_sysmaps *sysm;
if (pa < MIPS_KSEG0_LARGEST_PHYS) {
@@ -2043,10 +2093,13 @@ pmap_kenter_temporary_free(vm_paddr_t pa)
cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
if (sysm->valid1) {
int_level = disableintr();
pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
restoreintr(int_level);
sysm->CMAP1 = 0;
pt_entry_t *pte;
intr = intr_disable();
pte = pmap_pte(kernel_pmap, sysm->base);
*pte = PTE_G;
pmap_invalidate_page(kernel_pmap, sysm->base);
intr_restore(intr);
sysm->valid1 = 0;
}
}
@@ -2156,33 +2209,20 @@ pmap_zero_page(vm_page_t m)
{
vm_offset_t va;
vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
int int_level;
register_t intr;
if (phys < MIPS_KSEG0_LARGEST_PHYS) {
va = MIPS_PHYS_TO_KSEG0(phys);
bzero((caddr_t)va, PAGE_SIZE);
mips_dcache_wbinv_range(va, PAGE_SIZE);
} else {
int cpu;
struct local_sysmaps *sysm;
PMAP_LMEM_MAP1(va, phys);
cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
PMAP_LGMEM_LOCK(sysm);
sched_pin();
int_level = disableintr();
sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
sysm->valid1 = 1;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
bzero(sysm->CADDR1, PAGE_SIZE);
pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
restoreintr(int_level);
sysm->CMAP1 = 0;
sysm->valid1 = 0;
sched_unpin();
PMAP_LGMEM_UNLOCK(sysm);
bzero((caddr_t)va, PAGE_SIZE);
mips_dcache_wbinv_range(va, PAGE_SIZE);
PMAP_LMEM_UNMAP();
}
}
@@ -2197,31 +2237,19 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
{
vm_offset_t va;
vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
int int_level;
register_t intr;
if (phys < MIPS_KSEG0_LARGEST_PHYS) {
va = MIPS_PHYS_TO_KSEG0(phys);
bzero((char *)(caddr_t)va + off, size);
mips_dcache_wbinv_range(va + off, size);
} else {
int cpu;
struct local_sysmaps *sysm;
PMAP_LMEM_MAP1(va, phys);
cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
PMAP_LGMEM_LOCK(sysm);
int_level = disableintr();
sched_pin();
sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
sysm->valid1 = 1;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
bzero((char *)sysm->CADDR1 + off, size);
pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
restoreintr(int_level);
sysm->CMAP1 = 0;
sysm->valid1 = 0;
sched_unpin();
PMAP_LGMEM_UNLOCK(sysm);
bzero((char *)va + off, size);
mips_dcache_wbinv_range(va + off, size);
PMAP_LMEM_UNMAP();
}
}
@@ -2230,33 +2258,20 @@ pmap_zero_page_idle(vm_page_t m)
{
vm_offset_t va;
vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
int int_level;
register_t intr;
if (phys < MIPS_KSEG0_LARGEST_PHYS) {
va = MIPS_PHYS_TO_KSEG0(phys);
bzero((caddr_t)va, PAGE_SIZE);
mips_dcache_wbinv_range(va, PAGE_SIZE);
} else {
int cpu;
struct local_sysmaps *sysm;
PMAP_LMEM_MAP1(va, phys);
cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
PMAP_LGMEM_LOCK(sysm);
int_level = disableintr();
sched_pin();
sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
sysm->valid1 = 1;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
bzero(sysm->CADDR1, PAGE_SIZE);
pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
restoreintr(int_level);
sysm->CMAP1 = 0;
sysm->valid1 = 0;
sched_unpin();
PMAP_LGMEM_UNLOCK(sysm);
bzero((caddr_t)va, PAGE_SIZE);
mips_dcache_wbinv_range(va, PAGE_SIZE);
PMAP_LMEM_UNMAP();
}
}
/*
@@ -2271,7 +2286,7 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
vm_offset_t va_src, va_dst;
vm_paddr_t phy_src = VM_PAGE_TO_PHYS(src);
vm_paddr_t phy_dst = VM_PAGE_TO_PHYS(dst);
int int_level;
register_t intr;
if ((phy_src < MIPS_KSEG0_LARGEST_PHYS) && (phy_dst < MIPS_KSEG0_LARGEST_PHYS)) {
/* easy case, all can be accessed via KSEG0 */
@@ -2281,58 +2296,18 @@
*/
pmap_flush_pvcache(src);
mips_dcache_wbinv_range_index(
MIPS_PHYS_TO_KSEG0(phy_dst), NBPG);
MIPS_PHYS_TO_KSEG0(phy_dst), PAGE_SIZE);
va_src = MIPS_PHYS_TO_KSEG0(phy_src);
va_dst = MIPS_PHYS_TO_KSEG0(phy_dst);
bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
} else {
int cpu;
struct local_sysmaps *sysm;
PMAP_LMEM_MAP2(va_src, phy_src, va_dst, phy_dst);
cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
PMAP_LGMEM_LOCK(sysm);
sched_pin();
int_level = disableintr();
if (phy_src < MIPS_KSEG0_LARGEST_PHYS) {
/* one side needs mapping - dest */
va_src = MIPS_PHYS_TO_KSEG0(phy_src);
sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
sysm->valid2 = 1;
va_dst = (vm_offset_t)sysm->CADDR2;
} else if (phy_dst < MIPS_KSEG0_LARGEST_PHYS) {
/* one side needs mapping - src */
va_dst = MIPS_PHYS_TO_KSEG0(phy_dst);
sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
va_src = (vm_offset_t)sysm->CADDR1;
sysm->valid1 = 1;
} else {
/* all need mapping */
sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
sysm->valid1 = sysm->valid2 = 1;
va_src = (vm_offset_t)sysm->CADDR1;
va_dst = (vm_offset_t)sysm->CADDR2;
}
bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
if (sysm->valid1) {
pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
sysm->CMAP1 = 0;
sysm->valid1 = 0;
}
if (sysm->valid2) {
pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR2);
sysm->CMAP2 = 0;
sysm->valid2 = 0;
}
restoreintr(int_level);
sched_unpin();
PMAP_LGMEM_UNLOCK(sysm);
mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
PMAP_LMEM_UNMAP();
}
}
@@ -3085,34 +3060,11 @@ pmap_kextract(vm_offset_t va)
/* Is the kernel pmap initialized? */
if (kernel_pmap->pm_active) {
if (va >= (vm_offset_t)virtual_sys_start) {
/* Its inside the virtual address range */
ptep = pmap_pte(kernel_pmap, va);
if (ptep)
pa = mips_tlbpfn_to_paddr(*ptep) |
(va & PAGE_MASK);
} else {
int i;
/*
* its inside the special mapping area, I
* don't think this should happen, but if it
* does I want it toa all work right :-)
* Note if it does happen, we assume the
* caller has the lock? FIXME, this needs to
* be checked FIXEM - RRS.
*/
for (i = 0; i < MAXCPU; i++) {
if ((sysmap_lmem[i].valid1) && ((vm_offset_t)sysmap_lmem[i].CADDR1 == va)) {
pa = mips_tlbpfn_to_paddr(sysmap_lmem[i].CMAP1);
break;
}
if ((sysmap_lmem[i].valid2) && ((vm_offset_t)sysmap_lmem[i].CADDR2 == va)) {
pa = mips_tlbpfn_to_paddr(sysmap_lmem[i].CMAP2);
break;
}
}
}
/* It's inside the virtual address range */
ptep = pmap_pte(kernel_pmap, va);
if (ptep)
pa = mips_tlbpfn_to_paddr(*ptep) |
(va & PAGE_MASK);
}
}
return pa;

View File

@@ -53,109 +53,6 @@
.set noreorder # Noreorder is default style!
/*
* Set/clear software interrupt.
*/
LEAF(setsoftintr0)
mfc0 v0, COP_0_CAUSE_REG # read cause register
nop
or v0, v0, SOFT_INT_MASK_0 # set soft clock interrupt
mtc0 v0, COP_0_CAUSE_REG # save it
j ra
nop
END(setsoftintr0)
LEAF(clearsoftintr0)
mfc0 v0, COP_0_CAUSE_REG # read cause register
nop
and v0, v0, ~SOFT_INT_MASK_0 # clear soft clock interrupt
mtc0 v0, COP_0_CAUSE_REG # save it
j ra
nop
END(clearsoftintr0)
LEAF(setsoftintr1)
mfc0 v0, COP_0_CAUSE_REG # read cause register
nop
or v0, v0, SOFT_INT_MASK_1 # set soft net interrupt
mtc0 v0, COP_0_CAUSE_REG # save it
j ra
nop
END(setsoftintr1)
LEAF(clearsoftintr1)
mfc0 v0, COP_0_CAUSE_REG # read cause register
nop
and v0, v0, ~SOFT_INT_MASK_1 # clear soft net interrupt
mtc0 v0, COP_0_CAUSE_REG # save it
j ra
nop
END(clearsoftintr1)
/*
* Set/change interrupt priority routines.
* These routines return the previous state.
*/
LEAF(restoreintr)
mfc0 t0,COP_0_STATUS_REG
and t1,t0,SR_INT_ENAB
beq a0,t1,1f
xor t0,SR_INT_ENAB
.set noreorder
mtc0 t0,COP_0_STATUS_REG
nop
nop
nop
nop
1:
j ra
nop
END(restoreintr)
/*
* Set/change interrupt priority routines.
* These routines return the previous state.
*/
LEAF(enableintr)
#ifdef TARGET_OCTEON
.set mips64r2
ei v0
and v0, SR_INT_ENAB # return old interrupt enable bit
.set mips0
#else
mfc0 v0, COP_0_STATUS_REG # read status register
nop
or v1, v0, SR_INT_ENAB
mtc0 v1, COP_0_STATUS_REG # enable all interrupts
and v0, SR_INT_ENAB # return old interrupt enable
#endif
j ra
nop
END(enableintr)
LEAF(disableintr)
#ifdef TARGET_OCTEON
.set mips64r2
di v0
and v0, SR_INT_ENAB # return old interrupt enable bit
.set mips0
#else
mfc0 v0, COP_0_STATUS_REG # read status register
nop
and v1, v0, ~SR_INT_ENAB
mtc0 v1, COP_0_STATUS_REG # disable all interrupts
MIPS_CPU_NOP_DELAY
and v0, SR_INT_ENAB # return old interrupt enable
#endif
j ra
nop
END(disableintr)
LEAF(set_intr_mask)
li t0, SR_INT_MASK # 1 means masked so invert.
not a0, a0 # 1 means masked so invert.
@@ -182,17 +79,3 @@ LEAF(get_intr_mask)
nop
END(get_intr_mask)
/*
* u_int32_t mips_cp0_config1_read(void)
*
* Return the current value of the CP0 Config (Select 1) register.
*/
LEAF(mips_cp0_config1_read)
.set push
.set mips32
mfc0 v0, COP_0_CONFIG, 1
j ra
nop
.set pop
END(mips_cp0_config1_read)

View File

@@ -301,16 +301,16 @@ clock_intr(void *arg)
if (cpu_ticks->hard_ticks >= cycles_per_hz) {
cpu_ticks->hard_ticks -= cycles_per_hz;
if (PCPU_GET(cpuid) == 0)
hardclock(USERMODE(tf->sr), tf->pc);
hardclock(TRAPF_USERMODE(tf), tf->pc);
else
hardclock_cpu(USERMODE(tf->sr));
hardclock_cpu(TRAPF_USERMODE(tf));
}
/* Fire statclock at stathz. */
cpu_ticks->stat_ticks += delta;
if (cpu_ticks->stat_ticks >= cycles_per_stathz) {
cpu_ticks->stat_ticks -= cycles_per_stathz;
statclock(USERMODE(tf->sr));
statclock(TRAPF_USERMODE(tf));
}
/* Fire profclock at profhz, but only when needed. */
@@ -318,7 +318,7 @@
if (cpu_ticks->prof_ticks >= cycles_per_profhz) {
cpu_ticks->prof_ticks -= cycles_per_profhz;
if (profprocs != 0)
profclock(USERMODE(tf->sr), tf->pc);
profclock(TRAPF_USERMODE(tf), tf->pc);
}
critical_exit();
#if 0 /* TARGET_OCTEON */

View File

@@ -75,7 +75,6 @@ __FBSDID("$FreeBSD$");
#include <net/netisr.h>
#include <machine/trap.h>
#include <machine/psl.h>
#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/pmap.h>
@@ -272,7 +271,7 @@ extern char *syscallnames[];
* In the case of a kernel trap, we return the pc where to resume if
* p->p_addr->u_pcb.pcb_onfault is set, otherwise, return old pc.
*/
u_int
register_t
trap(struct trapframe *trapframe)
{
int type, usermode;
@@ -293,7 +292,7 @@ trap(struct trapframe *trapframe)
trapdebug_enter(trapframe, 0);
type = (trapframe->cause & CR_EXC_CODE) >> CR_EXC_CODE_SHIFT;
if (USERMODE(trapframe->sr)) {
if (TRAPF_USERMODE(trapframe)) {
type |= T_USER;
usermode = 1;
} else {
@@ -307,9 +306,9 @@
*/
if (trapframe->sr & SR_INT_ENAB) {
set_intr_mask(~(trapframe->sr & ALL_INT_MASK));
enableintr();
intr_enable();
} else {
disableintr();
intr_disable();
}
#ifdef TRAP_DEBUG
@@ -983,9 +982,10 @@
void
trapDump(char *msg)
{
int i, s;
register_t s;
int i;
s = disableintr();
s = intr_disable();
printf("trapDump(%s)\n", msg);
for (i = 0; i < TRAPSIZE; i++) {
if (trp == trapdebug) {
@@ -1003,9 +1003,8 @@ trapDump(char *msg)
printf(" RA %x SP %x code %d\n", trp->ra, trp->sp, trp->code);
}
restoreintr(s);
intr_restore(s);
}
#endif

View File

@@ -116,11 +116,11 @@ count_compare_clockhandler(struct trapframe *tf)
cycles += XLR_CPU_HZ / hz;
mips_wr_compare(cycles);
hardclock_cpu(USERMODE(tf->sr));
hardclock_cpu(TRAPF_USERMODE(tf));
if (count_scale_factor[cpu] == STAT_PROF_CLOCK_SCALE_FACTOR) {
statclock(USERMODE(tf->sr));
statclock(TRAPF_USERMODE(tf));
if (profprocs != 0) {
profclock(USERMODE(tf->sr), tf->pc);
profclock(TRAPF_USERMODE(tf), tf->pc);
}
count_scale_factor[cpu] = 0;
}
@@ -148,11 +148,11 @@ pic_hardclockhandler(struct trapframe *tf)
printf("Clock tick foo at %ld\n", clock_tick_foo);
}
*/
hardclock(USERMODE(tf->sr), tf->pc);
hardclock(TRAPF_USERMODE(tf), tf->pc);
if (scale_factor == STAT_PROF_CLOCK_SCALE_FACTOR) {
statclock(USERMODE(tf->sr));
statclock(TRAPF_USERMODE(tf));
if (profprocs != 0) {
profclock(USERMODE(tf->sr), tf->pc);
profclock(TRAPF_USERMODE(tf), tf->pc);
}
scale_factor = 0;
}