cosmetic changes and style fixes

This commit is contained in:
parent 8c4d5bbc6f
commit 6557990017
@@ -143,7 +143,7 @@
 #define ASI_DMMU_TSB_8KB_PTR_REG 0x59
 #define ASI_DMMU_TSB_64KB_PTR_REG 0x5a
-#define ASI_DMMU_TSB_DIRECT_PTR_REG 0x5b
+#define ASI_DMMU_TSB_DIRECT_PTR_REG 0x5b
 #define ASI_DTLB_DATA_IN_REG 0x5c
 /* US-III Cu: also ASI_DTLB_CAM_ADDRESS_REG */
 #define ASI_DTLB_DATA_ACCESS_REG 0x5d
@@ -1,6 +1,6 @@
 /*-
  * Copyright (c) 1996
- *	The President and Fellows of Harvard College. All rights reserved.
+ *	The President and Fellows of Harvard College. All rights reserved.
  * Copyright (c) 1992, 1993
  *	The Regents of the University of California. All rights reserved.
  *
@@ -43,7 +43,7 @@
  */
 
 #ifndef _MACHINE_CACHE_H_
-#define _MACHINE_CACHE_H_
+#define _MACHINE_CACHE_H_
 
 #ifndef LOCORE
 #include <dev/ofw/openfirm.h>
@@ -77,21 +77,21 @@
 #ifndef LOCORE
 
 /*
- * Cache control information.
+ * Cache control information
  */
 struct cacheinfo {
 	u_int c_enabled; /* true => cache is enabled */
-	u_int ic_size; /* instruction cache */
+	u_int ic_size; /* instruction cache */
 	u_int ic_set;
 	u_int ic_l2set;
-	u_int ic_assoc;
-	u_int ic_linesize;
-	u_int dc_size; /* data cache */
+	u_int ic_assoc;
+	u_int ic_linesize;
+	u_int dc_size; /* data cache */
 	u_int dc_l2size;
-	u_int dc_assoc;
-	u_int dc_linesize;
+	u_int dc_assoc;
+	u_int dc_linesize;
 	u_int ec_size; /* external cache info */
-	u_int ec_assoc;
+	u_int ec_assoc;
 	u_int ec_l2set;
 	u_int ec_linesize;
 	u_int ec_l2linesize;
@@ -35,7 +35,7 @@
 struct thread;
 
 /*
- * membar operand macros for use in other macros when # is a special
+ * Membar operand macros for use in other macros when # is a special
  * character. Keep these in sync with what the hardware expects.
  */
 #define C_Lookaside (0)
@@ -88,8 +88,8 @@ struct thread;
 	__asm __volatile("mov %0, %" __XSTRING(reg) : : "r" (val)); \
 } while (0)
 
-/* Generate ld*a/st*a functions for non-constant ASI's. */
-#define LDNC_GEN(tp, o) \
+/* Generate ld*a/st*a functions for non-constant ASIs. */
+#define LDNC_GEN(tp, o) \
 static __inline tp \
 o ## _nc(caddr_t va, int asi) \
 { \
@@ -116,7 +116,7 @@ LDNC_GEN(u_long, ldxa);
 #define lduwa(va, asi) LD_GENERIC(va, asi, lduwa, u_int)
 #define ldxa(va, asi) LD_GENERIC(va, asi, ldxa, u_long)
 
-#define STNC_GEN(tp, o) \
+#define STNC_GEN(tp, o) \
 static __inline void \
 o ## _nc(caddr_t va, int asi, tp val) \
 { \
@@ -175,9 +175,9 @@ int fasword32(u_long asi, void *addr, uint32_t *val);
 
 /*
  * Macro intended to be used instead of wr(asr23, val, xor) for writing to
- * the TICK_CMPR register in order to avoid a bug in BlackBird CPUs that
+ * the TICK_COMPARE register in order to avoid a bug in BlackBird CPUs that
  * can cause these writes to fail under certain condidtions which in turn
- * causes the hardclock to stop. The workaround is to perform the write
+ * causes the hardclock to stop. The workaround is to perform the write
  * at the beginning of an I-Cache line directly followed by a dummy read.
  */
 #define wrtickcmpr(val, xor) ({ \
@@ -193,13 +193,14 @@ int fasword32(u_long asi, void *addr, uint32_t *val);
 static __inline void
 breakpoint(void)
 {
+
 	__asm __volatile("ta %%xcc, 1" : :);
 }
 
 static __inline register_t
 intr_disable(void)
 {
-	u_long s;
+	register_t s;
 
 	s = rdpr(pstate);
 	wrpr(pstate, s & ~PSTATE_IE, 0);
@@ -209,11 +210,11 @@ intr_disable(void)
 
 /*
  * In some places, it is required that the store is directly followed by a
- * membar #Sync. Don't trust the compiler to not insert instructions in
- * between. We also need to disable interrupts completely.
+ * membar #Sync. Don't trust the compiler to not insert instructions in
+ * between. We also need to disable interrupts completely.
  */
 #define stxa_sync(va, asi, val) do { \
-	u_long s; \
+	register_t s; \
 	s = intr_disable(); \
 	__asm __volatile("stxa %0, [%1] %2; membar #Sync" \
 	    : : "r" (val), "r" (va), "n" (asi)); \
@@ -226,7 +227,7 @@ void ascopyto(caddr_t src, u_long dasi, vm_offset_t dst, size_t len);
 void aszero(u_long asi, vm_offset_t dst, size_t len);
 
 /*
- * Ultrasparc II doesn't implement popc in hardware. Suck.
+ * Ultrasparc II doesn't implement popc in hardware.
 */
 #if 0
 #define HAVE_INLINE_FFS
@@ -49,11 +49,11 @@ struct pmap;
 	struct intr_request *pc_irhead; \
 	struct intr_request **pc_irtail; \
 	struct intr_request *pc_irfree; \
-	struct pmap *pc_pmap; \
+	struct pmap *pc_pmap; \
 	vm_offset_t pc_addr; \
 	u_long pc_tickref; \
 	u_long pc_tickadj; \
-	u_int pc_mid; \
+	u_int pc_mid; \
 	u_int pc_node; \
 	u_int pc_tlb_ctx; \
 	u_int pc_tlb_ctx_max; \
@@ -30,13 +30,10 @@ __FBSDID("$FreeBSD$");
 #include "opt_pmap.h"
 
 #include <sys/param.h>
-#include <sys/linker_set.h>
-#include <sys/proc.h>
-#include <sys/systm.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
+#include <sys/systm.h>
 
 #include <vm/vm.h>
 #include <vm/pmap.h>
@@ -73,8 +70,7 @@ cheetah_dcache_page_inval(vm_paddr_t spa)
 	vm_paddr_t pa;
 	void *cookie;
 
-	KASSERT((spa & PAGE_MASK) == 0,
-	    ("dcache_page_inval: pa not page aligned"));
+	KASSERT((spa & PAGE_MASK) == 0, ("%s: pa not page aligned", __func__));
 	cookie = ipi_dcache_page_inval(tl_ipi_cheetah_dcache_page_inval, spa);
 	for (pa = spa; pa < spa + PAGE_SIZE; pa += cache.dc_linesize)
 		stxa_sync(pa, ASI_DCACHE_INVALIDATE, 0);
@@ -22,10 +22,11 @@
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
- *
- * $FreeBSD$
  */
 
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <machine/clock.h>
@@ -50,9 +51,11 @@ DELAY(int n)
 void
 cpu_startprofclock(void)
 {
+
 }
 
 void
 cpu_stopprofclock(void)
 {
+
 }
@@ -84,7 +84,7 @@ __FBSDID("$FreeBSD$");
 	.register %g7,#ignore
 
 /*
- * Atomically set the reference bit in a tte.
+ * Atomically set the reference bit in a TTE.
 */
 #define TTE_SET_BIT(r1, r2, r3, bit) \
 	add r1, TTE_DATA, r1 ; \
@@ -147,7 +147,7 @@ __FBSDID("$FreeBSD$");
 #define KSTACK_SLOP 1024
 
 /*
- * Sanity check the kernel stack and bail out if its wrong.
+ * Sanity check the kernel stack and bail out if it's wrong.
  * XXX: doesn't handle being on the panic stack.
 */
 #define KSTACK_CHECK \
@@ -287,7 +287,7 @@ END(tl1_kstack_fault)
 	inc 16, ASP_REG
 
 /*
- * For certain faults we need to clear the sfsr mmu register before returning.
+ * For certain faults we need to clear the SFSR mmu register before returning.
 */
 #define RSF_CLR_SFSR \
 	wr %g0, ASI_DMMU, %asi ; \
@@ -429,8 +429,8 @@ END(rsf_fatal)
  * user stack, and with its live registers, so we must save soon. We
  * are on alternate globals so we do have some registers. Set the
  * transitional window state, and do the save. If this traps we
- * we attempt to spill a window to the user stack. If this fails,
- * we spill the window to the pcb and continue. Spilling to the pcb
+ * attempt to spill a window to the user stack. If this fails, we
+ * spill the window to the pcb and continue. Spilling to the pcb
  * must not fail.
 *
 * NOTE: Must be called with alternate globals and clobbers %g1.
@@ -584,7 +584,7 @@ END(tl0_sfsr_trap)
 
 .macro tl0_immu_miss
 	/*
-	 * Load the virtual page number and context from the tag access
+	 * Load the context and the virtual page number from the tag access
 	 * register. We ignore the context.
 	 */
 	wr %g0, ASI_IMMU, %asi
@@ -614,7 +614,7 @@ END(tl0_sfsr_trap)
 	srlx %g1, %g3, %g3
 
 	/*
-	 * Compute the tte bucket address.
+	 * Compute the TTE bucket address.
 	 */
 	ldxa [%g0 + AA_IMMU_TSB] %asi, %g5
 	and %g3, TSB_BUCKET_MASK, %g4
@@ -622,17 +622,17 @@ END(tl0_sfsr_trap)
 	add %g4, %g5, %g4
 
 	/*
-	 * Compute the tte tag target.
+	 * Compute the TTE tag target.
 	 */
 	sllx %g3, TV_SIZE_BITS, %g3
 	or %g3, %g2, %g3
 
 	/*
-	 * Loop over the ttes in this bucket
+	 * Loop over the TTEs in this bucket.
 	 */
 
 	/*
-	 * Load the tte. Note that this instruction may fault, clobbering
+	 * Load the TTE. Note that this instruction may fault, clobbering
 	 * the contents of the tag access register, %g5, %g6, and %g7. We
 	 * do not use %g5, and %g6 and %g7 are not used until this instruction
 	 * completes successfully.
@@ -640,7 +640,7 @@ END(tl0_sfsr_trap)
 2:	ldda [%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
 
 	/*
-	 * Check that its valid and executable and that the tte tags match.
+	 * Check that it's valid and executable and that the TTE tags match.
 	 */
 	brgez,pn %g7, 3f
 	andcc %g7, TD_EXEC, %g0
@@ -650,7 +650,7 @@ END(tl0_sfsr_trap)
 	EMPTY
 
 	/*
-	 * We matched a tte, load the tlb.
+	 * We matched a TTE, load the TLB.
 	 */
 
 	/*
@@ -661,14 +661,14 @@ END(tl0_sfsr_trap)
 	nop
 
 	/*
-	 * Load the tte tag and data into the tlb and retry the instruction.
+	 * Load the TTE tag and data into the TLB and retry the instruction.
 	 */
 	stxa %g1, [%g0 + AA_IMMU_TAR] %asi
 	stxa %g7, [%g0] ASI_ITLB_DATA_IN_REG
 	retry
 
 	/*
-	 * Advance to the next tte in this bucket, and check the low bits
+	 * Advance to the next TTE in this bucket, and check the low bits
 	 * of the bucket pointer to see if we've finished the bucket.
 	 */
 3:	add %g4, 1 << TTE_SHIFT, %g4
@@ -685,7 +685,7 @@ END(tl0_sfsr_trap)
 	add %g2, 1, %g2
 
 	/*
-	 * Not in user tsb, call c code.
+	 * Not in user TSB, call C code.
 	 */
 	ba,a %xcc, tl0_immu_miss_trap
 	.align 128
@@ -704,7 +704,7 @@ ENTRY(tl0_immu_miss_set_ref)
 	nop
 
 	/*
-	 * Load the tte tag and data into the tlb and retry the instruction.
+	 * Load the TTE tag and data into the TLB and retry the instruction.
 	 */
 	stxa %g1, [%g0 + AA_IMMU_TAR] %asi
 	stxa %g2, [%g0] ASI_ITLB_DATA_IN_REG
@@ -742,7 +742,7 @@ END(tl0_immu_miss_trap)
 
 .macro tl0_dmmu_miss
 	/*
-	 * Load the virtual page number and context from the tag access
+	 * Load the context and the virtual page number from the tag access
 	 * register. We ignore the context.
 	 */
 	wr %g0, ASI_DMMU, %asi
@@ -773,7 +773,7 @@ tl1_dmmu_miss_user:
 	srlx %g1, %g3, %g3
 
 	/*
-	 * Compute the tte bucket address.
+	 * Compute the TTE bucket address.
 	 */
 	ldxa [%g0 + AA_DMMU_TSB] %asi, %g5
 	and %g3, TSB_BUCKET_MASK, %g4
@@ -781,17 +781,17 @@ tl1_dmmu_miss_user:
 	add %g4, %g5, %g4
 
 	/*
-	 * Compute the tte tag target.
+	 * Compute the TTE tag target.
 	 */
 	sllx %g3, TV_SIZE_BITS, %g3
 	or %g3, %g2, %g3
 
 	/*
-	 * Loop over the ttes in this bucket
+	 * Loop over the TTEs in this bucket.
 	 */
 
 	/*
-	 * Load the tte. Note that this instruction may fault, clobbering
+	 * Load the TTE. Note that this instruction may fault, clobbering
 	 * the contents of the tag access register, %g5, %g6, and %g7. We
 	 * do not use %g5, and %g6 and %g7 are not used until this instruction
 	 * completes successfully.
@@ -799,7 +799,7 @@ tl1_dmmu_miss_user:
 2:	ldda [%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
 
 	/*
-	 * Check that its valid and that the virtual page numbers match.
+	 * Check that it's valid and that the virtual page numbers match.
 	 */
 	brgez,pn %g7, 3f
 	cmp %g3, %g6
@@ -807,7 +807,7 @@ tl1_dmmu_miss_user:
 	EMPTY
 
 	/*
-	 * We matched a tte, load the tlb.
+	 * We matched a TTE, load the TLB.
 	 */
 
 	/*
@@ -818,14 +818,14 @@ tl1_dmmu_miss_user:
 	nop
 
 	/*
-	 * Load the tte tag and data into the tlb and retry the instruction.
+	 * Load the TTE tag and data into the TLB and retry the instruction.
 	 */
 	stxa %g1, [%g0 + AA_DMMU_TAR] %asi
 	stxa %g7, [%g0] ASI_DTLB_DATA_IN_REG
 	retry
 
 	/*
-	 * Advance to the next tte in this bucket, and check the low bits
+	 * Advance to the next TTE in this bucket, and check the low bits
 	 * of the bucket pointer to see if we've finished the bucket.
 	 */
 3:	add %g4, 1 << TTE_SHIFT, %g4
@@ -842,7 +842,7 @@ tl1_dmmu_miss_user:
 	add %g2, 1, %g2
 
 	/*
-	 * Not in user tsb, call c code.
+	 * Not in user TSB, call C code.
 	 */
 	ba,a %xcc, tl0_dmmu_miss_trap
 	.align 128
@@ -861,7 +861,7 @@ ENTRY(tl0_dmmu_miss_set_ref)
 	nop
 
 	/*
-	 * Load the tte tag and data into the tlb and retry the instruction.
+	 * Load the TTE tag and data into the TLB and retry the instruction.
 	 */
 	stxa %g1, [%g0 + AA_DMMU_TAR] %asi
 	stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG
@@ -930,7 +930,7 @@ END(tl0_dmmu_miss_trap)
 
 ENTRY(tl0_dmmu_prot_1)
 	/*
-	 * Load the virtual page number and context from the tag access
+	 * Load the context and the virtual page number from the tag access
 	 * register. We ignore the context.
 	 */
 	wr %g0, ASI_DMMU, %asi
@@ -961,7 +961,7 @@ tl1_dmmu_prot_user:
 	srlx %g1, %g3, %g3
 
 	/*
-	 * Compute the tte bucket address.
+	 * Compute the TTE bucket address.
 	 */
 	ldxa [%g0 + AA_DMMU_TSB] %asi, %g5
 	and %g3, TSB_BUCKET_MASK, %g4
@@ -969,17 +969,17 @@ tl1_dmmu_prot_user:
 	add %g4, %g5, %g4
 
 	/*
-	 * Compute the tte tag target.
+	 * Compute the TTE tag target.
 	 */
 	sllx %g3, TV_SIZE_BITS, %g3
 	or %g3, %g2, %g3
 
 	/*
-	 * Loop over the ttes in this bucket
+	 * Loop over the TTEs in this bucket.
 	 */
 
 	/*
-	 * Load the tte. Note that this instruction may fault, clobbering
+	 * Load the TTE. Note that this instruction may fault, clobbering
 	 * the contents of the tag access register, %g5, %g6, and %g7. We
 	 * do not use %g5, and %g6 and %g7 are not used until this instruction
 	 * completes successfully.
@@ -987,7 +987,7 @@ tl1_dmmu_prot_user:
 2:	ldda [%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
 
 	/*
-	 * Check that its valid and writable and that the virtual page
+	 * Check that it's valid and writable and that the virtual page
 	 * numbers match.
 	 */
 	brgez,pn %g7, 4f
@@ -1003,7 +1003,7 @@ tl1_dmmu_prot_user:
 	TTE_SET_W(%g4, %g2, %g3)
 
 	/*
-	 * Delete the old TLB entry and clear the sfsr.
+	 * Delete the old TLB entry and clear the SFSR.
 	 */
 	srlx %g1, PAGE_SHIFT, %g3
 	sllx %g3, PAGE_SHIFT, %g3
@@ -1018,7 +1018,7 @@ tl1_dmmu_prot_user:
 	or %g2, TD_W, %g2
 
 	/*
-	 * Load the tte data into the tlb and retry the instruction.
+	 * Load the TTE data into the TLB and retry the instruction.
 	 */
 	stxa %g1, [%g0 + AA_DMMU_TAR] %asi
 	stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG
@@ -1041,7 +1041,7 @@ tl1_dmmu_prot_user:
 	add %g2, 1, %g2
 
 	/*
-	 * Not in user tsb, call c code.
+	 * Not in user TSB, call C code.
 	 */
 	ba,a %xcc, tl0_dmmu_prot_trap
 	nop
@@ -1069,7 +1069,7 @@ ENTRY(tl0_dmmu_prot_trap)
 	nop
 
 	/*
-	 * Load the tar, sfar and sfsr.
+	 * Load the SFAR, SFSR and TAR.
 	 */
 	ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
 	ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
@@ -1095,7 +1095,7 @@ ENTRY(tl0_dmmu_prot_trap)
 1:	RESUME_SPILLFILL_MMU_CLR_SFSR
 
 	/*
-	 * Load the sfar, sfsr and tar. Clear the sfsr.
+	 * Load the SFAR, SFSR and TAR. Clear the SFSR.
 	 */
 	ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
 	ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
@@ -1322,8 +1322,8 @@ END(tl1_sfsr_trap)
 	ldxa [%g0 + AA_IMMU_TAR] %asi, %g5
 
 	/*
-	 * Compute the address of the tte. The tsb mask and address of the
-	 * tsb are patched at startup.
+	 * Compute the address of the TTE. The TSB mask and address of the
+	 * TSB are patched at startup.
 	 */
 	.globl tl1_immu_miss_patch_1
 tl1_immu_miss_patch_1:
@@ -1337,12 +1337,12 @@ tl1_immu_miss_patch_1:
 	add %g6, %g7, %g6
 
 	/*
-	 * Load the tte.
+	 * Load the TTE.
 	 */
 	ldda [%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
 
 	/*
-	 * Check that its valid and executable and that the virtual page
+	 * Check that it's valid and executable and that the virtual page
 	 * numbers match.
 	 */
 	brgez,pn %g7, tl1_immu_miss_trap
@@ -1354,14 +1354,14 @@ tl1_immu_miss_patch_1:
 	EMPTY
 
 	/*
-	 * Set the reference bit if its currently clear.
+	 * Set the reference bit if it's currently clear.
 	 */
 	andcc %g7, TD_REF, %g0
 	bz,a,pn %xcc, tl1_immu_miss_set_ref
 	nop
 
 	/*
-	 * Load the tte data into the TLB and retry the instruction.
+	 * Load the TTE data into the TLB and retry the instruction.
 	 */
 	stxa %g7, [%g0] ASI_ITLB_DATA_IN_REG
 	retry
@@ -1370,8 +1370,8 @@ tl1_immu_miss_patch_1:
 
 ENTRY(tl1_immu_miss_set_ref)
 	/*
-	 * Recompute the tte address, which we clobbered loading the tte. The
-	 * tsb mask and address of the tsb are patched at startup.
+	 * Recompute the TTE address, which we clobbered loading the TTE.
+	 * The TSB mask and address of the TSB are patched at startup.
 	 */
 	.globl tl1_immu_miss_patch_2
 tl1_immu_miss_patch_2:
@@ -1395,7 +1395,7 @@ tl1_immu_miss_patch_2:
 	nop
 
 	/*
-	 * Load the tte data into the TLB and retry the instruction.
+	 * Load the TTE data into the TLB and retry the instruction.
 	 */
 	stxa %g6, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
@@ -1427,7 +1427,7 @@ END(tl1_immu_miss_trap)
 
 	/*
 	 * Extract the context from the contents of the tag access register.
-	 * If its non-zero this is a fault on a user address. Note that the
+	 * If it's non-zero this is a fault on a user address. Note that the
 	 * faulting address is passed in %g1.
 	 */
 	sllx %g5, 64 - TAR_VPN_SHIFT, %g6
@@ -1442,8 +1442,8 @@ END(tl1_immu_miss_trap)
 	EMPTY
 
 	/*
-	 * Compute the address of the tte. The tsb mask and address of the
-	 * tsb are patched at startup.
+	 * Compute the address of the TTE. The TSB mask and address of the
+	 * TSB are patched at startup.
 	 */
 	.globl tl1_dmmu_miss_patch_1
 tl1_dmmu_miss_patch_1:
@@ -1457,12 +1457,12 @@ tl1_dmmu_miss_patch_1:
 	add %g6, %g7, %g6
 
 	/*
-	 * Load the tte.
+	 * Load the TTE.
 	 */
 	ldda [%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
 
 	/*
-	 * Check that its valid and that the virtual page numbers match.
+	 * Check that it's valid and that the virtual page numbers match.
 	 */
 	brgez,pn %g7, tl1_dmmu_miss_trap
 	srlx %g6, TV_SIZE_BITS, %g6
@@ -1471,14 +1471,14 @@ tl1_dmmu_miss_patch_1:
 	EMPTY
 
 	/*
-	 * Set the reference bit if its currently clear.
+	 * Set the reference bit if it's currently clear.
 	 */
 	andcc %g7, TD_REF, %g0
 	bz,a,pt %xcc, tl1_dmmu_miss_set_ref
 	nop
 
 	/*
-	 * Load the tte data into the TLB and retry the instruction.
+	 * Load the TTE data into the TLB and retry the instruction.
 	 */
 	stxa %g7, [%g0] ASI_DTLB_DATA_IN_REG
 	retry
@@ -1487,8 +1487,8 @@ tl1_dmmu_miss_patch_1:
 
 ENTRY(tl1_dmmu_miss_set_ref)
 	/*
-	 * Recompute the tte address, which we clobbered loading the tte. The
-	 * tsb mask and address of the tsb are patched at startup.
+	 * Recompute the TTE address, which we clobbered loading the TTE.
+	 * The TSB mask and address of the TSB are patched at startup.
	 */
 	.globl tl1_dmmu_miss_patch_2
 tl1_dmmu_miss_patch_2:
@@ -1512,7 +1512,7 @@ tl1_dmmu_miss_patch_2:
 	nop
 
 	/*
-	 * Load the tte data into the TLB and retry the instruction.
+	 * Load the TTE data into the TLB and retry the instruction.
 	 */
 	stxa %g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
@@ -1539,9 +1539,9 @@ END(tl1_dmmu_miss_trap)
 ENTRY(tl1_dmmu_miss_direct)
 	/*
 	 * Mask off the high bits of the virtual address to get the physical
-	 * address, and or in the tte bits. The virtual address bits that
-	 * correspond to the tte valid and page size bits are left set, so
-	 * they don't have to be included in the tte bits below. We know they
+	 * address, and or in the TTE bits. The virtual address bits that
+	 * correspond to the TTE valid and page size bits are left set, so
+	 * they don't have to be included in the TTE bits below. We know they
 	 * are set because the virtual address is in the upper va hole.
 	 */
 	setx TLB_DIRECT_TO_TTE_MASK, %g7, %g6
@@ -1549,7 +1549,7 @@ ENTRY(tl1_dmmu_miss_direct)
 	or %g5, TD_CP | TD_CV | TD_W, %g5
 
 	/*
-	 * Load the tte data into the TLB and retry the instruction.
+	 * Load the TTE data into the TLB and retry the instruction.
 	 */
 	stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG
 	retry
@@ -1571,7 +1571,7 @@ ENTRY(tl1_dmmu_prot_1)
 
 	/*
 	 * Extract the context from the contents of the tag access register.
-	 * If its non-zero this is a fault on a user address. Note that the
+	 * If it's non-zero this is a fault on a user address. Note that the
 	 * faulting address is passed in %g1.
 	 */
 	sllx %g5, 64 - TAR_VPN_SHIFT, %g6
@@ -1579,8 +1579,8 @@ ENTRY(tl1_dmmu_prot_1)
 	mov %g5, %g1
 
 	/*
-	 * Compute the address of the tte. The tsb mask and address of the
-	 * tsb are patched at startup.
+	 * Compute the address of the TTE. The TSB mask and address of the
+	 * TSB are patched at startup.
 	 */
 	.globl tl1_dmmu_prot_patch_1
 tl1_dmmu_prot_patch_1:
@@ -1594,12 +1594,12 @@ tl1_dmmu_prot_patch_1:
 	add %g6, %g7, %g6
 
 	/*
-	 * Load the tte.
+	 * Load the TTE.
 	 */
 	ldda [%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
 
 	/*
-	 * Check that its valid and writeable and that the virtual page
+	 * Check that it's valid and writeable and that the virtual page
 	 * numbers match.
 	 */
 	brgez,pn %g7, tl1_dmmu_prot_trap
@@ -1611,7 +1611,7 @@ tl1_dmmu_prot_patch_1:
 	EMPTY
 
 	/*
-	 * Delete the old TLB entry and clear the sfsr.
+	 * Delete the old TLB entry and clear the SFSR.
 	 */
 	sllx %g5, TAR_VPN_SHIFT, %g6
 	or %g6, TLB_DEMAP_NUCLEUS, %g6
@@ -1620,8 +1620,8 @@ tl1_dmmu_prot_patch_1:
 	membar #Sync
 
 	/*
-	 * Recompute the tte address, which we clobbered loading the tte. The
-	 * tsb mask and address of the tsb are patched at startup.
+	 * Recompute the TTE address, which we clobbered loading the TTE.
+	 * The TSB mask and address of the TSB are patched at startup.
 	 */
 	.globl tl1_dmmu_prot_patch_2
 tl1_dmmu_prot_patch_2:
@@ -1645,7 +1645,7 @@ tl1_dmmu_prot_patch_2:
 	or %g6, TD_W, %g6
 
 	/*
-	 * Load the tte data into the TLB and retry the instruction.
+	 * Load the TTE data into the TLB and retry the instruction.
 	 */
 	stxa %g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
@@ -1658,7 +1658,7 @@ ENTRY(tl1_dmmu_prot_trap)
 	wrpr %g0, PSTATE_ALT, %pstate
 
 	/*
-	 * Load the sfar, sfsr and tar. Clear the sfsr.
+	 * Load the SFAR, SFSR and TAR. Clear the SFSR.
 	 */
 	ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
 	ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
@@ -2034,10 +2034,10 @@ tl_trap_end:
 	nop
 
 /*
- * User trap entry point.
+ * User trap entry point
 *
 * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
- *     u_long sfsr)
+ *     u_long sfsr)
 *
 * This handles redirecting a trap back to usermode as a user trap. The user
 * program must have first registered a trap handler with the kernel using
@@ -2080,7 +2080,7 @@ ENTRY(tl0_utrap)
 	 * user traps, which implies that the condition that caused the trap
 	 * in the first place is still valid, so it will occur again when we
 	 * re-execute the trapping instruction.
-	 */
+	 */
 	ldx [PCB_REG + PCB_NSAVED], %l1
 	brnz,a,pn %l1, tl0_trap
 	mov T_SPILL, %o0
@@ -2138,10 +2138,10 @@ ENTRY(tl0_utrap)
 END(tl0_utrap)
 
 /*
- * (Real) User trap entry point.
+ * (Real) User trap entry point
 *
- * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
- *     u_int sfsr)
+ * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfsr,
+ *     u_int sfsr)
 *
 * The following setup has been performed:
 *	- the windows have been split and the active user window has been saved
@@ -2638,7 +2638,7 @@ END(tl0_ret)
 * Kernel trap entry point
 *
 * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
- *     u_int sfsr)
+ *     u_int sfsr)
 *
 * This is easy because the stack is already setup and the windows don't need
 * to be split. We build a trapframe and call trap(), the same as above, but
@@ -36,11 +36,11 @@ __FBSDID("$FreeBSD$");
 	.register %g2,#ignore
 
 	.globl kernbase
-	.set kernbase,KERNBASE
+	.set kernbase, KERNBASE
 
 /*
 * void _start(caddr_t metadata, u_long o1, u_long o2, u_long o3,
- *     u_long ofw_vec)
+ *     u_long ofw_vec)
 */
 ENTRY(btext)
 ENTRY(_start)
@@ -57,8 +57,8 @@ ENTRY(_start)
 	wrpr %g0, 0, %tick
 
 	/*
-	 * Get onto our per-cpu panic stack, which precedes the struct pcpu in
-	 * the per-cpu page.
+	 * Get onto our per-CPU panic stack, which precedes the struct pcpu in
+	 * the per-CPU page.
 	 */
 	SET(pcpu0 + (PCPU_PAGES * PAGE_SIZE) - PC_SIZEOF, %l1, %l0)
 	sub %l0, SPOFF + CCFSZ, %sp
@@ -100,37 +100,37 @@ ENTRY(cpu_setregs)
 	wrpr %g0, PSTATE_NORMAL, %pstate
 
 	/*
-	 * Normal %g6 points to the current thread's pcb, and %g7 points to
-	 * the per-cpu data structure.
+	 * Normal %g6 points to the current thread's PCB, and %g7 points to
+	 * the per-CPU data structure.
 	 */
 	mov %o1, PCB_REG
 	mov %o0, PCPU_REG
 
 	/*
-	 * Alternate globals.
+	 * Switch to alternate globals.
 	 */
 	wrpr %g0, PSTATE_ALT, %pstate
 
 	/*
-	 * Alternate %g5 points to a per-cpu panic stack, %g6 points to the
-	 * current thread's pcb, and %g7 points to the per-cpu data structure.
+	 * Alternate %g5 points to a per-CPU panic stack, %g6 points to the
+	 * current thread's PCB, and %g7 points to the per-CPU data structure.
 	 */
 	mov %o0, ASP_REG
 	mov %o1, PCB_REG
 	mov %o0, PCPU_REG
 
 	/*
-	 * Interrupt globals.
+	 * Switch to interrupt globals.
 	 */
 	wrpr %g0, PSTATE_INTR, %pstate
 
 	/*
-	 * Interrupt %g7 points to the per-cpu data structure.
+	 * Interrupt %g7 points to the per-CPU data structure.
 	 */
 	mov %o0, PCPU_REG
 
 	/*
-	 * Normal globals again.
+	 * Switch to normal globals again.
 	 */
 	wrpr %g0, PSTATE_NORMAL, %pstate
@@ -32,7 +32,7 @@
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
- * from: FreeBSD: src/sys/i386/i386/machdep.c,v 1.477 2001/08/27
+ * from: FreeBSD: src/sys/i386/i386/machdep.c,v 1.477 2001/08/27
 */
 
 #include <sys/cdefs.h>
@@ -154,7 +154,7 @@ cpu_block_zero_t *cpu_block_zero;
 
 static timecounter_get_t tick_get_timecount;
 void sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3,
-    ofw_vec_t *vec);
+    ofw_vec_t *vec);
 void sparc64_shutdown_final(void *dummy, int howto);
 
 static void cpu_startup(void *);
@@ -271,20 +271,20 @@ tick_get_timecount(struct timecounter *tc)
 void
 sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
 {
-	phandle_t child;
-	phandle_t root;
-	char type[8];
-	char *env;
 	struct pcpu *pc;
 	vm_offset_t end;
 	caddr_t kmdp;
+	phandle_t child;
+	phandle_t root;
 	u_int clock;
+	char *env;
+	char type[8];
 
 	end = 0;
 	kmdp = NULL;
 
 	/*
-	 * Find out what kind of cpu we have first, for anything that changes
+	 * Find out what kind of CPU we have first, for anything that changes
 	 * behaviour.
 	 */
 	cpu_impl = VER_IMPL(rdpr(ver));
@@ -340,7 +340,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
 	 */
 	if (mdp == NULL || kmdp == NULL) {
 		printf("sparc64_init: no loader metadata.\n"
-		    "This probably means you are not using loader(8).\n");
+		    "This probably means you are not using loader(8).\n");
 		panic("sparc64_init");
 	}
 
@@ -349,7 +349,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
 	 */
 	if (end == 0) {
 		printf("sparc64_init: warning, kernel end not specified.\n"
-		    "Attempting to continue anyway.\n");
+		    "Attempting to continue anyway.\n");
 		end = (vm_offset_t)_end;
 	}
 
@@ -455,6 +455,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
 void
 set_openfirm_callback(ofw_vec_t *vec)
 {
+
 	ofw_tba = rdpr(tba);
 	ofw_vec = (u_long)vec;
 }
@@ -469,8 +470,8 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
 	struct thread *td;
 	struct frame *fp;
 	struct proc *p;
-	int oonstack;
 	u_long sp;
+	int oonstack;
 	int sig;
 
 	oonstack = 0;
@@ -503,8 +504,8 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
 	get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
 	sf.sf_uc.uc_sigmask = *mask;
 	sf.sf_uc.uc_stack = td->td_sigstk;
-	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
-	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
+	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
+	    ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
 
 	/* Allocate and validate space for the signal handler context. */
 	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
@@ -699,7 +700,7 @@ cpu_shutdown(void *args)
 	openfirmware_exit(args);
 }
 
-/* Get current clock frequency for the given cpu id. */
+/* Get current clock frequency for the given CPU ID. */
 int
 cpu_est_clockrate(int cpu_id, uint64_t *rate)
 {
@@ -752,7 +753,8 @@ sparc64_shutdown_final(void *dummy, int howto)
 void
 cpu_idle(int busy)
 {
-	/* Insert code to halt (until next interrupt) for the idle loop */
+
+	/* Insert code to halt (until next interrupt) for the idle loop. */
 }
 
 int
@@ -774,6 +776,7 @@ ptrace_set_pc(struct thread *td, u_long addr)
 int
 ptrace_single_step(struct thread *td)
 {
+
 	/* TODO; */
 	return (0);
 }
@@ -781,6 +784,7 @@ ptrace_single_step(struct thread *td)
 int
 ptrace_clear_single_step(struct thread *td)
 {
+
 	/* TODO; */
 	return (0);
 }
@@ -104,7 +104,7 @@ ENTRY(mp_startup)
 	UPA_GET_MID(%o0)
 
 #if KTR_COMPILE & KTR_SMP
-	CATR(KTR_SMP, "mp_start: cpu %d entered kernel"
+	CATR(KTR_SMP, "mp_start: CPU %d entered kernel"
 	    , %g1, %g2, %g3, 7, 8, 9)
 	stx %o0, [%g1 + KTR_PARM1]
9:
@@ -129,7 +129,7 @@ ENTRY(mp_startup)
 	nop
 
 #if KTR_COMPILE & KTR_SMP
-	CATR(KTR_SMP, "_mp_start: cpu %d got start signal"
+	CATR(KTR_SMP, "_mp_start: CPU %d got start signal"
 	    , %g1, %g2, %g3, 7, 8, 9)
 	stx %o0, [%g1 + KTR_PARM1]
9:
@@ -139,7 +139,7 @@ ENTRY(mp_startup)
 	clr %l2
 
 	/*
-	 * Map the per-cpu pages.
+	 * Map the per-CPU pages.
 	 */
3:	sllx %l2, TTE_SHIFT, %l3
 	add %l1, %l3, %l3
@@ -160,8 +160,8 @@ ENTRY(mp_startup)
 	nop
 
 	/*
-	 * Get onto our per-cpu panic stack, which precedes the struct pcpu
-	 * in the per-cpu page.
+	 * Get onto our per-CPU panic stack, which precedes the struct pcpu
+	 * in the per-CPU page.
 	 */
 	ldx [%l0 + CSA_PCPU], %l1
 	set PCPU_PAGES * PAGE_SIZE - PC_SIZEOF, %l2
@@ -116,7 +116,7 @@ static int isjbus;
 static volatile u_int shutdown_cpus;
 
 static void cpu_mp_unleash(void *v);
-static void spitfire_ipi_send(u_int, u_long, u_long, u_long);
+static void spitfire_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2);
 static void sun4u_startcpu(phandle_t cpu, void *func, u_long arg);
 static void sun4u_stopself(void);
 
@@ -249,8 +249,8 @@ cpu_mp_start(void)
 	register_t s;
 	vm_offset_t va;
 	phandle_t child;
-	u_int clock;
 	u_int mid;
+	u_int clock;
 	u_int cpuid;
 
 	mtx_init(&ipi_mtx, "ipi", NULL, MTX_SPIN);
@@ -325,8 +325,8 @@ cpu_mp_unleash(void *v)
 	register_t s;
 	vm_offset_t va;
 	vm_paddr_t pa;
-	u_int ctx_min;
 	u_int ctx_inc;
+	u_int ctx_min;
 	int i;
 
 	ctx_min = TLB_CTX_USER_MIN;
@@ -118,13 +118,13 @@ __FBSDID("$FreeBSD$");
 extern struct mtx sched_lock;
 
 /*
- * Virtual and physical address of message buffer.
+ * Virtual and physical address of message buffer
 */
 struct msgbuf *msgbufp;
 vm_paddr_t msgbuf_phys;
 
 /*
- * Map of physical memory reagions.
+ * Map of physical memory reagions
 */
 vm_paddr_t phys_avail[128];
 static struct ofw_mem_region mra[128];
@@ -138,7 +138,7 @@ static vm_offset_t pmap_temp_map_1;
 static vm_offset_t pmap_temp_map_2;
 
 /*
- * First and last available kernel virtual addresses.
+ * First and last available kernel virtual addresses
 */
 vm_offset_t virtual_avail;
 vm_offset_t virtual_end;
@@ -147,7 +147,7 @@ vm_offset_t kernel_vm_end;
 vm_offset_t vm_max_kernel_address;
 
 /*
- * Kernel pmap.
+ * Kernel pmap
 */
 struct pmap kernel_pmap_store;
 
@@ -237,7 +237,7 @@ PMAP_STATS_VAR(pmap_nnew_thread);
 PMAP_STATS_VAR(pmap_nnew_thread_oc);
 
 /*
- * Quick sort callout for comparing memory regions.
+ * Quick sort callout for comparing memory regions
 */
 static int mr_cmp(const void *a, const void *b);
 static int om_cmp(const void *a, const void *b);
@@ -285,14 +285,14 @@ pmap_bootstrap(vm_offset_t ekva)
 	vm_paddr_t pa;
 	vm_size_t physsz;
 	vm_size_t virtsz;
-	ihandle_t pmem;
-	ihandle_t vmem;
+	phandle_t pmem;
+	phandle_t vmem;
 	int sz;
 	int i;
 	int j;
 
 	/*
-	 * Find out what physical memory is available from the prom and
+	 * Find out what physical memory is available from the PROM and
 	 * initialize the phys_avail array. This must be done before
 	 * pmap_bootstrap_alloc is called.
 	 */
@@ -333,7 +333,7 @@ pmap_bootstrap(vm_offset_t ekva)
 
 	/*
 	 * Calculate the size of kernel virtual memory, and the size and mask
-	 * for the kernel tsb.
+	 * for the kernel TSB.
 	 */
 	virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
 	vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
@@ -341,7 +341,7 @@ pmap_bootstrap(vm_offset_t ekva)
 	tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
 
 	/*
-	 * Allocate the kernel tsb and lock it in the tlb.
+	 * Allocate the kernel TSB and lock it in the TLB.
 	 */
 	pa = pmap_bootstrap_alloc(tsb_kernel_size);
 	if (pa & PAGE_MASK_4M)
@@ -404,8 +404,8 @@ pmap_bootstrap(vm_offset_t ekva)
 	}
 
 	/*
-	 * Set the start and end of kva. The kernel is loaded at the first
-	 * available 4 meg super page, so round up to the end of the page.
+	 * Set the start and end of KVA. The kernel is loaded at the first
+	 * available 4MB super page, so round up to the end of the page.
 	 */
 	virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
 	virtual_end = vm_max_kernel_address;
@@ -422,10 +422,10 @@ pmap_bootstrap(vm_offset_t ekva)
 	virtual_avail += PAGE_SIZE * DCACHE_COLORS;
 
 	/*
-	 * Allocate a kernel stack with guard page for thread0 and map it into
-	 * the kernel tsb. We must ensure that the virtual address is coloured
-	 * properly, since we're allocating from phys_avail so the memory won't
-	 * have an associated vm_page_t.
+	 * Allocate a kernel stack with guard page for thread0 and map it
+	 * into the kernel TSB. We must ensure that the virtual address is
+	 * coloured properly, since we're allocating from phys_avail so the
+	 * memory won't have an associated vm_page_t.
 	 */
 	pa = pmap_bootstrap_alloc(roundup(KSTACK_PAGES, DCACHE_COLORS) *
 	    PAGE_SIZE);
@@ -453,7 +453,7 @@ pmap_bootstrap(vm_offset_t ekva)
 	Maxmem = sparc64_btop(phys_avail[i + 1]);
 
 	/*
-	 * Add the prom mappings to the kernel tsb.
+	 * Add the PROM mappings to the kernel TSB.
 	 */
 	if ((vmem = OF_finddevice("/virtual-memory")) == -1)
 		panic("pmap_bootstrap: finddevice /virtual-memory");
@@ -489,8 +489,8 @@ pmap_bootstrap(vm_offset_t ekva)
 	}
 
 	/*
-	 * Get the available physical memory ranges from /memory/reg. These
-	 * are only used for kernel dumps, but it may not be wise to do prom
+	 * Get the available physical memory ranges from /memory/reg. These
+	 * are only used for kernel dumps, but it may not be wise to do PROM
 	 * calls in that situation.
 	 */
 	if ((sz = OF_getproplen(pmem, "reg")) == -1)
@@ -525,13 +525,13 @@ pmap_map_tsb(void)
 	vm_offset_t va;
 	vm_paddr_t pa;
 	u_long data;
-	u_long s;
+	register_t s;
 	int i;
 
 	s = intr_disable();
 
 	/*
-	 * Map the 4mb tsb pages.
+	 * Map the 4MB TSB pages.
 	 */
 	for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) {
 		va = (vm_offset_t)tsb_kernel + i;
@@ -546,7 +546,7 @@ pmap_map_tsb(void)
 
 	/*
 	 * Set the secondary context to be the kernel context (needed for
-	 * fp block operations in the kernel and the cache code).
+	 * FP block operations in the kernel).
 	 */
 	stxa(AA_DMMU_SCXR, ASI_DMMU, TLB_CTX_KERNEL);
 	membar(Sync);
@@ -873,9 +873,9 @@ pmap_kenter(vm_offset_t va, vm_page_t m)
 }
 
 /*
- * Map a wired page into kernel virtual address space. This additionally
- * takes a flag argument wich is or'ed to the TTE data. This is used by
- * bus_space_map().
+ * Map a wired page into kernel virtual address space. This additionally
+ * takes a flag argument wich is or'ed to the TTE data. This is used by
+ * sparc64_bus_mem_map().
 * NOTE: if the mapping is non-cacheable, it's the caller's responsibility
 * to flush entries that might still be in the cache, if applicable.
 */
@@ -1021,7 +1021,7 @@ pmap_pinit(pmap_t pm)
 	PMAP_LOCK_INIT(pm);
 
 	/*
-	 * Allocate kva space for the tsb.
+	 * Allocate KVA space for the TSB.
 	 */
 	if (pm->pm_tsb == NULL) {
 		pm->pm_tsb = (struct tte *)kmem_alloc_nofault(kernel_map,
@@ -1069,7 +1069,7 @@ pmap_release(pmap_t pm)
 	struct pcpu *pc;
 
 	CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p",
-	    pm->pm_context[PCPU_GET(cpuid)], pm->pm_tsb);
+	    pm->pm_context[curcpu], pm->pm_tsb);
 	KASSERT(pmap_resident_count(pm) == 0,
 	    ("pmap_release: resident pages %ld != 0",
 	    pmap_resident_count(pm)));
@@ -1079,7 +1079,7 @@ pmap_release(pmap_t pm)
 	 * When switching, this might lead us to wrongly assume that we need
 	 * not switch contexts because old and new pmap pointer are equal.
 	 * Therefore, make sure that this pmap is not referenced by any PCPU
-	 * pointer any more. This could happen in two cases:
+	 * pointer any more. This could happen in two cases:
 	 * - A process that referenced the pmap is currently exiting on a CPU.
 	 *   However, it is guaranteed to not switch in any more after setting
 	 *   its state to PRS_ZOMBIE.
@@ -1165,7 +1165,7 @@ pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
 	vm_offset_t va;
 
 	CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx",
-	    pm->pm_context[PCPU_GET(cpuid)], start, end);
+	    pm->pm_context[curcpu], start, end);
 	if (PMAP_REMOVE_DONE(pm))
 		return;
 	vm_page_lock_queues();
@@ -1247,7 +1247,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	struct tte *tp;
 
 	CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx",
-	    pm->pm_context[PCPU_GET(cpuid)], sva, eva, prot);
+	    pm->pm_context[curcpu], sva, eva, prot);
 
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 		pmap_remove(pm, sva, eva);
@@ -1326,7 +1326,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 
 	CTR6(KTR_PMAP,
 	    "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
-	    pm->pm_context[PCPU_GET(cpuid)], m, va, pa, prot, wired);
+	    pm->pm_context[curcpu], m, va, pa, prot, wired);
 
 	/*
 	 * If there is an existing mapping, and the physical address has not
@@ -1785,6 +1785,7 @@ pmap_page_wired_mappings(vm_page_t m)
 void
 pmap_remove_pages(pmap_t pm)
 {
+
 }
 
 /*
@@ -1806,18 +1807,15 @@ pmap_page_is_mapped(vm_page_t m)
 }
 
 /*
- * pmap_ts_referenced:
- *	Return a count of reference bits for a page, clearing those bits.
- *	It is not necessary for every reference bit to be cleared, but it
- *	is necessary that 0 only be returned when there are truly no
- *	reference bits set.
- *
+ * Return a count of reference bits for a page, clearing those bits.
+ * It is not necessary for every reference bit to be cleared, but it
+ * is necessary that 0 only be returned when there are truly no
+ * reference bits set.
+ *
- * XXX: The exact number of bits to check and clear is a matter that
- * should be tested and standardized at some point in the future for
- * optimal aging of shared pages.
+ * XXX: The exact number of bits to check and clear is a matter that
+ * should be tested and standardized at some point in the future for
+ * optimal aging of shared pages.
 */
-
 int
 pmap_ts_referenced(vm_page_t m)
 {
@@ -1966,7 +1964,7 @@ pmap_activate(struct thread *td)
 	}
 	PCPU_SET(tlb_ctx, context + 1);
 
-	pm->pm_context[PCPU_GET(cpuid)] = context;
+	pm->pm_context[curcpu] = context;
 	pm->pm_active |= PCPU_GET(cpumask);
 	PCPU_SET(pmap, pm);
 
@@ -1979,11 +1977,12 @@ pmap_activate(struct thread *td)
 }
 
 /*
- * Increase the starting virtual address of the given mapping if a
- * different alignment might result in more superpage mappings.
+ * Increase the starting virtual address of the given mapping if a
+ * different alignment might result in more superpage mappings.
 */
 void
 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
     vm_offset_t *addr, vm_size_t size)
 {
+
 }
@@ -25,10 +25,11 @@
 * SUCH DAMAGE.
 *
 * from: src/sys/i386/isa/prof_machdep.c,v 1.16 2000/07/04 11:25:19
- *
- * $FreeBSD$
 */
 
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
 #ifdef GUPROF
 
 #include <sys/param.h>
@@ -44,15 +45,15 @@ int cputime_bias;
 /*
 * Return the time elapsed since the last call. The units are machine-
 * dependent.
- * XXX: this is not SMP-safe. It should use per-CPU variables; %tick can be
+ * XXX: this is not SMP-safe. It should use per-CPU variables; %tick can be
 * used though.
 */
 int
 cputime(void)
 {
 	u_long count;
-	int delta;
 	static u_long prev_count;
+	int delta;
 
 	count = rd(tick);
 	delta = (int)(count - prev_count);
@@ -76,6 +77,7 @@ startguprof(struct gmonparam *gp)
 void
 stopguprof(struct gmonparam *gp)
 {
+
 	/* Nothing to do. */
 }
@@ -30,13 +30,11 @@ __FBSDID("$FreeBSD$");
 #include "opt_pmap.h"
 
 #include <sys/param.h>
-#include <sys/linker_set.h>
-#include <sys/proc.h>
-#include <sys/systm.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
+#include <sys/systm.h>
 
 #include <vm/vm.h>
 #include <vm/pmap.h>
@@ -91,8 +89,7 @@ spitfire_dcache_page_inval(vm_paddr_t pa)
 	u_long addr;
 	u_long tag;
 
-	KASSERT((pa & PAGE_MASK) == 0,
-	    ("dcache_page_inval: pa not page aligned"));
+	KASSERT((pa & PAGE_MASK) == 0, ("%s: pa not page aligned", __func__));
 	PMAP_STATS_INC(spitfire_dcache_npage_inval);
 	target = pa >> (PAGE_SHIFT - DC_TAG_SHIFT);
 	cookie = ipi_dcache_page_inval(tl_ipi_spitfire_dcache_page_inval, pa);
@@ -120,8 +117,7 @@ spitfire_icache_page_inval(vm_paddr_t pa)
 	void *cookie;
 	u_long addr;
 
-	KASSERT((pa & PAGE_MASK) == 0,
-	    ("icache_page_inval: pa not page aligned"));
+	KASSERT((pa & PAGE_MASK) == 0, ("%s: pa not page aligned", __func__));
 	PMAP_STATS_INC(spitfire_icache_npage_inval);
 	target = pa >> (PAGE_SHIFT - IC_TAG_SHIFT);
 	cookie = ipi_icache_page_inval(tl_ipi_spitfire_icache_page_inval, pa);
@@ -765,7 +765,7 @@ ENTRY(openfirmware)
 END(openfirmware)
 
 /*
- * void ofw_exit(cell_t args[])
+ * void openfirmware_exit(cell_t args[])
 */
 ENTRY(openfirmware_exit)
 	save %sp, -CCFSZ, %sp
@@ -773,14 +773,14 @@ ENTRY(openfirmware_exit)
 	wrpr %g0, PIL_TICK, %pil
 	SET(ofw_tba, %l7, %l5)
 	ldx [%l5], %l5
-	wrpr %l5, 0, %tba ! restore the ofw trap table
+	wrpr %l5, 0, %tba ! restore the OFW trap table
 	SET(ofw_vec, %l7, %l6)
 	ldx [%l6], %l6
 	SET(kstack0 + KSTACK_PAGES * PAGE_SIZE - PCB_SIZEOF, %l7, %l0)
 	sub %l0, SPOFF, %fp ! setup a stack in a locked page
 	sub %l0, SPOFF + CCFSZ, %sp
-	mov AA_DMMU_PCXR, %l3 ! set context 0
-	stxa %g0, [%l3] ASI_DMMU
+	mov AA_DMMU_PCXR, %l3 ! force primary DMMU context 0
+	stxa %g0, [%l3] ASI_DMMU
 	membar #Sync
 	wrpr %g0, 0, %tl ! force trap level 0
 	call %l6
@@ -819,14 +819,14 @@ ENTRY(eintr)
 
 ENTRY(__cyg_profile_func_enter)
 	SET(_gmonparam, %o3, %o2)
-	lduw [%o2 + GM_STATE], %o3
-	cmp %o3, GMON_PROF_OFF
+	lduw [%o2 + GM_STATE], %o3
+	cmp %o3, GMON_PROF_OFF
 	be,a,pn %icc, 1f
 	nop
 	SET(mcount, %o3, %o2)
 	jmpl %o2, %g0
 	nop
-1:	retl
+1:	retl
 	nop
 END(__cyg_profile_func_enter)
@@ -834,14 +834,14 @@ END(__cyg_profile_func_enter)
 
 ENTRY(__cyg_profile_func_exit)
 	SET(_gmonparam, %o3, %o2)
-	lduw [%o2 + GM_STATE], %o3
-	cmp %o3, GMON_PROF_HIRES
+	lduw [%o2 + GM_STATE], %o3
+	cmp %o3, GMON_PROF_HIRES
 	be,a,pn %icc, 1f
 	nop
 	SET(mexitcount, %o3, %o2)
 	jmpl %o2, %g0
 	nop
-1:	retl
+1:	retl
 	nop
 END(__cyg_profile_func_exit)
@@ -169,7 +169,7 @@ ENTRY(cpu_switch)
 
 	/*
 	 * Mark the pmap of the last non-kernel vmspace to run as no longer
-	 * active on this cpu.
+	 * active on this CPU.
 	 */
 	lduw [%l2 + PM_ACTIVE], %l3
 	lduw [PCPU(CPUMASK)], %l4
@@ -186,8 +186,8 @@ ENTRY(cpu_switch)
 	stw %l5, [%l3 + %l4]
 
 	/*
-	 * Find a new tlb context. If we've run out we have to flush all user
-	 * mappings from the tlb and reset the context numbers.
+	 * Find a new TLB context. If we've run out we have to flush all
+	 * user mappings from the TLB and reset the context numbers.
 	 */
3:	lduw [PCPU(TLB_CTX)], %i3
 	lduw [PCPU(TLB_CTX_MAX)], %i4
@@ -215,7 +215,7 @@ ENTRY(cpu_switch)
 	stw %i3, [%i4 + %i5]
 
 	/*
-	 * Mark the pmap as active on this cpu.
+	 * Mark the pmap as active on this CPU.
 	 */
 	lduw [%i2 + PM_ACTIVE], %i4
 	lduw [PCPU(CPUMASK)], %i5
@@ -228,8 +228,8 @@ ENTRY(cpu_switch)
 	stx %i2, [PCPU(PMAP)]
 
 	/*
-	 * Fiddle the hardware bits. Set the tsb registers and install the
-	 * new context number in the cpu.
+	 * Fiddle the hardware bits. Set the TSB registers and install the
+	 * new context number in the CPU.
 	 */
 	ldx [%i2 + PM_TSB], %i4
 	mov AA_DMMU_TSB, %i5
@@ -241,7 +241,7 @@ ENTRY(cpu_switch)
 	membar #Sync
 
 	/*
-	 * Done. Return and load the new process's window from the stack.
+	 * Done, return and load the new process's window from the stack.
 	 */
5:	ret
 	restore
@@ -63,7 +63,7 @@ static int adjust_ticks = 0;
 SYSCTL_INT(_machdep_tick, OID_AUTO, adjust_ticks, CTLFLAG_RD, &adjust_ticks,
     0, "total number of tick interrupts with adjustment");
 
-static void tick_hardclock(struct trapframe *);
+static void tick_hardclock(struct trapframe *tf);
 
 static uint64_t
 tick_cputicks(void)
@@ -80,11 +80,11 @@ cpu_initclocks(void)
 	tick_start();
 }
 
-static __inline void
+static inline void
 tick_process(struct trapframe *tf)
 {
 
-	if (PCPU_GET(cpuid) == 0)
+	if (curcpu == 0)
 		hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
 	else
 		hardclock_cpu(TRAPF_USERMODE(tf));
@@ -96,8 +96,9 @@ tick_process(struct trapframe *tf)
 static void
 tick_hardclock(struct trapframe *tf)
 {
-	u_long adj, s, tick, ref;
+	u_long adj, ref, tick;
 	long delta;
+	register_t s;
 	int count;
 
 	/*
@@ -165,7 +166,8 @@ tick_init(u_long clock)
 void
 tick_start(void)
 {
-	u_long base, s;
+	u_long base;
+	register_t s;
 
 	/*
 	 * Avoid stopping of hardclock in terms of a lost tick interrupt
@@ -179,7 +181,7 @@ tick_start(void)
 		panic("%s: HZ too high, decrease to at least %ld", __func__,
 		    tick_freq / TICK_GRACE);
 
-	if (PCPU_GET(cpuid) == 0)
+	if (curcpu == 0)
 		intr_setup(PIL_TICK, tick_hardclock, -1, NULL, NULL);
 
 	/*
@@ -64,7 +64,7 @@ void
 tlb_context_demap(struct pmap *pm)
 {
 	void *cookie;
-	u_long s;
+	register_t s;
 
 	/*
 	 * It is important that we are not interrupted or preempted while
@@ -80,7 +80,7 @@ tlb_context_demap(struct pmap *pm)
 	PMAP_STATS_INC(tlb_ncontext_demap);
 	cookie = ipi_tlb_context_demap(pm);
 	if (pm->pm_active & PCPU_GET(cpumask)) {
-		KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
+		KASSERT(pm->pm_context[curcpu] != -1,
 		    ("tlb_context_demap: inactive pmap?"));
 		s = intr_disable();
 		stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
@@ -96,12 +96,12 @@ tlb_page_demap(struct pmap *pm, vm_offset_t va)
 {
 	u_long flags;
 	void *cookie;
-	u_long s;
+	register_t s;
 
 	PMAP_STATS_INC(tlb_npage_demap);
 	cookie = ipi_tlb_page_demap(pm, va);
 	if (pm->pm_active & PCPU_GET(cpumask)) {
-		KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
+		KASSERT(pm->pm_context[curcpu] != -1,
 		    ("tlb_page_demap: inactive pmap?"));
 		if (pm == kernel_pmap)
 			flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
@@ -123,12 +123,12 @@ tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
 	vm_offset_t va;
 	void *cookie;
 	u_long flags;
-	u_long s;
+	register_t s;
 
 	PMAP_STATS_INC(tlb_nrange_demap);
 	cookie = ipi_tlb_range_demap(pm, start, end);
 	if (pm->pm_active & PCPU_GET(cpumask)) {
-		KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
+		KASSERT(pm->pm_context[curcpu] != -1,
 		    ("tlb_range_demap: inactive pmap?"));
 		if (pm == kernel_pmap)
 			flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
@@ -35,8 +35,8 @@
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
- * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
- * from: FreeBSD: src/sys/i386/i386/trap.c,v 1.197 2001/07/19
+ * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
+ * from: FreeBSD: src/sys/i386/i386/trap.c,v 1.197 2001/07/19
 */
 
 #include <sys/cdefs.h>
@@ -237,7 +237,7 @@ trap(struct trapframe *tf)
 	register_t addr;
 	ksiginfo_t ksi;
 
-	td = PCPU_GET(curthread);
+	td = curthread;
 
 	CTR4(KTR_TRAP, "trap: %p type=%s (%s) pil=%#lx", td,
 	    trap_msg[tf->tf_type & ~T_KERNEL],
@@ -300,7 +300,7 @@ trap(struct trapframe *tf)
 
 		userret(td, tf);
 		mtx_assert(&Giant, MA_NOTOWNED);
-	} else {
+	} else {
 		KASSERT((tf->tf_type & T_KERNEL) != 0,
 		    ("trap: kernel trap isn't"));
 
@@ -349,7 +349,7 @@ trap(struct trapframe *tf)
 				break;
 			}
 		}
-		error = 1;
+		error = 1;
 		break;
 	case T_DATA_ERROR:
 		/*
@@ -408,7 +408,7 @@ trap_pfault(struct thread *td, struct trapframe *tf)
 	va = TLB_TAR_VA(tf->tf_tar);
 
 	CTR4(KTR_TRAP, "trap_pfault: td=%p pm_ctx=%#lx va=%#lx ctx=%#lx",
-	    td, p->p_vmspace->vm_pmap.pm_context[PCPU_GET(cpuid)], va, ctx);
+	    td, p->p_vmspace->vm_pmap.pm_context[curcpu], va, ctx);
 
 	if (type == T_DATA_PROTECTION) {
 		prot = VM_PROT_WRITE;
@@ -424,7 +424,7 @@ trap_pfault(struct thread *td, struct trapframe *tf)
 	if (ctx != TLB_CTX_KERNEL) {
 		if ((tf->tf_tstate & TSTATE_PRIV) != 0 &&
 		    (tf->tf_tpc >= (u_long)fs_nofault_intr_begin &&
-		    tf->tf_tpc <= (u_long)fs_nofault_intr_end)) {
+		    tf->tf_tpc <= (u_long)fs_nofault_intr_end)) {
 			tf->tf_tpc = (u_long)fs_fault;
 			tf->tf_tnpc = tf->tf_tpc + 4;
 			return (0);
@@ -462,8 +462,8 @@ trap_pfault(struct thread *td, struct trapframe *tf)
 	    ("trap_pfault: fault on nucleus context from user mode"));
 
 	/*
-	 * Don't have to worry about process locking or stacks in the
-	 * kernel.
+	 * We don't have to worry about process locking or stacks in
+	 * the kernel.
 	 */
 	rv = vm_fault(kernel_map, va, prot, VM_FAULT_NORMAL);
 }
@@ -512,7 +512,7 @@ syscall(struct trapframe *tf)
 	int narg;
 	int error;
 
-	td = PCPU_GET(curthread);
+	td = curthread;
 	KASSERT(td != NULL, ("trap: curthread NULL"));
 	KASSERT(td->td_proc != NULL, ("trap: curproc NULL"));
 
@@ -544,19 +544,19 @@ syscall(struct trapframe *tf)
 	 */
 #if 0
 		(*p->p_sysent->sv_prepsyscall)(tf, args, &code, &params);
-#endif
-	} else if (code == SYS_syscall || code == SYS___syscall) {
+#endif
+	} else if (code == SYS_syscall || code == SYS___syscall) {
 		code = tf->tf_out[reg++];
 		regcnt--;
 	}
 
-	if (p->p_sysent->sv_mask)
-		code &= p->p_sysent->sv_mask;
+	if (p->p_sysent->sv_mask)
+		code &= p->p_sysent->sv_mask;
 
-	if (code >= p->p_sysent->sv_size)
-		callp = &p->p_sysent->sv_table[0];
-	else
-		callp = &p->p_sysent->sv_table[code];
+	if (code >= p->p_sysent->sv_size)
+		callp = &p->p_sysent->sv_table[0];
+	else
+		callp = &p->p_sysent->sv_table[code];
 
 	narg = callp->sy_narg;
 
@@ -599,7 +599,7 @@ syscall(struct trapframe *tf)
 		    error, syscallnames[code], td->td_retval[0],
 		    td->td_retval[1]);
 	}
-
+
 	/*
 	 * MP SAFE (we may or may not have the MP lock at this point)
 	 */
@@ -623,11 +623,11 @@ syscall(struct trapframe *tf)
 		break;
 
 	default:
-		if (p->p_sysent->sv_errsize) {
-			if (error >= p->p_sysent->sv_errsize)
-				error = -1; /* XXX */
-			else
-				error = p->p_sysent->sv_errtbl[error];
+		if (p->p_sysent->sv_errsize) {
+			if (error >= p->p_sysent->sv_errsize)
+				error = -1; /* XXX */
+			else
+				error = p->p_sysent->sv_errtbl[error];
 		}
 		tf->tf_out[0] = error;
 		tf->tf_tstate |= TSTATE_XCC_C;