Flesh out the sparc64 port considerably. This contains:

- mostly complete kernel pmap support, and tested but currently turned
  off userland pmap support
- low level assembly language trap, context switching and support code
- fully implemented atomic.h and supporting cpufunc.h
- some support for kernel debugging with ddb
- various header tweaks and filling out of machine dependent structures
This commit is contained in:
Jake Burkholder 2001-07-31 06:05:05 +00:00
parent 98bb5304e1
commit 89bf8575ee
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=80709
46 changed files with 6935 additions and 483 deletions

93
sys/sparc64/include/asi.h Normal file
View File

@ -0,0 +1,93 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_ASI_H_
#define _MACHINE_ASI_H_
/*
* Standard v9 asis
*
* Address space identifiers, used as the immediate operand of alternate
* space load/store instructions (lda/sta/ldxa/stxa ...).
*/
#define ASI_N 0x4 /* nucleus */
#define ASI_NL 0xc /* nucleus, little endian */
#define ASI_AIUP 0x10 /* as if user, primary */
#define ASI_AIUS 0x11 /* as if user, secondary */
#define ASI_AIUSL 0x19 /* as if user, secondary, little endian */
#define ASI_P 0x80 /* primary */
#define ASI_S 0x81 /* secondary */
#define ASI_PNF 0x82 /* primary, no fault */
#define ASI_SNF 0x83 /* secondary, no fault */
#define ASI_PL 0x88 /* primary, little endian */
#define ASI_PNFL 0x8a /* primary, no fault, little endian */
#define ASI_SNFL 0x8b /* secondary, no fault, little endian */
/*
* UltraSPARC extensions
*
* The AA_* values are not asis themselves; they are the virtual address
* offsets of the internal registers accessed through the enclosing
* ASI_IMMU/ASI_DMMU asi.
*/
#define ASI_PHYS_USE_EC 0x14 /* physical address, cacheable */
#define ASI_PHYS_BYPASS_EC_WITH_EBIT 0x15 /* physical address, side effect bit set */
#define ASI_PHYS_USE_EC_L 0x1c /* as 0x14, little endian */
#define ASI_PHYS_BYPASS_EC_WITH_EBIT_L 0x1d /* as 0x15, little endian */
#define ASI_NUCLEUS_QUAD_LDD 0x24 /* 128-bit atomic ldda */
#define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* 128-bit atomic ldda, little endian */
#define ASI_IMMU 0x50 /* instruction mmu registers */
#define AA_IMMU_TTR 0x0 /* tag target register */
#define AA_IMMU_SFSR 0x18 /* synchronous fault status */
#define AA_IMMU_TSB 0x28 /* tsb register */
#define AA_IMMU_TAR 0x30 /* tag access register */
#define ASI_IMMU_TSB_8KB_PTR_REG 0x51
#define ASI_IMMU_TSB_64KB_PTR_REG 0x52
#define ASI_ITLB_DATA_IN_REG 0x54
#define ASI_ITLB_DATA_ACCESS_REG 0x55
#define ASI_ITLB_TAG_READ_REG 0x56
#define ASI_IMMU_DEMAP 0x57
#define ASI_DMMU_TAG_TARGET_REG 0x58 /* tag target is va 0 of ASI_DMMU */
#define ASI_DMMU 0x58 /* data mmu registers */
#define AA_DMMU_TTR 0x0 /* tag target register */
#define AA_DMMU_PCXR 0x8 /* primary context register */
#define AA_DMMU_SCXR 0x10 /* secondary context register */
#define AA_DMMU_SFSR 0x18 /* synchronous fault status */
#define AA_DMMU_SFAR 0x20 /* synchronous fault address */
#define AA_DMMU_TSB 0x28 /* tsb register */
#define AA_DMMU_TAR 0x30 /* tag access register */
#define AA_DMMU_VWPR 0x38 /* virtual watchpoint register */
#define AA_DMMU_PWPR 0x40 /* physical watchpoint register */
#define ASI_DMMU_TSB_8KB_PTR_REG 0x59
#define ASI_DMMU_TSB_64KB_PTR_REG 0x5a
#define ASI_DMMU_TSB_DIRECT_PTR_REG 0x5b
#define ASI_DTLB_DATA_IN_REG 0x5c
#define ASI_DTLB_DATA_ACCESS_REG 0x5d
#define ASI_DTLB_TAG_READ_REG 0x5e
#define ASI_DMMU_DEMAP 0x5f
#endif /* !_MACHINE_ASI_H_ */

View File

@ -0,0 +1,64 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_ASMACROS_H_
#define _MACHINE_ASMACROS_H_
#ifdef _KERNEL
/*
* Access a member of the per-cpu globaldata through the pcpu register,
* %g7. The GD_* member offsets are presumably generated from struct
* globaldata by genassym -- TODO confirm.
*/
#define PCPU(member) %g7 + GD_ ## member
/*
* Drop into the debugger from assembler.
*/
#define DEBUGGER() ta %xcc, 1
/*
* Panic from assembler: emit the message string into .rodata, load its
* address into %o0 (using reg as a scratch register for setx) and call
* panic(). Does not return.
*/
#define PANIC(msg, reg) \
.sect .rodata ; \
9: .asciz msg ; \
.previous ; \
setx 9b, reg, %o0 ; \
call panic ; \
nop
#endif
/*
* Declare a global data object with proper symbol type and visibility.
*/
#define DATA(name) \
.data ; \
.globl name ; \
.type name, @object ; \
name ## :
#define EMPTY
/*
* Declare a global function entry point, aligned for instruction fetch.
*/
#define ENTRY(name) \
.text ; \
.align 4 ; \
.globl name ; \
.type name, @function ; \
name ## :
/*
* Mark the end of a function, recording its size in the symbol table.
*/
#define END(name) \
.size name, . - name
#endif /* !_MACHINE_ASMACROS_H_ */

View File

@ -26,327 +26,241 @@
* $FreeBSD$
*/
/*
* This is not atomic. It is just a stub to make things compile.
*/
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_
#define __atomic_op(p, op, v) ({ \
__typeof(*p) __v = (__typeof(*p))v; \
*p op __v; \
#include <machine/cpufunc.h>
/*
* Various simple arithmetic on memory which is atomic in the presence
* of interrupts and multiple processors. See atomic(9) for details.
* Note that efficient hardware support exists only for the 32 and 64
* bit variants; the 8 and 16 bit versions are not provided and should
* not be used in MI code.
*
* This implementation takes advantage of the fact that the sparc64
* cas instruction is both a load and a store. The loop is often coded
* as follows:
*
* do {
* expect = *p;
* new = expect + 1;
* } while (cas(p, expect, new) != expect);
*
* which performs an unnecessary load on each iteration that the cas
* operation fails. Modified as follows:
*
* expect = *p;
* for (;;) {
* new = expect + 1;
* result = cas(p, expect, new);
* if (result == expect)
* break;
* expect = result;
* }
*
* the return value of cas is used to avoid the extra reload. At the
* time of writing, with gcc version 2.95.3, the branch for the if
* statement is predicted incorrectly as not taken, rather than taken.
* It is expected that the branch prediction hints available in gcc 3.0,
* __builtin_expect, will allow better code to be generated.
*
* The memory barriers provided by the acq and rel variants are intended
* to be sufficient for use of relaxed memory ordering. Due to the
* suggested assembly syntax of the membar operands containing a #
* character, they cannot be used in macros. The cmask and mmask bits
* are hard coded in machine/cpufunc.h and used here through macros.
* Hopefully sun will choose not to change the bit numbers.
*/
#define itype(sz) u_int ## sz ## _t
#define atomic_cas_32(p, e, s) casa(p, e, s, ASI_N)
#define atomic_cas_64(p, e, s) casxa(p, e, s, ASI_N)
#define atomic_cas(p, e, s, sz) \
atomic_cas_ ## sz(p, e, s)
#define atomic_cas_acq(p, e, s, sz) ({ \
itype(sz) v; \
v = atomic_cas(p, e, s, sz); \
membar(LoadLoad | LoadStore); \
v; \
})
#define __atomic_load(p) ({ \
__typeof(*p) __v; \
__v = *p; \
__v; \
#define atomic_cas_rel(p, e, s, sz) ({ \
itype(sz) v; \
membar(LoadStore | StoreStore); \
v = atomic_cas(p, e, s, sz); \
v; \
})
#define __atomic_load_clear(p) ({ \
__typeof(*p) __v; \
__v = *p; \
*p = 0; \
__v; \
})
#define __atomic_cas(p, e, s) ({ \
u_int __v; \
if (*p == (__typeof(*p))e) { \
*p = (__typeof(*p))s; \
__v = 1; \
} else { \
__v = 0; \
#define atomic_op(p, op, v, sz) do { \
itype(sz) e, r, s; \
for (e = *(volatile itype(sz) *)p;; e = r) { \
s = e op v; \
r = atomic_cas_ ## sz(p, e, s); \
if (r == e) \
break; \
} \
__v; \
} while (0)
#define atomic_op_acq(p, op, v, sz) do { \
atomic_op(p, op, v, sz); \
membar(LoadLoad | LoadStore); \
} while (0)
#define atomic_op_rel(p, op, v, sz) do { \
membar(LoadStore | StoreStore); \
atomic_op(p, op, v, sz); \
} while (0)
#define atomic_load_acq(p, sz) ({ \
itype(sz) v; \
v = atomic_cas_ ## sz(p, 0, 0); \
membar(LoadLoad | LoadStore); \
v; \
})
#define __atomic_op_8(p, op, v) __atomic_op(p, op, v)
#define __atomic_op_16(p, op, v) __atomic_op(p, op, v)
#define __atomic_op_32(p, op, v) __atomic_op(p, op, v)
#define __atomic_load_32(p) __atomic_load(p)
#define __atomic_load_clear_32(p) __atomic_load_clear(p)
#define __atomic_cas_32(p, e, s) __atomic_cas(p, e, s)
#define __atomic_op_64(p, op, v) __atomic_op(p, op, v)
#define __atomic_load_64(p) __atomic_load(p)
#define __atomic_load_clear_64(p) __atomic_load_clear(p)
#define __atomic_cas_64(p, e, s) __atomic_cas(p, e, s)
#define atomic_load_clear(p, sz) ({ \
itype(sz) e, r; \
for (e = *(volatile itype(sz) *)p;; e = r) { \
r = atomic_cas_ ## sz(p, e, 0); \
if (r == e) \
break; \
} \
e; \
})
#define atomic_add_8(p, v) __atomic_op_8(p, +=, v)
#define atomic_subtract_8(p, v) __atomic_op_8(p, -=, v)
#define atomic_set_8(p, v) __atomic_op_8(p, |=, v)
#define atomic_clear_8(p, v) __atomic_op_8(p, &=, ~v)
#define atomic_store_8(p, v) __atomic_op_8(p, =, v)
#define atomic_store_rel(p, v, sz) do { \
itype(sz) e, r; \
membar(LoadStore | StoreStore); \
for (e = *(volatile itype(sz) *)p;; e = r) { \
r = atomic_cas_ ## sz(p, e, v); \
if (r == e) \
break; \
} \
} while (0)
#define atomic_add_16(p, v) __atomic_op_16(p, +=, v)
#define atomic_subtract_16(p, v) __atomic_op_16(p, -=, v)
#define atomic_set_16(p, v) __atomic_op_16(p, |=, v)
#define atomic_clear_16(p, v) __atomic_op_16(p, &=, ~v)
#define atomic_store_16(p, v) __atomic_op_16(p, =, v)
/*
* Generate the complete atomic(9) interface for one type: add, clear,
* cmpset, load_acq, readandclear, set, subtract and store_rel, each with
* _acq and _rel variants where applicable. 'name' is the function name
* suffix, 'ptype' the pointer parameter type, 'vtype' the value type,
* 'atype' the arithmetic operand type, and 'sz' (32 or 64) selects the
* underlying cas width. All operations are built on the atomic_cas_*,
* atomic_op*, atomic_load_clear and atomic_store_rel helper macros
* defined above.
*/
#define ATOMIC_GEN(name, ptype, vtype, atype, sz) \
\
static __inline void \
atomic_add_ ## name(volatile ptype p, atype v) \
{ \
atomic_op(p, +, v, sz); \
} \
static __inline void \
atomic_add_acq_ ## name(volatile ptype p, atype v) \
{ \
atomic_op_acq(p, +, v, sz); \
} \
static __inline void \
atomic_add_rel_ ## name(volatile ptype p, atype v) \
{ \
atomic_op_rel(p, +, v, sz); \
} \
\
static __inline void \
atomic_clear_ ## name(volatile ptype p, atype v) \
{ \
atomic_op(p, &, ~v, sz); \
} \
static __inline void \
atomic_clear_acq_ ## name(volatile ptype p, atype v) \
{ \
atomic_op_acq(p, &, ~v, sz); \
} \
static __inline void \
atomic_clear_rel_ ## name(volatile ptype p, atype v) \
{ \
atomic_op_rel(p, &, ~v, sz); \
} \
\
static __inline int \
atomic_cmpset_ ## name(volatile ptype p, vtype e, vtype s) \
{ \
return (((vtype)atomic_cas(p, e, s, sz)) == e); \
} \
static __inline int \
atomic_cmpset_acq_ ## name(volatile ptype p, vtype e, vtype s) \
{ \
return (((vtype)atomic_cas_acq(p, e, s, sz)) == e); \
} \
static __inline int \
atomic_cmpset_rel_ ## name(volatile ptype p, vtype e, vtype s) \
{ \
return (((vtype)atomic_cas_rel(p, e, s, sz)) == e); \
} \
\
static __inline vtype \
atomic_load_acq_ ## name(volatile ptype p) \
{ \
return ((vtype)atomic_cas_acq(p, 0, 0, sz)); \
} \
\
static __inline vtype \
atomic_readandclear_ ## name(volatile ptype p) \
{ \
return ((vtype)atomic_load_clear(p, sz)); \
} \
\
static __inline void \
atomic_set_ ## name(volatile ptype p, atype v) \
{ \
atomic_op(p, |, v, sz); \
} \
static __inline void \
atomic_set_acq_ ## name(volatile ptype p, atype v) \
{ \
atomic_op_acq(p, |, v, sz); \
} \
static __inline void \
atomic_set_rel_ ## name(volatile ptype p, atype v) \
{ \
atomic_op_rel(p, |, v, sz); \
} \
\
static __inline void \
atomic_subtract_ ## name(volatile ptype p, atype v) \
{ \
atomic_op(p, -, v, sz); \
} \
static __inline void \
atomic_subtract_acq_ ## name(volatile ptype p, atype v) \
{ \
atomic_op_acq(p, -, v, sz); \
} \
static __inline void \
atomic_subtract_rel_ ## name(volatile ptype p, atype v) \
{ \
atomic_op_rel(p, -, v, sz); \
} \
\
static __inline void \
atomic_store_rel_ ## name(volatile ptype p, vtype v) \
{ \
atomic_store_rel(p, v, sz); \
}
#define atomic_add_32(p, v) __atomic_op_32(p, +=, v)
#define atomic_subtract_32(p, v) __atomic_op_32(p, -=, v)
#define atomic_set_32(p, v) __atomic_op_32(p, |=, v)
#define atomic_clear_32(p, v) __atomic_op_32(p, &=, ~v)
#define atomic_store_32(p, v) __atomic_op_32(p, =, v)
#define atomic_load_32(p) __atomic_load_32(p)
#define atomic_readandclear_32(p) __atomic_load_clear_32(p)
#define atomic_cmpset_32(p, e, s) __atomic_cas_32(p, e, s)
ATOMIC_GEN(int, int *, int, int, 32);
ATOMIC_GEN(32, int *, int, int, 32);
#define atomic_add_64(p, v) __atomic_op_64(p, +=, v)
#define atomic_subtract_64(p, v) __atomic_op_64(p, -=, v)
#define atomic_set_64(p, v) __atomic_op_64(p, |=, v)
#define atomic_clear_64(p, v) __atomic_op_64(p, &=, ~v)
#define atomic_store_64(p, v) __atomic_op_64(p, =, v)
#define atomic_load_64(p) __atomic_load_64(p)
#define atomic_readandclear_64(p) __atomic_load_clear_64(p)
#define atomic_cmpset_64(p, e, s) __atomic_cas_64(p, e, s)
ATOMIC_GEN(long, long *, long, long, 64);
ATOMIC_GEN(64, long *, long, long, 64);
#define atomic_add_acq_8(p, v) __atomic_op_8(p, +=, v)
#define atomic_subtract_acq_8(p, v) __atomic_op_8(p, -=, v)
#define atomic_set_acq_8(p, v) __atomic_op_8(p, |=, v)
#define atomic_clear_acq_8(p, v) __atomic_op_8(p, &=, ~v)
#define atomic_store_acq_8(p, v) __atomic_op_8(p, =, v)
ATOMIC_GEN(ptr, void *, void *, uintptr_t, 64);
#define atomic_add_acq_16(p, v) __atomic_op_16(p, +=, v)
#define atomic_subtract_acq_16(p, v) __atomic_op_16(p, -=, v)
#define atomic_set_acq_16(p, v) __atomic_op_16(p, |=, v)
#define atomic_clear_acq_16(p, v) __atomic_op_16(p, &=, ~v)
#define atomic_store_acq_16(p, v) __atomic_op_16(p, =, v)
#define atomic_add_acq_32(p, v) __atomic_op_32(p, +=, v)
#define atomic_subtract_acq_32(p, v) __atomic_op_32(p, -=, v)
#define atomic_set_acq_32(p, v) __atomic_op_32(p, |=, v)
#define atomic_clear_acq_32(p, v) __atomic_op_32(p, &=, ~v)
#define atomic_store_acq_32(p, v) __atomic_op_32(p, =, v)
#define atomic_load_acq_32(p) __atomic_load_32(p)
#define atomic_cmpset_acq_32(p, e, s) __atomic_cas_32(p, e, s)
#define atomic_add_acq_64(p, v) __atomic_op_64(p, +=, v)
#define atomic_subtract_acq_64(p, v) __atomic_op_64(p, -=, v)
#define atomic_set_acq_64(p, v) __atomic_op_64(p, |=, v)
#define atomic_clear_acq_64(p, v) __atomic_op_64(p, &=, ~v)
#define atomic_store_acq_64(p, v) __atomic_op_64(p, =, v)
#define atomic_load_acq_64(p) __atomic_load_64(p)
#define atomic_cmpset_acq_64(p, e, s) __atomic_cas_64(p, e, s)
#define atomic_add_rel_8(p, v) __atomic_op_8(p, +=, v)
#define atomic_subtract_rel_8(p, v) __atomic_op_8(p, -=, v)
#define atomic_set_rel_8(p, v) __atomic_op_8(p, |=, v)
#define atomic_clear_rel_8(p, v) __atomic_op_8(p, &=, ~v)
#define atomic_store_rel_8(p, v) __atomic_op_8(p, =, v)
#define atomic_add_rel_16(p, v) __atomic_op_16(p, +=, v)
#define atomic_subtract_rel_16(p, v) __atomic_op_16(p, -=, v)
#define atomic_set_rel_16(p, v) __atomic_op_16(p, |=, v)
#define atomic_clear_rel_16(p, v) __atomic_op_16(p, &=, ~v)
#define atomic_store_rel_16(p, v) __atomic_op_16(p, =, v)
#define atomic_add_rel_32(p, v) __atomic_op_32(p, +=, v)
#define atomic_subtract_rel_32(p, v) __atomic_op_32(p, -=, v)
#define atomic_set_rel_32(p, v) __atomic_op_32(p, |=, v)
#define atomic_clear_rel_32(p, v) __atomic_op_32(p, &=, ~v)
#define atomic_store_rel_32(p, v) __atomic_op_32(p, =, v)
#define atomic_cmpset_rel_32(p, e, s) __atomic_cas_32(p, e, s)
#define atomic_add_rel_64(p, v) __atomic_op_64(p, +=, v)
#define atomic_subtract_rel_64(p, v) __atomic_op_64(p, -=, v)
#define atomic_set_rel_64(p, v) __atomic_op_64(p, |=, v)
#define atomic_clear_rel_64(p, v) __atomic_op_64(p, &=, ~v)
#define atomic_store_rel_64(p, v) __atomic_op_64(p, =, v)
#define atomic_cmpset_rel_64(p, e, s) __atomic_cas_64(p, e, s)
#define atomic_add_char(p, v) __atomic_op_8(p, +=, v)
#define atomic_subtract_char(p, v) __atomic_op_8(p, -=, v)
#define atomic_set_char(p, v) __atomic_op_8(p, |=, v)
#define atomic_clear_char(p, v) __atomic_op_8(p, &=, ~v)
#define atomic_store_char(p, v) __atomic_op_8(p, =, v)
#define atomic_add_short(p, v) __atomic_op_16(p, +=, v)
#define atomic_subtract_short(p, v) __atomic_op_16(p, -=, v)
#define atomic_set_short(p, v) __atomic_op_16(p, |=, v)
#define atomic_clear_short(p, v) __atomic_op_16(p, &=, ~v)
#define atomic_store_short(p, v) __atomic_op_16(p, =, v)
#define atomic_add_int(p, v) __atomic_op_32(p, +=, v)
#define atomic_subtract_int(p, v) __atomic_op_32(p, -=, v)
#define atomic_set_int(p, v) __atomic_op_32(p, |=, v)
#define atomic_clear_int(p, v) __atomic_op_32(p, &=, ~v)
#define atomic_store_int(p, v) __atomic_op_32(p, =, v)
#define atomic_load_int(p) __atomic_load_32(p)
#define atomic_readandclear_int(p) __atomic_load_clear_32(p)
#define atomic_cmpset_int(p, e, s) __atomic_cas_32(p, e, s)
#define atomic_add_long(p, v) __atomic_op_64(p, +=, v)
#define atomic_subtract_long(p, v) __atomic_op_64(p, -=, v)
#define atomic_set_long(p, v) __atomic_op_64(p, |=, v)
#define atomic_clear_long(p, v) __atomic_op_64(p, &=, ~v)
#define atomic_store_long(p, v) __atomic_op_64(p, =, v)
#define atomic_load_long(p) __atomic_load_64(p)
#define atomic_readandclear_long(p) __atomic_load_clear_64(p)
#define atomic_cmpset_long(p, e, s) __atomic_cas_64(p, e, s)
#define atomic_add_acq_char(p, v) __atomic_op_8(p, +=, v)
#define atomic_subtract_acq_char(p, v) __atomic_op_8(p, -=, v)
#define atomic_set_acq_char(p, v) __atomic_op_8(p, |=, v)
#define atomic_clear_acq_char(p, v) __atomic_op_8(p, &=, ~v)
#define atomic_store_acq_char(p, v) __atomic_op_8(p, =, v)
#define atomic_add_acq_short(p, v) __atomic_op_16(p, +=, v)
#define atomic_subtract_acq_short(p, v) __atomic_op_16(p, -=, v)
#define atomic_set_acq_short(p, v) __atomic_op_16(p, |=, v)
#define atomic_clear_acq_short(p, v) __atomic_op_16(p, &=, ~v)
#define atomic_store_acq_short(p, v) __atomic_op_16(p, =, v)
#define atomic_add_acq_int(p, v) __atomic_op_32(p, +=, v)
#define atomic_subtract_acq_int(p, v) __atomic_op_32(p, -=, v)
#define atomic_set_acq_int(p, v) __atomic_op_32(p, |=, v)
#define atomic_clear_acq_int(p, v) __atomic_op_32(p, &=, ~v)
#define atomic_store_acq_int(p, v) __atomic_op_32(p, =, v)
#define atomic_load_acq_int(p) __atomic_load_32(p)
#define atomic_cmpset_acq_int(p, e, s) __atomic_cas_32(p, e, s)
#define atomic_add_acq_long(p, v) __atomic_op_64(p, +=, v)
#define atomic_subtract_acq_long(p, v) __atomic_op_64(p, -=, v)
#define atomic_set_acq_long(p, v) __atomic_op_64(p, |=, v)
#define atomic_clear_acq_long(p, v) __atomic_op_64(p, &=, ~v)
#define atomic_store_acq_long(p, v) __atomic_op_64(p, =, v)
#define atomic_load_acq_long(p) __atomic_load_64(p)
#define atomic_cmpset_acq_long(p, e, s) __atomic_cas_64(p, e, s)
#define atomic_add_rel_char(p, v) __atomic_op_8(p, +=, v)
#define atomic_subtract_rel_char(p, v) __atomic_op_8(p, -=, v)
#define atomic_set_rel_char(p, v) __atomic_op_8(p, |=, v)
#define atomic_clear_rel_char(p, v) __atomic_op_8(p, &=, ~v)
#define atomic_store_rel_char(p, v) __atomic_op_8(p, =, v)
#define atomic_add_rel_short(p, v) __atomic_op_16(p, +=, v)
#define atomic_subtract_rel_short(p, v) __atomic_op_16(p, -=, v)
#define atomic_set_rel_short(p, v) __atomic_op_16(p, |=, v)
#define atomic_clear_rel_short(p, v) __atomic_op_16(p, &=, ~v)
#define atomic_store_rel_short(p, v) __atomic_op_16(p, =, v)
#define atomic_add_rel_int(p, v) __atomic_op_32(p, +=, v)
#define atomic_subtract_rel_int(p, v) __atomic_op_32(p, -=, v)
#define atomic_set_rel_int(p, v) __atomic_op_32(p, |=, v)
#define atomic_clear_rel_int(p, v) __atomic_op_32(p, &=, ~v)
#define atomic_store_rel_int(p, v) __atomic_op_32(p, =, v)
#define atomic_cmpset_rel_int(p, e, s) __atomic_cas_32(p, e, s)
#define atomic_add_rel_long(p, v) __atomic_op_64(p, +=, v)
#define atomic_subtract_rel_long(p, v) __atomic_op_64(p, -=, v)
#define atomic_set_rel_long(p, v) __atomic_op_64(p, |=, v)
#define atomic_clear_rel_long(p, v) __atomic_op_64(p, &=, ~v)
#define atomic_store_rel_long(p, v) __atomic_op_64(p, =, v)
#define atomic_cmpset_rel_long(p, e, s) __atomic_cas_64(p, e, s)
#define atomic_add_char(p, v) __atomic_op_8(p, +=, v)
#define atomic_subtract_char(p, v) __atomic_op_8(p, -=, v)
#define atomic_set_char(p, v) __atomic_op_8(p, |=, v)
#define atomic_clear_char(p, v) __atomic_op_8(p, &=, ~v)
#define atomic_store_char(p, v) __atomic_op_8(p, =, v)
#define atomic_add_short(p, v) __atomic_op_16(p, +=, v)
#define atomic_subtract_short(p, v) __atomic_op_16(p, -=, v)
#define atomic_set_short(p, v) __atomic_op_16(p, |=, v)
#define atomic_clear_short(p, v) __atomic_op_16(p, &=, ~v)
#define atomic_store_short(p, v) __atomic_op_16(p, =, v)
#define atomic_add_int(p, v) __atomic_op_32(p, +=, v)
#define atomic_subtract_int(p, v) __atomic_op_32(p, -=, v)
#define atomic_set_int(p, v) __atomic_op_32(p, |=, v)
#define atomic_clear_int(p, v) __atomic_op_32(p, &=, ~v)
#define atomic_store_int(p, v) __atomic_op_32(p, =, v)
#define atomic_load_int(p) __atomic_load_32(p)
#define atomic_readandclear_int(p) __atomic_load_clear_32(p)
#define atomic_cmpset_int(p, e, s) __atomic_cas_32(p, e, s)
#define atomic_add_long(p, v) __atomic_op_64(p, +=, v)
#define atomic_subtract_long(p, v) __atomic_op_64(p, -=, v)
#define atomic_set_long(p, v) __atomic_op_64(p, |=, v)
#define atomic_clear_long(p, v) __atomic_op_64(p, &=, ~v)
#define atomic_store_long(p, v) __atomic_op_64(p, =, v)
#define atomic_load_long(p) __atomic_load_64(p)
#define atomic_readandclear_long(p) __atomic_load_clear_64(p)
#define atomic_cmpset_long(p, e, s) __atomic_cas_64(p, e, s)
#define atomic_add_ptr(p, v) __atomic_op_64(p, +=, v)
#define atomic_subtract_ptr(p, v) __atomic_op_64(p, -=, v)
#define atomic_set_ptr(p, v) __atomic_op_64(p, |=, v)
#define atomic_clear_ptr(p, v) __atomic_op_64(p, &=, ~v)
#define atomic_store_ptr(p, v) __atomic_op_64(p, =, v)
#define atomic_load_ptr(p) __atomic_load_64(p)
#define atomic_readandclear_ptr(p) __atomic_load_clear_64(p)
#define atomic_cmpset_ptr(p, e, s) __atomic_cas_64(p, e, s)
#define atomic_add_acq_char(p, v) __atomic_op_8(p, +=, v)
#define atomic_subtract_acq_char(p, v) __atomic_op_8(p, -=, v)
#define atomic_set_acq_char(p, v) __atomic_op_8(p, |=, v)
#define atomic_clear_acq_char(p, v) __atomic_op_8(p, &=, ~v)
#define atomic_store_acq_char(p, v) __atomic_op_8(p, =, v)
#define atomic_add_acq_short(p, v) __atomic_op_16(p, +=, v)
#define atomic_subtract_acq_short(p, v) __atomic_op_16(p, -=, v)
#define atomic_set_acq_short(p, v) __atomic_op_16(p, |=, v)
#define atomic_clear_acq_short(p, v) __atomic_op_16(p, &=, ~v)
#define atomic_store_acq_short(p, v) __atomic_op_16(p, =, v)
#define atomic_add_acq_int(p, v) __atomic_op_32(p, +=, v)
#define atomic_subtract_acq_int(p, v) __atomic_op_32(p, -=, v)
#define atomic_set_acq_int(p, v) __atomic_op_32(p, |=, v)
#define atomic_clear_acq_int(p, v) __atomic_op_32(p, &=, ~v)
#define atomic_store_acq_int(p, v) __atomic_op_32(p, =, v)
#define atomic_load_acq_int(p) __atomic_load_32(p)
#define atomic_cmpset_acq_int(p, e, s) __atomic_cas_32(p, e, s)
#define atomic_add_acq_long(p, v) __atomic_op_64(p, +=, v)
#define atomic_subtract_acq_long(p, v) __atomic_op_64(p, -=, v)
#define atomic_set_acq_long(p, v) __atomic_op_64(p, |=, v)
#define atomic_clear_acq_long(p, v) __atomic_op_64(p, &=, ~v)
#define atomic_store_acq_long(p, v) __atomic_op_64(p, =, v)
#define atomic_load_acq_long(p) __atomic_load_64(p)
#define atomic_cmpset_acq_long(p, e, s) __atomic_cas_64(p, e, s)
#define atomic_add_acq_ptr(p, v) __atomic_op_64(p, +=, v)
#define atomic_subtract_acq_ptr(p, v) __atomic_op_64(p, -=, v)
#define atomic_set_acq_ptr(p, v) __atomic_op_64(p, |=, v)
#define atomic_clear_acq_ptr(p, v) __atomic_op_64(p, &=, ~v)
#define atomic_store_acq_ptr(p, v) __atomic_op_64(p, =, v)
#define atomic_load_acq_ptr(p) __atomic_load_64(p)
#define atomic_cmpset_acq_ptr(p, e, s) __atomic_cas_64(p, e, s)
#define atomic_add_rel_char(p, v) __atomic_op_8(p, +=, v)
#define atomic_subtract_rel_char(p, v) __atomic_op_8(p, -=, v)
#define atomic_set_rel_char(p, v) __atomic_op_8(p, |=, v)
#define atomic_clear_rel_char(p, v) __atomic_op_8(p, &=, ~v)
#define atomic_store_rel_char(p, v) __atomic_op_8(p, =, v)
#define atomic_add_rel_short(p, v) __atomic_op_16(p, +=, v)
#define atomic_subtract_rel_short(p, v) __atomic_op_16(p, -=, v)
#define atomic_set_rel_short(p, v) __atomic_op_16(p, |=, v)
#define atomic_clear_rel_short(p, v) __atomic_op_16(p, &=, ~v)
#define atomic_store_rel_short(p, v) __atomic_op_16(p, =, v)
#define atomic_add_rel_int(p, v) __atomic_op_32(p, +=, v)
#define atomic_subtract_rel_int(p, v) __atomic_op_32(p, -=, v)
#define atomic_set_rel_int(p, v) __atomic_op_32(p, |=, v)
#define atomic_clear_rel_int(p, v) __atomic_op_32(p, &=, ~v)
#define atomic_store_rel_int(p, v) __atomic_op_32(p, =, v)
#define atomic_cmpset_rel_int(p, e, s) __atomic_cas_32(p, e, s)
#define atomic_add_rel_long(p, v) __atomic_op_64(p, +=, v)
#define atomic_subtract_rel_long(p, v) __atomic_op_64(p, -=, v)
#define atomic_set_rel_long(p, v) __atomic_op_64(p, |=, v)
#define atomic_clear_rel_long(p, v) __atomic_op_64(p, &=, ~v)
#define atomic_store_rel_long(p, v) __atomic_op_64(p, =, v)
#define atomic_cmpset_rel_long(p, e, s) __atomic_cas_64(p, e, s)
#define atomic_add_rel_ptr(p, v) __atomic_op_64(p, +=, v)
#define atomic_subtract_rel_ptr(p, v) __atomic_op_64(p, -=, v)
#define atomic_set_rel_ptr(p, v) __atomic_op_64(p, |=, v)
#define atomic_clear_rel_ptr(p, v) __atomic_op_64(p, &=, ~v)
#define atomic_store_rel_ptr(p, v) __atomic_op_64(p, =, v)
#define atomic_cmpset_rel_ptr(p, e, s) __atomic_cas_64(p, e, s)
#undef ATOMIC_GEN
#undef atomic_cas_32
#undef atomic_cas_64
#undef atomic_cas
#undef atomic_cas_acq
#undef atomic_cas_rel
#undef atomic_op
#undef atomic_op_acq
#undef atomic_op_rel
#undef atomic_load_acq
#undef atomic_store_rel
#undef atomic_load_clear
#endif /* !_MACHINE_ATOMIC_H_ */

View File

@ -0,0 +1,44 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_BOOTINFO_H_
#define _MACHINE_BOOTINFO_H_
/*
* Increment the version number when you break binary compatibility.
*/
#define BOOTINFO_VERSION 1
/*
* Information handed from the boot loader to the kernel.
*/
struct bootinfo {
u_int bi_version; /* interface version (BOOTINFO_VERSION) */
u_long bi_end; /* presumably end address of the loaded kernel -- TODO confirm against the loader */
u_long bi_kpa; /* presumably kernel physical load address -- TODO confirm */
u_long bi_metadata; /* presumably pointer to loader(8) metadata -- TODO confirm */
};
#endif /* !_MACHINE_BOOTINFO_H_ */

View File

@ -32,10 +32,13 @@
#include <machine/frame.h>
#define CLKF_USERMODE(cfp) (0)
#define CLKF_PC(cfp) (0)
#define CLKF_PC(cfp) ((cfp)->cf_tf.tf_tpc)
#define cpu_getstack(p) (0)
#define cpu_setstack(p, sp) (0)
#define TRAPF_PC(tfp) ((tfp)->tf_tpc)
#define TRAPF_USERMODE(tfp) (0)
#define cpu_getstack(p) ((p)->p_frame->tf_sp)
#define cpu_setstack(p, sp) ((p)->p_frame->tf_sp = (sp))
/*
* Arrange to handle pending profiling ticks before returning to user mode.
@ -66,11 +69,13 @@
{ "wall_cmos_clock", CTLTYPE_INT }, \
}
void fork_trampoline(void);
static __inline u_int64_t
get_cyclecount(void)
{
static u_long now;
return (++now);
return (rd(tick));
}
#endif /* !_MACHINE_CPU_H_ */

View File

@ -29,15 +29,139 @@
#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_
#include <machine/asi.h>
#include <machine/pstate.h>
/*
* membar operand macros for use in other macros when # is a special
* character. Keep these in sync with what the hardware expects.
*/
/* Bit numbers within the membar cmask (completion) field. */
#define C_Lookaside (0)
#define C_MemIssue (1)
#define C_Sync (2)
/* Bit numbers within the membar mmask (ordering) field. */
#define M_LoadLoad (0)
#define M_StoreLoad (1)
#define M_LoadStore (2)
#define M_StoreStore (3)
/* The 7-bit membar immediate is mmask in bits 0-3, cmask in bits 4-6. */
#define CMASK_SHIFT (4)
#define MMASK_SHIFT (0)
#define CMASK_GEN(bit) ((1 << (bit)) << CMASK_SHIFT)
#define MMASK_GEN(bit) ((1 << (bit)) << MMASK_SHIFT)
/* Spelled-out operand names, usable where #Sync etc. cannot appear. */
#define Lookaside CMASK_GEN(C_Lookaside)
#define MemIssue CMASK_GEN(C_MemIssue)
#define Sync CMASK_GEN(C_Sync)
#define LoadLoad MMASK_GEN(M_LoadLoad)
#define StoreLoad MMASK_GEN(M_StoreLoad)
#define LoadStore MMASK_GEN(M_LoadStore)
#define StoreStore MMASK_GEN(M_StoreStore)
/*
* 32-bit compare and swap in address space asi: if the word at rs1
* equals rs2, swap it with rd; either way the old memory value is
* returned. asi must be a compile time constant ("n" constraint).
*/
#define casa(rs1, rs2, rd, asi) ({ \
u_int __rd = (u_int32_t)(rd); \
__asm __volatile("casa [%1] %2, %3, %0" \
: "+r" (__rd) : "r" (rs1), "n" (asi), "r" (rs2)); \
__rd; \
})
/*
* 64-bit variant of casa.
*/
#define casxa(rs1, rs2, rd, asi) ({ \
u_long __rd = (u_int64_t)(rd); \
__asm __volatile("casxa [%1] %2, %3, %0" \
: "+r" (__rd) : "r" (rs1), "n" (asi), "r" (rs2)); \
__rd; \
})
/*
* Synchronize the instruction stream after a store to instruction
* memory at va.
*/
#define flush(va) do { \
__asm __volatile("flush %0" : : "r" (va)); \
} while (0)
/*
* 64-bit load from virtual address va in alternate space asi.
*/
#define ldxa(va, asi) ({ \
u_long __r; \
__asm __volatile("ldxa [%1] %2, %0" \
: "=r" (__r) : "r" (va), "n" (asi)); \
__r; \
})
/*
* 64-bit store of val to virtual address va in alternate space asi.
*/
#define stxa(va, asi, val) do { \
__asm __volatile("stxa %0, [%1] %2" \
: : "r" (val), "r" (va), "n" (asi)); \
} while (0)
/*
* Memory barrier; mask is built from the cmask/mmask macros above.
*/
#define membar(mask) do { \
__asm __volatile("membar %0" : : "n" (mask)); \
} while (0)
/*
* Read the ancillary state register 'name' (e.g. tick).
*/
#define rd(name) ({ \
u_int64_t __sr; \
__asm __volatile("rd %%" #name ", %0" : "=r" (__sr) :); \
__sr; \
})
/*
* Write an ancillary state register; the hardware xors the two source
* operands, so the value written is val ^ xor.
*/
#define wr(name, val, xor) do { \
__asm __volatile("wr %0, %1, %%" #name \
: : "r" (val), "rI" (xor)); \
} while (0)
/*
* Read the privileged register 'name' (e.g. pstate, pil).
*/
#define rdpr(name) ({ \
u_int64_t __pr; \
__asm __volatile("rdpr %%" #name", %0" : "=r" (__pr) :); \
__pr; \
})
/*
* Write a privileged register; as with wr(), val ^ xor is written.
*/
#define wrpr(name, val, xor) do { \
__asm __volatile("wrpr %0, %1, %%" #name \
: : "r" (val), "rI" (xor)); \
} while (0)
/*
* Enter the debugger via trap 1, matching DEBUGGER() in asmacros.h.
*/
static __inline void
breakpoint(void)
{
__asm __volatile("ta 1");
}
/*
* XXX use %pil for these.
*/
static __inline critical_t
critical_enter(void)
{
return (0);
critical_t ie;
ie = rdpr(pstate);
if (ie & PSTATE_IE)
wrpr(pstate, ie, PSTATE_IE);
return (ie);
}
/*
* Leave a critical section: ie is the pstate value returned by the
* matching critical_enter(). Interrupts are re-enabled only if they
* were enabled on entry (PSTATE_IE was set); wrpr xors its operands,
* so writing (ie, 0) restores ie unchanged.
*/
static __inline void
critical_exit(critical_t ie)
{
if (ie & PSTATE_IE)
wrpr(pstate, ie, 0);
}
#if 0
#define HAVE_INLINE_FFS
/*
* See page 202 of the SPARC v9 Architecture Manual.
*
* Currently disabled. Branch-free find-first-set:
* neg = -mask; xnor(mask, neg) = ~(mask ^ -mask), which is a run of
* ones at and below the lowest set bit, so popc of it yields the
* 1-based bit index. movrz forces the result to 0 when mask == 0.
*/
static __inline int
ffs(int mask)
{
int result;
int neg;
int tmp;
__asm __volatile(
" neg %3, %1 ; "
" xnor %3, %1, %2 ; "
" popc %2, %0 ; "
" movrz %3, %%g0, %0 ; "
: "=r" (result), "=r" (neg), "=r" (tmp) : "r" (mask));
return (result);
}
#endif
#endif /* !_MACHINE_CPUFUNC_H_ */

View File

@ -29,4 +29,48 @@
#ifndef _MACHINE_DB_MACHDEP_H_
#define _MACHINE_DB_MACHDEP_H_
#include <machine/frame.h>
#include <machine/trap.h>
/* sparc64 is big endian: most significant byte first. */
#define BYTE_MSF (1)
typedef vm_offset_t db_addr_t;
typedef u_long db_expr_t;
struct db_regs {
u_long dr_global[8]; /* %g0-%g7 */
};
/* ddb operates directly on the saved trapframe. */
typedef struct trapframe db_regs_t;
extern db_regs_t ddb_regs;
#define DDB_REGS (&ddb_regs)
#define PC_REGS(regs) ((db_addr_t)(regs)->tf_tpc)
/*
* NOTE(review): BKPT_INST of 0 is a placeholder, not a real sparc64
* breakpoint instruction (cf. "ta 1" used by breakpoint()) -- confirm
* before enabling software breakpoints.
*/
#define BKPT_INST (0)
#define BKPT_SIZE (4) /* all sparc64 instructions are 4 bytes */
#define BKPT_SET(inst) (BKPT_INST)
/*
* Skip over the breakpoint by advancing tpc/tnpc one instruction.
* NOTE(review): the trailing semicolon after while (0) defeats the
* do/while idiom and would break use in an unbraced if/else -- check
* the ddb call sites before relying on it there.
*/
#define FIXUP_PC_AFTER_BREAK do { \
ddb_regs.tf_tpc = ddb_regs.tf_tnpc; \
ddb_regs.tf_tnpc += BKPT_SIZE; \
} while (0);
/* Single stepping is not implemented yet. */
#define db_clear_single_step(regs)
#define db_set_single_step(regs)
#define IS_BREAKPOINT_TRAP(type, code) (type == T_BREAKPOINT)
#define IS_WATCHPOINT_TRAP(type, code) (0)
/* Instruction classification stubs; always false for now. */
#define inst_trap_return(ins) (0)
#define inst_return(ins) (0)
#define inst_call(ins) (0)
#define inst_load(ins) (0)
#define inst_store(ins) (0)
#define DB_SMALL_VALUE_MAX (0x7fffffff)
#define DB_SMALL_VALUE_MIN (-0x40001)
#define DB_ELFSIZE 64
#endif /* !_MACHINE_DB_MACHDEP_H_ */

View File

@ -29,10 +29,47 @@
#ifndef	_MACHINE_FRAME_H_
#define	_MACHINE_FRAME_H_

/*
 * The v9 ABI biases the stack and frame pointers by 2047 bytes; add
 * SPOFF to %sp/%fp to get the actual address of the save area.
 */
#define	SPOFF	2047

/*
 * Register state saved when a trap is taken: the global and out
 * registers plus the trap control registers.
 */
struct trapframe {
	u_long	tf_global[8];
	u_long	tf_out[8];
	u_long	tf_tnpc;	/* trap next program counter */
	u_long	tf_tpc;		/* trap program counter */
	u_long	tf_tstate;	/* trap state register */
	u_long	tf_type;	/* trap type (T_*) */
	void	*tf_arg;	/* trap specific argument */
};
#define	tf_sp	tf_out[6]	/* %o6 is the stack pointer */

/* MMU fault state captured for memory management traps. */
struct mmuframe {
	u_long	mf_sfar;	/* synchronous fault address register */
	u_long	mf_sfsr;	/* synchronous fault status register */
	u_long	mf_tar;		/* tag access register */
};

/* Register window state captured on entry to the kernel debugger. */
struct kdbframe {
	u_long	kf_fp;
	u_long	kf_cfp;
	u_long	kf_canrestore;
	u_long	kf_cansave;
	u_long	kf_cleanwin;
	u_long	kf_cwp;
	u_long	kf_otherwin;
};

/*
 * Clock interrupts carry a full trapframe.  (The empty pre-existing
 * placeholder definition was removed; two definitions of the same
 * struct tag do not compile.)
 */
struct clockframe {
	struct	trapframe cf_tf;
};

/* A stack frame as laid down by a register window spill. */
struct frame {
	u_long	f_local[8];
	u_long	f_in[8];
	u_long	f_pad[8];
};
#define	f_fp	f_in[6]		/* %i6 is the frame pointer */
#define	f_pc	f_in[7]		/* %i7 is the return address */

int	kdb_trap(struct trapframe *tf);

#endif /* !_MACHINE_FRAME_H_ */

View File

@ -37,7 +37,7 @@ struct globaldata {
SLIST_ENTRY(globaldata) gd_allcpu;
struct pcb *gd_curpcb;
struct proc *gd_curproc;
struct proc *gd_fpproc;
struct proc *gd_fpcurproc;
struct proc *gd_idleproc;
u_int gd_cpuid;
u_int gd_other_cpus;

View File

@ -32,6 +32,8 @@
/*
 * Mark the critical-section state saved in a spin mutex so that
 * interrupts (PSTATE_IE) come back on when the saved state is
 * restored -- NOTE(review): presumably on mtx_unlock_spin(); confirm
 * against the mutex release path.
 */
static __inline void
mtx_intr_enable(struct mtx *mtx)
{
	mtx->mtx_savecrit |= PSTATE_IE;
}
#endif /* !_MACHINE_MUTEX_H_ */

View File

@ -85,15 +85,25 @@
#define	ALIGNBYTES	_ALIGNBYTES
#define	ALIGN(p)	_ALIGN(p)

/*
 * The hardware supports four page sizes; define shift/size/mask for
 * each.  (The duplicate legacy PAGE_SHIFT/PAGE_SIZE/PAGE_MASK
 * definitions and the i386-derived NPTEPG/NPDEPG/PDRSHIFT macros,
 * which referenced the nonexistent pt_entry_t/pd_entry_t types, were
 * removed.)
 */
#define	PAGE_SHIFT_8K	13
#define	PAGE_SIZE_8K	(1<<PAGE_SHIFT_8K)
#define	PAGE_MASK_8K	(PAGE_SIZE_8K-1)

#define	PAGE_SHIFT_64K	16
#define	PAGE_SIZE_64K	(1<<PAGE_SHIFT_64K)
#define	PAGE_MASK_64K	(PAGE_SIZE_64K-1)

#define	PAGE_SHIFT_512K	19
#define	PAGE_SIZE_512K	(1<<PAGE_SHIFT_512K)
#define	PAGE_MASK_512K	(PAGE_SIZE_512K-1)

#define	PAGE_SHIFT_4M	22
#define	PAGE_SIZE_4M	(1<<PAGE_SHIFT_4M)
#define	PAGE_MASK_4M	(PAGE_SIZE_4M-1)

/* The base page size is 8K. */
#define	PAGE_SHIFT	PAGE_SHIFT_8K	/* LOG2(PAGE_SIZE) */
#define	PAGE_SIZE	PAGE_SIZE_8K	/* bytes/page */
#define	PAGE_MASK	PAGE_MASK_8K
#define	DEV_BSHIFT	9		/* log2(DEV_BSIZE) */
#define	DEV_BSIZE	(1<<DEV_BSHIFT)

/*
 * Unit conversion macros.  All take an arbitrary integer type and
 * compute in unsigned long; on this 64-bit platform that is wide
 * enough even for off_t byte counts, so btodb/dbtob need no special
 * widening.  (Duplicated old definitions left over from the diff were
 * collapsed to one each; the i386 trunc_4mpage/round_4mpage macros,
 * which depended on the removed PDRMASK, were dropped.)
 */

/* clicks to bytes */
#define	ctob(x)	((unsigned long)(x)<<PAGE_SHIFT)

/* bytes to clicks */
#define	btoc(x)	(((unsigned long)(x)+PAGE_MASK)>>PAGE_SHIFT)

/* bytes to disk blocks */
#define	btodb(bytes) \
	(daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT)

/* disk blocks to bytes */
#define	dbtob(db) \
	(off_t)((unsigned long)(db) << DEV_BSHIFT)

/*
 * Mach derived conversion macros
 */
#define	round_page(x)	(((unsigned long)(x) + PAGE_MASK) & ~PAGE_MASK)
#define	trunc_page(x)	((unsigned long)(x) & ~PAGE_MASK)
#define	atop(x)		((unsigned long)(x) >> PAGE_SHIFT)
#define	ptoa(x)		((unsigned long)(x) << PAGE_SHIFT)

View File

@ -30,6 +30,8 @@
#define _MACHINE_PCB_H_
/*
 * Machine dependent process control block: the kernel state saved
 * across a context switch by savectx()/cpu_switch().
 */
struct pcb {
	u_long	pcb_fp;		/* saved frame pointer */
	u_long	pcb_pc;		/* saved program counter */
	caddr_t	pcb_onfault;	/* fault recovery pc -- NOTE(review):
				   presumably for copyin/copyout; confirm */
};
@ -37,7 +39,7 @@ struct md_coredump {
};
#ifdef _KERNEL
/*
 * Save the current kernel context into a pcb.  Declared int (not void):
 * setjmp-style, it can return a second time when the context is resumed
 * -- NOTE(review): confirm the return convention in the md switch code.
 * (The stale conflicting "void savectx" prototype was removed; the two
 * declarations did not compile together.)
 */
int	savectx(struct pcb *pcb);
#endif
#endif /* !_MACHINE_PCB_H_ */

View File

@ -37,7 +37,7 @@ struct globaldata {
SLIST_ENTRY(globaldata) gd_allcpu;
struct pcb *gd_curpcb;
struct proc *gd_curproc;
struct proc *gd_fpproc;
struct proc *gd_fpcurproc;
struct proc *gd_idleproc;
u_int gd_cpuid;
u_int gd_other_cpus;

View File

@ -29,30 +29,36 @@
#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <sys/kobj.h>

#include <machine/tte.h>

/* Maximum number of hardware MMU contexts. */
#define	PMAP_CONTEXT_MAX	8192

typedef struct pmap *pmap_t;

/* Machine dependent per-page data; nothing needed yet. */
struct md_page {
};

/*
 * Physical map: one per address space.
 * NOTE(review): member roles inferred from names; confirm in pmap.c.
 * (Flattened-diff duplicates were removed: pm_stats appeared twice in
 * the struct, and pmap_t/pmap_resident_count were each defined twice.)
 */
struct pmap {
	struct	stte pm_stte;	/* tte mapping this pmap's tsb page */
	u_int	pm_active;	/* cpus this pmap is active on */
	u_int	pm_context;	/* hardware MMU context number */
	u_int	pm_count;	/* reference count */
	struct	pmap_statistics pm_stats;
};

/*
 * The kernel pmap is a statically allocated object; the old
 * "extern struct pmap *kernel_pmap" conflicted with this macro and
 * was removed.
 */
extern	struct pmap __kernel_pmap;
#define	kernel_pmap	(&__kernel_pmap)

#define	pmap_resident_count(pm)	(pm->pm_stats.resident_count)

#ifdef _KERNEL
void	pmap_bootstrap(vm_offset_t skpa, vm_offset_t ekva);
vm_offset_t pmap_kextract(vm_offset_t va);

extern	vm_offset_t avail_start;
extern	vm_offset_t avail_end;
extern	vm_offset_t clean_eva;
extern	vm_offset_t clean_sva;
extern	vm_offset_t phys_avail[];
extern	vm_offset_t virtual_avail;
extern	vm_offset_t virtual_end;
#endif

#endif /* !_MACHINE_PMAP_H_ */

View File

@ -30,6 +30,7 @@
#define _MACHINE_PROC_H_
#include <machine/globals.h>
#include <machine/tte.h>
struct mdproc {
};

View File

@ -0,0 +1,79 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_PSTATE_H_
#define _MACHINE_PSTATE_H_
/* %pstate register fields (SPARC v9). */
#define	PSTATE_AG	(1<<0)	/* alternate globals */
#define	PSTATE_IE	(1<<1)	/* interrupt enable */
#define	PSTATE_PRIV	(1<<2)	/* privileged mode */
#define	PSTATE_AM	(1<<3)	/* 32-bit address mask */
#define	PSTATE_PEF	(1<<4)	/* floating point enable */
#define	PSTATE_RED	(1<<5)	/* RED (reset/error) state */
/* Memory model selection (bits 6-7). */
#define	PSTATE_MM_SHIFT	(6)
#define	PSTATE_MM_MASK	((1<<PSTATE_MM_SHIFT)|(1<<(PSTATE_MM_SHIFT+1)))
#define	PSTATE_MM_TSO	(0<<PSTATE_MM_SHIFT)	/* total store order */
#define	PSTATE_MM_PSO	(1<<PSTATE_MM_SHIFT)	/* partial store order */
#define	PSTATE_MM_RMO	(2<<PSTATE_MM_SHIFT)	/* relaxed memory order */
#define	PSTATE_TLE	(1<<8)	/* trap little endian */
#define	PSTATE_CLE	(1<<9)	/* current little endian */
#define	PSTATE_MG	(1<<10)	/* MMU globals (UltraSPARC extension) */
#define	PSTATE_IG	(1<<11)	/* interrupt globals (UltraSPARC extension) */

/* %ver (version register) field layout and extraction. */
#define	VER_MANUF_SHIFT	(48)
#define	VER_IMPL_SHIFT	(32)
#define	VER_MASK_SHIFT	(24)
#define	VER_MAXTL_SHIFT	(8)
#define	VER_MAXWIN_SHIFT	(0)

#define	VER_MANUF_SIZE	(16)
#define	VER_IMPL_SIZE	(16)
#define	VER_MASK_SIZE	(8)
#define	VER_MAXTL_SIZE	(8)
#define	VER_MAXWIN_SIZE	(5)

#define	VER_MANUF_MASK	(((1L<<VER_MANUF_SIZE)-1)<<VER_MANUF_SHIFT)
#define	VER_IMPL_MASK	(((1L<<VER_IMPL_SIZE)-1)<<VER_IMPL_SHIFT)
#define	VER_MASK_MASK	(((1L<<VER_MASK_SIZE)-1)<<VER_MASK_SHIFT)
#define	VER_MAXTL_MASK	(((1L<<VER_MAXTL_SIZE)-1)<<VER_MAXTL_SHIFT)
#define	VER_MAXWIN_MASK	(((1L<<VER_MAXWIN_SIZE)-1)<<VER_MAXWIN_SHIFT)

#define	VER_MANUF(ver) \
	(((ver) & VER_MANUF_MASK) >> VER_MANUF_SHIFT)
#define	VER_IMPL(ver) \
	(((ver) & VER_IMPL_MASK) >> VER_IMPL_SHIFT)
#define	VER_MASK(ver) \
	(((ver) & VER_MASK_MASK) >> VER_MASK_SHIFT)
#define	VER_MAXTL(ver) \
	(((ver) & VER_MAXTL_MASK) >> VER_MAXTL_SHIFT)
#define	VER_MAXWIN(ver) \
	(((ver) & VER_MAXWIN_MASK) >> VER_MAXWIN_SHIFT)
#endif /* !_MACHINE_PSTATE_H_ */

175
sys/sparc64/include/pv.h Normal file
View File

@ -0,0 +1,175 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_PV_H_
#define _MACHINE_PV_H_
/* pv list locking: not implemented yet. */
#define	PV_LOCK()
#define	PV_UNLOCK()

/* Field offsets within stte/tte, for physically addressed accesses. */
#define	ST_TTE		offsetof(struct stte, st_tte)
#define	ST_NEXT		offsetof(struct stte, st_next)
#define	ST_PREV		offsetof(struct stte, st_prev)
#define	TTE_DATA	offsetof(struct tte, tte_data)
#define	TTE_TAG		offsetof(struct tte, tte_tag)

/* Index of a managed physical page in the pv table (8-byte entries). */
#define	PV_OFF(pa)	((vm_offset_t)(pa) - avail_start)
#define	PV_INDEX(pa)	(PV_OFF(pa) >> PAGE_SHIFT)
#define	PV_SHIFT	(3)

/*
 * Access pv data by physical address, bypassing the TLB: casxa/ldxa/
 * stxa through ASI_PHYS_USE_EC (physically addressed, E-cacheable).
 */
#define	casxp(pa, exp, src) \
	casxa((vm_offset_t *)pa, exp, src, ASI_PHYS_USE_EC)
#define	ldxp(pa)	ldxa(pa, ASI_PHYS_USE_EC)
#define	stxp(pa, val)	stxa(pa, ASI_PHYS_USE_EC, val)

extern vm_offset_t pv_table;	/* physical base of the pv head table */
extern u_long pv_generation;
/*
 * Accessors for pv entries.  All addresses here are PHYSICAL; the pv
 * lists are walked with physically addressed loads so they can be
 * traversed without valid mappings.
 */

/* Physical address of the pv list head for physical page pa. */
static __inline vm_offset_t
pv_lookup(vm_offset_t pa)
{
	return (pv_table + (PV_INDEX(pa) << PV_SHIFT));
}

/* First stte on the list headed at pvh (0 if the list is empty). */
static __inline vm_offset_t
pv_get_first(vm_offset_t pvh)
{
	return (ldxp(pvh));
}

static __inline vm_offset_t
pv_get_next(vm_offset_t pstp)
{
	return (ldxp(pstp + ST_NEXT));
}

static __inline vm_offset_t
pv_get_prev(vm_offset_t pstp)
{
	return (ldxp(pstp + ST_PREV));
}

/* The tte data/tag words of the stte at physical address pstp. */
static __inline u_long
pv_get_tte_data(vm_offset_t pstp)
{
	return (ldxp(pstp + ST_TTE + TTE_DATA));
}

static __inline u_long
pv_get_tte_tag(vm_offset_t pstp)
{
	return (ldxp(pstp + ST_TTE + TTE_TAG));
}

/* Read a whole tte (tag then data, non-atomically) by physical address. */
#define	pv_get_tte(pstp) ({ \
	struct tte __tte; \
	__tte.tte_tag = pv_get_tte_tag(pstp); \
	__tte.tte_data = pv_get_tte_data(pstp); \
	__tte; \
})
/* Store the list head / link fields, again by physical address. */
static __inline void
pv_set_first(vm_offset_t pvh, vm_offset_t first)
{
	stxp(pvh, first);
}

static __inline void
pv_set_next(vm_offset_t pstp, vm_offset_t next)
{
	stxp(pstp + ST_NEXT, next);
}

static __inline void
pv_set_prev(vm_offset_t pstp, vm_offset_t prev)
{
	stxp(pstp + ST_PREV, prev);
}

/*
 * Unlink the stte at physical address pstp from its pv list.  st_prev
 * holds the physical address of the predecessor's link word (or of the
 * list head), so the final store patches the predecessor's next pointer
 * directly and works for the list head as well.
 */
static __inline void
pv_remove_phys(vm_offset_t pstp)
{
	vm_offset_t pv_next;
	vm_offset_t pv_prev;

	pv_next = pv_get_next(pstp);
	pv_prev = pv_get_prev(pstp);
	if (pv_next != 0)
		pv_set_prev(pv_next, pv_prev);
	stxp(pv_prev, pv_next);
}
/*
 * Atomically clear bits in the tte data word of the stte at physical
 * address pstp: standard compare-and-swap retry loop, repeated until
 * the casx observes the value it last read.
 */
static __inline void
pv_bit_clear(vm_offset_t pstp, u_long bits)
{
	vm_offset_t dp;
	vm_offset_t d1;
	vm_offset_t d2;
	vm_offset_t d3;

	dp = pstp + ST_TTE + TTE_DATA;
	for (d1 = ldxp(dp);; d1 = d3) {
		d2 = d1 & ~bits;
		d3 = casxp(dp, d1, d2);
		if (d1 == d3)
			break;
	}
}

/* Atomically set bits in the tte data word; same CAS retry scheme. */
static __inline void
pv_bit_set(vm_offset_t pstp, u_long bits)
{
	vm_offset_t dp;
	vm_offset_t d1;
	vm_offset_t d2;
	vm_offset_t d3;

	dp = pstp + ST_TTE + TTE_DATA;
	for (d1 = ldxp(dp);; d1 = d3) {
		d2 = d1 | bits;
		d3 = casxp(dp, d1, d2);
		if (d1 == d3)
			break;
	}
}

/*
 * Test bits in the tte data word.  casx with expected/new both 0 acts
 * as an atomic read: if the word is 0 it is swapped with 0 (a no-op),
 * otherwise it is left unchanged; either way the old value is returned.
 */
static __inline int
pv_bit_test(vm_offset_t pstp, u_long bits)
{
	vm_offset_t dp;

	dp = pstp + ST_TTE + TTE_DATA;
	return ((casxp(dp, 0, 0) & bits) != 0);
}

void pv_dump(vm_offset_t pvh);
void pv_insert(pmap_t pm, vm_offset_t pa, vm_offset_t va, struct stte *stp);
void pv_remove_virt(struct stte *stp);
#endif /* !_MACHINE_PV_H_ */

View File

@ -1,4 +1,3 @@
/* $FreeBSD$ */
/*
* Copyright 1998 Massachusetts Institute of Technology
*
@ -26,6 +25,8 @@
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_RESOURCE_H_

View File

@ -0,0 +1,55 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_SETJMP_H_
#define _MACHINE_SETJMP_H_
#define	_JBLEN	3	/* fp, pc and sp are saved */

/* Slot indices within the jump buffer. */
#define	_JB_FP	0
#define	_JB_PC	1
#define	_JB_SP	2

/*
 * jmp_buf and sigjmp_buf are encapsulated in different structs to force
 * compile-time diagnostics for mismatches.  The structs are the same
 * internally to avoid some run-time errors for mismatches.
 */
#ifndef _ANSI_SOURCE
struct _sigjmp_buf {
	/* NOTE(review): the extra slot is presumably for the signal
	   mask / savemask flag -- confirm against the md setjmp code. */
	long	_sjb[_JBLEN + 1];
};

typedef	struct _sigjmp_buf sigjmp_buf[1];
#endif

struct _jmp_buf {
	long	_jb[_JBLEN + 1];
};

typedef	struct _jmp_buf jmp_buf[1];
#endif /* !_MACHINE_SETJMP_H_ */

View File

@ -33,6 +33,7 @@
* SUCH DAMAGE.
*
* from: @(#)stdarg.h 8.2 (Berkeley) 9/27/93
* from: NetBSD: stdarg.h,v 1.11 2000/07/23 21:36:56 mycroft Exp
* $FreeBSD$
*/

149
sys/sparc64/include/tlb.h Normal file
View File

@ -0,0 +1,149 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_TLB_H_
#define _MACHINE_TLB_H_
/* Number of entries in each fully associative tlb. */
#define	TLB_SLOT_COUNT	64

/* Locked tlb slots reserved for well known mappings. */
#define	TLB_SLOT_TSB_KERNEL_MIN	60	/* XXX */
#define	TLB_SLOT_TSB_USER_PRIMARY	61
#define	TLB_SLOT_TSB_USER_SECONDARY	62
#define	TLB_SLOT_KERNEL	63

/* Data access register address: selects a tlb entry by slot number. */
#define	TLB_DAR_SLOT_SHIFT	(3)
#define	TLB_DAR_SLOT(slot)	((slot) << TLB_DAR_SLOT_SHIFT)

/* Tag access register contents: page-aligned va plus context number. */
#define	TLB_TAR_VA(va)	((va) & ~PAGE_MASK)
#define	TLB_TAR_CTX(ctx)	(ctx)

/*
 * Demap operation encoding (written as the address of a demap store):
 * which context id to use and whether to demap one page or a whole
 * context.
 */
#define	TLB_DEMAP_ID_SHIFT	(4)
#define	TLB_DEMAP_ID_PRIMARY	(0)
#define	TLB_DEMAP_ID_SECONDARY	(1)
#define	TLB_DEMAP_ID_NUCLEUS	(2)

#define	TLB_DEMAP_TYPE_SHIFT	(6)
#define	TLB_DEMAP_TYPE_PAGE	(0)
#define	TLB_DEMAP_TYPE_CONTEXT	(1)

#define	TLB_DEMAP_VA(va)	((va) & ~PAGE_MASK)
#define	TLB_DEMAP_ID(id)	((id) << TLB_DEMAP_ID_SHIFT)
#define	TLB_DEMAP_TYPE(type)	((type) << TLB_DEMAP_TYPE_SHIFT)

#define	TLB_DEMAP_PAGE		(TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_PAGE))
#define	TLB_DEMAP_CONTEXT	(TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_CONTEXT))

#define	TLB_DEMAP_PRIMARY	(TLB_DEMAP_ID(TLB_DEMAP_ID_PRIMARY))
#define	TLB_DEMAP_SECONDARY	(TLB_DEMAP_ID(TLB_DEMAP_ID_SECONDARY))
#define	TLB_DEMAP_NUCLEUS	(TLB_DEMAP_ID(TLB_DEMAP_ID_NUCLEUS))

/* Context 0 is the kernel (nucleus) context. */
#define	TLB_CTX_KERNEL	(0)

/* Bit flags selecting which tlb(s) an operation applies to. */
#define	TLB_DTLB	(1 << 0)
#define	TLB_ITLB	(1 << 1)
/*
 * Demap (invalidate) one page from the dtlb.  Only the kernel
 * (nucleus) context is implemented so far; user contexts panic via
 * TODO.
 */
static __inline void
tlb_dtlb_page_demap(u_int ctx, vm_offset_t va)
{
	if (ctx == TLB_CTX_KERNEL) {
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
		    ASI_DMMU_DEMAP, 0);
		membar(Sync);
	} else
		TODO;
}

/*
 * Enter a tte into the dtlb at a slot of the MMU's choosing.  The tag
 * access register is primed with the va, then the data-in store
 * triggers the insert.  Only context 0 (kernel) is supported here.
 */
static __inline void
tlb_dtlb_store(vm_offset_t va, struct tte tte)
{
	stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(0));
	stxa(0, ASI_DTLB_DATA_IN_REG, tte.tte_data);
	membar(Sync);
}

/* Enter a tte into a specific dtlb slot (used for locked entries). */
static __inline void
tlb_dtlb_store_slot(vm_offset_t va, struct tte tte, int slot)
{
	stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(0));
	stxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG, tte.tte_data);
	membar(Sync);
}

/*
 * Demap one page from the itlb.  Note the instruction side uses a
 * flush (of a known-mapped kernel address) rather than a membar to
 * synchronize.  Kernel context only, as above.
 */
static __inline void
tlb_itlb_page_demap(u_int ctx, vm_offset_t va)
{
	if (ctx == TLB_CTX_KERNEL) {
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
		    ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
	} else
		TODO;
}

/* Not implemented yet. */
static __inline void
tlb_itlb_store(vm_offset_t va, struct tte tte)
{
	TODO;
}

/* Enter a tte into a specific itlb slot, then flush the target va. */
static __inline void
tlb_itlb_store_slot(vm_offset_t va, struct tte tte, int slot)
{
	stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(0));
	stxa(TLB_DAR_SLOT(slot), ASI_ITLB_DATA_ACCESS_REG, tte.tte_data);
	flush(va);
}
/* Demap a page from whichever tlbs are selected in the tlb mask. */
static __inline void
tlb_page_demap(u_int tlb, u_int ctx, vm_offset_t va)
{
	if ((tlb & TLB_DTLB) != 0)
		tlb_dtlb_page_demap(ctx, va);
	if ((tlb & TLB_ITLB) != 0)
		tlb_itlb_page_demap(ctx, va);
}

/* Install a tte into the selected tlbs at an MMU-chosen slot. */
static __inline void
tlb_store(u_int tlb, vm_offset_t va, struct tte tte)
{
	if ((tlb & TLB_DTLB) != 0)
		tlb_dtlb_store(va, tte);
	if ((tlb & TLB_ITLB) != 0)
		tlb_itlb_store(va, tte);
}

/* Install a tte into a fixed slot of the selected tlbs. */
static __inline void
tlb_store_slot(u_int tlb, vm_offset_t va, struct tte tte, int slot)
{
	if ((tlb & TLB_DTLB) != 0)
		tlb_dtlb_store_slot(va, tte, slot);
	if ((tlb & TLB_ITLB) != 0)
		tlb_itlb_store_slot(va, tte, slot);
}
#endif /* !_MACHINE_TLB_H_ */

View File

@ -0,0 +1,70 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_TRAP_H_
#define _MACHINE_TRAP_H_
/*
 * Trap type codes, stored in tf_type and passed to trap().
 * NOTE(review): these are FreeBSD's own dense numbering, not the raw
 * hardware trap table (tt) vectors -- the low-level trap code must
 * translate; confirm against the trap table sources.
 */
#define	T_RESERVED	0x0
#define	T_POWER_ON	0x1
#define	T_WATCHDOG	0x2
#define	T_RESET_EXT	0x3
#define	T_RESET_SOFT	0x4
#define	T_RED_STATE	0x5
#define	T_INSN_EXCPTN	0x6
#define	T_INSN_ERROR	0x7
#define	T_INSN_ILLEGAL	0x8
#define	T_PRIV_OPCODE	0x9
#define	T_FP_DISABLED	0xa
#define	T_FP_IEEE	0xb
#define	T_FP_OTHER	0xc
#define	T_TAG_OVFLW	0xd
#define	T_DIVIDE	0xe
#define	T_DATA_EXCPTN	0xf
#define	T_DATA_ERROR	0x10
#define	T_ALIGN		0x11
#define	T_ALIGN_LDDF	0x12
#define	T_ALIGN_STDF	0x13
#define	T_PRIV_ACTION	0x14
#define	T_INTERRUPT	0x15
#define	T_WATCH_PHYS	0x16
#define	T_WATCH_VIRT	0x17
#define	T_ECC		0x18
#define	T_IMMU_MISS	0x19
#define	T_DMMU_MISS	0x1a
#define	T_DMMU_PROT	0x1b
#define	T_SPILL		0x1c
#define	T_FILL		0x1d
#define	T_BREAKPOINT	0x1e

/* Flag combined with a type above for traps taken in kernel mode. */
#define	T_KERNEL	0x20

#ifndef LOCORE
/* Human readable messages indexed by trap type. */
extern const char *trap_msg[];
#endif
#endif /* !_MACHINE_TRAP_H_ */

220
sys/sparc64/include/tsb.h Normal file
View File

@ -0,0 +1,220 @@
/*-
* Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Berkeley Software Design Inc's name may not be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: BSDI: pmap.v9.h,v 1.10.2.6 1999/08/23 22:18:44 cp Exp
* $FreeBSD$
*/
#ifndef _MACHINE_TSB_H_
#define _MACHINE_TSB_H_
/* Fixed virtual locations of the kernel and user tsbs. */
#define	TSB_KERNEL_MIN_ADDRESS	(0x6e000000000)
#define	TSB_USER_MIN_ADDRESS	(0x6f000000000)

#define	TSB_MASK_WIDTH	(6)

/* Bucket geometry: 4 sttes per primary bucket, 8 per secondary. */
#define	TSB_PRIMARY_BUCKET_SHIFT	(2)
#define	TSB_PRIMARY_BUCKET_SIZE	(1 << TSB_PRIMARY_BUCKET_SHIFT)
#define	TSB_PRIMARY_BUCKET_MASK	(TSB_PRIMARY_BUCKET_SIZE - 1)
#define	TSB_SECONDARY_BUCKET_SHIFT	(3)
#define	TSB_SECONDARY_BUCKET_SIZE	(1 << TSB_SECONDARY_BUCKET_SHIFT)
#define	TSB_SECONDARY_BUCKET_MASK	(TSB_SECONDARY_BUCKET_SIZE - 1)

#define	TSB_SECONDARY_STTE_SHIFT \
	(STTE_SHIFT + TSB_SECONDARY_BUCKET_SHIFT)
#define	TSB_SECONDARY_STTE_MASK	(1 << TSB_SECONDARY_STTE_SHIFT)

#define	TSB_LEVEL1_BUCKET_MASK \
	((TSB_SECONDARY_BUCKET_MASK & ~TSB_PRIMARY_BUCKET_MASK) << \
	    (PAGE_SHIFT - TSB_PRIMARY_BUCKET_SHIFT))
#define	TSB_LEVEL1_BUCKET_SHIFT \
	(TSB_BUCKET_SPREAD_SHIFT + \
	    (TSB_SECONDARY_BUCKET_SHIFT - TSB_PRIMARY_BUCKET_SHIFT))

#define	TSB_BUCKET_SPREAD_SHIFT	(2)

#define	TSB_DEPTH	(7)

/* The kernel tsb is one locked 4M page covering TSB_KERNEL_MB of KVA. */
#define	TSB_KERNEL_PAGES	(1)
#define	TSB_KERNEL_SIZE	(TSB_KERNEL_PAGES * PAGE_SIZE_4M)
#define	TSB_KERNEL_MB	(512)
#define	TSB_KERNEL_VM_RANGE	(TSB_KERNEL_MB * (1 << 20))
#define	TSB_KERNEL_RANGE \
	((TSB_KERNEL_VM_RANGE / PAGE_SIZE) * sizeof (struct stte))
#define	TSB_KERNEL_MASK \
	((TSB_KERNEL_RANGE / sizeof (struct stte)) - 1)

#define	TSB_1M_STTE_SHIFT	(21)
/* XXX was "1 << TSB_1M_SHIFT", which is undefined. */
#define	TSB_1M_STTE_SIZE	(1 << TSB_1M_STTE_SHIFT)

#define	TSB_SIZE_REG	(7)
/* Physical address of the kernel tsb. */
extern vm_offset_t tsb_kernel_phys;

/*
 * Virtual base of the tsb for a given level: level 0 sits at
 * TSB_USER_MIN_ADDRESS, each deeper level starts at an offset that
 * grows geometrically with the level (matching the per-level mask
 * width below).
 */
static __inline struct stte *
tsb_base(u_int level)
{
	vm_offset_t base;
	size_t len;

	if (level == 0)
		base = TSB_USER_MIN_ADDRESS;
	else {
		len = 1UL << ((level * TSB_BUCKET_SPREAD_SHIFT) +
		    TSB_MASK_WIDTH + TSB_SECONDARY_BUCKET_SHIFT +
		    STTE_SHIFT);
		base = TSB_USER_MIN_ADDRESS + len;
	}
	return (struct stte *)base;
}
/*
 * Per-level tsb geometry.  Level 0 (the primary tsb) uses the smaller
 * bucket (4 sttes) and a dedicated locked tlb slot; all deeper levels
 * use the secondary parameters.
 */
static __inline u_long
tsb_bucket_shift(u_int level)
{
	if (level == 0)
		return (TSB_PRIMARY_BUCKET_SHIFT);
	return (TSB_SECONDARY_BUCKET_SHIFT);
}

/* Number of sttes per bucket at this level. */
static __inline u_long
tsb_bucket_size(u_int level)
{
	return (1UL << tsb_bucket_shift(level));
}

static __inline u_long
tsb_bucket_mask(u_int level)
{
	return (tsb_bucket_size(level) - 1);
}

/* Width in bits of the bucket index; grows with each level. */
static __inline u_long
tsb_mask_width(u_int level)
{
	return (TSB_MASK_WIDTH + (level * TSB_BUCKET_SPREAD_SHIFT));
}

static __inline u_long
tsb_mask(u_int level)
{
	return ((1UL << tsb_mask_width(level)) - 1);
}

/* Locked tlb slot that maps the tsb for this level. */
static __inline u_int
tsb_tlb_slot(u_int level)
{
	if (level == 0)
		return (TLB_SLOT_TSB_USER_PRIMARY);
	return (TLB_SLOT_TSB_USER_SECONDARY);
}
/*
 * Translate the virtual address of a tsb entry to a physical address.
 * For the kernel tsb the mapping is linear from tsb_kernel_phys; for a
 * user tsb the physical page comes from the pmap's own tsb tte (first
 * page) or from the locked tlb slot mapping the secondary tsb.
 * NOTE(review): the extra "<< STTE_SHIFT" applied to what is already a
 * byte offset (va - TSB_KERNEL_MIN_ADDRESS) looks doubled -- verify
 * against the bootstrap layout.
 */
static __inline vm_offset_t
tsb_stte_vtophys(pmap_t pm, struct stte *stp)
{
	vm_offset_t va;
	u_long data;

	va = (vm_offset_t)stp;
	if (pm == kernel_pmap)
		return (tsb_kernel_phys +
		    ((va - TSB_KERNEL_MIN_ADDRESS) << STTE_SHIFT));

	if (trunc_page(va) == TSB_USER_MIN_ADDRESS)
		data = pm->pm_stte.st_tte.tte_data;
	else
		data = ldxa(TLB_DAR_SLOT(tsb_tlb_slot(1)),
		    ASI_DTLB_DATA_ACCESS_REG);
	return ((vm_offset_t)((TD_PA(data)) + (va & PAGE_MASK)));
}

/* Bucket for a virtual page number at the given level. */
static __inline struct stte *
tsb_vpntobucket(vm_offset_t vpn, u_int level)
{
	return (tsb_base(level) +
	    ((vpn & tsb_mask(level)) << tsb_bucket_shift(level)));
}

/* Bucket for a virtual address at the given level. */
static __inline struct stte *
tsb_vtobucket(vm_offset_t va, u_int level)
{
	return (tsb_vpntobucket(va >> PAGE_SHIFT, level));
}

/*
 * Kernel tsb entry for a virtual page number.  The kernel tsb is
 * direct mapped (one stte per page, no buckets).
 */
static __inline struct stte *
tsb_kvpntostte(vm_offset_t vpn)
{
	struct stte *stp;

	stp = (struct stte *)(TSB_KERNEL_MIN_ADDRESS +
	    ((vpn & TSB_KERNEL_MASK) << STTE_SHIFT));
	return (stp);
}

static __inline struct stte *
tsb_kvtostte(vm_offset_t va)
{
	return (tsb_kvpntostte(va >> PAGE_SHIFT));
}
/* Enter a tte for a kernel va into the kernel tsb and its pv list. */
static __inline void
tsb_tte_enter_kernel(vm_offset_t va, struct tte tte)
{
	struct stte *stp;

	stp = tsb_kvtostte(va);
	stp->st_tte = tte;
#if 1
	pv_insert(kernel_pmap, TD_PA(tte.tte_data), va, stp);
#endif
}

/* Invalidate the kernel tsb entry for va and unlink it from its pv list. */
static __inline void
tsb_remove_kernel(vm_offset_t va)
{
	struct stte *stp;

	stp = tsb_kvtostte(va);
	tte_invalidate(&stp->st_tte);
#if 1
	pv_remove_virt(stp);
#endif
}

struct stte *tsb_get_bucket(pmap_t pm, u_int level, vm_offset_t va,
			    int allocate);
int tsb_miss(pmap_t pm, u_int type, struct mmuframe *mf);
struct tte tsb_page_alloc(pmap_t pm, vm_offset_t va);
void tsb_page_fault(pmap_t pm, int level, vm_offset_t va, struct stte *stp);
void tsb_page_init(void *va, int level);
struct stte *tsb_stte_lookup(pmap_t pm, vm_offset_t va);
struct stte *tsb_stte_promote(pmap_t pm, vm_offset_t va, struct stte *stp);
void tsb_stte_remove(struct stte *stp);
struct tte tsb_page_alloc(pmap_t pm, vm_offset_t va);
struct stte *tsb_tte_enter(pmap_t pm, vm_offset_t va, struct tte tte);
void tsb_tte_local_remove(struct tte *tp);

/* Pages set aside for tsb allocation during bootstrap. */
extern vm_offset_t tsb_bootstrap_pages[];
extern int tsb_bootstrap_index;
#endif /* !_MACHINE_TSB_H_ */

146
sys/sparc64/include/tte.h Normal file
View File

@ -0,0 +1,146 @@
/*-
* Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Berkeley Software Design Inc's name may not be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: BSDI: pmap.v9.h,v 1.10.2.6 1999/08/23 22:18:44 cp Exp
* $FreeBSD$
*/
#ifndef _MACHINE_TTE_H_
#define _MACHINE_TTE_H_
#include <machine/atomic.h>
#define TTE_SHIFT 4
#define STTE_SHIFT 5
#define TT_CTX_SHIFT (48)
#define TT_VA_SHIFT (22)
#define TT_VPN_SHIFT (9)
#define TT_CTX_SIZE (13)
#define TT_VA_SIZE (42)
#define TT_CTX_MASK ((1L << TT_CTX_SIZE) - 1)
#define TT_VA_MASK ((1L << TT_VA_SIZE) - 1)
#define TT_G (1L << 63)
#define TT_CTX(ctx) (((u_long)(ctx) << TT_CTX_SHIFT) & TT_CTX_MASK)
#define TT_VA(va) (((u_long)(va) >> TT_VA_SHIFT) & TT_VA_MASK)
#define TD_SIZE_SHIFT (61)
#define TD_SOFT2_SHIFT (50)
#define TD_DIAG_SHIFT (41)
#define TD_PA_SHIFT (13)
#define TD_SOFT_SHIFT (7)
#define TD_SIZE_SIZE (2)
#define TD_SOFT2_SIZE (9)
#define TD_DIAG_SIZE (9)
#define TD_PA_SIZE (28)
#define TD_SOFT_SIZE (6)
#define TD_SIZE_MASK (((1L << TD_SIZE_SIZE) - 1) << TD_SIZE_SHIFT)
#define TD_SOFT2_MASK (((1L << TD_SOFT2_SIZE) - 1) << TD_SOFT2_SHIFT)
#define TD_DIAG_MASK (((1L << TD_DIAG_SIZE) - 1) << TD_DIAG_SHIFT)
#define TD_PA_MASK (((1L << TD_PA_SIZE) - 1) << TD_PA_SHIFT)
#define TD_SOFT_MASK (((1L << TD_SOFT_SIZE) - 1) << TD_SOFT_SHIFT)
#define TD_VA_LOW_SHIFT TD_SOFT2_SHIFT
#define TD_VA_LOW_MASK TD_SOFT2_MASK
#define TS_EXEC (1L << 3)
#define TS_MOD (1L << 2)
#define TS_REF (1L << 1)
#define TS_TSB (1L << 0)
#define TD_V (1L << 63)
#define TD_8K (0L << TD_SIZE_SHIFT)
#define TD_64K (1L << TD_SIZE_SHIFT)
#define TD_512K (2L << TD_SIZE_SHIFT)
#define TD_4M (3L << TD_SIZE_SHIFT)
#define TD_NFO (1L << 60)
#define TD_IE (1L << 59)
#define TD_VPN_LOW(vpn) ((vpn << TD_SOFT2_SHIFT) & TD_SOFT2_MASK)
#define TD_VA_LOW(va) (TD_VPN_LOW((va) >> PAGE_SHIFT))
#define TD_PA(pa) ((pa) & TD_PA_MASK)
#define TD_EXEC (TS_EXEC << TD_SOFT_SHIFT)
#define TD_MOD (TS_MOD << TD_SOFT_SHIFT)
#define TD_REF (TS_REF << TD_SOFT_SHIFT)
#define TD_TSB (TS_TSB << TD_SOFT_SHIFT)
#define TD_L (1L << 6)
#define TD_CP (1L << 5)
#define TD_CV (1L << 4)
#define TD_E (1L << 3)
#define TD_P (1L << 2)
#define TD_W (1L << 1)
#define TD_G (1L << 0)
/* A translation table entry, in the format loaded by the MMU. */
struct tte {
	u_long	tte_tag;
	u_long	tte_data;
};

/* A tte plus pv list linkage; the links hold physical addresses. */
struct stte {
	struct	tte st_tte;
	vm_offset_t st_next;
	vm_offset_t st_prev;
};
/*
 * Extract the context number from a tsb tag.  The context lives in
 * bits 48-60, so shift it down before applying the (unshifted) field
 * mask; the previous form masked the raw tag first and therefore
 * always returned 0.
 */
static __inline u_int
tte_get_ctx(struct tte tte)
{
	return ((tte.tte_tag >> TT_CTX_SHIFT) & TT_CTX_MASK);
}
/*
 * Reassemble the full virtual page number: the high bits come from the
 * tag (which stores va >> 22) and the low 9 bits from the soft2 field
 * of the data word.
 */
static __inline vm_offset_t
tte_get_vpn(struct tte tte)
{
	return (((tte.tte_tag & TT_VA_MASK) << TT_VPN_SHIFT) |
	    ((tte.tte_data & TD_VA_LOW_MASK) >> TD_VA_LOW_SHIFT));
}

/* Full virtual address mapped by the tte. */
static __inline vm_offset_t
tte_get_va(struct tte tte)
{
	return (tte_get_vpn(tte) << PAGE_SHIFT);
}

/* Atomically clear the valid bit in the data word. */
static __inline void
tte_invalidate(struct tte *tp)
{
	atomic_clear_long(&tp->tte_data, TD_V);
}

/*
 * Does this (valid) tte map va?  Compares both the high va bits in the
 * tag and the low vpn bits in the data word.
 */
static __inline int
tte_match(struct tte tte, vm_offset_t va)
{
	return ((tte.tte_data & TD_V) != 0 &&
	    ((tte.tte_tag ^ TT_VA(va)) & TT_VA_MASK) == 0 &&
	    ((tte.tte_data ^ TD_VA_LOW(va)) & TD_VA_LOW_MASK) == 0);
}
#endif /* !_MACHINE_TTE_H_ */

View File

@ -62,23 +62,23 @@
*/
#define MAXSLP 20
#define VM_MAXUSER_ADDRESS 0
#define VM_MAXUSER_ADDRESS (0x5ffffffffff)
#define USRSTACK VM_MAXUSER_ADDRESS
#define VM_MIN_ADDRESS 0
#define VM_MIN_ADDRESS (0)
/*
* Virtual size (bytes) for various kernel submaps.
*/
#ifndef VM_KMEM_SIZE
#define VM_KMEM_SIZE (12*1024*1024)
#define VM_KMEM_SIZE (12*1024*1024)
#endif
#define VM_MIN_KERNEL_ADDRESS (0)
#define VM_MAX_KERNEL_ADDRESS (0)
#define VM_MIN_KERNEL_ADDRESS (0x60000000000)
#define VM_MAX_KERNEL_ADDRESS (0x6e000000000)
#define KERNBASE (0)
#define KERNBASE (0x60000000000)
/*
* Initial pagein size of beginning of executable file.

View File

@ -27,6 +27,23 @@
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/kernel.h>
/* Dump and root devices; neither is configured yet on this port. */
dev_t dumpdev = NODEV;
dev_t rootdev = NODEV;

static void configure(void *);
/* Run device autoconfiguration at the configure stage of boot. */
SYSINIT(configure, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure, NULL);

/*
 * Attach the "upa" bus under the root of the device tree, probe and
 * attach everything below it, finish console initialization and leave
 * the cold-boot state.
 */
static void
configure(void *v)
{
	device_add_child(root_bus, "upa", 0);
	root_bus_configure();
	cninit_finish();
	cold = 0;
}

View File

@ -32,7 +32,7 @@
void
cpu_initclocks(void)
{
TODO;
/* XXX */
}
void
@ -44,7 +44,7 @@ DELAY(int n)
void
inittodr(time_t base)
{
TODO;
/* XXX */
}
void

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,110 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/reboot.h>
#include <sys/cons.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <ddb/ddb.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <setjmp.h>
static jmp_buf *db_nofault = 0;
extern jmp_buf db_jmpbuf;
int db_active;
db_regs_t ddb_regs;
static jmp_buf db_global_jmpbuf;
static int db_global_jmpbuf_valid;
/*
 * Enter the kernel debugger in response to a trap.  Returns 1 to tell
 * the caller the trap was handled.
 */
int
kdb_trap(struct trapframe *tf)
{
	struct kdbframe *kf;

	/*
	 * If the debugger is already active this is a recursive entry;
	 * jump back into the established command loop below.
	 */
	if (db_global_jmpbuf_valid)
		longjmp(db_global_jmpbuf, 1);
	/* Make the trap frame available to the register accessors. */
	ddb_regs = *tf;
	kf = ddb_regs.tf_arg;
	/* Start with the trapping frame as the current frame. */
	kf->kf_cfp = kf->kf_fp;
	/*
	 * Recursive entries longjmp back here, re-running the command
	 * loop; the setjmp return value is deliberately ignored.
	 */
	setjmp(db_global_jmpbuf);
	db_global_jmpbuf_valid = TRUE;
	db_active++;
	/* Switch the console into debugger mode for the command loop. */
	cndbctl(TRUE);
	db_trap(tf->tf_type, 0);
	cndbctl(FALSE);
	db_active--;
	db_global_jmpbuf_valid = FALSE;
	return (1);
}
/*
 * Read size bytes from kernel virtual address addr into data, with the
 * fault-recovery jmp_buf armed for the duration of the copy.
 */
void
db_read_bytes(vm_offset_t addr, size_t size, char *data)
{
	char *p;
	size_t i;

	db_nofault = &db_jmpbuf;
	p = (char *)addr;
	for (i = 0; i < size; i++)
		data[i] = p[i];
	db_nofault = NULL;
}
/*
 * Write size bytes from data to kernel virtual address addr, with the
 * fault-recovery jmp_buf armed for the duration of the copy.
 */
void
db_write_bytes(vm_offset_t addr, size_t size, char *data)
{
	char *p;
	size_t i;

	db_nofault = &db_jmpbuf;
	p = (char *)addr;
	for (i = 0; i < size; i++)
		p[i] = data[i];
	db_nofault = NULL;
}
DB_COMMAND(reboot, db_reboot)
{
cpu_reset();
}

View File

@ -0,0 +1,275 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker_set.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <machine/cpu.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <ddb/ddb.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_watch.h>
#define INKERNEL(va) \
((va) >= VM_MIN_KERNEL_ADDRESS && (va) <= VM_MAX_KERNEL_ADDRESS)
static db_varfcn_t db_show_in0;
static db_varfcn_t db_show_in1;
static db_varfcn_t db_show_in2;
static db_varfcn_t db_show_in3;
static db_varfcn_t db_show_in4;
static db_varfcn_t db_show_in5;
static db_varfcn_t db_show_in6;
static db_varfcn_t db_show_in7;
static db_varfcn_t db_show_local0;
static db_varfcn_t db_show_local1;
static db_varfcn_t db_show_local2;
static db_varfcn_t db_show_local3;
static db_varfcn_t db_show_local4;
static db_varfcn_t db_show_local5;
static db_varfcn_t db_show_local6;
static db_varfcn_t db_show_local7;
static void db_print_trap(struct trapframe *);
extern char tl1_trap[];
/*
 * Register table for ddb.  The globals and trap state registers come
 * straight out of ddb_regs; the ins and locals live in the current stack
 * frame and are accessed through per-register accessor functions.
 */
struct db_variable db_regs[] = {
	{ "g0", &ddb_regs.tf_global[0], FCN_NULL },
	{ "g1", &ddb_regs.tf_global[1], FCN_NULL },
	{ "g2", &ddb_regs.tf_global[2], FCN_NULL },
	{ "g3", &ddb_regs.tf_global[3], FCN_NULL },
	{ "g4", &ddb_regs.tf_global[4], FCN_NULL },
	{ "g5", &ddb_regs.tf_global[5], FCN_NULL },
	{ "g6", &ddb_regs.tf_global[6], FCN_NULL },
	{ "g7", &ddb_regs.tf_global[7], FCN_NULL },
	{ "i0", NULL, db_show_in0 },
	{ "i1", NULL, db_show_in1 },
	{ "i2", NULL, db_show_in2 },
	{ "i3", NULL, db_show_in3 },
	{ "i4", NULL, db_show_in4 },
	{ "i5", NULL, db_show_in5 },
	{ "i6", NULL, db_show_in6 },
	{ "i7", NULL, db_show_in7 },
	{ "l0", NULL, db_show_local0 },
	{ "l1", NULL, db_show_local1 },
	{ "l2", NULL, db_show_local2 },
	{ "l3", NULL, db_show_local3 },
	{ "l4", NULL, db_show_local4 },
	{ "l5", NULL, db_show_local5 },
	{ "l6", NULL, db_show_local6 },
	{ "l7", NULL, db_show_local7 },
	{ "tstate", &ddb_regs.tf_tstate, FCN_NULL },
	{ "tpc", &ddb_regs.tf_tpc, FCN_NULL },
	{ "tnpc", &ddb_regs.tf_tnpc, FCN_NULL }
};
/* One past the end of db_regs, for ddb's table iteration. */
struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
/*
 * ddb backtrace command.  Walks the chain of stack frames starting from
 * the current kdb frame (or from addr if one was given), printing a
 * symbolic line per frame.  Frames belonging to the trap handler are
 * recognized by their saved pc and printed as trap records instead,
 * with the following frame's pc taken from the trap frame's %tpc.
 */
void
db_stack_trace_cmd(db_expr_t addr, boolean_t have_addr, db_expr_t count,
    char *modif)
{
	struct trapframe *tf;
	struct kdbframe *kfp;
	struct frame *fp;
	const char *name;
	c_db_sym_t sym;
	db_expr_t offset;
	db_expr_t value;
	db_addr_t nfp;
	db_addr_t npc;
	db_addr_t pc;
	int trap;

	trap = 0;
	npc = 0;
	/* No count given: cap the walk at 1024 frames. */
	if (count == -1)
		count = 1024;
	if (!have_addr) {
		kfp = DDB_REGS->tf_arg;
		fp = (struct frame *)(kfp->kf_cfp + SPOFF);
	} else
		fp = (struct frame *)(addr + SPOFF);
	/* Stop when the frame pointer leaves the kernel address range. */
	while (count-- && INKERNEL((vm_offset_t)fp)) {
		pc = (db_addr_t)db_get_value((db_addr_t)&fp->f_pc,
		    sizeof(db_addr_t), FALSE);
		/* The previous frame was a trap; use its saved %tpc. */
		if (trap) {
			pc = npc;
			trap = 0;
		}
		sym = db_search_symbol(pc, DB_STGY_ANY, &offset);
		db_symbol_values(sym, &name, &value);
		if (name == NULL)
			name = "(null)";
		if (value == (u_long)tl1_trap) {
			/*
			 * Trap handler frame: the trap frame sits just
			 * above the callee's register window on the stack.
			 */
			nfp = db_get_value((db_addr_t)&fp->f_fp,
			    sizeof(u_long), FALSE) + SPOFF;
			tf = (struct trapframe *)(nfp + sizeof(*fp));
			npc = db_get_value((db_addr_t)&tf->tf_tpc,
			    sizeof(u_long), FALSE);
			db_print_trap(tf);
			trap = 1;
		} else {
			db_printf("%s() at ", name);
			db_printsym(pc, DB_STGY_PROC);
			db_printf("\n");
		}
		/* Follow the saved frame pointer to the caller's frame. */
		fp = (struct frame *)(db_get_value((db_addr_t)&fp->f_fp,
		    sizeof(u_long), FALSE) + SPOFF);
	}
}
/*
 * Print a one-line description of a trap frame encountered during a
 * stack trace: kernel/user, trap type name, and for alignment faults
 * the faulting virtual address from the saved mmuframe.
 */
static void
db_print_trap(struct trapframe *tf)
{
	struct mmuframe *mf;
	u_long type;
	u_long va;

	type = db_get_value((db_addr_t)&tf->tf_type, sizeof(u_long), FALSE);
	db_printf("-- %s trap (%s) -- ", type & T_KERNEL ? "kernel" : "user",
	    trap_msg[type & ~T_KERNEL]);
	switch (type & ~T_KERNEL) {
	case T_ALIGN:
		/* tf_arg points at the mmuframe saved by tl1_sfsr_trap. */
		mf = (struct mmuframe *)db_get_value((db_addr_t)&tf->tf_arg,
		    sizeof(void *), FALSE);
		va = (u_long)db_get_value((db_addr_t)&mf->mf_sfar,
		    sizeof(u_long), FALSE);
		db_printf("va=%#lx", va);
		break;
	default:
		break;
	}
	db_printf("\n");
}
/*
 * ddb "down" command: move the current frame one step toward the
 * innermost (trapping) frame.  Since frames are only linked outward,
 * walk from the innermost frame until we find the one whose saved
 * frame pointer is the current frame.
 */
DB_COMMAND(down, db_frame_down)
{
	struct kdbframe *kfp;
	struct frame *fp;
	u_long cfp;
	u_long ofp;

	kfp = DDB_REGS->tf_arg;
	fp = (struct frame *)(kfp->kf_fp + SPOFF);
	cfp = kfp->kf_cfp;
	for (;;) {
		/* Walked off the kernel stack: nothing below the start. */
		if (!INKERNEL((u_long)fp)) {
			db_printf("already at bottom\n");
			break;
		}
		ofp = db_get_value((db_addr_t)&fp->f_fp, sizeof(u_long),
		    FALSE);
		/* fp links to the current frame; make fp the new current. */
		if (ofp == cfp) {
			kfp->kf_cfp = (u_long)fp - SPOFF;
			break;
		}
		fp = (struct frame *)(ofp + SPOFF);
	}
}
/*
 * ddb "up" command: move the current frame one step toward the
 * outermost caller by following the current frame's saved frame
 * pointer.
 */
DB_COMMAND(up, db_frame_up)
{
	struct kdbframe *kfp;
	struct frame *cfp;

	kfp = DDB_REGS->tf_arg;
	cfp = (struct frame *)(kfp->kf_cfp + SPOFF);
	/* Current frame already outside the kernel stack: can't go up. */
	if (!INKERNEL((u_long)cfp)) {
		db_printf("already at top\n");
		return;
	}
	kfp->kf_cfp = db_get_value((db_addr_t)&cfp->f_fp, sizeof(u_long),
	    FALSE);
}
/*
 * Generate a ddb variable accessor for one in or local register.  The
 * register value lives in the current stack frame (window registers are
 * spilled to the stack), so it is read or written through db_get_value/
 * db_put_value rather than taken from ddb_regs.
 */
#define DB_SHOW_REG(name, num) \
static int \
db_show_ ## name ## num(struct db_variable *dp, db_expr_t *vp, int op) \
{ \
	struct kdbframe *kfp; \
	struct frame *fp; \
 \
	kfp = DDB_REGS->tf_arg; \
	fp = (struct frame *)(kfp->kf_cfp + SPOFF); \
	if (op == DB_VAR_GET) \
		*vp = db_get_value((db_addr_t)&fp->f_ ## name ## [num], \
		    sizeof(u_long), FALSE); \
	else \
		db_put_value((db_addr_t)&fp->f_ ## name ## [num], \
		    sizeof(u_long), *vp); \
	return (0); \
}

/* Accessors for %i0-%i7. */
DB_SHOW_REG(in, 0)
DB_SHOW_REG(in, 1)
DB_SHOW_REG(in, 2)
DB_SHOW_REG(in, 3)
DB_SHOW_REG(in, 4)
DB_SHOW_REG(in, 5)
DB_SHOW_REG(in, 6)
DB_SHOW_REG(in, 7)
/* Accessors for %l0-%l7. */
DB_SHOW_REG(local, 0)
DB_SHOW_REG(local, 1)
DB_SHOW_REG(local, 2)
DB_SHOW_REG(local, 3)
DB_SHOW_REG(local, 4)
DB_SHOW_REG(local, 5)
DB_SHOW_REG(local, 6)
DB_SHOW_REG(local, 7)
/*
 * Hardware watchpoint support is not implemented on this port yet;
 * report failure for set/clear and list nothing.
 */
int
db_md_set_watchpoint(db_expr_t addr, db_expr_t size)
{
	return (-1);
}

int
db_md_clr_watchpoint(db_expr_t addr, db_expr_t size)
{
	return (-1);
}

void
db_md_list_watchpoints(void)
{
	return;
}

View File

@ -0,0 +1,603 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include "opt_ddb.h"
#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/trap.h>
#include "assym.s"
#define SPILL(storer, base, asi) \
storer %l0, [base + F_L0] asi ; \
storer %l1, [base + F_L1] asi ; \
storer %l2, [base + F_L2] asi ; \
storer %l3, [base + F_L3] asi ; \
storer %l4, [base + F_L4] asi ; \
storer %l5, [base + F_L5] asi ; \
storer %l6, [base + F_L6] asi ; \
storer %l7, [base + F_L7] asi ; \
storer %i0, [base + F_I0] asi ; \
storer %i1, [base + F_I1] asi ; \
storer %i2, [base + F_I2] asi ; \
storer %i3, [base + F_I3] asi ; \
storer %i4, [base + F_I4] asi ; \
storer %i5, [base + F_I5] asi ; \
storer %i6, [base + F_I6] asi ; \
storer %i7, [base + F_I7] asi
#define FILL(loader, base, asi) \
loader [base + F_L0] asi, %l0 ; \
loader [base + F_L1] asi, %l1 ; \
loader [base + F_L2] asi, %l2 ; \
loader [base + F_L3] asi, %l3 ; \
loader [base + F_L4] asi, %l4 ; \
loader [base + F_L5] asi, %l5 ; \
loader [base + F_L6] asi, %l6 ; \
loader [base + F_L7] asi, %l7 ; \
loader [base + F_I0] asi, %i0 ; \
loader [base + F_I1] asi, %i1 ; \
loader [base + F_I2] asi, %i2 ; \
loader [base + F_I3] asi, %i3 ; \
loader [base + F_I4] asi, %i4 ; \
loader [base + F_I5] asi, %i5 ; \
loader [base + F_I6] asi, %i6 ; \
loader [base + F_I7] asi, %i7
DATA(intrnames)
.asciz "foo"
DATA(eintrnames)
DATA(intrcnt)
.long 0
DATA(eintrcnt)
.macro clean_window
clr %o0
clr %o1
clr %o2
clr %o3
clr %o4
clr %o5
clr %o6
clr %o7
clr %l0
clr %l1
clr %l2
clr %l3
clr %l4
clr %l5
clr %l6
rdpr %cleanwin, %l7
inc %l7
wrpr %l7, 0, %cleanwin
clr %l7
retry
.align 128
.endm
.macro tl0_gen type
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov \type, %o0
.align 32
.endm
.macro tl0_wide type
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov \type, %o0
.align 128
.endm
.macro tl0_reserved count
.rept \count
tl0_gen T_RESERVED
.endr
.endm
.macro tl0_intr_level
tl0_reserved 15
.endm
.macro tl0_intr_vector
tl0_gen 0
.endm
.macro tl0_immu_miss
tl0_wide T_IMMU_MISS
.endm
.macro tl0_dmmu_miss
tl0_wide T_DMMU_MISS
.endm
.macro tl0_dmmu_prot
tl0_wide T_DMMU_PROT
.endm
.macro tl0_spill_0_n
wr %g0, ASI_AIUP, %asi
SPILL(stxa, %sp + SPOFF, %asi)
saved
retry
.align 128
.endm
.macro tl0_spill_bad count
.rept \count
tl0_wide T_SPILL
.endr
.endm
.macro tl0_fill_0_n
wr %g0, ASI_AIUP, %asi
FILL(ldxa, %sp + SPOFF, %asi)
restored
retry
.align 128
.endm
.macro tl0_fill_bad count
.rept \count
tl0_wide T_FILL
.endr
.endm
.macro tl0_soft count
tl0_reserved \count
.endm
.macro tl1_gen type
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov \type | T_KERNEL, %o0
.align 32
.endm
.macro tl1_wide type
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov \type | T_KERNEL, %o0
.align 128
.endm
.macro tl1_reserved count
.rept \count
tl1_gen T_RESERVED
.endr
.endm
.macro tl1_insn_excptn
rdpr %pstate, %g1
wrpr %g1, PSTATE_MG | PSTATE_AG, %pstate
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov T_INSN_EXCPTN | T_KERNEL, %o0
.align 32
.endm
.macro tl1_align
b %xcc, tl1_sfsr_trap
nop
.align 32
.endm
/*
 * Common handler for kernel alignment faults: read and clear the D-MMU
 * synchronous fault address and status registers, save them in an
 * mmuframe on the stack, and enter tl1_trap with T_ALIGN, passing the
 * mmuframe address as the trap argument.
 */
ENTRY(tl1_sfsr_trap)
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g1	! fault address
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g2	! fault status
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi	! clear fault status
	membar	#Sync
	save	%sp, -(CCFSZ + MF_SIZEOF), %sp
	stx	%g1, [%sp + SPOFF + CCFSZ + MF_SFAR]
	stx	%g2, [%sp + SPOFF + CCFSZ + MF_SFSR]
	mov	T_ALIGN | T_KERNEL, %o0
	b	%xcc, tl1_trap
	 add	%sp, SPOFF + CCFSZ, %o1		! delay: mmuframe pointer arg
END(tl1_sfsr_trap)
.macro tl1_intr_level
tl1_reserved 15
.endm
.macro tl1_intr_vector
rdpr %pstate, %g1
wrpr %g1, PSTATE_IG | PSTATE_AG, %pstate
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov T_INTERRUPT | T_KERNEL, %o0
.align 8
.endm
.macro tl1_immu_miss
rdpr %pstate, %g1
wrpr %g1, PSTATE_MG | PSTATE_AG, %pstate
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov T_IMMU_MISS | T_KERNEL, %o0
.align 128
.endm
.macro tl1_dmmu_miss
/*
* Load the target tte tag, and extract the context. If the context
* is non-zero handle as user space access. In either case, load the
* tsb 8k pointer.
*/
ldxa [%g0] ASI_DMMU_TAG_TARGET_REG, %g1
srlx %g1, TT_CTX_SHIFT, %g2
brnz,pn %g2, 2f
ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g2
/*
* Convert the tte pointer to an stte pointer, and add extra bits to
* accomodate for large tsb.
*/
sllx %g2, STTE_SHIFT - TTE_SHIFT, %g2
#ifdef notyet
mov AA_DMMU_TAR, %g3
ldxa [%g3] ASI_DMMU, %g3
srlx %g3, TSB_1M_STTE_SHIFT, %g3
and %g3, TSB_KERNEL_MASK >> TSB_1M_STTE_SHIFT, %g3
sllx %g3, TSB_1M_STTE_SHIFT, %g3
add %g2, %g3, %g2
#endif
/*
* Load the tte, check that it's valid and that the tags match.
*/
ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
brgez,pn %g5, 2f
cmp %g4, %g1
bne %xcc, 2f
EMPTY
/*
* Set the refence bit, if its currently clear.
*/
andcc %g5, TD_REF, %g0
bnz %xcc, 1f
or %g5, TD_REF, %g1
stx %g1, [%g2 + ST_TTE + TTE_DATA]
/*
* If the mod bit is clear, clear the write bit too.
*/
1: andcc %g5, TD_MOD, %g1
movz %xcc, TD_W, %g1
andn %g5, %g1, %g5
/*
* Load the tte data into the TLB and retry the instruction.
*/
stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG
retry
/*
* For now just bail. This might cause a red state exception,
* but oh well.
*/
2: DEBUGGER()
.align 128
.endm
.macro tl1_dmmu_prot
rdpr %pstate, %g1
wrpr %g1, PSTATE_MG | PSTATE_AG, %pstate
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov T_DMMU_PROT | T_KERNEL, %o0
.align 128
.endm
.macro tl1_spill_0_n
SPILL(stx, %sp + SPOFF, EMPTY)
saved
retry
.align 128
.endm
.macro tl1_spill_bad count
.rept \count
tl1_wide T_SPILL
.endr
.endm
.macro tl1_fill_0_n
FILL(ldx, %sp + SPOFF, EMPTY)
restored
retry
.align 128
.endm
.macro tl1_fill_bad count
.rept \count
tl1_wide T_FILL
.endr
.endm
.macro tl1_breakpoint
b %xcc, tl1_breakpoint_trap
nop
.align 32
.endm
/*
 * Kernel breakpoint handler: flush all register windows to the stack so
 * the debugger can walk the frames, save the frame pointer in a
 * kdbframe, and enter tl1_trap with T_BREAKPOINT, passing the kdbframe
 * address as the trap argument.
 */
ENTRY(tl1_breakpoint_trap)
	save	%sp, -(CCFSZ + KF_SIZEOF), %sp
	flushw					! spill all windows to the stack
	stx	%fp, [%sp + SPOFF + CCFSZ + KF_FP]
	mov	T_BREAKPOINT | T_KERNEL, %o0
	b	%xcc, tl1_trap
	 add	%sp, SPOFF + CCFSZ, %o1		! delay: kdbframe pointer arg
END(tl1_breakpoint_trap)
.macro tl1_soft count
tl1_reserved \count
.endm
.sect .trap
.align 0x8000
.globl tl0_base
tl0_base:
tl0_reserved 1 ! 0x0 unused
tl0_power_on:
tl0_gen T_POWER_ON ! 0x1 power on reset
tl0_watchdog:
tl0_gen T_WATCHDOG ! 0x2 watchdog rest
tl0_reset_ext:
tl0_gen T_RESET_EXT ! 0x3 externally initiated reset
tl0_reset_soft:
tl0_gen T_RESET_SOFT ! 0x4 software initiated reset
tl0_red_state:
tl0_gen T_RED_STATE ! 0x5 red state exception
tl0_reserved 2 ! 0x6-0x7 reserved
tl0_insn_excptn:
tl0_gen T_INSN_EXCPTN ! 0x8 instruction access exception
tl0_reserved 1 ! 0x9 reserved
tl0_insn_error:
tl0_gen T_INSN_ERROR ! 0xa instruction access error
tl0_reserved 5 ! 0xb-0xf reserved
tl0_insn_illegal:
tl0_gen T_INSN_ILLEGAL ! 0x10 illegal instruction
tl0_priv_opcode:
tl0_gen T_PRIV_OPCODE ! 0x11 privileged opcode
tl0_reserved 14 ! 0x12-0x1f reserved
tl0_fp_disabled:
tl0_gen T_FP_DISABLED ! 0x20 floating point disabled
tl0_fp_ieee:
tl0_gen T_FP_IEEE ! 0x21 floating point exception ieee
tl0_fp_other:
tl0_gen T_FP_OTHER ! 0x22 floating point exception other
tl0_tag_ovflw:
tl0_gen T_TAG_OVFLW ! 0x23 tag overflow
tl0_clean_window:
clean_window ! 0x24 clean window
tl0_divide:
tl0_gen T_DIVIDE ! 0x28 division by zero
tl0_reserved 7 ! 0x29-0x2f reserved
tl0_data_excptn:
tl0_gen T_DATA_EXCPTN ! 0x30 data access exception
tl0_reserved 1 ! 0x31 reserved
tl0_data_error:
tl0_gen T_DATA_ERROR ! 0x32 data access error
tl0_reserved 1 ! 0x33 reserved
tl0_align:
tl0_gen T_ALIGN ! 0x34 memory address not aligned
tl0_align_lddf:
tl0_gen T_ALIGN_LDDF ! 0x35 lddf memory address not aligned
tl0_align_stdf:
tl0_gen T_ALIGN_STDF ! 0x36 stdf memory address not aligned
tl0_priv_action:
tl0_gen T_PRIV_ACTION ! 0x37 privileged action
tl0_reserved 9 ! 0x38-0x40 reserved
tl0_intr_level:
tl0_intr_level ! 0x41-0x4f interrupt level 1 to 15
tl0_reserved 16 ! 0x50-0x5f reserved
tl0_intr_vector:
tl0_intr_vector ! 0x60 interrupt vector
tl0_watch_phys:
tl0_gen T_WATCH_PHYS ! 0x61 physical address watchpoint
tl0_watch_virt:
tl0_gen T_WATCH_VIRT ! 0x62 virtual address watchpoint
tl0_ecc:
tl0_gen T_ECC ! 0x63 corrected ecc error
tl0_immu_miss:
tl0_immu_miss ! 0x64 fast instruction access mmu miss
tl0_dmmu_miss:
tl0_dmmu_miss ! 0x68 fast data access mmu miss
tl0_dmmu_prot:
tl0_dmmu_prot ! 0x6c fast data access protection
tl0_reserved 16 ! 0x70-0x7f reserved
tl0_spill_0_n:
tl0_spill_0_n ! 0x80 spill 0 normal
tl0_spill_bad:
tl0_spill_bad 15 ! 0x84-0xbf spill normal, other
tl0_fill_0_n:
tl0_fill_0_n ! 0xc0 fill 0 normal
tl0_fill_bad:
tl0_fill_bad 15 ! 0xc4-0xff fill normal, other
tl0_sun_syscall:
tl0_reserved 1 ! 0x100 sun system call
tl0_breakpoint:
tl0_gen T_BREAKPOINT ! 0x101 breakpoint
tl0_soft 126 ! 0x102-0x17f trap instruction
tl0_reserved 128 ! 0x180-0x1ff reserved
tl1_base:
tl1_reserved 1 ! 0x200 unused
tl1_power_on:
tl1_gen T_POWER_ON ! 0x201 power on reset
tl1_watchdog:
tl1_gen T_WATCHDOG ! 0x202 watchdog rest
tl1_reset_ext:
tl1_gen T_RESET_EXT ! 0x203 externally initiated reset
tl1_reset_soft:
tl1_gen T_RESET_SOFT ! 0x204 software initiated reset
tl1_red_state:
tl1_gen T_RED_STATE ! 0x205 red state exception
tl1_reserved 2 ! 0x206-0x207 reserved
tl1_insn_excptn:
tl1_insn_excptn ! 0x208 instruction access exception
tl1_reserved 1 ! 0x209 reserved
tl1_insn_error:
tl1_gen T_INSN_ERROR ! 0x20a instruction access error
tl1_reserved 5 ! 0x20b-0x20f reserved
tl1_insn_illegal:
tl1_gen T_INSN_ILLEGAL ! 0x210 illegal instruction
tl1_priv_opcode:
tl1_gen T_PRIV_OPCODE ! 0x211 privileged opcode
tl1_reserved 14 ! 0x212-0x21f reserved
tl1_fp_disabled:
tl1_gen T_FP_DISABLED ! 0x220 floating point disabled
tl1_fp_ieee:
tl1_gen T_FP_IEEE ! 0x221 floating point exception ieee
tl1_fp_other:
tl1_gen T_FP_OTHER ! 0x222 floating point exception other
tl1_tag_ovflw:
tl1_gen T_TAG_OVFLW ! 0x223 tag overflow
tl1_clean_window:
clean_window ! 0x224 clean window
tl1_divide:
tl1_gen T_DIVIDE ! 0x228 division by zero
tl1_reserved 7 ! 0x229-0x22f reserved
tl1_data_excptn:
tl1_gen T_DATA_EXCPTN ! 0x230 data access exception
tl1_reserved 1 ! 0x231 reserved
tl1_data_error:
tl1_gen T_DATA_ERROR ! 0x232 data access error
tl1_reserved 1 ! 0x233 reserved
tl1_align:
tl1_align ! 0x234 memory address not aligned
tl1_align_lddf:
tl1_gen T_ALIGN_LDDF ! 0x235 lddf memory address not aligned
tl1_align_stdf:
tl1_gen T_ALIGN_STDF ! 0x236 stdf memory address not aligned
tl1_priv_action:
tl1_gen T_PRIV_ACTION ! 0x237 privileged action
tl1_reserved 9 ! 0x238-0x240 reserved
tl1_intr_level:
tl1_intr_level ! 0x241-0x24f interrupt level 1 to 15
tl1_reserved 16 ! 0x250-0x25f reserved
tl1_intr_vector:
tl1_intr_vector ! 0x260 interrupt vector
tl1_watch_phys:
tl1_gen T_WATCH_PHYS ! 0x261 physical address watchpoint
tl1_watch_virt:
tl1_gen T_WATCH_VIRT ! 0x262 virtual address watchpoint
tl1_ecc:
tl1_gen T_ECC ! 0x263 corrected ecc error
tl1_immu_miss:
tl1_immu_miss ! 0x264 fast instruction access mmu miss
tl1_dmmu_miss:
tl1_dmmu_miss ! 0x268 fast data access mmu miss
tl1_dmmu_prot:
tl1_dmmu_prot ! 0x26c fast data access protection
tl1_reserved 16 ! 0x270-0x27f reserved
tl1_spill_0_n:
tl1_spill_0_n ! 0x280 spill 0 normal
tl1_spill_bad:
tl1_spill_bad 15 ! 0x284-0x2bf spill normal, other
tl1_fill_0_n:
tl1_fill_0_n ! 0x2c0 fill 0 normal
tl1_fill_bad:
tl1_fill_bad 15 ! 0x2c4-0x2ff fill normal, other
tl1_reserved 1 ! 0x300 trap instruction
tl1_breakpoint:
tl1_breakpoint ! 0x301 breakpoint
tl1_soft 126 ! 0x302-0x37f trap instruction
tl1_reserved 128 ! 0x380-0x3ff reserved
/*
 * User (trap level 0) trap handler; not implemented yet, so execute an
 * illegal instruction to force a visible failure.
 */
ENTRY(tl0_trap)
	illtrap
END(tl0_trap)
/*
 * void tl1_trap(u_long o0, u_long o1, u_long o2, u_long type)
 *
 * Common trap handler.  Builds a trapframe on the stack from the trap
 * state registers and the globals, calls the C trap() routine, then
 * restores the saved state and retries the trapping instruction.
 */
ENTRY(tl1_trap)
	sub	%sp, TF_SIZEOF, %sp		! room for the trapframe
	! Save the trap state registers before lowering %tl (they are
	! per-trap-level and would otherwise be lost).
	rdpr	%tstate, %l0
	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	rdpr	%tpc, %l1
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	rdpr	%tnpc, %l2
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	wrpr	%g0, 1, %tl
	! Switch to the normal globals and enable interrupts.
	rdpr	%pstate, %l7
	wrpr	%l7, PSTATE_AG | PSTATE_IE, %pstate
	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_ARG]
	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
	call	trap
	 add	%sp, CCFSZ + SPOFF, %o0		! delay: trapframe pointer arg
	! Restore the globals and the saved trap state.
	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
	! Back to the alternate globals with interrupts disabled.
	rdpr	%pstate, %o0
	wrpr	%o0, PSTATE_AG | PSTATE_IE, %pstate
	! NOTE(review): %tl is raised to 2 here although it was set to 1 on
	! entry -- confirm this matches the trap levels this handler is
	! entered at from the tl0/tl1 table entries.
	wrpr	%g0, 2, %tl
	wrpr	%l0, 0, %tstate
	wrpr	%l1, 0, %tpc
	wrpr	%l2, 0, %tnpc
	restore
	retry
END(tl1_trap)
/*
 * First code run by a newly forked process: move the fork_exit()
 * arguments from %l0-%l2 (presumably stashed there by cpu_fork() --
 * confirm) into the out registers and call fork_exit().  fork_exit()
 * should not return; drop into the debugger if it does.
 */
ENTRY(fork_trampoline)
	mov	%l0, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	fork_exit
	 nop
	DEBUGGER()				! fork_exit() must not return
END(fork_trampoline)

View File

@ -0,0 +1,603 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include "opt_ddb.h"
#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/trap.h>
#include "assym.s"
#define SPILL(storer, base, asi) \
storer %l0, [base + F_L0] asi ; \
storer %l1, [base + F_L1] asi ; \
storer %l2, [base + F_L2] asi ; \
storer %l3, [base + F_L3] asi ; \
storer %l4, [base + F_L4] asi ; \
storer %l5, [base + F_L5] asi ; \
storer %l6, [base + F_L6] asi ; \
storer %l7, [base + F_L7] asi ; \
storer %i0, [base + F_I0] asi ; \
storer %i1, [base + F_I1] asi ; \
storer %i2, [base + F_I2] asi ; \
storer %i3, [base + F_I3] asi ; \
storer %i4, [base + F_I4] asi ; \
storer %i5, [base + F_I5] asi ; \
storer %i6, [base + F_I6] asi ; \
storer %i7, [base + F_I7] asi
#define FILL(loader, base, asi) \
loader [base + F_L0] asi, %l0 ; \
loader [base + F_L1] asi, %l1 ; \
loader [base + F_L2] asi, %l2 ; \
loader [base + F_L3] asi, %l3 ; \
loader [base + F_L4] asi, %l4 ; \
loader [base + F_L5] asi, %l5 ; \
loader [base + F_L6] asi, %l6 ; \
loader [base + F_L7] asi, %l7 ; \
loader [base + F_I0] asi, %i0 ; \
loader [base + F_I1] asi, %i1 ; \
loader [base + F_I2] asi, %i2 ; \
loader [base + F_I3] asi, %i3 ; \
loader [base + F_I4] asi, %i4 ; \
loader [base + F_I5] asi, %i5 ; \
loader [base + F_I6] asi, %i6 ; \
loader [base + F_I7] asi, %i7
DATA(intrnames)
.asciz "foo"
DATA(eintrnames)
DATA(intrcnt)
.long 0
DATA(eintrcnt)
.macro clean_window
clr %o0
clr %o1
clr %o2
clr %o3
clr %o4
clr %o5
clr %o6
clr %o7
clr %l0
clr %l1
clr %l2
clr %l3
clr %l4
clr %l5
clr %l6
rdpr %cleanwin, %l7
inc %l7
wrpr %l7, 0, %cleanwin
clr %l7
retry
.align 128
.endm
.macro tl0_gen type
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov \type, %o0
.align 32
.endm
.macro tl0_wide type
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov \type, %o0
.align 128
.endm
.macro tl0_reserved count
.rept \count
tl0_gen T_RESERVED
.endr
.endm
.macro tl0_intr_level
tl0_reserved 15
.endm
.macro tl0_intr_vector
tl0_gen 0
.endm
.macro tl0_immu_miss
tl0_wide T_IMMU_MISS
.endm
.macro tl0_dmmu_miss
tl0_wide T_DMMU_MISS
.endm
.macro tl0_dmmu_prot
tl0_wide T_DMMU_PROT
.endm
.macro tl0_spill_0_n
wr %g0, ASI_AIUP, %asi
SPILL(stxa, %sp + SPOFF, %asi)
saved
retry
.align 128
.endm
.macro tl0_spill_bad count
.rept \count
tl0_wide T_SPILL
.endr
.endm
.macro tl0_fill_0_n
wr %g0, ASI_AIUP, %asi
FILL(ldxa, %sp + SPOFF, %asi)
restored
retry
.align 128
.endm
.macro tl0_fill_bad count
.rept \count
tl0_wide T_FILL
.endr
.endm
.macro tl0_soft count
tl0_reserved \count
.endm
.macro tl1_gen type
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov \type | T_KERNEL, %o0
.align 32
.endm
.macro tl1_wide type
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov \type | T_KERNEL, %o0
.align 128
.endm
.macro tl1_reserved count
.rept \count
tl1_gen T_RESERVED
.endr
.endm
.macro tl1_insn_excptn
rdpr %pstate, %g1
wrpr %g1, PSTATE_MG | PSTATE_AG, %pstate
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov T_INSN_EXCPTN | T_KERNEL, %o0
.align 32
.endm
.macro tl1_align
b %xcc, tl1_sfsr_trap
nop
.align 32
.endm
ENTRY(tl1_sfsr_trap)
wr %g0, ASI_DMMU, %asi
ldxa [%g0 + AA_DMMU_SFAR] %asi, %g1
ldxa [%g0 + AA_DMMU_SFSR] %asi, %g2
stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
membar #Sync
save %sp, -(CCFSZ + MF_SIZEOF), %sp
stx %g1, [%sp + SPOFF + CCFSZ + MF_SFAR]
stx %g2, [%sp + SPOFF + CCFSZ + MF_SFSR]
mov T_ALIGN | T_KERNEL, %o0
b %xcc, tl1_trap
add %sp, SPOFF + CCFSZ, %o1
END(tl1_sfsr_trap)
.macro tl1_intr_level
tl1_reserved 15
.endm
.macro tl1_intr_vector
rdpr %pstate, %g1
wrpr %g1, PSTATE_IG | PSTATE_AG, %pstate
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov T_INTERRUPT | T_KERNEL, %o0
.align 8
.endm
.macro tl1_immu_miss
rdpr %pstate, %g1
wrpr %g1, PSTATE_MG | PSTATE_AG, %pstate
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov T_IMMU_MISS | T_KERNEL, %o0
.align 128
.endm
/*
 * Kernel fast data mmu miss handler.  Runs on the MMU globals; looks
 * the missing translation up in the kernel tsb and loads it directly
 * into the data tlb, or bails to the debugger on any lookup failure.
 */
.macro tl1_dmmu_miss
/*
 * Load the target tte tag, and extract the context.  If the context
 * is non-zero handle as user space access.  In either case, load the
 * tsb 8k pointer.
 */
ldxa [%g0] ASI_DMMU_TAG_TARGET_REG, %g1
srlx %g1, TT_CTX_SHIFT, %g2
brnz,pn %g2, 2f
ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g2
/*
 * Convert the tte pointer to an stte pointer, and add extra bits to
 * accommodate the large tsb.
 */
sllx %g2, STTE_SHIFT - TTE_SHIFT, %g2
#ifdef notyet
mov AA_DMMU_TAR, %g3
ldxa [%g3] ASI_DMMU, %g3
srlx %g3, TSB_1M_STTE_SHIFT, %g3
and %g3, TSB_KERNEL_MASK >> TSB_1M_STTE_SHIFT, %g3
sllx %g3, TSB_1M_STTE_SHIFT, %g3
add %g2, %g3, %g2
#endif
/*
 * Load the tte (tag into %g4, data into %g5 via the quad load), check
 * that it's valid and that the tags match.
 */
ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
brgez,pn %g5, 2f
cmp %g4, %g1
bne %xcc, 2f
EMPTY
/*
 * Set the reference bit, if it's currently clear.
 */
andcc %g5, TD_REF, %g0
bnz %xcc, 1f
or %g5, TD_REF, %g1
stx %g1, [%g2 + ST_TTE + TTE_DATA]
/*
 * If the mod bit is clear, clear the write bit too, so the first
 * write will fault and the mod bit can be recorded.
 */
1: andcc %g5, TD_MOD, %g1
movz %xcc, TD_W, %g1
andn %g5, %g1, %g5
/*
 * Load the tte data into the TLB and retry the instruction.
 */
stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG
retry
/*
 * For now just bail.  This might cause a red state exception,
 * but oh well.
 */
2: DEBUGGER()
.align 128
.endm
/*
 * Kernel fast data access protection fault; no fast path, enter the
 * common kernel trap handler off the MMU globals.
 */
.macro tl1_dmmu_prot
rdpr %pstate, %g1
wrpr %g1, PSTATE_MG | PSTATE_AG, %pstate
save %sp, -CCFSZ, %sp
b %xcc, tl1_trap
mov T_DMMU_PROT | T_KERNEL, %o0
.align 128
.endm
/*
 * Kernel window spill: kernel stacks are resident, so the window can
 * be stored directly with no fault handling.
 */
.macro tl1_spill_0_n
SPILL(stx, %sp + SPOFF, EMPTY)
saved
retry
.align 128
.endm
/*
 * Emit \count wide entries for the unsupported spill vectors.
 */
.macro tl1_spill_bad count
.rept \count
tl1_wide T_SPILL
.endr
.endm
/*
 * Kernel window fill: reload a register window from the kernel stack.
 */
.macro tl1_fill_0_n
FILL(ldx, %sp + SPOFF, EMPTY)
restored
retry
.align 128
.endm
/*
 * Emit \count wide entries for the unsupported fill vectors.
 */
.macro tl1_fill_bad count
.rept \count
tl1_wide T_FILL
.endr
.endm
/*
 * Kernel breakpoint trap (used by ddb); branch out of line.
 */
.macro tl1_breakpoint
b %xcc, tl1_breakpoint_trap
nop
.align 32
.endm
/*
 * Out of line kernel breakpoint handler.  Flushes the register
 * windows and records the frame pointer in a kdbframe so ddb can walk
 * the stack, then enters the common trap handler.
 */
ENTRY(tl1_breakpoint_trap)
save %sp, -(CCFSZ + KF_SIZEOF), %sp
flushw
stx %fp, [%sp + SPOFF + CCFSZ + KF_FP]
mov T_BREAKPOINT | T_KERNEL, %o0
b %xcc, tl1_trap
add %sp, SPOFF + CCFSZ, %o1
END(tl1_breakpoint_trap)
/*
 * Kernel software traps are not implemented; all \count vectors are
 * reserved.
 */
.macro tl1_soft count
tl1_reserved \count
.endm
/*
 * The trap table itself.  It must be aligned on a 32k boundary; the
 * user (tl0) vectors occupy the first half and the kernel (tl1)
 * vectors the second.  The labels are for debugging only; the actual
 * entries are laid down by the macros above.
 */
.sect .trap
.align 0x8000
.globl tl0_base
tl0_base:
tl0_reserved 1 ! 0x0 unused
tl0_power_on:
tl0_gen T_POWER_ON ! 0x1 power on reset
tl0_watchdog:
tl0_gen T_WATCHDOG ! 0x2 watchdog reset
tl0_reset_ext:
tl0_gen T_RESET_EXT ! 0x3 externally initiated reset
tl0_reset_soft:
tl0_gen T_RESET_SOFT ! 0x4 software initiated reset
tl0_red_state:
tl0_gen T_RED_STATE ! 0x5 red state exception
tl0_reserved 2 ! 0x6-0x7 reserved
tl0_insn_excptn:
tl0_gen T_INSN_EXCPTN ! 0x8 instruction access exception
tl0_reserved 1 ! 0x9 reserved
tl0_insn_error:
tl0_gen T_INSN_ERROR ! 0xa instruction access error
tl0_reserved 5 ! 0xb-0xf reserved
tl0_insn_illegal:
tl0_gen T_INSN_ILLEGAL ! 0x10 illegal instruction
tl0_priv_opcode:
tl0_gen T_PRIV_OPCODE ! 0x11 privileged opcode
tl0_reserved 14 ! 0x12-0x1f reserved
tl0_fp_disabled:
tl0_gen T_FP_DISABLED ! 0x20 floating point disabled
tl0_fp_ieee:
tl0_gen T_FP_IEEE ! 0x21 floating point exception ieee
tl0_fp_other:
tl0_gen T_FP_OTHER ! 0x22 floating point exception other
tl0_tag_ovflw:
tl0_gen T_TAG_OVFLW ! 0x23 tag overflow
tl0_clean_window:
clean_window ! 0x24 clean window
tl0_divide:
tl0_gen T_DIVIDE ! 0x28 division by zero
tl0_reserved 7 ! 0x29-0x2f reserved
tl0_data_excptn:
tl0_gen T_DATA_EXCPTN ! 0x30 data access exception
tl0_reserved 1 ! 0x31 reserved
tl0_data_error:
tl0_gen T_DATA_ERROR ! 0x32 data access error
tl0_reserved 1 ! 0x33 reserved
tl0_align:
tl0_gen T_ALIGN ! 0x34 memory address not aligned
tl0_align_lddf:
tl0_gen T_ALIGN_LDDF ! 0x35 lddf memory address not aligned
tl0_align_stdf:
tl0_gen T_ALIGN_STDF ! 0x36 stdf memory address not aligned
tl0_priv_action:
tl0_gen T_PRIV_ACTION ! 0x37 privileged action
tl0_reserved 9 ! 0x38-0x40 reserved
tl0_intr_level:
tl0_intr_level ! 0x41-0x4f interrupt level 1 to 15
tl0_reserved 16 ! 0x50-0x5f reserved
tl0_intr_vector:
tl0_intr_vector ! 0x60 interrupt vector
tl0_watch_phys:
tl0_gen T_WATCH_PHYS ! 0x61 physical address watchpoint
tl0_watch_virt:
tl0_gen T_WATCH_VIRT ! 0x62 virtual address watchpoint
tl0_ecc:
tl0_gen T_ECC ! 0x63 corrected ecc error
tl0_immu_miss:
tl0_immu_miss ! 0x64 fast instruction access mmu miss
tl0_dmmu_miss:
tl0_dmmu_miss ! 0x68 fast data access mmu miss
tl0_dmmu_prot:
tl0_dmmu_prot ! 0x6c fast data access protection
tl0_reserved 16 ! 0x70-0x7f reserved
tl0_spill_0_n:
tl0_spill_0_n ! 0x80 spill 0 normal
tl0_spill_bad:
tl0_spill_bad 15 ! 0x84-0xbf spill normal, other
tl0_fill_0_n:
tl0_fill_0_n ! 0xc0 fill 0 normal
tl0_fill_bad:
tl0_fill_bad 15 ! 0xc4-0xff fill normal, other
tl0_sun_syscall:
tl0_reserved 1 ! 0x100 sun system call
tl0_breakpoint:
tl0_gen T_BREAKPOINT ! 0x101 breakpoint
tl0_soft 126 ! 0x102-0x17f trap instruction
tl0_reserved 128 ! 0x180-0x1ff reserved
/*
 * Kernel (tl>0) half of the trap table; trap numbers 0x200-0x3ff.
 */
tl1_base:
tl1_reserved 1 ! 0x200 unused
tl1_power_on:
tl1_gen T_POWER_ON ! 0x201 power on reset
tl1_watchdog:
tl1_gen T_WATCHDOG ! 0x202 watchdog reset
tl1_reset_ext:
tl1_gen T_RESET_EXT ! 0x203 externally initiated reset
tl1_reset_soft:
tl1_gen T_RESET_SOFT ! 0x204 software initiated reset
tl1_red_state:
tl1_gen T_RED_STATE ! 0x205 red state exception
tl1_reserved 2 ! 0x206-0x207 reserved
tl1_insn_excptn:
tl1_insn_excptn ! 0x208 instruction access exception
tl1_reserved 1 ! 0x209 reserved
tl1_insn_error:
tl1_gen T_INSN_ERROR ! 0x20a instruction access error
tl1_reserved 5 ! 0x20b-0x20f reserved
tl1_insn_illegal:
tl1_gen T_INSN_ILLEGAL ! 0x210 illegal instruction
tl1_priv_opcode:
tl1_gen T_PRIV_OPCODE ! 0x211 privileged opcode
tl1_reserved 14 ! 0x212-0x21f reserved
tl1_fp_disabled:
tl1_gen T_FP_DISABLED ! 0x220 floating point disabled
tl1_fp_ieee:
tl1_gen T_FP_IEEE ! 0x221 floating point exception ieee
tl1_fp_other:
tl1_gen T_FP_OTHER ! 0x222 floating point exception other
tl1_tag_ovflw:
tl1_gen T_TAG_OVFLW ! 0x223 tag overflow
tl1_clean_window:
clean_window ! 0x224 clean window
tl1_divide:
tl1_gen T_DIVIDE ! 0x228 division by zero
tl1_reserved 7 ! 0x229-0x22f reserved
tl1_data_excptn:
tl1_gen T_DATA_EXCPTN ! 0x230 data access exception
tl1_reserved 1 ! 0x231 reserved
tl1_data_error:
tl1_gen T_DATA_ERROR ! 0x232 data access error
tl1_reserved 1 ! 0x233 reserved
tl1_align:
tl1_align ! 0x234 memory address not aligned
tl1_align_lddf:
tl1_gen T_ALIGN_LDDF ! 0x235 lddf memory address not aligned
tl1_align_stdf:
tl1_gen T_ALIGN_STDF ! 0x236 stdf memory address not aligned
tl1_priv_action:
tl1_gen T_PRIV_ACTION ! 0x237 privileged action
tl1_reserved 9 ! 0x238-0x240 reserved
tl1_intr_level:
tl1_intr_level ! 0x241-0x24f interrupt level 1 to 15
tl1_reserved 16 ! 0x250-0x25f reserved
tl1_intr_vector:
tl1_intr_vector ! 0x260 interrupt vector
tl1_watch_phys:
tl1_gen T_WATCH_PHYS ! 0x261 physical address watchpoint
tl1_watch_virt:
tl1_gen T_WATCH_VIRT ! 0x262 virtual address watchpoint
tl1_ecc:
tl1_gen T_ECC ! 0x263 corrected ecc error
tl1_immu_miss:
tl1_immu_miss ! 0x264 fast instruction access mmu miss
tl1_dmmu_miss:
tl1_dmmu_miss ! 0x268 fast data access mmu miss
tl1_dmmu_prot:
tl1_dmmu_prot ! 0x26c fast data access protection
tl1_reserved 16 ! 0x270-0x27f reserved
tl1_spill_0_n:
tl1_spill_0_n ! 0x280 spill 0 normal
tl1_spill_bad:
tl1_spill_bad 15 ! 0x284-0x2bf spill normal, other
tl1_fill_0_n:
tl1_fill_0_n ! 0x2c0 fill 0 normal
tl1_fill_bad:
tl1_fill_bad 15 ! 0x2c4-0x2ff fill normal, other
tl1_reserved 1 ! 0x300 trap instruction
tl1_breakpoint:
tl1_breakpoint ! 0x301 breakpoint
tl1_soft 126 ! 0x302-0x37f trap instruction
tl1_reserved 128 ! 0x380-0x3ff reserved
/*
 * User trap entry point; not implemented yet, any use is fatal.
 */
ENTRY(tl0_trap)
illtrap
END(tl0_trap)
/*
 * void tl1_trap(u_long o0, u_long o1, u_long o2, u_long type)
 *
 * Common kernel trap handler.  Builds a trapframe on the stack, drops
 * to %tl 1 and toggles off the alternate globals (enabling interrupts
 * via the same wrpr), calls the C trap() handler with the frame, then
 * restores the saved trap state and returns from the trap.
 */
ENTRY(tl1_trap)
sub %sp, TF_SIZEOF, %sp
! save the trap state registers before lowering %tl clobbers them
rdpr %tstate, %l0
stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
rdpr %tpc, %l1
stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
rdpr %tnpc, %l2
stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
wrpr %g0, 1, %tl
! wrpr with an immediate xors it into %pstate: leave AG, enable IE
rdpr %pstate, %l7
wrpr %l7, PSTATE_AG | PSTATE_IE, %pstate
stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
stx %o1, [%sp + SPOFF + CCFSZ + TF_ARG]
! save the normal globals so trap() may clobber them
stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
stx %g6, [%sp + SPOFF + CCFSZ + TF_G6]
stx %g7, [%sp + SPOFF + CCFSZ + TF_G7]
call trap
add %sp, CCFSZ + SPOFF, %o0	! trapframe pointer (delay slot)
ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1
ldx [%sp + SPOFF + CCFSZ + TF_G2], %g2
ldx [%sp + SPOFF + CCFSZ + TF_G3], %g3
ldx [%sp + SPOFF + CCFSZ + TF_G4], %g4
ldx [%sp + SPOFF + CCFSZ + TF_G5], %g5
ldx [%sp + SPOFF + CCFSZ + TF_G6], %g6
ldx [%sp + SPOFF + CCFSZ + TF_G7], %g7
ldx [%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
ldx [%sp + SPOFF + CCFSZ + TF_TPC], %l1
ldx [%sp + SPOFF + CCFSZ + TF_TNPC], %l2
! toggle back to the trap-time pstate and raise %tl again
rdpr %pstate, %o0
wrpr %o0, PSTATE_AG | PSTATE_IE, %pstate
wrpr %g0, 2, %tl
wrpr %l0, 0, %tstate
wrpr %l1, 0, %tpc
wrpr %l2, 0, %tnpc
restore
retry
END(tl1_trap)
/*
 * Entry point for a newly forked process the first time it runs.
 * %l0-%l2 hold the arguments for fork_exit (set up at fork time);
 * fork_exit should not return, so drop to the debugger if it does.
 */
ENTRY(fork_trampoline)
mov %l0, %o0
mov %l1, %o1
mov %l2, %o2
call fork_exit
nop
DEBUGGER()
END(fork_trampoline)

View File

@ -0,0 +1,163 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/assym.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/user.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/asi.h>
#include <machine/vmparam.h>
#include <machine/cpufunc.h>
#include <machine/frame.h>
#include <machine/globals.h>
#include <machine/pcb.h>
#include <machine/pstate.h>
#include <machine/setjmp.h>
#include <machine/pv.h>
#include <machine/tte.h>
#include <machine/tlb.h>
#include <machine/tsb.h>
/* Errno values referenced from assembly. */
ASSYM(EFAULT, EFAULT);
ASSYM(ENAMETOOLONG, ENAMETOOLONG);
/* VM layout constants. */
ASSYM(UPAGES, UPAGES);
ASSYM(PAGE_SIZE, PAGE_SIZE);
/* %pstate register fields. */
ASSYM(PSTATE_AG, PSTATE_AG);
ASSYM(PSTATE_IE, PSTATE_IE);
ASSYM(PSTATE_PRIV, PSTATE_PRIV);
ASSYM(PSTATE_PEF, PSTATE_PEF);
ASSYM(PSTATE_MG, PSTATE_MG);
ASSYM(PSTATE_IG, PSTATE_IG);
/* TSB geometry constants used by the mmu miss handlers. */
ASSYM(TTE_SHIFT, TTE_SHIFT);
ASSYM(STTE_SHIFT, STTE_SHIFT);
ASSYM(TSB_PRIMARY_BUCKET_SHIFT, TSB_PRIMARY_BUCKET_SHIFT);
ASSYM(TSB_KERNEL_MIN_ADDRESS, TSB_KERNEL_MIN_ADDRESS);
ASSYM(TSB_MASK_WIDTH, TSB_MASK_WIDTH);
ASSYM(TSB_SECONDARY_BUCKET_SHIFT, TSB_SECONDARY_BUCKET_SHIFT);
ASSYM(TSB_BUCKET_SPREAD_SHIFT, TSB_BUCKET_SPREAD_SHIFT);
ASSYM(TSB_SECONDARY_STTE_MASK, TSB_SECONDARY_STTE_MASK);
ASSYM(TSB_SECONDARY_STTE_SHIFT, TSB_SECONDARY_STTE_SHIFT);
ASSYM(TSB_LEVEL1_BUCKET_MASK, TSB_LEVEL1_BUCKET_MASK);
ASSYM(TSB_LEVEL1_BUCKET_SHIFT, TSB_LEVEL1_BUCKET_SHIFT);
ASSYM(TSB_1M_STTE_SHIFT, TSB_1M_STTE_SHIFT);
ASSYM(TSB_KERNEL_MASK, TSB_KERNEL_MASK);
ASSYM(PAGE_SHIFT, PAGE_SHIFT);
ASSYM(PAGE_MASK, PAGE_MASK);
/* tte/stte field offsets and data bits. */
ASSYM(TTE_DATA, offsetof(struct tte, tte_data));
ASSYM(TTE_TAG, offsetof(struct tte, tte_tag));
ASSYM(ST_TTE, offsetof(struct stte, st_tte));
ASSYM(STTE_SIZEOF, sizeof(struct stte));
ASSYM(TD_VA_LOW_MASK, TD_VA_LOW_MASK);
ASSYM(TD_VA_LOW_SHIFT, TD_VA_LOW_SHIFT);
ASSYM(TD_MOD, TD_MOD);
ASSYM(TD_REF, TD_REF);
ASSYM(TD_W, TD_W);
ASSYM(TT_VA_MASK, TT_VA_MASK);
ASSYM(TT_VA_SHIFT, TT_VA_SHIFT);
ASSYM(TT_CTX_SHIFT, TT_CTX_SHIFT);
/* Per-cpu globaldata offsets. */
ASSYM(GD_CURPROC, offsetof(struct globaldata, gd_curproc));
ASSYM(GD_CURPCB, offsetof(struct globaldata, gd_curpcb));
ASSYM(GD_FPCURPROC, offsetof(struct globaldata, gd_fpcurproc));
/* setjmp buffer slots. */
ASSYM(JB_FP, offsetof(struct _jmp_buf, _jb[_JB_FP]));
ASSYM(JB_PC, offsetof(struct _jmp_buf, _jb[_JB_PC]));
ASSYM(JB_SP, offsetof(struct _jmp_buf, _jb[_JB_SP]));
/* struct proc and pcb offsets used by context switch code. */
ASSYM(P_ADDR, offsetof(struct proc, p_addr));
ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
ASSYM(PCB_FP, offsetof(struct pcb, pcb_fp));
ASSYM(PCB_PC, offsetof(struct pcb, pcb_pc));
ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
/* Register window frame offsets (locals and ins). */
ASSYM(F_L0, offsetof(struct frame, f_local[0]));
ASSYM(F_L1, offsetof(struct frame, f_local[1]));
ASSYM(F_L2, offsetof(struct frame, f_local[2]));
ASSYM(F_L3, offsetof(struct frame, f_local[3]));
ASSYM(F_L4, offsetof(struct frame, f_local[4]));
ASSYM(F_L5, offsetof(struct frame, f_local[5]));
ASSYM(F_L6, offsetof(struct frame, f_local[6]));
ASSYM(F_L7, offsetof(struct frame, f_local[7]));
ASSYM(F_I0, offsetof(struct frame, f_in[0]));
ASSYM(F_I1, offsetof(struct frame, f_in[1]));
ASSYM(F_I2, offsetof(struct frame, f_in[2]));
ASSYM(F_I3, offsetof(struct frame, f_in[3]));
ASSYM(F_I4, offsetof(struct frame, f_in[4]));
ASSYM(F_I5, offsetof(struct frame, f_in[5]));
ASSYM(F_I6, offsetof(struct frame, f_in[6]));
ASSYM(F_I7, offsetof(struct frame, f_in[7]));
ASSYM(CCFSZ, sizeof(struct frame));
ASSYM(SPOFF, SPOFF);
/* Debugger and mmu fault frames. */
ASSYM(KF_FP, offsetof(struct kdbframe, kf_fp));
ASSYM(KF_SIZEOF, sizeof(struct kdbframe));
ASSYM(MF_SFAR, offsetof(struct mmuframe, mf_sfar));
ASSYM(MF_SFSR, offsetof(struct mmuframe, mf_sfsr));
ASSYM(MF_TAR, offsetof(struct mmuframe, mf_tar));
ASSYM(MF_SIZEOF, sizeof(struct mmuframe));
/* Trapframe offsets. */
ASSYM(TF_G0, offsetof(struct trapframe, tf_global[0]));
ASSYM(TF_G1, offsetof(struct trapframe, tf_global[1]));
ASSYM(TF_G2, offsetof(struct trapframe, tf_global[2]));
ASSYM(TF_G3, offsetof(struct trapframe, tf_global[3]));
ASSYM(TF_G4, offsetof(struct trapframe, tf_global[4]));
ASSYM(TF_G5, offsetof(struct trapframe, tf_global[5]));
ASSYM(TF_G6, offsetof(struct trapframe, tf_global[6]));
ASSYM(TF_G7, offsetof(struct trapframe, tf_global[7]));
ASSYM(TF_O0, offsetof(struct trapframe, tf_out[0]));
ASSYM(TF_O1, offsetof(struct trapframe, tf_out[1]));
ASSYM(TF_O2, offsetof(struct trapframe, tf_out[2]));
ASSYM(TF_O3, offsetof(struct trapframe, tf_out[3]));
ASSYM(TF_O4, offsetof(struct trapframe, tf_out[4]));
ASSYM(TF_O5, offsetof(struct trapframe, tf_out[5]));
ASSYM(TF_O6, offsetof(struct trapframe, tf_out[6]));
ASSYM(TF_O7, offsetof(struct trapframe, tf_out[7]));
ASSYM(TF_TSTATE, offsetof(struct trapframe, tf_tstate));
ASSYM(TF_TPC, offsetof(struct trapframe, tf_tpc));
ASSYM(TF_TNPC, offsetof(struct trapframe, tf_tnpc));
ASSYM(TF_TYPE, offsetof(struct trapframe, tf_type));
ASSYM(TF_ARG, offsetof(struct trapframe, tf_arg));
ASSYM(TF_SIZEOF, sizeof(struct trapframe));
ASSYM(U_PCB, offsetof(struct user, u_pcb));

View File

@ -0,0 +1,61 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asmacros.h>
#include "assym.s"
/*
* void _start(struct bootinfo *bi, u_long ofw_vec)
*/
/*
 * Kernel entry point from the loader: %o0 = bootinfo pointer,
 * %o1 = openfirmware entry vector.  Stashes the arguments in globals
 * that survive the window operations, switches to the top of the
 * proc0 (user0) stack and calls sparc64_init, then mi_startup.
 */
ENTRY(_start)
! run privileged with interrupts enabled
wrpr %g0, PSTATE_IE|PSTATE_PRIV, %pstate
mov %o0, %g1
mov %o1, %g2
flushw
wrpr %g0, 1, %cwp
wrpr %g0, 0, %cleanwin
! stack at the top of user0, biased by SPOFF
setx user0 + UPAGES * PAGE_SIZE - SPOFF, %l0, %o5
save %o5, -CCFSZ, %sp
mov %g1, %o0
call sparc64_init
mov %g2, %o1	! delay slot
call mi_startup
nop
! NOTREACHED
END(_start)
/*
 * Signal trampoline; not implemented yet, any use is fatal.
 */
ENTRY(sigcode)
illtrap
esigcode:
END(sigcode)
/* Size of the signal trampoline, for copying out to user stacks. */
DATA(szsigcode)
.long esigcode - sigcode

View File

@ -0,0 +1,61 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asmacros.h>
#include "assym.s"
/*
* void _start(struct bootinfo *bi, u_long ofw_vec)
*/
/*
 * Kernel entry point from the loader: %o0 = bootinfo pointer,
 * %o1 = openfirmware entry vector.  Stashes the arguments in globals
 * that survive the window operations, switches to the top of the
 * proc0 (user0) stack and calls sparc64_init, then mi_startup.
 */
ENTRY(_start)
! run privileged with interrupts enabled
wrpr %g0, PSTATE_IE|PSTATE_PRIV, %pstate
mov %o0, %g1
mov %o1, %g2
flushw
wrpr %g0, 1, %cwp
wrpr %g0, 0, %cleanwin
! stack at the top of user0, biased by SPOFF
setx user0 + UPAGES * PAGE_SIZE - SPOFF, %l0, %o5
save %o5, -CCFSZ, %sp
mov %g1, %o0
call sparc64_init
mov %g2, %o1	! delay slot
call mi_startup
nop
! NOTREACHED
END(_start)
/*
 * Signal trampoline; not implemented yet, any use is fatal.
 */
ENTRY(sigcode)
illtrap
esigcode:
END(sigcode)
/* Size of the signal trampoline, for copying out to user stacks. */
DATA(szsigcode)
.long esigcode - sigcode

View File

@ -26,21 +26,52 @@
* $FreeBSD$
*/
#include "opt_ddb.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ptrace.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/timetc.h>
#include <sys/user.h>
#include <dev/ofw/openfirm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <ddb/ddb.h>
#include <machine/bootinfo.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/pstate.h>
#include <machine/reg.h>
void sparc64_init(ofw_vec_t *ofw_vec);
typedef int ofw_vec_t(void *);
extern char tl0_base[];
extern char _end[];
int physmem = 0;
int cold = 1;
long dumplo;
int Maxmem = 0;
@ -48,14 +79,254 @@ int Maxmem = 0;
struct mtx Giant;
struct mtx sched_lock;
struct user *proc0paddr;
struct globaldata __globaldata;
char user0[UPAGES * PAGE_SIZE];
vm_offset_t clean_sva;
vm_offset_t clean_eva;
u_long ofw_vec;
u_long ofw_tba;
static vm_offset_t buffer_sva;
static vm_offset_t buffer_eva;
static vm_offset_t pager_sva;
static vm_offset_t pager_eva;
static struct timecounter tick_tc;
static timecounter_get_t tick_get_timecount;
void sparc64_init(struct bootinfo *bi, ofw_vec_t *vec);
static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
/*
 * Machine dependent startup, run at SI_SUB_CPU.  Identifies the cpu
 * via openfirmware, registers the %tick timecounter, sizes and
 * allocates the callout wheel and buffer cache tables, and creates
 * the clean/buffer/pager/exec submaps.
 */
static void
cpu_startup(void *arg)
{
vm_offset_t physmem_est;
vm_offset_t minaddr;
vm_offset_t maxaddr;
phandle_t child;
phandle_t root;
vm_offset_t va;
vm_size_t size;
char name[32];
char type[8];
u_int clock;
int factor;
caddr_t p;
int i;
/* Find the first node with device_type "cpu" in the OFW tree. */
root = OF_peer(0);
for (child = OF_child(root); child != 0; child = OF_peer(child)) {
OF_getprop(child, "device_type", type, sizeof(type));
if (strcmp(type, "cpu") == 0)
break;
}
if (child == 0)
panic("cpu_startup: no cpu\n");
OF_getprop(child, "name", name, sizeof(name));
OF_getprop(child, "clock-frequency", &clock, sizeof(clock));
/* Register the %tick register as the system timecounter. */
tick_tc.tc_get_timecount = tick_get_timecount;
tick_tc.tc_poll_pps = NULL;
tick_tc.tc_counter_mask = ~0u;
tick_tc.tc_frequency = clock;
tick_tc.tc_name = "tick";
tc_init(&tick_tc);
/* Strip the "SUNW," vendor prefix from the cpu name, if present. */
p = name;
if (bcmp(p, "SUNW,", 5) == 0)
p += 5;
printf("CPU: %s Processor (%d.%02d MHz CPU)\n", p,
(clock + 4999) / 1000000, ((clock + 4999) / 10000) % 100);
#if 0
ver = rdpr(ver);
printf("manuf: %#lx impl: %#lx mask: %#lx maxtl: %#lx maxwin: %#lx\n",
VER_MANUF(ver), VER_IMPL(ver), VER_MASK(ver), VER_MAXTL(ver),
VER_MAXWIN(ver));
#endif
/*
 * XXX make most of this MI and move to sys/kern.
 */
/*
 * Calculate callout wheel size (smallest power of two >= ncallout).
 */
for (callwheelsize = 1, callwheelbits = 0; callwheelsize < ncallout;
callwheelsize <<= 1, ++callwheelbits)
;
callwheelmask = callwheelsize - 1;
/*
 * Two-pass allocation: the first pass (va == 0) only measures how
 * much space the tables need; the second lays them out in the
 * kmem_alloc'ed region.
 */
size = 0;
va = 0;
again:
p = (caddr_t)va;
#define valloc(name, type, num) \
(name) = (type *)p; p = (caddr_t)((name) + (num))
valloc(callout, struct callout, ncallout);
valloc(callwheel, struct callout_tailq, callwheelsize);
/* Estimate usable memory for sizing the buffer cache. */
if (kernel_map->first_free == NULL) {
printf("Warning: no free entries in kernel_map.\n");
physmem_est = physmem;
} else
physmem_est = min(physmem,
kernel_map->max_offset - kernel_map->min_offset);
if (nbuf == 0) {
factor = 4 * BKVASIZE / PAGE_SIZE;
nbuf = 50;
if (physmem_est > 1024)
nbuf += min((physmem_est - 1024) / factor,
16384 / factor);
if (physmem_est > 16384)
nbuf += (physmem_est - 16384) * 2 / (factor * 5);
}
/* Don't let nbuf consume more than half the buffer kva. */
if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
(BKVASIZE * 2)) {
nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
(BKVASIZE * 2);
printf("Warning: nbufs capped at %d\n", nbuf);
}
nswbuf = max(min(nbuf/4, 256), 16);
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
p = bufhashinit(p);
if (va == 0) {
/* End of sizing pass: allocate the space and lay out for real. */
size = (vm_size_t)(p - va);
if ((va = kmem_alloc(kernel_map, round_page(size))) == 0)
panic("startup: no room for tables");
goto again;
}
if ((vm_size_t)(p - va) != size)
panic("startup: table size inconsistency");
/* Carve the clean, buffer, pager and exec submaps out of kernel_map. */
clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
(nbuf*BKVASIZE));
buffer_map->system_map = 1;
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size);
pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
/* Populate the callout free list and wheel. */
SLIST_INIT(&callfree);
for (i = 0; i < ncallout; i++) {
callout_init(&callout[i], 0);
callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
}
for (i = 0; i < callwheelsize; i++)
TAILQ_INIT(&callwheel[i]);
mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
bufinit();
vm_pager_bufferinit();
globaldata_register(globaldata);
}
/*
 * Timecounter fetch routine: returns the low 32 bits of the %tick
 * register.
 */
unsigned
tick_get_timecount(struct timecounter *tc)
{
return ((unsigned)rd(tick));
}
/*
 * Early machine dependent initialization, called from _start with the
 * bootinfo handed over by the loader.  Brings up openfirmware and the
 * console, bootstraps the pmap, installs the kernel trap table, and
 * sets up proc0 and the mutexes needed before mi_startup runs.
 *
 * NOTE(review): this chunk contains interleaved old/new diff lines
 * (two function signatures, a stray OF_init(ofw_vec) call) — verify
 * against the committed file before building.
 */
void
sparc64_init(ofw_vec_t *ofw_vec)
sparc64_init(struct bootinfo *bi, ofw_vec_t *vec)
{
OF_init(ofw_vec);
struct trapframe *tf;
/*
 * Initialize openfirmware (needed for console).
 */
OF_init(vec);
/*
 * Initialize the console before printing anything.
 */
cninit();
printf("hello world!!\n");
/*
 * Check that the bootinfo struct is sane.
 */
if (bi->bi_version != BOOTINFO_VERSION)
panic("sparc64_init: bootinfo version mismatch");
if (bi->bi_metadata == 0)
panic("sparc64_init: no loader metadata");
preload_metadata = (caddr_t)bi->bi_metadata;
#ifdef DDB
kdb_init();
#endif
/*
 * Initialize virtual memory.
 */
pmap_bootstrap(bi->bi_kpa, bi->bi_end);
/*
 * XXX Clear tick and disable the comparator.
 */
wrpr(tick, 0, 0);
wr(asr23, 1L << 63, 0);
/*
 * Force trap level 1 and take over the trap table.
 */
wrpr(tl, 0, 1);
wrpr(tba, tl0_base, 0);
/*
 * Initialize proc0 stuff (p_contested needs to be done early).
 */
LIST_INIT(&proc0.p_contested);
proc0.p_addr = (struct user *)user0;
tf = (struct trapframe *)(user0 + UPAGES * PAGE_SIZE - sizeof(*tf));
proc0.p_frame = tf;
/*
 * Initialize the per-cpu pointer so we can set curproc.
 */
globaldata = &__globaldata;
/*
 * Initialize curproc so that mutexes work.
 */
PCPU_SET(curproc, &proc0);
PCPU_SET(curpcb, &((struct user *)user0)->u_pcb);
PCPU_SET(spinlocks, NULL);
/*
 * Initialize mutexes.
 */
mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
mtx_init(&proc0.p_mtx, "process lock", MTX_DEF);
mtx_lock(&Giant);
}
/*
 * Record the trap base address and entry vector to use for calling
 * back into openfirmware after the kernel has taken over the traps.
 */
void
set_openfirm_callback(ofw_vec_t *vec)
{
ofw_tba = rdpr(tba);
ofw_vec = (u_long)vec;
}
void
@ -120,7 +391,9 @@ setregs(struct proc *p, u_long entry, u_long stack, u_long ps_strings)
/*
 * Enter the kernel debugger (ddb) via a breakpoint trap.
 * NOTE(review): the stray "TODO;" line looks like leftover diff
 * context of the removed stub body — verify against the committed
 * file.
 */
void
Debugger(const char *msg)
{
TODO;
printf("Debugger(\"%s\")\n", msg);
breakpoint();
}
int

View File

@ -26,10 +26,35 @@
* $FreeBSD$
*/
/*
* Manages physical address maps.
*
* In addition to hardware address maps, this module is called upon to
* provide software-use-only maps which may or may not be stored in the
* same form as hardware maps. These pseudo-maps are used to store
* intermediate results from copy operations to and from address spaces.
*
* Since the information managed by this module is also stored by the
* logical address mapping module, this module may throw away valid virtual
* to physical mappings at almost any time. However, invalidations of
* mappings must be done as requested.
*
* In order to cope with hardware architectures which make virtual to
* physical map invalidates expensive, this module may delay invalidate
* reduced protection operations until such time as they are actually
* necessary. This module is given full information as to which processors
* are currently using which maps, and to when physical maps must be made
* correct.
*/
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <dev/ofw/openfirm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@ -42,18 +67,675 @@
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <machine/frame.h>
#include <machine/pv.h>
#include <machine/tlb.h>
#include <machine/tte.h>
#include <machine/tsb.h>
#define PMAP_DEBUG
#define PMAP_LOCK(pm)
#define PMAP_UNLOCK(pm)
#define dcache_global_flush(pa)
#define icache_global_flush(pa)
struct mem_region {
vm_offset_t mr_start;
vm_offset_t mr_size;
};
struct ofw_map {
vm_offset_t om_start;
vm_offset_t om_size;
u_long om_tte;
};
/*
* Virtual address of message buffer.
*/
struct msgbuf *msgbufp;
/*
* Physical addresses of first and last available physical page.
*/
vm_offset_t avail_start;
vm_offset_t avail_end;
vm_offset_t kernel_vm_end;
/*
* Map of physical memory regions.
*/
vm_offset_t phys_avail[10];
/*
* First and last available kernel virtual addresses.
*/
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;
struct pmap __kernel_pmap;
/*
* Kernel pmap handle and associated storage.
*/
pmap_t kernel_pmap;
static struct pmap kernel_pmap_store;
static boolean_t pmap_initialized = FALSE;
/*
* Map of free and in use hardware contexts and index of first potentially
* free context.
*/
static char pmap_context_map[PMAP_CONTEXT_MAX];
static u_int pmap_context_base;
/*
* Virtual addresses of free space for temporary mappings. Used for copying
* and zeroing physical pages.
*/
static vm_offset_t CADDR1;
static vm_offset_t CADDR2;
/*
 * Return non-zero if modifications at the given kernel virtual
 * address should be tracked, i.e. the address lies outside the buffer
 * cache submap [clean_sva, clean_eva), whose pages are managed
 * separately.
 */
static __inline int
pmap_track_modified(vm_offset_t va)
{
return ((va < clean_sva) || (va >= clean_eva));
}
/*
* Manipulate tte bits of all virtual to physical mappings for the given page.
*/
static void pmap_bit_clear(vm_page_t m, u_long bits);
static void pmap_bit_set(vm_page_t m, u_long bits);
static int pmap_bit_test(vm_page_t m, u_long bits);
static void pmap_local_remove_all(vm_page_t m);
static void pmap_global_remove_all(vm_page_t m);
/*
* Allocate and free hardware context numbers.
*/
static u_int pmap_context_alloc(void);
static void pmap_context_destroy(u_int i);
/*
* Allocate physical memory for use in pmap_bootstrap.
*/
static vm_offset_t pmap_bootstrap_alloc(vm_size_t size);
/*
* Quick sort callout for comparing memory regions.
*/
static int mr_cmp(const void *a, const void *b);
/*
 * qsort(3) comparison routine for ordering memory regions by start
 * address.
 *
 * Compare explicitly instead of returning the subtraction: mr_start
 * is an unsigned vm_offset_t, so a difference truncated to int can
 * yield the wrong sign for regions whose addresses differ in the
 * high bits, mis-sorting the phys_avail array.
 */
static int
mr_cmp(const void *a, const void *b)
{
	const struct mem_region *ma;
	const struct mem_region *mb;

	ma = a;
	mb = b;
	if (ma->mr_start < mb->mr_start)
		return (-1);
	else if (ma->mr_start > mb->mr_start)
		return (1);
	else
		return (0);
}
/*
* Bootstrap the system enough to run with virtual memory.
*/
/*
 * Bootstrap the VM system: discover physical memory from the prom,
 * set up the kernel pmap and the locked kernel tsb mappings, reserve
 * space for the pv table, and establish the kernel virtual address
 * range.  skpa is the kernel's load physical address (currently
 * unused here); ekva is the end of the loaded kernel image.
 */
void
pmap_bootstrap(vm_offset_t skpa, vm_offset_t ekva)
{
struct mem_region mra[8];
ihandle_t pmem;
struct pmap *pm;
vm_offset_t pa;
vm_offset_t va;
struct tte tte;
int sz;
int i;
int j;
/*
 * Find out what physical memory is available from the prom and
 * initialize the phys_avail array.
 */
if ((pmem = OF_finddevice("/memory")) == -1)
panic("pmap_bootstrap: finddevice /memory");
if ((sz = OF_getproplen(pmem, "available")) == -1)
panic("pmap_bootstrap: getproplen /memory/available");
if (sizeof(phys_avail) < sz)
panic("pmap_bootstrap: phys_avail too small");
bzero(mra, sz);
if (OF_getprop(pmem, "available", mra, sz) == -1)
panic("pmap_bootstrap: getprop /memory/available");
sz /= sizeof(*mra);
qsort(mra, sz, sizeof *mra, mr_cmp);
for (i = 0, j = 0; i < sz; i++, j += 2) {
phys_avail[j] = mra[i].mr_start;
phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
}
/*
 * Initialize the kernel pmap (which is statically allocated).
 */
pm = &kernel_pmap_store;
pm->pm_context = TLB_CTX_KERNEL;
pm->pm_active = ~0;
pm->pm_count = 1;
kernel_pmap = pm;
/*
 * Allocate the kernel tsb and lock it in the tlb with 4M pages.
 * NOTE(review): TD_PA(pa) does not advance with i, so every tsb page
 * appears to map the same physical 4M page — confirm whether
 * TSB_KERNEL_PAGES > 1 is ever used.
 */
pa = pmap_bootstrap_alloc(TSB_KERNEL_SIZE);
if (pa & PAGE_MASK_4M)
panic("pmap_bootstrap: tsb unaligned\n");
tsb_kernel_phys = pa;
for (i = 0; i < TSB_KERNEL_PAGES; i++) {
va = TSB_KERNEL_MIN_ADDRESS + i * PAGE_SIZE_4M;
tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
tte.tte_data = TD_V | TD_4M | TD_VA_LOW(va) | TD_PA(pa) |
TD_MOD | TD_REF | TD_TSB | TD_L | TD_CP | TD_P | TD_W;
tlb_store_slot(TLB_DTLB, va, tte, TLB_SLOT_TSB_KERNEL_MIN + i);
}
/*
 * NOTE(review): va here is the start of the *last* tsb page from the
 * loop above; the bzero and tsb base register writes presumably mean
 * TSB_KERNEL_MIN_ADDRESS — only equivalent when TSB_KERNEL_PAGES is
 * 1.  Confirm.
 */
bzero((void *)va, TSB_KERNEL_SIZE);
stxa(AA_IMMU_TSB, ASI_IMMU,
(va >> (STTE_SHIFT - TTE_SHIFT)) | TSB_SIZE_REG);
stxa(AA_DMMU_TSB, ASI_DMMU,
(va >> (STTE_SHIFT - TTE_SHIFT)) | TSB_SIZE_REG);
membar(Sync);
/*
 * Calculate the first and last available physical addresses.
 */
avail_start = phys_avail[0];
for (i = 0; phys_avail[i + 2] != 0; i += 2)
;
avail_end = phys_avail[i + 1];
/*
 * Allocate physical memory for the heads of the stte alias chains,
 * one pointer per physical page, and zero them.
 */
sz = round_page(((avail_end - avail_start) >> PAGE_SHIFT) *
sizeof (vm_offset_t));
pv_table = pmap_bootstrap_alloc(sz);
/* XXX */
avail_start += sz;
for (i = 0; i < sz; i += sizeof(vm_offset_t))
stxp(pv_table + i, 0);
/*
 * Set the start and end of kva.  The kernel is loaded at the first
 * available 4 meg super page, so round up to the end of the page.
 */
virtual_avail = roundup(ekva, PAGE_SIZE_4M);
virtual_end = VM_MAX_KERNEL_ADDRESS;
/*
 * Allocate virtual address space for copying and zeroing pages of
 * physical memory.
 */
CADDR1 = virtual_avail;
virtual_avail += PAGE_SIZE;
CADDR2 = virtual_avail;
virtual_avail += PAGE_SIZE;
}
/*
* Allocate a physical page of memory directly from the phys_avail map.
* Can only be called from pmap_bootstrap before avail start and end are
* calculated.
*/
/*
 * Allocate page-rounded physical memory by carving it off the front
 * of the first phys_avail region large enough to hold it.  Only valid
 * during pmap_bootstrap, before avail_start/avail_end are computed.
 * Panics (does not return) if no region can satisfy the request.
 */
static vm_offset_t
pmap_bootstrap_alloc(vm_size_t size)
{
vm_offset_t pa;
int i;
size = round_page(size);
for (i = 0; phys_avail[i] != 0; i += 2) {
if (phys_avail[i + 1] - phys_avail[i] < size)
continue;
pa = phys_avail[i];
phys_avail[i] += size;
return (pa);
}
panic("pmap_bootstrap_alloc");
}
/*
* Allocate a hardware context number from the context map.
*/
static u_int
pmap_context_alloc(void)
{
	u_int ctx;

	/*
	 * Scan the context map circularly, starting just past the most
	 * recently allocated context, and claim the first free slot.
	 */
	ctx = pmap_context_base;
	for (;;) {
		if (pmap_context_map[ctx] == 0) {
			pmap_context_map[ctx] = 1;
			pmap_context_base = (ctx + 1) &
			    (PMAP_CONTEXT_MAX - 1);
			return (ctx);
		}
		ctx = (ctx + 1) & (PMAP_CONTEXT_MAX - 1);
		if (ctx == pmap_context_base)
			break;
	}
	/* Every hardware context is in use. */
	panic("pmap_context_alloc");
}
/*
* Free a hardware context number back to the context map.
*/
static void
pmap_context_destroy(u_int i)
{

	/* Mark context i free for reuse by pmap_context_alloc(). */
	pmap_context_map[i] = 0;
}
/*
* Map a range of physical addresses into kernel virtual address space.
*
* The value passed in *virt is a suggested virtual address for the mapping.
* Architectures which can support a direct-mapped physical to virtual region
* can return the appropriate address within that region, leaving '*virt'
* unchanged. We cannot and therefore do not; *virt is updated with the
* first usable address after the mapped region.
*/
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
{
	vm_offset_t start;
	vm_offset_t cur;
	vm_offset_t pa;

	/*
	 * Wire down each physical page at consecutive virtual addresses
	 * beginning at *virt, then report the next free address back.
	 */
	start = *virt;
	cur = start;
	for (pa = pa_start; pa < pa_end; pa += PAGE_SIZE) {
		pmap_kenter(cur, pa);
		cur += PAGE_SIZE;
	}
	*virt = cur;
	return (start);
}
/*
* Map a wired page into kernel virtual address space.
*/
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	struct tte tte;

	/*
	 * Build a valid, referenced, modified, cacheable, privileged,
	 * writeable 8K tte and install it in the kernel tsb.
	 */
	tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
	tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
	    TD_MOD | TD_REF | TD_CP | TD_P | TD_W;
	tsb_tte_enter_kernel(va, tte);
}
/*
* Remove a wired page from kernel virtual address space.
*/
void
pmap_kremove(vm_offset_t va)
{

	/* Drop the mapping from the kernel tsb. */
	tsb_remove_kernel(va);
}
/*
* Map a list of wired pages into kernel virtual address space. This is
* intended for temporary mappings which do not need page modification or
* references recorded. Existing mappings in the region are overwritten.
*/
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int n;

	/* Enter each page as a wired kernel mapping. */
	for (n = 0; n < count; n++) {
		pmap_kenter(va, VM_PAGE_TO_PHYS(m[n]));
		va += PAGE_SIZE;
	}
}
/*
* Remove page mappings from kernel virtual address space. Intended for
* temporary mappings entered by pmap_qenter.
*/
void
pmap_qremove(vm_offset_t va, int count)
{
	int n;

	/* Tear down each mapping entered by pmap_qenter. */
	for (n = count; n > 0; n--) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
}
/*
* Map the given physical page at the specified virtual address in the
* target pmap with the protection requested. If specified the page
* will be wired down.
*/
void
pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	struct stte *stp;
	struct tte tte;
	vm_offset_t pa;

	/* Build the tte from the page, protection and wiring. */
	pa = VM_PAGE_TO_PHYS(m);
	tte.tte_tag = TT_CTX(pm->pm_context) | TT_VA(va);
	tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
	    TD_CP | TD_CV;
	if (pm->pm_context == TLB_CTX_KERNEL)
		tte.tte_data |= TD_P;
	if (wired == TRUE) {
		/* Pre-set ref (and mod, if writeable) for wired mappings. */
		tte.tte_data |= TD_REF;
		if (prot & VM_PROT_WRITE)
			tte.tte_data |= TD_MOD;
	}
	if (prot & VM_PROT_WRITE)
		tte.tte_data |= TD_W;
	if (prot & VM_PROT_EXECUTE) {
		tte.tte_data |= TD_EXEC;
		/* Keep the instruction cache coherent with the new code. */
		icache_global_flush(&pa);
	}

	/* Kernel mappings go straight into the kernel tsb, no pv lists. */
	if (pm == kernel_pmap) {
		tsb_tte_enter_kernel(va, tte);
		return;
	}

	PMAP_LOCK(pm);
	if ((stp = tsb_stte_lookup(pm, va)) != NULL) {
		/*
		 * Replacing an existing mapping: unlink it from its old
		 * pv alias chain and relink under the new physical page.
		 */
		pv_remove_virt(stp);
		tsb_stte_remove(stp);
		pv_insert(pm, pa, va, stp);
		stp->st_tte = tte;
	} else {
		tsb_tte_enter(pm, va, tte);
	}
	PMAP_UNLOCK(pm);
}
/*
* Initialize the pmap module.
*/
void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{
	/* Nothing to do; all setup happens in pmap_bootstrap. */
}
void
pmap_init2(void)
{
	/* Nothing to do. */
}
/*
* Initialize the pmap associated with process 0.
*/
void
pmap_pinit0(pmap_t pm)
{

	/*
	 * NOTE(review): the passed-in pointer is immediately overwritten
	 * with &kernel_pmap_store, so only the static kernel pmap is ever
	 * initialized here -- confirm callers always pass kernel_pmap.
	 */
	pm = &kernel_pmap_store;
	pm->pm_context = pmap_context_alloc();
	pm->pm_active = 0;
	pm->pm_count = 1;
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
/*
* Initialize a preallocated and zeroed pmap structure.
*/
void
pmap_pinit(pmap_t pm)
{
	struct stte *stp;

	pm->pm_context = pmap_context_alloc();
	pm->pm_active = 0;
	pm->pm_count = 1;
	/* Allocate the first page of this pmap's tsb. */
	stp = &pm->pm_stte;
	stp->st_tte = tsb_page_alloc(pm, (vm_offset_t)tsb_base(0));
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
void
pmap_pinit2(pmap_t pmap)
{
	/* Nothing to do. */
}
/*
* Grow the number of kernel page table entries. Unneeded.
*/
void
pmap_growkernel(vm_offset_t addr)
{
	/* The kernel tsb is sized at boot; no growth is needed. */
}
/*
* Zero a page of physical memory by temporarily mapping it into the tlb.
*/
void
pmap_zero_page(vm_offset_t pa)
{
	struct tte tte;
	vm_offset_t va;

	/*
	 * Enter a temporary locked (TD_L) mapping for the page at CADDR2
	 * directly into the dtlb, zero through it, then demap it.
	 */
	va = CADDR2;
	tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
	tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_L | TD_CP | TD_P | TD_W;
	tlb_store(TLB_DTLB, va, tte);
	bzero((void *)va, PAGE_SIZE);
	tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va);
}
/*
* Make the specified page pageable (or not). Unneeded.
*/
void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
	      boolean_t pageable)
{
	/* Nothing to do on this architecture. */
}
/*
* Create the kernel stack and user structure for a new process. This
* routine directly affects the performance of fork().
*/
void
pmap_new_proc(struct proc *p)
{
	struct user *u;
	vm_object_t o;
	vm_page_t m;
	u_int i;

	/* Create the backing object for the u area, if needed. */
	if ((o = p->p_upages_obj) == NULL) {
		o = vm_object_allocate(OBJT_DEFAULT, UPAGES);
		p->p_upages_obj = o;
	}
	/* Allocate kernel virtual address space for the u area, if needed. */
	if ((u = p->p_addr) == NULL) {
		u = (struct user *)kmem_alloc_nofault(kernel_map,
		    UPAGES * PAGE_SIZE);
		KASSERT(u != NULL, ("pmap_new_proc: u area\n"));
		p->p_addr = u;
	}
	/* Wire each upage and enter it into the kernel pmap. */
	for (i = 0; i < UPAGES; i++) {
		m = vm_page_grab(o, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		m->wire_count++;
		cnt.v_wire_count++;
		pmap_kenter((vm_offset_t)u + i * PAGE_SIZE,
		    VM_PAGE_TO_PHYS(m));
		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
}
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{

	/* Fictitious pages are unmanaged; granting write needs no change. */
	if (m->flags & PG_FICTITIOUS || prot & VM_PROT_WRITE)
		return;
	if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
		/* Downgrade to read-only: clear the writeable bit. */
		pmap_bit_clear(m, TD_W);
	else
		/* No access at all: remove every mapping of the page. */
		pmap_global_remove_all(m);
}
void
pmap_clear_modify(vm_page_t m)
{

	/* Clear the modified bit in every mapping of the page. */
	if (m->flags & PG_FICTITIOUS)
		return;
	pmap_bit_clear(m, TD_MOD);
}
/*
 * Clear the given tte bits in every mapping of a physical page, demapping
 * each changed translation from the tlb.
 */
static void
pmap_bit_clear(vm_page_t m, u_long bits)
{
	vm_offset_t pstp;
	vm_offset_t pvh;
	vm_offset_t pa;
	vm_offset_t va;
	struct tte tte;

	pa = VM_PAGE_TO_PHYS(m);
	pvh = pv_lookup(pa);
	PV_LOCK();
#ifdef notyet
restart:
#endif
	/* Walk the pv alias chain of the page. */
	for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
		tte = pv_get_tte(pstp);
		KASSERT(TD_PA(tte.tte_data) == pa,
		    ("pmap_bit_clear: corrupt alias chain"));
		if ((tte.tte_data & bits) == 0)
			continue;
		va = tte_get_va(tte);
		/* Only track modified status for pageable regions. */
		if (bits == TD_W && !pmap_track_modified(va))
			continue;
		if (bits == TD_W && tte.tte_data & TD_MOD) {
			/*
			 * Revoking write access from a dirty mapping:
			 * transfer the dirty bit to the page.  Note that
			 * TD_MOD is or'ed into bits here, so it is also
			 * cleared for the remaining mappings in the chain.
			 */
			vm_page_dirty(m);
			bits |= TD_MOD;
		}
		pv_bit_clear(pstp, bits);
#ifdef notyet
		generation = pv_generation;
		PV_UNLOCK();
		/* XXX pass function and parameter to ipi call */
		ipi_all(IPI_TLB_PAGE_DEMAP);
		PV_LOCK();
		if (generation != pv_generation)
			goto restart;
#else
		/* Flush the now-stale translation from the tlb. */
		tlb_page_demap(TLB_DTLB, tte_get_ctx(tte), va);
#endif
	}
	PV_UNLOCK();
}
/*
 * Set the given tte bits in every mapping of a physical page, demapping
 * each changed translation from the tlb.
 */
static void
pmap_bit_set(vm_page_t m, u_long bits)
{
	vm_offset_t pstp;
	vm_offset_t pvh;
	vm_offset_t pa;
	struct tte tte;

	pa = VM_PAGE_TO_PHYS(m);
	pvh = pv_lookup(pa);
	PV_LOCK();
#ifdef notyet
restart:
#endif
	/* Walk the pv alias chain of the page. */
	for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
		tte = pv_get_tte(pstp);
		KASSERT(TD_PA(tte.tte_data) == pa,
		    ("pmap_bit_set: corrupt alias chain"));
		if (tte.tte_data & bits)
			continue;
		pv_bit_set(pstp, bits);
#ifdef notyet
		generation = pv_generation;
		PV_UNLOCK();
		/* XXX pass function and parameter to ipi call */
		ipi_all(IPI_TLB_PAGE_DEMAP);
		PV_LOCK();
		if (generation != pv_generation)
			goto restart;
#else
		/* Flush the now-stale translation from the tlb. */
		tlb_page_demap(TLB_DTLB, tte_get_ctx(tte), tte_get_va(tte));
#endif
	}
	PV_UNLOCK();
}
/*
 * Return non-zero if any mapping of the page has any of the given tte
 * bits set.
 */
static int
pmap_bit_test(vm_page_t m, u_long bits)
{
	vm_offset_t p;
	int found;

	found = 0;
	PV_LOCK();
	for (p = pv_get_first(pv_lookup(VM_PAGE_TO_PHYS(m))); p != 0;
	    p = pv_get_next(p)) {
		if (pv_bit_test(p, bits)) {
			found = 1;
			break;
		}
	}
	PV_UNLOCK();
	return (found);
}
/*
 * Remove every mapping of a physical page from all pmaps: invalidate each
 * tte, flush the local translations, then unlink the alias chain.
 * The printfs are debug instrumentation for bring-up.
 */
static void
pmap_global_remove_all(vm_page_t m)
{
	vm_offset_t pstp;
	vm_offset_t pvh;
	vm_offset_t pa;

	printf("pmap_global_remove_all\n");
	pa = VM_PAGE_TO_PHYS(m);
	pvh = pv_lookup(pa);
	pv_dump(pvh);
	PV_LOCK();
	printf("pmap_global_remove_all: for\n");
	/* First pass: clear the valid bit in every mapping. */
	for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp))
		pv_bit_clear(pstp, TD_V);
	printf("pmap_global_remove_all: done for\n");
	PV_UNLOCK();
	/* Flush the stale translations from the local tlb. */
	pmap_local_remove_all(m);
	pv_dump(pvh);
	PV_LOCK();
	printf("pmap_global_remove_all: while\n");
	/* Second pass: unlink every entry from the alias chain. */
	while ((pstp = pv_get_first(pvh)) != 0) {
		pv_dump(pvh);
		pv_remove_phys(pstp);
	}
	printf("pmap_global_remove_all: done while\n");
	PV_UNLOCK();
	printf("pmap_global_remove_all: done\n");
}
/*
 * Remove the local (this cpu's) translations for every mapping of a
 * physical page.  The printfs are debug instrumentation for bring-up.
 */
static void
pmap_local_remove_all(vm_page_t m)
{
	vm_offset_t pstp;
	vm_offset_t pvh;
	vm_offset_t pa;
	struct tte tte;

	pa = VM_PAGE_TO_PHYS(m);
	pvh = pv_lookup(pa);
	PV_LOCK();
	printf("pmap_local_remove_all: for\n");
	for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
		tte = pv_get_tte(pstp);
		tsb_tte_local_remove(&tte);
	}
	printf("pmap_local_remove_all: done for\n");
	PV_UNLOCK();
}
void
pmap_activate(struct proc *p)
@ -74,12 +756,6 @@ pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
TODO;
}
void
pmap_clear_modify(vm_page_t m)
{
TODO;
}
void
pmap_collect(void)
{
@ -99,25 +775,12 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst)
TODO;
}
void
pmap_zero_page(vm_offset_t pa)
{
TODO;
}
void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{
TODO;
}
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
boolean_t wired)
{
TODO;
}
vm_offset_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
@ -125,24 +788,6 @@ pmap_extract(pmap_t pmap, vm_offset_t va)
return (0);
}
void
pmap_growkernel(vm_offset_t addr)
{
TODO;
}
void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{
TODO;
}
void
pmap_init2(void)
{
TODO;
}
boolean_t
pmap_is_modified(vm_page_t m)
{
@ -163,12 +808,6 @@ pmap_ts_referenced(vm_page_t m)
return (0);
}
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
TODO;
}
vm_offset_t
pmap_kextract(vm_offset_t va)
{
@ -176,19 +815,6 @@ pmap_kextract(vm_offset_t va)
return (0);
}
void
pmap_kremove(vm_offset_t va)
{
TODO;
}
vm_offset_t
pmap_map(vm_offset_t *va, vm_offset_t start, vm_offset_t end, int prot)
{
TODO;
return (0);
}
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
@ -196,12 +822,6 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
return (0);
}
void
pmap_new_proc(struct proc *p)
{
TODO;
}
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size, int limit)
@ -209,19 +829,6 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
TODO;
}
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
TODO;
}
void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
boolean_t pageable)
{
TODO;
}
boolean_t
pmap_page_exists(pmap_t pmap, vm_page_t m)
{
@ -229,24 +836,6 @@ pmap_page_exists(pmap_t pmap, vm_page_t m)
return (0);
}
void
pmap_pinit(pmap_t pmap)
{
TODO;
}
void
pmap_pinit0(pmap_t pmap)
{
TODO;
}
void
pmap_pinit2(pmap_t pmap)
{
TODO;
}
void
pmap_prefault(pmap_t pmap, vm_offset_t va, vm_map_entry_t entry)
{
@ -267,21 +856,10 @@ pmap_phys_address(int ppn)
}
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
pmap_reference(pmap_t pm)
{
TODO;
}
void
pmap_qremove(vm_offset_t va, int count)
{
TODO;
}
void
pmap_reference(pmap_t pmap)
{
TODO;
if (pm != NULL)
pm->pm_count++;
}
void

90
sys/sparc64/sparc64/pv.c Normal file
View File

@ -0,0 +1,90 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <machine/asi.h>
#include <machine/frame.h>
#include <machine/pmap.h>
#include <machine/pv.h>
#include <machine/tte.h>
#include <machine/tlb.h>
#include <machine/tsb.h>
/*
* Physical address of array of physical addresses of stte alias chain heads,
* and generation count of alias chains.
*/
vm_offset_t pv_table;
u_long pv_generation;
void
pv_insert(pmap_t pm, vm_offset_t pa, vm_offset_t va, struct stte *stp)
{
	vm_offset_t pstp;
	vm_offset_t pvh;

	/* Link the stte at the head of the page's alias chain. */
	pstp = tsb_stte_vtophys(pm, stp);
	pvh = pv_lookup(pa);
	PV_LOCK();
	if ((stp->st_next = pv_get_first(pvh)) != 0)
		pv_set_prev(stp->st_next, pstp + ST_NEXT);
	pv_set_first(pvh, pstp);
	stp->st_prev = pvh;
	/* Bump the generation so interrupted traversals can restart. */
	pv_generation++;
	PV_UNLOCK();
}
void
pv_remove_virt(struct stte *stp)
{

	/* Unlink the stte from its alias chain via physical addresses. */
	PV_LOCK();
	if (stp->st_next != 0)
		pv_set_prev(stp->st_next, stp->st_prev);
	stxp(stp->st_prev, stp->st_next);
	pv_generation++;
	PV_UNLOCK();
}
/*
 * Print an alias chain for debugging.  Caller need not hold the pv lock.
 */
void
pv_dump(vm_offset_t pvh)
{
	vm_offset_t pstp;

	printf("pv_dump: pvh=%#lx first=%#lx\n", pvh, pv_get_first(pvh));
	for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp))
		printf("\tpstp=%#lx next=%#lx prev=%#lx\n", pstp,
		    pv_get_next(pstp), pv_get_prev(pstp));
	printf("pv_dump: done\n");
}

View File

@ -0,0 +1,346 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asi.h>
#include <machine/asmacros.h>
#include "assym.s"
#define	E		/* empty: expands to no ASI suffix or operand */

/* Paste together ld/st mnemonics, optionally with an "a" (ASI) suffix. */
#define	_LD(w, a)	ld ## w ## a
#define	_ST(w, a)	st ## w ## a
#define	LD(w, a)	_LD(w, a)
#define	ST(w, a)	_ST(w, a)

/*
 * Byte at a time copy loop; sa/sasi and da/dasi select normal or
 * alternate address space accesses for source and destination.
 * Clobbers %o3 and %o4.
 */
#define	_BCOPY(src, dst, len, sa, sasi, da, dasi) \
	brz,pn	len, 2f ; \
	mov	len, %o3 ; \
1:	LD(ub, sa) [src] sasi, %o4 ; \
	ST(b, da) %o4, [dst] dasi ; \
	dec	%o3 ; \
	inc	src ; \
	brnz,pt	%o3, 1b ; \
	inc	dst ; \
2:

#define	BCOPY(src, dst, len) \
	_BCOPY(src, dst, len, E, E, E, E)

/* Copy between user and kernel space via the ASI_AIUP alternate space. */
#define	COPYIN(uaddr, kaddr, len) \
	wr	%g0, ASI_AIUP, %asi ; \
	_BCOPY(uaddr, kaddr, len, a, %asi, E, E)

#define	COPYOUT(kaddr, uaddr, len) \
	wr	%g0, ASI_AIUP, %asi ; \
	_BCOPY(kaddr, uaddr, len, E, E, a, %asi)

/*
 * Bounded nul-terminated string copy; %o4 counts the bytes copied (stored
 * through done if done is non-zero) and %o5 holds the error code, either
 * 0 or ENAMETOOLONG on truncation.
 */
#define	_COPYSTR(src, dst, len, done, sa, sasi, da, dasi) \
	clr	%o4 ; \
	clr	%o5 ; \
1:	LD(ub, sa) [src] sasi, %g1 ; \
	ST(b, da) %g1, [dst] dasi ; \
	brz,pn	%g1, 2f ; \
	inc	%o4 ; \
	dec	len ; \
	inc	src ; \
	brgz,pt	len, 1b ; \
	inc	dst ; \
	mov	ENAMETOOLONG, %o5 ; \
2:	brnz,a	done, 3f ; \
	stx	%o4, [done] ; \
3:

/* NOTE(review): formal names are swapped; arguments are (src, dst, ...). */
#define	COPYSTR(dst, src, len, done) \
	_COPYSTR(dst, src, len, done, E, E, E, E)

#define	COPYINSTR(uaddr, kaddr, len, done) \
	wr	%g0, ASI_AIUP, %asi ; \
	_COPYSTR(uaddr, kaddr, len, done, a, %asi, E, E)

/* Arm/disarm the pcb_onfault recovery address; %g6 caches curpcb. */
#define	CATCH_SETUP(label) \
	setx	label, %g2, %g1 ; \
	ldx	[PCPU(CURPCB)], %g6 ; \
	stx	%g1, [%g6 + PCB_ONFAULT] ;

#define	CATCH_END() \
	stx	%g0, [%g6 + PCB_ONFAULT] ;

/* Fetch an aligned datum from user space, with fault recovery. */
#define	FU_ALIGNED(loader, label) \
	CATCH_SETUP(label) ; \
	loader	[%o0] ASI_AIUP, %o0 ; \
	retl ; \
	CATCH_END()

/* As above, but fail with -1 if the address is not size-aligned. */
#define	FU_BYTES(loader, size, label) \
	btst	(size) - 1, %o0 ; \
	bnz,pn	%xcc, .Lfsalign ; \
	EMPTY ; \
	FU_ALIGNED(loader, label)

/* Store an aligned datum to user space, with fault recovery. */
#define	SU_ALIGNED(storer, label) \
	CATCH_SETUP(label) ; \
	storer	%o1, [%o0] ASI_AIUP ; \
	retl ; \
	CATCH_END()

/* As above, but fail with -1 if the address is not size-aligned. */
#define	SU_BYTES(storer, size, label) \
	btst	(size) - 1, %o0 ; \
	bnz,pn	%xcc, .Lfsalign ; \
	EMPTY ; \
	SU_ALIGNED(storer, label)
/*
 * int bcmp(const void *b1, const void *b2, size_t len)
 *
 * Byte-wise comparison; returns zero iff the two buffers are equal
 * (the non-zero remaining count on a mismatch).
 */
ENTRY(bcmp)
	brz,pn	%o2, 2f
	clr	%o3
1:	ldub	[%o0 + %o3], %o4
	ldub	[%o1 + %o3], %o5
	cmp	%o4, %o5
	/*
	 * Fix: was "1f", but no forward "1:" local label exists; on a
	 * mismatch exit to 2 with %o2 still non-zero.
	 */
	bne,pn	%xcc, 2f
	inc	%o3
	deccc	%o2
	bne,pt	%xcc, 1b
	nop
2:	retl
	mov	%o2, %o0
END(bcmp)
/*
 * void bcopy(const void *src, void *dst, size_t len)
 *
 * Forward byte-at-a-time copy; does not handle overlapping regions.
 */
ENTRY(bcopy)
	BCOPY(%o0, %o1, %o2)
	retl
	nop
END(bcopy)
/*
 * void ovbcopy(const void *src, void *dst, size_t len)
 * XXX handle overlap... currently a plain forward copy, which corrupts
 * data when dst overlaps the tail of src.
 */
ENTRY(ovbcopy)
	BCOPY(%o0, %o1, %o2)
	retl
	nop
END(ovbcopy)
/*
 * void bzero(void *b, size_t len)
 */
ENTRY(bzero)
	/*
	 * Fix: was "brz ... 1f", which branched INTO the loop for a zero
	 * length; deccc would then wrap the count and scribble over memory.
	 * A zero length must skip the loop entirely.
	 */
	brz,pn	%o1, 2f
	nop
1:	deccc	%o1
	stb	%g0, [%o0]
	bne,pt	%xcc, 1b
	inc	%o0
2:	retl
	nop
END(bzero)
/*
 * void *memcpy(void *dst, const void *src, size_t len)
 *
 * %o0 (dst) is not modified by BCOPY, so it is also the return value.
 */
ENTRY(memcpy)
	BCOPY(%o1, %o0, %o2)
	retl
	nop
END(memcpy)
/*
 * int copyin(const void *uaddr, void *kaddr, size_t len)
 *
 * Returns 0, or EFAULT via .Lefault if the user access faults.
 */
ENTRY(copyin)
	CATCH_SETUP(.Lefault)
	COPYIN(%o0, %o1, %o2)
	CATCH_END()
	retl
	clr	%o0
END(copyin)
/*
 * int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
 *
 * Returns 0, ENAMETOOLONG (from %o5), or EFAULT via .Lefault.
 */
ENTRY(copyinstr)
	CATCH_SETUP(.Lefault)
	COPYINSTR(%o0, %o1, %o2, %o3)
	CATCH_END()
	retl
	mov	%o5, %o0
END(copyinstr)
/*
 * int copyout(const void *kaddr, void *uaddr, size_t len)
 *
 * Returns 0, or EFAULT via .Lefault if the user access faults.
 */
ENTRY(copyout)
	CATCH_SETUP(.Lefault)
	COPYOUT(%o0, %o1, %o2)
	CATCH_END()
	retl
	clr	%o0
END(copyout)

/* Shared fault recovery for copyin, copyinstr and copyout. */
.Lefault:
	CATCH_END()
	retl
	mov	EFAULT, %o0
/*
 * int copystr(const void *src, void *dst, size_t len, size_t *done)
 *
 * Kernel-to-kernel string copy; returns 0 or ENAMETOOLONG from %o5.
 */
ENTRY(copystr)
	COPYSTR(%o0, %o1, %o2, %o3)
	retl
	mov	%o5, %o0
END(copystr)
/*
 * int fubyte(const void *base)
 *	Fetch a byte from user space.
 */
ENTRY(fubyte)
	FU_ALIGNED(lduba, .Lfsfault)
END(fubyte)

/*
 * int fusword(const void *base)
 *	Fetch a 16 bit short from user space.
 *	Fix: was lduwa (32 bit load) despite the 2 byte alignment check.
 */
ENTRY(fusword)
	FU_BYTES(lduha, 2, .Lfsfault)
END(fusword)

/*
 * int fuswintr(const void *base)
 *	As fusword, but bails via fsbail instead of faulting, for use
 *	from interrupt context.
 */
ENTRY(fuswintr)
	FU_BYTES(lduha, 2, fsbail)
END(fuswintr)

/*
 * long fuword(const void *base)
 *	Fetch a 64 bit word from user space.
 */
ENTRY(fuword)
	FU_BYTES(ldxa, 8, .Lfsfault)
END(fuword)

/*
 * int subyte(void *base, int byte)
 *	Store a byte to user space.
 */
ENTRY(subyte)
	SU_ALIGNED(stba, .Lfsfault)
END(subyte)

/*
 * int suibyte(void *base, int byte)
 *	As subyte, but bails instead of faulting.
 */
ENTRY(suibyte)
	SU_ALIGNED(stba, fsbail)
END(suibyte)

/*
 * int susword(void *base, int word)
 *	Store a 16 bit short to user space.
 *	Fix: was stwa (32 bit store) despite the 2 byte alignment check.
 */
ENTRY(susword)
	SU_BYTES(stha, 2, .Lfsfault)
END(susword)

/*
 * int suswintr(void *base, int word)
 *	As susword, but bails instead of faulting.
 */
ENTRY(suswintr)
	SU_BYTES(stha, 2, fsbail)
END(suswintr)

/*
 * int suword(void *base, long word)
 *	Store a 64 bit word to user space.
 *	Fix: was stwa, which stores only the low 32 bits; the 8 byte
 *	alignment check and fuword's ldxa both imply a 64 bit access.
 */
ENTRY(suword)
	SU_BYTES(stxa, 8, .Lfsfault)
END(suword)

/*
 * Common failure paths: fsbail for the *intr variants, .Lfsfault after a
 * recovered fault, .Lfsalign for misaligned addresses.  All return -1.
 */
ENTRY(fsbail)
	nop
.Lfsfault:
	CATCH_END()
.Lfsalign:
	retl
	mov	-1, %o0
END(fsbail)
ENTRY(longjmp)
	/*
	 * NOTE(review): movrz copies %o1 into %g3 when %o1 is zero, which
	 * yields a return value of 0 instead of 1 -- looks inverted
	 * (movrnz?); confirm against longjmp(3) semantics.
	 */
	set	1, %g3
	movrz	%o1, %o1, %g3
	mov	%o0, %g1
	ldx	[%g1 + JB_FP], %g2
	/* Unwind register windows until we reach the saved frame. */
1:	cmp	%fp, %g2
	bl,a,pt	%xcc, 1b
	restore
	bne,pn	%xcc, 2f
	ldx	[%g1 + JB_SP], %o2
	ldx	[%g1 + JB_PC], %o3
	cmp	%o2, %sp
	blt,pn	%xcc, 2f
	movge	%xcc, %o2, %sp
	/* Resume at the instruction after the call to setjmp. */
	jmp	%o3 + 8
	mov	%g3, %o0
2:	PANIC("longjmp botch", %l1)
END(longjmp)
ENTRY(setjmp)
	/* Save the stack pointer, return address and frame pointer. */
	stx	%sp, [%o0 + JB_SP]
	stx	%o7, [%o0 + JB_PC]
	stx	%fp, [%o0 + JB_FP]
	retl
	clr	%o0
END(setjmp)
/*
 * void openfirmware(cell_t args[])
 *
 * Call into the firmware client interface: drop to trap level 0, install
 * the firmware trap table (ofw_tba), call the entry vector (ofw_vec) with
 * the argument array, then restore the kernel pstate/tl/tba.
 */
ENTRY(openfirmware)
	save	%sp, -CCFSZ, %sp
	rdpr	%pstate, %l0
	rdpr	%tl, %l1
	rdpr	%tba, %l2
	wrpr	%g0, 0, %tl
	setx	ofw_tba, %l4, %l3
	ldx	[%l3], %l3
	setx	ofw_vec, %l5, %l4
	ldx	[%l4], %l4
	wrpr	%l3, 0, %tba
	call	%l4
	mov	%i0, %o0
	wrpr	%l0, 0, %pstate
	wrpr	%l1, 0, %tl
	wrpr	%l2, 0, %tba
	ret
	restore
END(openfirmware)

View File

@ -0,0 +1,346 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asi.h>
#include <machine/asmacros.h>
#include "assym.s"
#define E
#define _LD(w, a) ld ## w ## a
#define _ST(w, a) st ## w ## a
#define LD(w, a) _LD(w, a)
#define ST(w, a) _ST(w, a)
#define _BCOPY(src, dst, len, sa, sasi, da, dasi) \
brz,pn len, 2f ; \
mov len, %o3 ; \
1: LD(ub, sa) [src] sasi, %o4 ; \
ST(b, da) %o4, [dst] dasi ; \
dec %o3 ; \
inc src ; \
brnz,pt %o3, 1b ; \
inc dst ; \
2:
#define BCOPY(src, dst, len) \
_BCOPY(src, dst, len, E, E, E, E)
#define COPYIN(uaddr, kaddr, len) \
wr %g0, ASI_AIUP, %asi ; \
_BCOPY(uaddr, kaddr, len, a, %asi, E, E)
#define COPYOUT(kaddr, uaddr, len) \
wr %g0, ASI_AIUP, %asi ; \
_BCOPY(kaddr, uaddr, len, E, E, a, %asi)
#define _COPYSTR(src, dst, len, done, sa, sasi, da, dasi) \
clr %o4 ; \
clr %o5 ; \
1: LD(ub, sa) [src] sasi, %g1 ; \
ST(b, da) %g1, [dst] dasi ; \
brz,pn %g1, 2f ; \
inc %o4 ; \
dec len ; \
inc src ; \
brgz,pt len, 1b ; \
inc dst ; \
mov ENAMETOOLONG, %o5 ; \
2: brnz,a done, 3f ; \
stx %o4, [done] ; \
3:
#define COPYSTR(dst, src, len, done) \
_COPYSTR(dst, src, len, done, E, E, E, E)
#define COPYINSTR(uaddr, kaddr, len, done) \
wr %g0, ASI_AIUP, %asi ; \
_COPYSTR(uaddr, kaddr, len, done, a, %asi, E, E)
#define CATCH_SETUP(label) \
setx label, %g2, %g1 ; \
ldx [PCPU(CURPCB)], %g6 ; \
stx %g1, [%g6 + PCB_ONFAULT] ;
#define CATCH_END() \
stx %g0, [%g6 + PCB_ONFAULT] ;
#define FU_ALIGNED(loader, label) \
CATCH_SETUP(label) ; \
loader [%o0] ASI_AIUP, %o0 ; \
retl ; \
CATCH_END()
#define FU_BYTES(loader, size, label) \
btst (size) - 1, %o0 ; \
bnz,pn %xcc, .Lfsalign ; \
EMPTY ; \
FU_ALIGNED(loader, label)
#define SU_ALIGNED(storer, label) \
CATCH_SETUP(label) ; \
storer %o1, [%o0] ASI_AIUP ; \
retl ; \
CATCH_END()
#define SU_BYTES(storer, size, label) \
btst (size) - 1, %o0 ; \
bnz,pn %xcc, .Lfsalign ; \
EMPTY ; \
SU_ALIGNED(storer, label)
/*
 * int bcmp(const void *b1, const void *b2, size_t len)
 *
 * Byte-wise comparison; returns zero iff the two buffers are equal
 * (the non-zero remaining count on a mismatch).
 */
ENTRY(bcmp)
	brz,pn	%o2, 2f
	clr	%o3
1:	ldub	[%o0 + %o3], %o4
	ldub	[%o1 + %o3], %o5
	cmp	%o4, %o5
	/*
	 * Fix: was "1f", but no forward "1:" local label exists; on a
	 * mismatch exit to 2 with %o2 still non-zero.
	 */
	bne,pn	%xcc, 2f
	inc	%o3
	deccc	%o2
	bne,pt	%xcc, 1b
	nop
2:	retl
	mov	%o2, %o0
END(bcmp)
/*
* void bcopy(const void *src, void *dst, size_t len)
*/
ENTRY(bcopy)
BCOPY(%o0, %o1, %o2)
retl
nop
END(bcopy)
/*
* void ovbcopy(const void *src, void *dst, size_t len)
* XXX handle overlap...
*/
ENTRY(ovbcopy)
BCOPY(%o0, %o1, %o2)
retl
nop
END(ovbcopy)
/*
 * void bzero(void *b, size_t len)
 */
ENTRY(bzero)
	/*
	 * Fix: was "brz ... 1f", which branched INTO the loop for a zero
	 * length; deccc would then wrap the count and scribble over memory.
	 * A zero length must skip the loop entirely.
	 */
	brz,pn	%o1, 2f
	nop
1:	deccc	%o1
	stb	%g0, [%o0]
	bne,pt	%xcc, 1b
	inc	%o0
2:	retl
	nop
END(bzero)
/*
* void *memcpy(void *dst, const void *src, size_t len)
*/
ENTRY(memcpy)
BCOPY(%o1, %o0, %o2)
retl
nop
END(memcpy)
/*
* int copyin(const void *uaddr, void *kaddr, size_t len)
*/
ENTRY(copyin)
CATCH_SETUP(.Lefault)
COPYIN(%o0, %o1, %o2)
CATCH_END()
retl
clr %o0
END(copyin)
/*
* int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
*/
ENTRY(copyinstr)
CATCH_SETUP(.Lefault)
COPYINSTR(%o0, %o1, %o2, %o3)
CATCH_END()
retl
mov %o5, %o0
END(copyinstr)
/*
* int copyout(const void *kaddr, void *uaddr, size_t len)
*/
ENTRY(copyout)
CATCH_SETUP(.Lefault)
COPYOUT(%o0, %o1, %o2)
CATCH_END()
retl
clr %o0
END(copyout)
.Lefault:
CATCH_END()
retl
mov EFAULT, %o0
/*
* int copystr(const void *src, void *dst, size_t len, size_t *done)
*/
ENTRY(copystr)
COPYSTR(%o0, %o1, %o2, %o3)
retl
mov %o5, %o0
END(copystr)
/*
 * int fubyte(const void *base)
 *	Fetch a byte from user space.
 */
ENTRY(fubyte)
	FU_ALIGNED(lduba, .Lfsfault)
END(fubyte)

/*
 * int fusword(const void *base)
 *	Fetch a 16 bit short from user space.
 *	Fix: was lduwa (32 bit load) despite the 2 byte alignment check.
 */
ENTRY(fusword)
	FU_BYTES(lduha, 2, .Lfsfault)
END(fusword)

/*
 * int fuswintr(const void *base)
 *	As fusword, but bails via fsbail instead of faulting, for use
 *	from interrupt context.
 */
ENTRY(fuswintr)
	FU_BYTES(lduha, 2, fsbail)
END(fuswintr)

/*
 * long fuword(const void *base)
 *	Fetch a 64 bit word from user space.
 */
ENTRY(fuword)
	FU_BYTES(ldxa, 8, .Lfsfault)
END(fuword)

/*
 * int subyte(void *base, int byte)
 *	Store a byte to user space.
 */
ENTRY(subyte)
	SU_ALIGNED(stba, .Lfsfault)
END(subyte)

/*
 * int suibyte(void *base, int byte)
 *	As subyte, but bails instead of faulting.
 */
ENTRY(suibyte)
	SU_ALIGNED(stba, fsbail)
END(suibyte)

/*
 * int susword(void *base, int word)
 *	Store a 16 bit short to user space.
 *	Fix: was stwa (32 bit store) despite the 2 byte alignment check.
 */
ENTRY(susword)
	SU_BYTES(stha, 2, .Lfsfault)
END(susword)

/*
 * int suswintr(void *base, int word)
 *	As susword, but bails instead of faulting.
 */
ENTRY(suswintr)
	SU_BYTES(stha, 2, fsbail)
END(suswintr)

/*
 * int suword(void *base, long word)
 *	Store a 64 bit word to user space.
 *	Fix: was stwa, which stores only the low 32 bits; the 8 byte
 *	alignment check and fuword's ldxa both imply a 64 bit access.
 */
ENTRY(suword)
	SU_BYTES(stxa, 8, .Lfsfault)
END(suword)

/*
 * Common failure paths: fsbail for the *intr variants, .Lfsfault after a
 * recovered fault, .Lfsalign for misaligned addresses.  All return -1.
 */
ENTRY(fsbail)
	nop
.Lfsfault:
	CATCH_END()
.Lfsalign:
	retl
	mov	-1, %o0
END(fsbail)
ENTRY(longjmp)
set 1, %g3
movrz %o1, %o1, %g3
mov %o0, %g1
ldx [%g1 + JB_FP], %g2
1: cmp %fp, %g2
bl,a,pt %xcc, 1b
restore
bne,pn %xcc, 2f
ldx [%g1 + JB_SP], %o2
ldx [%g1 + JB_PC], %o3
cmp %o2, %sp
blt,pn %xcc, 2f
movge %xcc, %o2, %sp
jmp %o3 + 8
mov %g3, %o0
2: PANIC("longjmp botch", %l1)
END(longjmp)
ENTRY(setjmp)
stx %sp, [%o0 + JB_SP]
stx %o7, [%o0 + JB_PC]
stx %fp, [%o0 + JB_FP]
retl
clr %o0
END(setjmp)
/*
* void openfirmware(cell_t args[])
*/
ENTRY(openfirmware)
save %sp, -CCFSZ, %sp
rdpr %pstate, %l0
rdpr %tl, %l1
rdpr %tba, %l2
wrpr %g0, 0, %tl
setx ofw_tba, %l4, %l3
ldx [%l3], %l3
setx ofw_vec, %l5, %l4
ldx [%l4], %l4
wrpr %l3, 0, %tba
call %l4
mov %i0, %o0
wrpr %l0, 0, %pstate
wrpr %l1, 0, %tl
wrpr %l2, 0, %tba
ret
restore
END(openfirmware)

View File

@ -0,0 +1,69 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asmacros.h>
#include "assym.s"
ENTRY(cpu_switch)
	save	%sp, -CCFSZ, %sp

	/* Pick the next runnable process; chooseproc returns it in %o0. */
	call	chooseproc
	ldx	[PCPU(CURPROC)], %l0
	cmp	%l0, %o0
	be,pn	%xcc, 2f
	ldx	[PCPU(FPCURPROC)], %l2
	/*
	 * NOTE(review): switching away from the fpu owner panics here --
	 * presumably fp state saving is not implemented yet; confirm.
	 */
	cmp	%l0, %l2
	bne,pt	%xcc, 1f
	ldx	[PCPU(CURPCB)], %l1
	PANIC("cpu_switch: fpcurproc", %i0)

	/* Flush register windows to the stack and save the old context. */
1:	flushw
	wrpr	%g0, 0, %cleanwin
	stx	%fp, [%l1 + PCB_FP]
	stx	%i7, [%l1 + PCB_PC]

	/* Load the new process's pcb, switch stacks and make it current. */
	ldx	[%o0 + P_ADDR], %o1
	ldx	[%o1 + U_PCB + PCB_FP], %fp
	ldx	[%o1 + U_PCB + PCB_PC], %i7
	stx	%o0, [PCPU(CURPROC)]
	stx	%o1, [PCPU(CURPCB)]
	sub	%fp, CCFSZ, %sp
2:	ret
	restore
END(cpu_switch)
ENTRY(savectx)
	save	%sp, -CCFSZ, %sp
	/* Flush register windows, then save frame pointer and pc. */
	flushw
	ldx	[PCPU(FPCURPROC)], %l0
	brz,pt	%l0, 1f
	nop
	/* NOTE(review): fp state saving unimplemented; trap if fpu in use. */
	illtrap
1:	stx	%fp, [%i0 + PCB_FP]
	stx	%i7, [%i0 + PCB_PC]
	ret
	restore	%g0, 0, %o0
END(savectx)

View File

@ -0,0 +1,69 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asmacros.h>
#include "assym.s"
ENTRY(cpu_switch)
save %sp, -CCFSZ, %sp
call chooseproc
ldx [PCPU(CURPROC)], %l0
cmp %l0, %o0
be,pn %xcc, 2f
ldx [PCPU(FPCURPROC)], %l2
cmp %l0, %l2
bne,pt %xcc, 1f
ldx [PCPU(CURPCB)], %l1
PANIC("cpu_switch: fpcurproc", %i0)
1: flushw
wrpr %g0, 0, %cleanwin
stx %fp, [%l1 + PCB_FP]
stx %i7, [%l1 + PCB_PC]
ldx [%o0 + P_ADDR], %o1
ldx [%o1 + U_PCB + PCB_FP], %fp
ldx [%o1 + U_PCB + PCB_PC], %i7
stx %o0, [PCPU(CURPROC)]
stx %o1, [PCPU(CURPCB)]
sub %fp, CCFSZ, %sp
2: ret
restore
END(cpu_switch)
ENTRY(savectx)
save %sp, -CCFSZ, %sp
flushw
ldx [PCPU(FPCURPROC)], %l0
brz,pt %l0, 1f
nop
illtrap
1: stx %fp, [%i0 + PCB_FP]
stx %i7, [%i0 + PCB_PC]
ret
restore %g0, 0, %o0
END(savectx)

View File

@ -26,15 +26,74 @@
* $FreeBSD$
*/
#include "opt_ddb.h"
#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <machine/frame.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
void trap(u_int type, struct trapframe *tf);
#include <machine/frame.h>
#include <machine/pv.h>
#include <machine/trap.h>
#include <machine/tte.h>
#include <machine/tlb.h>
#include <machine/tsb.h>
void trap(struct trapframe *tf);
/*
 * Trap type names, indexed by trap code (tf_type with T_KERNEL masked
 * off); the order must match the T_* definitions in machine/trap.h.
 */
const char *trap_msg[] = {
	"reserved",
	"power on reset",
	"watchdog reset",
	"externally initiated reset",
	"software initiated reset",
	"red state exception",
	"instruction access exception",
	"instruction access error",
	"illegal instruction",
	"privileged opcode",
	"floating point disabled",
	"floating point exception ieee 754",
	"floating point exception other",
	"tag overflow",
	"division by zero",
	"data access exception",
	"data access error",
	"memory address not aligned",
	"lddf memory address not aligned",
	"stdf memory address not aligned",
	"privileged action",
	"interrupt vector",
	"physical address watchpoint",
	"virtual address watchpoint",
	"corrected ecc error",
	"fast instruction access mmu miss",
	"fast data access mmu miss",
	"fast data access protection",
	"bad spill",
	"bad fill",
	"breakpoint",
};
void
trap(u_int type, struct trapframe *tf)
trap(struct trapframe *tf)
{
TODO;
switch (tf->tf_type) {
#ifdef DDB
case T_BREAKPOINT | T_KERNEL:
if (kdb_trap(tf) != 0)
return;
break;
#endif
default:
break;
}
panic("trap: %s", trap_msg[tf->tf_type & ~T_KERNEL]);
}

279
sys/sparc64/sparc64/tsb.c Normal file
View File

@ -0,0 +1,279 @@
/*-
* Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Berkeley Software Design Inc's name may not be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from BSDI: pmap.c,v 1.28.2.15 2000/04/27 03:10:31 cp Exp
* $FreeBSD$
*/
#include "opt_ddb.h"
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <machine/cpufunc.h>
#include <machine/frame.h>
#include <machine/trap.h>
#include <machine/pmap.h>
#include <machine/pv.h>
#include <machine/tlb.h>
#include <machine/tsb.h>
#include <machine/tte.h>
vm_offset_t tsb_kernel_phys;
/*
 * Return the virtual address of the TSB bucket for va at the given
 * level, or NULL when the bucket's backing page is absent and
 * allocate is zero.  Level 0 buckets are always mapped; for deeper
 * levels the tte mapping the bucket's page lives in the last slot of
 * a bucket of the previous level, and is either reloaded into the
 * dtlb here or, when missing and allocate is set, faulted in.
 */
struct stte *
tsb_get_bucket(pmap_t pm, u_int level, vm_offset_t va, int allocate)
{
	struct stte *bucket;
	struct stte *stp;
	vm_offset_t bva;
	u_long bits;

	bucket = tsb_vtobucket(va, level);
	if (level == 0)
		return (bucket);
	/*
	 * Locate the stte that maps this bucket's page: take the va
	 * bits covered by this level but not by the previous one and
	 * fold them into the previous level's bucket page address.
	 * NOTE(review): exact bit layout depends on the tsb_mask()/
	 * tsb_mask_width() definitions, which are not visible here.
	 */
	bits = (va & ((tsb_mask(level) & ~tsb_mask(level - 1)) << PAGE_SHIFT))
	    >> tsb_mask_width(level);
	if (level == 1) {
		bits |= ((long)bucket & TSB_LEVEL1_BUCKET_MASK) >>
		    TSB_LEVEL1_BUCKET_SHIFT;
	}
	bva = trunc_page((u_long)tsb_vtobucket(va, level - 1)) | bits;
	/* The mapping tte is the last stte of the previous level bucket. */
	stp = (struct stte *)(long)bva + tsb_bucket_size(level - 1) - 1;
	if (tte_match(stp->st_tte, (u_long)bucket) == 0) {
		if (!allocate)
			return (NULL);
		/* Allocate and map a page to back the bucket. */
		tsb_page_fault(pm, level, trunc_page((u_long)bucket), stp);
	} else {
		/* Page is resident; just (re)load the tlb entry. */
		tlb_store_slot(TLB_DTLB, trunc_page((u_long)bucket),
		    stp->st_tte, tsb_tlb_slot(1));
	}
	return (bucket);
}
/*
 * Handle a TSB miss for va within pm's address space.  The mapping is
 * looked up in the TSB; for a data mmu miss it is loaded into the
 * dtlb.  Returns 0 on success, or EFAULT when no mapping exists or
 * the trap type is not handled here.
 */
int
tsb_miss(pmap_t pm, u_int type, struct mmuframe *mf)
{
	struct stte *stp;
	vm_offset_t va;

	va = mf->mf_tar;
	stp = tsb_stte_lookup(pm, va);
	if (stp == NULL)
		return (EFAULT);
	if (type != T_DMMU_MISS)
		return (EFAULT);
	tlb_store(TLB_DTLB, va, stp->st_tte);
	return (0);
}
/*
 * Allocate a page to back part of the TSB for pm.
 * XXX not yet implemented; an all-zero (invalid) tte is returned.
 */
struct tte
tsb_page_alloc(pmap_t pm, vm_offset_t va)
{
	struct tte zero;

	zero.tte_data = 0;
	zero.tte_tag = 0;
	return (zero);
}
/*
 * Handle a fault on a TSB bucket page: allocate a backing page, wire
 * its tte into the given stte slot and into the dtlb, then initialize
 * the bucket structure of the new page.
 */
void
tsb_page_fault(pmap_t pm, int level, vm_offset_t va, struct stte *stp)
{

	stp->st_tte = tsb_page_alloc(pm, va);
	tlb_store_slot(TLB_DTLB, va, stp->st_tte, tsb_tlb_slot(level));
	tsb_page_init((void *)va, level);
}
void
tsb_page_init(void *va, int level)
{
struct stte *stp;
caddr_t p;
u_int bsize;
u_int inc;
u_int i;
inc = PAGE_SIZE >> TSB_BUCKET_SPREAD_SHIFT;
if (level == 0)
inc >>= TSB_SECONDARY_BUCKET_SHIFT - TSB_PRIMARY_BUCKET_SHIFT;
bsize = tsb_bucket_size(level);
bzero(va, PAGE_SIZE);
for (i = 0; i < PAGE_SIZE; i += inc) {
p = (caddr_t)va + i;
stp = (struct stte *)p + bsize - 1;
stp->st_tte.tte_data = TD_TSB;
}
}
/*
 * Look up the stte mapping va in pm's TSB.  Each level's bucket is
 * scanned in turn; the search stops early if a level's bucket page is
 * not resident.  Returns the matching stte, or NULL if none exists.
 */
struct stte *
tsb_stte_lookup(pmap_t pm, vm_offset_t va)
{
	struct stte *bp;
	u_int size;
	u_int level;
	u_int n;

	va = trunc_page(va);
	for (level = 0; level < TSB_DEPTH; level++) {
		bp = tsb_get_bucket(pm, level, va, 0);
		if (bp == NULL)
			return (NULL);
		size = tsb_bucket_size(level);
		for (n = 0; n < size; n++) {
			if (tte_match(bp[n].st_tte, va))
				return (&bp[n]);
		}
	}
	return (NULL);
}
/*
 * Try to promote the mapping held in stp into the level 0 (primary)
 * TSB.  The level 0 bucket for va is scanned starting at a
 * pseudo-random slot derived from the tick register; if any slot is
 * invalid, or neither locked nor recently referenced, the mapping is
 * removed from its current location and re-entered via
 * tsb_tte_enter(), and the new location is returned.  Otherwise stp
 * is returned unchanged.
 */
struct stte *
tsb_stte_promote(pmap_t pm, vm_offset_t va, struct stte *stp)
{
	struct stte *bucket;
	struct tte tte;
	int bmask;
	int b0;
	int i;

	bmask = tsb_bucket_mask(0);
	bucket = tsb_vtobucket(va, 0);
	b0 = rd(tick) & bmask;		/* pseudo-random starting slot */
	i = b0;
	do {
		if ((bucket[i].st_tte.tte_data & TD_V) == 0 ||
		    (bucket[i].st_tte.tte_data & (TD_L | TD_REF)) == 0) {
			/* Pull the tte out of its current slot first. */
			tte = stp->st_tte;
			stp->st_tte.tte_data = 0;
			pv_remove_virt(stp);
			return (tsb_tte_enter(pm, va, tte));
		}
	} while ((i = (i + 1) & bmask) != b0);
	return (stp);
}
void
tsb_stte_remove(struct stte *stp)
{
struct tte tte;
tte = stp->st_tte;
tte_invalidate(&stp->st_tte);
tsb_tte_local_remove(&tte);
}
/*
 * Flush the mapping described by tp from the local cpu's instruction
 * and data tlbs.
 */
void
tsb_tte_local_remove(struct tte *tp)
{
	u_int ctx;
	vm_offset_t va;

	ctx = tte_get_ctx(*tp);
	va = tte_get_va(*tp);
	tlb_page_demap(TLB_DTLB | TLB_ITLB, ctx, va);
}
/*
 * Enter a tte into the TSB, evicting an existing entry if necessary.
 * At each level the bucket for va is scanned from a pseudo-random
 * starting slot; locked entries and ttes that map TSB pages are never
 * replaced.  An invalid slot is preferred, then an unreferenced one,
 * then any replaceable slot.  If the displaced entry was valid it is
 * pushed down into the next level.  Returns the slot at which the
 * original tte was entered; panics when every level is full.
 */
struct stte *
tsb_tte_enter(pmap_t pm, vm_offset_t va, struct tte tte)
{
	struct stte *bucket;
	struct stte *nstp;	/* slot where the original tte landed */
	struct stte *rstp;	/* last-resort replacement candidate */
	struct stte *stp;	/* slot chosen at this level */
	struct tte otte;
	u_int bmask;
	int level;
	int b0;
	int i;

	nstp = NULL;
	for (level = 0; level < TSB_DEPTH; level++) {
		bucket = tsb_get_bucket(pm, level, va, 1);

		stp = NULL;
		rstp = NULL;
		bmask = tsb_bucket_mask(level);
		b0 = rd(tick) & bmask;	/* pseudo-random starting slot */
		i = b0;
		do {
			/* Never evict locked entries or TSB page ttes. */
			if ((bucket[i].st_tte.tte_data & (TD_TSB | TD_L)) != 0)
				continue;
			if ((bucket[i].st_tte.tte_data & TD_V) == 0) {
				stp = &bucket[i];
				break;
			}
			if (stp == NULL) {
				if ((bucket[i].st_tte.tte_data & TD_REF) == 0)
					stp = &bucket[i];
				else if (rstp == NULL)
					rstp = &bucket[i];
			}
		} while ((i = (i + 1) & bmask) != b0);
		if (stp == NULL)
			stp = rstp;
		if (stp == NULL)
			panic("tsb_enter_tte");
		if (nstp == NULL)
			nstp = stp;
		otte = stp->st_tte;
		if (otte.tte_data & TD_V)
			pv_remove_virt(stp);
		stp->st_tte = tte;
		pv_insert(pm, TD_PA(tte.tte_data), va, stp);
		if ((otte.tte_data & TD_V) == 0)
			break;
		/* Push the displaced, still-valid entry down a level. */
		tte = otte;
		va = tte_get_va(tte);
	}
	if (level >= TSB_DEPTH)
		panic("tsb_enter_tte: TSB full");
	return (nstp);
}

66
sys/sparc64/sparc64/upa.c Normal file
View File

@ -0,0 +1,66 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
/* Device interface methods for the UPA bus driver. */
static int upa_probe(device_t dev);
static int upa_attach(device_t dev);

static device_method_t upa_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe, upa_probe),
	DEVMETHOD(device_attach, upa_attach),

	{ NULL, NULL }
};

/* Driver declaration; no per-device softc is needed (size 1). */
static driver_t upa_driver = {
	"upa",
	upa_methods,
	1,
};

static devclass_t upa_devclass;

/* Hang the upa bus directly off the root bus. */
DRIVER_MODULE(upa, root, upa_driver, upa_devclass, 0, 0);
/*
 * Probe the UPA bus: defer entirely to the generic bus probe.
 */
static int
upa_probe(device_t dev)
{

	return (bus_generic_probe(dev));
}
/*
 * Attach the UPA bus: defer entirely to the generic bus attach.
 */
static int
upa_attach(device_t dev)
{

	return (bus_generic_attach(dev));
}

View File

@ -31,10 +31,16 @@
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <dev/ofw/openfirm.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/md_var.h>
void
@ -46,18 +52,46 @@ cpu_exit(struct proc *p)
/*
 * Finish a fork operation: copy the parent's pcb and trap frame into
 * the child and build a stack frame holding the function, argument
 * and trap frame that the fork trampoline loads before the child's
 * first return to user mode (see cpu_set_fork_handler()).
 *
 * Fix: removed a stale `TODO;' pre-image line that a diff rendering
 * left inside the body, which made the function syntactically
 * invalid.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, int flags)
{
	struct trapframe *tf;
	struct frame *fp;
	struct pcb *pcb;

	if ((flags & RFPROC) == 0)
		return;

	/* XXX saving the parent's fpu state is not yet implemented. */
	if (PCPU_GET(fpcurproc) == p1)
		panic("cpu_fork: save fp state\n");

	/* Copy the parent's pcb and current trap frame. */
	pcb = &p2->p_addr->u_pcb;
	bcopy(&p1->p_addr->u_pcb, pcb, sizeof(*pcb));
	tf = (struct trapframe *)((caddr_t)pcb + UPAGES * PAGE_SIZE) - 1;
	bcopy(p1->p_frame, tf, sizeof(*tf));
	p2->p_frame = tf;

	/*
	 * Stash the start function, its argument and the trap frame in
	 * the locals of the frame below the trap frame, where the fork
	 * trampoline expects to find them.
	 */
	fp = (struct frame *)tf - 1;
	fp->f_local[0] = (u_long)fork_return;
	fp->f_local[1] = (u_long)p2;
	fp->f_local[2] = (u_long)tf;
	pcb->pcb_fp = (u_long)fp - SPOFF;
	pcb->pcb_pc = (u_long)fork_trampoline - 8;
}
/*
 * Reset the machine by exiting back to the Open Firmware.
 */
void
cpu_reset(void)
{

	OF_exit();
}
/*
 * Arrange for a forked process's first activity to be a call to
 * func(arg) instead of fork_return(p): overwrite the function and
 * argument slots of the trampoline frame built by cpu_fork().
 *
 * Fix: removed a stale `TODO;' pre-image line that a diff rendering
 * left inside the body, which made the function syntactically
 * invalid.
 */
void
cpu_set_fork_handler(struct proc *p, void (*func)(void *), void *arg)
{
	struct frame *fp;
	struct pcb *pcb;

	pcb = &p->p_addr->u_pcb;
	/* pcb_fp is biased by SPOFF; undo that to get the frame. */
	fp = (struct frame *)(pcb->pcb_fp + SPOFF);
	fp->f_local[0] = (u_long)func;
	fp->f_local[1] = (u_long)arg;
}
void