Kernel sources for 64-bit PowerPC, along with build-system changes to keep
32-bit kernels compiling (build system changes for 64-bit kernels are
coming later). Existing 32-bit PowerPC kernel configurations must be
updated after this change to specify their architecture.
This commit is contained in:
Nathan Whitehorn 2010-07-13 05:32:19 +00:00
parent 6a6705e980
commit c3e289e1ce
75 changed files with 5553 additions and 1048 deletions

View File

@ -23,6 +23,14 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 9.x IS SLOW:
ln -s aj /etc/malloc.conf.)
20100713:
Due to the import of powerpc64 support, all existing powerpc kernel
configuration files must be updated with a machine directive like this:
machine powerpc powerpc
In addition, an updated config(8) is required to build powerpc kernels
after this change.
20100713:
A new version of ZFS (version 15) has been merged to -HEAD.
This version uses a python library for the following subcommands:

View File

@ -17,7 +17,7 @@
#
# Which version of config(8) is required.
%VERSREQ= 600004
%VERSREQ= 600010
STD8X16FONT?= iso
@ -28,6 +28,9 @@ S= ./@
S= ../../..
.endif
.endif
LDSCRIPT_NAME?= ldscript.${MACHINE_ARCH}
.include "$S/conf/kern.pre.mk"
INCLUDES+= -I$S/contrib/libfdt

View File

@ -36,7 +36,7 @@ dev/ofw/ofw_console.c optional aim
dev/ofw/ofw_disk.c optional ofwd aim
dev/ofw/ofw_fdt.c optional fdt
dev/ofw/ofw_iicbus.c optional iicbus aim
dev/ofw/ofw_standard.c optional aim
dev/ofw/ofw_standard.c optional aim powerpc
dev/powermac_nvram/powermac_nvram.c optional powermac_nvram powermac
dev/quicc/quicc_bfe_fdt.c optional quicc mpc85xx
dev/scc/scc_bfe_macio.c optional scc powermac
@ -53,37 +53,39 @@ dev/tsec/if_tsec.c optional tsec
dev/tsec/if_tsec_fdt.c optional tsec fdt
dev/uart/uart_cpu_powerpc.c optional uart aim
kern/syscalls.c optional ktr
libkern/ashldi3.c standard
libkern/ashrdi3.c standard
libkern/ashldi3.c optional powerpc
libkern/ashrdi3.c optional powerpc
libkern/bcmp.c standard
libkern/cmpdi2.c standard
libkern/divdi3.c standard
libkern/cmpdi2.c optional powerpc
libkern/divdi3.c optional powerpc
libkern/ffs.c standard
libkern/ffsl.c standard
libkern/fls.c standard
libkern/flsl.c standard
libkern/lshrdi3.c standard
libkern/lshrdi3.c optional powerpc
libkern/memchr.c optional fdt
libkern/memmove.c standard
libkern/memset.c standard
libkern/moddi3.c standard
libkern/qdivrem.c standard
libkern/ucmpdi2.c standard
libkern/udivdi3.c standard
libkern/umoddi3.c standard
libkern/moddi3.c optional powerpc
libkern/qdivrem.c optional powerpc
libkern/ucmpdi2.c optional powerpc
libkern/udivdi3.c optional powerpc
libkern/umoddi3.c optional powerpc
powerpc/aim/clock.c optional aim
powerpc/aim/copyinout.c optional aim
powerpc/aim/interrupt.c optional aim
powerpc/aim/locore.S optional aim no-obj
powerpc/aim/machdep.c optional aim
powerpc/aim/mmu_oea.c optional aim
powerpc/aim/mmu_oea.c optional aim powerpc
powerpc/aim/mmu_oea64.c optional aim
powerpc/aim/mp_cpudep.c optional aim smp
powerpc/aim/nexus.c optional aim
powerpc/aim/ofw_machdep.c optional aim
powerpc/aim/ofwmagic.S optional aim
powerpc/aim/platform_chrp.c optional aim
powerpc/aim/swtch.S optional aim
powerpc/aim/slb.c optional aim powerpc64
powerpc/aim/swtch32.S optional aim powerpc
powerpc/aim/swtch64.S optional aim powerpc64
powerpc/aim/trap.c optional aim
powerpc/aim/uma_machdep.c optional aim
powerpc/aim/vm_machdep.c optional aim
@ -156,7 +158,8 @@ powerpc/powerpc/db_hwwatch.c optional ddb
powerpc/powerpc/db_interface.c optional ddb
powerpc/powerpc/db_trace.c optional ddb
powerpc/powerpc/dump_machdep.c standard
powerpc/powerpc/elf_machdep.c standard
powerpc/powerpc/elf32_machdep.c optional powerpc | compat_freebsd32
powerpc/powerpc/elf64_machdep.c optional powerpc64
powerpc/powerpc/exec_machdep.c standard
powerpc/powerpc/fpu.c optional aim
powerpc/powerpc/fuswintr.c standard
@ -173,7 +176,8 @@ powerpc/powerpc/platform.c standard
powerpc/powerpc/platform_if.m standard
powerpc/powerpc/sc_machdep.c optional sc
powerpc/powerpc/setjmp.S standard
powerpc/powerpc/sigcode.S standard
powerpc/powerpc/sigcode32.S optional powerpc | compat_freebsd32
powerpc/powerpc/sigcode64.S optional powerpc64
powerpc/powerpc/stack_machdep.c optional ddb | stack
powerpc/powerpc/suswintr.c standard
powerpc/powerpc/syncicache.c standard
@ -183,3 +187,10 @@ powerpc/psim/iobus.c optional psim
powerpc/psim/ata_iobus.c optional ata psim
powerpc/psim/openpic_iobus.c optional psim
powerpc/psim/uart_iobus.c optional uart psim
compat/freebsd32/freebsd32_ioctl.c optional compat_freebsd32
compat/freebsd32/freebsd32_misc.c optional compat_freebsd32
compat/freebsd32/freebsd32_syscalls.c optional compat_freebsd32
compat/freebsd32/freebsd32_sysent.c optional compat_freebsd32
kern/imgact_elf32.c optional compat_freebsd32

View File

@ -4,8 +4,13 @@
AIM opt_global.h
E500 opt_global.h
POWERPC
POWERPC64
FPU_EMU
COMPAT_FREEBSD32 opt_compat.h
GFB_DEBUG opt_gfb.h
GFB_NO_FONT_LOADING opt_gfb.h
GFB_NO_MODE_CHANGE opt_gfb.h

View File

@ -57,6 +57,8 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/proc.h>
@ -66,20 +68,45 @@ __FBSDID("$FreeBSD$");
#include <machine/pcb.h>
#include <machine/sr.h>
#include <machine/slb.h>
int setfault(faultbuf); /* defined in locore.S */
/*
* Makes sure that the right segment of userspace is mapped in.
*/
#ifdef __powerpc64__
static __inline void
set_user_sr(register_t vsid)
set_user_sr(pmap_t pm, const void *addr)
{
register_t esid, vsid, slb1, slb2;
esid = USER_ADDR >> ADDR_SR_SHFT;
PMAP_LOCK(pm);
vsid = va_to_vsid(pm, (vm_offset_t)addr);
PMAP_UNLOCK(pm);
slb1 = vsid << SLBV_VSID_SHIFT;
slb2 = (esid << SLBE_ESID_SHIFT) | SLBE_VALID | USER_SR;
__asm __volatile ("slbie %0; slbmte %1, %2" :: "r"(esid << 28),
"r"(slb1), "r"(slb2));
isync();
}
#else
static __inline void
set_user_sr(pmap_t pm, const void *addr)
{
register_t vsid;
vsid = va_to_vsid(pm, (vm_offset_t)addr);
isync();
__asm __volatile ("mtsr %0,%1" :: "n"(USER_SR), "r"(vsid));
isync();
}
#endif
int
copyout(const void *kaddr, void *udaddr, size_t len)
@ -103,13 +130,13 @@ copyout(const void *kaddr, void *udaddr, size_t len)
up = udaddr;
while (len > 0) {
p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
p = (char *)USER_ADDR + ((uintptr_t)up & ~SEGMENT_MASK);
l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
if (l > len)
l = len;
set_user_sr(pm->pm_sr[(u_int)up >> ADDR_SR_SHFT]);
set_user_sr(pm,up);
bcopy(kp, p, l);
@ -144,13 +171,13 @@ copyin(const void *udaddr, void *kaddr, size_t len)
up = udaddr;
while (len > 0) {
p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
p = (char *)USER_ADDR + ((uintptr_t)up & ~SEGMENT_MASK);
l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
if (l > len)
l = len;
set_user_sr(pm->pm_sr[(u_int)up >> ADDR_SR_SHFT]);
set_user_sr(pm,up);
bcopy(p, kp, l);
@ -218,14 +245,14 @@ subyte(void *addr, int byte)
td = PCPU_GET(curthread);
pm = &td->td_proc->p_vmspace->vm_pmap;
p = (char *)((u_int)USER_ADDR + ((u_int)addr & ~SEGMENT_MASK));
p = (char *)(USER_ADDR + ((uintptr_t)addr & ~SEGMENT_MASK));
if (setfault(env)) {
td->td_pcb->pcb_onfault = NULL;
return (-1);
}
set_user_sr(pm->pm_sr[(u_int)addr >> ADDR_SR_SHFT]);
set_user_sr(pm,addr);
*p = (char)byte;
@ -233,6 +260,33 @@ subyte(void *addr, int byte)
return (0);
}
#ifdef __powerpc64__
/*
 * suword32: store a 32-bit word at user-space address 'addr' on behalf
 * of the current thread.  Returns 0 on success, -1 if the user access
 * faulted.  powerpc64-only; the 32-bit kernel builds suword32() as a
 * wrapper around suword() instead.
 */
int
suword32(void *addr, int word)
{
struct thread *td;
pmap_t pm;
faultbuf env;
int *p;
td = PCPU_GET(curthread);
pm = &td->td_proc->p_vmspace->vm_pmap;
/*
 * User memory is reached through a dedicated kernel window at
 * USER_ADDR; '(uintptr_t)addr & ~SEGMENT_MASK' appears to be the
 * offset within the user segment -- NOTE(review): confirm against the
 * SEGMENT_MASK definition.
 */
p = (int *)(USER_ADDR + ((uintptr_t)addr & ~SEGMENT_MASK));
/* Arm the on-fault handler before touching user memory. */
if (setfault(env)) {
td->td_pcb->pcb_onfault = NULL;
return (-1);
}
/* Map the segment containing 'addr' into the USER_ADDR window. */
set_user_sr(pm,addr);
*p = word;
/* Disarm the fault handler on the success path. */
td->td_pcb->pcb_onfault = NULL;
return (0);
}
#endif
int
suword(void *addr, long word)
{
@ -243,14 +297,14 @@ suword(void *addr, long word)
td = PCPU_GET(curthread);
pm = &td->td_proc->p_vmspace->vm_pmap;
p = (long *)((u_int)USER_ADDR + ((u_int)addr & ~SEGMENT_MASK));
p = (long *)(USER_ADDR + ((uintptr_t)addr & ~SEGMENT_MASK));
if (setfault(env)) {
td->td_pcb->pcb_onfault = NULL;
return (-1);
}
set_user_sr(pm->pm_sr[(u_int)addr >> ADDR_SR_SHFT]);
set_user_sr(pm,addr);
*p = word;
@ -258,12 +312,19 @@ suword(void *addr, long word)
return (0);
}
#ifdef __powerpc64__
/*
 * On powerpc64, long is 64 bits wide, so a 64-bit user store is simply
 * suword() under another name.
 */
int
suword64(void *addr, int64_t word)
{
	long w;

	w = (long)word;
	return (suword(addr, w));
}
#else
/*
 * On 32-bit powerpc, long is 32 bits wide, so a 32-bit user store is
 * simply suword() under another name.
 */
int
suword32(void *addr, int32_t word)
{
	long w;

	w = (long)word;
	return (suword(addr, w));
}
#endif
int
fubyte(const void *addr)
@ -276,14 +337,14 @@ fubyte(const void *addr)
td = PCPU_GET(curthread);
pm = &td->td_proc->p_vmspace->vm_pmap;
p = (u_char *)((u_int)USER_ADDR + ((u_int)addr & ~SEGMENT_MASK));
p = (u_char *)(USER_ADDR + ((uintptr_t)addr & ~SEGMENT_MASK));
if (setfault(env)) {
td->td_pcb->pcb_onfault = NULL;
return (-1);
}
set_user_sr(pm->pm_sr[(u_int)addr >> ADDR_SR_SHFT]);
set_user_sr(pm,addr);
val = *p;
@ -291,6 +352,33 @@ fubyte(const void *addr)
return (val);
}
#ifdef __powerpc64__
/*
 * fuword32: fetch a 32-bit word from user-space address 'addr'.
 * Returns the value read, or -1 if the access faulted (a legitimately
 * stored -1 is indistinguishable from failure, as is traditional for
 * the fuword family).  powerpc64-only; the 32-bit kernel builds
 * fuword32() as a wrapper around fuword() instead.
 */
int32_t
fuword32(const void *addr)
{
struct thread *td;
pmap_t pm;
faultbuf env;
int32_t *p, val;
td = PCPU_GET(curthread);
pm = &td->td_proc->p_vmspace->vm_pmap;
/* Translate 'addr' into the kernel's user-segment window at USER_ADDR. */
p = (int32_t *)(USER_ADDR + ((uintptr_t)addr & ~SEGMENT_MASK));
/* Arm the on-fault handler before dereferencing user memory. */
if (setfault(env)) {
td->td_pcb->pcb_onfault = NULL;
return (-1);
}
/* Map the segment containing 'addr' into the window, then read. */
set_user_sr(pm,addr);
val = *p;
/* Disarm the fault handler on the success path. */
td->td_pcb->pcb_onfault = NULL;
return (val);
}
#endif
long
fuword(const void *addr)
{
@ -301,14 +389,14 @@ fuword(const void *addr)
td = PCPU_GET(curthread);
pm = &td->td_proc->p_vmspace->vm_pmap;
p = (long *)((u_int)USER_ADDR + ((u_int)addr & ~SEGMENT_MASK));
p = (long *)(USER_ADDR + ((uintptr_t)addr & ~SEGMENT_MASK));
if (setfault(env)) {
td->td_pcb->pcb_onfault = NULL;
return (-1);
}
set_user_sr(pm->pm_sr[(u_int)addr >> ADDR_SR_SHFT]);
set_user_sr(pm,addr);
val = *p;
@ -316,31 +404,27 @@ fuword(const void *addr)
return (val);
}
#ifndef __powerpc64__
/*
 * On 32-bit powerpc, long is 32 bits wide, so fuword() already fetches
 * exactly 32 bits; just narrow its result type.
 */
int32_t
fuword32(const void *addr)
{
	long v;

	v = fuword(addr);
	return ((int32_t)v);
}
#endif
uint32_t
casuword32(volatile uint32_t *base, uint32_t oldval, uint32_t newval)
{
return (casuword((volatile u_long *)base, oldval, newval));
}
u_long
casuword(volatile u_long *addr, u_long old, u_long new)
casuword32(volatile uint32_t *addr, uint32_t old, uint32_t new)
{
struct thread *td;
pmap_t pm;
faultbuf env;
u_long *p, val;
uint32_t *p, val;
td = PCPU_GET(curthread);
pm = &td->td_proc->p_vmspace->vm_pmap;
p = (u_long *)((u_int)USER_ADDR + ((u_int)addr & ~SEGMENT_MASK));
p = (uint32_t *)(USER_ADDR + ((uintptr_t)addr & ~SEGMENT_MASK));
set_user_sr(pm->pm_sr[(u_int)addr >> ADDR_SR_SHFT]);
set_user_sr(pm,(const void *)(vm_offset_t)addr);
if (setfault(env)) {
td->td_pcb->pcb_onfault = NULL;
@ -365,3 +449,50 @@ casuword(volatile u_long *addr, u_long old, u_long new)
return (val);
}
#ifndef __powerpc64__
/*
 * casuword: compare-and-set a long in user memory.  On 32-bit kernels
 * u_long and uint32_t have the same width, so defer to casuword32().
 */
u_long
casuword(volatile u_long *addr, u_long old, u_long new)
{
return (casuword32((volatile uint32_t *)addr, old, new));
}
#else
/*
 * casuword: atomically compare-and-set the 64-bit word at user address
 * 'addr': if it equals 'old', store 'new'; in either case return the
 * value that was observed.  Returns -1 if the user access faulted
 * (ambiguous with a stored value of -1, as is traditional for this
 * interface).
 */
u_long
casuword(volatile u_long *addr, u_long old, u_long new)
{
struct thread *td;
pmap_t pm;
faultbuf env;
u_long *p, val;
td = PCPU_GET(curthread);
pm = &td->td_proc->p_vmspace->vm_pmap;
/* Access the word through the kernel's user-segment window. */
p = (u_long *)(USER_ADDR + ((uintptr_t)addr & ~SEGMENT_MASK));
/*
 * NOTE(review): unlike suword()/fuword(), the user segment is mapped
 * before setfault() is armed here.  set_user_sr() touches only kernel
 * state, so this looks harmless, but confirm the ordering is
 * intentional.
 */
set_user_sr(pm,(const void *)(vm_offset_t)addr);
if (setfault(env)) {
td->td_pcb->pcb_onfault = NULL;
return (-1);
}
/* Standard PowerPC load-reserve/store-conditional CAS retry loop. */
__asm __volatile (
"1:\tldarx %0, 0, %2\n\t" /* load old value */
"cmpld %3, %0\n\t" /* compare */
"bne 2f\n\t" /* exit if not equal */
"stdcx. %4, 0, %2\n\t" /* attempt to store */
"bne- 1b\n\t" /* spin if failed */
"b 3f\n\t" /* we've succeeded */
"2:\n\t"
"stdcx. %0, 0, %2\n\t" /* clear reservation (74xx) */
"3:\n\t"
: "=&r" (val), "=m" (*p)
: "r" (p), "r" (old), "r" (new), "m" (*p)
: "cc", "memory");
/* Disarm the fault handler. */
td->td_pcb->pcb_onfault = NULL;
return (val);
}
#endif

View File

@ -1,209 +1,8 @@
/* $FreeBSD$ */
/* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */
/*-
* Copyright (C) 2001 Benno Rice
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
* Copyright (C) 1995, 1996 TooLs GmbH.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by TooLs GmbH.
* 4. The name of TooLs GmbH may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef __powerpc64__
#include <powerpc/aim/locore64.S>
#else
#include <powerpc/aim/locore32.S>
#endif
#include "assym.s"
#include <sys/syscall.h>
#include <machine/trap.h>
#include <machine/param.h>
#include <machine/sr.h>
#include <machine/spr.h>
#include <machine/psl.h>
#include <machine/asm.h>
/* Locate the per-CPU data structure */
#define GET_CPUINFO(r) \
mfsprg0 r
/*
* Compiled KERNBASE location and the kernel load address
*/
.globl kernbase
.set kernbase, KERNBASE
#define TMPSTKSZ 8192 /* 8K temporary stack */
/*
* Globals
*/
.data
.align 4
GLOBAL(tmpstk)
.space TMPSTKSZ
GLOBAL(esym)
.long 0 /* end of symbol table */
GLOBAL(ofmsr)
.long 0, 0, 0, 0, 0 /* msr/sprg0-3 used in Open Firmware */
#define INTRCNT_COUNT 256 /* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */
GLOBAL(intrnames)
.space INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(eintrnames)
.align 4
GLOBAL(intrcnt)
.space INTRCNT_COUNT * 4 * 2
GLOBAL(eintrcnt)
/*
* File-scope for locore.S
*/
idle_u:
.long 0 /* fake uarea during idle after exit */
openfirmware_entry:
.long 0 /* Open Firmware entry point */
srsave:
.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.text
.globl btext
btext:
/*
* This symbol is here for the benefit of kvm_mkdb, and is supposed to
* mark the start of kernel text.
*/
.globl kernel_text
kernel_text:
/*
* Startup entry. Note, this must be the first thing in the text
* segment!
*/
.text
.globl __start
__start:
li 8,0
li 9,0x100
mtctr 9
1:
dcbf 0,8
icbi 0,8
addi 8,8,0x20
bdnz 1b
sync
isync
/* Save the argument pointer and length */
mr 20,6
mr 21,7
lis 8,openfirmware_entry@ha
stw 5,openfirmware_entry@l(8) /* save client interface handler */
lis 1,(tmpstk+TMPSTKSZ-16)@ha
addi 1,1,(tmpstk+TMPSTKSZ-16)@l
mfmsr 0
lis 9,ofmsr@ha
stwu 0,ofmsr@l(9)
mfsprg0 0 /* save SPRG0-3 */
stw 0,4(9) /* ofmsr[1] = sprg0 */
mfsprg1 0
stw 0,8(9) /* ofmsr[2] = sprg1 */
mfsprg2 0
stw 0,12(9) /* ofmsr[3] = sprg2 */
mfsprg3 0
stw 0,16(9) /* ofmsr[4] = sprg3 */
bl OF_initial_setup
lis 4,end@ha
addi 4,4,end@l
mr 5,4
lis 3,kernel_text@ha
addi 3,3,kernel_text@l
/* Restore the argument pointer and length */
mr 6,20
mr 7,21
bl powerpc_init
mr %r1, %r3
li %r3, 0
stw %r3, 0(%r1)
bl mi_startup
b OF_exit
/*
* int setfault()
*
* Similar to setjmp to setup for handling faults on accesses to user memory.
* Any routine using this may only call bcopy, either the form below,
* or the (currently used) C code optimized, so it doesn't use any non-volatile
* registers.
*/
.globl setfault
setfault:
mflr 0
mfcr 12
mfsprg 4,0
lwz 4,PC_CURTHREAD(4)
lwz 4,TD_PCB(4)
stw 3,PCB_ONFAULT(4)
stw 0,0(3)
stw 1,4(3)
stw 2,8(3)
stmw 12,12(3)
xor 3,3,3
blr
#include <powerpc/aim/trap_subr.S>

207
sys/powerpc/aim/locore32.S Normal file
View File

@ -0,0 +1,207 @@
/* $FreeBSD$ */
/* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */
/*-
* Copyright (C) 2001 Benno Rice
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
* Copyright (C) 1995, 1996 TooLs GmbH.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by TooLs GmbH.
* 4. The name of TooLs GmbH may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "assym.s"
#include <sys/syscall.h>
#include <machine/trap.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/asm.h>
/* Locate the per-CPU data structure */
#define GET_CPUINFO(r) \
mfsprg0 r
/*
* Compiled KERNBASE location and the kernel load address
*/
.globl kernbase
.set kernbase, KERNBASE
#define TMPSTKSZ 8192 /* 8K temporary stack */
/*
* Globals
*/
.data
.align 4
GLOBAL(tmpstk)
.space TMPSTKSZ
GLOBAL(esym)
.long 0 /* end of symbol table */
GLOBAL(ofmsr)
.long 0, 0, 0, 0, 0 /* msr/sprg0-3 used in Open Firmware */
#define INTRCNT_COUNT 256 /* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */
GLOBAL(intrnames)
.space INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(eintrnames)
.align 4
GLOBAL(intrcnt)
.space INTRCNT_COUNT * 4 * 2
GLOBAL(eintrcnt)
/*
* File-scope for locore.S
*/
idle_u:
.long 0 /* fake uarea during idle after exit */
openfirmware_entry:
.long 0 /* Open Firmware entry point */
srsave:
.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.text
.globl btext
btext:
/*
* This symbol is here for the benefit of kvm_mkdb, and is supposed to
* mark the start of kernel text.
*/
.globl kernel_text
kernel_text:
/*
* Startup entry. Note, this must be the first thing in the text
* segment!
*/
.text
.globl __start
__start:
li 8,0
li 9,0x100
mtctr 9
1:
dcbf 0,8
icbi 0,8
addi 8,8,0x20
bdnz 1b
sync
isync
/* Save the argument pointer and length */
mr 20,6
mr 21,7
lis 8,openfirmware_entry@ha
stw 5,openfirmware_entry@l(8) /* save client interface handler */
lis 1,(tmpstk+TMPSTKSZ-16)@ha
addi 1,1,(tmpstk+TMPSTKSZ-16)@l
mfmsr 0
lis 9,ofmsr@ha
stwu 0,ofmsr@l(9)
mfsprg0 0 /* save SPRG0-3 */
stw 0,4(9) /* ofmsr[1] = sprg0 */
mfsprg1 0
stw 0,8(9) /* ofmsr[2] = sprg1 */
mfsprg2 0
stw 0,12(9) /* ofmsr[3] = sprg2 */
mfsprg3 0
stw 0,16(9) /* ofmsr[4] = sprg3 */
bl OF_initial_setup
lis 4,end@ha
addi 4,4,end@l
mr 5,4
lis 3,kernel_text@ha
addi 3,3,kernel_text@l
/* Restore the argument pointer and length */
mr 6,20
mr 7,21
bl powerpc_init
mr %r1, %r3
li %r3, 0
stw %r3, 0(%r1)
bl mi_startup
b OF_exit
/*
* int setfault()
*
* Similar to setjmp to setup for handling faults on accesses to user memory.
* Any routine using this may only call bcopy, either the form below,
* or the (currently used) C code optimized, so it doesn't use any non-volatile
* registers.
*/
/*
 * setfault(faultbuf): register r3 as curthread's pcb_onfault buffer and
 * save the context needed to resume here (lr, stack, r2, cr, and the
 * non-volatile GPRs), setjmp-style.  Returns 0 on this initial call;
 * the trap handler restores this context on a user-memory fault.
 */
.globl setfault
setfault:
mflr 0 /* return address */
mfcr 12 /* condition register, staged in r12 */
mfsprg 4,0 /* per-CPU data pointer */
lwz 4,PC_CURTHREAD(4)
lwz 4,TD_PCB(4)
stw 3,PCB_ONFAULT(4) /* pcb->pcb_onfault = buf */
stw 0,0(3) /* buf[0] = lr */
stw 1,4(3) /* buf[1] = stack pointer */
stw 2,8(3) /* buf[2] = r2 */
stmw 12,12(3) /* r12 (holding cr) and r13-r31 */
xor 3,3,3 /* return 0 */
blr
#include <powerpc/aim/trap_subr32.S>

369
sys/powerpc/aim/locore64.S Normal file
View File

@ -0,0 +1,369 @@
/* $FreeBSD$ */
/* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */
/*-
* Copyright (C) 2001 Benno Rice
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
* Copyright (C) 1995, 1996 TooLs GmbH.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by TooLs GmbH.
* 4. The name of TooLs GmbH may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "assym.s"
#include <sys/syscall.h>
#include <machine/trap.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/asm.h>
/* Locate the per-CPU data structure */
#define GET_CPUINFO(r) \
mfsprg0 r
/*
* Compiled KERNBASE location and the kernel load address
*/
.globl kernbase
.set kernbase, KERNBASE
#define TMPSTKSZ 8192 /* 8K temporary stack */
#define OFWSTKSZ 4096 /* 4K Open Firmware stack */
/*
* Globals
*/
.data
.align 4
GLOBAL(tmpstk)
.space TMPSTKSZ
GLOBAL(ofwstk)
.space OFWSTKSZ
GLOBAL(esym)
.llong 0 /* end of symbol table */
GLOBAL(ofmsr)
.llong 0, 0, 0, 0, 0 /* msr/sprg0-3 used in Open Firmware */
#define INTRCNT_COUNT 256 /* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */
GLOBAL(intrnames)
.space INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(eintrnames)
.align 4
GLOBAL(intrcnt)
.space INTRCNT_COUNT * 4 * 2
GLOBAL(eintrcnt)
/*
* File-scope for locore.S
*/
idle_u:
.llong 0 /* fake uarea during idle after exit */
openfirmware_entry:
.llong 0 /* Open Firmware entry point */
srsave:
.llong 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.text
.globl btext
btext:
/*
* This symbol is here for the benefit of kvm_mkdb, and is supposed to
* mark the start of kernel text.
*/
.globl kernel_text
kernel_text:
/*
* Startup entry. Note, this must be the first thing in the text
* segment!
*/
.text
ASENTRY(__start)
li 8,0
li 9,0x100
mtctr 9
1:
dcbf 0,8
icbi 0,8
addi 8,8,0x20
bdnz 1b
sync
isync
/* Save the argument pointer and length */
mr 20,6
mr 21,7
lis 8,openfirmware_entry@ha
std 5,openfirmware_entry@l(8) /* save client interface handler */
/* Set up the stack pointer */
lis 1,(tmpstk+TMPSTKSZ-48)@ha
addi 1,1,(tmpstk+TMPSTKSZ-48)@l
/* Set up the TOC pointer */
lis 2,tocbase@ha
ld 2,tocbase@l(2)
mfmsr 0
lis 9,ofmsr@ha
stdu 0,ofmsr@l(9)
mfsprg0 0 /* save SPRG0-3 */
std 0,8(9) /* ofmsr[1] = sprg0 */
mfsprg1 0
std 0,16(9) /* ofmsr[2] = sprg1 */
mfsprg2 0
std 0,24(9) /* ofmsr[3] = sprg2 */
mfsprg3 0
std 0,32(9) /* ofmsr[4] = sprg3 */
/* Switch to 64-bit mode */
mfmsr 9
li 8,1
insrdi 9,8,1,0
mtmsrd 9
bl .OF_initial_setup
nop
lis 4,end@ha
addi 4,4,end@l
mr 5,4
lis 3,kernbase@ha
addi 3,3,kernbase@l
/* Restore the argument pointer and length */
mr 6,20
mr 7,21
bl .powerpc_init
nop
mr %r1, %r3
li %r3, 0
std %r3, 0(%r1)
bl .mi_startup
nop
b .OF_exit
nop
/*
* PPC64 ABI TOC base
*/
.align 3
.globl tocbase
tocbase:
.llong .TOC.@tocbase
/*
* Open Firmware Real-mode Entry Point. This is a huge pain.
*/
ASENTRY(ofw_32bit_mode_entry)
mflr %r0
std %r0,16(%r1)
stdu %r1,-208(%r1)
/*
* We need to save the following, because OF's register save/
* restore code assumes that the contents of registers are
* at most 32 bits wide: lr, cr, r2, r13-r31, the old MSR. These
* get placed in that order in the stack.
*/
mfcr %r4
std %r4,48(%r1)
std %r13,56(%r1)
std %r14,64(%r1)
std %r15,72(%r1)
std %r16,80(%r1)
std %r17,88(%r1)
std %r18,96(%r1)
std %r19,104(%r1)
std %r20,112(%r1)
std %r21,120(%r1)
std %r22,128(%r1)
std %r23,136(%r1)
std %r24,144(%r1)
std %r25,152(%r1)
std %r26,160(%r1)
std %r27,168(%r1)
std %r28,176(%r1)
std %r29,184(%r1)
std %r30,192(%r1)
std %r31,200(%r1)
/* Record the old MSR */
mfmsr %r6
/* read client interface handler */
lis %r4,openfirmware_entry@ha
ld %r4,openfirmware_entry@l(%r4)
/*
* Set the MSR to the OF value. This has the side effect of disabling
* exceptions, which is important for the next few steps.
*/
lis %r5,ofmsr@ha
ld %r5,ofmsr@l(%r5)
mtmsrd %r5
isync
/*
* Set up OF stack. This needs to be accessible in real mode and
* use the 32-bit ABI stack frame format. The pointer to the current
* kernel stack is placed at the very top of the stack along with
* the old MSR so we can get them back later.
*/
mr %r5,%r1
lis %r1,(ofwstk+OFWSTKSZ-32)@ha
addi %r1,%r1,(ofwstk+OFWSTKSZ-32)@l
std %r5,8(%r1) /* Save real stack pointer */
std %r2,16(%r1) /* Save old TOC */
std %r6,24(%r1) /* Save old MSR */
li %r5,0
stw %r5,4(%r1)
stw %r5,0(%r1)
/* Finally, branch to OF */
mtctr %r4
bctrl
/* Reload stack pointer and MSR from the OFW stack */
ld %r6,24(%r1)
ld %r2,16(%r1)
ld %r1,8(%r1)
/* Now set the real MSR */
mtmsrd %r6
isync
/* Sign-extend the return value from OF */
extsw %r3,%r3
/* Restore all the non-volatile registers */
ld %r5,48(%r1)
mtcr %r5
ld %r13,56(%r1)
ld %r14,64(%r1)
ld %r15,72(%r1)
ld %r16,80(%r1)
ld %r17,88(%r1)
ld %r18,96(%r1)
ld %r19,104(%r1)
ld %r20,112(%r1)
ld %r21,120(%r1)
ld %r22,128(%r1)
ld %r23,136(%r1)
ld %r24,144(%r1)
ld %r25,152(%r1)
ld %r26,160(%r1)
ld %r27,168(%r1)
ld %r28,176(%r1)
ld %r29,184(%r1)
ld %r30,192(%r1)
ld %r31,200(%r1)
/* Restore the stack and link register */
ld %r1,0(%r1)
ld %r0,16(%r1)
mtlr %r0
blr
/*
* int setfault()
*
* Similar to setjmp to setup for handling faults on accesses to user memory.
* Any routine using this may only call bcopy, either the form below,
* or the (currently used) C code optimized, so it doesn't use any non-volatile
* registers.
*/
/*
 * setfault(faultbuf): register r3 as curthread's pcb_onfault buffer and
 * save the context needed to resume here (lr, stack, TOC, cr, and the
 * non-volatile GPRs), setjmp-style.  Returns 0 on this initial call;
 * the trap handler restores this context on a user-memory fault.
 */
ASENTRY(setfault)
mflr 0 /* return address */
mfcr 12 /* condition register, staged in r12 */
mfsprg 4,0 /* per-CPU data pointer */
ld 4,PC_CURTHREAD(4)
ld 4,TD_PCB(4)
std 3,PCB_ONFAULT(4) /* pcb->pcb_onfault = buf */
std 0,0(3) /* buf[0] = lr */
std 1,8(3) /* buf[1] = stack pointer */
std 2,16(3) /* buf[2] = TOC pointer */
std %r12,24(%r3) /* Save the non-volatile GP regs (r12 holds cr). */
std %r13,24+1*8(%r3)
std %r14,24+2*8(%r3)
std %r15,24+3*8(%r3)
std %r16,24+4*8(%r3)
std %r17,24+5*8(%r3)
std %r18,24+6*8(%r3)
std %r19,24+7*8(%r3)
std %r20,24+8*8(%r3)
std %r21,24+9*8(%r3)
std %r22,24+10*8(%r3)
std %r23,24+11*8(%r3)
std %r24,24+12*8(%r3)
std %r25,24+13*8(%r3)
std %r26,24+14*8(%r3)
std %r27,24+15*8(%r3)
std %r28,24+16*8(%r3)
std %r29,24+17*8(%r3)
std %r30,24+18*8(%r3)
std %r31,24+19*8(%r3)
xor 3,3,3 /* return 0 */
blr
#include <powerpc/aim/trap_subr64.S>

View File

@ -105,7 +105,9 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_pager.h>
#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
@ -130,7 +132,11 @@ extern vm_offset_t ksym_start, ksym_end;
#endif
int cold = 1;
#ifdef __powerpc64__
int cacheline_size = 128;
#else
int cacheline_size = 32;
#endif
int hw_direct_map = 1;
struct pcpu __pcpu[MAXCPU];
@ -146,24 +152,18 @@ SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
CTLFLAG_RD, &cacheline_size, 0, "");
u_int powerpc_init(u_int, u_int, u_int, void *);
int save_ofw_mapping(void);
int restore_ofw_mapping(void);
void install_extint(void (*)(void));
uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *);
int setfault(faultbuf); /* defined in locore.S */
void asm_panic(char *);
long Maxmem = 0;
long realmem = 0;
struct pmap ofw_pmap;
extern int ofmsr;
#ifndef __powerpc64__
struct bat battable[16];
#endif
struct kva_md_info kmi;
@ -210,9 +210,14 @@ cpu_startup(void *dummy)
printf("Physical memory chunk(s):\n");
for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
int size1 = phys_avail[indx + 1] - phys_avail[indx];
vm_offset_t size1 =
phys_avail[indx + 1] - phys_avail[indx];
printf("0x%08x - 0x%08x, %d bytes (%d pages)\n",
#ifdef __powerpc64__
printf("0x%16lx - 0x%16lx, %ld bytes (%ld pages)\n",
#else
printf("0x%08x - 0x%08x, %d bytes (%ld pages)\n",
#endif
phys_avail[indx], phys_avail[indx + 1] - 1, size1,
size1 / PAGE_SIZE);
}
@ -235,21 +240,27 @@ cpu_startup(void *dummy)
extern char kernel_text[], _end[];
#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void *testppc64, *testppc64size;
extern void *restorebridge, *restorebridgesize;
extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
extern void *trapcode64;
#endif
#ifdef SMP
extern void *rstcode, *rstsize;
#endif
extern void *trapcode, *trapcode64, *trapsize;
extern void *trapcode, *trapsize;
extern void *alitrap, *alisize;
extern void *dsitrap, *dsisize;
extern void *decrint, *decrsize;
extern void *extint, *extsize;
extern void *dblow, *dbsize;
u_int
powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
uintptr_t
powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
vm_offset_t basekernel, void *mdp)
{
struct pcpu *pc;
vm_offset_t end;
@ -257,9 +268,11 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
size_t trap_offset;
void *kmdp;
char *env;
uint32_t msr, scratch;
register_t msr, scratch;
uint8_t *cache_check;
#ifndef __powerpc64__
int ppc64;
#endif
end = 0;
kmdp = NULL;
@ -346,9 +359,9 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
case IBM970FX:
case IBM970MP:
case IBM970GX:
scratch = mfspr64upper(SPR_HID5,msr);
scratch = mfspr(SPR_HID5);
scratch &= ~HID5_970_DCBZ_SIZE_HI;
mtspr64(SPR_HID5, scratch, mfspr(SPR_HID5), msr);
mtspr(SPR_HID5, scratch);
break;
}
@ -390,6 +403,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
cacheline_size = 32;
}
#ifndef __powerpc64__
/*
* Figure out whether we need to use the 64 bit PMAP. This works by
* executing an instruction that is only legal on 64-bit PPC (mtmsrd),
@ -449,6 +463,11 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
generictrap = &trapcode;
}
#else /* powerpc64 */
cpu_features |= PPC_FEATURE_64;
generictrap = &trapcode;
#endif
#ifdef SMP
bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstsize);
#else
@ -466,9 +485,13 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
bcopy(generictrap, (void *)EXC_TRC, (size_t)&trapsize);
bcopy(generictrap, (void *)EXC_BPT, (size_t)&trapsize);
#endif
bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsisize);
bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&alisize);
bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsisize);
bcopy(generictrap, (void *)EXC_ISI, (size_t)&trapsize);
#ifdef __powerpc64__
bcopy(generictrap, (void *)EXC_DSE, (size_t)&trapsize);
bcopy(generictrap, (void *)EXC_ISE, (size_t)&trapsize);
#endif
bcopy(generictrap, (void *)EXC_EXI, (size_t)&trapsize);
bcopy(generictrap, (void *)EXC_FPU, (size_t)&trapsize);
bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
@ -524,7 +547,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
*/
thread0.td_pcb = (struct pcb *)
((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
sizeof(struct pcb)) & ~15);
sizeof(struct pcb)) & ~15UL);
bzero((void *)thread0.td_pcb, sizeof(struct pcb));
pc->pc_curpcb = thread0.td_pcb;
@ -537,7 +560,8 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
"Boot flags requested debugger");
#endif
return (((uintptr_t)thread0.td_pcb - 16) & ~15);
return (((uintptr_t)thread0.td_pcb -
(sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
}
void
@ -614,7 +638,7 @@ cpu_halt(void)
void
cpu_idle(int busy)
{
uint32_t msr;
register_t msr;
uint16_t vers;
msr = mfmsr();
@ -623,7 +647,7 @@ cpu_idle(int busy)
#ifdef INVARIANTS
if ((msr & PSL_EE) != PSL_EE) {
struct thread *td = curthread;
printf("td msr %x\n", td->td_md.md_saved_msr);
printf("td msr %#lx\n", (u_long)td->td_md.md_saved_msr);
panic("ints disabled in idleproc!");
}
#endif
@ -710,7 +734,10 @@ kdb_cpu_set_singlestep(void)
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
/* Copy the SLB contents from the current CPU */
memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb));
#endif
}
void
@ -767,12 +794,6 @@ kcopy(const void *src, void *dst, size_t len)
return (0);
}
void
asm_panic(char *pstr)
{
panic(pstr);
}
int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */
int
@ -793,3 +814,13 @@ db_trap_glue(struct trapframe *frame)
return (0);
}
#ifndef __powerpc64__
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}
#endif

View File

@ -193,8 +193,8 @@ struct ofw_map {
*/
static struct mem_region *regions;
static struct mem_region *pregions;
u_int phys_avail_count;
int regions_sz, pregions_sz;
static u_int phys_avail_count;
static int regions_sz, pregions_sz;
static struct ofw_map *translations;
extern struct pmap ofw_pmap;

File diff suppressed because it is too large Load Diff

View File

@ -55,6 +55,31 @@ static register_t bsp_state[8] __aligned(8);
static void cpudep_save_config(void *dummy);
SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);
void
cpudep_ap_early_bootstrap(void)
{
register_t reg;
__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
powerpc_sync();
switch (mfpvr() >> 16) {
case IBM970:
case IBM970FX:
case IBM970MP:
/* Restore HID4 and HID5, which are necessary for the MMU */
__asm __volatile("ld %0, 16(%2); sync; isync; \
mtspr %1, %0; sync; isync;"
: "=r"(reg) : "K"(SPR_HID4), "r"(bsp_state));
__asm __volatile("ld %0, 24(%2); sync; isync; \
mtspr %1, %0; sync; isync;"
: "=r"(reg) : "K"(SPR_HID5), "r"(bsp_state));
powerpc_sync();
break;
}
}
uintptr_t
cpudep_ap_bootstrap(void)
{
@ -64,9 +89,6 @@ cpudep_ap_bootstrap(void)
mtmsr(msr);
isync();
__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
powerpc_sync();
pcpup->pc_curthread = pcpup->pc_idlethread;
pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
sp = pcpup->pc_curpcb->pcb_sp;
@ -187,6 +209,12 @@ cpudep_save_config(void *dummy)
case IBM970:
case IBM970FX:
case IBM970MP:
#ifdef __powerpc64__
bsp_state[0] = mfspr(SPR_HID0);
bsp_state[1] = mfspr(SPR_HID1);
bsp_state[2] = mfspr(SPR_HID4);
bsp_state[3] = mfspr(SPR_HID5);
#else
__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
: "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0));
__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
@ -195,6 +223,7 @@ cpudep_save_config(void *dummy)
: "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4));
__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
: "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5));
#endif
powerpc_sync();

View File

@ -63,12 +63,7 @@ __FBSDID("$FreeBSD$");
#define OFMEM_REGIONS 32
static struct mem_region OFmem[OFMEM_REGIONS + 1], OFavail[OFMEM_REGIONS + 3];
static struct mem_region OFfree[OFMEM_REGIONS + 3];
struct mem_region64 {
vm_offset_t mr_start_hi;
vm_offset_t mr_start_lo;
vm_size_t mr_size;
};
static int nOFmem;
extern register_t ofmsr[5];
extern struct pmap ofw_pmap;
@ -76,6 +71,7 @@ static int (*ofwcall)(void *);
static void *fdt;
int ofw_real_mode;
int ofw_32bit_mode_entry(void *);
static void ofw_quiesce(void);
static int openfirmware(void *args);
@ -133,12 +129,121 @@ memr_overlap(struct mem_region *r1, struct mem_region *r2)
static void
memr_merge(struct mem_region *from, struct mem_region *to)
{
int end;
end = imax(to->mr_start + to->mr_size, from->mr_start + from->mr_size);
to->mr_start = imin(from->mr_start, to->mr_start);
vm_offset_t end;
end = ulmax(to->mr_start + to->mr_size, from->mr_start + from->mr_size);
to->mr_start = ulmin(from->mr_start, to->mr_start);
to->mr_size = end - to->mr_start;
}
/*
 * Parse an Open Firmware memory property ("reg" or "available") on the
 * given node into an array of mem_region structures, honoring the
 * #address-cells and #size-cells properties of the root node.
 *
 * Returns the number of bytes of 'output' that were filled in.
 * Panics if the property cannot be read at all.
 *
 * Fixes relative to the original:
 *  - The local cell buffer no longer shadows the file-scope OFmem[] array.
 *  - OF_getprop() returns a signed value; comparing it directly against
 *    sizeof() promoted a -1 failure return to a huge unsigned value, so
 *    the "default to 1" branch was never taken and address_cells /
 *    size_cells were used uninitialized.  The cells are now initialized
 *    up front and the comparison is done in signed arithmetic.
 */
static int
parse_ofw_memory(phandle_t node, const char *prop, struct mem_region *output)
{
	cell_t address_cells, size_cells;
	cell_t cells[4*(OFMEM_REGIONS + 1)];
	int sz, i, j;
	int apple_hack_mode;
	phandle_t phandle;

	sz = 0;
	apple_hack_mode = 0;

	/*
	 * Get #address-cells/#size-cells from the root node, defaulting
	 * to 1 if they cannot be found or are short reads.
	 */
	address_cells = 1;
	size_cells = 1;
	phandle = OF_finddevice("/");
	if (OF_getprop(phandle, "#address-cells", &address_cells,
	    sizeof(address_cells)) < (int)sizeof(address_cells))
		address_cells = 1;
	if (OF_getprop(phandle, "#size-cells", &size_cells,
	    sizeof(size_cells)) < (int)sizeof(size_cells))
		size_cells = 1;

	/*
	 * On Apple hardware, address_cells is always 1 for "available",
	 * even when it is explicitly set to 2. Then all memory above 4 GB
	 * should be added by hand to the available list. Detect Apple hardware
	 * by seeing if ofw_real_mode is set -- only Apple seems to use
	 * virtual-mode OF.
	 */
	if (strcmp(prop, "available") == 0 && !ofw_real_mode)
		apple_hack_mode = 1;

	if (apple_hack_mode)
		address_cells = 1;

	/*
	 * Get memory.
	 */
	if ((node == -1) || (sz = OF_getprop(node, prop,
	    cells, sizeof(cells[0]) * 4 * OFMEM_REGIONS)) <= 0)
		panic("Physical memory map not found");

	i = 0;
	j = 0;
	while (i < sz/sizeof(cell_t)) {
#ifndef __powerpc64__
		/* On 32-bit PPC, ignore regions starting above 4 GB */
		if (address_cells > 1 && cells[i] > 0) {
			i += address_cells + size_cells;
			continue;
		}
#endif
		/* Assemble the (possibly two-cell) start address ... */
		output[j].mr_start = cells[i++];
		if (address_cells == 2) {
#ifdef __powerpc64__
			output[j].mr_start <<= 32;
#endif
			output[j].mr_start += cells[i++];
		}

		/* ... and the (possibly two-cell) size. */
		output[j].mr_size = cells[i++];
		if (size_cells == 2) {
#ifdef __powerpc64__
			output[j].mr_size <<= 32;
#endif
			output[j].mr_size += cells[i++];
		}

#ifndef __powerpc64__
		/*
		 * Check for memory regions extending above 32-bit
		 * memory space, and restrict them to stay there.
		 */
		if (((uint64_t)output[j].mr_start +
		    (uint64_t)output[j].mr_size) >
		    BUS_SPACE_MAXADDR_32BIT) {
			output[j].mr_size = BUS_SPACE_MAXADDR_32BIT -
			    output[j].mr_start;
		}
#endif

		j++;
	}
	sz = j*sizeof(output[0]);

#ifdef __powerpc64__
	if (apple_hack_mode) {
		/* Add in regions above 4 GB to the available list */
		struct mem_region himem[OFMEM_REGIONS];
		int hisz;

		hisz = parse_ofw_memory(node, "reg", himem);
		for (i = 0; i < hisz/sizeof(himem[0]); i++) {
			if (himem[i].mr_start > BUS_SPACE_MAXADDR_32BIT) {
				output[j].mr_start = himem[i].mr_start;
				output[j].mr_size = himem[i].mr_size;
				j++;
			}
		}
		sz = j*sizeof(output[0]);
	}
#endif

	return (sz);
}
/*
* This is called during powerpc_init, before the system is really initialized.
* It shall provide the total and the available regions of RAM.
@ -154,83 +259,23 @@ ofw_mem_regions(struct mem_region **memp, int *memsz,
int asz, msz, fsz;
int i, j;
int still_merging;
cell_t address_cells;
asz = msz = 0;
/*
* Get #address-cells from root node, defaulting to 1 if it cannot
* be found.
*/
phandle = OF_finddevice("/");
if (OF_getprop(phandle, "#address-cells", &address_cells,
sizeof(address_cells)) < sizeof(address_cells))
address_cells = 1;
/*
* Get memory.
*/
if ((phandle = OF_finddevice("/memory")) == -1
|| (asz = OF_getprop(phandle, "available",
OFavail, sizeof OFavail[0] * OFMEM_REGIONS)) <= 0)
{
if (ofw_real_mode) {
/* XXX MAMBO */
printf("Physical memory unknown -- guessing 128 MB\n");
phandle = OF_finddevice("/memory");
if (phandle == -1)
phandle = OF_finddevice("/memory@0");
/* Leave the first 0xA000000 bytes for the kernel */
OFavail[0].mr_start = 0xA00000;
OFavail[0].mr_size = 0x75FFFFF;
asz = sizeof(OFavail[0]);
} else {
panic("no memory?");
}
}
if (address_cells == 2) {
struct mem_region64 OFmem64[OFMEM_REGIONS + 1];
if ((phandle == -1) || (msz = OF_getprop(phandle, "reg",
OFmem64, sizeof OFmem64[0] * OFMEM_REGIONS)) <= 0) {
if (ofw_real_mode) {
/* XXX MAMBO */
OFmem64[0].mr_start_hi = 0;
OFmem64[0].mr_start_lo = 0x0;
OFmem64[0].mr_size = 0x7FFFFFF;
msz = sizeof(OFmem64[0]);
} else {
panic("Physical memory map not found");
}
}
for (i = 0, j = 0; i < msz/sizeof(OFmem64[0]); i++) {
if (OFmem64[i].mr_start_hi == 0) {
OFmem[i].mr_start = OFmem64[i].mr_start_lo;
OFmem[i].mr_size = OFmem64[i].mr_size;
/*
* Check for memory regions extending above 32-bit
* memory space, and restrict them to stay there.
*/
if (((uint64_t)OFmem[i].mr_start +
(uint64_t)OFmem[i].mr_size) >
BUS_SPACE_MAXADDR_32BIT) {
OFmem[i].mr_size = BUS_SPACE_MAXADDR_32BIT -
OFmem[i].mr_start;
}
j++;
}
}
msz = j*sizeof(OFmem[0]);
} else {
if ((msz = OF_getprop(phandle, "reg",
OFmem, sizeof OFmem[0] * OFMEM_REGIONS)) <= 0)
panic("Physical memory map not found");
}
msz = parse_ofw_memory(phandle, "reg", OFmem);
nOFmem = msz / sizeof(struct mem_region);
asz = parse_ofw_memory(phandle, "available", OFavail);
*memp = OFmem;
*memsz = msz / sizeof(struct mem_region);
*memsz = nOFmem;
/*
* OFavail may have overlapping regions - collapse these
* and copy out remaining regions to OFfree
@ -274,7 +319,19 @@ OF_initial_setup(void *fdt_ptr, void *junk, int (*openfirm)(void *))
else
ofw_real_mode = 1;
ofwcall = openfirm;
ofwcall = NULL;
#ifdef __powerpc64__
/*
* For PPC64, we need to use some hand-written
* asm trampolines to get to OF.
*/
if (openfirm != NULL)
ofwcall = ofw_32bit_mode_entry;
#else
ofwcall = openfirm;
#endif
fdt = fdt_ptr;
}
@ -284,10 +341,15 @@ OF_bootstrap()
boolean_t status = FALSE;
if (ofwcall != NULL) {
if (ofw_real_mode)
if (ofw_real_mode) {
status = OF_install(OFW_STD_REAL, 0);
else
} else {
#ifdef __powerpc64__
status = OF_install(OFW_STD_32BIT, 0);
#else
status = OF_install(OFW_STD_DIRECT, 0);
#endif
}
if (status != TRUE)
return status;
@ -347,26 +409,28 @@ ofw_quiesce(void)
static int
openfirmware_core(void *args)
{
long oldmsr;
int result;
u_int srsave[16];
u_int i;
int result;
register_t oldmsr;
#ifndef __powerpc64__
register_t srsave[16];
u_int i;
#endif
__asm __volatile( "\t"
"sync\n\t"
"mfmsr %0\n\t"
"mtmsr %1\n\t"
"isync\n"
: "=r" (oldmsr)
: "r" (ofmsr[0])
);
/*
* Turn off exceptions - we really don't want to end up
* anywhere unexpected with PCPU set to something strange,
* the stack pointer wrong, or the OFW mapping enabled.
*/
oldmsr = intr_disable();
ofw_sprg_prepare();
#ifndef __powerpc64__
if (pmap_bootstrapped && !ofw_real_mode) {
/*
* Swap the kernel's address space with Open Firmware's
*/
for (i = 0; i < 16; i++) {
srsave[i] = mfsrin(i << ADDR_SR_SHFT);
mtsrin(i << ADDR_SR_SHFT, ofw_pmap.pm_sr[i]);
@ -381,28 +445,28 @@ openfirmware_core(void *args)
}
isync();
}
#endif
result = ofwcall(args);
#ifndef __powerpc64__
if (pmap_bootstrapped && !ofw_real_mode) {
/*
* Restore the kernel's addr space. The isync() doesn;t
* work outside the loop unless mtsrin() is open-coded
* in an asm statement :(
*/
for (i = 0; i < 16; i++) {
mtsrin(i << ADDR_SR_SHFT, srsave[i]);
isync();
}
}
#endif
ofw_sprg_restore();
__asm( "\t"
"mtmsr %0\n\t"
"isync\n"
: : "r" (oldmsr)
);
intr_restore(oldmsr);
return (result);
}
@ -626,7 +690,7 @@ mem_valid(vm_offset_t addr, int len)
{
int i;
for (i = 0; i < OFMEM_REGIONS; i++)
for (i = 0; i < nOFmem; i++)
if ((addr >= OFmem[i].mr_start)
&& (addr + len < OFmem[i].mr_start + OFmem[i].mr_size))
return (0);

303
sys/powerpc/aim/slb.c Normal file
View File

@ -0,0 +1,303 @@
/*-
* Copyright (c) 2010 Nathan Whitehorn
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/tree.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
struct slbcontainer {
struct slb slb;
SPLAY_ENTRY(slbcontainer) slb_node;
};
static int slb_compare(struct slbcontainer *a, struct slbcontainer *b);
static void slb_zone_init(void *);
SPLAY_PROTOTYPE(slb_tree, slbcontainer, slb_node, slb_compare);
SPLAY_GENERATE(slb_tree, slbcontainer, slb_node, slb_compare);
uma_zone_t slb_zone;
uma_zone_t slb_cache_zone;
SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);
/*
 * Look up the SLB entry (ESID/VSID pair) covering virtual address 'va' in
 * pmap 'pm', storing it in '*slb'.  Returns 0 on success, -1 if the pmap
 * has no mapping for that segment.  Kernel VSIDs are computed
 * deterministically; user VSIDs are looked up in the pmap's splay tree,
 * which requires the pmap lock to be held.
 */
int
va_to_slb_entry(pmap_t pm, vm_offset_t va, struct slb *slb)
{
	struct slbcontainer cont, *found;
	uint64_t esid;

	/* The ESID is the top bits of the effective address. */
	esid = (uintptr_t)va >> ADDR_SR_SHFT;
	slb->slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	if (pm == kernel_pmap) {
		/* Set kernel VSID to deterministic value */
		slb->slbv = va_to_vsid(kernel_pmap, va) << SLBV_VSID_SHIFT;

		/* Figure out if this is a large-page mapping */
		if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
			/*
			 * XXX: If we have set up a direct map, assumes
			 * all physical memory is mapped with large pages.
			 */
			if (mem_valid(va, 0) == 0)
				slb->slbv |= SLBV_L;
		}

		return (0);
	}

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	/* User pmap: search the splay tree, keyed on slbe. */
	cont.slb.slbe = slb->slbe;
	found = SPLAY_FIND(slb_tree, &pm->pm_slbtree, &cont);

	if (found == NULL)
		return (-1);

	slb->slbv = found->slb.slbv;
	return (0);
}
/*
 * Return the VSID for virtual address 'va' in pmap 'pm'.  Kernel VSIDs
 * are generated deterministically via KERNEL_VSID(); user VSIDs are taken
 * from the pmap's SLB tree, allocating a fresh entry on first use.
 */
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb entry;
	int large;

	/* Shortcut kernel case */
	if (pm == kernel_pmap) {
		/* Direct-map addresses backed by real memory use large pages */
		large = 0;
		if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS &&
		    mem_valid(va, 0) == 0)
			large = 1;

		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT, large));
	}

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	if (va_to_slb_entry(pm, va, &entry) != 0)
		return (allocate_vsid(pm, (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry.slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}
/*
 * Allocate and record a VSID for segment 'esid' in pmap 'pm', returning
 * the new VSID.  For user pmaps a fresh VSID comes from
 * moea64_get_unique_vsid() and the mapping is inserted into the pmap's
 * splay tree (pmap lock must be held).  For the kernel pmap the VSID is
 * deterministic and only a stack-local container is used -- nothing is
 * inserted into a tree.  In both cases the new entry is pre-spilled into
 * the relevant SLB shadow cache.
 */
uint64_t
allocate_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid;
	struct slbcontainer *slb_entry, kern_entry;
	struct slb *prespill;

	prespill = NULL;

	if (pm == kernel_pmap) {
		/* Deterministic kernel VSID; spill into this CPU's cache */
		vsid = va_to_vsid(pm, esid << ADDR_SR_SHFT);
		slb_entry = &kern_entry;
		prespill = PCPU_GET(slb);
	} else {
		vsid = moea64_get_unique_vsid();
		slb_entry = uma_zalloc(slb_zone, M_NOWAIT);

		if (slb_entry == NULL)
			panic("Could not allocate SLB mapping!");

		prespill = pm->pm_slb;
	}

	slb_entry->slb.slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	slb_entry->slb.slbv = vsid << SLBV_VSID_SHIFT;
	if (large)
		slb_entry->slb.slbv |= SLBV_L;

	if (pm != kernel_pmap) {
		PMAP_LOCK_ASSERT(pm, MA_OWNED);
		SPLAY_INSERT(slb_tree, &pm->pm_slbtree, slb_entry);
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	if (prespill != NULL)
		slb_insert(pm, prespill, &slb_entry->slb);

	return (vsid);
}
/* Lock entries mapping kernel text and stacks */
#define SLB_SPILLABLE(slbe) \
(((slbe & SLBE_ESID_MASK) < VM_MIN_KERNEL_ADDRESS && \
(slbe & SLBE_ESID_MASK) > 16*SEGMENT_LENGTH) || \
(slbe & SLBE_ESID_MASK) > VM_MAX_KERNEL_ADDRESS)
/*
 * Insert 'slb_entry' into the 64-entry SLB shadow cache 'slbcache',
 * evicting a spillable victim if the cache is full.  For the kernel pmap
 * the USER_SR slot and entries covering kernel text/stacks (see
 * SLB_SPILLABLE) are never evicted.  If the cache belongs to the kernel
 * pmap and the MMU is up, the entry is also written into the hardware
 * SLB on this CPU via slbmte.  Runs in a critical section so the cache
 * and hardware cannot be changed out from under us by preemption.
 */
void
slb_insert(pmap_t pm, struct slb *slbcache, struct slb *slb_entry)
{
	uint64_t slbe, slbv;
	int i, j, to_spill;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	to_spill = -1;
	slbv = slb_entry->slbv;
	slbe = slb_entry->slbe;

	/* Hunt for a likely candidate */

	/* Start at a pseudo-random slot (timebase) to spread evictions */
	for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
		if (pm == kernel_pmap && i == USER_SR)
			continue;

		/* An invalid slot is the best possible victim */
		if (!(slbcache[i].slbe & SLBE_VALID)) {
			to_spill = i;
			break;
		}

		/* Otherwise remember the first spillable valid slot */
		if (to_spill < 0 && (pm != kernel_pmap ||
		    SLB_SPILLABLE(slbcache[i].slbe)))
			to_spill = i;
	}

	if (to_spill < 0)
		panic("SLB spill on ESID %#lx, but no available candidates!\n",
		    (slbe & SLBE_ESID_MASK) >> SLBE_ESID_SHIFT);

	if (slbcache[to_spill].slbe & SLBE_VALID) {
		/* Invalidate this first to avoid races */
		slbcache[to_spill].slbe = 0;
		mb();
	}

	/* The low bits of slbe carry the slot index for slbmte */
	slbcache[to_spill].slbv = slbv;
	slbcache[to_spill].slbe = slbe | (uint64_t)to_spill;

	/* If it is for this CPU, put it in the SLB right away */
	if (pm == kernel_pmap && pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[to_spill].slbv),
		    "r"(slbcache[to_spill].slbe));
	}

	critical_exit();
}
/*
 * Reverse lookup: scan a user pmap's SLB tree for the entry carrying the
 * given VSID.  On success, stores the matching ESID in '*esid' and
 * returns 0; returns -1 if no entry has that VSID.  Only meaningful for
 * user pmaps, and the pmap lock must be held.
 */
int
vsid_to_esid(pmap_t pm, uint64_t vsid, uint64_t *esid)
{
	struct slbcontainer *sc;
	uint64_t target;

#ifdef INVARIANTS
	if (pm == kernel_pmap)
		panic("vsid_to_esid only works on user pmaps");

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
#endif

	target = vsid << SLBV_VSID_SHIFT;

	SPLAY_FOREACH(sc, slb_tree, &pm->pm_slbtree) {
		if (sc->slb.slbv != target)
			continue;
		*esid = sc->slb.slbe >> SLBE_ESID_SHIFT;
		return (0);
	}

	return (-1);
}
/*
 * Tear down a user pmap's SLB tree: release every VSID back to the
 * allocator and free each tree node, leaving the tree empty.
 */
void
free_vsids(pmap_t pm)
{
	struct slbcontainer *sc;

	for (;;) {
		if (SPLAY_EMPTY(&pm->pm_slbtree))
			break;

		/* Detach the smallest remaining entry and release it. */
		sc = SPLAY_MIN(slb_tree, &pm->pm_slbtree);
		SPLAY_REMOVE(slb_tree, &pm->pm_slbtree, sc);
		moea64_release_vsid(sc->slb.slbv >> SLBV_VSID_SHIFT);
		uma_zfree(slb_zone, sc);
	}
}
/*
 * Splay-tree comparator: order slbcontainers by their slbe field
 * (effective segment ID plus flags).  Returns -1/0/1.
 */
static int
slb_compare(struct slbcontainer *a, struct slbcontainer *b)
{
	uint64_t ea, eb;

	ea = a->slb.slbe;
	eb = b->slb.slbe;

	return ((ea < eb) ? -1 : (ea > eb) ? 1 : 0);
}
static void
slb_zone_init(void *dummy)
{
slb_zone = uma_zcreate("SLB segment", sizeof(struct slbcontainer),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
slb_cache_zone = uma_zcreate("SLB cache", 64*sizeof(struct slb),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
}
/*
 * Allocate a zeroed per-process SLB shadow cache (64 struct slb slots,
 * matching the slb_cache_zone item size).
 *
 * NOTE(review): uma_zalloc() is passed only M_ZERO -- neither M_WAITOK
 * nor M_NOWAIT is specified, and a NULL return is not handled here.
 * Confirm whether M_WAITOK should be added.
 */
struct slb *
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}
/*
 * Return a per-process SLB shadow cache (from slb_alloc_user_cache())
 * to its UMA zone.
 */
void
slb_free_user_cache(struct slb *slb)
{
	uma_zfree(slb_cache_zone, slb);
}

View File

@ -63,8 +63,6 @@
#include <machine/trap.h>
#include <machine/param.h>
#include <machine/sr.h>
#include <machine/psl.h>
#include <machine/asm.h>
/*
@ -83,7 +81,6 @@ ENTRY(cpu_throw)
*/
ENTRY(cpu_switch)
lwz %r6,TD_PCB(%r3) /* Get the old thread's PCB ptr */
mr %r12,%r2
stmw %r12,PCB_CONTEXT(%r6) /* Save the non-volatile GP regs.
These can now be used for scratch */
@ -93,8 +90,9 @@ ENTRY(cpu_switch)
stw %r16,PCB_LR(%r6)
mfsr %r16,USER_SR /* Save USER_SR for copyin/out */
isync
stw %r16,PCB_AIM_USR(%r6)
stw %r16,PCB_AIM_USR_VSID(%r6)
stw %r1,PCB_SP(%r6) /* Save the stack pointer */
stw %r2,PCB_TOC(%r6) /* Save the TOC pointer */
mr %r14,%r3 /* Copy the old thread ptr... */
mr %r15,%r4 /* and the new thread ptr in scratch */
@ -159,15 +157,15 @@ blocked_loop:
.L4:
mr %r3,%r17 /* Recover PCB ptr */
lmw %r12,PCB_CONTEXT(%r3) /* Load the non-volatile GP regs */
mr %r2,%r12
lwz %r5,PCB_CR(%r3) /* Load the condition register */
mtcr %r5
lwz %r5,PCB_LR(%r3) /* Load the link register */
mtlr %r5
lwz %r5,PCB_AIM_USR(%r3) /* Load the USER_SR segment reg */
lwz %r5,PCB_AIM_USR_VSID(%r3) /* Load the USER_SR segment reg */
mtsr USER_SR,%r5
isync
lwz %r1,PCB_SP(%r3) /* Load the stack pointer */
lwz %r2,PCB_TOC(%r3) /* Load the TOC pointer */
/*
* Perform a dummy stwcx. to clear any reservations we may have
* inherited from the previous thread. It doesn't matter if the
@ -181,7 +179,6 @@ blocked_loop:
* Update pcb, saving current processor state
*/
ENTRY(savectx)
mr %r12,%r2
stmw %r12,PCB_CONTEXT(%r3) /* Save the non-volatile GP regs */
mfcr %r4 /* Save the condition register */
stw %r4,PCB_CR(%r3)

291
sys/powerpc/aim/swtch64.S Normal file
View File

@ -0,0 +1,291 @@
/* $FreeBSD$ */
/* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */
/*-
* Copyright (C) 2001 Benno Rice
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
* Copyright (C) 1995, 1996 TooLs GmbH.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by TooLs GmbH.
* 4. The name of TooLs GmbH may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "assym.s"
#include "opt_sched.h"
#include <sys/syscall.h>
#include <machine/trap.h>
#include <machine/param.h>
#include <machine/asm.h>
/*
* void cpu_throw(struct thread *old, struct thread *new)
*/
ENTRY(cpu_throw)
	mr %r15, %r4		/* new thread ptr into the reg cpu_switchin expects */
	b cpu_switchin		/* discard old context; restore-only path of cpu_switch */
/*
* void cpu_switch(struct thread *old,
* struct thread *new,
* struct mutex *mtx);
*
* Switch to a new thread saving the current state in the old thread.
*/
ENTRY(cpu_switch)
	ld	%r6,TD_PCB(%r3)	/* Get the old thread's PCB ptr */
	std	%r12,PCB_CONTEXT(%r6)	/* Save the non-volatile GP regs.
					   These can now be used for scratch */
	std	%r13,PCB_CONTEXT+1*8(%r6)
	std	%r14,PCB_CONTEXT+2*8(%r6)
	std	%r15,PCB_CONTEXT+3*8(%r6)
	std	%r16,PCB_CONTEXT+4*8(%r6)
	std	%r17,PCB_CONTEXT+5*8(%r6)
	std	%r18,PCB_CONTEXT+6*8(%r6)
	std	%r19,PCB_CONTEXT+7*8(%r6)
	std	%r20,PCB_CONTEXT+8*8(%r6)
	std	%r21,PCB_CONTEXT+9*8(%r6)
	std	%r22,PCB_CONTEXT+10*8(%r6)
	std	%r23,PCB_CONTEXT+11*8(%r6)
	std	%r24,PCB_CONTEXT+12*8(%r6)
	std	%r25,PCB_CONTEXT+13*8(%r6)
	std	%r26,PCB_CONTEXT+14*8(%r6)
	std	%r27,PCB_CONTEXT+15*8(%r6)
	std	%r28,PCB_CONTEXT+16*8(%r6)
	std	%r29,PCB_CONTEXT+17*8(%r6)
	std	%r30,PCB_CONTEXT+18*8(%r6)
	std	%r31,PCB_CONTEXT+19*8(%r6)
	mfcr	%r16			/* Save the condition register */
	std	%r16,PCB_CR(%r6)
	mflr	%r16			/* Save the link register */
	std	%r16,PCB_LR(%r6)
	std	%r1,PCB_SP(%r6)	/* Save the stack pointer */
	std	%r2,PCB_TOC(%r6)	/* Save the TOC pointer */

	/*
	 * Save the USER_SR SLB slot (ESID/VSID pair, read with
	 * slbmfee/slbmfev) so copyin/copyout mappings survive the switch.
	 */
	li	%r14,0			/* Save USER_SR for copyin/out */
	li	%r15,0
	li	%r16,USER_SR
	slbmfee %r14, %r16
	slbmfev %r15, %r16
	isync
	std	%r14,PCB_AIM_USR_ESID(%r6)
	std	%r15,PCB_AIM_USR_VSID(%r6)

	mr	%r14,%r3		/* Copy the old thread ptr... */
	mr	%r15,%r4		/* and the new thread ptr in scratch */
	mr	%r16,%r5		/* and the new lock */
	mr	%r17,%r6		/* and the PCB */

	stdu	%r1,-48(%r1)		/* minimal frame for the C calls below */

	lwz	%r7,PCB_FLAGS(%r17)
	/* Save FPU context if needed */
	andi.	%r7, %r7, PCB_FPU
	beq	.L1
	bl	.save_fpu		/* calls go via the function descriptor dot-symbol */
	nop

.L1:
	mr	%r3,%r14		/* restore old thread ptr */
	lwz	%r7,PCB_FLAGS(%r17)
	/* Save Altivec context if needed */
	andi.	%r7, %r7, PCB_VEC
	beq	.L2
	bl	.save_vec
	nop

.L2:
	mr	%r3,%r14		/* restore old thread ptr */
	bl	.pmap_deactivate	/* Deactivate the current pmap */
	nop

	addi	%r1,%r1,48
	std	%r16,TD_LOCK(%r14)	/* ULE: update old thread's lock */

	/* cpu_throw enters here: only the restore half runs for it */
cpu_switchin:
#if defined(SMP) && defined(SCHED_ULE)
	/* Wait for the new thread to become unblocked */
	lis	%r6,blocked_lock@ha
	addi	%r6,%r6,blocked_lock@l
blocked_loop:
	ld	%r7,TD_LOCK(%r15)
	cmpd	%r6,%r7
	beq	blocked_loop
#endif

	mfsprg	%r7,0			/* Get the pcpu pointer */
	std	%r15,PC_CURTHREAD(%r7)	/* Store new current thread */
	ld	%r17,TD_PCB(%r15)	/* Store new current PCB */
	std	%r17,PC_CURPCB(%r7)

	stdu	%r1,-48(%r1)

	mr	%r3,%r15		/* Get new thread ptr */
	bl	.pmap_activate		/* Activate the new address space */
	nop

	lwz	%r6, PCB_FLAGS(%r17)
	/* Restore FPU context if needed */
	andi.	%r6, %r6, PCB_FPU
	beq	.L3
	mr	%r3,%r15		/* Pass curthread to enable_fpu */
	bl	.enable_fpu
	nop

.L3:
	lwz	%r6, PCB_FLAGS(%r17)
	/* Restore Altivec context if needed */
	andi.	%r6, %r6, PCB_VEC
	beq	.L4
	mr	%r3,%r15		/* Pass curthread to enable_vec */
	bl	.enable_vec
	nop

/* thread to restore is in r3 */
.L4:
	addi	%r1,%r1,48
	mr	%r3,%r17		/* Recover PCB ptr */
	ld	%r12,PCB_CONTEXT(%r3)	/* Load the non-volatile GP regs. */
	ld	%r13,PCB_CONTEXT+1*8(%r3)
	ld	%r14,PCB_CONTEXT+2*8(%r3)
	ld	%r15,PCB_CONTEXT+3*8(%r3)
	ld	%r16,PCB_CONTEXT+4*8(%r3)
	ld	%r17,PCB_CONTEXT+5*8(%r3)
	ld	%r18,PCB_CONTEXT+6*8(%r3)
	ld	%r19,PCB_CONTEXT+7*8(%r3)
	ld	%r20,PCB_CONTEXT+8*8(%r3)
	ld	%r21,PCB_CONTEXT+9*8(%r3)
	ld	%r22,PCB_CONTEXT+10*8(%r3)
	ld	%r23,PCB_CONTEXT+11*8(%r3)
	ld	%r24,PCB_CONTEXT+12*8(%r3)
	ld	%r25,PCB_CONTEXT+13*8(%r3)
	ld	%r26,PCB_CONTEXT+14*8(%r3)
	ld	%r27,PCB_CONTEXT+15*8(%r3)
	ld	%r28,PCB_CONTEXT+16*8(%r3)
	ld	%r29,PCB_CONTEXT+17*8(%r3)
	ld	%r30,PCB_CONTEXT+18*8(%r3)
	ld	%r31,PCB_CONTEXT+19*8(%r3)
	ld	%r5,PCB_CR(%r3)		/* Load the condition register */
	mtcr	%r5
	ld	%r5,PCB_LR(%r3)		/* Load the link register */
	mtlr	%r5
	ld	%r1,PCB_SP(%r3)		/* Load the stack pointer */
	ld	%r2,PCB_TOC(%r3)	/* Load the TOC pointer */

	/*
	 * Rebuild USER_ADDR (64-bit constant, assembled 16 bits at a
	 * time), invalidate any stale USER_SR translation with slbie,
	 * then install the saved ESID/VSID pair with slbmte.
	 */
	lis	%r5,USER_ADDR@highesta	/* Load the USER_SR segment reg */
	ori	%r5,%r5,USER_ADDR@highera
	sldi	%r5,%r5,32
	oris	%r5,%r5,USER_ADDR@ha
	slbie	%r5
	ld	%r5,PCB_AIM_USR_VSID(%r3)
	ld	%r6,PCB_AIM_USR_ESID(%r3)
	ori	%r6,%r6,USER_SR
	slbmte	%r5,%r6
	isync

	/*
	 * Perform a dummy stdcx. to clear any reservations we may have
	 * inherited from the previous thread. It doesn't matter if the
	 * stdcx succeeds or not. pcb_context[0] can be clobbered.
	 */
	stdcx.	%r1, 0, %r3
	blr
/*
* savectx(pcb)
* Update pcb, saving current processor state
*/
/*
 * savectx(pcb): snapshot the caller's non-volatile GPRs, CR, and TOC
 * into the given PCB and return.  NOTE(review): unlike cpu_switch, LR
 * and the stack pointer are not stored here -- confirm callers do not
 * depend on PCB_LR/PCB_SP after savectx.
 */
ENTRY(savectx)
	std	%r12,PCB_CONTEXT(%r3)	/* Save the non-volatile GP regs. */
	std	%r13,PCB_CONTEXT+1*8(%r3)
	std	%r14,PCB_CONTEXT+2*8(%r3)
	std	%r15,PCB_CONTEXT+3*8(%r3)
	std	%r16,PCB_CONTEXT+4*8(%r3)
	std	%r17,PCB_CONTEXT+5*8(%r3)
	std	%r18,PCB_CONTEXT+6*8(%r3)
	std	%r19,PCB_CONTEXT+7*8(%r3)
	std	%r20,PCB_CONTEXT+8*8(%r3)
	std	%r21,PCB_CONTEXT+9*8(%r3)
	std	%r22,PCB_CONTEXT+10*8(%r3)
	std	%r23,PCB_CONTEXT+11*8(%r3)
	std	%r24,PCB_CONTEXT+12*8(%r3)
	std	%r25,PCB_CONTEXT+13*8(%r3)
	std	%r26,PCB_CONTEXT+14*8(%r3)
	std	%r27,PCB_CONTEXT+15*8(%r3)
	std	%r28,PCB_CONTEXT+16*8(%r3)
	std	%r29,PCB_CONTEXT+17*8(%r3)
	std	%r30,PCB_CONTEXT+18*8(%r3)
	std	%r31,PCB_CONTEXT+19*8(%r3)
	mfcr	%r4			/* Save the condition register */
	std	%r4,PCB_CR(%r3)
	std	%r2,PCB_TOC(%r3)	/* Save the TOC pointer */
	blr
/*
* fork_trampoline()
* Set up the return from cpu_fork()
*/
/*
 * First code run by a newly forked thread: pull the callout function and
 * its two arguments out of the callframe cpu_fork() left on the stack,
 * run fork_exit(), then fall into trapexit to return to user mode via
 * the trapframe above the callframe.
 */
ENTRY(fork_trampoline)
	ld	%r3,CF_FUNC(%r1)	/* fork_exit() callout */
	ld	%r4,CF_ARG0(%r1)
	ld	%r5,CF_ARG1(%r1)
	stdu	%r1,-48(%r1)		/* minimal frame for the C call */
	bl	.fork_exit
	nop
	addi	%r1,%r1,48+CF_SIZE-FSP	/* Allow 8 bytes in front of
					   trapframe to simulate FRAME_SETUP
					   does when allocating space for
					   a frame pointer/saved LR */
	b	trapexit
	nop

View File

@ -65,6 +65,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <machine/_inttypes.h>
#include <machine/altivec.h>
#include <machine/cpu.h>
#include <machine/db_machdep.h>
@ -86,6 +87,10 @@ static int ppc_instr_emulate(struct trapframe *frame);
static int handle_onfault(struct trapframe *frame);
static void syscall(struct trapframe *frame);
#ifdef __powerpc64__
static int handle_slb_spill(pmap_t pm, vm_offset_t addr);
#endif
int setfault(faultbuf); /* defined in locore.S */
/* Why are these not defined in a header? */
@ -101,7 +106,9 @@ static struct powerpc_exception powerpc_exceptions[] = {
{ 0x0100, "system reset" },
{ 0x0200, "machine check" },
{ 0x0300, "data storage interrupt" },
{ 0x0380, "data segment exception" },
{ 0x0400, "instruction storage interrupt" },
{ 0x0480, "instruction segment exception" },
{ 0x0500, "external interrupt" },
{ 0x0600, "alignment" },
{ 0x0700, "program" },
@ -171,6 +178,15 @@ trap(struct trapframe *frame)
sig = SIGTRAP;
break;
#ifdef __powerpc64__
case EXC_ISE:
case EXC_DSE:
if (handle_slb_spill(&p->p_vmspace->vm_pmap,
(type == EXC_ISE) ? frame->srr0 :
frame->cpu.aim.dar) != 0)
sig = SIGSEGV;
break;
#endif
case EXC_DSI:
case EXC_ISI:
sig = trap_pfault(frame, 1);
@ -227,6 +243,15 @@ trap(struct trapframe *frame)
if (trap_pfault(frame, 0) == 0)
return;
break;
#ifdef __powerpc64__
case EXC_ISE:
case EXC_DSE:
if (handle_slb_spill(kernel_pmap,
(type == EXC_ISE) ? frame->srr0 :
frame->cpu.aim.dar) != 0)
panic("Fault handling kernel SLB miss");
return;
#endif
case EXC_MCHK:
if (handle_onfault(frame))
return;
@ -276,16 +301,19 @@ printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
printf(" exception = 0x%x (%s)\n", vector >> 8,
trapname(vector));
switch (vector) {
case EXC_DSE:
case EXC_DSI:
printf(" virtual address = 0x%x\n", frame->cpu.aim.dar);
printf(" virtual address = 0x%" PRIxPTR "\n",
frame->cpu.aim.dar);
break;
case EXC_ISE:
case EXC_ISI:
printf(" virtual address = 0x%x\n", frame->srr0);
printf(" virtual address = 0x%" PRIxPTR "\n", frame->srr0);
break;
}
printf(" srr0 = 0x%x\n", frame->srr0);
printf(" srr1 = 0x%x\n", frame->srr1);
printf(" lr = 0x%x\n", frame->lr);
printf(" srr0 = 0x%" PRIxPTR "\n", frame->srr0);
printf(" srr1 = 0x%" PRIxPTR "\n", frame->srr1);
printf(" lr = 0x%" PRIxPTR "\n", frame->lr);
printf(" curthread = %p\n", curthread);
if (curthread != NULL)
printf(" pid = %d, comm = %s\n",
@ -324,7 +352,8 @@ cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
struct proc *p;
struct trapframe *frame;
caddr_t params;
int error, n;
size_t argsz;
int error, n, i;
p = td->td_proc;
frame = td->td_frame;
@ -338,7 +367,7 @@ cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
* code is first argument,
* followed by actual args.
*/
sa->code = *(u_int *) params;
sa->code = *(register_t *) params;
params += sizeof(register_t);
n -= 1;
} else if (sa->code == SYS___syscall) {
@ -347,10 +376,16 @@ cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
* so as to maintain quad alignment
* for the rest of the args.
*/
params += sizeof(register_t);
sa->code = *(u_int *) params;
params += sizeof(register_t);
n -= 2;
if (p->p_sysent->sv_flags & SV_ILP32) {
params += sizeof(register_t);
sa->code = *(register_t *) params;
params += sizeof(register_t);
n -= 2;
} else {
sa->code = *(register_t *) params;
params += sizeof(register_t);
n -= 1;
}
}
if (p->p_sysent->sv_mask)
@ -362,13 +397,34 @@ cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
sa->narg = sa->callp->sy_narg;
bcopy(params, sa->args, n * sizeof(register_t));
if (sa->narg > n) {
if (p->p_sysent->sv_flags & SV_ILP32) {
argsz = sizeof(uint32_t);
for (i = 0; i < n; i++)
sa->args[i] = ((u_register_t *)(params))[i] &
0xffffffff;
} else {
argsz = sizeof(uint64_t);
for (i = 0; i < n; i++)
sa->args[i] = ((u_register_t *)(params))[i];
}
if (sa->narg > n)
error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
(sa->narg - n) * sizeof(register_t));
} else
(sa->narg - n) * argsz);
else
error = 0;
#ifdef __powerpc64__
if (p->p_sysent->sv_flags & SV_ILP32 && sa->narg > n) {
/* Expand the size of arguments copied from the stack */
for (i = sa->narg; i >= n; i--)
sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
}
#endif
if (error == 0) {
td->td_retval[0] = 0;
td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
@ -390,6 +446,44 @@ syscall(struct trapframe *frame)
syscallret(td, error, &sa);
}
#ifdef __powerpc64__
/*
 * Handle a segment lookaside buffer (SLB) miss fault (EXC_ISE/EXC_DSE)
 * by installing an SLB entry covering the faulting address.
 *
 * For the kernel pmap the entry is looked up and spilled into the
 * per-CPU SLB; any lookup error is returned to the caller.  For a user
 * pmap, a missing entry means the segment has no VSID yet, so one is
 * allocated; otherwise the entry is inserted into the pmap's SLB cache
 * unless another CPU has already done so.  Returns 0 on success.
 */
static int
handle_slb_spill(pmap_t pm, vm_offset_t addr)
{
struct slb slb_entry;
int error, i;
if (pm == kernel_pmap) {
error = va_to_slb_entry(pm, addr, &slb_entry);
if (error)
return (error);
slb_insert(pm, PCPU_GET(slb), &slb_entry);
return (0);
}
PMAP_LOCK(pm);
error = va_to_slb_entry(pm, addr, &slb_entry);
if (error != 0)
/* No SLB entry for this VA: allocate a VSID for its segment. */
(void)allocate_vsid(pm, (uintptr_t)addr >> ADDR_SR_SHFT, 0);
else {
/*
 * Check that another CPU has not already mapped this.
 * XXX: Per-thread SLB caches would be better.
 */
for (i = 0; i < 64; i++)
if (pm->pm_slb[i].slbe == (slb_entry.slbe | i))
break;
if (i == 64)
slb_insert(pm, pm->pm_slb, &slb_entry);
}
PMAP_UNLOCK(pm);
return (0);
}
#endif
static int
trap_pfault(struct trapframe *frame, int user)
{
@ -399,7 +493,7 @@ trap_pfault(struct trapframe *frame, int user)
vm_map_t map;
vm_prot_t ftype;
int rv;
u_int user_sr;
register_t user_sr;
td = curthread;
p = td->td_proc;
@ -417,16 +511,33 @@ trap_pfault(struct trapframe *frame, int user)
if (user) {
map = &p->p_vmspace->vm_map;
} else {
if ((eva >> ADDR_SR_SHFT) == USER_SR) {
if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
if (p->p_vmspace == NULL)
return (SIGSEGV);
map = &p->p_vmspace->vm_map;
#ifdef __powerpc64__
user_sr = 0;
__asm ("slbmfev %0, %1"
: "=r"(user_sr)
: "r"(USER_SR));
PMAP_LOCK(&p->p_vmspace->vm_pmap);
user_sr >>= SLBV_VSID_SHIFT;
rv = vsid_to_esid(&p->p_vmspace->vm_pmap, user_sr,
&user_sr);
PMAP_UNLOCK(&p->p_vmspace->vm_pmap);
if (rv != 0)
return (SIGSEGV);
#else
__asm ("mfsr %0, %1"
: "=r"(user_sr)
: "K"(USER_SR));
#endif
eva &= ADDR_PIDX | ADDR_POFF;
eva |= user_sr << ADDR_SR_SHFT;
map = &p->p_vmspace->vm_map;
} else {
map = kernel_map;
}
@ -502,7 +613,7 @@ badaddr_read(void *addr, size_t size, int *rptr)
x = *(volatile int32_t *)addr;
break;
default:
panic("badaddr: invalid size (%d)", size);
panic("badaddr: invalid size (%zd)", size);
}
/* Make sure we took the machine check, if we caused one. */

View File

@ -0,0 +1,678 @@
/* $FreeBSD$ */
/* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $ */
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
* Copyright (C) 1995, 1996 TooLs GmbH.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by TooLs GmbH.
* 4. The name of TooLs GmbH may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NOTICE: This is not a standalone file. to use it, #include it in
* your port's locore.S, like so:
*
* #include <powerpc/aim/trap_subr.S>
*/
/*
 * Save/restore segment registers
 *
 * RESTORE_SRS loads all 16 segment registers from the array at
 * "pmap".  The caller must already have loaded SR0's value into
 * "sr"; the remaining 15 are fetched from successive 4-byte slots.
 */
#define RESTORE_SRS(pmap,sr) mtsr 0,sr; \
lwz sr,1*4(pmap); mtsr 1,sr; \
lwz sr,2*4(pmap); mtsr 2,sr; \
lwz sr,3*4(pmap); mtsr 3,sr; \
lwz sr,4*4(pmap); mtsr 4,sr; \
lwz sr,5*4(pmap); mtsr 5,sr; \
lwz sr,6*4(pmap); mtsr 6,sr; \
lwz sr,7*4(pmap); mtsr 7,sr; \
lwz sr,8*4(pmap); mtsr 8,sr; \
lwz sr,9*4(pmap); mtsr 9,sr; \
lwz sr,10*4(pmap); mtsr 10,sr; \
lwz sr,11*4(pmap); mtsr 11,sr; \
lwz sr,12*4(pmap); mtsr 12,sr; \
lwz sr,13*4(pmap); mtsr 13,sr; \
lwz sr,14*4(pmap); mtsr 14,sr; \
lwz sr,15*4(pmap); mtsr 15,sr; isync;
/*
 * User SRs are loaded through a pointer to the current pmap
 * (PC_CURPMAP in the per-CPU area); lwzu leaves "pmap" pointing
 * at the SR array while fetching SR0.
 */
#define RESTORE_USER_SRS(pmap,sr) \
GET_CPUINFO(pmap); \
lwz pmap,PC_CURPMAP(pmap); \
lwzu sr,PM_SR(pmap); \
RESTORE_SRS(pmap,sr)
/*
 * Kernel SRs are loaded directly from kernel_pmap_ (the statically
 * allocated kernel_pmap_store), no per-CPU indirection needed.
 */
#define RESTORE_KERN_SRS(pmap,sr) \
lis pmap,CNAME(kernel_pmap_store)@ha; \
lwzu sr,CNAME(kernel_pmap_store)+PM_SR@l(pmap); \
RESTORE_SRS(pmap,sr)
/*
 * FRAME_SETUP assumes:
 *	SPRG1		SP (1)
 *	SPRG3		trap type
 *	savearea	r28-r31,DAR,DSISR   (DAR & DSISR only for DSI traps)
 *	r28		LR
 *	r29		CR
 *	r30		scratch
 *	r31		scratch
 *	r1		kernel stack
 *	SRR0/1		as at start of trap
 *
 * Builds a struct trapframe on the kernel stack (at offset 8 past the
 * frame header) from the registers stashed above, after first turning
 * address translation back on so the stack is accessible.  "savearea"
 * selects which per-CPU save area (PC_TEMPSAVE/PC_DBSAVE) holds the
 * registers freed by the low-level vector code.
 */
#define FRAME_SETUP(savearea) \
/* Have to enable translation to allow access of kernel stack: */ \
GET_CPUINFO(%r31); \
mfsrr0 %r30; \
stw %r30,(savearea+CPUSAVE_SRR0)(%r31); /* save SRR0 */ \
mfsrr1 %r30; \
stw %r30,(savearea+CPUSAVE_SRR1)(%r31); /* save SRR1 */ \
mfmsr %r30; \
ori %r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */ \
mtmsr %r30; /* stack can now be accessed */ \
isync; \
mfsprg1 %r31; /* get saved SP */ \
stwu %r31,-FRAMELEN(%r1); /* save it in the callframe */ \
stw %r0, FRAME_0+8(%r1); /* save r0 in the trapframe */ \
stw %r31,FRAME_1+8(%r1); /* save SP " " */ \
stw %r2, FRAME_2+8(%r1); /* save r2 " " */ \
stw %r28,FRAME_LR+8(%r1); /* save LR " " */ \
stw %r29,FRAME_CR+8(%r1); /* save CR " " */ \
GET_CPUINFO(%r2); \
lwz %r28,(savearea+CPUSAVE_R28)(%r2); /* get saved r28 */ \
lwz %r29,(savearea+CPUSAVE_R29)(%r2); /* get saved r29 */ \
lwz %r30,(savearea+CPUSAVE_R30)(%r2); /* get saved r30 */ \
lwz %r31,(savearea+CPUSAVE_R31)(%r2); /* get saved r31 */ \
stw %r3, FRAME_3+8(%r1); /* save r3-r31 */ \
stw %r4, FRAME_4+8(%r1); \
stw %r5, FRAME_5+8(%r1); \
stw %r6, FRAME_6+8(%r1); \
stw %r7, FRAME_7+8(%r1); \
stw %r8, FRAME_8+8(%r1); \
stw %r9, FRAME_9+8(%r1); \
stw %r10, FRAME_10+8(%r1); \
stw %r11, FRAME_11+8(%r1); \
stw %r12, FRAME_12+8(%r1); \
stw %r13, FRAME_13+8(%r1); \
stw %r14, FRAME_14+8(%r1); \
stw %r15, FRAME_15+8(%r1); \
stw %r16, FRAME_16+8(%r1); \
stw %r17, FRAME_17+8(%r1); \
stw %r18, FRAME_18+8(%r1); \
stw %r19, FRAME_19+8(%r1); \
stw %r20, FRAME_20+8(%r1); \
stw %r21, FRAME_21+8(%r1); \
stw %r22, FRAME_22+8(%r1); \
stw %r23, FRAME_23+8(%r1); \
stw %r24, FRAME_24+8(%r1); \
stw %r25, FRAME_25+8(%r1); \
stw %r26, FRAME_26+8(%r1); \
stw %r27, FRAME_27+8(%r1); \
stw %r28, FRAME_28+8(%r1); \
stw %r29, FRAME_29+8(%r1); \
stw %r30, FRAME_30+8(%r1); \
stw %r31, FRAME_31+8(%r1); \
lwz %r28,(savearea+CPUSAVE_AIM_DAR)(%r2); /* saved DAR */ \
lwz %r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\
lwz %r30,(savearea+CPUSAVE_SRR0)(%r2); /* saved SRR0 */ \
lwz %r31,(savearea+CPUSAVE_SRR1)(%r2); /* saved SRR1 */ \
mfxer %r3; \
mfctr %r4; \
mfsprg3 %r5; \
stw %r3, FRAME_XER+8(1); /* save xer/ctr/exc */ \
stw %r4, FRAME_CTR+8(1); \
stw %r5, FRAME_EXC+8(1); \
stw %r28,FRAME_AIM_DAR+8(1); \
stw %r29,FRAME_AIM_DSISR+8(1); /* save dsisr/srr0/srr1 */ \
stw %r30,FRAME_SRR0+8(1); \
stw %r31,FRAME_SRR1+8(1)
/*
 * FRAME_LEAVE undoes FRAME_SETUP: reloads all registers from the
 * trapframe, disables translation/interrupts/recoverability, and, if
 * SRR1 shows the trap came from user mode (PSL_PR set), restores the
 * user segment registers.  Finishes with SRR0/SRR1 loaded ready for
 * the rfi that follows the macro.  Clobbers the same "savearea"
 * per-CPU slots used on entry.
 */
#define FRAME_LEAVE(savearea) \
/* Now restore regs: */ \
lwz %r2,FRAME_SRR0+8(%r1); \
lwz %r3,FRAME_SRR1+8(%r1); \
lwz %r4,FRAME_CTR+8(%r1); \
lwz %r5,FRAME_XER+8(%r1); \
lwz %r6,FRAME_LR+8(%r1); \
GET_CPUINFO(%r7); \
stw %r2,(savearea+CPUSAVE_SRR0)(%r7); /* save SRR0 */ \
stw %r3,(savearea+CPUSAVE_SRR1)(%r7); /* save SRR1 */ \
lwz %r7,FRAME_CR+8(%r1); \
mtctr %r4; \
mtxer %r5; \
mtlr %r6; \
mtsprg1 %r7; /* save cr */ \
lwz %r31,FRAME_31+8(%r1); /* restore r0-31 */ \
lwz %r30,FRAME_30+8(%r1); \
lwz %r29,FRAME_29+8(%r1); \
lwz %r28,FRAME_28+8(%r1); \
lwz %r27,FRAME_27+8(%r1); \
lwz %r26,FRAME_26+8(%r1); \
lwz %r25,FRAME_25+8(%r1); \
lwz %r24,FRAME_24+8(%r1); \
lwz %r23,FRAME_23+8(%r1); \
lwz %r22,FRAME_22+8(%r1); \
lwz %r21,FRAME_21+8(%r1); \
lwz %r20,FRAME_20+8(%r1); \
lwz %r19,FRAME_19+8(%r1); \
lwz %r18,FRAME_18+8(%r1); \
lwz %r17,FRAME_17+8(%r1); \
lwz %r16,FRAME_16+8(%r1); \
lwz %r15,FRAME_15+8(%r1); \
lwz %r14,FRAME_14+8(%r1); \
lwz %r13,FRAME_13+8(%r1); \
lwz %r12,FRAME_12+8(%r1); \
lwz %r11,FRAME_11+8(%r1); \
lwz %r10,FRAME_10+8(%r1); \
lwz %r9, FRAME_9+8(%r1); \
lwz %r8, FRAME_8+8(%r1); \
lwz %r7, FRAME_7+8(%r1); \
lwz %r6, FRAME_6+8(%r1); \
lwz %r5, FRAME_5+8(%r1); \
lwz %r4, FRAME_4+8(%r1); \
lwz %r3, FRAME_3+8(%r1); \
lwz %r2, FRAME_2+8(%r1); \
lwz %r0, FRAME_0+8(%r1); \
lwz %r1, FRAME_1+8(%r1); \
/* Can't touch %r1 from here on */ \
mtsprg2 %r2; /* save r2 & r3 */ \
mtsprg3 %r3; \
/* Disable translation, machine check and recoverability: */ \
mfmsr %r2; \
andi. %r2,%r2,~(PSL_DR|PSL_IR|PSL_EE|PSL_ME|PSL_RI)@l; \
mtmsr %r2; \
isync; \
/* Decide whether we return to user mode: */ \
GET_CPUINFO(%r2); \
lwz %r3,(savearea+CPUSAVE_SRR1)(%r2); \
mtcr %r3; \
bf 17,1f; /* branch if PSL_PR is false */ \
/* Restore user SRs */ \
RESTORE_USER_SRS(%r2,%r3); \
1: mfsprg1 %r2; /* restore cr */ \
mtcr %r2; \
GET_CPUINFO(%r2); \
lwz %r3,(savearea+CPUSAVE_SRR0)(%r2); /* restore srr0 */ \
mtsrr0 %r3; \
lwz %r3,(savearea+CPUSAVE_SRR1)(%r2); /* restore srr1 */ \
\
/* Make sure HV bit of MSR propagated to SRR1 */ \
mfmsr %r2; \
or %r3,%r2,%r3; \
\
mtsrr1 %r3; \
mfsprg2 %r2; /* restore r2 & r3 */ \
mfsprg3 %r3
/*
 * The next two routines are 64-bit glue code. The first is used to test if
 * we are on a 64-bit system. By copying it to the illegal instruction
 * handler, we can test for 64-bit mode by trying to execute a 64-bit
 * instruction and seeing what happens. The second gets copied in front
 * of all the other handlers to restore 32-bit bridge mode when traps
 * are taken.
 */
/* 64-bit test code. Sets SPRG2 to 0 if an illegal instruction is executed */
.globl CNAME(testppc64),CNAME(testppc64size)
CNAME(testppc64):
mtsprg1 %r31
mfsrr0 %r31
addi %r31, %r31, 4 /* step SRR0 past the faulting instruction */
mtsrr0 %r31
li %r31, 0
mtsprg2 %r31 /* flag "illegal instruction taken" */
mfsprg1 %r31
rfi
CNAME(testppc64size) = .-CNAME(testppc64)
/* 64-bit bridge mode restore snippet. Gets copied in front of everything else
 * on 64-bit systems. */
.globl CNAME(restorebridge),CNAME(restorebridgesize)
CNAME(restorebridge):
mtsprg1 %r31
mfmsr %r31
clrldi %r31,%r31,1 /* clear the high MSR bit (SF): 32-bit mode */
mtmsrd %r31
mfsprg1 %r31
isync
CNAME(restorebridgesize) = .-CNAME(restorebridge)
#ifdef SMP
/*
 * Processor reset exception handler. These are typically
 * the first instructions the processor executes after a
 * software reset. We do this in two bits so that we are
 * not still hanging around in the trap handling region
 * once the MMU is turned on.
 */
.globl CNAME(rstcode), CNAME(rstsize)
CNAME(rstcode):
ba cpu_reset
CNAME(rstsize) = . - CNAME(rstcode)
cpu_reset:
bl 1f /* LR := address of the embedded scratch area */
.space 124 /* in-image bootstrap stack for this AP */
1:
mflr %r1
addi %r1,%r1,(124-16)@l /* point SP near the top of that space */
lis %r3,1@l
bla CNAME(cpudep_ap_early_bootstrap)
bla CNAME(pmap_cpu_bootstrap)
bla CNAME(cpudep_ap_bootstrap)
mr %r1,%r3 /* switch to the stack returned above */
bla CNAME(machdep_ap_bootstrap)
/* Should not be reached */
9:
b 9b
#endif
/*
 * This code gets copied to all the trap vectors
 * (except ISI/DSI, ALI, and the interrupts)
 *
 * The "bla" leaves the vector address in LR; generictrap recovers the
 * exception number from it with the mask formed from r1 (0x20).
 */
.globl CNAME(trapcode),CNAME(trapsize)
CNAME(trapcode):
mtsprg1 %r1 /* save SP */
mflr %r1 /* Save the old LR in r1 */
mtsprg2 %r1 /* And then in SPRG2 */
li %r1, 0x20 /* How to get the vector from LR */
bla generictrap /* LR & SPRG3 is exception # */
CNAME(trapsize) = .-CNAME(trapcode)
/*
 * 64-bit version of trapcode. Identical, except it calls generictrap64,
 * which drops the CPU back to 32-bit bridge mode first.
 */
.globl CNAME(trapcode64)
CNAME(trapcode64):
mtsprg1 %r1 /* save SP */
mflr %r1 /* Save the old LR in r1 */
mtsprg2 %r1 /* And then in SPRG2 */
li %r1, 0x20 /* How to get the vector from LR */
bla generictrap64 /* LR & SPRG3 is exception # */
/*
 * For ALI: has to save DSISR and DAR
 *
 * Alignment-interrupt vector: like trapcode but additionally stashes
 * DAR/DSISR in PC_TEMPSAVE before joining the common path at s_trap
 * with the vector number (EXC_ALI) preloaded into SPRG3.
 */
.globl CNAME(alitrap),CNAME(alisize)
CNAME(alitrap):
mtsprg1 %r1 /* save SP */
GET_CPUINFO(%r1)
stw %r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1) /* free r28-r31 */
stw %r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
stw %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
stw %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
mfdar %r30
mfdsisr %r31
stw %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
stw %r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
mfsprg1 %r1 /* restore SP, in case of branch */
mflr %r28 /* save LR */
mfcr %r29 /* save CR */
/* Put our exception vector in SPRG3 */
li %r31, EXC_ALI
mtsprg3 %r31
/* Test whether we already had PR set */
mfsrr1 %r31
mtcr %r31
bla s_trap
CNAME(alisize) = .-CNAME(alitrap)
/*
 * Similar to the above for DSI
 * Has to handle BAT spills
 * and standard pagetable spills
 *
 * Kernel-mode faults first try to satisfy the access by loading a
 * BAT pair from battable[] for the faulting segment; if that works
 * the trap returns directly via rfi without entering C.  User-mode
 * faults and misses fall through to disitrap.
 */
.globl CNAME(dsitrap),CNAME(dsisize)
CNAME(dsitrap):
mtsprg1 %r1 /* save SP */
GET_CPUINFO(%r1)
stw %r28,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* free r28-r31 */
stw %r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
stw %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
stw %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
mfsprg1 %r1 /* restore SP */
mfcr %r29 /* save CR */
mfxer %r30 /* save XER */
mtsprg2 %r30 /* in SPRG2 */
mfsrr1 %r31 /* test kernel mode */
mtcr %r31
bt 17,1f /* branch if PSL_PR is set */
mfdar %r31 /* get fault address */
rlwinm %r31,%r31,7,25,28 /* get segment * 8 */
/* get batu */
addis %r31,%r31,CNAME(battable)@ha
lwz %r30,CNAME(battable)@l(31)
mtcr %r30
bf 30,1f /* branch if supervisor valid is
false */
/* get batl */
lwz %r31,CNAME(battable)+4@l(31)
/* We randomly use the highest two bat registers here */
mftb %r28
andi. %r28,%r28,1
bne 2f
mtdbatu 2,%r30
mtdbatl 2,%r31
b 3f
2:
mtdbatu 3,%r30
mtdbatl 3,%r31
3:
mfsprg2 %r30 /* restore XER */
mtxer %r30
mtcr %r29 /* restore CR */
mtsprg1 %r1
GET_CPUINFO(%r1)
lwz %r28,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* restore r28-r31 */
lwz %r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
lwz %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
lwz %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
mfsprg1 %r1
rfi /* return to trapped code */
1:
mflr %r28 /* save LR (SP already saved) */
bla disitrap
CNAME(dsisize) = .-CNAME(dsitrap)
/*
 * Preamble code for DSI/ISI traps
 *
 * Moves the registers parked in PC_DISISAVE into PC_TEMPSAVE (the
 * area the common path expects), captures DAR/DSISR, and, with KDB,
 * redirects apparent kernel stack overflows (DAR within a page of
 * the old SP) into the debugger before falling into realtrap.
 */
disitrap:
/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
mflr %r1
andi. %r1,%r1,0xff00
mtsprg3 %r1
GET_CPUINFO(%r1)
lwz %r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)
stw %r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
lwz %r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)
stw %r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
lwz %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
stw %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
lwz %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
stw %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
mfdar %r30
mfdsisr %r31
stw %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
stw %r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
#ifdef KDB
/* Try and detect a kernel stack overflow */
mfsrr1 %r31
mtcr %r31
bt 17,realtrap /* branch is user mode */
mfsprg1 %r31 /* get old SP */
sub. %r30,%r31,%r30 /* SP - DAR */
bge 1f
neg %r30,%r30 /* modulo value */
1: cmplwi %cr0,%r30,4096 /* is DAR within a page of SP? */
bge %cr0,realtrap /* no, too far away. */
/* Now convert this DSI into a DDB trap. */
GET_CPUINFO(%r1)
lwz %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) /* get DAR */
stw %r30,(PC_DBSAVE +CPUSAVE_AIM_DAR)(%r1) /* save DAR */
lwz %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */
stw %r30,(PC_DBSAVE +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */
lwz %r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* get r28 */
stw %r30,(PC_DBSAVE +CPUSAVE_R28)(%r1) /* save r28 */
lwz %r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) /* get r29 */
stw %r31,(PC_DBSAVE +CPUSAVE_R29)(%r1) /* save r29 */
lwz %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) /* get r30 */
stw %r30,(PC_DBSAVE +CPUSAVE_R30)(%r1) /* save r30 */
lwz %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) /* get r31 */
stw %r31,(PC_DBSAVE +CPUSAVE_R31)(%r1) /* save r31 */
b dbtrap
#endif
/* XXX need stack probe here */
realtrap:
/* Test whether we already had PR set */
mfsrr1 %r1
mtcr %r1
mfsprg1 %r1 /* restore SP (might have been
overwritten) */
bf 17,k_trap /* branch if PSL_PR is false */
GET_CPUINFO(%r1)
lwz %r1,PC_CURPCB(%r1)
RESTORE_KERN_SRS(%r30,%r31) /* enable kernel mapping */
ba s_trap
/*
 * generictrap does some standard setup for trap handling to minimize
 * the code that need be installed in the actual vectors. It expects
 * the following conditions.
 *
 * R1 - Trap vector = LR & (0xff00 | R1)
 * SPRG1 - Original R1 contents
 * SPRG2 - Original LR
 *
 * Flow: compute the exception number, decide user vs kernel entry
 * (PSL_PR in SRR1), switch user traps onto the kernel stack and
 * segment registers, build a trapframe (FRAME_SETUP), and dispatch
 * to powerpc_interrupt().  On return, pending ASTs are handled for
 * user-mode returns before the frame is torn down and rfi executed.
 */
generictrap64:
mtsprg3 %r31
mfmsr %r31
clrldi %r31,%r31,1 /* drop back to 32-bit bridge mode */
mtmsrd %r31
mfsprg3 %r31
isync
generictrap:
/* Save R1 for computing the exception vector */
mtsprg3 %r1
/* Save interesting registers */
GET_CPUINFO(%r1)
stw %r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1) /* free r28-r31 */
stw %r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
stw %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
stw %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
mfsprg1 %r1 /* restore SP, in case of branch */
mfsprg2 %r28 /* save LR */
mfcr %r29 /* save CR */
/* Compute the exception vector from the link register */
mfsprg3 %r31
ori %r31,%r31,0xff00
mflr %r30
and %r30,%r30,%r31
mtsprg3 %r30
/* Test whether we already had PR set */
mfsrr1 %r31
mtcr %r31
s_trap:
bf 17,k_trap /* branch if PSL_PR is false */
GET_CPUINFO(%r1)
u_trap:
lwz %r1,PC_CURPCB(%r1)
RESTORE_KERN_SRS(%r30,%r31) /* enable kernel mapping */
/*
 * Now the common trap catching code.
 */
k_trap:
FRAME_SETUP(PC_TEMPSAVE)
/* Call C interrupt dispatcher: */
trapagain:
addi %r3,%r1,8 /* &trapframe */
bl CNAME(powerpc_interrupt)
.globl CNAME(trapexit) /* backtrace code sentinel */
CNAME(trapexit):
/* Disable interrupts: */
mfmsr %r3
andi. %r3,%r3,~PSL_EE@l
mtmsr %r3
/* Test AST pending: */
lwz %r5,FRAME_SRR1+8(%r1)
mtcr %r5
bf 17,1f /* branch if PSL_PR is false */
GET_CPUINFO(%r3) /* get per-CPU pointer */
lwz %r4, PC_CURTHREAD(%r3) /* deref to get curthread */
lwz %r4, TD_FLAGS(%r4) /* get thread flags value */
lis %r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h
ori %r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l
and. %r4,%r4,%r5
beq 1f
mfmsr %r3 /* re-enable interrupts */
ori %r3,%r3,PSL_EE@l
mtmsr %r3
isync
addi %r3,%r1,8
bl CNAME(ast)
.globl CNAME(asttrapexit) /* backtrace code sentinel #2 */
CNAME(asttrapexit):
b trapexit /* test ast ret value ? */
1:
FRAME_LEAVE(PC_TEMPSAVE)
.globl CNAME(rfi_patch1) /* replace rfi with rfid on ppc64 */
CNAME(rfi_patch1):
rfi
.globl CNAME(rfid_patch)
CNAME(rfid_patch):
rfid
#if defined(KDB)
/*
 * Deliberate entry to dbtrap
 *
 * breakpoint() is the programmatic debugger entry: it fakes an
 * EXC_BPT trap frame (LR carries the vector number, SRR0/SRR1 the
 * resume state) and falls into dbtrap.
 */
.globl CNAME(breakpoint)
CNAME(breakpoint):
mtsprg1 %r1
mfmsr %r3
mtsrr1 %r3
andi. %r3,%r3,~(PSL_EE|PSL_ME)@l
mtmsr %r3 /* disable interrupts */
isync
GET_CPUINFO(%r3)
stw %r28,(PC_DBSAVE+CPUSAVE_R28)(%r3)
stw %r29,(PC_DBSAVE+CPUSAVE_R29)(%r3)
stw %r30,(PC_DBSAVE+CPUSAVE_R30)(%r3)
stw %r31,(PC_DBSAVE+CPUSAVE_R31)(%r3)
mflr %r28
li %r29,EXC_BPT
mtlr %r29
mfcr %r29
mtsrr0 %r28
/*
 * Now the kdb trap catching code.
 */
dbtrap:
/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
mflr %r1
andi. %r1,%r1,0xff00
mtsprg3 %r1
lis %r1,(tmpstk+TMPSTKSZ-16)@ha /* get new SP */
addi %r1,%r1,(tmpstk+TMPSTKSZ-16)@l
FRAME_SETUP(PC_DBSAVE)
/* Call C trap code: */
addi %r3,%r1,8
bl CNAME(db_trap_glue)
or. %r3,%r3,%r3 /* did KDB claim the trap? */
bne dbleave
/* This wasn't for KDB, so switch to real trap: */
lwz %r3,FRAME_EXC+8(%r1) /* save exception */
GET_CPUINFO(%r4)
stw %r3,(PC_DBSAVE+CPUSAVE_R31)(%r4)
FRAME_LEAVE(PC_DBSAVE)
mtsprg1 %r1 /* prepare for entrance to realtrap */
GET_CPUINFO(%r1)
stw %r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
stw %r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
stw %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
stw %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
mflr %r28
mfcr %r29
lwz %r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)
mtsprg3 %r31 /* SPRG3 was clobbered by FRAME_LEAVE */
mfsprg1 %r1
b realtrap
dbleave:
FRAME_LEAVE(PC_DBSAVE)
.globl CNAME(rfi_patch2) /* replace rfi with rfid on ppc64 */
CNAME(rfi_patch2):
rfi
/*
 * In case of KDB we want a separate trap catcher for it
 */
.globl CNAME(dblow),CNAME(dbsize)
CNAME(dblow):
mtsprg1 %r1 /* save SP */
mtsprg2 %r29 /* save r29 */
mfcr %r29 /* save CR in r29 */
mfsrr1 %r1
mtcr %r1
bf 17,1f /* branch if privileged */
/* Unprivileged case */
mtcr %r29 /* put the condition register back */
mfsprg2 %r29 /* ... and r29 */
mflr %r1 /* save LR */
mtsprg2 %r1 /* And then in SPRG2 */
li %r1, 0 /* How to get the vector from LR */
bla generictrap /* and we look like a generic trap */
1:
/* Privileged, so drop to KDB */
GET_CPUINFO(%r1)
stw %r28,(PC_DBSAVE+CPUSAVE_R28)(%r1) /* free r28 */
mfsprg2 %r28 /* r29 holds cr... */
stw %r28,(PC_DBSAVE+CPUSAVE_R29)(%r1) /* free r29 */
stw %r30,(PC_DBSAVE+CPUSAVE_R30)(%r1) /* free r30 */
stw %r31,(PC_DBSAVE+CPUSAVE_R31)(%r1) /* free r31 */
mflr %r28 /* save LR */
bla dbtrap
CNAME(dbsize) = .-CNAME(dblow)
#endif /* KDB */

View File

@ -0,0 +1,634 @@
/* $FreeBSD$ */
/* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $ */
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
* Copyright (C) 1995, 1996 TooLs GmbH.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by TooLs GmbH.
* 4. The name of TooLs GmbH may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NOTICE: This is not a standalone file. to use it, #include it in
* your port's locore.S, like so:
*
* #include <powerpc/aim/trap_subr.S>
*/
/*
* Save/restore segment registers
*/
/*
 * Restore SRs for a pmap
 *
 * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache
 *
 * Invalidates the current SLB contents (slbia, plus an explicit slbie
 * of entry 0's ESID), then walks the 64-slot SLB cache at r28,
 * installing every entry whose SLBE word (offset 8) is non-zero.
 * Each cache slot is 16 bytes: SLBV at offset 0, SLBE at offset 8.
 */
restoresrs:
li %r29, 0 /* Set the counter to zero */
slbia
slbmfee %r31,%r29
clrrdi %r31,%r31,28
slbie %r31
instslb:
ld %r31, 8(%r28); /* Load SLBE */
cmpli 0, %r31, 0; /* If SLBE is not valid, get the next */
beq nslb;
ld %r30, 0(%r28) /* Load SLBV */
slbmte %r30, %r31; /* Install SLB entry */
nslb:
addi %r28, %r28, 16; /* Advance */
addi %r29, %r29, 1;
cmpli 0, %r29, 64; /* Repeat if we are not at the end */
blt instslb;
blr;
/*
 * User SRs are loaded through a pointer to the current pmap's
 * per-CPU user SLB cache (PC_USERSLB).  Clobbers r28-r31 and LR.
 */
#define RESTORE_USER_SRS() \
GET_CPUINFO(%r28); \
ld %r28,PC_USERSLB(%r28); \
bl restoresrs;
/*
 * Kernel SRs are loaded directly from the per-CPU kernel SLB cache
 * (PC_KERNSLB).  Clobbers r28-r31 and LR.
 */
#define RESTORE_KERN_SRS() \
GET_CPUINFO(%r28); \
addi %r28,%r28,PC_KERNSLB; \
bl restoresrs;
/*
 * FRAME_SETUP assumes:
 *	SPRG1		SP (1)
 *	SPRG3		trap type
 *	savearea	r27-r31,DAR,DSISR   (DAR & DSISR only for DSI traps)
 *	r28		LR
 *	r29		CR
 *	r30		scratch
 *	r31		scratch
 *	r1		kernel stack
 *	SRR0/1		as at start of trap
 *
 * 64-bit variant: builds the trapframe at offset 48 (past the larger
 * 64-bit frame header) and reserves an extra 288 bytes below the
 * frame, then fills it from the registers stashed in "savearea".
 */
#define FRAME_SETUP(savearea) \
/* Have to enable translation to allow access of kernel stack: */ \
GET_CPUINFO(%r31); \
mfsrr0 %r30; \
std %r30,(savearea+CPUSAVE_SRR0)(%r31); /* save SRR0 */ \
mfsrr1 %r30; \
std %r30,(savearea+CPUSAVE_SRR1)(%r31); /* save SRR1 */ \
mfmsr %r30; \
ori %r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */ \
mtmsr %r30; /* stack can now be accessed */ \
isync; \
mfsprg1 %r31; /* get saved SP */ \
stdu %r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \
std %r0, FRAME_0+48(%r1); /* save r0 in the trapframe */ \
std %r31,FRAME_1+48(%r1); /* save SP " " */ \
std %r2, FRAME_2+48(%r1); /* save r2 " " */ \
std %r28,FRAME_LR+48(%r1); /* save LR " " */ \
std %r29,FRAME_CR+48(%r1); /* save CR " " */ \
GET_CPUINFO(%r2); \
ld %r27,(savearea+CPUSAVE_R27)(%r2); /* get saved r27 */ \
ld %r28,(savearea+CPUSAVE_R28)(%r2); /* get saved r28 */ \
ld %r29,(savearea+CPUSAVE_R29)(%r2); /* get saved r29 */ \
ld %r30,(savearea+CPUSAVE_R30)(%r2); /* get saved r30 */ \
ld %r31,(savearea+CPUSAVE_R31)(%r2); /* get saved r31 */ \
std %r3, FRAME_3+48(%r1); /* save r3-r31 */ \
std %r4, FRAME_4+48(%r1); \
std %r5, FRAME_5+48(%r1); \
std %r6, FRAME_6+48(%r1); \
std %r7, FRAME_7+48(%r1); \
std %r8, FRAME_8+48(%r1); \
std %r9, FRAME_9+48(%r1); \
std %r10, FRAME_10+48(%r1); \
std %r11, FRAME_11+48(%r1); \
std %r12, FRAME_12+48(%r1); \
std %r13, FRAME_13+48(%r1); \
std %r14, FRAME_14+48(%r1); \
std %r15, FRAME_15+48(%r1); \
std %r16, FRAME_16+48(%r1); \
std %r17, FRAME_17+48(%r1); \
std %r18, FRAME_18+48(%r1); \
std %r19, FRAME_19+48(%r1); \
std %r20, FRAME_20+48(%r1); \
std %r21, FRAME_21+48(%r1); \
std %r22, FRAME_22+48(%r1); \
std %r23, FRAME_23+48(%r1); \
std %r24, FRAME_24+48(%r1); \
std %r25, FRAME_25+48(%r1); \
std %r26, FRAME_26+48(%r1); \
std %r27, FRAME_27+48(%r1); \
std %r28, FRAME_28+48(%r1); \
std %r29, FRAME_29+48(%r1); \
std %r30, FRAME_30+48(%r1); \
std %r31, FRAME_31+48(%r1); \
ld %r28,(savearea+CPUSAVE_AIM_DAR)(%r2); /* saved DAR */ \
ld %r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\
ld %r30,(savearea+CPUSAVE_SRR0)(%r2); /* saved SRR0 */ \
ld %r31,(savearea+CPUSAVE_SRR1)(%r2); /* saved SRR1 */ \
mfxer %r3; \
mfctr %r4; \
mfsprg3 %r5; \
std %r3, FRAME_XER+48(1); /* save xer/ctr/exc */ \
std %r4, FRAME_CTR+48(1); \
std %r5, FRAME_EXC+48(1); \
std %r28,FRAME_AIM_DAR+48(1); \
std %r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */ \
std %r30,FRAME_SRR0+48(1); \
std %r31,FRAME_SRR1+48(1)
/*
 * FRAME_LEAVE (64-bit): undoes FRAME_SETUP — reloads all registers
 * from the trapframe, disables translation/interrupts/recoverability,
 * and for user-mode returns (PSL_PR set in saved SRR1) reloads the
 * user SLB via RESTORE_USER_SRS, preserving r27-r31 and LR around
 * the call.  Leaves SRR0/SRR1 ready for the following rfid.
 */
#define FRAME_LEAVE(savearea) \
/* Now restore regs: */ \
ld %r2,FRAME_SRR0+48(%r1); \
ld %r3,FRAME_SRR1+48(%r1); \
ld %r4,FRAME_CTR+48(%r1); \
ld %r5,FRAME_XER+48(%r1); \
ld %r6,FRAME_LR+48(%r1); \
GET_CPUINFO(%r7); \
std %r2,(savearea+CPUSAVE_SRR0)(%r7); /* save SRR0 */ \
std %r3,(savearea+CPUSAVE_SRR1)(%r7); /* save SRR1 */ \
ld %r7,FRAME_CR+48(%r1); \
mtctr %r4; \
mtxer %r5; \
mtlr %r6; \
mtsprg1 %r7; /* save cr */ \
ld %r31,FRAME_31+48(%r1); /* restore r0-31 */ \
ld %r30,FRAME_30+48(%r1); \
ld %r29,FRAME_29+48(%r1); \
ld %r28,FRAME_28+48(%r1); \
ld %r27,FRAME_27+48(%r1); \
ld %r26,FRAME_26+48(%r1); \
ld %r25,FRAME_25+48(%r1); \
ld %r24,FRAME_24+48(%r1); \
ld %r23,FRAME_23+48(%r1); \
ld %r22,FRAME_22+48(%r1); \
ld %r21,FRAME_21+48(%r1); \
ld %r20,FRAME_20+48(%r1); \
ld %r19,FRAME_19+48(%r1); \
ld %r18,FRAME_18+48(%r1); \
ld %r17,FRAME_17+48(%r1); \
ld %r16,FRAME_16+48(%r1); \
ld %r15,FRAME_15+48(%r1); \
ld %r14,FRAME_14+48(%r1); \
ld %r13,FRAME_13+48(%r1); \
ld %r12,FRAME_12+48(%r1); \
ld %r11,FRAME_11+48(%r1); \
ld %r10,FRAME_10+48(%r1); \
ld %r9, FRAME_9+48(%r1); \
ld %r8, FRAME_8+48(%r1); \
ld %r7, FRAME_7+48(%r1); \
ld %r6, FRAME_6+48(%r1); \
ld %r5, FRAME_5+48(%r1); \
ld %r4, FRAME_4+48(%r1); \
ld %r3, FRAME_3+48(%r1); \
ld %r2, FRAME_2+48(%r1); \
ld %r0, FRAME_0+48(%r1); \
ld %r1, FRAME_1+48(%r1); \
/* Can't touch %r1 from here on */ \
mtsprg2 %r2; /* save r2 & r3 */ \
mtsprg3 %r3; \
/* Disable translation, machine check and recoverability: */ \
mfmsr %r2; \
andi. %r2,%r2,~(PSL_DR|PSL_IR|PSL_EE|PSL_ME|PSL_RI)@l; \
mtmsr %r2; \
isync; \
/* Decide whether we return to user mode: */ \
GET_CPUINFO(%r2); \
ld %r3,(savearea+CPUSAVE_SRR1)(%r2); \
mtcr %r3; \
bf 17,1f; /* branch if PSL_PR is false */ \
/* Restore user SRs */ \
GET_CPUINFO(%r3); \
std %r27,(savearea+CPUSAVE_R27)(%r3); \
std %r28,(savearea+CPUSAVE_R28)(%r3); \
std %r29,(savearea+CPUSAVE_R29)(%r3); \
std %r30,(savearea+CPUSAVE_R30)(%r3); \
std %r31,(savearea+CPUSAVE_R31)(%r3); \
mflr %r27; /* preserve LR */ \
RESTORE_USER_SRS(); /* uses r28-r31 */ \
mtlr %r27; \
ld %r31,(savearea+CPUSAVE_R31)(%r3); \
ld %r30,(savearea+CPUSAVE_R30)(%r3); \
ld %r29,(savearea+CPUSAVE_R29)(%r3); \
ld %r28,(savearea+CPUSAVE_R28)(%r3); \
ld %r27,(savearea+CPUSAVE_R27)(%r3); \
1: mfsprg1 %r2; /* restore cr */ \
mtcr %r2; \
GET_CPUINFO(%r2); \
ld %r3,(savearea+CPUSAVE_SRR0)(%r2); /* restore srr0 */ \
mtsrr0 %r3; \
ld %r3,(savearea+CPUSAVE_SRR1)(%r2); /* restore srr1 */ \
mtsrr1 %r3; \
mfsprg2 %r2; /* restore r2 & r3 */ \
mfsprg3 %r3
#ifdef SMP
/*
 * Processor reset exception handler. These are typically
 * the first instructions the processor executes after a
 * software reset. We do this in two bits so that we are
 * not still hanging around in the trap handling region
 * once the MMU is turned on.
 */
	.globl	CNAME(rstcode), CNAME(rstsize)
CNAME(rstcode):
	/* Explicitly set MSR[SF] */
	/*
	 * MSR bit 0 (big-endian numbering) is SF, the 64-bit mode bit;
	 * insrdi deposits the 1 from r8 into that single bit so the AP
	 * runs in 64-bit mode from here on.
	 */
	mfmsr	%r9
	li	%r8,1
	insrdi	%r9,%r8,1,0
	mtmsrd	%r9
	isync
	/* Absolute branch: this stub is copied to the reset vector,
	 * so a relative branch would not reach cpu_reset. */
	ba	cpu_reset
CNAME(rstsize) = . - CNAME(rstcode)

cpu_reset:
	/* Borrow the boot-time temporary stack; 48 bytes appears to be
	 * the minimal ABI frame reservation — TODO confirm against
	 * the rest of this file. */
	lis	%r1,(tmpstk+TMPSTKSZ-48)@ha /* get new SP */
	addi	%r1,%r1,(tmpstk+TMPSTKSZ-48)@l

	/* Load the kernel TOC pointer into r2 (64-bit ELF ABI). */
	lis	%r3,tocbase@ha
	ld	%r2,tocbase@l(%r3)
	lis	%r3,1@l
	/* The leading dots are the function entry points behind the
	 * ppc64 ELF function descriptors. */
	bl	CNAME(.cpudep_ap_early_bootstrap) /* Set PCPU */
	nop
	bl	CNAME(.pmap_cpu_bootstrap) /* Turn on virtual memory */
	nop
	bl	CNAME(.cpudep_ap_bootstrap) /* Set up PCPU and stack */
	nop
	mr	%r1,%r3 /* Use new stack */
	bl	CNAME(.machdep_ap_bootstrap) /* And away! */
	nop

	/* Should not be reached: spin forever if the AP falls through. */
9:
	b	9b
#endif
/*
* This code gets copied to all the trap vectors
* (except ISI/DSI, ALI, and the interrupts)
*/
	/*
	 * Generic trap stub: copied into each exception vector at boot.
	 * It must be position-independent apart from the absolute
	 * branch, and may only clobber SPRG1/SPRG2 before reaching
	 * generictrap, which derives the vector number from LR.
	 */
	.globl	CNAME(trapcode),CNAME(trapsize)
CNAME(trapcode):
	mtsprg1	%r1 /* save SP */
	mflr	%r1 /* Save the old LR in r1 */
	mtsprg2	%r1 /* And then in SPRG2 */
	li	%r1, 0xA0 /* How to get the vector from LR */
	/* bla sets LR to the address inside the vector; generictrap
	 * masks it with (0xff00 | r1) to recover the exception #. */
	bla	generictrap /* LR & SPRG3 is exception # */
CNAME(trapsize) = .-CNAME(trapcode)
/*
* For ALI: has to save DSISR and DAR
*/
	/*
	 * Alignment (ALI) trap entry. Unlike the generic stub this must
	 * snapshot DAR and DSISR into the per-CPU temp save area before
	 * any other memory access can disturb them.
	 */
	.globl	CNAME(alitrap),CNAME(alisize)
CNAME(alitrap):
	mtsprg1	%r1 /* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1) /* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	/* Capture fault address and cause while still fresh. */
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
	mfsprg1	%r1 /* restore SP, in case of branch */
	mflr	%r28 /* save LR */
	mfcr	%r29 /* save CR */

	/* Put our exception vector in SPRG3 */
	li	%r31, EXC_ALI
	mtsprg3	%r31

	/* Test whether we already had PR set */
	/* CR bit 17 mirrors SRR1[PR] after mtcr; s_trap branches on it. */
	mfsrr1	%r31
	mtcr	%r31
	bla	s_trap
CNAME(alisize) = .-CNAME(alitrap)
/*
* Similar to the above for DSI
* Has to handle BAT spills
* and standard pagetable spills
*/
	/*
	 * DSI (data storage interrupt) entry. Uses the dedicated
	 * PC_DISISAVE area rather than PC_TEMPSAVE so that a nested
	 * fault inside the generic path cannot overwrite the state.
	 */
	.globl	CNAME(dsitrap),CNAME(dsisize)
CNAME(dsitrap):
	mtsprg1	%r1 /* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* free r27-r31 */
	std	%r28,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	mfsprg1	%r1 /* restore SP */
	mfcr	%r29 /* save CR */
	mfxer	%r30 /* save XER */
	mtsprg2	%r30 /* in SPRG2 */
	/* Copy SRR1 into CR so disitrap/realtrap can branch on PR. */
	mfsrr1	%r31 /* test kernel mode */
	mtcr	%r31
	mflr	%r28 /* save LR (SP already saved) */
	bla	disitrap
CNAME(dsisize) = .-CNAME(dsitrap)
/*
* Preamble code for DSI/ISI traps
*/
/*
 * Common DSI/ISI preamble: records the vector, migrates the saved
 * registers from PC_DISISAVE into PC_TEMPSAVE (where the generic
 * trap path expects them), captures DAR/DSISR, and — with KDB —
 * converts a fault within a page of the kernel SP into a debugger
 * trap so stack overflows are diagnosable.
 */
disitrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1
	/* Copy DISISAVE -> TEMPSAVE, alternating r31/r30 as scratch. */
	GET_CPUINFO(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)

#ifdef KDB
	/* Try and detect a kernel stack overflow */
	mfsrr1	%r31
	mtcr	%r31
	bt	17,realtrap /* branch is user mode */
	mfsprg1	%r31 /* get old SP */
	sub.	%r30,%r31,%r30 /* SP - DAR */
	bge	1f
	neg	%r30,%r30 /* modulo value */
1:	cmpldi	%cr0,%r30,4096 /* is DAR within a page of SP? */
	bge	%cr0,realtrap /* no, too far away. */

	/* Now convert this DSI into a DDB trap. */
	/* Replicate the TEMPSAVE/DISISAVE state into PC_DBSAVE, which
	 * is what FRAME_SETUP(PC_DBSAVE) in dbtrap consumes. */
	GET_CPUINFO(%r1)
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) /* get DAR */
	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DAR)(%r1) /* save DAR */
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */
	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* get r27 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R27)(%r1) /* save r27 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* get r28 */
	std	%r30,(PC_DBSAVE  +CPUSAVE_R28)(%r1) /* save r28 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) /* get r29 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R29)(%r1) /* save r29 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) /* get r30 */
	std	%r30,(PC_DBSAVE  +CPUSAVE_R30)(%r1) /* save r30 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) /* get r31 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R31)(%r1) /* save r31 */
	b	dbtrap
#endif
	/* XXX need stack probe here */
realtrap:
/* Test whether we already had PR set */
	mfsrr1	%r1
	mtcr	%r1
	mfsprg1	%r1 /* restore SP (might have been
			overwritten) */
	bf	17,k_trap /* branch if PSL_PR is false */
	/* Came from user mode: switch onto the thread's kernel PCB
	 * and re-establish the kernel segment mappings. */
	GET_CPUINFO(%r1)
	ld	%r1,PC_CURPCB(%r1)
	/* RESTORE_KERN_SRS scribbles on r28/LR and r29 (per the
	 * generictrap path below), so park them first. */
	mr	%r27,%r28 /* Save LR, r29 */
	mtsprg2	%r29
	RESTORE_KERN_SRS() /* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
	ba	s_trap
/*
* generictrap does some standard setup for trap handling to minimize
* the code that need be installed in the actual vectors. It expects
* the following conditions.
*
* R1 - Trap vector = LR & (0xff00 | R1)
* SPRG1 - Original R1 contents
* SPRG2 - Original LR
*/
/*
 * generictrap does some standard setup for trap handling to minimize
 * the code that need be installed in the actual vectors. On entry
 * (see the contract in the comment block above): r1 holds the
 * per-vector mask bits, SPRG1 the original SP, SPRG2 the original LR.
 * On exit to k_trap: r28 = saved LR, r29 = saved CR, SPRG3 = vector,
 * CR mirrors SRR1, and the TEMPSAVE area holds r27-r31 and DAR.
 */
generictrap:
	/* Save R1 for computing the exception vector */
	mtsprg3	%r1

	/* Save interesting registers */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1) /* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	/* DAR saved unconditionally; only meaningful for storage traps. */
	mfdar	%r30
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	mfsprg1	%r1 /* restore SP, in case of branch */
	mfsprg2	%r28 /* save LR */
	mfcr	%r29 /* save CR */

	/* Compute the exception vector from the link register */
	/* vector = LR & (0xff00 | per-vector bits stashed in SPRG3) */
	mfsprg3	%r31
	ori	%r31,%r31,0xff00
	mflr	%r30
	and	%r30,%r30,%r31
	mtsprg3	%r30

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31

s_trap:
	bf	17,k_trap /* branch if PSL_PR is false */
	/* User-mode entry: switch to the kernel PCB stack and restore
	 * kernel segment registers before running C code. */
	GET_CPUINFO(%r1)
u_trap:
	ld	%r1,PC_CURPCB(%r1)
	mr	%r27,%r28 /* Save LR, r29 */
	mtsprg2	%r29
	RESTORE_KERN_SRS() /* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
/*
* Now the common trap catching code.
*/
/*
 * Common trap catching code: build a trapframe, dispatch to the C
 * handler, then loop servicing ASTs for user-mode returns before
 * restoring state and rfid'ing back.
 */
k_trap:
	FRAME_SETUP(PC_TEMPSAVE)
/* Call C interrupt dispatcher: */
trapagain:
	/* Reload the kernel TOC (r2) — the 64-bit ABI requires it and
	 * the trap may have arrived from a context with a foreign r2. */
	lis	%r3,tocbase@ha
	ld	%r2,tocbase@l(%r3)
	/* r3 = &trapframe; the frame sits 48 bytes above SP — presumably
	 * the ABI frame header size, matching FRAME_SETUP/FRAME_LEAVE. */
	addi	%r3,%r1,48
	bl	CNAME(.powerpc_interrupt)
	nop

	.globl	CNAME(trapexit) /* backtrace code sentinel */
CNAME(trapexit):
/* Disable interrupts: */
	mfmsr	%r3
	andi.	%r3,%r3,~PSL_EE@l
	mtmsr	%r3
/* Test AST pending: */
	/* Only user-mode returns (SRR1[PR] set -> CR bit 17) get ASTs. */
	ld	%r5,FRAME_SRR1+48(%r1)
	mtcr	%r5
	bf	17,1f /* branch if PSL_PR is false */

	GET_CPUINFO(%r3) /* get per-CPU pointer */
	ld	%r4, PC_CURTHREAD(%r3) /* deref to get curthread */
	lwz	%r4, TD_FLAGS(%r4) /* get thread flags value */
	lis	%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h
	ori	%r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l
	and.	%r4,%r4,%r5
	beq	1f
	/* AST pending: run ast() with interrupts on, then re-check
	 * from trapexit (ast() may set the flags again). */
	mfmsr	%r3 /* re-enable interrupts */
	ori	%r3,%r3,PSL_EE@l
	mtmsr	%r3
	isync
	lis	%r3,tocbase@ha
	ld	%r2,tocbase@l(%r3)
	addi	%r3,%r1,48
	bl	CNAME(.ast)
	nop
	.globl	CNAME(asttrapexit) /* backtrace code sentinel #2 */
CNAME(asttrapexit):
	b	trapexit /* test ast ret value ? */
1:
	FRAME_LEAVE(PC_TEMPSAVE)
	rfid
#if defined(KDB)
/*
* Deliberate entry to dbtrap
*/
/*
 * Deliberate entry to dbtrap: called from C (kdb) to drop into the
 * debugger. Fakes up the state a real trap would have left: SRR1 =
 * pre-trap MSR, SRR0 = caller's LR, LR = EXC_BPT so dbtrap derives
 * the breakpoint vector, r28/r29 = saved LR/CR as dbtrap expects.
 */
ASENTRY(breakpoint)
	mtsprg1	%r1
	mfmsr	%r3
	mtsrr1	%r3
	andi.	%r3,%r3,~(PSL_EE|PSL_ME)@l
	mtmsr	%r3 /* disable interrupts */
	isync
	GET_CPUINFO(%r3)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r3)
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r3)
	std	%r29,(PC_DBSAVE+CPUSAVE_R29)(%r3)
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r3)
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r3)
	mflr	%r28
	li	%r29,EXC_BPT
	mtlr	%r29
	mfcr	%r29
	mtsrr0	%r28
/*
* Now the kdb trap catching code.
*/
/*
 * kdb trap catching code: runs db_trap_glue() on the dedicated
 * temporary stack. If the debugger declines the trap, rebuilds the
 * state the normal path expects and re-enters via realtrap.
 */
dbtrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	/* Run on tmpstk: the faulting context's stack may be the very
	 * thing being debugged (e.g. stack overflow). */
	lis	%r1,(tmpstk+TMPSTKSZ-48)@ha /* get new SP */
	addi	%r1,%r1,(tmpstk+TMPSTKSZ-48)@l

	FRAME_SETUP(PC_DBSAVE)
/* Call C trap code: */
	lis	%r3,tocbase@ha
	ld	%r2,tocbase@l(%r3)
	addi	%r3,%r1,48
	bl	CNAME(.db_trap_glue)
	nop
	/* Nonzero return = KDB handled it; unwind via dbleave. */
	or.	%r3,%r3,%r3
	bne	dbleave

	/* This wasn't for KDB, so switch to real trap: */
	ld	%r3,FRAME_EXC+48(%r1) /* save exception */
	GET_CPUINFO(%r4)
	std	%r3,(PC_DBSAVE+CPUSAVE_R31)(%r4)
	FRAME_LEAVE(PC_DBSAVE)

	/* Recreate realtrap's entry conditions: SPRG1 = SP, TEMPSAVE
	 * holds r27-r31, r28 = LR, r29 = CR, SPRG3 = vector. */
	mtsprg1	%r1 /* prepare for entrance to realtrap */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mflr	%r28
	mfcr	%r29
	ld	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)
	mtsprg3	%r31 /* SPRG3 was clobbered by FRAME_LEAVE */
	mfsprg1	%r1
	b	realtrap
dbleave:
	FRAME_LEAVE(PC_DBSAVE)
	rfid
/*
* In case of KDB we want a separate trap catcher for it
*/
.globl CNAME(dblow),CNAME(dbsize)
	/*
	 * KDB low-level vector stub: installed on the trace/breakpoint
	 * vectors. User-mode traps are forwarded to generictrap (with
	 * mask bits 0 so the vector number comes purely from LR);
	 * kernel-mode traps drop straight into the debugger.
	 */
	.globl	CNAME(dblow),CNAME(dbsize)
CNAME(dblow):
	mtsprg1	%r1 /* save SP */
	mtsprg2	%r29 /* save r29 */
	mfcr	%r29 /* save CR in r29 */
	mfsrr1	%r1
	mtcr	%r1
	bf	17,1f /* branch if privileged */

	/* Unprivileged case */
	mtcr	%r29 /* put the condition register back */
	mfsprg2	%r29 /* ... and r29 */
	mflr	%r1 /* save LR */
	mtsprg2	%r1 /* And then in SPRG2 */
	li	%r1, 0 /* How to get the vector from LR */

	bla	generictrap /* and we look like a generic trap */
1:
	/* Privileged, so drop to KDB */
	GET_CPUINFO(%r1)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r1) /* free r27 */
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r1) /* free r28 */
	mfsprg2	%r28 /* r29 holds cr... */
	std	%r28,(PC_DBSAVE+CPUSAVE_R29)(%r1) /* free r29 */
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1) /* free r30 */
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1) /* free r31 */
	mflr	%r28 /* save LR */
	bla	dbtrap
CNAME(dbsize) = .-CNAME(dblow)
#endif /* KDB */

View File

@ -81,7 +81,6 @@
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sf_buf.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/unistd.h>
@ -131,6 +130,10 @@ static u_int sf_buf_alloc_want;
*/
static struct mtx sf_buf_lock;
#ifdef __powerpc64__
extern uintptr_t tocbase;
#endif
/*
* Finish a fork operation, with process p2 nearly set up.
@ -147,7 +150,8 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
KASSERT(td1 == curthread || td1 == &thread0,
("cpu_fork: p1 not curproc and not proc0"));
CTR3(KTR_PROC, "cpu_fork: called td1=%08x p2=%08x flags=%x", (u_int)td1, (u_int)p2, flags);
CTR3(KTR_PROC, "cpu_fork: called td1=%p p2=%p flags=%x",
td1, p2, flags);
if ((flags & RFPROC) == 0)
return;
@ -155,7 +159,7 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
p1 = td1->td_proc;
pcb = (struct pcb *)((td2->td_kstack +
td2->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x2fU);
td2->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x2fUL);
td2->td_pcb = pcb;
/* Copy the pcb */
@ -178,13 +182,22 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
cf = (struct callframe *)tf - 1;
memset(cf, 0, sizeof(struct callframe));
#ifdef __powerpc64__
cf->cf_toc = tocbase;
#endif
cf->cf_func = (register_t)fork_return;
cf->cf_arg0 = (register_t)td2;
cf->cf_arg1 = (register_t)tf;
pcb->pcb_sp = (register_t)cf;
#ifdef __powerpc64__
pcb->pcb_lr = ((register_t *)fork_trampoline)[0];
pcb->pcb_toc = ((register_t *)fork_trampoline)[1];
#else
pcb->pcb_lr = (register_t)fork_trampoline;
pcb->pcb_cpu.aim.usr = kernel_pmap->pm_sr[USER_SR];
#endif
pcb->pcb_cpu.aim.usr_vsid = 0;
pcb->pcb_cpu.aim.usr_esid = 0;
/* Setup to release spin count in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
@ -209,8 +222,8 @@ cpu_set_fork_handler(td, func, arg)
{
struct callframe *cf;
CTR4(KTR_PROC, "%s called with td=%08x func=%08x arg=%08x",
__func__, (u_int)td, (u_int)func, (u_int)arg);
CTR4(KTR_PROC, "%s called with td=%p func=%p arg=%p",
__func__, td, func, arg);
cf = (struct callframe *)td->td_pcb->pcb_sp;
@ -384,7 +397,9 @@ is_physical_memory(addr)
}
/*
* Threading functions
* CPU threading functions related to the VM layer. These could be used
* to map the SLB bits required for the kernel stack instead of forcing a
* fixed-size KVA.
*/
void

View File

@ -34,7 +34,6 @@
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>

View File

@ -208,7 +208,7 @@ cpu_e500_startup(void *dummy)
for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
size = phys_avail[indx + 1] - phys_avail[indx];
printf("0x%08x - 0x%08x, %d bytes (%d pages)\n",
printf("0x%08x - 0x%08x, %d bytes (%ld pages)\n",
phys_avail[indx], phys_avail[indx + 1] - 1,
size, size / PAGE_SIZE);
}

View File

@ -64,7 +64,6 @@
#include <machine/trap.h>
#include <machine/param.h>
#include <machine/psl.h>
#include <machine/asm.h>
#include <machine/spr.h>

View File

@ -380,7 +380,7 @@ is_physical_memory(vm_offset_t addr)
}
/*
* Thread functions
* CPU threading functions related to VM.
*/
void

View File

@ -3,8 +3,6 @@
#
# $FreeBSD$
machine powerpc
# Pseudo devices.
device mem # Memory and kernel memory devices

View File

@ -21,6 +21,8 @@
cpu AIM
ident GENERIC
machine powerpc powerpc
makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols
# Platform support

View File

@ -7,6 +7,8 @@
cpu E500
ident MPC85XX
machine powerpc powerpc
makeoptions DEBUG="-Wa,-me500 -g"
makeoptions NO_MODULES=yes

View File

@ -8,6 +8,8 @@
#####################################################################
# CPU OPTIONS
machine powerpc powerpc
#
# You must specify at least one CPU (the one you intend to run on).
cpu AIM

View File

@ -108,15 +108,20 @@ static void
write_scom(register_t address, uint64_t value)
{
register_t msr;
#ifndef __powerpc64__
register_t hi, lo, scratch;
hi = (value >> 32) & 0xffffffff;
lo = value & 0xffffffff;
#endif
msr = mfmsr();
mtmsr(msr & ~PSL_EE); isync();
#ifdef __powerpc64__
mtspr(SPR_SCOMD, value);
#else
hi = (value >> 32) & 0xffffffff;
lo = value & 0xffffffff;
mtspr64(SPR_SCOMD, hi, lo, scratch);
#endif
isync();
mtspr(SPR_SCOMC, address | SCOMC_WRITE);
isync();

View File

@ -46,7 +46,7 @@
* for all data types (int, long, ...). The result is unsigned int
* and must be cast to any desired pointer type.
*/
#define _ALIGNBYTES (sizeof(int) - 1)
#define _ALIGN(p) (((unsigned)(p) + _ALIGNBYTES) & ~_ALIGNBYTES)
#define _ALIGNBYTES (sizeof(register_t) - 1)
#define _ALIGN(p) (((uintptr_t)(p) + _ALIGNBYTES) & ~_ALIGNBYTES)
#endif /* !_POWERPC_INCLUDE__ALIGN_H_ */

View File

@ -31,16 +31,18 @@
#ifndef POWERPC_INCLUDE__BUS_H
#define POWERPC_INCLUDE__BUS_H
#include <vm/vm_param.h>
/*
* Bus address and size types
*/
typedef u_int32_t bus_addr_t;
typedef u_int32_t bus_size_t;
typedef vm_paddr_t bus_addr_t;
typedef vm_size_t bus_size_t;
/*
* Access methods for bus resources and address space.
*/
typedef struct bus_space *bus_space_tag_t;
typedef u_int32_t bus_space_handle_t;
typedef vm_offset_t bus_space_handle_t;
#endif /* POWERPC_INCLUDE__BUS_H */

View File

@ -37,177 +37,185 @@
* Macros for format specifiers.
*/
#ifdef __powerpc64__
#define PRI64 "l"
#define PRIreg "l"
#else
#define PRI64 "ll"
#define PRIreg
#endif
/* fprintf(3) macros for signed integers. */
#define PRId8 "d" /* int8_t */
#define PRId16 "d" /* int16_t */
#define PRId32 "d" /* int32_t */
#define PRId64 "lld" /* int64_t */
#define PRId64 PRI64"d" /* int64_t */
#define PRIdLEAST8 "d" /* int_least8_t */
#define PRIdLEAST16 "d" /* int_least16_t */
#define PRIdLEAST32 "d" /* int_least32_t */
#define PRIdLEAST64 "lld" /* int_least64_t */
#define PRIdLEAST64 PRI64"d" /* int_least64_t */
#define PRIdFAST8 "d" /* int_fast8_t */
#define PRIdFAST16 "d" /* int_fast16_t */
#define PRIdFAST32 "d" /* int_fast32_t */
#define PRIdFAST64 "lld" /* int_fast64_t */
#define PRIdFAST64 PRI64"d" /* int_fast64_t */
#define PRIdMAX "jd" /* intmax_t */
#define PRIdPTR "d" /* intptr_t */
#define PRIdPTR PRIreg"d" /* intptr_t */
#define PRIi8 "i" /* int8_t */
#define PRIi16 "i" /* int16_t */
#define PRIi32 "i" /* int32_t */
#define PRIi64 "lli" /* int64_t */
#define PRIi64 PRI64"i" /* int64_t */
#define PRIiLEAST8 "i" /* int_least8_t */
#define PRIiLEAST16 "i" /* int_least16_t */
#define PRIiLEAST32 "i" /* int_least32_t */
#define PRIiLEAST64 "lli" /* int_least64_t */
#define PRIiLEAST64 PRI64"i" /* int_least64_t */
#define PRIiFAST8 "i" /* int_fast8_t */
#define PRIiFAST16 "i" /* int_fast16_t */
#define PRIiFAST32 "i" /* int_fast32_t */
#define PRIiFAST64 "lli" /* int_fast64_t */
#define PRIiFAST64 PRI64"i" /* int_fast64_t */
#define PRIiMAX "ji" /* intmax_t */
#define PRIiPTR "i" /* intptr_t */
#define PRIiPTR PRIreg"i" /* intptr_t */
/* fprintf(3) macros for unsigned integers. */
#define PRIo8 "o" /* uint8_t */
#define PRIo16 "o" /* uint16_t */
#define PRIo32 "o" /* uint32_t */
#define PRIo64 "llo" /* uint64_t */
#define PRIo64 PRI64"o" /* uint64_t */
#define PRIoLEAST8 "o" /* uint_least8_t */
#define PRIoLEAST16 "o" /* uint_least16_t */
#define PRIoLEAST32 "o" /* uint_least32_t */
#define PRIoLEAST64 "llo" /* uint_least64_t */
#define PRIoLEAST64 PRI64"o" /* uint_least64_t */
#define PRIoFAST8 "o" /* uint_fast8_t */
#define PRIoFAST16 "o" /* uint_fast16_t */
#define PRIoFAST32 "o" /* uint_fast32_t */
#define PRIoFAST64 "llo" /* uint_fast64_t */
#define PRIoFAST64 PRI64"o" /* uint_fast64_t */
#define PRIoMAX "jo" /* uintmax_t */
#define PRIoPTR "o" /* uintptr_t */
#define PRIoPTR PRIreg"o" /* uintptr_t */
#define PRIu8 "u" /* uint8_t */
#define PRIu16 "u" /* uint16_t */
#define PRIu32 "u" /* uint32_t */
#define PRIu64 "llu" /* uint64_t */
#define PRIu64 PRI64"u" /* uint64_t */
#define PRIuLEAST8 "u" /* uint_least8_t */
#define PRIuLEAST16 "u" /* uint_least16_t */
#define PRIuLEAST32 "u" /* uint_least32_t */
#define PRIuLEAST64 "llu" /* uint_least64_t */
#define PRIuLEAST64 PRI64"u" /* uint_least64_t */
#define PRIuFAST8 "u" /* uint_fast8_t */
#define PRIuFAST16 "u" /* uint_fast16_t */
#define PRIuFAST32 "u" /* uint_fast32_t */
#define PRIuFAST64 "llu" /* uint_fast64_t */
#define PRIuFAST64 PRI64"u" /* uint_fast64_t */
#define PRIuMAX "ju" /* uintmax_t */
#define PRIuPTR "u" /* uintptr_t */
#define PRIuPTR PRIreg"u" /* uintptr_t */
#define PRIx8 "x" /* uint8_t */
#define PRIx16 "x" /* uint16_t */
#define PRIx32 "x" /* uint32_t */
#define PRIx64 "llx" /* uint64_t */
#define PRIx64 PRI64"x" /* uint64_t */
#define PRIxLEAST8 "x" /* uint_least8_t */
#define PRIxLEAST16 "x" /* uint_least16_t */
#define PRIxLEAST32 "x" /* uint_least32_t */
#define PRIxLEAST64 "llx" /* uint_least64_t */
#define PRIxLEAST64 PRI64"x" /* uint_least64_t */
#define PRIxFAST8 "x" /* uint_fast8_t */
#define PRIxFAST16 "x" /* uint_fast16_t */
#define PRIxFAST32 "x" /* uint_fast32_t */
#define PRIxFAST64 "llx" /* uint_fast64_t */
#define PRIxFAST64 PRI64"x" /* uint_fast64_t */
#define PRIxMAX "jx" /* uintmax_t */
#define PRIxPTR "x" /* uintptr_t */
#define PRIxPTR PRIreg"x" /* uintptr_t */
#define PRIX8 "X" /* uint8_t */
#define PRIX16 "X" /* uint16_t */
#define PRIX32 "X" /* uint32_t */
#define PRIX64 "llX" /* uint64_t */
#define PRIX64 PRI64"X" /* uint64_t */
#define PRIXLEAST8 "X" /* uint_least8_t */
#define PRIXLEAST16 "X" /* uint_least16_t */
#define PRIXLEAST32 "X" /* uint_least32_t */
#define PRIXLEAST64 "llX" /* uint_least64_t */
#define PRIXLEAST64 PRI64"X" /* uint_least64_t */
#define PRIXFAST8 "X" /* uint_fast8_t */
#define PRIXFAST16 "X" /* uint_fast16_t */
#define PRIXFAST32 "X" /* uint_fast32_t */
#define PRIXFAST64 "llX" /* uint_fast64_t */
#define PRIXFAST64 PRI64"X" /* uint_fast64_t */
#define PRIXMAX "jX" /* uintmax_t */
#define PRIXPTR "X" /* uintptr_t */
#define PRIXPTR PRIreg"X" /* uintptr_t */
/* fscanf(3) macros for signed integers. */
#define SCNd8 "hhd" /* int8_t */
#define SCNd16 "hd" /* int16_t */
#define SCNd32 "d" /* int32_t */
#define SCNd64 "lld" /* int64_t */
#define SCNd64 PRI64"d" /* int64_t */
#define SCNdLEAST8 "hhd" /* int_least8_t */
#define SCNdLEAST16 "hd" /* int_least16_t */
#define SCNdLEAST32 "d" /* int_least32_t */
#define SCNdLEAST64 "lld" /* int_least64_t */
#define SCNdLEAST64 PRI64"d" /* int_least64_t */
#define SCNdFAST8 "d" /* int_fast8_t */
#define SCNdFAST16 "d" /* int_fast16_t */
#define SCNdFAST32 "d" /* int_fast32_t */
#define SCNdFAST64 "lld" /* int_fast64_t */
#define SCNdFAST64 PRI64"d" /* int_fast64_t */
#define SCNdMAX "jd" /* intmax_t */
#define SCNdPTR "d" /* intptr_t */
#define SCNdPTR PRIreg"d" /* intptr_t */
#define SCNi8 "hhi" /* int8_t */
#define SCNi16 "hi" /* int16_t */
#define SCNi32 "i" /* int32_t */
#define SCNi64 "lli" /* int64_t */
#define SCNi64 PRI64"i" /* int64_t */
#define SCNiLEAST8 "hhi" /* int_least8_t */
#define SCNiLEAST16 "hi" /* int_least16_t */
#define SCNiLEAST32 "i" /* int_least32_t */
#define SCNiLEAST64 "lli" /* int_least64_t */
#define SCNiLEAST64 PRI64"i" /* int_least64_t */
#define SCNiFAST8 "i" /* int_fast8_t */
#define SCNiFAST16 "i" /* int_fast16_t */
#define SCNiFAST32 "i" /* int_fast32_t */
#define SCNiFAST64 "lli" /* int_fast64_t */
#define SCNiFAST64 PRI64"i" /* int_fast64_t */
#define SCNiMAX "ji" /* intmax_t */
#define SCNiPTR "i" /* intptr_t */
#define SCNiPTR PRIreg"i" /* intptr_t */
/* fscanf(3) macros for unsigned integers. */
#define SCNo8 "hho" /* uint8_t */
#define SCNo16 "ho" /* uint16_t */
#define SCNo32 "o" /* uint32_t */
#define SCNo64 "llo" /* uint64_t */
#define SCNo64 PRI64"o" /* uint64_t */
#define SCNoLEAST8 "hho" /* uint_least8_t */
#define SCNoLEAST16 "ho" /* uint_least16_t */
#define SCNoLEAST32 "o" /* uint_least32_t */
#define SCNoLEAST64 "llo" /* uint_least64_t */
#define SCNoLEAST64 PRI64"o" /* uint_least64_t */
#define SCNoFAST8 "o" /* uint_fast8_t */
#define SCNoFAST16 "o" /* uint_fast16_t */
#define SCNoFAST32 "o" /* uint_fast32_t */
#define SCNoFAST64 "llo" /* uint_fast64_t */
#define SCNoFAST64 PRI64"o" /* uint_fast64_t */
#define SCNoMAX "jo" /* uintmax_t */
#define SCNoPTR "o" /* uintptr_t */
#define SCNoPTR PRIreg"o" /* uintptr_t */
#define SCNu8 "hhu" /* uint8_t */
#define SCNu16 "hu" /* uint16_t */
#define SCNu32 "u" /* uint32_t */
#define SCNu64 "llu" /* uint64_t */
#define SCNu64 PRI64"u" /* uint64_t */
#define SCNuLEAST8 "hhu" /* uint_least8_t */
#define SCNuLEAST16 "hu" /* uint_least16_t */
#define SCNuLEAST32 "u" /* uint_least32_t */
#define SCNuLEAST64 "llu" /* uint_least64_t */
#define SCNuLEAST64 PRI64"u" /* uint_least64_t */
#define SCNuFAST8 "u" /* uint_fast8_t */
#define SCNuFAST16 "u" /* uint_fast16_t */
#define SCNuFAST32 "u" /* uint_fast32_t */
#define SCNuFAST64 "llu" /* uint_fast64_t */
#define SCNuFAST64 PRI64"u" /* uint_fast64_t */
#define SCNuMAX "ju" /* uintmax_t */
#define SCNuPTR "u" /* uintptr_t */
#define SCNuPTR PRIreg"u" /* uintptr_t */
#define SCNx8 "hhx" /* uint8_t */
#define SCNx16 "hx" /* uint16_t */
#define SCNx32 "x" /* uint32_t */
#define SCNx64 "llx" /* uint64_t */
#define SCNx64 PRI64"x" /* uint64_t */
#define SCNxLEAST8 "hhx" /* uint_least8_t */
#define SCNxLEAST16 "hx" /* uint_least16_t */
#define SCNxLEAST32 "x" /* uint_least32_t */
#define SCNxLEAST64 "llx" /* uint_least64_t */
#define SCNxLEAST64 PRI64"x" /* uint_least64_t */
#define SCNxFAST8 "x" /* uint_fast8_t */
#define SCNxFAST16 "x" /* uint_fast16_t */
#define SCNxFAST32 "x" /* uint_fast32_t */
#define SCNxFAST64 "llx" /* uint_fast64_t */
#define SCNxFAST64 PRI64"x" /* uint_fast64_t */
#define SCNxMAX "jx" /* uintmax_t */
#define SCNxPTR "x" /* uintptr_t */
#define SCNxPTR PRIreg"x" /* uintptr_t */
#endif /* !_MACHINE_INTTYPES_H_ */

View File

@ -59,8 +59,7 @@
#define __INT_MAX 0x7fffffff /* max value for an int */
#define __INT_MIN (-0x7fffffff - 1) /* min value for an int */
/* Bad hack for gcc configured to give 64-bit longs. */
#ifdef _LARGE_LONG
#if defined(_LARGE_LONG) || defined(__LP64__)
#define __ULONG_MAX 0xffffffffffffffffUL
#define __LONG_MAX 0x7fffffffffffffffL
#define __LONG_MIN (-0x7fffffffffffffffL - 1)
@ -74,9 +73,13 @@
#define __LLONG_MAX 0x7fffffffffffffffLL /* max value for a long long */
#define __LLONG_MIN (-0x7fffffffffffffffLL - 1) /* min for a long long */
#ifdef __powerpc64__
#define __SSIZE_MAX __LONG_MAX /* max value for a ssize_t */
#define __SIZE_T_MAX __ULONG_MAX /* max value for a size_t */
#else
#define __SSIZE_MAX __INT_MAX /* max value for a ssize_t */
#define __SIZE_T_MAX __UINT_MAX /* max value for a size_t */
#endif
#define __OFF_MAX __LLONG_MAX /* max value for an off_t */
#define __OFF_MIN __LLONG_MIN /* min value for an off_t */
@ -86,7 +89,7 @@
#define __QUAD_MAX __LLONG_MAX /* max value for a quad_t */
#define __QUAD_MIN __LLONG_MIN /* min value for a quad_t */
#ifdef _LARGE_LONG
#if defined(_LARGE_LONG) || defined(__LP64__)
#define __LONG_BIT 64
#else
#define __LONG_BIT 32

View File

@ -45,15 +45,22 @@
#define INT8_C(c) (c)
#define INT16_C(c) (c)
#define INT32_C(c) (c)
#define INT64_C(c) (c ## LL)
#define UINT8_C(c) (c)
#define UINT16_C(c) (c)
#define UINT32_C(c) (c ## U)
#define UINT64_C(c) (c ## ULL)
#ifdef __powerpc64__
#define INT64_C(c) (c ## L)
#define UINT64_C(c) (c ## UL)
#define INTMAX_C(c) (c ## L)
#define UINTMAX_C(c) (c ## UL)
#else
#define INT64_C(c) (c ## LL)
#define UINT64_C(c) (c ## ULL)
#define INTMAX_C(c) (c ## LL)
#define UINTMAX_C(c) (c ## ULL)
#endif
#endif /* !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) */
@ -73,13 +80,21 @@
#define INT8_MAX 0x7f
#define INT16_MAX 0x7fff
#define INT32_MAX 0x7fffffff
#ifdef __powerpc64__
#define INT64_MAX 0x7fffffffffffffffL
#else
#define INT64_MAX 0x7fffffffffffffffLL
#endif
/* Maximum values of exact-width unsigned integer types. */
#define UINT8_MAX 0xff
#define UINT16_MAX 0xffff
#define UINT32_MAX 0xffffffffU
#ifdef __powerpc64__
#define UINT64_MAX 0xffffffffffffffffUL
#else
#define UINT64_MAX 0xffffffffffffffffULL
#endif
/*
* ISO/IEC 9899:1999
@ -129,9 +144,15 @@
* ISO/IEC 9899:1999
* 7.18.2.4 Limits of integer types capable of holding object pointers
*/
#ifdef __powerpc64__
#define INTPTR_MIN INT64_MIN
#define INTPTR_MAX INT64_MAX
#define UINTPTR_MAX UINT64_MAX
#else
#define INTPTR_MIN INT32_MIN
#define INTPTR_MAX INT32_MAX
#define UINTPTR_MAX UINT32_MAX
#endif
/*
* ISO/IEC 9899:1999
@ -145,6 +166,18 @@
* ISO/IEC 9899:1999
* 7.18.3 Limits of other integer types
*/
#ifdef __powerpc64__
/* Limits of ptrdiff_t. */
#define PTRDIFF_MIN INT64_MIN
#define PTRDIFF_MAX INT64_MAX
/* Limits of sig_atomic_t. */
#define SIG_ATOMIC_MIN INT64_MIN
#define SIG_ATOMIC_MAX INT64_MAX
/* Limit of size_t. */
#define SIZE_MAX UINT64_MAX
#else
/* Limits of ptrdiff_t. */
#define PTRDIFF_MIN INT32_MIN
#define PTRDIFF_MAX INT32_MAX
@ -155,6 +188,7 @@
/* Limit of size_t. */
#define SIZE_MAX UINT32_MAX
#endif
#ifndef WCHAR_MIN /* Also possibly defined in <wchar.h> */
/* Limits of wchar_t. */

View File

@ -53,6 +53,13 @@ typedef unsigned short __uint16_t;
typedef int __int32_t;
typedef unsigned int __uint32_t;
#ifdef __powerpc64__
typedef long __int64_t;
typedef unsigned long __uint64_t;
#else
#if defined(lint)
/* LONGLONG */
typedef long long __int64_t;
@ -68,17 +75,25 @@ typedef long long __int64_t;
typedef unsigned long long __uint64_t;
#endif
#endif
/*
* Standard type definitions.
*/
typedef __uint32_t __clock_t; /* clock()... */
typedef unsigned int __cpumask_t;
typedef __int32_t __critical_t;
typedef double __double_t;
typedef double __float_t;
#ifdef __powerpc64__
typedef __int64_t __critical_t;
typedef __int64_t __intfptr_t;
typedef __int64_t __intptr_t;
#else
typedef __int32_t __critical_t;
typedef __int32_t __intfptr_t;
typedef __int64_t __intmax_t;
typedef __int32_t __intptr_t;
#endif
typedef __int64_t __intmax_t;
typedef __int32_t __int_fast8_t;
typedef __int32_t __int_fast16_t;
typedef __int32_t __int_fast32_t;
@ -87,6 +102,16 @@ typedef __int8_t __int_least8_t;
typedef __int16_t __int_least16_t;
typedef __int32_t __int_least32_t;
typedef __int64_t __int_least64_t;
#ifdef __powerpc64__
typedef __int64_t __ptrdiff_t; /* ptr1 - ptr2 */
typedef __int64_t __register_t;
typedef __int64_t __segsz_t; /* segment size (in pages) */
typedef __uint64_t __size_t; /* sizeof() */
typedef __int64_t __ssize_t; /* byte count or error */
typedef __int64_t __time_t; /* time()... */
typedef __uint64_t __uintfptr_t;
typedef __uint64_t __uintptr_t;
#else
typedef __int32_t __ptrdiff_t; /* ptr1 - ptr2 */
typedef __int32_t __register_t;
typedef __int32_t __segsz_t; /* segment size (in pages) */
@ -94,8 +119,9 @@ typedef __uint32_t __size_t; /* sizeof() */
typedef __int32_t __ssize_t; /* byte count or error */
typedef __int32_t __time_t; /* time()... */
typedef __uint32_t __uintfptr_t;
typedef __uint64_t __uintmax_t;
typedef __uint32_t __uintptr_t;
#endif
typedef __uint64_t __uintmax_t;
typedef __uint32_t __uint_fast8_t;
typedef __uint32_t __uint_fast16_t;
typedef __uint32_t __uint_fast32_t;
@ -104,12 +130,19 @@ typedef __uint8_t __uint_least8_t;
typedef __uint16_t __uint_least16_t;
typedef __uint32_t __uint_least32_t;
typedef __uint64_t __uint_least64_t;
#ifdef __powerpc64__
typedef __uint64_t __u_register_t;
typedef __uint64_t __vm_offset_t;
typedef __uint64_t __vm_paddr_t;
typedef __uint64_t __vm_size_t;
#else
typedef __uint32_t __u_register_t;
typedef __uint32_t __vm_offset_t;
typedef __int64_t __vm_ooffset_t;
typedef __uint32_t __vm_paddr_t;
typedef __uint64_t __vm_pindex_t;
typedef __uint32_t __vm_size_t;
#endif
typedef __int64_t __vm_ooffset_t;
typedef __uint64_t __vm_pindex_t;
/*
* Unusual type definitions.

View File

@ -53,15 +53,32 @@
#define PIC_GOT(x) x
#endif
#ifdef __powerpc64__
#undef PIC_PLT
#define PIC_PLT(x) __CONCAT(.,x)
#endif
#define CNAME(csym) csym
#define ASMNAME(asmsym) asmsym
#ifdef __powerpc64__
#define HIDENAME(asmsym) __CONCAT(_,asmsym)
#else
#define HIDENAME(asmsym) __CONCAT(.,asmsym)
#endif
#define _GLOBAL(x) \
.data; .align 2; .globl x; x:
#ifdef __powerpc64__
#define _ENTRY(x) \
.text; .align 2; .globl x; .section ".opd","aw"; \
.align 3; x: \
.quad .x,.TOC.@tocbase,0; .previous; \
.align 4; .globl .x; .type .x,@function; .x:
#else
#define _ENTRY(x) \
.text; .align 4; .globl x; .type x,@function; x:
#endif
#if defined(PROF) || (defined(_KERNEL) && defined(GPROF))
# define _PROF_PROLOGUE mflr 0; stw 0,4(1); bl _mcount

View File

@ -65,8 +65,21 @@
: "cc", "memory") \
/* __ATOMIC_ADD_32 */
#ifdef __powerpc64__
#define __ATOMIC_ADD_64(p, v, t) \
__asm __volatile( \
"1: ldarx %0, 0, %2\n" \
" add %0, %3, %0\n" \
" stdcx. %0, 0, %2\n" \
" bne- 1b\n" \
: "=&r" (t), "=m" (*p) \
: "r" (p), "r" (v), "m" (*p) \
: "cc", "memory") \
/* __ATOMIC_ADD_64 */
#else
#define __ATOMIC_ADD_64(p, v, t) \
64-bit atomic_add not implemented
#endif
#define _ATOMIC_ADD(width, suffix, type) \
static __inline void \
@ -98,11 +111,13 @@ _ATOMIC_ADD(16, short, u_short)
#endif
_ATOMIC_ADD(32, 32, uint32_t)
_ATOMIC_ADD(32, int, u_int)
#ifdef __powerpc64__
_ATOMIC_ADD(64, 64, uint64_t)
_ATOMIC_ADD(64, long, u_long)
_ATOMIC_ADD(64, ptr, uintptr_t)
#else
_ATOMIC_ADD(32, long, u_long)
_ATOMIC_ADD(32, ptr, uintptr_t)
#if 0
_ATOMIC_ADD(64, 64, uint64_t)
_ATOMIC_ADD(64, long_long, u_long_long)
#endif
#undef _ATOMIC_ADD
@ -133,8 +148,21 @@ _ATOMIC_ADD(64, long_long, u_long_long)
: "cc", "memory") \
/* __ATOMIC_CLEAR_32 */
#ifdef __powerpc64__
#define __ATOMIC_CLEAR_64(p, v, t) \
__asm __volatile( \
"1: ldarx %0, 0, %2\n" \
" andc %0, %0, %3\n" \
" stdcx. %0, 0, %2\n" \
" bne- 1b\n" \
: "=&r" (t), "=m" (*p) \
: "r" (p), "r" (v), "m" (*p) \
: "cc", "memory") \
/* __ATOMIC_CLEAR_64 */
#else
#define __ATOMIC_CLEAR_64(p, v, t) \
64-bit atomic_clear not implemented
#endif
#define _ATOMIC_CLEAR(width, suffix, type) \
static __inline void \
@ -166,11 +194,13 @@ _ATOMIC_CLEAR(16, short, u_short)
#endif
_ATOMIC_CLEAR(32, 32, uint32_t)
_ATOMIC_CLEAR(32, int, u_int)
#ifdef __powerpc64__
_ATOMIC_CLEAR(64, 64, uint64_t)
_ATOMIC_CLEAR(64, long, u_long)
_ATOMIC_CLEAR(64, ptr, uintptr_t)
#else
_ATOMIC_CLEAR(32, long, u_long)
_ATOMIC_CLEAR(32, ptr, uintptr_t)
#if 0
_ATOMIC_CLEAR(64, 64, uint64_t)
_ATOMIC_CLEAR(64, long_long, u_long_long)
#endif
#undef _ATOMIC_CLEAR
@ -216,8 +246,21 @@ _ATOMIC_CLEAR(64, long_long, u_long_long)
: "cc", "memory") \
/* __ATOMIC_SET_32 */
#ifdef __powerpc64__
#define __ATOMIC_SET_64(p, v, t) \
__asm __volatile( \
"1: ldarx %0, 0, %2\n" \
" or %0, %3, %0\n" \
" stdcx. %0, 0, %2\n" \
" bne- 1b\n" \
: "=&r" (t), "=m" (*p) \
: "r" (p), "r" (v), "m" (*p) \
: "cc", "memory") \
/* __ATOMIC_SET_64 */
#else
#define __ATOMIC_SET_64(p, v, t) \
64-bit atomic_set not implemented
#endif
#define _ATOMIC_SET(width, suffix, type) \
static __inline void \
@ -249,11 +292,13 @@ _ATOMIC_SET(16, short, u_short)
#endif
_ATOMIC_SET(32, 32, uint32_t)
_ATOMIC_SET(32, int, u_int)
#ifdef __powerpc64__
_ATOMIC_SET(64, 64, uint64_t)
_ATOMIC_SET(64, long, u_long)
_ATOMIC_SET(64, ptr, uintptr_t)
#else
_ATOMIC_SET(32, long, u_long)
_ATOMIC_SET(32, ptr, uintptr_t)
#if 0
_ATOMIC_SET(64, 64, uint64_t)
_ATOMIC_SET(64, long_long, u_long_long)
#endif
#undef _ATOMIC_SET
@ -284,8 +329,21 @@ _ATOMIC_SET(64, long_long, u_long_long)
: "cc", "memory") \
/* __ATOMIC_SUBTRACT_32 */
#ifdef __powerpc64__
#define __ATOMIC_SUBTRACT_64(p, v, t) \
__asm __volatile( \
"1: ldarx %0, 0, %2\n" \
" subf %0, %3, %0\n" \
" stdcx. %0, 0, %2\n" \
" bne- 1b\n" \
: "=&r" (t), "=m" (*p) \
: "r" (p), "r" (v), "m" (*p) \
: "cc", "memory") \
/* __ATOMIC_SUBTRACT_64 */
#else
#define __ATOMIC_SUBTRACT_64(p, v, t) \
64-bit atomic_subtract not implemented
#endif
#define _ATOMIC_SUBTRACT(width, suffix, type) \
static __inline void \
@ -317,11 +375,13 @@ _ATOMIC_SUBTRACT(16, short, u_short)
#endif
_ATOMIC_SUBTRACT(32, 32, uint32_t)
_ATOMIC_SUBTRACT(32, int, u_int)
#ifdef __powerpc64__
_ATOMIC_SUBTRACT(64, 64, uint64_t)
_ATOMIC_SUBTRACT(64, long, u_long)
_ATOMIC_SUBTRACT(64, ptr, uintptr_t)
#else
_ATOMIC_SUBTRACT(32, long, u_long)
_ATOMIC_SUBTRACT(32, ptr, uintptr_t)
#if 0
_ATOMIC_SUBTRACT(64, 64, uint64_t)
_ATOMIC_SUBTRACT(64, long_long, u_long_long)
#endif
#undef _ATOMIC_SUBTRACT
@ -359,9 +419,37 @@ atomic_readandclear_32(volatile uint32_t *addr)
return (result);
}
#ifdef __powerpc64__
static __inline uint64_t
atomic_readandclear_64(volatile uint64_t *addr)
{
uint64_t result,temp;
#ifdef __GNUCLIKE_ASM
__asm __volatile (
"\tsync\n" /* drain writes */
"1:\tldarx %0, 0, %3\n\t" /* load old value */
"li %1, 0\n\t" /* load new value */
"stdcx. %1, 0, %3\n\t" /* attempt to store */
"bne- 1b\n\t" /* spin if failed */
: "=&r"(result), "=&r"(temp), "=m" (*addr)
: "r" (addr), "m" (*addr)
: "cc", "memory");
#endif
return (result);
}
#endif
#define atomic_readandclear_int atomic_readandclear_32
#ifdef __powerpc64__
#define atomic_readandclear_long atomic_readandclear_64
#define atomic_readandclear_ptr atomic_readandclear_64
#else
#define atomic_readandclear_long atomic_readandclear_32
#define atomic_readandclear_ptr atomic_readandclear_32
#endif
/*
* We assume that a = b will do atomic loads and stores.
@ -404,11 +492,21 @@ atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v) \
ATOMIC_STORE_LOAD(char, 8)
ATOMIC_STORE_LOAD(short, 16)
ATOMIC_STORE_LOAD(int, 32)
#ifdef __powerpc64__
ATOMIC_STORE_LOAD(long, 64)
#endif
#ifdef __powerpc64__
#define atomic_load_acq_long atomic_load_acq_64
#define atomic_store_rel_long atomic_store_rel_64
#define atomic_load_acq_ptr atomic_load_acq_64
#define atomic_store_rel_ptr atomic_store_rel_64
#else
#define atomic_load_acq_long atomic_load_acq_32
#define atomic_store_rel_long atomic_store_rel_32
#define atomic_load_acq_ptr atomic_load_acq_32
#define atomic_store_rel_ptr atomic_store_rel_32
#endif
#undef ATOMIC_STORE_LOAD
@ -417,10 +515,10 @@ ATOMIC_STORE_LOAD(int, 32)
* two values are equal, update the value of *p with newval. Returns
* zero if the compare failed, nonzero otherwise.
*/
static __inline uint32_t
static __inline int
atomic_cmpset_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
uint32_t ret;
int ret;
#ifdef __GNUCLIKE_ASM
__asm __volatile (
@ -443,22 +541,33 @@ atomic_cmpset_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
return (ret);
}
static __inline u_long
static __inline int
atomic_cmpset_long(volatile u_long* p, u_long cmpval, u_long newval)
{
uint32_t ret;
int ret;
#ifdef __GNUCLIKE_ASM
__asm __volatile (
#ifdef __powerpc64__
"1:\tldarx %0, 0, %2\n\t" /* load old value */
"cmpld %3, %0\n\t" /* compare */
"bne 2f\n\t" /* exit if not equal */
"stdcx. %4, 0, %2\n\t" /* attempt to store */
#else
"1:\tlwarx %0, 0, %2\n\t" /* load old value */
"cmplw %3, %0\n\t" /* compare */
"bne 2f\n\t" /* exit if not equal */
"stwcx. %4, 0, %2\n\t" /* attempt to store */
#endif
"bne- 1b\n\t" /* spin if failed */
"li %0, 1\n\t" /* success - retval = 1 */
"b 3f\n\t" /* we've succeeded */
"2:\n\t"
#ifdef __powerpc64__
"stdcx. %0, 0, %2\n\t" /* clear reservation (74xx) */
#else
"stwcx. %0, 0, %2\n\t" /* clear reservation (74xx) */
#endif
"li %0, 0\n\t" /* failure - retval = 0 */
"3:\n\t"
: "=&r" (ret), "=m" (*p)
@ -471,10 +580,15 @@ atomic_cmpset_long(volatile u_long* p, u_long cmpval, u_long newval)
#define atomic_cmpset_int atomic_cmpset_32
#ifdef __powerpc64__
#define atomic_cmpset_ptr(dst, old, new) \
atomic_cmpset_long((volatile u_long *)(dst), (u_long)(old), (u_long)(new))
#else
#define atomic_cmpset_ptr(dst, old, new) \
atomic_cmpset_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#endif
static __inline uint32_t
static __inline int
atomic_cmpset_acq_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
int retval;
@ -484,24 +598,24 @@ atomic_cmpset_acq_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
return (retval);
}
static __inline uint32_t
static __inline int
atomic_cmpset_rel_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
__ATOMIC_BARRIER;
return (atomic_cmpset_32(p, cmpval, newval));
}
static __inline u_long
static __inline int
atomic_cmpset_acq_long(volatile u_long *p, u_long cmpval, u_long newval)
{
int retval;
u_long retval;
retval = atomic_cmpset_long(p, cmpval, newval);
__ATOMIC_BARRIER;
return (retval);
}
static __inline uint32_t
static __inline int
atomic_cmpset_rel_long(volatile u_long *p, u_long cmpval, u_long newval)
{
__ATOMIC_BARRIER;
@ -511,10 +625,17 @@ atomic_cmpset_rel_long(volatile u_long *p, u_long cmpval, u_long newval)
#define atomic_cmpset_acq_int atomic_cmpset_acq_32
#define atomic_cmpset_rel_int atomic_cmpset_rel_32
#ifdef __powerpc64__
#define atomic_cmpset_acq_ptr(dst, old, new) \
atomic_cmpset_acq_long((volatile u_long *)(dst), (u_long)(old), (u_long)(new))
#define atomic_cmpset_rel_ptr(dst, old, new) \
atomic_cmpset_rel_long((volatile u_long *)(dst), (u_long)(old), (u_long)(new))
#else
#define atomic_cmpset_acq_ptr(dst, old, new) \
atomic_cmpset_acq_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define atomic_cmpset_rel_ptr(dst, old, new) \
atomic_cmpset_rel_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#endif
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
@ -528,7 +649,23 @@ atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
}
#define atomic_fetchadd_int atomic_fetchadd_32
#ifdef __powerpc64__
static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{
uint64_t value;
do {
value = *p;
} while (!atomic_cmpset_long(p, value, value + v));
return (value);
}
#define atomic_fetchadd_long atomic_fetchadd_64
#else
#define atomic_fetchadd_long(p, v) \
(u_long)atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))
#endif
#endif /* ! _MACHINE_ATOMIC_H_ */

View File

@ -77,12 +77,18 @@
#define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t)
#define BUS_SPACE_MAXADDR_24BIT 0xFFFFFF
#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
#define BUS_SPACE_MAXADDR 0xFFFFFFFF
#define BUS_SPACE_MAXSIZE_24BIT 0xFFFFFF
#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF
#define BUS_SPACE_MAXSIZE 0xFFFFFFFF
#define BUS_SPACE_MAXADDR_24BIT 0xFFFFFFUL
#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFFUL
#define BUS_SPACE_MAXSIZE_24BIT 0xFFFFFFUL
#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFFUL
#ifdef __powerpc64__
#define BUS_SPACE_MAXADDR 0xFFFFFFFFFFFFFFFFUL
#define BUS_SPACE_MAXSIZE 0xFFFFFFFFFFFFFFFFUL
#else
#define BUS_SPACE_MAXADDR 0xFFFFFFFFUL
#define BUS_SPACE_MAXSIZE 0xFFFFFFFFUL
#endif
#define BUS_SPACE_MAP_CACHEABLE 0x01
#define BUS_SPACE_MAP_LINEAR 0x02

View File

@ -68,6 +68,15 @@ mtmsr(register_t value)
__asm __volatile ("mtmsr %0; isync" :: "r"(value));
}
#ifdef __powerpc64__
static __inline void
mtmsrd(register_t value)
{
__asm __volatile ("mtmsrd %0; isync" :: "r"(value));
}
#endif
static __inline register_t
mfmsr(void)
{
@ -78,6 +87,7 @@ mfmsr(void)
return (value);
}
#ifndef __powerpc64__
static __inline void
mtsrin(vm_offset_t va, register_t value)
{
@ -94,6 +104,7 @@ mfsrin(vm_offset_t va)
return (value);
}
#endif
static __inline void
mtdec(register_t value)
@ -126,6 +137,9 @@ static __inline u_quad_t
mftb(void)
{
u_quad_t tb;
#ifdef __powerpc64__
__asm __volatile ("mftb %0" : "=r"(tb));
#else
uint32_t *tbup = (uint32_t *)&tb;
uint32_t *tblp = tbup + 1;
@ -133,6 +147,7 @@ mftb(void)
*tbup = mfspr(TBR_TBU);
*tblp = mfspr(TBR_TBL);
} while (*tbup != mfspr(TBR_TBU));
#endif
return (tb);
}

View File

@ -35,14 +35,15 @@
#define _POWERPC_DB_MACHDEP_H_
#include <vm/vm_param.h>
#include <machine/elf.h>
#define DB_ELF_SYMBOLS
#define DB_ELFSIZE 32
#define DB_ELFSIZE __ELF_WORD_SIZE
#define BYTE_MSF (1)
typedef vm_offset_t db_addr_t; /* address - unsigned */
typedef int db_expr_t; /* expression - signed */
typedef intptr_t db_expr_t; /* expression - signed */
#define PC_REGS(regs) ((db_addr_t)kdb_thrctx->pcb_lr)

View File

@ -50,17 +50,17 @@
struct trapframe {
register_t fixreg[32];
register_t lr;
int cr;
int xer;
register_t cr;
register_t xer;
register_t ctr;
register_t srr0;
register_t srr1;
int exc;
register_t exc;
union {
struct {
/* dar & dsisr are only filled on a DSI trap */
register_t dar;
int dsisr;
register_t dsisr;
} aim;
struct {
register_t dear;
@ -71,14 +71,31 @@ struct trapframe {
};
/*
* This is to ensure alignment of the stackpointer
* FRAMELEN is the size of the stack region used by the low-level trap
* handler. It is the size of its data (trapframe) plus the callframe
* header (sizeof(struct callframe) - 3 register widths). It must also
* be 16-byte aligned.
*/
#define FRAMELEN roundup(sizeof(struct trapframe) + 8, 16)
#define FRAMELEN roundup(sizeof(struct trapframe) + \
sizeof(struct callframe) - 3*sizeof(register_t), 16)
#define trapframe(td) ((td)->td_frame)
/*
* Call frame for PowerPC used during fork.
*/
#ifdef __powerpc64__
struct callframe {
register_t cf_dummy_fp; /* dummy frame pointer */
register_t cf_cr;
register_t cf_lr;
register_t cf_compiler;
register_t cf_linkeditor;
register_t cf_toc;
register_t cf_func;
register_t cf_arg0;
register_t cf_arg1;
};
#else
struct callframe {
register_t cf_dummy_fp; /* dummy frame pointer */
register_t cf_lr; /* space for link register save */
@ -86,10 +103,15 @@ struct callframe {
register_t cf_arg0;
register_t cf_arg1;
};
#endif
/* Definitions for syscalls */
#define FIRSTARG 3 /* first arg in reg 3 */
/* Definitions for syscalls */
#define FIRSTARG 3 /* first arg in reg 3 */
#define NARGREG 8 /* 8 args in regs */
#define MOREARGS(sp) ((caddr_t)((int)(sp) + 8)) /* more args go here */
#define MOREARGS(sp) ((caddr_t)((uintptr_t)(sp) + \
sizeof(struct callframe) - 3*sizeof(register_t))) /* more args go here */
#endif /* _MACHINE_FRAME_H_ */

View File

@ -161,6 +161,7 @@
#define HID0_E500_DEFAULT_SET (HID0_EMCP | HID0_E500_TBEN)
#define HID1_E500_DEFAULT_SET (HID1_E500_ABE | HID1_E500_ASTME)
#define HID5_970_DCBZ_SIZE_HI 0x01000000 /* dcbz does a 32-byte store */
#define HID5_970_DCBZ_SIZE_HI 0x00000080UL /* dcbz does a 32-byte store */
#define HID4_970_DISABLE_LG_PG 0x00000004ULL /* disables large pages */
#endif /* _POWERPC_HID_H_ */

View File

@ -33,9 +33,14 @@
* Miscellaneous machine-dependent declarations.
*/
extern char sigcode[];
extern char esigcode[];
extern int szsigcode;
extern char sigcode32[];
extern int szsigcode32;
#ifdef __powerpc64__
extern char sigcode64[];
extern int szsigcode64;
#endif
extern long Maxmem;
extern int busdma_swi_pending;

View File

@ -42,7 +42,7 @@
#define _POWERPC_INCLUDE_PARAM_H_
/*
* Machine dependent constants for PowerPC (32-bit only currently)
* Machine dependent constants for PowerPC
*/
#include <machine/_align.h>
@ -54,8 +54,12 @@
#define MACHINE "powerpc"
#endif
#ifndef MACHINE_ARCH
#ifdef __powerpc64__
#define MACHINE_ARCH "powerpc64"
#else
#define MACHINE_ARCH "powerpc"
#endif
#endif
#define MID_MACHINE MID_POWERPC
#if defined(SMP) || defined(KLD_MODULE)
@ -72,7 +76,7 @@
* This does not reflect the optimal alignment, just the possibility
* (within reasonable limits).
*/
#define ALIGNED_POINTER(p, t) ((((unsigned)(p)) & (sizeof (t) - 1)) == 0)
#define ALIGNED_POINTER(p, t) ((((uintptr_t)(p)) & (sizeof (t) - 1)) == 0)
/*
* CACHE_LINE_SIZE is the compile-time maximum cache line size for an
@ -82,8 +86,8 @@
#define CACHE_LINE_SIZE (1 << CACHE_LINE_SHIFT)
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT) /* Page size */
#define PAGE_MASK (PAGE_SIZE - 1)
#define PAGE_SIZE (1L << PAGE_SHIFT) /* Page size */
#define PAGE_MASK (vm_offset_t)(PAGE_SIZE - 1)
#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
#define MAXPAGESIZES 1 /* maximum number of supported page sizes */
@ -99,15 +103,13 @@
*/
#define trunc_page(x) ((unsigned long)(x) & ~(PAGE_MASK))
#define round_page(x) (((x) + PAGE_MASK) & ~PAGE_MASK)
#define trunc_4mpage(x) ((unsigned)(x) & ~PDRMASK)
#define round_4mpage(x) ((((unsigned)(x)) + PDRMASK) & ~PDRMASK)
#define atop(x) ((unsigned long)(x) >> PAGE_SHIFT)
#define ptoa(x) ((unsigned long)(x) << PAGE_SHIFT)
#define powerpc_btop(x) ((unsigned)(x) >> PAGE_SHIFT)
#define powerpc_ptob(x) ((unsigned)(x) << PAGE_SHIFT)
#define powerpc_btop(x) ((unsigned long)(x) >> PAGE_SHIFT)
#define powerpc_ptob(x) ((unsigned long)(x) << PAGE_SHIFT)
#define pgtok(x) ((x) * (PAGE_SIZE / 1024))
#define pgtok(x) ((x) * (PAGE_SIZE / 1024UL))
#endif /* !_POWERPC_INCLUDE_PARAM_H_ */

View File

@ -35,12 +35,13 @@
#ifndef _MACHINE_PCB_H_
#define _MACHINE_PCB_H_
typedef int faultbuf[25];
typedef register_t faultbuf[25];
struct pcb {
register_t pcb_context[20]; /* non-volatile r14-r31 */
register_t pcb_cr; /* Condition register */
register_t pcb_sp; /* stack pointer */
register_t pcb_toc; /* toc pointer */
register_t pcb_lr; /* link register */
struct pmap *pcb_pm; /* pmap of our vmspace */
faultbuf *pcb_onfault; /* For use during
@ -59,13 +60,14 @@ struct pcb {
register_t vrsave;
register_t spare[2];
register_t vscr;
} pcb_vec __attribute__((aligned(16))); /* Vector processor */
} pcb_vec __aligned(16); /* Vector processor */
unsigned int pcb_veccpu; /* which CPU had our vector
stuff. */
union {
struct {
register_t usr; /* USER_SR segment */
register_t usr_esid; /* USER_SR segment */
register_t usr_vsid; /* USER_SR segment */
} aim;
struct {
register_t ctr;

View File

@ -31,10 +31,11 @@
#define _MACHINE_PCPU_H_
#include <machine/cpufunc.h>
#include <machine/slb.h>
#include <machine/tlb.h>
struct pmap;
#define CPUSAVE_LEN 8
#define CPUSAVE_LEN 9
#define PCPU_MD_COMMON_FIELDS \
int pc_inside_intr; \
@ -50,7 +51,17 @@ struct pmap;
register_t pc_disisave[CPUSAVE_LEN]; \
register_t pc_dbsave[CPUSAVE_LEN];
#define PCPU_MD_AIM_FIELDS
#define PCPU_MD_AIM32_FIELDS
#define PCPU_MD_AIM64_FIELDS \
struct slb pc_slb[64]; \
struct slb *pc_userslb;
#ifdef __powerpc64__
#define PCPU_MD_AIM_FIELDS PCPU_MD_AIM64_FIELDS
#else
#define PCPU_MD_AIM_FIELDS PCPU_MD_AIM32_FIELDS
#endif
#define BOOKE_CRITSAVE_LEN (CPUSAVE_LEN + 2)
#define BOOKE_TLB_MAXNEST 3
@ -66,16 +77,17 @@ struct pmap;
int pc_tid_next;
/* Definitions for register offsets within the exception tmp save areas */
#define CPUSAVE_R28 0 /* where r28 gets saved */
#define CPUSAVE_R29 1 /* where r29 gets saved */
#define CPUSAVE_R30 2 /* where r30 gets saved */
#define CPUSAVE_R31 3 /* where r31 gets saved */
#define CPUSAVE_AIM_DAR 4 /* where SPR_DAR gets saved */
#define CPUSAVE_AIM_DSISR 5 /* where SPR_DSISR gets saved */
#define CPUSAVE_BOOKE_DEAR 4 /* where SPR_DEAR gets saved */
#define CPUSAVE_BOOKE_ESR 5 /* where SPR_ESR gets saved */
#define CPUSAVE_SRR0 6 /* where SRR0 gets saved */
#define CPUSAVE_SRR1 7 /* where SRR1 gets saved */
#define CPUSAVE_R27 0 /* where r27 gets saved */
#define CPUSAVE_R28 1 /* where r28 gets saved */
#define CPUSAVE_R29 2 /* where r29 gets saved */
#define CPUSAVE_R30 3 /* where r30 gets saved */
#define CPUSAVE_R31 4 /* where r31 gets saved */
#define CPUSAVE_AIM_DAR 5 /* where SPR_DAR gets saved */
#define CPUSAVE_AIM_DSISR 6 /* where SPR_DSISR gets saved */
#define CPUSAVE_BOOKE_DEAR 5 /* where SPR_DEAR gets saved */
#define CPUSAVE_BOOKE_ESR 6 /* where SPR_ESR gets saved */
#define CPUSAVE_SRR0 7 /* where SRR0 gets saved */
#define CPUSAVE_SRR1 8 /* where SRR1 gets saved */
/* Book-E TLBSAVE is more elaborate */
#define TLBSAVE_BOOKE_LR 0

View File

@ -65,10 +65,12 @@
#define _MACHINE_PMAP_H_
#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <machine/sr.h>
#include <machine/pte.h>
#include <machine/slb.h>
#include <machine/tlb.h>
struct pmap_md {
@ -84,13 +86,22 @@ struct pmap_md {
#define NPMAPS 32768
#endif /* !defined(NPMAPS) */
struct slbcontainer;
SPLAY_HEAD(slb_tree, slbcontainer);
struct pmap {
struct mtx pm_mtx;
u_int pm_sr[16];
#ifdef __powerpc64__
struct slb_tree pm_slbtree;
struct slb *pm_slb;
#else
register_t pm_sr[16];
#endif
u_int pm_active;
uint32_t pm_gen_count; /* generation count (pmap lock dropped) */
u_int pm_retries;
u_int pm_context;
struct pmap *pmap_phys;
struct pmap_statistics pm_stats;
@ -107,6 +118,7 @@ struct pvo_entry {
} pvo_pte;
pmap_t pvo_pmap; /* Owning pmap */
vm_offset_t pvo_vaddr; /* VA of entry */
uint64_t pvo_vpn; /* Virtual page number */
};
LIST_HEAD(pvo_head, pvo_entry);
@ -119,20 +131,39 @@ struct md_page {
#define pmap_page_is_mapped(m) (!LIST_EMPTY(&(m)->md.mdpg_pvoh))
#define pmap_page_set_memattr(m, ma) (void)0
/*
* Return the VSID corresponding to a given virtual address.
* If no VSID is currently defined, it will allocate one, and add
* it to a free slot if available.
*
* NB: The PMAP MUST be locked already.
*/
uint64_t va_to_vsid(pmap_t pm, vm_offset_t va);
int va_to_slb_entry(pmap_t pm, vm_offset_t va, struct slb *);
uint64_t allocate_vsid(pmap_t pm, uint64_t esid, int large);
void slb_insert(pmap_t pm, struct slb *dst, struct slb *);
int vsid_to_esid(pmap_t pm, uint64_t vsid, uint64_t *esid);
void free_vsids(pmap_t pm);
struct slb *slb_alloc_user_cache(void);
void slb_free_user_cache(struct slb *);
#else
struct pmap {
struct mtx pm_mtx; /* pmap mutex */
tlbtid_t pm_tid[MAXCPU]; /* TID to identify this pmap entries in TLB */
u_int pm_active; /* active on cpus */
uint32_t pm_gen_count; /* generation count (pmap lock dropped) */
u_int pm_retries;
int pm_refs; /* ref count */
struct pmap_statistics pm_stats; /* pmap statistics */
/* Page table directory, array of pointers to page tables. */
pte_t *pm_pdir[PDIR_NENTRIES];
/* generation count (pmap lock dropped) */
uint32_t pm_gen_count;
u_int pm_retries;
/* List of allocated ptbl bufs (ptbl kva regions). */
TAILQ_HEAD(, ptbl_buf) pm_ptbl_list;
};

View File

@ -46,7 +46,12 @@ struct mdthread {
struct mdproc {
};
#ifdef __powerpc64__
#define KINFO_PROC_SIZE 1088
#define KINFO_PROC32_SIZE 768
#else
#define KINFO_PROC_SIZE 768
#endif
#ifdef _KERNEL
struct syscall_args {

View File

@ -36,7 +36,7 @@
#define FUNCTION_ALIGNMENT 4
typedef u_int fptrdiff_t;
typedef __ptrdiff_t fptrdiff_t;
/*
* The mcount trampoline macro, expanded in libc/gmon/mcount.c
@ -75,6 +75,50 @@ typedef u_int fptrdiff_t;
* to be restored to what it was on entry to the profiled routine.
*/
#ifdef __powerpc64__
#define MCOUNT \
__asm( " .text \n" \
" .align 2 \n" \
" .globl _mcount \n" \
" .section \".opd\",\"aw\" \n" \
" .align 3 \n" \
"_mcount: \n" \
" .quad ._mcount,.TOC.@tocbase,0 \n" \
" .previous \n" \
" .align 4 \n" \
" .globl ._mcount \n" \
" .type ._mcount,@function \n" \
"._mcount: \n" \
" stdu %r1,-(288+120)(%r1) \n" \
" std %r3,48(%r1) \n" \
" std %r4,56(%r1) \n" \
" std %r5,64(%r1) \n" \
" std %r6,72(%r1) \n" \
" std %r7,80(%r1) \n" \
" std %r8,88(%r1) \n" \
" std %r9,96(%r1) \n" \
" std %r10,104(%r1) \n" \
" mflr %r4 \n" \
" std %r4,112(%r1) \n" \
" ld %r3,0(%r1) \n" \
" ld %r3,0(%r3) \n" \
" ld %r3,16(%r3) \n" \
" bl .__mcount \n" \
" nop \n" \
" ld %r4,112(%r1) \n" \
" mtlr %r4 \n" \
" ld %r3,48(%r1) \n" \
" ld %r4,56(%r1) \n" \
" ld %r5,64(%r1) \n" \
" ld %r6,72(%r1) \n" \
" ld %r7,80(%r1) \n" \
" ld %r8,88(%r1) \n" \
" ld %r9,96(%r1) \n" \
" ld %r10,104(%r1) \n" \
" addi %r1,%r1,(288+120) \n" \
" blr \n");
#else
#ifdef PIC
#define _PLT "@plt"
#else
@ -115,6 +159,7 @@ __asm( " .globl _mcount \n" \
" bctr \n" \
"_mcount_end: \n" \
" .size _mcount,_mcount_end-_mcount");
#endif
#ifdef _KERNEL
#define MCOUNT_ENTER(s) s = intr_disable()
@ -165,7 +210,11 @@ void __mcount(uintfptr_t frompc, uintfptr_t selfpc);
#else /* !_KERNEL */
#ifdef __powerpc64__
typedef u_long uintfptr_t;
#else
typedef u_int uintfptr_t;
#endif
#endif /* _KERNEL */

View File

@ -44,23 +44,23 @@
* FP, FE0, FE1 - reserved, always cleared, setting has no effect.
*
*/
#define PSL_UCLE 0x04000000 /* User mode cache lock enable */
#define PSL_SPE 0x02000000 /* SPE enable */
#define PSL_WE 0x00040000 /* Wait state enable */
#define PSL_CE 0x00020000 /* Critical interrupt enable */
#define PSL_EE 0x00008000 /* External interrupt enable */
#define PSL_PR 0x00004000 /* User mode */
#define PSL_FP 0x00002000 /* Floating point available */
#define PSL_ME 0x00001000 /* Machine check interrupt enable */
#define PSL_FE0 0x00000800 /* Floating point exception mode 0 */
#define PSL_UBLE 0x00000400 /* BTB lock enable */
#define PSL_DE 0x00000200 /* Debug interrupt enable */
#define PSL_FE1 0x00000100 /* Floating point exception mode 1 */
#define PSL_IS 0x00000020 /* Instruction address space */
#define PSL_DS 0x00000010 /* Data address space */
#define PSL_PMM 0x00000004 /* Performance monitor mark */
#define PSL_UCLE 0x04000000UL /* User mode cache lock enable */
#define PSL_SPE 0x02000000UL /* SPE enable */
#define PSL_WE 0x00040000UL /* Wait state enable */
#define PSL_CE 0x00020000UL /* Critical interrupt enable */
#define PSL_EE 0x00008000UL /* External interrupt enable */
#define PSL_PR 0x00004000UL /* User mode */
#define PSL_FP 0x00002000UL /* Floating point available */
#define PSL_ME 0x00001000UL /* Machine check interrupt enable */
#define PSL_FE0 0x00000800UL /* Floating point exception mode 0 */
#define PSL_UBLE 0x00000400UL /* BTB lock enable */
#define PSL_DE 0x00000200UL /* Debug interrupt enable */
#define PSL_FE1 0x00000100UL /* Floating point exception mode 1 */
#define PSL_IS 0x00000020UL /* Instruction address space */
#define PSL_DS 0x00000010UL /* Data address space */
#define PSL_PMM 0x00000004UL /* Performance monitor mark */
#define PSL_FE_DFLT 0x00000004 /* default: no FP */
#define PSL_FE_DFLT 0x00000000UL /* default == none */
/* Initial kernel MSR, use IS=1 ad DS=1. */
#define PSL_KERNSET_INIT (PSL_IS | PSL_DS)
@ -77,22 +77,29 @@
*
* [*] Little-endian mode on the 601 is implemented in the HID0 register.
*/
#define PSL_VEC 0x02000000 /* AltiVec vector unit available */
#define PSL_POW 0x00040000 /* power management */
#define PSL_ILE 0x00010000 /* interrupt endian mode (1 == le) */
#define PSL_EE 0x00008000 /* external interrupt enable */
#define PSL_PR 0x00004000 /* privilege mode (1 == user) */
#define PSL_FP 0x00002000 /* floating point enable */
#define PSL_ME 0x00001000 /* machine check enable */
#define PSL_FE0 0x00000800 /* floating point interrupt mode 0 */
#define PSL_SE 0x00000400 /* single-step trace enable */
#define PSL_BE 0x00000200 /* branch trace enable */
#define PSL_FE1 0x00000100 /* floating point interrupt mode 1 */
#define PSL_IP 0x00000040 /* interrupt prefix */
#define PSL_IR 0x00000020 /* instruction address relocation */
#define PSL_DR 0x00000010 /* data address relocation */
#define PSL_RI 0x00000002 /* recoverable interrupt */
#define PSL_LE 0x00000001 /* endian mode (1 == le) */
#ifdef __powerpc64__
#define PSL_SF 0x8000000000000000UL /* 64-bit addressing */
#define PSL_HV 0x1000000000000000UL /* hyper-privileged mode */
#endif
#define PSL_VEC 0x02000000UL /* AltiVec vector unit available */
#define PSL_POW 0x00040000UL /* power management */
#define PSL_ILE 0x00010000UL /* interrupt endian mode (1 == le) */
#define PSL_EE 0x00008000UL /* external interrupt enable */
#define PSL_PR 0x00004000UL /* privilege mode (1 == user) */
#define PSL_FP 0x00002000UL /* floating point enable */
#define PSL_ME 0x00001000UL /* machine check enable */
#define PSL_FE0 0x00000800UL /* floating point interrupt mode 0 */
#define PSL_SE 0x00000400UL /* single-step trace enable */
#define PSL_BE 0x00000200UL /* branch trace enable */
#define PSL_FE1 0x00000100UL /* floating point interrupt mode 1 */
#define PSL_IP 0x00000040UL /* interrupt prefix */
#define PSL_IR 0x00000020UL /* instruction address relocation */
#define PSL_DR 0x00000010UL /* data address relocation */
#define PSL_PMM 0x00000004UL /* performance monitor mark */
#define PSL_RI 0x00000002UL /* recoverable interrupt */
#define PSL_LE 0x00000001UL /* endian mode (1 == le) */
#define PSL_601_MASK ~(PSL_POW|PSL_ILE|PSL_BE|PSL_RI|PSL_LE)
@ -111,7 +118,11 @@
#define PSL_MBO 0
#define PSL_MBZ 0
#ifdef __powerpc64__
#define PSL_KERNSET (PSL_SF | PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI)
#else
#define PSL_KERNSET (PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI)
#endif
#define PSL_USERSET (PSL_KERNSET | PSL_PR)
#define PSL_USERSTATIC (PSL_USERSET | PSL_IP | 0x87c0008c)

View File

@ -95,6 +95,7 @@ struct lpteg {
/* High quadword: */
#define LPTE_VSID_SHIFT 12
#define LPTE_API 0x0000000000000F80ULL
#define LPTE_WIRED 0x0000000000000010ULL
#define LPTE_LOCKED 0x0000000000000008ULL
#define LPTE_BIG 0x0000000000000004ULL /* 4kb/16Mb page */
#define LPTE_HID 0x0000000000000002ULL
@ -129,11 +130,11 @@ typedef struct lpte lpte_t;
* Extract bits from address
*/
#define ADDR_SR_SHFT 28
#define ADDR_PIDX 0x0ffff000
#define ADDR_PIDX 0x0ffff000UL
#define ADDR_PIDX_SHFT 12
#define ADDR_API_SHFT 22
#define ADDR_API_SHFT64 16
#define ADDR_POFF 0x00000fff
#define ADDR_POFF 0x00000fffUL
/*
* Bits in DSISR:
@ -190,7 +191,7 @@ extern u_int dsisr(void);
*/
#define PTBL_SHIFT PAGE_SHIFT
#define PTBL_SIZE PAGE_SIZE /* va range mapped by ptbl entry */
#define PTBL_MASK ((PDIR_SIZE - 1) & ~PAGE_MASK)
#define PTBL_MASK ((PDIR_SIZE - 1) & ~((1 << PAGE_SHIFT) - 1))
#define PTBL_NENTRIES 1024 /* number of pages mapped by ptbl */
/* Returns ptbl entry number for given va */

View File

@ -4,12 +4,16 @@
#ifndef _POWERPC_REG_H_
#define _POWERPC_REG_H_
#if defined(_KERNEL) && !defined(KLD_MODULE) && !defined(_STANDALONE)
#include "opt_compat.h"
#endif
/* Must match struct trapframe */
struct reg {
register_t fixreg[32];
register_t lr;
int cr;
int xer;
register_t cr;
register_t xer;
register_t ctr;
register_t pc;
};
@ -21,9 +25,29 @@ struct fpreg {
};
struct dbreg {
unsigned long junk;
unsigned int junk;
};
#ifdef COMPAT_FREEBSD32
/* Must match struct trapframe */
struct reg32 {
int32_t fixreg[32];
int32_t lr;
int32_t cr;
int32_t xer;
int32_t ctr;
int32_t pc;
};
struct fpreg32 {
struct fpreg data;
};
struct dbreg32 {
struct dbreg data;
};
#endif
#ifdef _KERNEL
/*
* XXX these interfaces are MI, so they should be declared in a MI place.
@ -34,6 +58,20 @@ int fill_fpregs(struct thread *, struct fpreg *);
int set_fpregs(struct thread *, struct fpreg *);
int fill_dbregs(struct thread *, struct dbreg *);
int set_dbregs(struct thread *, struct dbreg *);
#ifdef COMPAT_FREEBSD32
struct image_params;
int fill_regs32(struct thread *, struct reg32 *);
int set_regs32(struct thread *, struct reg32 *);
void ppc32_setregs(struct thread *, struct image_params *, u_long);
#define fill_fpregs32(td, reg) fill_fpregs(td,(struct fpreg *)reg)
#define set_fpregs32(td, reg) set_fpregs(td,(struct fpreg *)reg)
#define fill_dbregs32(td, reg) fill_dbregs(td,(struct dbreg *)reg)
#define set_dbregs32(td, reg) set_dbregs(td,(struct dbreg *)reg)
#endif
#endif
#endif /* _POWERPC_REG_H_ */

View File

@ -29,18 +29,27 @@
#ifndef _MACHINE_RUNQ_H_
#define _MACHINE_RUNQ_H_
#ifdef __powerpc64__
#define RQB_LEN (1UL) /* Number of priority status words. */
#define RQB_L2BPW (6UL) /* Log2(sizeof(rqb_word_t) * NBBY)). */
#else
#define RQB_LEN (2) /* Number of priority status words. */
#define RQB_L2BPW (5) /* Log2(sizeof(rqb_word_t) * NBBY)). */
#define RQB_BPW (1<<RQB_L2BPW) /* Bits in an rqb_word_t. */
#endif
#define RQB_BPW (1UL<<RQB_L2BPW) /* Bits in an rqb_word_t. */
#define RQB_BIT(pri) (1 << ((pri) & (RQB_BPW - 1)))
#define RQB_BIT(pri) (1UL << ((pri) & (RQB_BPW - 1)))
#define RQB_WORD(pri) ((pri) >> RQB_L2BPW)
#define RQB_FFS(word) (ffs(word) - 1)
#define RQB_FFS(word) (ffsl(word) - 1)
/*
* Type of run queue status word.
*/
#ifdef __powerpc64__
typedef u_int64_t rqb_word_t;
#else
typedef u_int32_t rqb_word_t;
#endif
#endif

69
sys/powerpc/include/slb.h Normal file
View File

@ -0,0 +1,69 @@
/*-
* Copyright (C) 2009 Nathan Whitehorn
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_SLB_H_
#define _MACHINE_SLB_H_
/*
* Bit definitions for segment lookaside buffer entries.
*
* PowerPC Microprocessor Family: The Programming Environments for 64-bit
* Microprocessors, section 7.4.2.1
*
* Note that these bitmasks are relative to the values for one of the two
* values for slbmte, slbmfee, and slbmfev, not the internal SLB
* representation.
*/
#define SLBV_KS 0x0000000000000800UL /* Supervisor-state prot key */
#define SLBV_KP 0x0000000000000400UL /* User-state prot key */
#define SLBV_N 0x0000000000000200UL /* No-execute protection */
#define SLBV_L 0x0000000000000100UL /* Large page selector */
#define SLBV_CLASS 0x0000000000000080UL /* Class selector */
#define SLBV_VSID_MASK 0xfffffffffffff000UL /* Virtual segment ID mask */
#define SLBV_VSID_SHIFT 12
#define KERNEL_VSID_BIT 0x0000001000000000UL /* Bit set in all kernel VSIDs */
/*
* Shift large-page VSIDs one place left. At present, they are only used in the
* kernel direct map, and we already assume in the placement of KVA that the
* CPU cannot address more than 63 bits of memory.
*/
#define KERNEL_VSID(esid, large) (((uint64_t)(esid) << (large ? 1 : 0)) | KERNEL_VSID_BIT)
#define SLBE_VALID 0x0000000008000000UL /* SLB entry valid */
#define SLBE_INDEX_MASK 0x0000000000000fffUL /* SLB index mask*/
#define SLBE_ESID_MASK 0xfffffffff0000000UL /* Effective segment ID mask */
#define SLBE_ESID_SHIFT 28
struct slb {
uint64_t slbv;
uint64_t slbe;
};
#endif /* !_MACHINE_SLB_H_ */

View File

@ -48,6 +48,7 @@ struct cpuref {
};
void pmap_cpu_bootstrap(int);
void cpudep_ap_early_bootstrap(void);
uintptr_t cpudep_ap_bootstrap(void);
void cpudep_ap_setup(void);
void machdep_ap_bootstrap(void);

View File

@ -37,6 +37,9 @@
__asm __volatile("mfspr %0,%1" : "=r"(val) : "K"(reg)); \
val; } )
#ifndef __powerpc64__
/* The following routines allow manipulation of the full 64-bit width
* of SPRs on 64 bit CPUs in bridge mode */
@ -74,6 +77,8 @@
: "=r"(scratch), "=r"(val) : "K"(reg), "r"(32), "r"(1)); \
val; } )
#endif
#endif /* _LOCORE */
/*
@ -143,13 +148,26 @@
#define IBM401E2 0x0025
#define IBM401F2 0x0026
#define IBM401G2 0x0027
#define IBMRS64II 0x0033
#define IBMRS64III 0x0034
#define IBMPOWER4 0x0035
#define IBMRS64III_2 0x0036
#define IBMRS64IV 0x0037
#define IBMPOWER4PLUS 0x0038
#define IBM970 0x0039
#define IBMPOWER5 0x003a
#define IBMPOWER5PLUS 0x003b
#define IBM970FX 0x003c
#define IBMPOWER3 0x0041
#define IBMPOWER6 0x003e
#define IBMPOWER7 0x003f
#define IBMPOWER3 0x0040
#define IBMPOWER3PLUS 0x0041
#define IBM970MP 0x0044
#define IBM970GX 0x0045
#define MPC860 0x0050
#define IBMCELLBE 0x0070
#define MPC8240 0x0081
#define PA6T 0x0090
#define IBM405GP 0x4011
#define IBM405L 0x4161
#define IBM750FX 0x7000

View File

@ -46,12 +46,17 @@
#define KERNEL_SR 13
#define KERNEL2_SR 14
#define KERNEL3_SR 15
#define KERNEL_VSIDBITS 0xfffff
#define KERNEL_VSIDBITS 0xfffffUL
#define KERNEL_SEGMENT (0xfffff0 + KERNEL_SR)
#define KERNEL2_SEGMENT (0xfffff0 + KERNEL2_SR)
#define EMPTY_SEGMENT 0xfffff0
#define USER_ADDR ((void *)(USER_SR << ADDR_SR_SHFT))
#define SEGMENT_LENGTH 0x10000000
#define SEGMENT_MASK 0xf0000000
#ifdef __powerpc64__
#define USER_ADDR 0xcffffffff0000000UL
#else
#define USER_ADDR ((uintptr_t)USER_SR << ADDR_SR_SHFT)
#endif
#define SEGMENT_LENGTH 0x10000000UL
#define SEGMENT_INVMASK 0x0fffffffUL
#define SEGMENT_MASK ~SEGMENT_INVMASK
#endif /* !_MACHINE_SR_H_ */

View File

@ -39,7 +39,9 @@
#define EXC_RST 0x0100 /* Reset; all but IBM4xx */
#define EXC_MCHK 0x0200 /* Machine Check */
#define EXC_DSI 0x0300 /* Data Storage Interrupt */
#define EXC_DSE 0x0380 /* Data Segment Interrupt */
#define EXC_ISI 0x0400 /* Instruction Storage Interrupt */
#define EXC_ISE 0x0480 /* Instruction Segment Interrupt */
#define EXC_EXI 0x0500 /* External Interrupt */
#define EXC_ALI 0x0600 /* Alignment Interrupt */
#define EXC_PGM 0x0700 /* Program Interrupt */

View File

@ -44,10 +44,25 @@ typedef struct __mcontext {
int mc_len; /* sizeof(__mcontext) */
uint64_t mc_avec[32*2]; /* vector register file */
uint32_t mc_av[2];
uint32_t mc_frame[41];
register_t mc_frame[42];
uint64_t mc_fpreg[33];
} mcontext_t __aligned(16);
#if defined(_KERNEL) && defined(__powerpc64__)
/*
 * 32-bit machine context, used by 64-bit kernels (under COMPAT_FREEBSD32)
 * to deliver signals to and restore contexts of 32-bit processes.  The
 * layout presumably matches the 32-bit userland mcontext_t — frame
 * registers are stored as uint32_t rather than register_t.
 */
typedef struct __mcontext32 {
	int		mc_vers;
	int		mc_flags;
#define _MC_FP_VALID	0x01
#define _MC_AV_VALID	0x02
	int		mc_onstack;	/* saved onstack flag */
	int		mc_len;		/* sizeof(__mcontext) */
	uint64_t	mc_avec[32*2];	/* vector register file */
	uint32_t	mc_av[2];
	uint32_t	mc_frame[42];	/* GPRs + supervisor regs (see mc_gpr) */
	uint64_t	mc_fpreg[33];	/* floating-point register file */
} mcontext32_t __aligned(16);
#endif
/* GPRs and supervisor-level regs */
#define mc_gpr mc_frame
#define mc_lr mc_frame[32]

View File

@ -79,27 +79,37 @@
* Would like to have MAX addresses = 0, but this doesn't (currently) work
*/
#if !defined(LOCORE)
#define VM_MIN_ADDRESS ((vm_offset_t)0)
#define VM_MAXUSER_ADDRESS ((vm_offset_t)0x7ffff000)
#ifdef __powerpc64__
#define VM_MIN_ADDRESS (0x0000000000000000UL)
#define VM_MAXUSER_ADDRESS (0x7ffffffffffff000UL)
#define VM_MAX_ADDRESS (0xffffffffffffffffUL)
#else
#define VM_MIN_ADDRESS ((vm_offset_t)0)
#define VM_MAXUSER_ADDRESS ((vm_offset_t)0x7ffff000)
#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS
#endif
#else /* LOCORE */
#ifndef __powerpc64__
#define VM_MIN_ADDRESS 0
#define VM_MAXUSER_ADDRESS 0x7ffff000
#endif
#endif /* LOCORE */
#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS
#define FREEBSD32_USRSTACK 0x7ffff000
#if defined(AIM) /* AIM */
#ifdef AIM
#define KERNBASE 0x00100000UL /* start of kernel virtual */
#define KERNBASE 0x00100000 /* start of kernel virtual */
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)(KERNEL_SR << ADDR_SR_SHFT))
#ifdef __powerpc64__
#define VM_MIN_KERNEL_ADDRESS 0xc000000000000000UL
#define VM_MAX_KERNEL_ADDRESS 0xc0000001c7ffffffUL
#define VM_MAX_SAFE_KERNEL_ADDRESS VM_MAX_KERNEL_ADDRESS
#else
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)KERNEL_SR << ADDR_SR_SHFT)
#define VM_MAX_SAFE_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 2*SEGMENT_LENGTH -1)
#define VM_MAX_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 3*SEGMENT_LENGTH - 1)
#endif
/*
* Use the direct-mapped BAT registers for UMA small allocs. This
@ -107,7 +117,7 @@
*/
#define UMA_MD_SMALL_ALLOC
#else
#else /* Book-E */
/*
* Kernel CCSRBAR location. We make this the reset location.
@ -182,4 +192,14 @@ struct pmap_physseg {
#define VM_KMEM_SIZE (12 * 1024 * 1024)
#endif
#ifdef __powerpc64__
#ifndef VM_KMEM_SIZE_SCALE
#define VM_KMEM_SIZE_SCALE (3)
#endif
#ifndef VM_KMEM_SIZE_MAX
#define VM_KMEM_SIZE_MAX 0x1c0000000 /* 7 GB */
#endif
#endif
#endif /* _MACHINE_VMPARAM_H_ */

View File

@ -220,7 +220,7 @@ cpu_setup(u_int cpuid)
}
if (cpu_est_clockrate(0, &cps) == 0)
printf(", %lld.%02lld MHz", cps / 1000000, (cps / 10000) % 100);
printf(", %jd.%02jd MHz", cps / 1000000, (cps / 10000) % 100);
printf("\n");
cpu_features |= cp->features;

View File

@ -79,7 +79,7 @@ void
db_show_mdpcpu(struct pcpu *pc)
{
db_printf("PPC: hwref = %#x\n", pc->pc_hwref);
db_printf("PPC: hwref = %#zx\n", pc->pc_hwref);
db_printf("PPC: ipimask = %#x\n", pc->pc_ipimask);
db_printf("PPC: pir = %#x\n", pc->pc_pir);
}

View File

@ -53,6 +53,12 @@ static db_varfcn_t db_frame;
#define DB_OFFSET(x) (db_expr_t *)offsetof(struct trapframe, x)
#ifdef __powerpc64__
#define CALLOFFSET 8 /* Include TOC reload slot */
#else
#define CALLOFFSET 4
#endif
struct db_variable db_regs[] = {
{ "r0", DB_OFFSET(fixreg[0]), db_frame },
{ "r1", DB_OFFSET(fixreg[1]), db_frame },
@ -109,11 +115,11 @@ struct db_variable *db_eregs = db_regs + sizeof (db_regs)/sizeof (db_regs[0]);
static int
db_frame(struct db_variable *vp, db_expr_t *valuep, int op)
{
uint32_t *reg;
register_t *reg;
if (kdb_frame == NULL)
return (0);
reg = (uint32_t*)((uintptr_t)kdb_frame + (uintptr_t)vp->valuep);
reg = (register_t*)((uintptr_t)kdb_frame + (uintptr_t)vp->valuep);
if (op == DB_VAR_GET)
*valuep = *reg;
else
@ -164,8 +170,13 @@ db_backtrace(struct thread *td, db_addr_t fp, int count)
stackframe = *(db_addr_t *)stackframe;
next_frame:
#ifdef __powerpc64__
/* The saved arg values start at frame[6] */
args = (db_addr_t *)(stackframe + 48);
#else
/* The saved arg values start at frame[2] */
args = (db_addr_t *)(stackframe + 8);
#endif
if (stackframe < PAGE_SIZE)
break;
@ -178,13 +189,21 @@ db_backtrace(struct thread *td, db_addr_t fp, int count)
* 4 to convert into calling address (as opposed to
* return address)
*/
#ifdef __powerpc64__
lr = *(db_addr_t *)(stackframe + 16) - 4;
#else
lr = *(db_addr_t *)(stackframe + 4) - 4;
#endif
if ((lr & 3) || (lr < 0x100)) {
db_printf("saved LR(0x%x) is invalid.", lr);
db_printf("saved LR(0x%zx) is invalid.", lr);
break;
}
#ifdef __powerpc64__
db_printf("0x%16lx: ", stackframe);
#else
db_printf("0x%08x: ", stackframe);
#endif
/*
* The trap code labels the return addresses from the
@ -192,24 +211,33 @@ db_backtrace(struct thread *td, db_addr_t fp, int count)
* to determine if the callframe has to traverse a saved
* trap context
*/
if ((lr + 4 == (db_addr_t) &trapexit) ||
(lr + 4 == (db_addr_t) &asttrapexit)) {
if ((lr + CALLOFFSET == (db_addr_t) &trapexit) ||
(lr + CALLOFFSET == (db_addr_t) &asttrapexit)) {
const char *trapstr;
struct trapframe *tf = (struct trapframe *)
(stackframe+8);
struct trapframe *tf = (struct trapframe *)(args);
db_printf("%s ", tf->srr1 & PSL_PR ? "user" : "kernel");
switch (tf->exc) {
case EXC_DSI:
/* XXX take advantage of the union. */
db_printf("DSI %s trap @ %#x by ",
db_printf("DSI %s trap @ %#zx by ",
(tf->cpu.aim.dsisr & DSISR_STORE) ? "write"
: "read", tf->cpu.aim.dar);
goto print_trap;
case EXC_ALI:
/* XXX take advantage of the union. */
db_printf("ALI trap @ %#x (xSR %#x) ",
tf->cpu.aim.dar, tf->cpu.aim.dsisr);
db_printf("ALI trap @ %#zx (xSR %#x) ",
tf->cpu.aim.dar,
(uint32_t)tf->cpu.aim.dsisr);
goto print_trap;
#ifdef __powerpc64__
case EXC_DSE:
db_printf("DSE trap @ %#zx by ",
tf->cpu.aim.dar);
goto print_trap;
case EXC_ISE:
db_printf("ISE trap @ %#zx by ", tf->srr0);
goto print_trap;
#endif
case EXC_ISI: trapstr = "ISI"; break;
case EXC_PGM: trapstr = "PGM"; break;
case EXC_SC: trapstr = "SC"; break;
@ -232,7 +260,7 @@ db_backtrace(struct thread *td, db_addr_t fp, int count)
if (trapstr != NULL) {
db_printf("%s trap by ", trapstr);
} else {
db_printf("trap %#x by ", tf->exc);
db_printf("trap %#zx by ", tf->exc);
}
print_trap:
@ -242,15 +270,17 @@ db_backtrace(struct thread *td, db_addr_t fp, int count)
sym = db_search_symbol(lr, DB_STGY_ANY, &diff);
db_symbol_values(sym, &symname, 0);
if (symname == NULL || !strcmp(symname, "end")) {
db_printf("%#x: srr1=%#x\n", lr, tf->srr1);
db_printf("%#zx: srr1=%#zx\n", lr, tf->srr1);
} else {
db_printf("%s+%#x: srr1=%#x\n", symname, diff,
db_printf("%s+%#zx: srr1=%#zx\n", symname, diff,
tf->srr1);
}
db_printf("%-10s r1=%#x cr=%#x xer=%#x ctr=%#x",
"", tf->fixreg[1], tf->cr, tf->xer, tf->ctr);
db_printf("%-10s r1=%#zx cr=%#x xer=%#x ctr=%#zx",
"", tf->fixreg[1], (uint32_t)tf->cr,
(uint32_t)tf->xer, tf->ctr);
if (tf->exc == EXC_DSI)
db_printf(" sr=%#x", tf->cpu.aim.dsisr);
db_printf(" sr=%#x",
(uint32_t)tf->cpu.aim.dsisr);
db_printf("\n");
stackframe = (db_addr_t) tf->fixreg[1];
if (kernel_only && (tf->srr1 & PSL_PR))
@ -263,12 +293,12 @@ db_backtrace(struct thread *td, db_addr_t fp, int count)
sym = db_search_symbol(lr, DB_STGY_ANY, &diff);
db_symbol_values(sym, &symname, 0);
if (symname == NULL || !strcmp(symname, "end"))
db_printf("at %x", lr);
db_printf("at %zx", lr);
else
db_printf("at %s+%#x", symname, diff);
db_printf("at %s+%#zx", symname, diff);
if (full)
/* Print all the args stored in that stackframe. */
db_printf("(%x, %x, %x, %x, %x, %x, %x, %x)",
db_printf("(%zx, %zx, %zx, %zx, %zx, %zx, %zx, %zx)",
args[0], args[1], args[2], args[3],
args[4], args[5], args[6], args[7]);
db_printf("\n");

View File

@ -28,6 +28,9 @@
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#define __ELF_WORD_SIZE 32
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/malloc.h>
@ -46,11 +49,23 @@
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/reg.h>
#include <machine/md_var.h>
#ifdef __powerpc64__
#include <compat/freebsd32/freebsd32_proto.h>
#include <compat/freebsd32/freebsd32_util.h>
extern const char *freebsd32_syscallnames[];
#endif
struct sysentvec elf32_freebsd_sysvec = {
.sv_size = SYS_MAXSYSCALL,
#ifdef __powerpc64__
.sv_table = freebsd32_sysent,
#else
.sv_table = sysent,
#endif
.sv_mask = 0,
.sv_sigsize = 0,
.sv_sigtbl = NULL,
@ -59,8 +74,8 @@ struct sysentvec elf32_freebsd_sysvec = {
.sv_transtrap = NULL,
.sv_fixup = __elfN(freebsd_fixup),
.sv_sendsig = sendsig,
.sv_sigcode = sigcode,
.sv_szsigcode = &szsigcode,
.sv_sigcode = sigcode32,
.sv_szsigcode = &szsigcode32,
.sv_prepsyscall = NULL,
.sv_name = "FreeBSD ELF32",
.sv_coredump = __elfN(coredump),
@ -68,18 +83,27 @@ struct sysentvec elf32_freebsd_sysvec = {
.sv_minsigstksz = MINSIGSTKSZ,
.sv_pagesize = PAGE_SIZE,
.sv_minuser = VM_MIN_ADDRESS,
.sv_stackprot = VM_PROT_ALL,
#ifdef __powerpc64__
.sv_maxuser = VM_MAXUSER_ADDRESS,
.sv_usrstack = FREEBSD32_USRSTACK,
.sv_psstrings = FREEBSD32_PS_STRINGS,
.sv_copyout_strings = freebsd32_copyout_strings,
.sv_setregs = ppc32_setregs,
.sv_syscallnames = freebsd32_syscallnames,
#else
.sv_maxuser = VM_MAXUSER_ADDRESS,
.sv_usrstack = USRSTACK,
.sv_psstrings = PS_STRINGS,
.sv_stackprot = VM_PROT_ALL,
.sv_copyout_strings = exec_copyout_strings,
.sv_setregs = exec_setregs,
.sv_syscallnames = syscallnames,
#endif
.sv_fixlimit = NULL,
.sv_maxssiz = NULL,
.sv_flags = SV_ABI_FREEBSD | SV_ILP32,
.sv_set_syscall_retval = cpu_set_syscall_retval,
.sv_fetch_syscall_args = cpu_fetch_syscall_args,
.sv_syscallnames = syscallnames,
};
static Elf32_Brandinfo freebsd_brand_info = {
@ -89,7 +113,11 @@ static Elf32_Brandinfo freebsd_brand_info = {
.emul_path = NULL,
.interp_path = "/libexec/ld-elf.so.1",
.sysvec = &elf32_freebsd_sysvec,
#ifdef __powerpc64__
.interp_newpath = "/libexec/ld-elf32.so.1",
#else
.interp_newpath = NULL,
#endif
.brand_note = &elf32_freebsd_brandnote,
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
@ -114,14 +142,13 @@ SYSINIT(oelf32, SI_SUB_EXEC, SI_ORDER_ANY,
(sysinit_cfunc_t) elf32_insert_brand_entry,
&freebsd_brand_oinfo);
void
elf32_dump_thread(struct thread *td __unused, void *dst __unused,
size_t *off __unused)
{
}
#ifndef __powerpc64__
/* Process one elf relocation with addend. */
static int
elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
@ -140,8 +167,8 @@ elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
break;
case ELF_RELOC_RELA:
rela = (const Elf_Rela *)data;
where = (Elf_Addr *) (relocbase + rela->r_offset);
hwhere = (Elf_Half *) (relocbase + rela->r_offset);
where = (Elf_Addr *) ((uintptr_t)relocbase + rela->r_offset);
hwhere = (Elf_Half *) ((uintptr_t)relocbase + rela->r_offset);
addend = rela->r_addend;
rtype = ELF_R_TYPE(rela->r_info);
symidx = ELF_R_SYM(rela->r_info);
@ -239,3 +266,4 @@ elf_cpu_unload_file(linker_file_t lf __unused)
return (0);
}
#endif

View File

@ -0,0 +1,212 @@
/*-
* Copyright 1996-1998 John D. Polstra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/fcntl.h>
#include <sys/sysent.h>
#include <sys/imgact_elf.h>
#include <sys/syscall.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/linker.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/md_var.h>
/*
 * ABI descriptor for native 64-bit FreeBSD ELF executables on powerpc64:
 * system call table, signal delivery machinery, address-space limits, and
 * exec-time register setup.
 */
struct sysentvec elf64_freebsd_sysvec = {
	.sv_size	= SYS_MAXSYSCALL,
	.sv_table	= sysent,
	.sv_mask	= 0,
	.sv_sigsize	= 0,
	.sv_sigtbl	= NULL,
	.sv_errsize	= 0,
	.sv_errtbl	= NULL,
	.sv_transtrap	= NULL,
	.sv_fixup	= __elfN(freebsd_fixup),
	.sv_sendsig	= sendsig,
	.sv_sigcode	= sigcode64,		/* 64-bit signal trampoline */
	.sv_szsigcode	= &szsigcode64,
	.sv_prepsyscall	= NULL,
	.sv_name	= "FreeBSD ELF64",
	.sv_coredump	= __elfN(coredump),
	.sv_imgact_try	= NULL,
	.sv_minsigstksz	= MINSIGSTKSZ,
	.sv_pagesize	= PAGE_SIZE,
	.sv_minuser	= VM_MIN_ADDRESS,
	.sv_maxuser	= VM_MAXUSER_ADDRESS,
	.sv_usrstack	= USRSTACK,
	.sv_psstrings	= PS_STRINGS,
	.sv_stackprot	= VM_PROT_ALL,
	.sv_copyout_strings = exec_copyout_strings,
	.sv_setregs	= exec_setregs,
	.sv_fixlimit	= NULL,
	.sv_maxssiz	= NULL,
	.sv_flags	= SV_ABI_FREEBSD | SV_LP64,
	.sv_set_syscall_retval = cpu_set_syscall_retval,
	.sv_fetch_syscall_args = cpu_fetch_syscall_args,
	.sv_syscallnames = syscallnames,
};
/*
 * ELF brand entries recognizing 64-bit FreeBSD binaries: one for the
 * current /libexec interpreter path and one ("oinfo") for the historical
 * /usr/libexec location.  Both are registered at SI_SUB_EXEC time.
 */
static Elf64_Brandinfo freebsd_brand_info = {
	.brand		= ELFOSABI_FREEBSD,
	.machine	= EM_PPC64,
	.compat_3_brand	= "FreeBSD",
	.emul_path	= NULL,
	.interp_path	= "/libexec/ld-elf.so.1",
	.sysvec		= &elf64_freebsd_sysvec,
	.interp_newpath	= NULL,
	.brand_note	= &elf64_freebsd_brandnote,
	.flags		= BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};

SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_ANY,
    (sysinit_cfunc_t) elf64_insert_brand_entry,
    &freebsd_brand_info);

static Elf64_Brandinfo freebsd_brand_oinfo = {
	.brand		= ELFOSABI_FREEBSD,
	.machine	= EM_PPC64,
	.compat_3_brand	= "FreeBSD",
	.emul_path	= NULL,
	.interp_path	= "/usr/libexec/ld-elf.so.1",	/* legacy location */
	.sysvec		= &elf64_freebsd_sysvec,
	.interp_newpath	= NULL,
	.brand_note	= &elf64_freebsd_brandnote,
	.flags		= BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};

SYSINIT(oelf64, SI_SUB_EXEC, SI_ORDER_ANY,
    (sysinit_cfunc_t) elf64_insert_brand_entry,
    &freebsd_brand_oinfo);
/*
 * Machine-dependent per-thread core dump hook.  powerpc64 contributes no
 * extra ELF notes, so this is a no-op.
 */
void
elf64_dump_thread(struct thread *td __unused, void *dst __unused,
    size_t *off __unused)
{
}
/* Process one elf relocation with addend. */
static int
elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
    int type, int local, elf_lookup_fn lookup)
{
	Elf_Addr *where;	/* target doubleword being patched */
	Elf_Addr addr;
	Elf_Addr addend;
	Elf_Word rtype, symidx;
	const Elf_Rela *rela;

	/*
	 * Decode the relocation record.  Only RELA records exist on this
	 * platform; the 'local' flag is currently unused here.
	 */
	switch (type) {
	case ELF_RELOC_REL:
		panic("PPC only supports RELA relocations");
		break;
	case ELF_RELOC_RELA:
		rela = (const Elf_Rela *)data;
		where = (Elf_Addr *) (relocbase + rela->r_offset);
		addend = rela->r_addend;
		rtype = ELF_R_TYPE(rela->r_info);
		symidx = ELF_R_SYM(rela->r_info);
		break;
	default:
		panic("elf_reloc: unknown relocation mode %d\n", type);
	}

	switch (rtype) {
	case R_PPC_NONE:
		break;
	case R_PPC64_ADDR64:	/* doubleword64 S + A */
		addr = lookup(lf, symidx, 1);
		if (addr == 0)
			return -1;	/* symbol lookup failed */
		addr += addend;
		*where = addr;
		break;
	case R_PPC_RELATIVE:	/* doubleword64 B + A */
		*where = elf_relocaddr(lf, relocbase + addend);
		break;
	case R_PPC_JMP_SLOT:	/* function descriptor copy */
		addr = lookup(lf, symidx, 1);
		/*
		 * Copy the full three-doubleword function descriptor, then
		 * flush the data cache so the stored descriptor is visible
		 * before it can be used.
		 */
		memcpy(where, (Elf_Addr *)addr, 3*sizeof(Elf_Addr));
		__asm __volatile("dcbst 0,%0; sync" :: "r"(where) : "memory");
		break;
	default:
		printf("kldload: unexpected relocation type %d\n",
		    (int) rtype);
		return -1;
	}
	return(0);
}
/* Apply one non-local relocation in the given linker file. */
int
elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type,
    elf_lookup_fn lookup)
{

	return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup));
}
/* Apply one local-symbol relocation in the given linker file. */
int
elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data,
    int type, elf_lookup_fn lookup)
{

	return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup));
}
/*
 * Post-load hook for a linker file: make freshly written module text
 * visible to instruction fetch by syncing the instruction cache.
 */
int
elf_cpu_load_file(linker_file_t lf)
{

	/*
	 * Only sync the cache for non-kernel modules; id 1 is presumably
	 * the kernel image itself — confirm against linker(9).
	 */
	if (lf->id != 1)
		__syncicache(lf->address, lf->size);

	return (0);
}
/*
 * Pre-unload hook for a linker file; no machine-dependent cleanup is
 * required on powerpc64.
 */
int
elf_cpu_unload_file(linker_file_t lf __unused)
{
	return (0);
}

View File

@ -92,6 +92,28 @@ __FBSDID("$FreeBSD$");
#include <machine/trap.h>
#include <machine/vmparam.h>
#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32_signal.h>
#include <compat/freebsd32/freebsd32_util.h>
#include <compat/freebsd32/freebsd32_proto.h>
/*
 * 32-bit view of a ucontext_t, mirroring the layout a 32-bit process
 * expects: the uc_link pointer and stack fields are narrowed to 32 bits.
 */
typedef struct __ucontext32 {
	sigset_t		uc_sigmask;
	mcontext32_t		uc_mcontext;
	uint32_t		uc_link;	/* 32-bit user pointer */
	struct sigaltstack32	uc_stack;
	uint32_t		uc_flags;
	uint32_t		__spare__[4];
} ucontext32_t;

/* Signal frame placed on a 32-bit process's stack during delivery. */
struct sigframe32 {
	ucontext32_t		sf_uc;
	struct siginfo32	sf_si;
};
static int grab_mcontext32(struct thread *td, mcontext32_t *, int flags);
#endif
static int grab_mcontext(struct thread *, mcontext_t *, int);
void
@ -102,6 +124,10 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
struct sigframe sf;
struct thread *td;
struct proc *p;
#ifdef COMPAT_FREEBSD32
struct siginfo32 siginfo32;
struct sigframe32 sf32;
#endif
size_t sfpsize;
caddr_t sfp, usfp;
int oonstack, rndfsize;
@ -129,25 +155,61 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
tf->cpu.booke.dear : tf->srr0);
#endif
sig = ksi->ksi_signo;
code = ksi->ksi_code;
sfp = (caddr_t)&sf;
sfpsize = sizeof(sf);
rndfsize = ((sizeof(sf) + 15) / 16) * 16;
#ifdef COMPAT_FREEBSD32
if (p->p_sysent->sv_flags & SV_ILP32) {
siginfo_to_siginfo32(&ksi->ksi_info, &siginfo32);
sig = siginfo32.si_signo;
code = siginfo32.si_code;
sfp = (caddr_t)&sf32;
sfpsize = sizeof(sf32);
rndfsize = ((sizeof(sf32) + 15) / 16) * 16;
/*
* Save user context
*/
/*
* Save user context
*/
memset(&sf, 0, sizeof(sf));
grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
memset(&sf32, 0, sizeof(sf32));
grab_mcontext32(td, &sf32.sf_uc.uc_mcontext, 0);
sf.sf_uc.uc_sigmask = *mask;
sf.sf_uc.uc_stack = td->td_sigstk;
sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
sf32.sf_uc.uc_sigmask = *mask;
sf32.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp;
sf32.sf_uc.uc_stack.ss_size = (uint32_t)td->td_sigstk.ss_size;
sf32.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
sf32.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
} else {
#endif
sig = ksi->ksi_signo;
code = ksi->ksi_code;
sfp = (caddr_t)&sf;
sfpsize = sizeof(sf);
#ifdef __powerpc64__
/*
* 64-bit PPC defines a 288 byte scratch region
* below the stack.
*/
rndfsize = 288 + ((sizeof(sf) + 47) / 48) * 48;
#else
rndfsize = ((sizeof(sf) + 15) / 16) * 16;
#endif
/*
* Save user context
*/
memset(&sf, 0, sizeof(sf));
grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
sf.sf_uc.uc_sigmask = *mask;
sf.sf_uc.uc_stack = td->td_sigstk;
sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
#ifdef COMPAT_FREEBSD32
}
#endif
CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
catcher, sig);
@ -187,15 +249,33 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
tf->lr = (register_t)catcher;
tf->fixreg[1] = (register_t)usfp;
tf->fixreg[FIRSTARG] = sig;
#ifdef COMPAT_FREEBSD32
tf->fixreg[FIRSTARG+2] = (register_t)usfp +
(p->p_sysent->sv_flags & SV_ILP32) ?
offsetof(struct sigframe32, sf_uc) :
offsetof(struct sigframe, sf_uc);
#else
tf->fixreg[FIRSTARG+2] = (register_t)usfp +
offsetof(struct sigframe, sf_uc);
#endif
if (SIGISMEMBER(psp->ps_siginfo, sig)) {
/*
* Signal handler installed with SA_SIGINFO.
*/
#ifdef COMPAT_FREEBSD32
if (p->p_sysent->sv_flags & SV_ILP32) {
sf32.sf_si = siginfo32;
tf->fixreg[FIRSTARG+1] = (register_t)usfp +
offsetof(struct sigframe32, sf_si);
sf32.sf_si = siginfo32;
} else {
#endif
tf->fixreg[FIRSTARG+1] = (register_t)usfp +
offsetof(struct sigframe, sf_si);
sf.sf_si = ksi->ksi_info;
#ifdef COMPAT_FREEBSD32
}
#endif
} else {
/* Old FreeBSD-style arguments. */
tf->fixreg[FIRSTARG+1] = code;
@ -380,7 +460,7 @@ set_mcontext(struct thread *td, const mcontext_t *mcp)
memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));
#ifdef AIM
#ifdef AIM
if (mcp->mc_flags & _MC_FP_VALID) {
if ((pcb->pcb_flags & PCB_FPU) != PCB_FPU) {
critical_enter();
@ -401,7 +481,7 @@ set_mcontext(struct thread *td, const mcontext_t *mcp)
pcb->pcb_vec.vrsave = mcp->mc_vrsave;
memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec));
}
#endif
#endif
return (0);
}
@ -414,10 +494,17 @@ exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
struct trapframe *tf;
register_t argc;
#ifdef __powerpc64__
register_t entry_desc[3];
#endif
tf = trapframe(td);
bzero(tf, sizeof *tf);
#ifdef __powerpc64__
tf->fixreg[1] = -roundup(-stack + 48, 16);
#else
tf->fixreg[1] = -roundup(-stack + 8, 16);
#endif
/*
* Set up arguments for _start():
@ -452,11 +539,59 @@ exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
tf->fixreg[7] = 0; /* termination vector */
tf->fixreg[8] = (register_t)imgp->ps_strings; /* NetBSD extension */
#ifdef __powerpc64__
/*
* For 64-bit, we need to disentangle the function descriptor
*
* 0. entry point
* 1. TOC value (r2)
* 2. Environment pointer (r11)
*/
(void)copyin((void *)imgp->entry_addr, entry_desc, sizeof(entry_desc));
tf->srr0 = entry_desc[0] + imgp->reloc_base;
tf->fixreg[2] = entry_desc[1] + imgp->reloc_base;
tf->fixreg[11] = entry_desc[2] + imgp->reloc_base;
tf->srr1 = PSL_SF | PSL_USERSET | PSL_FE_DFLT;
if (mfmsr() & PSL_HV)
tf->srr1 |= PSL_HV;
#else
tf->srr0 = imgp->entry_addr;
tf->srr1 = PSL_USERSET | PSL_FE_DFLT;
#endif
td->td_pcb->pcb_flags = 0;
}
#ifdef COMPAT_FREEBSD32
/*
 * exec_setregs() analogue for 32-bit binaries running on a 64-bit kernel:
 * build the initial user register state for _start().  Differs from the
 * native 64-bit path in using 32-bit (uint32_t) stack slots for the
 * argument vectors and clearing MSR[SF] so the process executes in 32-bit
 * mode.
 */
void
ppc32_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf;
	uint32_t argc;

	tf = trapframe(td);
	bzero(tf, sizeof *tf);
	/* 32-bit ABI: 8-byte initial back chain area, 16-byte alignment. */
	tf->fixreg[1] = -roundup(-stack + 8, 16);

	argc = fuword32((void *)stack);

	td->td_retval[0] = argc;
	td->td_retval[1] = stack + sizeof(uint32_t);
	tf->fixreg[3] = argc;
	tf->fixreg[4] = stack + sizeof(uint32_t);	/* argv */
	tf->fixreg[5] = stack + (2 + argc)*sizeof(uint32_t); /* envp */
	tf->fixreg[6] = 0;				/* auxiliary vector */
	tf->fixreg[7] = 0;				/* termination vector */
	tf->fixreg[8] = (register_t)imgp->ps_strings;	/* NetBSD extension */

	tf->srr0 = imgp->entry_addr;
	tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
	tf->srr1 &= ~PSL_SF;	/* run the process in 32-bit mode */
	if (mfmsr() & PSL_HV)
		tf->srr1 |= PSL_HV;
	td->td_pcb->pcb_flags = 0;
}
#endif
int
fill_regs(struct thread *td, struct reg *regs)
@ -524,7 +659,204 @@ set_fpregs(struct thread *td, struct fpreg *fpregs)
return (0);
}
#ifdef COMPAT_FREEBSD32
/*
 * Load a 32-bit register set (the ptrace/procfs view) into the thread's
 * trap frame.  Always succeeds.
 */
int
set_regs32(struct thread *td, struct reg32 *regs)
{
	struct trapframe *frame = td->td_frame;
	int reg;

	/* General-purpose registers first, then the special registers. */
	for (reg = 0; reg < 32; reg++)
		frame->fixreg[reg] = regs->fixreg[reg];

	frame->lr = regs->lr;
	frame->cr = regs->cr;
	frame->xer = regs->xer;
	frame->ctr = regs->ctr;
	frame->srr0 = regs->pc;

	return (0);
}
/*
 * Export the thread's trap frame as a 32-bit register set for
 * ptrace/procfs consumers; values are narrowed to the reg32 layout.
 */
int
fill_regs32(struct thread *td, struct reg32 *regs)
{
	struct trapframe *frame = td->td_frame;
	int reg;

	for (reg = 0; reg < 32; reg++)
		regs->fixreg[reg] = frame->fixreg[reg];

	regs->lr = frame->lr;
	regs->cr = frame->cr;
	regs->xer = frame->xer;
	regs->ctr = frame->ctr;
	regs->pc = frame->srr0;

	return (0);
}
/*
 * Fetch the current thread context in 32-bit form: grab the native 64-bit
 * mcontext and narrow each frame register to 32 bits.  Returns any error
 * from grab_mcontext(), otherwise 0.
 *
 * NOTE(review): mc_len is copied from the 64-bit context rather than set
 * to sizeof(mcontext32_t) — verify consumers expect that.
 */
static int
grab_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
{
	mcontext_t ctx64;
	int err, reg;

	err = grab_mcontext(td, &ctx64, flags);
	if (err != 0)
		return (err);

	mcp->mc_vers = ctx64.mc_vers;
	mcp->mc_flags = ctx64.mc_flags;
	mcp->mc_onstack = ctx64.mc_onstack;
	mcp->mc_len = ctx64.mc_len;
	memcpy(mcp->mc_avec, ctx64.mc_avec, sizeof(ctx64.mc_avec));
	memcpy(mcp->mc_av, ctx64.mc_av, sizeof(ctx64.mc_av));
	for (reg = 0; reg < 42; reg++)
		mcp->mc_frame[reg] = ctx64.mc_frame[reg];
	memcpy(mcp->mc_fpreg, ctx64.mc_fpreg, sizeof(ctx64.mc_fpreg));

	return (0);
}
/*
 * get_mcontext() equivalent for 32-bit compat processes: grab the narrowed
 * context, then recompute mc_onstack from the live user stack pointer.
 */
static int
get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
{
	int error;

	error = grab_mcontext32(td, mcp, flags);
	if (error == 0) {
		/*
		 * NOTE(review): this locks curthread's process while reading
		 * td's frame; harmless when td == curthread, but confirm
		 * callers never pass another thread.
		 */
		PROC_LOCK(curthread->td_proc);
		mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
		PROC_UNLOCK(curthread->td_proc);
	}

	return (error);
}
/*
 * Install a 32-bit mcontext by widening it into the native 64-bit layout
 * and handing the result to set_mcontext().  Frame registers are
 * zero-extended (uint32_t source).
 */
static int
set_mcontext32(struct thread *td, const mcontext32_t *mcp)
{
	mcontext_t wide;
	int reg;

	wide.mc_vers = mcp->mc_vers;
	wide.mc_flags = mcp->mc_flags;
	wide.mc_onstack = mcp->mc_onstack;
	wide.mc_len = mcp->mc_len;
	memcpy(wide.mc_avec, mcp->mc_avec, sizeof(wide.mc_avec));
	memcpy(wide.mc_av, mcp->mc_av, sizeof(wide.mc_av));
	for (reg = 0; reg < 42; reg++)
		wide.mc_frame[reg] = mcp->mc_frame[reg];
	memcpy(wide.mc_fpreg, mcp->mc_fpreg, sizeof(wide.mc_fpreg));

	return (set_mcontext(td, &wide));
}
#ifdef COMPAT_FREEBSD32
/*
 * 32-bit compat sigreturn(2): restore the machine context and signal mask
 * saved in the user-supplied ucontext.  Returns EJUSTRETURN on success so
 * the syscall return path does not clobber the restored registers.
 */
int
freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap)
{
	ucontext32_t uc;
	int error;

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	error = set_mcontext32(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/* Restore the signal mask saved when the signal was delivered. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
	    td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);

	return (EJUSTRETURN);
}
/*
* The first two fields of a ucontext_t are the signal mask and the machine
* context. The next field is uc_link; we want to avoid destroying the link
* when copying out contexts.
*/
#define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link)
/*
 * 32-bit compat getcontext(2): copy the caller's machine context and
 * signal mask out to *ucp.  Only the leading UC32_COPY_SIZE bytes are
 * written, leaving the user's uc_link field intact.
 */
int
freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap)
{
	ucontext32_t uc;
	int ret;

	if (uap->ucp == NULL)
		ret = EINVAL;
	else {
		get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
		PROC_LOCK(td->td_proc);
		uc.uc_sigmask = td->td_sigmask;
		PROC_UNLOCK(td->td_proc);
		ret = copyout(&uc, uap->ucp, UC32_COPY_SIZE);
	}
	return (ret);
}
int
freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap)
{
ucontext32_t uc;
int ret;
if (uap->ucp == NULL)
ret = EINVAL;
else {
ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
if (ret == 0) {
ret = set_mcontext32(td, &uc.uc_mcontext);
if (ret == 0) {
kern_sigprocmask(td, SIG_SETMASK,
&uc.uc_sigmask, NULL, 0);
}
}
}
return (ret == 0 ? EJUSTRETURN : ret);
}
/*
 * 32-bit compat swapcontext(2): save the current context to *oucp, then
 * install the context from *ucp.  On success returns EJUSTRETURN so the
 * syscall return path does not overwrite the newly installed registers.
 */
int
freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap)
{
	ucontext32_t uc;
	int ret;

	if (uap->oucp == NULL || uap->ucp == NULL)
		ret = EINVAL;
	else {
		/* Capture the caller's current context... */
		get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
		PROC_LOCK(td->td_proc);
		uc.uc_sigmask = td->td_sigmask;
		PROC_UNLOCK(td->td_proc);
		ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE);
		if (ret == 0) {
			/* ...then switch to the supplied one. */
			ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
			if (ret == 0) {
				ret = set_mcontext32(td, &uc.uc_mcontext);
				if (ret == 0) {
					kern_sigprocmask(td, SIG_SETMASK,
					    &uc.uc_sigmask, NULL, 0);
				}
			}
		}
	}
	return (ret == 0 ? EJUSTRETURN : ret);
}
#endif
void
cpu_set_syscall_retval(struct thread *td, int error)
@ -539,7 +871,8 @@ cpu_set_syscall_retval(struct thread *td, int error)
p = td->td_proc;
tf = td->td_frame;
if (tf->fixreg[0] == SYS___syscall) {
if (tf->fixreg[0] == SYS___syscall &&
(p->p_sysent->sv_flags & SV_ILP32)) {
int code = tf->fixreg[FIRSTARG + 1];
if (p->p_sysent->sv_mask)
code &= p->p_sysent->sv_mask;
@ -612,7 +945,10 @@ int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
td->td_frame->fixreg[2] = (register_t)tls_base + 0x7008;
if (td->td_proc->p_sysent->sv_flags & SV_LP64)
td->td_frame->fixreg[13] = (register_t)tls_base + 0x7010;
else
td->td_frame->fixreg[2] = (register_t)tls_base + 0x7008;
return (0);
}
@ -643,8 +979,14 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
cf->cf_arg1 = (register_t)tf;
pcb2->pcb_sp = (register_t)cf;
#ifdef __powerpc64__
pcb2->pcb_lr = ((register_t *)fork_trampoline)[0];
pcb2->pcb_toc = ((register_t *)fork_trampoline)[1];
#else
pcb2->pcb_lr = (register_t)fork_trampoline;
pcb2->pcb_cpu.aim.usr = 0;
#endif
pcb2->pcb_cpu.aim.usr_vsid = 0;
pcb2->pcb_cpu.aim.usr_esid = 0;
/* Setup to release spin count in fork_exit(). */
td->td_md.md_spinlock_count = 1;
@ -660,19 +1002,39 @@ cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
tf = td->td_frame;
/* align stack and alloc space for frame ptr and saved LR */
#ifdef __powerpc64__
sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 48) &
~0x1f;
#else
sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 8) &
~0x1f;
#endif
bzero(tf, sizeof(struct trapframe));
tf->fixreg[1] = (register_t)sp;
tf->fixreg[3] = (register_t)arg;
tf->srr0 = (register_t)entry;
#ifdef AIM
tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
#else
tf->srr1 = PSL_USERSET;
#endif
if (td->td_proc->p_sysent->sv_flags & SV_ILP32) {
tf->srr0 = (register_t)entry;
#ifdef AIM
tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
#else
tf->srr1 = PSL_USERSET;
#endif
} else {
#ifdef __powerpc64__
register_t entry_desc[3];
(void)copyin((void *)entry, entry_desc, sizeof(entry_desc));
tf->srr0 = entry_desc[0];
tf->fixreg[2] = entry_desc[1];
tf->fixreg[11] = entry_desc[2];
tf->srr1 = PSL_SF | PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
#endif
}
#ifdef __powerpc64__
if (mfmsr() & PSL_HV)
tf->srr1 |= PSL_HV;
#endif
td->td_pcb->pcb_flags = 0;
td->td_retval[0] = (register_t)entry;

View File

@ -53,6 +53,7 @@
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/psl.h>
#include <machine/sigframe.h>
ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
@ -70,40 +71,46 @@ ASSYM(PC_BOOKE_TLB_LEVEL, offsetof(struct pcpu, pc_booke_tlb_level));
ASSYM(PC_BOOKE_TLB_LOCK, offsetof(struct pcpu, pc_booke_tlb_lock));
#endif
ASSYM(CPUSAVE_R28, CPUSAVE_R28*4);
ASSYM(CPUSAVE_R29, CPUSAVE_R29*4);
ASSYM(CPUSAVE_R30, CPUSAVE_R30*4);
ASSYM(CPUSAVE_R31, CPUSAVE_R31*4);
ASSYM(CPUSAVE_SRR0, CPUSAVE_SRR0*4);
ASSYM(CPUSAVE_SRR1, CPUSAVE_SRR1*4);
ASSYM(CPUSAVE_AIM_DAR, CPUSAVE_AIM_DAR*4);
ASSYM(CPUSAVE_AIM_DSISR, CPUSAVE_AIM_DSISR*4);
ASSYM(CPUSAVE_BOOKE_DEAR, CPUSAVE_BOOKE_DEAR*4);
ASSYM(CPUSAVE_BOOKE_ESR, CPUSAVE_BOOKE_ESR*4);
ASSYM(CPUSAVE_R27, CPUSAVE_R27*sizeof(register_t));
ASSYM(CPUSAVE_R28, CPUSAVE_R28*sizeof(register_t));
ASSYM(CPUSAVE_R29, CPUSAVE_R29*sizeof(register_t));
ASSYM(CPUSAVE_R30, CPUSAVE_R30*sizeof(register_t));
ASSYM(CPUSAVE_R31, CPUSAVE_R31*sizeof(register_t));
ASSYM(CPUSAVE_SRR0, CPUSAVE_SRR0*sizeof(register_t));
ASSYM(CPUSAVE_SRR1, CPUSAVE_SRR1*sizeof(register_t));
ASSYM(CPUSAVE_AIM_DAR, CPUSAVE_AIM_DAR*sizeof(register_t));
ASSYM(CPUSAVE_AIM_DSISR, CPUSAVE_AIM_DSISR*sizeof(register_t));
ASSYM(CPUSAVE_BOOKE_DEAR, CPUSAVE_BOOKE_DEAR*sizeof(register_t));
ASSYM(CPUSAVE_BOOKE_ESR, CPUSAVE_BOOKE_ESR*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_LR, TLBSAVE_BOOKE_LR*4);
ASSYM(TLBSAVE_BOOKE_CR, TLBSAVE_BOOKE_CR*4);
ASSYM(TLBSAVE_BOOKE_SRR0, TLBSAVE_BOOKE_SRR0*4);
ASSYM(TLBSAVE_BOOKE_SRR1, TLBSAVE_BOOKE_SRR1*4);
ASSYM(TLBSAVE_BOOKE_R20, TLBSAVE_BOOKE_R20*4);
ASSYM(TLBSAVE_BOOKE_R21, TLBSAVE_BOOKE_R21*4);
ASSYM(TLBSAVE_BOOKE_R22, TLBSAVE_BOOKE_R22*4);
ASSYM(TLBSAVE_BOOKE_R23, TLBSAVE_BOOKE_R23*4);
ASSYM(TLBSAVE_BOOKE_R24, TLBSAVE_BOOKE_R24*4);
ASSYM(TLBSAVE_BOOKE_R25, TLBSAVE_BOOKE_R25*4);
ASSYM(TLBSAVE_BOOKE_R26, TLBSAVE_BOOKE_R26*4);
ASSYM(TLBSAVE_BOOKE_R27, TLBSAVE_BOOKE_R27*4);
ASSYM(TLBSAVE_BOOKE_R28, TLBSAVE_BOOKE_R28*4);
ASSYM(TLBSAVE_BOOKE_R29, TLBSAVE_BOOKE_R29*4);
ASSYM(TLBSAVE_BOOKE_R30, TLBSAVE_BOOKE_R30*4);
ASSYM(TLBSAVE_BOOKE_R31, TLBSAVE_BOOKE_R31*4);
ASSYM(TLBSAVE_BOOKE_LR, TLBSAVE_BOOKE_LR*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_CR, TLBSAVE_BOOKE_CR*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_SRR0, TLBSAVE_BOOKE_SRR0*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_SRR1, TLBSAVE_BOOKE_SRR1*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_R20, TLBSAVE_BOOKE_R20*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_R21, TLBSAVE_BOOKE_R21*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_R22, TLBSAVE_BOOKE_R22*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_R23, TLBSAVE_BOOKE_R23*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_R24, TLBSAVE_BOOKE_R24*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_R25, TLBSAVE_BOOKE_R25*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_R26, TLBSAVE_BOOKE_R26*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_R27, TLBSAVE_BOOKE_R27*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_R28, TLBSAVE_BOOKE_R28*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_R29, TLBSAVE_BOOKE_R29*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_R30, TLBSAVE_BOOKE_R30*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_R31, TLBSAVE_BOOKE_R31*sizeof(register_t));
ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock));
#if defined(AIM)
ASSYM(PM_KERNELSR, offsetof(struct pmap, pm_sr[KERNEL_SR]));
ASSYM(PM_USRSR, offsetof(struct pmap, pm_sr[USER_SR]));
ASSYM(USER_SR, USER_SR);
ASSYM(USER_ADDR, USER_ADDR);
#ifdef __powerpc64__
ASSYM(PC_KERNSLB, offsetof(struct pcpu, pc_slb));
ASSYM(PC_USERSLB, offsetof(struct pcpu, pc_userslb));
#else
ASSYM(PM_SR, offsetof(struct pmap, pm_sr));
#endif
#elif defined(E500)
ASSYM(PM_PDIR, offsetof(struct pmap, pm_pdir));
#endif
@ -114,7 +121,11 @@ ASSYM(PTE_FLAGS, offsetof(struct pte, flags));
ASSYM(TLB0_ENTRY_SIZE, sizeof(struct tlb_entry));
#endif
#ifdef __powerpc64__
ASSYM(FSP, 48);
#else
ASSYM(FSP, 8);
#endif
ASSYM(FRAMELEN, FRAMELEN);
ASSYM(FRAME_0, offsetof(struct trapframe, fixreg[0]));
ASSYM(FRAME_1, offsetof(struct trapframe, fixreg[1]));
@ -169,13 +180,15 @@ ASSYM(CF_SIZE, sizeof(struct callframe));
ASSYM(PCB_CONTEXT, offsetof(struct pcb, pcb_context));
ASSYM(PCB_CR, offsetof(struct pcb, pcb_cr));
ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp));
ASSYM(PCB_TOC, offsetof(struct pcb, pcb_toc));
ASSYM(PCB_LR, offsetof(struct pcb, pcb_lr));
ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
ASSYM(PCB_FPU, PCB_FPU);
ASSYM(PCB_VEC, PCB_VEC);
ASSYM(PCB_AIM_USR, offsetof(struct pcb, pcb_cpu.aim.usr));
ASSYM(PCB_AIM_USR_ESID, offsetof(struct pcb, pcb_cpu.aim.usr_esid));
ASSYM(PCB_AIM_USR_VSID, offsetof(struct pcb, pcb_cpu.aim.usr_vsid));
ASSYM(PCB_BOOKE_CTR, offsetof(struct pcb, pcb_cpu.booke.ctr));
ASSYM(PCB_BOOKE_XER, offsetof(struct pcb, pcb_cpu.booke.xer));
ASSYM(PCB_BOOKE_DBCR0, offsetof(struct pcb, pcb_cpu.booke.dbcr0));
@ -197,3 +210,50 @@ ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));
ASSYM(KERNBASE, KERNBASE);
ASSYM(MAXCOMLEN, MAXCOMLEN);
#ifdef E500
ASSYM(PSL_UCLE, PSL_UCLE);
ASSYM(PSL_SPE, PSL_SPE);
ASSYM(PSL_WE, PSL_WE);
ASSYM(PSL_CE, PSL_CE);
ASSYM(PSL_UBLE, PSL_UBLE);
ASSYM(PSL_DS, PSL_DS);
ASSYM(PSL_IS, PSL_IS);
ASSYM(PSL_DE, PSL_DE);
ASSYM(PSL_KERNSET_INIT, PSL_KERNSET_INIT);
#else /* AIM */
#ifdef __powerpc64__
ASSYM(PSL_SF, PSL_SF);
ASSYM(PSL_HV, PSL_HV);
#endif
ASSYM(PSL_VEC, PSL_VEC);
ASSYM(PSL_POW, PSL_POW);
ASSYM(PSL_ILE, PSL_ILE);
ASSYM(PSL_BE, PSL_BE);
ASSYM(PSL_LE, PSL_LE);
ASSYM(PSL_SE, PSL_SE);
ASSYM(PSL_RI, PSL_RI);
ASSYM(PSL_DR, PSL_DR);
ASSYM(PSL_IP, PSL_IP);
ASSYM(PSL_IR, PSL_IR);
ASSYM(PSL_FE_DIS, PSL_FE_DIS);
ASSYM(PSL_FE_NONREC, PSL_FE_NONREC);
ASSYM(PSL_FE_PREC, PSL_FE_PREC);
ASSYM(PSL_FE_REC, PSL_FE_REC);
ASSYM(PSL_USERSTATIC, PSL_USERSTATIC);
#endif
ASSYM(PSL_EE, PSL_EE);
ASSYM(PSL_FE0, PSL_FE0);
ASSYM(PSL_FE1, PSL_FE1);
ASSYM(PSL_FP, PSL_FP);
ASSYM(PSL_ME, PSL_ME);
ASSYM(PSL_PR, PSL_PR);
ASSYM(PSL_PMM, PSL_PMM);
ASSYM(PSL_KERNSET, PSL_KERNSET);
ASSYM(PSL_USERSET, PSL_USERSET);

View File

@ -228,7 +228,7 @@ skip_start:
if (len < mlen)
mlen = len;
if ((clen ^ (int) addr) & 1)
if ((clen ^ (long) addr) & 1)
sum += in_cksumdata(addr, mlen) << 8;
else
sum += in_cksumdata(addr, mlen);

View File

@ -187,7 +187,7 @@ cpu_mp_announce(void)
pc = pcpu_find(i);
if (pc == NULL)
continue;
printf("cpu%d: dev=%x", i, pc->pc_hwref);
printf("cpu%d: dev=%x", i, (int)pc->pc_hwref);
if (pc->pc_bsp)
printf(" (BSP)");
printf("\n");
@ -211,7 +211,7 @@ cpu_mp_unleash(void *dummy)
if (!pc->pc_bsp) {
if (bootverbose)
printf("Waking up CPU %d (dev=%x)\n",
pc->pc_cpuid, pc->pc_hwref);
pc->pc_cpuid, (int)pc->pc_hwref);
platform_smp_start_cpu(pc);

View File

@ -4,99 +4,111 @@
/* kernel version of this file, does not have signal goop */
/* int setjmp(jmp_buf env) */
#define JMP_r1 0x04
#define JMP_r14 0x08
#define JMP_r15 0x0c
#define JMP_r16 0x10
#define JMP_r17 0x14
#define JMP_r18 0x18
#define JMP_r19 0x1c
#define JMP_r20 0x20
#define JMP_r21 0x24
#define JMP_r22 0x28
#define JMP_r23 0x2c
#define JMP_r24 0x30
#define JMP_r25 0x34
#define JMP_r26 0x38
#define JMP_r27 0x3c
#define JMP_r28 0x40
#define JMP_r29 0x44
#define JMP_r30 0x48
#define JMP_r31 0x4c
#define JMP_lr 0x50
#define JMP_cr 0x54
#define JMP_ctr 0x58
#define JMP_xer 0x5c
#define JMP_sig 0x60
#include <machine/asm.h>
#ifdef __powerpc64__
#define LD_REG ld
#define ST_REG std
#define REGWIDTH 8
#else
#define LD_REG lwz
#define ST_REG stw
#define REGWIDTH 4
#endif
.globl setjmp
setjmp:
stw 31, JMP_r31(3)
/* r1, r14-r30 */
stw 1, JMP_r1 (3)
stw 14, JMP_r14(3)
stw 15, JMP_r15(3)
stw 16, JMP_r16(3)
stw 17, JMP_r17(3)
stw 18, JMP_r18(3)
stw 19, JMP_r19(3)
stw 20, JMP_r20(3)
stw 21, JMP_r21(3)
stw 22, JMP_r22(3)
stw 23, JMP_r23(3)
stw 24, JMP_r24(3)
stw 25, JMP_r25(3)
stw 26, JMP_r26(3)
stw 27, JMP_r27(3)
stw 28, JMP_r28(3)
stw 29, JMP_r29(3)
stw 30, JMP_r30(3)
#define JMP_r1 1*REGWIDTH
#define JMP_r2 2*REGWIDTH
#define JMP_r14 3*REGWIDTH
#define JMP_r15 4*REGWIDTH
#define JMP_r16 5*REGWIDTH
#define JMP_r17 6*REGWIDTH
#define JMP_r18 7*REGWIDTH
#define JMP_r19 8*REGWIDTH
#define JMP_r20 9*REGWIDTH
#define JMP_r21 10*REGWIDTH
#define JMP_r22 11*REGWIDTH
#define JMP_r23 12*REGWIDTH
#define JMP_r24 13*REGWIDTH
#define JMP_r25 14*REGWIDTH
#define JMP_r26 15*REGWIDTH
#define JMP_r27 16*REGWIDTH
#define JMP_r28 17*REGWIDTH
#define JMP_r29 18*REGWIDTH
#define JMP_r30 19*REGWIDTH
#define JMP_r31 20*REGWIDTH
#define JMP_lr 21*REGWIDTH
#define JMP_cr 22*REGWIDTH
#define JMP_ctr 23*REGWIDTH
#define JMP_xer 24*REGWIDTH
#define JMP_sig 25*REGWIDTH
ASENTRY(setjmp)
ST_REG 31, JMP_r31(3)
/* r1, r2, r14-r30 */
ST_REG 1, JMP_r1 (3)
ST_REG 2, JMP_r2 (3)
ST_REG 14, JMP_r14(3)
ST_REG 15, JMP_r15(3)
ST_REG 16, JMP_r16(3)
ST_REG 17, JMP_r17(3)
ST_REG 18, JMP_r18(3)
ST_REG 19, JMP_r19(3)
ST_REG 20, JMP_r20(3)
ST_REG 21, JMP_r21(3)
ST_REG 22, JMP_r22(3)
ST_REG 23, JMP_r23(3)
ST_REG 24, JMP_r24(3)
ST_REG 25, JMP_r25(3)
ST_REG 26, JMP_r26(3)
ST_REG 27, JMP_r27(3)
ST_REG 28, JMP_r28(3)
ST_REG 29, JMP_r29(3)
ST_REG 30, JMP_r30(3)
/* cr, lr, ctr, xer */
mfcr 0
stw 0, JMP_cr(3)
ST_REG 0, JMP_cr(3)
mflr 0
stw 0, JMP_lr(3)
ST_REG 0, JMP_lr(3)
mfctr 0
stw 0, JMP_ctr(3)
ST_REG 0, JMP_ctr(3)
mfxer 0
stw 0, JMP_xer(3)
ST_REG 0, JMP_xer(3)
/* f14-f31, fpscr */
li 3, 0
blr
.extern sigsetmask
.globl longjmp
longjmp:
lwz 31, JMP_r31(3)
/* r1, r14-r30 */
lwz 1, JMP_r1 (3)
lwz 14, JMP_r14(3)
lwz 15, JMP_r15(3)
lwz 16, JMP_r16(3)
lwz 17, JMP_r17(3)
lwz 18, JMP_r18(3)
lwz 19, JMP_r19(3)
lwz 20, JMP_r20(3)
lwz 21, JMP_r21(3)
lwz 22, JMP_r22(3)
lwz 23, JMP_r23(3)
lwz 24, JMP_r24(3)
lwz 25, JMP_r25(3)
lwz 26, JMP_r26(3)
lwz 27, JMP_r27(3)
lwz 28, JMP_r28(3)
lwz 29, JMP_r29(3)
lwz 30, JMP_r30(3)
ASENTRY(longjmp)
LD_REG 31, JMP_r31(3)
/* r1, r2, r14-r30 */
LD_REG 1, JMP_r1 (3)
LD_REG 2, JMP_r2 (3)
LD_REG 14, JMP_r14(3)
LD_REG 15, JMP_r15(3)
LD_REG 16, JMP_r16(3)
LD_REG 17, JMP_r17(3)
LD_REG 18, JMP_r18(3)
LD_REG 19, JMP_r19(3)
LD_REG 20, JMP_r20(3)
LD_REG 21, JMP_r21(3)
LD_REG 22, JMP_r22(3)
LD_REG 23, JMP_r23(3)
LD_REG 24, JMP_r24(3)
LD_REG 25, JMP_r25(3)
LD_REG 26, JMP_r26(3)
LD_REG 27, JMP_r27(3)
LD_REG 28, JMP_r28(3)
LD_REG 29, JMP_r29(3)
LD_REG 30, JMP_r30(3)
/* cr, lr, ctr, xer */
lwz 0, JMP_cr(3)
LD_REG 0, JMP_cr(3)
mtcr 0
lwz 0, JMP_lr(3)
LD_REG 0, JMP_lr(3)
mtlr 0
lwz 0, JMP_ctr(3)
LD_REG 0, JMP_ctr(3)
mtctr 0
lwz 0, JMP_xer(3)
LD_REG 0, JMP_xer(3)
mtxer 0
/* f14-f31, fpscr */
mr 3, 4

View File

@ -43,8 +43,8 @@
* On entry r1 points to a struct sigframe at bottom of current stack.
* All other registers are unchanged.
*/
.globl CNAME(sigcode),CNAME(szsigcode)
CNAME(sigcode):
.globl CNAME(sigcode32),CNAME(szsigcode32)
CNAME(sigcode32):
addi 1,1,-16 /* reserved space for callee */
blrl
addi 3,1,16+SF_UC /* restore sp, and get &frame->sf_uc */
@ -52,8 +52,8 @@ CNAME(sigcode):
sc /* sigreturn(scp) */
li 0,SYS_exit
sc /* exit(errno) */
endsigcode:
endsigcode32:
.data
CNAME(szsigcode):
.long endsigcode - CNAME(sigcode)
CNAME(szsigcode32):
.long endsigcode32 - CNAME(sigcode32)

View File

@ -0,0 +1,66 @@
/* $FreeBSD$ */
/* $NetBSD: sigcode.S,v 1.1 1999/11/17 14:56:11 kleink Exp $ */
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
* Copyright (C) 1995, 1996 TooLs GmbH.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by TooLs GmbH.
* 4. The name of TooLs GmbH may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#include <sys/syscall.h>
#include "assym.s"
/*
* The following code gets copied to the top of the user stack on process
* execution. It does signal trampolining on signal delivery.
*
* On entry r1 points to a struct sigframe at bottom of current stack.
* All other registers are unchanged.
*/
/*
 * 64-bit signal trampoline, copied to the top of the user stack at exec
 * time.  On entry r1 points to a struct sigframe (see the sf_uc fetch
 * below) and LR holds the address of the handler's function descriptor
 * (entry point at offset 0, TOC pointer at offset 8 — 64-bit ELF ABI).
 */
.globl CNAME(sigcode64),CNAME(szsigcode64)
CNAME(sigcode64):
addi 1,1,-48 /* reserved space for callee */
mflr 2 /* resolve function descriptor */
ld 0,0(2) /* entry point from descriptor word 0 */
ld 2,8(2) /* TOC pointer from descriptor word 1 */
mtlr 0
blrl /* invoke the signal handler */
addi 3,1,48+SF_UC /* restore sp, and get &frame->sf_uc */
li 0,SYS_sigreturn
sc /* sigreturn(scp) */
li 0,SYS_exit
sc /* exit(errno) — only reached if sigreturn fails */
nop /* align to doubleword */
endsigcode64:
.data
CNAME(szsigcode64):
.long endsigcode64 - CNAME(sigcode64)

View File

@ -42,6 +42,12 @@ __FBSDID("$FreeBSD$");
#include <machine/stack.h>
#include <machine/trap.h>
#ifdef __powerpc64__
#define CALLOFFSET 8 /* Account for the TOC reload slot */
#else
#define CALLOFFSET 4
#endif
static void
stack_capture(struct stack *st, vm_offset_t frame)
{
@ -51,10 +57,15 @@ stack_capture(struct stack *st, vm_offset_t frame)
if (frame < PAGE_SIZE)
return;
while (1) {
frame = *(register_t *)frame;
frame = *(vm_offset_t *)frame;
if (frame < PAGE_SIZE)
break;
#ifdef __powerpc64__
callpc = *(vm_offset_t *)(frame + 16) - 4;
#else
callpc = *(vm_offset_t *)(frame + 4) - 4;
#endif
if ((callpc & 3) || (callpc < 0x100))
break;
@ -64,8 +75,8 @@ stack_capture(struct stack *st, vm_offset_t frame)
* things are going wrong. Plus, prevents this shortened
* version of code from accessing user-space frames
*/
if (callpc + 4 == (register_t) &trapexit ||
callpc + 4 == (register_t) &asttrapexit)
if (callpc + CALLOFFSET == (vm_offset_t) &trapexit ||
callpc + CALLOFFSET == (vm_offset_t) &asttrapexit)
break;
if (stack_put(st, callpc) == -1)

View File

@ -50,10 +50,10 @@ static const char rcsid[] =
void
__syncicache(void *from, int len)
{
int l, off;
register_t l, off;
char *p;
off = (u_int)from & (cacheline_size - 1);
off = (uintptr_t)from & (cacheline_size - 1);
l = len += off;
p = (char *)from - off;

View File

@ -31,9 +31,23 @@ __FBSDID("$FreeBSD$");
#include <sys/errno.h>
#include <sys/sysproto.h>
#include "opt_compat.h"
#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32_proto.h>
/*
 * sysarch(2) entry point for 32-bit binaries running on a 64-bit kernel.
 * PowerPC defines no machine-dependent sysarch operations, so every
 * request fails with EINVAL.
 */
int
freebsd32_sysarch(struct thread *td, struct freebsd32_sysarch_args *uap)
{
return (EINVAL);
}
#endif
/*
 * Native sysarch(2) entry point.  PowerPC implements no
 * architecture-private syscalls, so this stub always returns EINVAL.
 */
int
sysarch(struct thread *td, struct sysarch_args *uap)
{
return (EINVAL);
}