The .s files were repo-copied to .S files.

Approved by:	marcel
Repocopied by:	joe
This commit is contained in:
Ruslan Ermilov 2003-07-02 12:57:07 +00:00
parent ae0c4c928a
commit 0be33d3321
8 changed files with 7 additions and 4687 deletions

View File

@ -75,7 +75,7 @@ ia64/ia64/autoconf.c standard
ia64/ia64/busdma_machdep.c standard
ia64/ia64/clock.c standard
ia64/ia64/clock_if.m standard
ia64/ia64/context.s standard
ia64/ia64/context.S standard
ia64/ia64/critical.c standard
ia64/ia64/db_disasm.c optional ddb
ia64/ia64/db_interface.c optional ddb
@ -84,26 +84,26 @@ ia64/ia64/dump_machdep.c standard
ia64/ia64/efi.c standard
ia64/ia64/eficlock.c standard
ia64/ia64/elf_machdep.c standard
ia64/ia64/exception.s standard
ia64/ia64/exception.S standard
ia64/ia64/ia64-gdbstub.c optional ddb
ia64/ia64/in_cksum.c optional inet
ia64/ia64/interrupt.c standard
ia64/ia64/locore.s standard no-obj
ia64/ia64/locore.S standard no-obj
ia64/ia64/machdep.c standard
ia64/ia64/mca.c standard
ia64/ia64/mem.c standard
ia64/ia64/mp_machdep.c optional smp
ia64/ia64/nexus.c standard
ia64/ia64/pal.s standard
ia64/ia64/pal.S standard
ia64/ia64/pmap.c standard
ia64/ia64/sal.c standard
ia64/ia64/sapic.c standard
ia64/ia64/setjmp.s standard
ia64/ia64/setjmp.S standard
ia64/ia64/ssc.c optional ski
ia64/ia64/sscdisk.c optional ski
ia64/ia64/support.s standard
ia64/ia64/support.S standard
ia64/ia64/sys_machdep.c standard
ia64/ia64/syscall.s standard
ia64/ia64/syscall.S standard
ia64/ia64/trap.c standard
ia64/ia64/unaligned.c standard
ia64/ia64/unwind.c standard

View File

@ -1,804 +0,0 @@
/*
* Copyright (c) 2003 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asm.h>
#include <assym.s>
.text
/*
 * void restorectx(struct pcb *)
 *
 * Reload the CPU state that swapctx() saved in the given PCB and
 * resume execution at the saved return pointer.  Loads 1 into r8
 * before the final branch so the resumed swapctx() caller sees a
 * non-zero "second return" (setjmp/longjmp style).  Never returns
 * to its own caller.
 */
ENTRY(restorectx, 1)
{ .mmi
invala				// discard stale ALAT entries
mov ar.rsc=0			// RSE to enforced-lazy while we switch
add r31=8,r32
;;
}
{ .mmi
ld8 r12=[r32] // sp
ld8 r16=[r31],16 // unat (before)
add r30=16,r32
;;
}
{ .mmi
ld8 r17=[r30],16 // rp
ld8 r18=[r31],16 // pr
add r14=SIZEOF_SPECIAL,r32	// r14 -> callee-saved area of the PCB
;;
}
{ .mmi
ld8 r19=[r30],16 // pfs
ld8 r20=[r31],16 // bspstore
mov rp=r17
;;
}
{ .mmi
loadrs				// discard current dirty stacked regs
ld8 r21=[r30],16 // rnat
mov pr=r18,0x1fffe
;;
}
{ .mmi
ld8 r17=[r14],8 // unat (after)
mov ar.bspstore=r20		// switch backing store (RSE quiesced)
mov ar.pfs=r19
;;
}
{ .mmi
mov ar.unat=r17			// unat governing the ld8.fill's below
mov ar.rnat=r21
add r15=8,r14
;;
}
{ .mmi
ld8.fill r4=[r14],16 // r4
ld8.fill r5=[r15],16 // r5
nop 0
;;
}
{ .mmi
ld8.fill r6=[r14],16 // r6
ld8.fill r7=[r15],16 // r7
nop 1
;;
}
{ .mmi
mov ar.unat=r16			// restore the pre-switch unat
mov ar.rsc=3			// RSE back to eager mode
nop 2
}
{ .mmi
ld8 r17=[r14],16 // b1
ld8 r18=[r15],16 // b2
nop 3
;;
}
{ .mmi
ld8 r19=[r14],16 // b3
ld8 r20=[r15],16 // b4
mov b1=r17
;;
}
{ .mmi
ld8 r16=[r14],24 // b5
ld8 r17=[r15],32 // lc
mov b2=r18
;;
}
{ .mmi
ldf.fill f2=[r14],32
ldf.fill f3=[r15],32
mov b3=r19
;;
}
{ .mmi
ldf.fill f4=[r14],32
ldf.fill f5=[r15],32
mov b4=r20
;;
}
{ .mmi
ldf.fill f16=[r14],32
ldf.fill f17=[r15],32
mov b5=r16
;;
}
{ .mmi
ldf.fill f18=[r14],32
ldf.fill f19=[r15],32
mov ar.lc=r17
;;
}
// Remaining preserved FP registers, two per group through the
// interleaved r14/r15 pointers.
ldf.fill f20=[r14],32
ldf.fill f21=[r15],32
;;
ldf.fill f22=[r14],32
ldf.fill f23=[r15],32
;;
ldf.fill f24=[r14],32
ldf.fill f25=[r15],32
;;
ldf.fill f26=[r14],32
ldf.fill f27=[r15],32
;;
{ .mmi
ldf.fill f28=[r14],32
ldf.fill f29=[r15],32
add r8=1,r0			// return value 1 for the resumed context
;;
}
{ .mmb
ldf.fill f30=[r14]
ldf.fill f31=[r15]
br.ret.sptk rp			// "return" from the saved swapctx() call
;;
}
END(restorectx)
/*
 * void swapctx(struct pcb *old, struct pcb *new)
 *
 * Save the current context into 'old'.  If 'new' is NULL (predicate
 * p15), return 0 to the caller; otherwise fall through into
 * restorectx(new).  A context saved here later resumes with return
 * value 1 (set by restorectx), so callers can tell the two returns
 * apart.
 */
ENTRY(swapctx, 2)
{ .mmi
mov ar.rsc=0			// RSE to enforced-lazy for the save
mov r16=ar.unat
add r31=8,r32
;;
}
{ .mmi
flushrs				// spill dirty stacked regs to memory
st8 [r32]=sp,16 // sp
mov r17=rp
;;
}
{ .mmi
st8 [r31]=r16,16 // unat (before)
st8 [r32]=r17,16 // rp
mov r16=pr
;;
}
{ .mmi
st8 [r31]=r16,16 // pr
mov r17=ar.bsp
mov r16=ar.pfs
;;
}
{ .mmi
st8 [r32]=r16,16 // pfs
st8 [r31]=r17,16 // bspstore
cmp.eq p15,p0=0,r33		// p15 <- (new == NULL)
;;
}
{ .mmi
mov r16=ar.rnat
(p15) mov ar.rsc=3		// no switch: re-enable RSE early
add r30=SIZEOF_SPECIAL-(6*8),r32
;;
}
{ .mmi
st8 [r32]=r16,SIZEOF_SPECIAL-(4*8) // rnat
st8 [r31]=r0,SIZEOF_SPECIAL-(6*8) // __spare
mov r16=b1
;;
}
/* callee_saved */
{ .mmi
.mem.offset 8,0
st8.spill [r31]=r4,16 // r4
.mem.offset 16,0
st8.spill [r32]=r5,16 // r5
mov r17=b2
;;
}
{ .mmi
.mem.offset 24,0
st8.spill [r31]=r6,16 // r6
.mem.offset 32,0
st8.spill [r32]=r7,16 // r7
mov r18=b3
;;
}
{ .mmi
st8 [r31]=r16,16 // b1
mov r16=ar.unat		// unat bits produced by the spills above
mov r19=b4
;;
}
{ .mmi
st8 [r30]=r16 // unat (after)
st8 [r32]=r17,16 // b2
mov r16=b5
;;
}
{ .mmi
st8 [r31]=r18,16 // b3
st8 [r32]=r19,16 // b4
mov r17=ar.lc
;;
}
st8 [r31]=r16,16 // b5
st8 [r32]=r17,16 // lc
;;
st8 [r31]=r0,24 // __spare
stf.spill [r32]=f2,32
;;
// Preserved FP registers f3-f31, two per group through the
// interleaved r31/r32 pointers.
stf.spill [r31]=f3,32
stf.spill [r32]=f4,32
;;
stf.spill [r31]=f5,32
stf.spill [r32]=f16,32
;;
stf.spill [r31]=f17,32
stf.spill [r32]=f18,32
;;
stf.spill [r31]=f19,32
stf.spill [r32]=f20,32
;;
stf.spill [r31]=f21,32
stf.spill [r32]=f22,32
;;
stf.spill [r31]=f23,32
stf.spill [r32]=f24,32
;;
stf.spill [r31]=f25,32
stf.spill [r32]=f26,32
;;
stf.spill [r31]=f27,32
stf.spill [r32]=f28,32
;;
{ .mmi
stf.spill [r31]=f29,32
stf.spill [r32]=f30
(p15) add r8=0,r0		// new == NULL: return 0
;;
}
{ .mmb
stf.spill [r31]=f31
mf				// make the saved state globally visible
(p15) br.ret.sptk rp
;;
}
{ .mfb
mov r32=r33			// restorectx(new)
nop 0
br.sptk restorectx
;;
}
END(swapctx)
/*
 * save_callee_saved(struct _callee_saved *)
 *
 * Spill the preserved integer state (r4-r7 with their NaT bits,
 * b1-b5, ar.lc) into the supplied structure.  The unat value
 * captured after the spills is stored at offset 0 so the restore
 * side can recover the NaT bits.
 */
ENTRY(save_callee_saved, 1)
{ .mii
nop 0
add r14=8,r32
add r15=16,r32
;;
}
{ .mmi
.mem.offset 8,0
st8.spill [r14]=r4,16 // r4
.mem.offset 16,0
st8.spill [r15]=r5,16 // r5
mov r16=b1
;;
}
{ .mmi
.mem.offset 24,0
st8.spill [r14]=r6,16 // r6
.mem.offset 32,0
st8.spill [r15]=r7,16 // r7
mov r17=b2
;;
}
{ .mmi
st8 [r14]=r16,16 // b1
mov r18=ar.unat		// unat bits produced by the spills above
mov r19=b3
;;
}
{ .mmi
st8 [r32]=r18 // nat (after)
st8 [r15]=r17,16 // b2
mov r16=b4
;;
}
{ .mmi
st8 [r14]=r19,16 // b3
st8 [r15]=r16,16 // b4
mov r17=b5
;;
}
{ .mfi
st8 [r14]=r17,16 // b5
nop 0
mov r16=ar.lc
;;
}
{ .mmb
st8 [r15]=r16 // ar.lc
st8 [r14]=r0 // __spare
br.ret.sptk rp
;;
}
END(save_callee_saved)
/*
 * restore_callee_saved(struct _callee_saved *)
 *
 * Inverse of save_callee_saved(): reload r4-r7 (with NaT bits via
 * the saved unat at offset 0), b1-b5 and ar.lc from the structure.
 */
ENTRY(restore_callee_saved, 1)
{ .mmi
ld8 r30=[r32],16 // nat (after)
;;
mov ar.unat=r30			// supplies NaT bits for the fills below
add r31=-8,r32
;;
}
{ .mmb
ld8.fill r4=[r31],16 // r4
ld8.fill r5=[r32],16 // r5
nop 0
;;
}
{ .mmb
ld8.fill r6=[r31],16 // r6
ld8.fill r7=[r32],16 // r7
nop 0
;;
}
{ .mmi
ld8 r30=[r31],16 // b1
ld8 r29=[r32],16 // b2
nop 0
;;
}
{ .mmi
ld8 r28=[r31],16 // b3
ld8 r27=[r32],16 // b4
mov b1=r30
;;
}
{ .mii
ld8 r26=[r31] // b5
mov b2=r29
mov b3=r28
;;
}
{ .mii
ld8 r25=[r32] // lc
mov b4=r27
mov b5=r26
;;
}
{ .mib
nop 0
mov ar.lc=r25
br.ret.sptk rp
;;
}
END(restore_callee_saved)
/*
 * save_callee_saved_fp(struct _callee_saved_fp *)
 *
 * Spill the preserved low-partition FP registers (f2-f5, f16-f31)
 * into the supplied structure, 16 bytes per register, through two
 * interleaved pointers.
 */
ENTRY(save_callee_saved_fp, 1)
add r31=16,r32
stf.spill [r32]=f2,32
;;
stf.spill [r31]=f3,32
stf.spill [r32]=f4,32
;;
stf.spill [r31]=f5,32
stf.spill [r32]=f16,32
;;
stf.spill [r31]=f17,32
stf.spill [r32]=f18,32
;;
stf.spill [r31]=f19,32
stf.spill [r32]=f20,32
;;
stf.spill [r31]=f21,32
stf.spill [r32]=f22,32
;;
stf.spill [r31]=f23,32
stf.spill [r32]=f24,32
;;
stf.spill [r31]=f25,32
stf.spill [r32]=f26,32
;;
stf.spill [r31]=f27,32
stf.spill [r32]=f28,32
;;
stf.spill [r31]=f29,32
stf.spill [r32]=f30
;;
stf.spill [r31]=f31
br.ret.sptk rp
;;
END(save_callee_saved_fp)
/*
 * restore_callee_saved_fp(struct _callee_saved_fp *)
 *
 * Inverse of save_callee_saved_fp(): reload f2-f5 and f16-f31 from
 * the supplied structure.
 */
ENTRY(restore_callee_saved_fp, 1)
add r31=16,r32
ldf.fill f2=[r32],32
;;
ldf.fill f3=[r31],32
ldf.fill f4=[r32],32
;;
ldf.fill f5=[r31],32
ldf.fill f16=[r32],32
;;
ldf.fill f17=[r31],32
ldf.fill f18=[r32],32
;;
ldf.fill f19=[r31],32
ldf.fill f20=[r32],32
;;
ldf.fill f21=[r31],32
ldf.fill f22=[r32],32
;;
ldf.fill f23=[r31],32
ldf.fill f24=[r32],32
;;
ldf.fill f25=[r31],32
ldf.fill f26=[r32],32
;;
ldf.fill f27=[r31],32
ldf.fill f28=[r32],32
;;
ldf.fill f29=[r31],32
ldf.fill f30=[r32]
;;
ldf.fill f31=[r31]
br.ret.sptk rp
;;
END(restore_callee_saved_fp)
/*
 * save_high_fp(struct _high_fp *)
 *
 * Spill the entire high FP partition (f32-f127) into the supplied
 * structure.  psr.dfh is cleared (rsm) so the high registers can be
 * accessed, and set again (ssm) on the way out so later high-FP use
 * traps again for lazy save/restore.
 */
ENTRY(save_high_fp, 1)
rsm psr.dfh			// allow access to the high partition
;;
srlz.d
add r31=16,r32
stf.spill [r32]=f32,32
;;
// f33-f127, two per group through the interleaved r31/r32 pointers.
stf.spill [r31]=f33,32
stf.spill [r32]=f34,32
;;
stf.spill [r31]=f35,32
stf.spill [r32]=f36,32
;;
stf.spill [r31]=f37,32
stf.spill [r32]=f38,32
;;
stf.spill [r31]=f39,32
stf.spill [r32]=f40,32
;;
stf.spill [r31]=f41,32
stf.spill [r32]=f42,32
;;
stf.spill [r31]=f43,32
stf.spill [r32]=f44,32
;;
stf.spill [r31]=f45,32
stf.spill [r32]=f46,32
;;
stf.spill [r31]=f47,32
stf.spill [r32]=f48,32
;;
stf.spill [r31]=f49,32
stf.spill [r32]=f50,32
;;
stf.spill [r31]=f51,32
stf.spill [r32]=f52,32
;;
stf.spill [r31]=f53,32
stf.spill [r32]=f54,32
;;
stf.spill [r31]=f55,32
stf.spill [r32]=f56,32
;;
stf.spill [r31]=f57,32
stf.spill [r32]=f58,32
;;
stf.spill [r31]=f59,32
stf.spill [r32]=f60,32
;;
stf.spill [r31]=f61,32
stf.spill [r32]=f62,32
;;
stf.spill [r31]=f63,32
stf.spill [r32]=f64,32
;;
stf.spill [r31]=f65,32
stf.spill [r32]=f66,32
;;
stf.spill [r31]=f67,32
stf.spill [r32]=f68,32
;;
stf.spill [r31]=f69,32
stf.spill [r32]=f70,32
;;
stf.spill [r31]=f71,32
stf.spill [r32]=f72,32
;;
stf.spill [r31]=f73,32
stf.spill [r32]=f74,32
;;
stf.spill [r31]=f75,32
stf.spill [r32]=f76,32
;;
stf.spill [r31]=f77,32
stf.spill [r32]=f78,32
;;
stf.spill [r31]=f79,32
stf.spill [r32]=f80,32
;;
stf.spill [r31]=f81,32
stf.spill [r32]=f82,32
;;
stf.spill [r31]=f83,32
stf.spill [r32]=f84,32
;;
stf.spill [r31]=f85,32
stf.spill [r32]=f86,32
;;
stf.spill [r31]=f87,32
stf.spill [r32]=f88,32
;;
stf.spill [r31]=f89,32
stf.spill [r32]=f90,32
;;
stf.spill [r31]=f91,32
stf.spill [r32]=f92,32
;;
stf.spill [r31]=f93,32
stf.spill [r32]=f94,32
;;
stf.spill [r31]=f95,32
stf.spill [r32]=f96,32
;;
stf.spill [r31]=f97,32
stf.spill [r32]=f98,32
;;
stf.spill [r31]=f99,32
stf.spill [r32]=f100,32
;;
stf.spill [r31]=f101,32
stf.spill [r32]=f102,32
;;
stf.spill [r31]=f103,32
stf.spill [r32]=f104,32
;;
stf.spill [r31]=f105,32
stf.spill [r32]=f106,32
;;
stf.spill [r31]=f107,32
stf.spill [r32]=f108,32
;;
stf.spill [r31]=f109,32
stf.spill [r32]=f110,32
;;
stf.spill [r31]=f111,32
stf.spill [r32]=f112,32
;;
stf.spill [r31]=f113,32
stf.spill [r32]=f114,32
;;
stf.spill [r31]=f115,32
stf.spill [r32]=f116,32
;;
stf.spill [r31]=f117,32
stf.spill [r32]=f118,32
;;
stf.spill [r31]=f119,32
stf.spill [r32]=f120,32
;;
stf.spill [r31]=f121,32
stf.spill [r32]=f122,32
;;
stf.spill [r31]=f123,32
stf.spill [r32]=f124,32
;;
stf.spill [r31]=f125,32
stf.spill [r32]=f126
;;
stf.spill [r31]=f127
ssm psr.dfh			// re-disable the high partition
;;
srlz.d
br.ret.sptk rp
;;
END(save_high_fp)
/*
 * restore_high_fp(struct _high_fp *)
 *
 * Inverse of save_high_fp(): reload f32-f127 from the supplied
 * structure.  psr.dfh is cleared for the duration of the access and
 * set again on exit.
 */
ENTRY(restore_high_fp, 1)
rsm psr.dfh			// allow access to the high partition
;;
srlz.d
add r31=16,r32
ldf.fill f32=[r32],32
;;
// f33-f127, two per group through the interleaved r31/r32 pointers.
ldf.fill f33=[r31],32
ldf.fill f34=[r32],32
;;
ldf.fill f35=[r31],32
ldf.fill f36=[r32],32
;;
ldf.fill f37=[r31],32
ldf.fill f38=[r32],32
;;
ldf.fill f39=[r31],32
ldf.fill f40=[r32],32
;;
ldf.fill f41=[r31],32
ldf.fill f42=[r32],32
;;
ldf.fill f43=[r31],32
ldf.fill f44=[r32],32
;;
ldf.fill f45=[r31],32
ldf.fill f46=[r32],32
;;
ldf.fill f47=[r31],32
ldf.fill f48=[r32],32
;;
ldf.fill f49=[r31],32
ldf.fill f50=[r32],32
;;
ldf.fill f51=[r31],32
ldf.fill f52=[r32],32
;;
ldf.fill f53=[r31],32
ldf.fill f54=[r32],32
;;
ldf.fill f55=[r31],32
ldf.fill f56=[r32],32
;;
ldf.fill f57=[r31],32
ldf.fill f58=[r32],32
;;
ldf.fill f59=[r31],32
ldf.fill f60=[r32],32
;;
ldf.fill f61=[r31],32
ldf.fill f62=[r32],32
;;
ldf.fill f63=[r31],32
ldf.fill f64=[r32],32
;;
ldf.fill f65=[r31],32
ldf.fill f66=[r32],32
;;
ldf.fill f67=[r31],32
ldf.fill f68=[r32],32
;;
ldf.fill f69=[r31],32
ldf.fill f70=[r32],32
;;
ldf.fill f71=[r31],32
ldf.fill f72=[r32],32
;;
ldf.fill f73=[r31],32
ldf.fill f74=[r32],32
;;
ldf.fill f75=[r31],32
ldf.fill f76=[r32],32
;;
ldf.fill f77=[r31],32
ldf.fill f78=[r32],32
;;
ldf.fill f79=[r31],32
ldf.fill f80=[r32],32
;;
ldf.fill f81=[r31],32
ldf.fill f82=[r32],32
;;
ldf.fill f83=[r31],32
ldf.fill f84=[r32],32
;;
ldf.fill f85=[r31],32
ldf.fill f86=[r32],32
;;
ldf.fill f87=[r31],32
ldf.fill f88=[r32],32
;;
ldf.fill f89=[r31],32
ldf.fill f90=[r32],32
;;
ldf.fill f91=[r31],32
ldf.fill f92=[r32],32
;;
ldf.fill f93=[r31],32
ldf.fill f94=[r32],32
;;
ldf.fill f95=[r31],32
ldf.fill f96=[r32],32
;;
ldf.fill f97=[r31],32
ldf.fill f98=[r32],32
;;
ldf.fill f99=[r31],32
ldf.fill f100=[r32],32
;;
ldf.fill f101=[r31],32
ldf.fill f102=[r32],32
;;
ldf.fill f103=[r31],32
ldf.fill f104=[r32],32
;;
ldf.fill f105=[r31],32
ldf.fill f106=[r32],32
;;
ldf.fill f107=[r31],32
ldf.fill f108=[r32],32
;;
ldf.fill f109=[r31],32
ldf.fill f110=[r32],32
;;
ldf.fill f111=[r31],32
ldf.fill f112=[r32],32
;;
ldf.fill f113=[r31],32
ldf.fill f114=[r32],32
;;
ldf.fill f115=[r31],32
ldf.fill f116=[r32],32
;;
ldf.fill f117=[r31],32
ldf.fill f118=[r32],32
;;
ldf.fill f119=[r31],32
ldf.fill f120=[r32],32
;;
ldf.fill f121=[r31],32
ldf.fill f122=[r32],32
;;
ldf.fill f123=[r31],32
ldf.fill f124=[r32],32
;;
ldf.fill f125=[r31],32
ldf.fill f126=[r32]
;;
ldf.fill f127=[r31]
ssm psr.dfh			// re-disable the high partition
;;
srlz.d
br.ret.sptk rp
;;
END(restore_high_fp)

File diff suppressed because it is too large Load Diff

View File

@ -1,439 +0,0 @@
/*-
* Copyright (c) 1998 Doug Rabson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asm.h>
#include <machine/ia64_cpu.h>
#include <machine/fpu.h>
#include <machine/pte.h>
#include <sys/syscall.h>
#include <assym.s>
#ifndef EVCNT_COUNTERS
#define _LOCORE
#include <machine/intrcnt.h>
#endif
/*
 * Statically allocated kernel stack (and RSE backing store) for
 * proc0, page-aligned in its own data section.
 */
.section .data.proc0,"aw"
.global kstack
.align PAGE_SIZE
kstack: .space KSTACK_PAGES * PAGE_SIZE
.text
/*
 * Kernel entry point from the EFI loader.
 *
 * Not really a leaf but we can't return.
 * The EFI loader passes the physical address of the bootinfo block in
 * register r8.  Sets up the IVT, proc0's stack/backing store and gp,
 * records the bootinfo PA, runs the self-relocator for function
 * descriptors and calls ia64_init().  Never returns.
 */
ENTRY(__start, 1)
.prologue
.save rp,r0
.body
{ .mlx
mov ar.rsc=0
movl r16=ia64_vector_table // set up IVT early
;;
}
{ .mlx
mov cr.iva=r16
movl r16=kstack
;;
}
{ .mmi
srlz.i
;;
ssm IA64_PSR_DFH // disable high FP partition (lazy save/restore)
mov r17=KSTACK_PAGES*PAGE_SIZE-SIZEOF_PCB-SIZEOF_TRAPFRAME-16
;;
}
{ .mlx
add sp=r16,r17 // proc0's stack
movl gp=__gp // find kernel globals
;;
}
{ .mlx
mov ar.bspstore=r16 // switch backing store
movl r16=pa_bootinfo
;;
}
st8 [r16]=r8 // save the PA of the bootinfo block
loadrs // invalidate regs
;;
mov ar.rsc=3 // turn rse back on
;;
alloc r16=ar.pfs,0,0,1,0
;;
movl out0=0 // we are linked at the right address
;; // we just need to process fptrs
br.call.sptk.many rp=_reloc
;;
br.call.sptk.many rp=ia64_init
;;
/* NOTREACHED */
1: br.cond.sptk.few 1b // spin forever if ia64_init() ever returns
END(__start)
/*
 * fork_trampoline()
 *
 * Arrange for a function to be invoked neatly, after a cpu_switch().
 *
 * Invokes fork_exit() passing in three arguments: a callout function, an
 * argument to the callout, and a trapframe pointer. For child processes
 * returning from fork(2), the argument is a pointer to the child process.
 *
 * The callout function and its argument is in the trapframe in scratch
 * registers r2 and r3.
 */
ENTRY(fork_trampoline, 0)
.prologue
.save rp,r0
.body
{ .mmi
alloc r14=ar.pfs,0,0,3,0
add r15=32+SIZEOF_SPECIAL+8,sp	// &tf scratch r2 (callout fn)
add r16=32+SIZEOF_SPECIAL+16,sp	// &tf scratch r3 (callout arg)
;;
}
{ .mmi
ld8 out0=[r15]
ld8 out1=[r16]
nop 0
}
{ .mfb
add out2=16,sp			// trapframe pointer
nop 0
br.call.sptk rp=fork_exit
;;
}
// If we get back here, it means we're a user space process that's
// the immediate result of fork(2).
.global enter_userland
.type enter_userland, @function
enter_userland:
{ .mmi
add r14=24,sp
;;
ld8 r14=[r14]			// NOTE(review): field at sp+24 presumably
nop 0				// flags syscall vs. trap entry - confirm
;;				// against the trapframe layout
}
{ .mbb
cmp.eq p6,p7=r0,r14
(p6) br.sptk exception_restore		// zero: leave via trap path
(p7) br.sptk epc_syscall_return		// non-zero: leave via syscall path
;;
}
END(fork_trampoline)
#ifdef SMP
/*
 * AP wake-up entry point. The handoff state is similar as for the BSP,
 * as described on page 3-9 of the IPF SAL Specification. The difference
 * lies in the contents of register b0. For APs this register holds the
 * return address into the SAL rendezvous routine.
 *
 * Note that we're responsible for clearing the IRR bit by reading cr.ivr
 * and issuing the EOI to the local SAPIC.
 *
 * Runs in physical mode: programs region registers 5-7, inserts a
 * translation register covering the kernel, then rfi's into virtual
 * mode and sets up the AP's stack before calling ia64_ap_startup().
 */
.align 32
ENTRY(os_boot_rendez,0)
mov r16=cr.ivr // clear IRR bit
;;
srlz.d
mov cr.eoi=r0 // ACK the wake-up
;;
srlz.d
rsm IA64_PSR_IC|IA64_PSR_I	// interruption collection + ints off
;;
mov r16 = (5<<8)|(PAGE_SHIFT<<2)|1	// rid 5, page size, VHPT enabled
movl r17 = 5<<61
;;
mov rr[r17] = r16
;;
srlz.d
mov r16 = (6<<8)|(28<<2)	// rid 6, 256MB pages
movl r17 = 6<<61
;;
mov rr[r17] = r16
;;
srlz.d
mov r16 = (7<<8)|(28<<2)	// rid 7, 256MB pages
movl r17 = 7<<61
;;
mov rr[r17] = r16
;;
srlz.d
mov r16 = (PTE_P|PTE_MA_WB|PTE_A|PTE_D|PTE_PL_KERN|PTE_AR_RWX)
mov r18 = 28<<2
;;
mov cr.ifa = r17		// map region 7 base
mov cr.itir = r18
ptr.d r17, r18			// purge any overlapping TRs first
ptr.i r17, r18
;;
srlz.i
;;
itr.d dtr[r0] = r16		// insert 256MB kernel mapping (data)
;;
itr.i itr[r0] = r16		// and instruction side
;;
srlz.i
;;
1: mov r16 = ip
add r17 = 2f-1b, r17		// NOTE(review): r17 still holds 7<<61,
movl r18 = (IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_DFH|IA64_PSR_DT|IA64_PSR_IC|IA64_PSR_IT|IA64_PSR_RT)
;;
add r17 = r17, r16		// so iip = region-7 alias of label 2f
mov cr.ipsr = r18
mov cr.ifs = r0
;;
mov cr.iip = r17
;;
rfi				// switch to virtual mode at 2f
.align 32
2:
{ .mlx
mov ar.rsc = 0
movl r16 = ia64_vector_table // set up IVT early
;;
}
{ .mlx
mov cr.iva = r16
movl r16 = ap_stack
;;
}
{ .mmi
srlz.i
;;
ld8 r16 = [r16]			// per-AP stack set up by the BSP
mov r18 = KSTACK_PAGES*PAGE_SIZE-SIZEOF_PCB-SIZEOF_TRAPFRAME-16
;;
}
{ .mlx
mov ar.bspstore = r16
movl gp = __gp
;;
}
{ .mmi
loadrs
;;
alloc r17 = ar.pfs, 0, 0, 0, 0
add sp = r18, r16
;;
}
{ .mfb
mov ar.rsc = 3
nop 0
br.call.sptk.few rp = ia64_ap_startup
;;
}
/* NOT REACHED */
9:
{ .mfb
nop 0
nop 0
br.sptk 9b
;;
}
END(os_boot_rendez)
#endif /* SMP */
/*
 * Create a default interrupt name table. The first entry (vector 0) is
 * hardwired to the clock interrupt.  The remaining INTRCNT_COUNT-1
 * entries are generated as "#NNN" with a three-digit decimal vector
 * number; every entry is space-padded to INTRNAME_LEN and
 * NUL-terminated.  intrcnt is the matching array of 64-bit counters,
 * zero-initialized.
 */
.data
.align 8
EXPORT(intrnames)
.ascii "clock"
.fill INTRNAME_LEN - 5 - 1, 1, ' '
.byte 0
intr_n = 0
.rept INTRCNT_COUNT - 1
.ascii "#"
.byte intr_n / 100 + '0'
.byte (intr_n % 100) / 10 + '0'
.byte intr_n % 10 + '0'
.fill INTRNAME_LEN - 1 - 3 - 1, 1, ' '
.byte 0
intr_n = intr_n + 1
.endr
EXPORT(eintrnames)
.align 8
EXPORT(intrcnt)
.fill INTRCNT_COUNT, 8, 0
EXPORT(eintrcnt)
.text
// _reloc(image_base)
//
// Self-relocation helper: walk the kernel's _DYNAMIC section, collect
// the RELA table parameters, and apply REL64LSB, DIR64LSB and
// FPTR64LSB relocations.  Function-descriptor (FPTR) relocations are
// satisfied from the static fptr_storage pool, reusing an existing
// descriptor when one already points at the same function.
//
// in0: image base
// Returns r8 = 0 on success, 1 if the fptr pool is exhausted.
STATIC_ENTRY(_reloc, 1)
alloc loc0=ar.pfs,1,2,0,0
mov loc1=rp
;;
movl r15=@gprel(_DYNAMIC) // find _DYNAMIC etc.
movl r2=@gprel(fptr_storage)
movl r3=@gprel(fptr_storage_end)
;;
add r15=r15,gp // relocate _DYNAMIC etc.
add r2=r2,gp
add r3=r3,gp
;;
// Pass 1: scan the dynamic tags for the RELA parameters.
1: ld8 r16=[r15],8 // read r15->d_tag
;;
ld8 r17=[r15],8 // and r15->d_val
;;
cmp.eq p6,p0=DT_NULL,r16 // done?
(p6) br.cond.dpnt.few 2f
;;
cmp.eq p6,p0=DT_RELA,r16
;;
(p6) add r18=r17,in0 // found rela section
;;
cmp.eq p6,p0=DT_RELASZ,r16
;;
(p6) mov r19=r17 // found rela size
;;
cmp.eq p6,p0=DT_SYMTAB,r16
;;
(p6) add r20=r17,in0 // found symbol table
;;
(p6) setf.sig f8=r20
;;
cmp.eq p6,p0=DT_SYMENT,r16
;;
(p6) setf.sig f9=r17 // found symbol entry size
;;
cmp.eq p6,p0=DT_RELAENT,r16
;;
(p6) mov r22=r17 // found rela entry size
;;
br.sptk.few 1b
// Pass 2: apply each Elf64_Rela entry until relasz is consumed.
2:
ld8 r15=[r18],8 // read r_offset
;;
ld8 r16=[r18],8 // read r_info
add r15=r15,in0 // relocate r_offset
;;
ld8 r17=[r18],8 // read r_addend
sub r19=r19,r22 // update relasz
extr.u r23=r16,0,32 // ELF64_R_TYPE(r16)
;;
cmp.eq p6,p0=R_IA64_NONE,r23
(p6) br.cond.dpnt.few 3f
;;
cmp.eq p6,p0=R_IA64_REL64LSB,r23
(p6) br.cond.dptk.few 4f
;;
// Symbolic relocation: look up st_value via integer multiply in the
// FP unit (no integer multiplier on ia64).
extr.u r16=r16,32,32 // ELF64_R_SYM(r16)
;;
setf.sig f10=r16 // so we can multiply
;;
xma.lu f10=f10,f9,f8 // f10=symtab + r_sym*syment
;;
getf.sig r16=f10
;;
add r16=8,r16 // address of st_value
;;
ld8 r16=[r16] // read symbol value
;;
add r16=r16,in0 // relocate symbol value
;;
cmp.eq p6,p0=R_IA64_DIR64LSB,r23
(p6) br.cond.dptk.few 5f
;;
cmp.eq p6,p0=R_IA64_FPTR64LSB,r23
(p6) br.cond.dptk.few 6f
;;
3:
cmp.ltu p6,p0=0,r19 // more?
(p6) br.cond.dptk.few 2b // loop
mov r8=0 // success return value
br.cond.sptk.few 9f // done
4:
add r16=in0,r17 // BD + A
;;
st8 [r15]=r16 // word64 (LSB)
br.cond.sptk.few 3b
5:
add r16=r16,r17 // S + A
;;
st8 [r15]=r16 // word64 (LSB)
br.cond.sptk.few 3b
6:
// FPTR64LSB: find or build a 16-byte function descriptor for r16.
movl r17=@gprel(fptr_storage)
;;
add r17=r17,gp // start of fptrs
;;
7: cmp.geu p6,p0=r17,r2 // end of fptrs?
(p6) br.cond.dpnt.few 8f // can't find existing fptr
ld8 r20=[r17] // read function from fptr
;;
cmp.eq p6,p0=r16,r20 // same function?
;;
(p6) st8 [r15]=r17 // reuse fptr
(p6) br.cond.sptk.few 3b // done
add r17=16,r17 // next fptr
br.cond.sptk.few 7b
8: // allocate new fptr
mov r8=1 // failure return value
cmp.geu p6,p0=r2,r3 // space left?
(p6) br.cond.dpnt.few 9f // bail out
st8 [r15]=r2 // install fptr
st8 [r2]=r16,8 // write fptr address
;;
st8 [r2]=gp,8 // write fptr gp
br.cond.sptk.few 3b
9:
mov ar.pfs=loc0
mov rp=loc1
;;
br.ret.sptk.few rp
END(_reloc)
/*
 * Static pool of 16-byte function descriptors (address, gp) used by
 * _reloc() to satisfy FPTR64LSB relocations.
 */
.data
.align 16
.global fptr_storage
fptr_storage:
.space 4096*16 // XXX fixed-size pool; _reloc returns 1 when exhausted
fptr_storage_end:

View File

@ -1,223 +0,0 @@
/*-
* Copyright (c) 2000-2001 Doug Rabson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asm.h>
/*
 * PAL procedure entry point, filled in at boot; consulted by all of
 * the ia64_call_pal_* wrappers below.
 */
.data
.global ia64_pal_entry
ia64_pal_entry: .quad 0
.text
/*
 * struct ia64_pal_result ia64_call_pal_static(u_int64_t proc,
 * u_int64_t arg2, u_int64_t arg2, u_int64_t arg3)
 *
 * Static (register-based) PAL calling convention: procedure number in
 * r28, arguments in r29-r31, return address in b0.  Interrupts are
 * disabled across the firmware call and psr.l is restored afterwards.
 */
ENTRY(ia64_call_pal_static, 4)
.regstk 4,5,0,0
palret = loc0
entry = loc1
rpsave = loc2
pfssave = loc3
psrsave = loc4
alloc pfssave=ar.pfs,4,5,0,0
;;
mov rpsave=rp
movl entry=@gprel(ia64_pal_entry)
1: mov palret=ip // for return address
;;
add entry=entry,gp
mov psrsave=psr
mov r28=in0 // procedure number
;;
ld8 entry=[entry] // read entry point
mov r29=in1 // copy arguments
mov r30=in2
mov r31=in3
;;
mov b6=entry
add palret=2f-1b,palret // calculate return address
;;
mov b0=palret
rsm psr.i // disable interrupts
;;
br.cond.sptk b6 // call into firmware
2: mov psr.l=psrsave
mov rp=rpsave
mov ar.pfs=pfssave
;;
srlz.d
br.ret.sptk rp
END(ia64_call_pal_static)
#ifdef _KERNEL
/*
 * struct ia64_pal_result ia64_call_pal_static_physical(u_int64_t proc,
 * u_int64_t arg2, u_int64_t arg2, u_int64_t arg3)
 *
 * As ia64_call_pal_static(), but the call is made in physical mode:
 * entry point and return address are stripped to physical addresses,
 * ia64_physical_mode() switches the CPU over (returning the previous
 * psr in ret0) and ia64_change_mode() restores it afterwards.
 */
ENTRY(ia64_call_pal_static_physical, 4)
.regstk 4,5,0,0
palret = loc0
entry = loc1
rpsave = loc2
pfssave = loc3
psrsave = loc4
alloc pfssave=ar.pfs,4,5,0,0
;;
mov rpsave=rp
movl entry=@gprel(ia64_pal_entry)
1: mov palret=ip // for return address
;;
add entry=entry,gp
mov r28=in0 // procedure number
;;
ld8 entry=[entry] // read entry point
mov r29=in1 // copy arguments
mov r30=in2
mov r31=in3
;;
dep entry=0,entry,61,3 // physical address
dep palret=0,palret,61,3 // physical address
br.call.sptk.many rp=ia64_physical_mode
mov psrsave=ret0		// previous psr, for ia64_change_mode
;;
mov b6=entry
add palret=2f-1b,palret // calculate return address
;;
mov b0=palret
br.cond.sptk b6 // call into firmware
;;
2: mov r14=psrsave		// psr to restore (arg to ia64_change_mode)
;;
br.call.sptk.many rp=ia64_change_mode
;;
mov rp=rpsave
mov ar.pfs=pfssave
;;
br.ret.sptk rp
END(ia64_call_pal_static_physical)
#endif
/*
 * struct ia64_pal_result ia64_call_pal_stacked(u_int64_t proc,
 * u_int64_t arg2, u_int64_t arg2, u_int64_t arg3)
 *
 * Stacked PAL calling convention: procedure number in both r28 and
 * out0, arguments in out1-out3, called as an ordinary procedure via
 * br.call.  Interrupts are disabled across the firmware call.
 */
ENTRY(ia64_call_pal_stacked, 4)
.regstk 4,4,4,0
entry = loc0
rpsave = loc1
pfssave = loc2
psrsave = loc3
alloc pfssave=ar.pfs,4,4,4,0
;;
mov rpsave=rp
movl entry=@gprel(ia64_pal_entry)
;;
add entry=entry,gp
mov psrsave=psr
mov r28=in0 // procedure number
mov out0=in0
;;
ld8 entry=[entry] // read entry point
mov out1=in1 // copy arguments
mov out2=in2
mov out3=in3
;;
mov b6=entry
;;
rsm psr.i // disable interrupts
;;
br.call.sptk.many rp=b6 // call into firmware
mov psr.l=psrsave
mov rp=rpsave
mov ar.pfs=pfssave
;;
srlz.d
br.ret.sptk rp
END(ia64_call_pal_stacked)
#ifdef _KERNEL
/*
 * struct ia64_pal_result ia64_call_pal_stacked_physical(u_int64_t proc,
 * u_int64_t arg2, u_int64_t arg2, u_int64_t arg3)
 *
 * As ia64_call_pal_stacked(), but the call is made in physical mode
 * via ia64_physical_mode()/ia64_change_mode(), with the PAL entry
 * point stripped to its physical address.
 */
ENTRY(ia64_call_pal_stacked_physical, 4)
.regstk 4,4,4,0
entry = loc0
rpsave = loc1
pfssave = loc2
psrsave = loc3
alloc pfssave=ar.pfs,4,4,4,0
;;
mov rpsave=rp
movl entry=@gprel(ia64_pal_entry)
;;
add entry=entry,gp
mov r28=in0 // procedure number
mov out0=in0
;;
ld8 entry=[entry] // read entry point
mov out1=in1 // copy arguments
mov out2=in2
mov out3=in3
;;
dep entry=0,entry,61,3 // physical address
br.call.sptk.many rp=ia64_physical_mode
mov psrsave=ret0		// previous psr, for ia64_change_mode
;;
mov b6=entry
;;
br.call.sptk.many rp=b6 // call into firmware
;;
mov r14=psrsave			// psr to restore (arg to ia64_change_mode)
;;
br.call.sptk.many rp=ia64_change_mode
;;
mov rp=rpsave
mov ar.pfs=pfssave
;;
br.ret.sptk rp
END(ia64_call_pal_stacked_physical)
#endif

View File

@ -1,348 +0,0 @@
// $FreeBSD$
//
// Copyright (c) 1999, 2000
// Intel Corporation.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. All advertising materials mentioning features or use of this software
// must display the following acknowledgement:
//
// This product includes software developed by Intel Corporation and
// its contributors.
//
// 4. Neither the name of Intel Corporation or its contributors may be
// used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION AND CONTRIBUTORS ``AS IS''
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
//
//
// Module Name:
//
// setjmp.s
//
// Abstract:
//
// Contains an implementation of setjmp and longjmp for the
// IA-64 architecture.
.file "setjmp.s"
#include <machine/asm.h>
#include <machine/setjmp.h>
// int setjmp(struct jmp_buffer *)
//
// Setup a non-local goto.
//
// Description:
//
// SetJump stores the current register set in the area pointed to
// by "save". It returns zero. Subsequent calls to "LongJump" will
// restore the registers and return non-zero to the same location.
//
// On entry, r32 contains the pointer to the jmp_buffer
//
ENTRY(setjmp, 1)
//
// Make sure buffer is aligned at 16byte boundary
// Note: this always advances the pointer by 16 after masking, even
// when it was already aligned; longjmp applies the same adjustment,
// so the two agree on the effective buffer address.
//
add r10 = -0x10,r0 ;; // mask the lower 4 bits
and r32 = r32, r10;;
add r32 = 0x10, r32;; // move to next 16 byte boundary
add r10 = J_PREDS, r32 // skip Unats & pfs save area
add r11 = J_BSP, r32
//
// save immediate context
//
mov r2 = ar.bsp // save backing store pointer
mov r3 = pr // save predicates
flushrs
;;
//
// save user Unat register
//
mov r16 = ar.lc // save loop count register
mov r14 = ar.unat // save user Unat register
st8 [r10] = r3, J_LC-J_PREDS
st8 [r11] = r2, J_R4-J_BSP
;;
st8 [r10] = r16, J_R5-J_LC
st8 [r32] = r14, J_NATS // Note: Unat at the
// beginning of the save area
mov r15 = ar.pfs
;;
//
// save preserved general registers & NaT's
//
st8.spill [r11] = r4, J_R6-J_R4
;;
st8.spill [r10] = r5, J_R7-J_R5
;;
st8.spill [r11] = r6, J_SP-J_R6
;;
st8.spill [r10] = r7, J_F3-J_R7
;;
st8.spill [r11] = sp, J_F2-J_SP
;;
//
// save spilled Unat and pfs registers
//
mov r2 = ar.unat // save Unat register after spill
;;
st8 [r32] = r2, J_PFS-J_NATS // save unat for spilled regs
;;
st8 [r32] = r15 // save pfs
//
// save floating registers
//
stf.spill [r11] = f2, J_F4-J_F2
stf.spill [r10] = f3, J_F5-J_F3
;;
stf.spill [r11] = f4, J_F16-J_F4
stf.spill [r10] = f5, J_F17-J_F5
;;
stf.spill [r11] = f16, J_F18-J_F16
stf.spill [r10] = f17, J_F19-J_F17
;;
stf.spill [r11] = f18, J_F20-J_F18
stf.spill [r10] = f19, J_F21-J_F19
;;
stf.spill [r11] = f20, J_F22-J_F20
stf.spill [r10] = f21, J_F23-J_F21
;;
stf.spill [r11] = f22, J_F24-J_F22
stf.spill [r10] = f23, J_F25-J_F23
;;
stf.spill [r11] = f24, J_F26-J_F24
stf.spill [r10] = f25, J_F27-J_F25
;;
stf.spill [r11] = f26, J_F28-J_F26
stf.spill [r10] = f27, J_F29-J_F27
;;
stf.spill [r11] = f28, J_F30-J_F28
stf.spill [r10] = f29, J_F31-J_F29
;;
stf.spill [r11] = f30, J_FPSR-J_F30
stf.spill [r10] = f31, J_B0-J_F31 // size of f31 + fpsr
//
// save FPSR register & branch registers
//
mov r2 = ar.fpsr // save fpsr register
mov r3 = b0
;;
st8 [r11] = r2, J_B1-J_FPSR
st8 [r10] = r3, J_B2-J_B0
mov r2 = b1
mov r3 = b2
;;
st8 [r11] = r2, J_B3-J_B1
st8 [r10] = r3, J_B4-J_B2
mov r2 = b3
mov r3 = b4
;;
st8 [r11] = r2, J_B5-J_B3
st8 [r10] = r3
mov r2 = b5
;;
st8 [r11] = r2
;;
//
// return
//
mov r8 = r0 // return 0 from setjmp
mov ar.unat = r14 // restore unat
br.ret.sptk b0
END(setjmp)
//
// void longjmp(struct jmp_buffer *, int val)
//
// Perform a non-local goto.
//
// Description:
//
// LongJump initializes the register set to the values saved by a
// previous 'SetJump' and jumps to the return location saved by that
// 'SetJump'. This has the effect of unwinding the stack and returning
// for a second time to the 'SetJump'.
//
ENTRY(longjmp, 2)
//
// Make sure buffer is aligned at a 16-byte boundary
//
add r10 = -0x10,r0 ;; // mask the lower 4 bits
and r32 = r32, r10;;
add r32 = 0x10, r32;; // move to next 16 byte boundary
//
// caching the return value as we do invala in the end
//
mov r8 = r33 // return value
//
// get immediate context
//
mov r14 = ar.rsc // get user RSC conf
add r10 = J_PFS, r32 // get address of pfs
add r11 = J_NATS, r32
;;
ld8 r15 = [r10], J_BSP-J_PFS // get pfs
ld8 r2 = [r11], J_LC-J_NATS // get unat for spilled regs
;;
mov ar.unat = r2
;;
ld8 r16 = [r10], J_PREDS-J_BSP // get backing store pointer
mov ar.rsc = r0 // put RSE in enforced lazy
mov ar.pfs = r15
;;
//
// while returning from longjmp the BSPSTORE and BSP needs to be
// same and discard all the registers allocated after we did
// setjmp. Also, we need to generate the RNAT register since we
// did not flush the RSE on setjmp.
//
mov r17 = ar.bspstore // get current BSPSTORE
;;
cmp.ltu p6,p7 = r17, r16 // is it less than BSP of
(p6) br.spnt.few .flush_rse
mov r19 = ar.rnat // get current RNAT
;;
loadrs // invalidate dirty regs
br.sptk.many .restore_rnat // restore RNAT
.flush_rse:
flushrs
;;
mov r19 = ar.rnat // get current RNAT
mov r17 = r16 // current BSPSTORE
;;
.restore_rnat:
//
// check if RNAT is saved between saved BSP and curr BSPSTORE
//
mov r18 = 0x3f
;;
dep r18 = r18,r16,3,6 // get RNAT address
;;
cmp.ltu p8,p9 = r18, r17 // RNAT saved on RSE
;;
(p8) ld8 r19 = [r18] // get RNAT from RSE
;;
mov ar.bspstore = r16 // set new BSPSTORE
;;
mov ar.rnat = r19 // restore RNAT
mov ar.rsc = r14 // restore RSC conf
ld8 r3 = [r11], J_R4-J_LC // get lc register
ld8 r2 = [r10], J_R5-J_PREDS // get predicates
;;
mov pr = r2, -1
mov ar.lc = r3
//
// restore preserved general registers & NaT's
//
ld8.fill r4 = [r11], J_R6-J_R4
;;
ld8.fill r5 = [r10], J_R7-J_R5
ld8.fill r6 = [r11], J_SP-J_R6
;;
ld8.fill r7 = [r10], J_F2-J_R7
ld8.fill sp = [r11], J_F3-J_SP
;;
//
// restore floating registers
//
ldf.fill f2 = [r10], J_F4-J_F2
ldf.fill f3 = [r11], J_F5-J_F3
;;
ldf.fill f4 = [r10], J_F16-J_F4
ldf.fill f5 = [r11], J_F17-J_F5
;;
ldf.fill f16 = [r10], J_F18-J_F16
ldf.fill f17 = [r11], J_F19-J_F17
;;
ldf.fill f18 = [r10], J_F20-J_F18
ldf.fill f19 = [r11], J_F21-J_F19
;;
ldf.fill f20 = [r10], J_F22-J_F20
ldf.fill f21 = [r11], J_F23-J_F21
;;
ldf.fill f22 = [r10], J_F24-J_F22
ldf.fill f23 = [r11], J_F25-J_F23
;;
ldf.fill f24 = [r10], J_F26-J_F24
ldf.fill f25 = [r11], J_F27-J_F25
;;
ldf.fill f26 = [r10], J_F28-J_F26
ldf.fill f27 = [r11], J_F29-J_F27
;;
ldf.fill f28 = [r10], J_F30-J_F28
ldf.fill f29 = [r11], J_F31-J_F29
;;
ldf.fill f30 = [r10], J_FPSR-J_F30
ldf.fill f31 = [r11], J_B0-J_F31 ;;
//
// restore branch registers and fpsr
//
ld8 r16 = [r10], J_B1-J_FPSR // get fpsr
ld8 r17 = [r11], J_B2-J_B0 // get return pointer
;;
mov ar.fpsr = r16
mov b0 = r17
ld8 r2 = [r10], J_B3-J_B1
ld8 r3 = [r11], J_B4-J_B2
;;
mov b1 = r2
mov b2 = r3
ld8 r2 = [r10], J_B5-J_B3
ld8 r3 = [r11]
;;
mov b3 = r2
mov b4 = r3
ld8 r2 = [r10]
ld8 r21 = [r32] // get user unat
;;
mov b5 = r2
mov ar.unat = r21
//
// invalidate ALAT
//
invala ;;
br.ret.sptk b0 // "return" to the setjmp call site with r8 = val
END(longjmp)

View File

@ -1,901 +0,0 @@
/*-
* Copyright (c) 1998 Doug Rabson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Chris G. Demetriou
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
#include <machine/asm.h>
#include <machine/ia64_cpu.h>
#include <assym.s>
.text
/*
* ia64_change_mode: change mode to/from physical mode
*
* Arguments:
* r14 psr for desired mode
*
* Modifies:
* r15-r19 scratch
* ar.bsp translated to new mode
*/
// Switch the CPU between physical and virtual addressing. The desired
// psr value is passed in r14; a flushed RSE and an rfi through cr.iip
// make the transition atomic. ar.bsp, rp and sp are re-based for the
// new mode. Interrupts are disabled across the switch.
ENTRY(ia64_change_mode, 0)
rsm psr.i | psr.ic
mov r19=ar.rsc // save rsc while we change mode
tbit.nz p6,p7=r14,17 // physical or virtual ?
;;
mov ar.rsc=0 // turn off RSE
(p6) mov r15=7 // RR base for virtual addresses
(p7) mov r15=0 // RR base for physical addresses
flushrs // no dirty registers please
srlz.i
;;
mov r16=ar.bsp
mov r17=rp
mov r18=ar.rnat
;;
dep r16=r15,r16,61,3 // new address of ar.bsp
dep r17=r15,r17,61,3 // new address of rp
dep sp=r15,sp,61,3 // new address of sp
;;
mov ar.bspstore=r16
mov rp=r17
;;
1: mov r16=ip // capture current ip to compute rfi target
mov ar.rnat=r18
mov cr.ipsr=r14 // psr for new mode
;;
add r16=2f-1b,r16 // address to rfi to
;;
dep r16=r15,r16,61,3 // new mode address for rfi
;;
mov cr.iip=r16 // setup for rfi
mov cr.ifs=r0
;;
rfi
2: mov ar.rsc=r19 // restore ar.rsc
br.ret.sptk.few rp // now in new mode
END(ia64_change_mode)
/*
* ia64_physical_mode: change mode to physical mode
*
* Return:
* ret0 psr to restore
*
* Modifies:
* r15-r18 scratch
* ar.bsp translated to physical mode
* psr.i cleared
*/
// Enter physical addressing mode. Returns (in ret0) the previous psr
// so the caller can later restore it via ia64_change_mode. Clears the
// interrupt and translation bits and forces BN=1 before tail-calling
// ia64_change_mode with the new psr in r14.
ENTRY(ia64_physical_mode, 0)
mov r14=psr
mov ret0=psr
movl r15=(IA64_PSR_I|IA64_PSR_IT|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFL|IA64_PSR_DFH)
movl r16=IA64_PSR_BN
;;
andcm r14=r14,r15 // clear various xT bits
;;
or r14=r14,r16 // make sure BN=1
or ret0=ret0,r16 // make sure BN=1
br.cond.sptk.many ia64_change_mode // tail call; returns to our caller
END(ia64_physical_mode)
/*
* ia64_call_efi_physical: call an EFI procedure in physical mode
*
* Arguments:
* in0 Address of EFI procedure descriptor
* in1-in5 Arguments to EFI procedure
*
* Return:
* ret0-ret3 return values from EFI
*
*/
// Call an EFI procedure in physical mode. in0 points to the EFI
// function descriptor (entry address followed by gp); in1-in5 are
// forwarded as out0-out4. Switches to physical mode around the call
// and restores the saved psr (loc2) afterwards. Returns whatever the
// EFI procedure returned in ret0-ret3.
ENTRY(ia64_call_efi_physical, 6)
.prologue
.regstk 6,4,5,0
.save ar.pfs,loc0
alloc loc0=ar.pfs,6,4,5,0
;;
.save rp,loc1
mov loc1=rp
;;
.body
br.call.sptk.many rp=ia64_physical_mode
;;
mov loc2=r8 // psr to restore mode
mov loc3=gp // save kernel gp
ld8 r14=[in0],8 // function address
;;
mov out0=in1
mov out1=in2
mov out2=in3
mov out3=in4
mov out4=in5
ld8 gp=[in0] // function gp value
;;
mov b6=r14
;;
br.call.sptk.many rp=b6 // call EFI procedure
mov gp=loc3 // restore kernel gp
;;
mov r14=loc2 // psr to restore mode
br.call.sptk.many rp=ia64_change_mode
;;
mov rp=loc1
mov ar.pfs=loc0
;;
br.ret.sptk.many rp
END(ia64_call_efi_physical)
/**************************************************************************/
// Common fault handler for the fu*/su* user-access primitives below.
// On a fault, the trap code dispatches here with r15 still pointing at
// pcb_onfault; clear it and return -1 to the fu*/su* caller.
ENTRY(fusufault, 0)
{ .mib
st8.rel [r15]=r0 // Clear onfault.
add ret0=-1,r0
br.ret.sptk rp
;;
}
END(fusufault)
/*
* casuptr(intptr_t *p, intptr_t old, intptr_t new)
* Perform a compare-exchange in user space.
*/
/*
 * casuptr(intptr_t *p, intptr_t old, intptr_t new)
 *	Compare-exchange in user space: if *p == old, store new.
 *	Returns the value previously at *p, or -1 if p is not a user
 *	address or the access faults (via fusufault).
 */
ENTRY(casuptr, 3)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
;;
}
{ .mib
ld8 r15=[r15] // r15 = curthread
cmp.geu p6,p0=in0,r14 // reject kernel addresses
(p6) br.dpnt.few 1f
;;
}
{ .mlx
add r15=TD_PCB,r15
movl r14=fusufault
;;
}
{ .mmi
ld8 r15=[r15] // r15 = PCB
;;
mov ar.ccv=in1 // comparand for cmpxchg
add r15=PCB_ONFAULT,r15
;;
}
{ .mmi
st8 [r15]=r14 // Set onfault
;;
cmpxchg8.rel ret0=[in0],in2,ar.ccv
nop 0
;;
}
{ .mfb
st8.rel [r15]=r0 // Clear onfault
nop 0
br.ret.sptk rp
;;
}
1:
{ .mfb
add ret0=-1,r0 // bad address: return -1
nop 0
br.ret.sptk rp
;;
}
END(casuptr)
/*
* subyte(void *addr, int byte)
* suword16(void *addr, int word)
* suword32(void *addr, int word)
* suword64|suword(void *addr, long word)
* Store in user space
*/
// subyte(void *addr, int byte)
//	Store one byte to user space. Returns 0 on success, -1 if addr
//	is not a user address or the store faults (via fusufault).
ENTRY(subyte, 2)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
;;
}
{ .mib
ld8 r15=[r15] // r15 = curthread
cmp.geu p6,p0=in0,r14 // reject kernel addresses
(p6) br.dpnt.few 1f
;;
}
{ .mlx
add r15=TD_PCB,r15
movl r14=fusufault
;;
}
{ .mmi
ld8 r15=[r15] // r15 = PCB
;;
nop 0
add r15=PCB_ONFAULT,r15
;;
}
{ .mmi
st8 [r15]=r14 // Set onfault
;;
st1.rel [in0]=in1 // may fault -> fusufault
nop 0
;;
}
{ .mib
st8.rel [r15]=r0 // Clear onfault
mov ret0=r0
br.ret.sptk rp
;;
}
1:
{ .mfb
add ret0=-1,r0 // bad address: return -1
nop 0
br.ret.sptk rp
;;
}
END(subyte)
// suword16(void *addr, int word)
//	Store a 16-bit word to user space. Returns 0 on success, -1 if
//	addr is not a user address or the store faults (via fusufault).
ENTRY(suword16, 2)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
;;
}
{ .mib
ld8 r15=[r15] // r15 = curthread
cmp.geu p6,p0=in0,r14 // reject kernel addresses
(p6) br.dpnt.few 1f
;;
}
{ .mlx
add r15=TD_PCB,r15
movl r14=fusufault
;;
}
{ .mmi
ld8 r15=[r15] // r15 = PCB
;;
nop 0
add r15=PCB_ONFAULT,r15
;;
}
{ .mmi
st8 [r15]=r14 // Set onfault
;;
st2.rel [in0]=in1 // may fault -> fusufault
nop 0
;;
}
{ .mib
st8.rel [r15]=r0 // Clear onfault
mov ret0=r0
br.ret.sptk rp
;;
}
1:
{ .mfb
add ret0=-1,r0 // bad address: return -1
nop 0
br.ret.sptk rp
;;
}
END(suword16)
// suword32(void *addr, int word)
//	Store a 32-bit word to user space. Returns 0 on success, -1 if
//	addr is not a user address or the store faults (via fusufault).
ENTRY(suword32, 2)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
;;
}
{ .mib
ld8 r15=[r15] // r15 = curthread
cmp.geu p6,p0=in0,r14 // reject kernel addresses
(p6) br.dpnt.few 1f
;;
}
{ .mlx
add r15=TD_PCB,r15
movl r14=fusufault
;;
}
{ .mmi
ld8 r15=[r15] // r15 = PCB
;;
nop 0
add r15=PCB_ONFAULT,r15
;;
}
{ .mmi
st8 [r15]=r14 // Set onfault
;;
st4.rel [in0]=in1 // may fault -> fusufault
nop 0
;;
}
{ .mib
st8.rel [r15]=r0 // Clear onfault
mov ret0=r0
br.ret.sptk rp
;;
}
1:
{ .mfb
add ret0=-1,r0 // bad address: return -1
nop 0
br.ret.sptk rp
;;
}
END(suword32)
// suword64(void *addr, long word) -- also exported as suword()
//	Store a 64-bit word to user space. Returns 0 on success, -1 if
//	addr is not a user address or the store faults (via fusufault).
ENTRY(suword64, 2)
XENTRY(suword)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
;;
}
{ .mib
ld8 r15=[r15] // r15 = curthread
cmp.geu p6,p0=in0,r14 // reject kernel addresses
(p6) br.dpnt.few 1f
;;
}
{ .mlx
add r15=TD_PCB,r15
movl r14=fusufault
;;
}
{ .mmi
ld8 r15=[r15] // r15 = PCB
;;
nop 0
add r15=PCB_ONFAULT,r15
;;
}
{ .mmi
st8 [r15]=r14 // Set onfault
;;
st8.rel [in0]=in1 // may fault -> fusufault
nop 0
;;
}
{ .mib
st8.rel [r15]=r0 // Clear onfault
mov ret0=r0
br.ret.sptk rp
;;
}
1:
{ .mfb
add ret0=-1,r0 // bad address: return -1
nop 0
br.ret.sptk rp
;;
}
END(suword64)
/*
* fubyte(void *addr, int byte)
* fuword16(void *addr, int word)
* fuword32(void *addr, int word)
* fuword64|fuword(void *addr, long word)
* Fetch from user space
*/
// fubyte(void *addr)
//	Fetch one byte from user space. Returns the (zero-extended)
//	byte, or -1 if addr is not a user address or the load faults
//	(via fusufault).
ENTRY(fubyte, 1)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
;;
}
{ .mib
ld8 r15=[r15] // r15 = curthread
cmp.geu p6,p0=in0,r14 // reject kernel addresses
(p6) br.dpnt.few 1f
;;
}
{ .mlx
add r15=TD_PCB,r15
movl r14=fusufault
;;
}
{ .mmi
ld8 r15=[r15] // r15 = PCB
;;
nop 0
add r15=PCB_ONFAULT,r15
;;
}
{ .mmi
st8 [r15]=r14 // Set onfault
;;
mf // order onfault store before the user load
nop 0
;;
}
{ .mmb
ld1 ret0=[in0] // may fault -> fusufault
st8.rel [r15]=r0 // Clear onfault
br.ret.sptk rp
;;
}
1:
{ .mfb
add ret0=-1,r0 // bad address: return -1
nop 0
br.ret.sptk rp
;;
}
END(fubyte)
// fuword16(void *addr)
//	Fetch a 16-bit word from user space. Returns the value, or -1
//	if addr is not a user address or the load faults (via fusufault).
ENTRY(fuword16, 2)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
;;
}
{ .mib
ld8 r15=[r15] // r15 = curthread
cmp.geu p6,p0=in0,r14 // reject kernel addresses
(p6) br.dpnt.few 1f
;;
}
{ .mlx
add r15=TD_PCB,r15
movl r14=fusufault
;;
}
{ .mmi
ld8 r15=[r15] // r15 = PCB
;;
nop 0
add r15=PCB_ONFAULT,r15
;;
}
{ .mmi
st8 [r15]=r14 // Set onfault
;;
mf // order onfault store before the user load
nop 0
;;
}
{ .mmb
ld2 ret0=[in0] // may fault -> fusufault
st8.rel [r15]=r0 // Clear onfault
br.ret.sptk rp
;;
}
1:
{ .mfb
add ret0=-1,r0 // bad address: return -1
nop 0
br.ret.sptk rp
;;
}
END(fuword16)
// fuword32(void *addr)
//	Fetch a 32-bit word from user space. Returns the value, or -1
//	if addr is not a user address or the load faults (via fusufault).
ENTRY(fuword32, 2)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
;;
}
{ .mib
ld8 r15=[r15] // r15 = curthread
cmp.geu p6,p0=in0,r14 // reject kernel addresses
(p6) br.dpnt.few 1f
;;
}
{ .mlx
add r15=TD_PCB,r15
movl r14=fusufault
;;
}
{ .mmi
ld8 r15=[r15] // r15 = PCB
;;
nop 0
add r15=PCB_ONFAULT,r15
;;
}
{ .mmi
st8 [r15]=r14 // Set onfault
;;
mf // order onfault store before the user load
nop 0
;;
}
{ .mmb
ld4 ret0=[in0] // may fault -> fusufault
st8.rel [r15]=r0 // Clear onfault
br.ret.sptk rp
;;
}
1:
{ .mfb
add ret0=-1,r0 // bad address: return -1
nop 0
br.ret.sptk rp
;;
}
END(fuword32)
// fuword64(void *addr) -- also exported as fuword()
//	Fetch a 64-bit word from user space. Returns the value, or -1
//	if addr is not a user address or the load faults (via fusufault).
ENTRY(fuword64, 2)
XENTRY(fuword)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
;;
}
{ .mib
ld8 r15=[r15] // r15 = curthread
cmp.geu p6,p0=in0,r14 // reject kernel addresses
(p6) br.dpnt.few 1f
;;
}
{ .mlx
add r15=TD_PCB,r15
movl r14=fusufault
;;
}
{ .mmi
ld8 r15=[r15] // r15 = PCB
;;
nop 0
add r15=PCB_ONFAULT,r15
;;
}
{ .mmi
st8 [r15]=r14 // Set onfault
;;
mf // order onfault store before the user load
nop 0
;;
}
{ .mmb
ld8 ret0=[in0] // may fault -> fusufault
st8.rel [r15]=r0 // Clear onfault
br.ret.sptk rp
;;
}
1:
{ .mfb
add ret0=-1,r0 // bad address: return -1
nop 0
br.ret.sptk rp
;;
}
END(fuword64)
/*
* fuswintr(void *addr)
* suswintr(void *addr)
*/
// fswintrberr: interrupt-time user access error stub.
// Unconditionally returns -1 (no interrupt-safe user access here).
ENTRY(fswintrberr, 0)
{ .mfb
add ret0=-1,r0
nop 0
br.ret.sptk rp
;;
}
END(fswintrberr)
// fuswintr(void *addr): interrupt-time user fetch.
// Deliberately unimplemented; always returns -1.
ENTRY(fuswintr, 1)
{ .mfb
add ret0=-1,r0
nop 0
br.ret.sptk rp
;;
}
END(fuswintr)
// suswintr(void *addr): interrupt-time user store.
// Deliberately unimplemented; always returns -1.
ENTRY(suswintr, 0)
{ .mfb
add ret0=-1,r0
nop 0
br.ret.sptk rp
;;
}
END(suswintr)
/**************************************************************************/
/*
* Copy a null-terminated string within the kernel's address space.
* If lenp is not NULL, store the number of chars copied in *lenp
*
* int copystr(char *from, char *to, size_t len, size_t *lenp);
*/
// int copystr(char *from, char *to, size_t len, size_t *lenp)
//	Copy a NUL-terminated string within kernel space, at most len
//	bytes (including the NUL). If lenp != NULL, *lenp gets the
//	number of bytes copied. Returns 0 on success, ENAMETOOLONG if
//	len was exhausted before the NUL.
ENTRY(copystr, 4)
mov r14=in2 // r14 = i = len
cmp.eq p6,p0=r0,in2
(p6) br.cond.spnt.few 2f // if (len == 0), bail out
1: ld1 r15=[in0],1 // read one byte
;;
st1 [in1]=r15,1 // write that byte
add in2=-1,in2 // len--
;;
cmp.eq p6,p0=r0,r15
cmp.ne p7,p0=r0,in2
;;
(p6) br.cond.spnt.few 2f // if (*from == 0), bail out
(p7) br.cond.sptk.few 1b // if (len != 0) copy more
2: cmp.eq p6,p0=r0,in3
(p6) br.cond.dpnt.few 3f // if (lenp != NULL)
sub r14=r14,in2 // *lenp = (i - len)
;;
st8 [in3]=r14
3: cmp.eq p6,p0=r0,r15
(p6) br.cond.spnt.few 4f // *from == '\0'; leave quietly
mov ret0=ENAMETOOLONG // *from != '\0'; error.
br.ret.sptk.few rp
4: mov ret0=0 // return 0.
br.ret.sptk.few rp
END(copystr)
// int copyinstr(const char *from, char *to, size_t len, size_t *lenp)
//	Copy a NUL-terminated string from user space to kernel space.
//	Validates the source address, arms pcb_onfault with copyerr,
//	then delegates to copystr. Returns copystr's result, or EFAULT
//	(via copyerr) on a bad address or fault.
ENTRY(copyinstr, 4)
.prologue
.regstk 4, 3, 4, 0
.save ar.pfs,loc0
alloc loc0=ar.pfs,4,3,4,0
.save rp,loc1
mov loc1=rp
.body
movl loc2=VM_MAX_ADDRESS // make sure that src addr
;;
cmp.geu p6,p0=in0,loc2 // is in user space.
;;
(p6) br.cond.spnt.few copyerr // if it's not, error out.
movl r14=copyerr // set up fault handler.
add r15=PC_CURTHREAD,r13 // find curthread
;;
ld8 r15=[r15]
;;
add r15=TD_PCB,r15 // find pcb
;;
ld8 r15=[r15]
;;
add loc2=PCB_ONFAULT,r15
;;
st8 [loc2]=r14
;;
mov out0=in0
mov out1=in1
mov out2=in2
mov out3=in3
;;
br.call.sptk.few rp=copystr // do the copy.
st8 [loc2]=r0 // kill the fault handler.
mov ar.pfs=loc0 // restore ar.pfs
mov rp=loc1 // restore ra.
br.ret.sptk.few rp // ret0 left over from copystr
END(copyinstr)
/*
* Not the fastest bcopy in the world.
*/
// void bcopy(const void *src, void *dst, size_t len)
//	Copy len bytes, handling overlap (overlapping copies go
//	backwards, byte by byte). Forward copies use 8-byte words when
//	both pointers share alignment, else fall back to bytes.
//	Always returns 0 in ret0 for the benefit of copyin/copyout.
ENTRY(bcopy, 3)
mov ret0=r0 // return zero for copy{in,out}
;;
cmp.le p6,p0=in2,r0 // bail if len <= 0
(p6) br.ret.spnt.few rp
sub r14=in1,in0 ;; // check for overlap
cmp.ltu p6,p0=r14,in2 // dst-src < len
(p6) br.cond.spnt.few 5f
extr.u r14=in0,0,3 // src & 7
extr.u r15=in1,0,3 ;; // dst & 7
cmp.eq p6,p0=r14,r15 // different alignment?
(p6) br.cond.spnt.few 2f // branch if same alignment
1: ld1 r14=[in0],1 ;; // copy bytewise
st1 [in1]=r14,1
add in2=-1,in2 ;; // len--
cmp.ne p6,p0=r0,in2
(p6) br.cond.dptk.few 1b // loop
br.ret.sptk.few rp // done
2: cmp.eq p6,p0=r14,r0 // aligned?
(p6) br.cond.sptk.few 4f
3: ld1 r14=[in0],1 ;; // copy bytewise until 8-aligned
st1 [in1]=r14,1
extr.u r15=in0,0,3 // src & 7
add in2=-1,in2 ;; // len--
cmp.eq p6,p0=r0,in2 // done?
cmp.eq p7,p0=r0,r15 ;; // aligned now?
(p6) br.ret.spnt.few rp // return if done
(p7) br.cond.spnt.few 4f // go to main copy
br.cond.sptk.few 3b // more bytes to copy
// At this point, in2 is non-zero
4: mov r14=8 ;;
cmp.ltu p6,p0=in2,r14 ;; // len < 8?
(p6) br.cond.spnt.few 1b // byte copy the end
ld8 r15=[in0],8 ;; // copy word
st8 [in1]=r15,8
add in2=-8,in2 ;; // len -= 8
cmp.ne p6,p0=r0,in2 // done?
(p6) br.cond.spnt.few 4b // again
br.ret.sptk.few rp // return
// Don't bother optimising overlap case
5: add in0=in0,in2
add in1=in1,in2 ;;
add in0=-1,in0
add in1=-1,in1 ;;
6: ld1 r14=[in0],-1 ;; // copy backwards, bytewise
st1 [in1]=r14,-1
add in2=-1,in2 ;;
cmp.ne p6,p0=r0,in2
(p6) br.cond.spnt.few 6b
br.ret.sptk.few rp
END(bcopy)
// void *memcpy(void *dst, const void *src, size_t len)
//	memcpy has (dst, src) argument order; bcopy wants (src, dst).
//	Swap in0/in1 and tail-branch to bcopy.
ENTRY(memcpy,3)
mov r14=in0 ;;
mov in0=in1 ;;
mov in1=r14
br.cond.sptk.few bcopy
END(memcpy)
// int copyin(const void *uaddr, void *kaddr, size_t len)
//	Copy len bytes from user space to kernel space. Validates the
//	source address, arms pcb_onfault with copyerr, then delegates
//	to bcopy. Returns 0, or EFAULT (via copyerr) on a bad address
//	or fault.
ENTRY(copyin, 3)
.prologue
.regstk 3, 3, 3, 0
.save ar.pfs,loc0
alloc loc0=ar.pfs,3,3,3,0
.save rp,loc1
mov loc1=rp
.body
movl loc2=VM_MAX_ADDRESS // make sure that src addr
;;
cmp.geu p6,p0=in0,loc2 // is in user space.
;;
(p6) br.cond.spnt.few copyerr // if it's not, error out.
movl r14=copyerr // set up fault handler.
add r15=PC_CURTHREAD,r13 // find curthread
;;
ld8 r15=[r15]
;;
add r15=TD_PCB,r15 // find pcb
;;
ld8 r15=[r15]
;;
add loc2=PCB_ONFAULT,r15
;;
st8 [loc2]=r14
;;
mov out0=in0
mov out1=in1
mov out2=in2
;;
br.call.sptk.few rp=bcopy // do the copy.
st8 [loc2]=r0 // kill the fault handler.
mov ar.pfs=loc0 // restore ar.pfs
mov rp=loc1 // restore ra.
br.ret.sptk.few rp // ret0 left over from bcopy
END(copyin)
// int copyout(const void *kaddr, void *uaddr, size_t len)
//	Copy len bytes from kernel space to user space. Validates the
//	destination address, arms pcb_onfault with copyerr, then
//	delegates to bcopy. Returns 0, or EFAULT (via copyerr) on a
//	bad address or fault.
ENTRY(copyout, 3)
.prologue
.regstk 3, 3, 3, 0
.save ar.pfs,loc0
alloc loc0=ar.pfs,3,3,3,0
.save rp,loc1
mov loc1=rp
.body
movl loc2=VM_MAX_ADDRESS // make sure that dest addr
;;
cmp.geu p6,p0=in1,loc2 // is in user space.
;;
(p6) br.cond.spnt.few copyerr // if it's not, error out.
movl r14=copyerr // set up fault handler.
add r15=PC_CURTHREAD,r13 // find curthread
;;
ld8 r15=[r15]
;;
add r15=TD_PCB,r15 // find pcb
;;
ld8 r15=[r15]
;;
add loc2=PCB_ONFAULT,r15
;;
st8 [loc2]=r14
;;
mov out0=in0
mov out1=in1
mov out2=in2
;;
br.call.sptk.few rp=bcopy // do the copy.
st8 [loc2]=r0 // kill the fault handler.
mov ar.pfs=loc0 // restore ar.pfs
mov rp=loc1 // restore ra.
br.ret.sptk.few rp // ret0 left over from bcopy
END(copyout)
// Fault handler for copyin/copyout/copyinstr: clears pcb_onfault and
// returns EFAULT to the copy routine's caller.
ENTRY(copyerr, 0)
add r14=PC_CURTHREAD,r13 ;; // find curthread
ld8 r14=[r14] ;;
add r14=TD_PCB,r14 ;; // curthread->td_addr
ld8 r14=[r14] ;;
add r14=PCB_ONFAULT,r14 ;; // &curthread->td_pcb->pcb_onfault
st8 [r14]=r0 // reset fault handler
mov ret0=EFAULT // return EFAULT
br.ret.sptk.few rp
END(copyerr)

View File

@ -1,569 +0,0 @@
/*
* Copyright (c) 2002, 2003 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/syscall.h>
#include <machine/asm.h>
#include <assym.s>
/*
* A process performs a syscall by performing an indirect call to the
* address stored in ar.k5. The contents of ar.pfs and rp should be
* saved prior to the syscall in r9 and r10 respectively. The kernel
* will restore these values on return. The value of gp is preserved
* across the call. This allows for small enough syscall stubs without
* getting too weird.
* The address in ar.k5 is the start of the EPC gateway page and also
* the syscall entry point. The syscall code in the gateway page is
* primarily responsible for increasing the privilege level, but will
* also make sure we have a reliable psr.
*
* A process defines:
* r8 - syscall number
* r9 - copy of ar.pfs
* r10 - copy of rp
* in0-in7 - syscall arguments
*
* A syscall returns:
* r8+r9 - syscall return value(s)
* r10 - syscall error flag
* ar.pfs - restored from r9
* rp - restored from r10
* gp - preserved
*
* The EPC syscall code defines:
* r11 - copy of psr.l
* r14 - Kernel memory stack
* r15 - Kernel register stack
*
* Also in the gateway page are the signal trampolines. As such, stacks
* don't have to be made executable per se. Since debuggers have a need
* to know about trampolines, we probably need to define a table of
* vectors or something along those lines so that debuggers can get the
* information they need and we have the freedom to move code around.
*/
// The EPC gateway page. User space calls indirectly through ar.k5,
// which points here; `epc' raises the privilege level, after which we
// branch to epc_syscall with the user psr.l in r11 and the kernel
// stacks in r14/r15. gw_ret and gw_ret_ia32 are the return paths that
// epc_syscall branches back to (see epc_syscall below for the register
// contract: r22=rnat, r24=rsc, r25=fpsr, r28=pfs, r29=rp, b6=user rp).
.section .text.gateway, "ax"
.align PAGE_SIZE
.global ia64_gateway_page
ia64_gateway_page:
{ .mmb
mov r14=ar.k7 // Memory stack
mov r15=ar.k6 // Register stack
epc // raise privilege; fall through at kernel level
;;
}
{ .mlx
mov r11=psr
movl r31=epc_syscall
;;
}
{ .mib
rum psr.be // little-endian for the kernel
mov b7=r31
br b7
;;
}
gw_ret:
// Return path for native (ia64) processes.
{ .mmi
mov ar.rnat=r22
mov ar.rsc=r24
mov ar.pfs=r28
}
{ .mib
mov ar.fpsr=r25
mov b0=r29
br.sptk b6 // back to the syscall stub's caller
;;
}
gw_ret_ia32:
// Return path for ia32 processes: drain the RSE, then branch to the
// ia32 instruction set.
{ .mfb
flushrs
nop 0
nop 0
;;
}
{ .mfb
nop 0
nop 0
br.ia.sptk b6
;;
}
// Signal trampoline for break-based syscall processes. Fixes up the
// RSE state in the ucontext at sp+16 (bspstore, rnat, ndirty), calls
// the handler whose descriptor r10 points at with (sig, code, &uc),
// then issues sigreturn -- and exit if sigreturn ever returns.
// gp, if non-zero, is the bspstore to switch to.
ENTRY(break_sigtramp, 0)
{ .mib
mov ar.rsc=0
cmp.ne p15,p0=0,gp
cover
;;
}
{ .mmi
flushrs
(p15) invala
add r16=16+UC_MCONTEXT+MC_SPECIAL,sp
;;
}
{ .mmi
mov r17=ar.bsp
mov r18=ar.rnat
add r14=40,r16
;;
}
{ .mmi
st8 [r14]=r17,64 // bspstore
(p15) mov ar.bspstore=gp
add r15=48,r16
;;
}
{ .mmi
st8 [r15]=r18 // rnat
st8 [r14]=r0 // ndirty
nop 0
;;
}
{ .mmi
alloc r14=ar.pfs, 0, 0, 3, 0
mov ar.rsc=15 // re-enable eager RSE
mov out0=r8 // signal number
;;
}
{ .mmi
ld8 r16=[r10],8 // function address
;;
ld8 gp=[r10] // function's gp value
mov b7=r16
;;
}
{ .mib
mov out1=r9 // signal code
add out2=16,sp // &ucontext
br.call.sptk rp=b7
;;
}
{ .mmi
mov r15=SYS_sigreturn
add out0=16,sp
break 0x100000 // syscall via break
;;
}
{ .mmi
mov r15=SYS_exit // sigreturn failed; bail out
mov out0=ret0
break 0x100000
;;
}
END(break_sigtramp)
// Signal trampoline for EPC-based syscall processes. Like
// break_sigtramp, but also rewrites the saved pfs in the mcontext and
// uses the EPC syscall path (CALLSYS_NOERROR) for sigreturn/exit.
ENTRY(epc_sigtramp, 0)
{ .mib
mov ar.rsc=0
cmp.ne p15,p0=0,gp
cover
;;
}
{ .mmi
flushrs
(p15) invala
add r16=16+UC_MCONTEXT+MC_SPECIAL,sp
;;
}
{ .mmi
mov r17=ar.bsp
mov r18=ar.rnat
add r14=32,r16
;;
}
{ .mmi
(p15) mov ar.bspstore=gp
ld8 r19=[r14],8
add r15=48,r16
;;
}
{ .mmi
st8 [r14]=r17,64 // bspstore
st8 [r15]=r18,-16 // rnat
dep r19=r19,r19,7,7 // duplicate sol into sof in saved pfs
;;
}
{ .mmi
st8 [r14]=r0 // ndirty
st8 [r15]=r19 // pfs
nop 0
;;
}
{ .mmi
alloc r14=ar.pfs, 0, 0, 3, 0
mov ar.rsc=15 // re-enable eager RSE
mov out0=r8 // signal number
;;
}
{ .mmi
ld8 r16=[r10],8 // function address
;;
ld8 gp=[r10] // function's gp value
mov b7=r16
;;
}
{ .mib
mov out1=r9 // signal code
add out2=16,sp // &ucontext
br.call.sptk rp=b7
;;
}
add out0=16,sp
CALLSYS_NOERROR(sigreturn)
mov out0=ret0
CALLSYS_NOERROR(exit) // sigreturn failed; bail out
END(epc_sigtramp)
.align PAGE_SIZE
.text
// The kernel side of the EPC syscall path. On entry (from the gateway
// page): r8 = syscall number, r9/r10 = caller's ar.pfs/rp, r11 = user
// psr.l, r14 = kernel memory stack, r15 = kernel register stack,
// in0-in7 = syscall arguments. Builds a trapframe on the kernel
// stack, switches to the kernel register stack and pcpu (ar.k4), then
// calls syscall() and do_ast(), restarting on ERESTART. The return
// path unwinds the trapframe and rfi-less-returns through the gateway
// page (gw_ret or, for ia32 processes, gw_ret_ia32).
ENTRY(epc_syscall, 8)
.prologue
.unwabi @svr4, 'E'
.save rp, r0
//
// Phase 1: switch stacks and build the trapframe.
//
{ .mmi
mov r16=ar.rsc
mov ar.rsc=0 // RSE off while we switch backing stores
mov r17=r13 // user tp
;;
}
{ .mmi
mov r18=ar.bspstore
mov r19=ar.rnat
add r30=-SIZEOF_TRAPFRAME,r14
;;
}
{ .mmi
mov ar.bspstore=r15 // switch to kernel register stack
mov r13=ar.k4 // r13 = pcpu
dep r30=0,r30,0,10 // align trapframe
;;
}
{ .mii
mov r20=sp
add r31=8,r30
add sp=-16,r30
;;
}
{ .mmi
mov r21=ar.unat
mov r22=ar.fpsr
sub r29=r14,r30 // trapframe length
;;
}
{ .mmi
mov r23=ar.bsp
mov ar.rsc=3 // RSE back on (eager)
add r28=FRAME_SYSCALL,r0
;;
}
{ .mmi
st8 [r30]=r29,16 // tf_length
st8 [r31]=r28,16 // tf_flags
mov r24=rp
;;
}
{ .mmi
st8 [r30]=r20,16 // sp
st8 [r31]=r21,16 // unat
mov r25=pr
;;
}
{ .mmi
st8 [r30]=r24,16 // rp (syscall stub)
st8 [r31]=r25,16 // pr
mov r26=ar.pfs
;;
}
{ .mmi
st8 [r30]=r26,16 // pfs (syscall stub)
st8 [r31]=r18,16 // bspstore
sub r27=r23,r15 // ndirty (bytes on kernel rstack)
;;
}
{ .mmi
st8 [r30]=r19,16 // rnat
st8 [r31]=r0,16 // __spare
nop 0
;;
}
{ .mmi
st8 [r30]=r17,16 // tp
st8 [r31]=r16,16 // rsc
dep r11=-1,r11,32,2 // Set psr.cpl=3
;;
}
{ .mmi
st8 [r30]=r22,16 // fpsr
st8 [r31]=r11,16 // psr
nop 0
;;
}
{ .mmi
st8 [r30]=r1,16 // gp
st8 [r31]=r27,16 // ndirty
nop 0
;;
}
{ .mmi
st8 [r30]=r9,16 // pfs (syscall caller)
st8 [r31]=r10,16 // rp (syscall caller)
nop 0
;;
}
{ .mmi
st8 [r30]=r0,80 // ifa
st8 [r31]=r0,80 // isr
nop 0
;;
}
{ .mmi
alloc r14=ar.pfs,0,0,8,0
st8 [r30]=r8,16 // syscall number (=r15)
nop 0
;;
}
{ .mmi
.mem.offset 0,0
st8.spill [r31]=r32,16 // arg0 (=r16)
.mem.offset 8,0
st8.spill [r30]=r33,16 // arg1 (=r17)
nop 0
;;
}
{ .mmi
.mem.offset 16,0
st8.spill [r31]=r34,16 // arg2 (=r18)
.mem.offset 24,0
st8.spill [r30]=r35,16 // arg3 (=r19)
nop 0
;;
}
{ .mmi
.mem.offset 32,0
st8.spill [r31]=r36,16 // arg4 (=r20)
.mem.offset 40,0
st8.spill [r30]=r37,16 // arg5 (=r21)
nop 0
;;
}
{ .mmi
.mem.offset 48,0
st8.spill [r31]=r38 // arg6 (=r22)
.mem.offset 56,0
st8.spill [r30]=r39 // arg7 (=r23)
nop 0
;;
}
//
// Phase 2: dispatch to C. Restart the syscall on ERESTART.
//
{ .mlx
ssm psr.dfh|psr.ac
movl gp=__gp // kernel gp
;;
}
epc_syscall_restart:
{ .mib
srlz.d
add out0=16,sp // &trapframe
br.call.sptk rp=syscall
;;
}
{ .mfb
add out0=16,sp
nop 0
br.call.sptk rp=do_ast // handle pending ASTs before return
;;
}
{ .mfb
cmp4.eq p15,p0=ERESTART,r8
nop 0
(p15) br.spnt epc_syscall_restart
;;
}
//
// Phase 3: unwind the trapframe and return through the gateway page.
//
.global epc_syscall_return
epc_syscall_return:
{ .mmi
alloc r31=ar.pfs,0,0,0,0
add r14=32,sp
add r15=16,sp
;;
}
{ .mmi
ld8 r31=[r15],24 // tf_length
ld8 r16=[r14],16 // sp
add sp=16,sp
;;
}
{ .mmi
ld8 r17=[r15],16 // unat (before)
ld8 r18=[r14],16 // rp (syscall stub)
add r31=r31,sp // top of trapframe = new ar.k7
;;
}
{ .mmi
ld8 r19=[r15],16 // pr
ld8 r20=[r14],16 // pfs (syscall stub)
mov b6=r18
;;
}
{ .mmi
ld8 r21=[r15],24 // bspstore
ld8 r22=[r14],24 // rnat
mov pr=r19,0x1fffe
;;
}
{ .mmb
ld8 r23=[r15],16 // tp
ld8 r24=[r14],16 // rsc
nop 0
;;
}
{ .mmi
ld8 r25=[r15],16 // fpsr
ld8 r26=[r14],16 // psr
mov ar.pfs=r20
;;
}
{ .mmi
ld8 gp=[r15],16 // gp
ld8 r27=[r14],16 // ndirty
tbit.z p14,p15=r26,34 // p14=ia64, p15=ia32
;;
}
{ .mmi
ld8 r28=[r15],56 // pfs (syscall caller)
ld8 r29=[r14],56 // rp (syscall caller)
shl r27=r27,16 // ndirty into rsc.loadrs position
;;
}
{ .mmb
ld8 r8=[r15],16 // r8
mov ar.rsc=r27
nop 0
;;
}
{ .mmb
ld8 r9=[r14],40 // r9
ld8 r10=[r15],40 // r10
(p15) br.spnt epc_syscall_setup_ia32
;;
}
{ .mmi
loadrs // discard kernel register stack contents
mov ar.k7=r31
mov sp=r16
;;
}
{ .mmi
mov r30=ar.bspstore
;;
mov r14=ar.k5 // gateway page address
dep r30=0,r30,0,9
;;
}
{ .mmi
mov ar.k6=r30
mov ar.bspstore=r21 // back to the user register stack
mov r13=r23 // restore user tp
;;
}
{ .mmi
mov psr.l=r26
mov ar.unat=r17
add r14=gw_ret-ia64_gateway_page,r14
;;
}
{ .mib
srlz.d
mov b7=r14
br.ret.sptk b7 // continue at gw_ret in the gateway page
;;
}
epc_syscall_setup_ia32:
// ia32 return path: additionally restore the ia32 register state
// held in the trapframe, then continue at gw_ret_ia32.
{ .mmi
loadrs
mov ar.k7=r31
mov sp=r16
;;
}
{ .mmi
mov r30=ar.bspstore
;;
mov ar.unat=r17
dep r30=0,r30,0,9
;;
}
{ .mmi
mov ar.k6=r30
mov ar.bspstore=r21
mov r11=r0
;;
}
{ .mmi
ld8 r16=[r14],64
ld8 r17=[r15],80
mov r13=r0
;;
}
ld8 r24=[r14],32
ld8 r27=[r15],16
;;
ld8 r28=[r14],16
ld8 r29=[r15],16
;;
ld8 r30=[r14],40
ld8 r31=[r15],40
;;
{ .mmi
ld8 r2=[r14]
ld8 r3=[r15]
mov r14=r0
;;
}
{ .mmi
mov ar.csd=r2
mov ar.ssd=r3
mov r15=r0
;;
}
mov r2=ar.k5 // gateway page address
mov psr.l=r26
;;
srlz.d
add r2=gw_ret_ia32-ia64_gateway_page,r2
;;
mov ar.rsc=0x0
mov b7=r2
br.ret.sptk b7 // continue at gw_ret_ia32
;;
END(epc_syscall)