Repocopy *.s to *.S

Peter Wemm 2003-05-03 00:21:43 +00:00
parent 3674802f42
commit 5b27e93419
10 changed files with 3 additions and 1621 deletions


@ -283,7 +283,7 @@ ENTRY(fork_trampoline)
* XXX it should be in a stand-alone file. It's still icu-dependent and
* belongs in i386/isa.
*/
#include "amd64/isa/vector.s"
#include "amd64/isa/vector.S"
.data
ALIGN_DATA
@ -379,4 +379,4 @@ doreti_iret_fault:
movq $0,TF_ERR(%rsp) /* XXX should be the error code */
jmp alltraps_with_regs_pushed
#include "amd64/isa/icu_ipl.s"
#include "amd64/isa/icu_ipl.S"


@ -1,382 +0,0 @@
/*-
* Copyright (c) 1989, 1990 William F. Jolitz.
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asmacros.h>
#include <sys/mutex.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include "assym.s"
#define SEL_RPL_MASK 0x0003
.text
/*****************************************************************************/
/* Trap handling */
/*****************************************************************************/
/*
* Trap and fault vector routines.
*
* Most traps are 'trap gates', SDT_SYS386TGT. A trap gate pushes state on
* the stack that mostly looks like an interrupt, but does not disable
* interrupts. A few of the traps we use are interrupt gates,
* SDT_SYS386IGT, which are nearly the same thing except interrupts are
* disabled on entry.
*
* The cpu will push a certain amount of state onto the kernel stack for
* the current process. The amount of state depends on the type of trap
* and whether the trap crossed rings or not. See i386/include/frame.h.
* At the very least the current EFLAGS (status register, which includes
* the interrupt disable state prior to the trap), the code segment register,
* and the return instruction pointer are pushed by the cpu. The cpu
* will also push an 'error' code for certain traps. We push a dummy
* error code for those traps where the cpu doesn't in order to maintain
* a consistent frame. We also push a contrived 'trap number'.
*
* The cpu does not push the general registers, we must do that, and we
* must restore them prior to calling 'iret'. The cpu adjusts the %cs and
* %ss segment registers, but does not mess with %ds, %es, or %fs. Thus we
* must load them with appropriate values for supervisor mode operation.
*/
#define IDTVEC(name) ALIGN_TEXT; .globl __CONCAT(X,name); \
.type __CONCAT(X,name),@function; __CONCAT(X,name):
#define TRAP(a) pushq $(a) ; jmp alltraps
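/*
 * For illustration (derivable from the two macros above, not original
 * commentary): the first entry below, IDTVEC(div) with pushq $0 and
 * TRAP(T_DIVIDE), expands to roughly
 *
 *	ALIGN_TEXT
 *	.globl Xdiv
 *	.type Xdiv,@function
 * Xdiv:
 *	pushq $0		# dummy error code
 *	pushq $(T_DIVIDE)	# contrived trap number
 *	jmp alltraps
 *
 * so every vector reaches alltraps with the same two-slot prologue.
 */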
MCOUNT_LABEL(user)
MCOUNT_LABEL(btrap)
IDTVEC(div)
pushq $0; TRAP(T_DIVIDE)
IDTVEC(dbg)
pushq $0; TRAP(T_TRCTRAP)
IDTVEC(nmi)
pushq $0; TRAP(T_NMI)
IDTVEC(bpt)
pushq $0; TRAP(T_BPTFLT)
IDTVEC(ofl)
pushq $0; TRAP(T_OFLOW)
IDTVEC(bnd)
pushq $0; TRAP(T_BOUND)
IDTVEC(ill)
pushq $0; TRAP(T_PRIVINFLT)
IDTVEC(dna)
pushq $0; TRAP(T_DNA)
IDTVEC(fpusegm)
pushq $0; TRAP(T_FPOPFLT)
IDTVEC(tss)
TRAP(T_TSSFLT)
IDTVEC(missing)
TRAP(T_SEGNPFLT)
IDTVEC(stk)
TRAP(T_STKFLT)
IDTVEC(prot)
TRAP(T_PROTFLT)
IDTVEC(page)
TRAP(T_PAGEFLT)
IDTVEC(mchk)
pushq $0; TRAP(T_MCHK)
IDTVEC(rsvd)
pushq $0; TRAP(T_RESERVED)
IDTVEC(fpu)
pushq $0; TRAP(T_ARITHTRAP)
IDTVEC(align)
TRAP(T_ALIGNFLT)
IDTVEC(xmm)
pushq $0; TRAP(T_XMMFLT)
/*
* alltraps entry point. Interrupts are enabled if this was a trap
* gate (TGT), else disabled if this was an interrupt gate (IGT).
* Note that int0x80_syscall is a trap gate. Only page faults
* use an interrupt gate.
*/
SUPERALIGN_TEXT
.globl alltraps
.type alltraps,@function
alltraps:
subq $TF_TRAPNO,%rsp /* tf_err and tf_trapno already pushed */
movq %rdi,TF_RDI(%rsp)
movq %rsi,TF_RSI(%rsp)
movq %rdx,TF_RDX(%rsp)
movq %rcx,TF_RCX(%rsp)
movq %r8,TF_R8(%rsp)
movq %r9,TF_R9(%rsp)
movq %rax,TF_RAX(%rsp)
movq %rbx,TF_RBX(%rsp)
movq %rbp,TF_RBP(%rsp)
movq %r10,TF_R10(%rsp)
movq %r11,TF_R11(%rsp)
movq %r12,TF_R12(%rsp)
movq %r13,TF_R13(%rsp)
movq %r14,TF_R14(%rsp)
movq %r15,TF_R15(%rsp)
alltraps_with_regs_pushed:
FAKE_MCOUNT(13*4(%rsp))
calltrap:
FAKE_MCOUNT(btrap) /* init "from" btrap -> calltrap */
call trap
MEXITCOUNT
jmp doreti /* Handle any pending ASTs */
/*
* Call gate entry for FreeBSD ELF and Linux/NetBSD syscall (int 0x80)
*
* Even though the name says 'int0x80', this is actually a TGT (trap gate)
* rather than an IGT (interrupt gate). Thus interrupts are enabled on
* entry just as they are for a normal syscall.
*
* This leaves a place to put eflags so that the call frame can be
* converted to a trap frame. Note that the eflags is (semi-)bogusly
* pushed into (what will be) tf_err and then copied later into the
* final spot. It has to be done this way because esp can't be just
* temporarily altered for the pushfl - an interrupt might come in
* and clobber the saved cs/eip.
*/
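/*
 * For illustration, a hypothetical userland caller reaches this gate
 * roughly as
 *
 *	movq $SYS_getpid,%rax	# syscall number
 *	int $0x80
 *
 * The cpu pushes %ss, %rsp, %rflags, %cs and %rip for the ring
 * crossing; the code below fills in the rest of the trapframe by hand.
 */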
SUPERALIGN_TEXT
IDTVEC(int0x80_syscall)
pushq $2 /* sizeof "int 0x80" */
subq $TF_ERR,%rsp /* skip over tf_trapno */
movq %rdi,TF_RDI(%rsp)
movq %rsi,TF_RSI(%rsp)
movq %rdx,TF_RDX(%rsp)
movq %rcx,TF_RCX(%rsp)
movq %r8,TF_R8(%rsp)
movq %r9,TF_R9(%rsp)
movq %rax,TF_RAX(%rsp)
movq %rbx,TF_RBX(%rsp)
movq %rbp,TF_RBP(%rsp)
movq %r10,TF_R10(%rsp)
movq %r11,TF_R11(%rsp)
movq %r12,TF_R12(%rsp)
movq %r13,TF_R13(%rsp)
movq %r14,TF_R14(%rsp)
movq %r15,TF_R15(%rsp)
FAKE_MCOUNT(13*4(%rsp))
call syscall
MEXITCOUNT
jmp doreti
/*
* Fast syscall entry point. We enter here with just our new %cs/%ss set,
* and the new privilege level. We are still running on the old user stack
* pointer. We have to juggle a few things around to find our stack etc.
* swapgs gives us access to our PCPU space only.
* XXX The PCPU stuff is stubbed out right now...
*/
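/*
 * Architectural background, for reference: the SYSCALL instruction
 * saves the return %rip in %rcx and %rflags in %r11 and does not
 * switch stacks, which is why the code below takes TF_RIP from %rcx
 * and TF_RFLAGS from %r11 and fetches a kernel %rsp itself. A
 * hypothetical userland caller must treat both registers as clobbered:
 *
 *	movq $SYS_getpid,%rax
 *	syscall			# %rcx and %r11 destroyed by the cpu
 */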
IDTVEC(fast_syscall)
#swapgs
movq %rsp,PCPU(SCRATCH_RSP)
movq common_tss+COMMON_TSS_RSP0,%rsp
sti
/* Now emulate a trapframe. Ugh. */
subq $TF_SIZE,%rsp
movq $KUDSEL,TF_SS(%rsp)
/* defer TF_RSP till we have a spare register */
movq %r11,TF_RFLAGS(%rsp)
movq $KUCSEL,TF_CS(%rsp)
movq %rcx,TF_RIP(%rsp) /* %rcx original value is in %r10 */
movq $2,TF_ERR(%rsp)
movq %rdi,TF_RDI(%rsp) /* arg 1 */
movq %rsi,TF_RSI(%rsp) /* arg 2 */
movq %rdx,TF_RDX(%rsp) /* arg 3 */
movq %r10,TF_RCX(%rsp) /* arg 4 */
movq %r8,TF_R8(%rsp) /* arg 5 */
movq %r9,TF_R9(%rsp) /* arg 6 */
movq %rax,TF_RAX(%rsp) /* syscall number */
movq %rbx,TF_RBX(%rsp) /* C preserved */
movq %rbp,TF_RBP(%rsp) /* C preserved */
movq %r12,TF_R12(%rsp) /* C preserved */
movq %r13,TF_R13(%rsp) /* C preserved */
movq %r14,TF_R14(%rsp) /* C preserved */
movq %r15,TF_R15(%rsp) /* C preserved */
movq PCPU(SCRATCH_RSP),%r12 /* %r12 already saved */
movq %r12,TF_RSP(%rsp) /* user stack pointer */
call syscall
movq PCPU(CURPCB),%rax
testq $PCB_FULLCTX,PCB_FLAGS(%rax)
jne 3f
/* simplified from doreti */
1: /* Check for and handle AST's on return to userland */
cli
movq PCPU(CURTHREAD),%rax
testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
je 2f
sti
movq %rsp, %rdi
call ast
jmp 1b
2: /* restore preserved registers */
movq TF_RDI(%rsp),%rdi /* bonus: preserve arg 1 */
movq TF_RSI(%rsp),%rsi /* bonus: preserve arg 2 */
movq TF_RDX(%rsp),%rdx /* return value 2 */
movq TF_RAX(%rsp),%rax /* return value 1 */
movq TF_RBX(%rsp),%rbx /* C preserved */
movq TF_RBP(%rsp),%rbp /* C preserved */
movq TF_R12(%rsp),%r12 /* C preserved */
movq TF_R13(%rsp),%r13 /* C preserved */
movq TF_R14(%rsp),%r14 /* C preserved */
movq TF_R15(%rsp),%r15 /* C preserved */
movq TF_RFLAGS(%rsp),%r11 /* original %rflags */
movq TF_RIP(%rsp),%rcx /* original %rip */
movq TF_RSP(%rsp),%r9 /* user stack pointer */
movq %r9,%rsp /* original %rsp */
#swapgs
sysretq
3: /* Requested full context restore, use doreti for that */
andq $~PCB_FULLCTX,PCB_FLAGS(%rax)
jmp doreti
/*
* Here for CYA insurance, in case a "syscall" instruction gets
* issued from 32 bit compatibility mode. MSR_CSTAR has to point
* to *something* if EFER_SCE is enabled.
*/
IDTVEC(fast_syscall32)
sysret
ENTRY(fork_trampoline)
movq %r12, %rdi /* function */
movq %rbx, %rsi /* arg1 */
movq %rsp, %rdx /* trapframe pointer */
call fork_exit
MEXITCOUNT
jmp doreti /* Handle any ASTs */
/*
* Include what was once config+isa-dependent code.
* XXX it should be in a stand-alone file. It's still icu-dependent and
* belongs in i386/isa.
*/
#include "amd64/isa/vector.s"
.data
ALIGN_DATA
/*
* void doreti(struct trapframe)
*
* Handle return from interrupts, traps and syscalls.
*/
.text
SUPERALIGN_TEXT
.type doreti,@function
doreti:
FAKE_MCOUNT(bintr) /* init "from" bintr -> doreti */
/*
* Check if ASTs can be handled now.
*/
testb $SEL_RPL_MASK,TF_CS(%rsp) /* are we returning to user mode? */
jz doreti_exit /* can't handle ASTs now if not */
doreti_ast:
/*
* Check for ASTs atomically with returning. Disabling CPU
* interrupts provides sufficient locking even in the SMP case,
* since we will be informed of any new ASTs by an IPI.
*/
cli
movq PCPU(CURTHREAD),%rax
testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
je doreti_exit
sti
movq %rsp, %rdi /* pass a pointer to the trapframe */
call ast
jmp doreti_ast
/*
* doreti_exit: pop registers, iret.
*
* The segment register pop is a special case, since it may
* fault if (for example) a sigreturn specifies bad segment
* registers. The fault is handled in trap.c.
*/
doreti_exit:
MEXITCOUNT
movq TF_RDI(%rsp),%rdi
movq TF_RSI(%rsp),%rsi
movq TF_RDX(%rsp),%rdx
movq TF_RCX(%rsp),%rcx
movq TF_R8(%rsp),%r8
movq TF_R9(%rsp),%r9
movq TF_RAX(%rsp),%rax
movq TF_RBX(%rsp),%rbx
movq TF_RBP(%rsp),%rbp
movq TF_R10(%rsp),%r10
movq TF_R11(%rsp),%r11
movq TF_R12(%rsp),%r12
movq TF_R13(%rsp),%r13
movq TF_R14(%rsp),%r14
movq TF_R15(%rsp),%r15
addq $TF_RIP,%rsp /* skip over tf_err, tf_trapno */
.globl doreti_iret
doreti_iret:
iretq
/*
* doreti_iret_fault and friends. Alternative return code for
* the case where we get a fault in the doreti_exit code
* above. trap() (i386/i386/trap.c) catches this specific
* case, sends the process a signal and continues in the
* corresponding place in the code below.
*/
ALIGN_TEXT
.globl doreti_iret_fault
doreti_iret_fault:
subq $TF_RIP,%rsp /* space including tf_err, tf_trapno */
movq %rdi,TF_RDI(%rsp)
movq %rsi,TF_RSI(%rsp)
movq %rdx,TF_RDX(%rsp)
movq %rcx,TF_RCX(%rsp)
movq %r8,TF_R8(%rsp)
movq %r9,TF_R9(%rsp)
movq %rax,TF_RAX(%rsp)
movq %rbx,TF_RBX(%rsp)
movq %rbp,TF_RBP(%rsp)
movq %r10,TF_R10(%rsp)
movq %r11,TF_R11(%rsp)
movq %r12,TF_R12(%rsp)
movq %r13,TF_R13(%rsp)
movq %r14,TF_R14(%rsp)
movq %r15,TF_R15(%rsp)
movq $T_PROTFLT,TF_TRAPNO(%rsp)
movq $0,TF_ERR(%rsp) /* XXX should be the error code */
jmp alltraps_with_regs_pushed
#include "amd64/isa/icu_ipl.s"


@ -1,92 +0,0 @@
/*-
* Copyright (c) 2003 Peter Wemm <peter@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asmacros.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>
#include "assym.s"
/*
* PTmap is recursive pagemap at top of virtual address space.
* Within PTmap, the page directory can be found (third indirection).
*/
.globl PTmap,PTD,PTDpde
.set PTmap,(PTDPTDI << PDRSHIFT)
.set PTD,PTmap + (PTDPTDI * PAGE_SIZE)
.set PTDpde,PTD + (PTDPTDI * PDESIZE)
/*
* Compiled KERNBASE location
*/
.globl kernbase
.set kernbase,KERNBASE
.text
/**********************************************************************
*
* This is where the loader trampoline starts us, setting the ball rolling...
*
* We are called with the stack looking like this:
* 0(%rsp) = 32 bit return address (cannot be used)
* 4(%rsp) = 32 bit modulep
* 8(%rsp) = 32 bit kernend
*
* We are already in long mode, on a 64 bit %cs and running at KERNBASE.
*/
NON_GPROF_ENTRY(btext)
/* Tell the bios to warmboot next time */
movw $0x1234,0x472
/* Don't trust what the loader gives for rflags. */
pushq $PSL_KERNEL
popfq
/* Find the metadata pointers before we lose them */
movq %rsp, %rbp
xorq %rax, %rax
movl 4(%rbp),%eax /* modulep */
movq %rax,modulep
movl 8(%rbp),%eax /* kernend */
movq %rax,physfree
/* Get onto a stack that we can trust - there is no going back now. */
movq $bootstack,%rsp
xorq %rbp, %rbp
call hammer_time /* set up cpu for unix operation */
call mi_startup /* autoconfiguration, mountroot etc */
0: hlt
jmp 0b
.bss
ALIGN_DATA /* just to be sure */
.space 0x1000 /* space for bootstack - temporary stack */
bootstack:


@ -1,56 +0,0 @@
/*-
* Copyright (c) 2003 Peter Wemm <peter@freeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/syscall.h>
#include <machine/asmacros.h>
#include "assym.s"
.text
/**********************************************************************
*
* Signal trampoline, copied to top of user stack
*
*/
NON_GPROF_ENTRY(sigcode)
call *SIGF_HANDLER(%rsp) /* call signal handler */
lea SIGF_UC(%rsp),%rdi /* get ucontext_t */
pushq $0 /* junk to fake return addr. */
movq $SYS_sigreturn,%rax
syscall /* enter kernel with args */
0: hlt /* trap privileged instruction */
jmp 0b
ALIGN_TEXT
esigcode:
.data
.globl szsigcode
szsigcode:
.long esigcode-sigcode
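/*
 * Context, as a hedged sketch (the names here come from the kernel
 * exec path, not this file): the kernel copies this code to the top
 * of the user stack at exec time, roughly
 *
 *	copyout(sigcode, (caddr_t)arginfo - szsigcode, szsigcode);
 *
 * and sendsig() points the handler's return address at that copy, so
 * returning from a signal handler falls into the sigreturn(2) call
 * above.
 */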


@ -1,494 +0,0 @@
/*-
* Copyright (c) 1993 The Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>
#include "assym.s"
.text
/* fillw(pat, base, cnt) */
/* %rdi,%rsi, %rdx */
ENTRY(fillw)
movq %rdi,%rax
movq %rsi,%rdi
movq %rdx,%rcx
cld
rep
stosw
ret
/*****************************************************************************/
/* copyout and fubyte family */
/*****************************************************************************/
/*
* Access user memory from inside the kernel. These routines and possibly
* the math- and DOS emulators should be the only places that do this.
*
* We have to access the memory with user's permissions, so use a segment
* selector with RPL 3. For writes to user space we have to additionally
* check the PTE for write permission, because the 386 does not check
* write permissions when we are executing with EPL 0. The 486 does check
* this if the WP bit is set in CR0, so we can use a simpler version here.
*
* These routines set curpcb->onfault for the time they execute. When a
* protection violation occurs inside the functions, the trap handler
* returns to *curpcb->onfault instead of the function.
*/
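/*
 * Consumer side, sketched for illustration (this logic lives in
 * trap.c, not here): on a kernel-mode fault the trap handler checks
 * pcb_onfault and, if it is set, resumes there instead of panicking,
 * roughly:
 *
 *	if (td->td_pcb->pcb_onfault != NULL) {
 *		frame->tf_rip = (long)td->td_pcb->pcb_onfault;
 *		return;
 *	}
 *
 * That is what turns the *_fault labels below into error returns.
 */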
/*
* copyout(from_kernel, to_user, len) - MP SAFE
* %rdi, %rsi, %rdx
*/
ENTRY(copyout)
movq PCPU(CURPCB),%rax
movq $copyout_fault,PCB_ONFAULT(%rax)
testq %rdx,%rdx /* anything to do? */
jz done_copyout
/*
* Check explicitly for non-user addresses. If 486 write protection
* is being used, this check is essential because we are in kernel
* mode so the h/w does not provide any protection against writing
* kernel addresses.
*/
/*
* First, prevent address wrapping.
*/
movq %rsi,%rax
addq %rdx,%rax
jc copyout_fault
/*
* XXX STOP USING VM_MAXUSER_ADDRESS.
* It is an end address, not a max, so every time it is used correctly it
* looks like there is an off by one error, and of course it caused an off
* by one error in several places.
*/
movq $VM_MAXUSER_ADDRESS,%rcx
cmpq %rcx,%rax
ja copyout_fault
xchgq %rdi, %rsi
/* bcopy(%rsi, %rdi, %rdx) */
movq %rdx,%rcx
shrq $3,%rcx
cld
rep
movsq
movb %dl,%cl
andb $7,%cl
rep
movsb
done_copyout:
xorq %rax,%rax
movq PCPU(CURPCB),%rdx
movq %rax,PCB_ONFAULT(%rdx)
ret
ALIGN_TEXT
copyout_fault:
movq PCPU(CURPCB),%rdx
movq $0,PCB_ONFAULT(%rdx)
movq $EFAULT,%rax
ret
/*
* copyin(from_user, to_kernel, len) - MP SAFE
* %rdi, %rsi, %rdx
*/
ENTRY(copyin)
movq PCPU(CURPCB),%rax
movq $copyin_fault,PCB_ONFAULT(%rax)
testq %rdx,%rdx /* anything to do? */
jz done_copyin
/*
* make sure address is valid
*/
movq %rdi,%rax
addq %rdx,%rax
jc copyin_fault
movq $VM_MAXUSER_ADDRESS,%rcx
cmpq %rcx,%rax
ja copyin_fault
xchgq %rdi, %rsi
movq %rdx, %rcx
movb %cl,%al
shrq $3,%rcx /* copy longword-wise */
cld
rep
movsq
movb %al,%cl
andb $7,%cl /* copy remaining bytes */
rep
movsb
done_copyin:
xorq %rax,%rax
movq PCPU(CURPCB),%rdx
movq %rax,PCB_ONFAULT(%rdx)
ret
ALIGN_TEXT
copyin_fault:
movq PCPU(CURPCB),%rdx
movq $0,PCB_ONFAULT(%rdx)
movq $EFAULT,%rax
ret
/*
* casuptr. Compare and set user pointer. Returns -1 or the current value.
* dst = %rdi, old = %rsi, new = %rdx
*/
ENTRY(casuptr)
movq PCPU(CURPCB),%rcx
movq $fusufault,PCB_ONFAULT(%rcx)
movq $VM_MAXUSER_ADDRESS-4,%rax
cmpq %rax,%rdi /* verify address is valid */
ja fusufault
movq %rsi, %rax /* old */
cmpxchgq %rdx, (%rdi) /* new = %rdx */
/*
* The old value is in %rax. If the store succeeded it will be the
* value we expected (old) from before the store, otherwise it will
* be the current value.
*/
movq PCPU(CURPCB),%rcx
movq $fusufault,PCB_ONFAULT(%rcx)
movq $0,PCB_ONFAULT(%rcx)
ret
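/*
 * Illustrative use of casuptr (hypothetical caller and names): take a
 * lock word in user memory, treating -1 as a fault:
 *
 *	old = casuptr(p, UNOWNED, tid);
 *	if (old == -1)
 *		return (EFAULT);
 *	if (old != UNOWNED)
 *		...			(contested: retry or sleep)
 */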
/*
* fu{byte,sword,word} - MP SAFE
*
* Fetch a byte (sword, word) from user memory
* %rdi
*/
ENTRY(fuword64)
movq PCPU(CURPCB),%rcx
movq $fusufault,PCB_ONFAULT(%rcx)
movq $VM_MAXUSER_ADDRESS-8,%rax
cmpq %rax,%rdi /* verify address is valid */
ja fusufault
movq (%rdi),%rax
movq $0,PCB_ONFAULT(%rcx)
ret
ENTRY(fuword32)
movq PCPU(CURPCB),%rcx
movq $fusufault,PCB_ONFAULT(%rcx)
movq $VM_MAXUSER_ADDRESS-4,%rax
cmpq %rax,%rdi /* verify address is valid */
ja fusufault
# XXX use the 64 extend
xorq %rax, %rax
movl (%rdi),%eax
movq $0,PCB_ONFAULT(%rcx)
ret
ENTRY(fuword)
jmp fuword32
/*
* These two routines are called from the profiling code, potentially
* at interrupt time. If they fail, that's okay, good things will
* happen later. Fail all the time for now - until the trap code is
* able to deal with this.
*/
ALTENTRY(suswintr)
ENTRY(fuswintr)
movq $-1,%rax
ret
/*
* fuword16 - MP SAFE
*/
ENTRY(fuword16)
movq PCPU(CURPCB),%rcx
movq $fusufault,PCB_ONFAULT(%rcx)
movq $VM_MAXUSER_ADDRESS-2,%rax
cmpq %rax,%rdi
ja fusufault
# XXX use the 64 extend
xorq %rax, %rax
movzwl (%rdi),%eax
movq $0,PCB_ONFAULT(%rcx)
ret
/*
* fubyte - MP SAFE
*/
ENTRY(fubyte)
movq PCPU(CURPCB),%rcx
movq $fusufault,PCB_ONFAULT(%rcx)
movq $VM_MAXUSER_ADDRESS-1,%rax
cmpq %rax,%rdi
ja fusufault
# XXX use the 64 extend
xorq %rax, %rax
movzbl (%rdi),%eax
movq $0,PCB_ONFAULT(%rcx)
ret
ALIGN_TEXT
fusufault:
movq PCPU(CURPCB),%rcx
xorq %rax,%rax
movq %rax,PCB_ONFAULT(%rcx)
decq %rax
ret
/*
* su{byte,sword,word} - MP SAFE
*
* Write a byte (word, longword) to user memory
* addr = %rdi, value = %rsi
*/
ENTRY(suword64)
movq PCPU(CURPCB),%rcx
movq $fusufault,PCB_ONFAULT(%rcx)
movq $VM_MAXUSER_ADDRESS-8,%rax
cmpq %rax,%rdi /* verify address validity */
ja fusufault
movq %rsi,(%rdi)
xorq %rax,%rax
movq PCPU(CURPCB),%rcx
movq %rax,PCB_ONFAULT(%rcx)
ret
ENTRY(suword32)
movq PCPU(CURPCB),%rcx
movq $fusufault,PCB_ONFAULT(%rcx)
movq $VM_MAXUSER_ADDRESS-4,%rax
cmpq %rax,%rdi /* verify address validity */
ja fusufault
movl %esi,(%rdi)
xorq %rax,%rax
movq PCPU(CURPCB),%rcx
movq %rax,PCB_ONFAULT(%rcx)
ret
ENTRY(suword)
jmp suword32
/*
* suword16 - MP SAFE
*/
ENTRY(suword16)
movq PCPU(CURPCB),%rcx
movq $fusufault,PCB_ONFAULT(%rcx)
movq $VM_MAXUSER_ADDRESS-2,%rax
cmpq %rax,%rdi /* verify address validity */
ja fusufault
movw %si,(%rdi)
xorq %rax,%rax
movq PCPU(CURPCB),%rcx /* restore trashed register */
movq %rax,PCB_ONFAULT(%rcx)
ret
/*
* subyte - MP SAFE
*/
ENTRY(subyte)
movq PCPU(CURPCB),%rcx
movq $fusufault,PCB_ONFAULT(%rcx)
movq $VM_MAXUSER_ADDRESS-1,%rax
cmpq %rax,%rdi /* verify address validity */
ja fusufault
movl %esi, %eax
movb %al,(%rdi)
xorq %rax,%rax
movq PCPU(CURPCB),%rcx /* restore trashed register */
movq %rax,PCB_ONFAULT(%rcx)
ret
/*
* copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
* %rdi, %rsi, %rdx, %rcx
*
* copy a string from from to to, stop when a 0 character is reached.
* return ENAMETOOLONG if string is longer than maxlen, and
* EFAULT on protection violations. If lencopied is non-zero,
* return the actual length in *lencopied.
*/
ENTRY(copyinstr)
movq %rdx, %r8 /* %r8 = maxlen */
movq %rcx, %r9 /* %r9 = *len */
xchgq %rdi, %rsi /* %rdi = from, %rsi = to */
movq PCPU(CURPCB),%rcx
movq $cpystrflt,PCB_ONFAULT(%rcx)
movq $VM_MAXUSER_ADDRESS,%rax
/* make sure 'from' is within bounds */
subq %rsi,%rax
jbe cpystrflt
/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
cmpq %rdx,%rax
jae 1f
movq %rax,%rdx
movq %rax,%r8
1:
incq %rdx
cld
2:
decq %rdx
jz 3f
lodsb
stosb
orb %al,%al
jnz 2b
/* Success -- 0 byte reached */
decq %rdx
xorq %rax,%rax
jmp cpystrflt_x
3:
/* rdx is zero - return ENAMETOOLONG or EFAULT */
movq $VM_MAXUSER_ADDRESS,%rax
cmpq %rax,%rsi
jae cpystrflt
4:
movq $ENAMETOOLONG,%rax
jmp cpystrflt_x
cpystrflt:
movq $EFAULT,%rax
cpystrflt_x:
/* set *lencopied and return %eax */
movq PCPU(CURPCB),%rcx
movq $0,PCB_ONFAULT(%rcx)
testq %r9,%r9
jz 1f
subq %rdx,%r8
movq %r8,(%r9)
1:
ret
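/*
 * Typical call, for illustration (hypothetical caller):
 *
 *	error = copyinstr(uap->path, path, MAXPATHLEN, &len);
 *
 * error is 0 on success, ENAMETOOLONG if no terminating 0 byte fit in
 * maxlen bytes, or EFAULT on a bad user address; len receives the
 * number of bytes copied, including the terminating 0.
 */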
/*
* copystr(from, to, maxlen, int *lencopied) - MP SAFE
* %rdi, %rsi, %rdx, %rcx
*/
ENTRY(copystr)
movq %rdx, %r8 /* %r8 = maxlen */
xchgq %rdi, %rsi
incq %rdx
cld
1:
decq %rdx
jz 4f
lodsb
stosb
orb %al,%al
jnz 1b
/* Success -- 0 byte reached */
decq %rdx
xorq %rax,%rax
jmp 6f
4:
/* rdx is zero -- return ENAMETOOLONG */
movq $ENAMETOOLONG,%rax
6:
testq %rcx, %rcx
jz 7f
/* set *lencopied and return %rax */
subq %rdx, %r8
movq %r8, (%rcx)
7:
ret
/*
* Handling of special 386 registers and descriptor tables etc
* %rdi
*/
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
/* reload the descriptor table */
lgdt (%rdi)
/* flush the prefetch q */
jmp 1f
nop
1:
movl $KDSEL, %eax
mov %ax,%ds
mov %ax,%es
mov %ax,%fs /* Beware, use wrmsr to set 64 bit base */
mov %ax,%gs
mov %ax,%ss
/* reload code selector by turning return into intersegmental return */
popq %rax
pushq $KCSEL
pushq %rax
lretq


@ -1,295 +0,0 @@
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asmacros.h>
#include "assym.s"
/*****************************************************************************/
/* Scheduling */
/*****************************************************************************/
.text
/*
* cpu_throw()
*
* This is the second half of cpu_switch(). It is used when the current
* thread is either a dummy or slated to die, and we no longer care
* about its state. This is only a slight optimization and is probably
* not worth it anymore. Note that we need to clear the pm_active bits so
* we do need the old proc if it still exists.
* %rdi = oldtd
* %rsi = newtd
*/
ENTRY(cpu_throw)
xorq %rax, %rax
movl PCPU(CPUID), %eax
testq %rdi,%rdi /* no thread? */
jz 1f
/* release bit from old pm_active */
movq TD_PROC(%rdi), %rdx /* oldtd->td_proc */
movq P_VMSPACE(%rdx), %rdx /* proc->p_vmspace */
btrq %rax, VM_PMAP+PM_ACTIVE(%rdx) /* clear old */
1:
movq TD_PCB(%rsi),%rdx /* newtd->td_pcb */
movq PCB_CR3(%rdx),%rdx
movq %rdx,%cr3 /* new address space */
/* set bit in new pm_active */
movq TD_PROC(%rsi),%rdx
movq P_VMSPACE(%rdx), %rdx
btsq %rax, VM_PMAP+PM_ACTIVE(%rdx) /* set new */
jmp sw1
/*
* cpu_switch(old, new)
*
* Save the current thread state, then select the next thread to run
* and load its state.
* %rdi = oldtd
* %rsi = newtd
*/
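/*
 * Note, for orientation: the switch itself is just a stack swap. The
 * old thread's resume point is its saved PCB_RIP/PCB_RSP pair; once
 * the new thread's %rsp is loaded below, the final ret pops the new
 * thread's saved return address, typically resuming it at its own
 * earlier call to cpu_switch().
 */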
ENTRY(cpu_switch)
/* Switch to new thread. First, save context. */
#ifdef INVARIANTS
testq %rdi,%rdi /* no thread? */
jz badsw2 /* no, panic */
#endif
movq TD_PCB(%rdi),%rdx
movq (%rsp),%rax /* Hardware registers */
movq %rax,PCB_RIP(%rdx)
movq %rbx,PCB_RBX(%rdx)
movq %rsp,PCB_RSP(%rdx)
movq %rbp,PCB_RBP(%rdx)
movq %r12,PCB_R12(%rdx)
movq %r13,PCB_R13(%rdx)
movq %r14,PCB_R14(%rdx)
movq %r15,PCB_R15(%rdx)
pushfq /* PSL */
popq PCB_RFLAGS(%rdx)
/* have we used fp, and need a save? */
cmpq %rdi,PCPU(FPCURTHREAD)
jne 1f
pushq %rdi
pushq %rsi
addq $PCB_SAVEFPU,%rdx /* h/w bugs make saving complicated */
movq %rdx, %rdi
call npxsave /* do it in a big C function */
popq %rsi
popq %rdi
1:
/* Save is done. Now fire up new thread. Leave old vmspace. */
#ifdef INVARIANTS
testq %rsi,%rsi /* no thread? */
jz badsw3 /* no, panic */
#endif
movq TD_PCB(%rsi),%rdx
xorq %rax, %rax
movl PCPU(CPUID), %eax
/* switch address space */
movq PCB_CR3(%rdx),%rdx
movq %rdx,%cr3 /* new address space */
/* Release bit from old pmap->pm_active */
movq TD_PROC(%rdi), %rdx /* oldproc */
movq P_VMSPACE(%rdx), %rdx
btrq %rax, VM_PMAP+PM_ACTIVE(%rdx) /* clear old */
/* Set bit in new pmap->pm_active */
movq TD_PROC(%rsi),%rdx /* newproc */
movq P_VMSPACE(%rdx), %rdx
btsq %rax, VM_PMAP+PM_ACTIVE(%rdx) /* set new */
sw1:
/*
* At this point, we've switched address spaces and are ready
* to load up the rest of the next context.
*/
movq TD_PCB(%rsi),%rdx
/* Update the TSS_RSP0 pointer for the next interrupt */
leaq -16(%rdx), %rbx
movq %rbx, common_tss + COMMON_TSS_RSP0
/* Restore context. */
movq PCB_RBX(%rdx),%rbx
movq PCB_RSP(%rdx),%rsp
movq PCB_RBP(%rdx),%rbp
movq PCB_R12(%rdx),%r12
movq PCB_R13(%rdx),%r13
movq PCB_R14(%rdx),%r14
movq PCB_R15(%rdx),%r15
movq PCB_RIP(%rdx),%rax
movq %rax,(%rsp)
pushq PCB_RFLAGS(%rdx)
popfq
movq %rdx, PCPU(CURPCB)
movq %rsi, PCPU(CURTHREAD) /* into next thread */
ret
#ifdef INVARIANTS
badsw1:
pushq %rax
pushq %rcx
pushq %rdx
pushq %rbx
pushq %rbp
pushq %rsi
pushq %rdi
pushq %r8
pushq %r9
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq $sw0_1
call panic
sw0_1: .asciz "cpu_throw: no newthread supplied"
badsw2:
pushq %rax
pushq %rcx
pushq %rdx
pushq %rbx
pushq %rbp
pushq %rsi
pushq %rdi
pushq %r8
pushq %r9
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq $sw0_2
call panic
sw0_2: .asciz "cpu_switch: no curthread supplied"
badsw3:
pushq %rax
pushq %rcx
pushq %rdx
pushq %rbx
pushq %rbp
pushq %rsi
pushq %rdi
pushq %r8
pushq %r9
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq $sw0_3
call panic
sw0_3: .asciz "cpu_switch: no newthread supplied"
#endif
noswitch: .asciz "cpu_switch: called!"
nothrow: .asciz "cpu_throw: called!"
/*
* savectx(pcb)
* Update pcb, saving current processor state.
*/
ENTRY(savectx)
/* Fetch PCB. */
movq %rdi,%rcx
/* Save caller's return address. */
movq (%rsp),%rax
movq %rax,PCB_RIP(%rcx)
movq %cr3,%rax
movq %rax,PCB_CR3(%rcx)
movq %rbx,PCB_RBX(%rcx)
movq %rsp,PCB_RSP(%rcx)
movq %rbp,PCB_RBP(%rcx)
movq %r12,PCB_R12(%rcx)
movq %r13,PCB_R13(%rcx)
movq %r14,PCB_R14(%rcx)
movq %r15,PCB_R15(%rcx)
pushfq
popq PCB_RFLAGS(%rcx)
/*
* If fpcurthread == NULL, then the npx h/w state is irrelevant and the
* state had better already be in the pcb. This is true for forks
* but not for dumps (the old book-keeping with FP flags in the pcb
* always lost for dumps because the dump pcb has 0 flags).
*
* If fpcurthread != NULL, then we have to save the npx h/w state to
* fpcurthread's pcb and copy it to the requested pcb, or save to the
* requested pcb and reload. Copying is easier because we would
* have to handle h/w bugs for reloading. We used to lose the
* parent's npx state for forks by forgetting to reload.
*/
pushfq
cli
movq PCPU(FPCURTHREAD),%rax
testq %rax,%rax
je 1f
pushq %rcx
pushq %rax
movq TD_PCB(%rax),%rdi
leaq PCB_SAVEFPU(%rdi),%rdi
call npxsave
popq %rax
popq %rcx
movq $PCB_SAVEFPU_SIZE,%rdx /* arg 3 */
leaq PCB_SAVEFPU(%rcx),%rsi /* arg 2 */
movq %rax,%rdi /* arg 1 */
call bcopy
1:
popfq
ret


@ -1,81 +0,0 @@
/*-
* Copyright (c) 1989, 1990 William F. Jolitz.
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
.data
ALIGN_DATA
/* interrupt mask enable (all h/w off) */
.globl imen
imen: .long HWI_MASK
.text
SUPERALIGN_TEXT
ENTRY(INTREN)
movq %rdi, %rax
movl %eax, %ecx
notl %eax
andl %eax, imen
movl imen, %eax
testb %cl, %cl
je 1f
outb %al, $(IO_ICU1 + ICU_IMR_OFFSET)
1:
testb %ch, %ch
je 2f
shrl $8, %eax
outb %al, $(IO_ICU2 + ICU_IMR_OFFSET)
2:
ret
ENTRY(INTRDIS)
movq %rdi, %rax
movl %eax, %ecx
orl %eax, imen
movl imen, %eax
testb %cl, %cl
je 1f
outb %al, $(IO_ICU1 + ICU_IMR_OFFSET)
1:
testb %ch, %ch
je 2f
shrl $8, %eax
outb %al, $(IO_ICU2 + ICU_IMR_OFFSET)
2:
ret


@ -1,142 +0,0 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $FreeBSD$
*/
#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
#define IRQ_BYTE(irq_num) ((irq_num) >> 3)
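/*
 * Worked example, for illustration: for irq_num = 9,
 *	IRQ_BYTE(9) = 9 >> 3       = 1		(second imen byte: slave icu)
 *	IRQ_BIT(9)  = 1 << (9 % 8) = 0x02
 * so INTR() below masks IRQ 9 by or'ing 0x02 into imen+1 and writing
 * that byte to the slave icu's IMR.
 */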
#define ENABLE_ICU1 \
movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
outb %al,$IO_ICU1 /* ... to clear in service bit */
#define ENABLE_ICU1_AND_2 \
movb $ICU_EOI,%al ; /* as above */ \
outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
outb %al,$IO_ICU1 /* ... then first icu */
/*
* Macros for interrupt entry, call to handler, and exit.
*/
#define FAST_INTR(irq_num, vec_name, enable_icus) \
.text ; \
SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
subq $TF_RIP,%rsp ; /* skip dummy tf_err and tf_trapno */ \
movq %rdi,TF_RDI(%rsp) ; \
movq %rsi,TF_RSI(%rsp) ; \
movq %rdx,TF_RDX(%rsp) ; \
movq %rcx,TF_RCX(%rsp) ; \
movq %r8,TF_R8(%rsp) ; \
movq %r9,TF_R9(%rsp) ; \
movq %rax,TF_RAX(%rsp) ; \
movq %rbx,TF_RBX(%rsp) ; \
movq %rbp,TF_RBP(%rsp) ; \
movq %r10,TF_R10(%rsp) ; \
movq %r11,TF_R11(%rsp) ; \
movq %r12,TF_R12(%rsp) ; \
movq %r13,TF_R13(%rsp) ; \
movq %r14,TF_R14(%rsp) ; \
movq %r15,TF_R15(%rsp) ; \
FAKE_MCOUNT((12)*4(%rsp)) ; \
call critical_enter ; \
movq PCPU(CURTHREAD),%rbx ; \
incl TD_INTR_NESTING_LEVEL(%rbx) ; \
movq intr_unit + (irq_num) * 8, %rdi ; \
call *intr_handler + (irq_num) * 8 ; /* do the work ASAP */ \
enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
incl cnt+V_INTR ; /* book-keeping can wait */ \
movq intr_countp + (irq_num) * 8,%rax ; \
incq (%rax) ; \
decl TD_INTR_NESTING_LEVEL(%rbx) ; \
call critical_exit ; \
MEXITCOUNT ; \
jmp doreti
/*
* Slow, threaded interrupts.
*
* XXX Most of the parameters here are obsolete. Fix this when we're
* done.
* XXX we really shouldn't return via doreti if we just schedule the
* interrupt handler and don't run anything. We could just do an
* iret. FIXME.
*/
#define INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
.text ; \
SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
subq $TF_RIP,%rsp ; /* skip dummy tf_err and tf_trapno */ \
movq %rdi,TF_RDI(%rsp) ; \
movq %rsi,TF_RSI(%rsp) ; \
movq %rdx,TF_RDX(%rsp) ; \
movq %rcx,TF_RCX(%rsp) ; \
movq %r8,TF_R8(%rsp) ; \
movq %r9,TF_R9(%rsp) ; \
movq %rax,TF_RAX(%rsp) ; \
movq %rbx,TF_RBX(%rsp) ; \
movq %rbp,TF_RBP(%rsp) ; \
movq %r10,TF_R10(%rsp) ; \
movq %r11,TF_R11(%rsp) ; \
movq %r12,TF_R12(%rsp) ; \
movq %r13,TF_R13(%rsp) ; \
movq %r14,TF_R14(%rsp) ; \
movq %r15,TF_R15(%rsp) ; \
maybe_extra_ipending ; \
movb imen + IRQ_BYTE(irq_num),%al ; \
orb $IRQ_BIT(irq_num),%al ; \
movb %al,imen + IRQ_BYTE(irq_num) ; \
outb %al,$icu+ICU_IMR_OFFSET ; \
enable_icus ; \
movq PCPU(CURTHREAD),%rbx ; \
incl TD_INTR_NESTING_LEVEL(%rbx) ; \
FAKE_MCOUNT(13*4(%rsp)) ; /* XXX late to avoid double count */ \
movq $irq_num, %rdi; /* pass the IRQ */ \
call sched_ithd ; \
decl TD_INTR_NESTING_LEVEL(%rbx) ; \
MEXITCOUNT ; \
/* We could usually avoid the following jmp by inlining some of */ \
/* doreti, but it's probably better to use less cache. */ \
jmp doreti
MCOUNT_LABEL(bintr)
FAST_INTR(0,fastintr0, ENABLE_ICU1)
FAST_INTR(1,fastintr1, ENABLE_ICU1)
FAST_INTR(2,fastintr2, ENABLE_ICU1)
FAST_INTR(3,fastintr3, ENABLE_ICU1)
FAST_INTR(4,fastintr4, ENABLE_ICU1)
FAST_INTR(5,fastintr5, ENABLE_ICU1)
FAST_INTR(6,fastintr6, ENABLE_ICU1)
FAST_INTR(7,fastintr7, ENABLE_ICU1)
FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
#define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
INTR(0,intr0, IO_ICU1, ENABLE_ICU1, CLKINTR_PENDING)
INTR(1,intr1, IO_ICU1, ENABLE_ICU1,)
INTR(2,intr2, IO_ICU1, ENABLE_ICU1,)
INTR(3,intr3, IO_ICU1, ENABLE_ICU1,)
INTR(4,intr4, IO_ICU1, ENABLE_ICU1,)
INTR(5,intr5, IO_ICU1, ENABLE_ICU1,)
INTR(6,intr6, IO_ICU1, ENABLE_ICU1,)
INTR(7,intr7, IO_ICU1, ENABLE_ICU1,)
INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2,)
MCOUNT_LABEL(eintr)


@ -73,4 +73,4 @@ eintrnames:
* loading segregs.
*/
#include "amd64/isa/icu_vector.s"
#include "amd64/isa/icu_vector.S"


@ -1,76 +0,0 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $FreeBSD$
*/
#include <amd64/isa/icu.h>
#include <amd64/isa/isa.h>
#include <amd64/isa/intr_machdep.h>
.data
ALIGN_DATA
/*
* Interrupt counters and names for export to vmstat(8) and friends.
*
* XXX this doesn't really belong here; everything except the labels
* for the endpointers is almost machine-independent.
*/
.globl intrcnt, eintrcnt
intrcnt:
.space INTRCNT_COUNT * 8
eintrcnt:
.globl intrnames, eintrnames
intrnames:
.space INTRCNT_COUNT * 32
eintrnames:
.text
/*
* Macros for interrupt entry, call to handler, and exit.
*
* XXX - the interrupt frame is set up to look like a trap frame. This is
* usually a waste of time. The only interrupt handlers that want a frame
* are the clock handler (it wants a clock frame) and the npx handler (it's
* easier to do right all in assembler). The interrupt return routine
* needs a trap frame for rare AST's (it could easily convert the frame).
* The direct costs of setting up a trap frame are two pushl's (error
* code and trap number), an addl to get rid of these, and pushing and
* popping the call-saved regs %esi, %edi and %ebp twice. The indirect
* costs are making the driver interface nonuniform so unpending of
* interrupts is more complicated and slower (call_driver(unit) would
* be easier than ensuring an interrupt frame for all handlers). Finally,
* there are some struct copies in the npx handler and maybe in the clock
* handler that could be avoided by working more with pointers to frames
* instead of frames.
*
* XXX - should we do a cld on every system entry to avoid the requirement
* for scattered cld's?
*
* Coding notes for *.s:
*
* If possible, avoid operations that involve an operand size override.
* Word-sized operations might be smaller, but the operand size override
* makes them slower on 486's and no faster on 386's unless perhaps
* the instruction pipeline is depleted. E.g.,
*
* Use movl to seg regs instead of the equivalent but more descriptive
* movw - gas generates an irrelevant (slower) operand size override.
*
* Use movl to ordinary regs in preference to movw and especially
* in preference to movz[bw]l. Use unsigned (long) variables with the
* top bits clear instead of unsigned short variables to provide more
* opportunities for movl.
*
* If possible, use byte-sized operations. They are smaller and no slower.
*
* Use (%reg) instead of 0(%reg) - gas generates larger code for the latter.
*
* If the interrupt frame is made more flexible, INTR can push %eax first
* and decide the ipending case with less overhead, e.g., by avoiding
* loading segregs.
*/
#include "amd64/isa/icu_vector.s"