Added trap-specific lock calls: get_fpu_lock, etc.

All resolve to the GIANT_LOCK at this time; the split is purely a logical partitioning.
fsmp 1997-08-10 20:59:07 +00:00
parent 1dfa4285cf
commit ce530fb8fa
8 changed files with 333 additions and 275 deletions
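The "purely a logical partitioning" above means the new per-trap entry points (get_fpu_lock, get_align_lock, get_syscall_lock, get_int0x80_syscall_lock) all still acquire the one mp_lock that get_mplock/get_isrlock use; only the names differ, marking where finer-grained locks could be introduced later. A minimal C sketch of that idea, omitting the TPR/EFLAGS save that the real assembly bodies in mplock.s also perform:

    /* every trap-specific getter funnels into the one giant lock */
    extern void MPgetlock(unsigned *lock);   /* real one is in mplock.s */
    static unsigned mp_lock;                 /* the GIANT_LOCK word */

    void get_fpu_lock(void)             { MPgetlock(&mp_lock); }
    void get_align_lock(void)           { MPgetlock(&mp_lock); }
    void get_syscall_lock(void)         { MPgetlock(&mp_lock); }
    void get_int0x80_syscall_lock(void) { MPgetlock(&mp_lock); }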

View File

@ -1,13 +1,16 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $Id: apic_vector.s,v 1.13 1997/07/31 05:42:05 fsmp Exp $
* $Id: apic_vector.s,v 1.21 1997/08/10 20:47:53 smp Exp smp $
*/
#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h> /** PEND_INTS, various counters */
#include "i386/isa/intr_machdep.h"
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num) (1 << (irq_num))
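IRQ_BIT is the usual one-bit-per-IRQ mapping into words like iactive and _ipending; a tiny self-checking example of what it produces:

    #include <assert.h>

    #define IRQ_BIT(irq_num) (1 << (irq_num))   /* as defined above */

    int main(void)
    {
        assert(IRQ_BIT(0) == 0x01);   /* IRQ 0 -> bit 0 */
        assert(IRQ_BIT(3) == 0x08);   /* IRQ 3 -> bit 3 */
        return 0;
    }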
@ -31,7 +34,9 @@
lock ; /* MP-safe */ \
btsl $(irq_num),iactive ; /* lazy masking */ \
jc 6f ; /* already active */ \
TRY_ISRLOCK(irq_num) ; /* try to get lock */ \
pushl $_mp_lock ; /* GIANT_LOCK */ \
call _MPtrylock ; /* try to get lock */ \
add $4, %esp ; \
testl %eax, %eax ; /* did we get it? */ \
jnz 8f ; /* yes, enter kernel */ \
6: ; /* active or locked */ \
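The inlined sequence above pushes &_mp_lock and calls _MPtrylock, entering the kernel only if the lock was obtained. A rough C sketch of try-acquire semantics for a recursive giant lock; the word layout (owner CPU id in the high byte, recursion count in the low 24 bits, a distinguished FREE value) is an assumption for illustration -- the real encoding lives in mplock.s:

    #include <stdatomic.h>

    #define FREE 0xffffffffu              /* assumed "unowned" value */

    static _Atomic unsigned mp_lock = FREE;

    /* returns 1 on success, 0 if another CPU holds the lock */
    int MPtrylock_sketch(unsigned cpu_id)
    {
        unsigned old = atomic_load(&mp_lock);

        if (old != FREE && (old >> 24) == cpu_id) {
            /* recursive acquire: only the owner reaches this path,
             * so a plain store of count+1 cannot race */
            atomic_store(&mp_lock, old + 1);
            return 1;
        }
        unsigned expected = FREE;
        /* first acquire: atomically claim owner byte and count of 1 */
        return atomic_compare_exchange_strong(&mp_lock, &expected,
                                              (cpu_id << 24) | 1u);
    }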
@ -83,7 +88,7 @@
; \
ALIGN_TEXT ; \
1: ; \
GET_MPLOCK /* SMP Spin lock */
call _get_mplock /* SMP Spin lock */
#endif /* PEND_INTS */
@ -123,7 +128,7 @@ IDTVEC(vec_name) ; \
movl %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
GET_ISRLOCK(irq_num) ; \
call _get_isrlock ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
movl $0, lapic_eoi ; \
@ -137,7 +142,9 @@ IDTVEC(vec_name) ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
REL_ISRLOCK(irq_num) ; \
pushl $_mp_lock ; /* GIANT_LOCK */ \
call _MPrellock ; \
add $4, %esp ; \
MAYBE_POPL_ES ; \
popl %ds ; \
popl %edx ; \
@ -210,7 +217,9 @@ __CONCAT(Xresume,irq_num): ; \
/* XXX skip mcounting here to avoid double count */ \
lock ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
REL_ISRLOCK(irq_num) ; \
pushl $_mp_lock ; /* GIANT_LOCK */ \
call _MPrellock ; \
add $4, %esp ; \
popl %es ; \
popl %ds ; \
popal ; \

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.35 1997/08/09 00:02:31 dyson Exp $
* $Id: exception.s,v 1.8 1997/08/10 20:51:52 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@ -42,27 +42,8 @@
#ifdef SMP
#include <machine/apic.h> /* for apic_vector.s */
#include <machine/smptests.h> /** PEND_INTS */
#ifndef PEND_INTS
/* generic giant-lock calls */
#define GET_MPLOCK call _get_mplock
#define REL_MPLOCK call _rel_mplock
#endif /* PEND_INTS */
/* ISR specific giant-lock calls */
#define GET_ISRLOCK(N) call _get_isrlock
#define TRY_ISRLOCK(N) \
pushl $_mp_lock ; \
call _MPtrylock ; \
add $4, %esp
#define REL_ISRLOCK(N) \
pushl $_mp_lock ; \
call _MPrellock ; \
add $4, %esp
#define MP_INSTR_LOCK lock
#define MP_INSTR_LOCK \
lock /* MP-safe */
/* protects the IO APIC and apic_imen as a critical region */
#define IMASK_LOCK \
@ -77,13 +58,9 @@
#else
#define GET_MPLOCK /* NOP get Kernel Mutex */
#define REL_MPLOCK /* NOP release mutex */
#define GET_ISRLOCK(N) /* NOP get Kernel Mutex */
#define REL_ISRLOCK(N) /* NOP release mutex */
#define MP_INSTR_LOCK /* NOP instruction lock */
#define IMASK_LOCK /* NOP IO APIC & apic_imen lock */
#define IMASK_UNLOCK /* NOP IO APIC & apic_imen lock */
#define MP_INSTR_LOCK /* NOP */
#define IMASK_LOCK /* NOP */
#define IMASK_UNLOCK /* NOP */
#endif /* SMP */
@ -171,19 +148,19 @@ IDTVEC(fpu)
* interrupts, but now it is fairly easy - mask nested ones the
* same as SWI_AST's.
*/
pushl $0 /* dummy error code */
pushl $0 /* dummy trap type */
pushl $0 /* dummy error code */
pushl $0 /* dummy trap type */
pushal
pushl %ds
pushl %es /* now the stack frame is a trap frame */
pushl %es /* now stack frame is a trap frame */
movl $KDSEL,%eax
movl %ax,%ds
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
movl _cpl,%eax
pushl %eax
pushl $0 /* dummy unit to finish building intr frame */
GET_ISRLOCK(-1)
pushl $0 /* dummy unit to finish intr frame */
call _get_fpu_lock
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
@ -209,8 +186,8 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
GET_ISRLOCK(-1)
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
call _get_align_lock
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
call _trap
@ -251,26 +228,26 @@ calltrap:
*/
SUPERALIGN_TEXT
IDTVEC(syscall)
pushfl /* save eflags in tf_err for now */
subl $4,%esp /* skip over tf_trapno */
pushfl /* save eflags in tf_err for now */
subl $4,%esp /* skip over tf_trapno */
pushal
pushl %ds
pushl %es
movl $KDSEL,%eax /* switch to kernel segments */
movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
GET_ISRLOCK(-1)
call _get_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@ -281,23 +258,23 @@ IDTVEC(syscall)
*/
SUPERALIGN_TEXT
IDTVEC(int0x80_syscall)
subl $8,%esp /* skip over tf_trapno and tf_err */
subl $8,%esp /* skip over tf_trapno and tf_err */
pushal
pushl %ds
pushl %es
movl $KDSEL,%eax /* switch to kernel segments */
movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
GET_ISRLOCK(-1)
call _get_int0x80_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@ -314,14 +291,14 @@ ENTRY(fork_trampoline)
* have this call a non-return function to stay in kernel mode.
* initproc has its own fork handler, but it does return.
*/
pushl %ebx /* arg1 */
call %esi /* function */
pushl %ebx /* arg1 */
call %esi /* function */
addl $4,%esp
/* cut from syscall */
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.35 1997/08/09 00:02:31 dyson Exp $
* $Id: exception.s,v 1.8 1997/08/10 20:51:52 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@ -42,27 +42,8 @@
#ifdef SMP
#include <machine/apic.h> /* for apic_vector.s */
#include <machine/smptests.h> /** PEND_INTS */
#ifndef PEND_INTS
/* generic giant-lock calls */
#define GET_MPLOCK call _get_mplock
#define REL_MPLOCK call _rel_mplock
#endif /* PEND_INTS */
/* ISR specific giant-lock calls */
#define GET_ISRLOCK(N) call _get_isrlock
#define TRY_ISRLOCK(N) \
pushl $_mp_lock ; \
call _MPtrylock ; \
add $4, %esp
#define REL_ISRLOCK(N) \
pushl $_mp_lock ; \
call _MPrellock ; \
add $4, %esp
#define MP_INSTR_LOCK lock
#define MP_INSTR_LOCK \
lock /* MP-safe */
/* protects the IO APIC and apic_imen as a critical region */
#define IMASK_LOCK \
@ -77,13 +58,9 @@
#else
#define GET_MPLOCK /* NOP get Kernel Mutex */
#define REL_MPLOCK /* NOP release mutex */
#define GET_ISRLOCK(N) /* NOP get Kernel Mutex */
#define REL_ISRLOCK(N) /* NOP release mutex */
#define MP_INSTR_LOCK /* NOP instruction lock */
#define IMASK_LOCK /* NOP IO APIC & apic_imen lock */
#define IMASK_UNLOCK /* NOP IO APIC & apic_imen lock */
#define MP_INSTR_LOCK /* NOP */
#define IMASK_LOCK /* NOP */
#define IMASK_UNLOCK /* NOP */
#endif /* SMP */
@ -171,19 +148,19 @@ IDTVEC(fpu)
* interrupts, but now it is fairly easy - mask nested ones the
* same as SWI_AST's.
*/
pushl $0 /* dummy error code */
pushl $0 /* dummy trap type */
pushl $0 /* dummy error code */
pushl $0 /* dummy trap type */
pushal
pushl %ds
pushl %es /* now the stack frame is a trap frame */
pushl %es /* now stack frame is a trap frame */
movl $KDSEL,%eax
movl %ax,%ds
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
movl _cpl,%eax
pushl %eax
pushl $0 /* dummy unit to finish building intr frame */
GET_ISRLOCK(-1)
pushl $0 /* dummy unit to finish intr frame */
call _get_fpu_lock
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
@ -209,8 +186,8 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
GET_ISRLOCK(-1)
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
call _get_align_lock
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
call _trap
@ -251,26 +228,26 @@ calltrap:
*/
SUPERALIGN_TEXT
IDTVEC(syscall)
pushfl /* save eflags in tf_err for now */
subl $4,%esp /* skip over tf_trapno */
pushfl /* save eflags in tf_err for now */
subl $4,%esp /* skip over tf_trapno */
pushal
pushl %ds
pushl %es
movl $KDSEL,%eax /* switch to kernel segments */
movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
GET_ISRLOCK(-1)
call _get_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@ -281,23 +258,23 @@ IDTVEC(syscall)
*/
SUPERALIGN_TEXT
IDTVEC(int0x80_syscall)
subl $8,%esp /* skip over tf_trapno and tf_err */
subl $8,%esp /* skip over tf_trapno and tf_err */
pushal
pushl %ds
pushl %es
movl $KDSEL,%eax /* switch to kernel segments */
movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
GET_ISRLOCK(-1)
call _get_int0x80_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@ -314,14 +291,14 @@ ENTRY(fork_trampoline)
* have this call a non-return function to stay in kernel mode.
* initproc has its own fork handler, but it does return.
*/
pushl %ebx /* arg1 */
call %esi /* function */
pushl %ebx /* arg1 */
call %esi /* function */
addl $4,%esp
/* cut from syscall */
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT

View File

@ -1,13 +1,16 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $Id: apic_vector.s,v 1.13 1997/07/31 05:42:05 fsmp Exp $
* $Id: apic_vector.s,v 1.21 1997/08/10 20:47:53 smp Exp smp $
*/
#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h> /** PEND_INTS, various counters */
#include "i386/isa/intr_machdep.h"
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num) (1 << (irq_num))
@ -31,7 +34,9 @@
lock ; /* MP-safe */ \
btsl $(irq_num),iactive ; /* lazy masking */ \
jc 6f ; /* already active */ \
TRY_ISRLOCK(irq_num) ; /* try to get lock */ \
pushl $_mp_lock ; /* GIANT_LOCK */ \
call _MPtrylock ; /* try to get lock */ \
add $4, %esp ; \
testl %eax, %eax ; /* did we get it? */ \
jnz 8f ; /* yes, enter kernel */ \
6: ; /* active or locked */ \
@ -83,7 +88,7 @@
; \
ALIGN_TEXT ; \
1: ; \
GET_MPLOCK /* SMP Spin lock */
call _get_mplock /* SMP Spin lock */
#endif /* PEND_INTS */
@ -123,7 +128,7 @@ IDTVEC(vec_name) ; \
movl %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
GET_ISRLOCK(irq_num) ; \
call _get_isrlock ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
movl $0, lapic_eoi ; \
@ -137,7 +142,9 @@ IDTVEC(vec_name) ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
REL_ISRLOCK(irq_num) ; \
pushl $_mp_lock ; /* GIANT_LOCK */ \
call _MPrellock ; \
add $4, %esp ; \
MAYBE_POPL_ES ; \
popl %ds ; \
popl %edx ; \
@ -210,7 +217,9 @@ __CONCAT(Xresume,irq_num): ; \
/* XXX skip mcounting here to avoid double count */ \
lock ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
REL_ISRLOCK(irq_num) ; \
pushl $_mp_lock ; /* GIANT_LOCK */ \
call _MPrellock ; \
add $4, %esp ; \
popl %es ; \
popl %ds ; \
popal ; \

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.35 1997/08/09 00:02:31 dyson Exp $
* $Id: exception.s,v 1.8 1997/08/10 20:51:52 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@ -42,27 +42,8 @@
#ifdef SMP
#include <machine/apic.h> /* for apic_vector.s */
#include <machine/smptests.h> /** PEND_INTS */
#ifndef PEND_INTS
/* generic giant-lock calls */
#define GET_MPLOCK call _get_mplock
#define REL_MPLOCK call _rel_mplock
#endif /* PEND_INTS */
/* ISR specific giant-lock calls */
#define GET_ISRLOCK(N) call _get_isrlock
#define TRY_ISRLOCK(N) \
pushl $_mp_lock ; \
call _MPtrylock ; \
add $4, %esp
#define REL_ISRLOCK(N) \
pushl $_mp_lock ; \
call _MPrellock ; \
add $4, %esp
#define MP_INSTR_LOCK lock
#define MP_INSTR_LOCK \
lock /* MP-safe */
/* protects the IO APIC and apic_imen as a critical region */
#define IMASK_LOCK \
@ -77,13 +58,9 @@
#else
#define GET_MPLOCK /* NOP get Kernel Mutex */
#define REL_MPLOCK /* NOP release mutex */
#define GET_ISRLOCK(N) /* NOP get Kernel Mutex */
#define REL_ISRLOCK(N) /* NOP release mutex */
#define MP_INSTR_LOCK /* NOP instruction lock */
#define IMASK_LOCK /* NOP IO APIC & apic_imen lock */
#define IMASK_UNLOCK /* NOP IO APIC & apic_imen lock */
#define MP_INSTR_LOCK /* NOP */
#define IMASK_LOCK /* NOP */
#define IMASK_UNLOCK /* NOP */
#endif /* SMP */
@ -171,19 +148,19 @@ IDTVEC(fpu)
* interrupts, but now it is fairly easy - mask nested ones the
* same as SWI_AST's.
*/
pushl $0 /* dummy error code */
pushl $0 /* dummy trap type */
pushl $0 /* dummy error code */
pushl $0 /* dummy trap type */
pushal
pushl %ds
pushl %es /* now the stack frame is a trap frame */
pushl %es /* now stack frame is a trap frame */
movl $KDSEL,%eax
movl %ax,%ds
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
movl _cpl,%eax
pushl %eax
pushl $0 /* dummy unit to finish building intr frame */
GET_ISRLOCK(-1)
pushl $0 /* dummy unit to finish intr frame */
call _get_fpu_lock
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
@ -209,8 +186,8 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
GET_ISRLOCK(-1)
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
call _get_align_lock
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
incl _cnt+V_TRAP
orl $SWI_AST_MASK,_cpl
call _trap
@ -251,26 +228,26 @@ calltrap:
*/
SUPERALIGN_TEXT
IDTVEC(syscall)
pushfl /* save eflags in tf_err for now */
subl $4,%esp /* skip over tf_trapno */
pushfl /* save eflags in tf_err for now */
subl $4,%esp /* skip over tf_trapno */
pushal
pushl %ds
pushl %es
movl $KDSEL,%eax /* switch to kernel segments */
movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
GET_ISRLOCK(-1)
call _get_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@ -281,23 +258,23 @@ IDTVEC(syscall)
*/
SUPERALIGN_TEXT
IDTVEC(int0x80_syscall)
subl $8,%esp /* skip over tf_trapno and tf_err */
subl $8,%esp /* skip over tf_trapno and tf_err */
pushal
pushl %ds
pushl %es
movl $KDSEL,%eax /* switch to kernel segments */
movl $KDSEL,%eax /* switch to kernel segments */
movl %ax,%ds
movl %ax,%es
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
GET_ISRLOCK(-1)
call _get_int0x80_syscall_lock
incl _cnt+V_SYSCALL
movl $SWI_AST_MASK,_cpl
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT
@ -314,14 +291,14 @@ ENTRY(fork_trampoline)
* have this call a non-return function to stay in kernel mode.
* initproc has its own fork handler, but it does return.
*/
pushl %ebx /* arg1 */
call %esi /* function */
pushl %ebx /* arg1 */
call %esi /* function */
addl $4,%esp
/* cut from syscall */
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
pushl $0 /* cpl to restore */
subl $4,%esp
movb $1,_intr_nesting_level
MEXITCOUNT

View File

@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* $Id: mplock.s,v 1.14 1997/08/04 17:19:17 smp Exp smp $
* $Id: mplock.s,v 1.15 1997/08/10 20:51:52 smp Exp smp $
*
* Functions for locking between CPUs in an SMP system.
*
@ -40,6 +40,25 @@ _tryhits: 9938 2196d 44cc
#define FREE_FIRST
#define GLPROFILE
#ifdef CHEAP_TPR
/* we assume that the 'reserved bits' can be written with zeros */
#else /* CHEAP_TPR */
#error HEADS UP: this code needs work
/*
* The APIC doc says that reserved bits must be written with whatever
* value they currently contain, i.e. you should read, modify, write
* instead of just writing new values to the TPR register. Current
* silicon seems happy with just writing. If the behaviour of the
* silicon changes, all code that accesses lapic_tpr must be modified.
* The last version to contain such code was:
* $Id: mplock.s,v 1.15 1997/08/10 20:51:52 smp Exp smp $
*/
#endif /* CHEAP_TPR */
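The comment block above distinguishes two TPR write disciplines: CHEAP_TPR writes the register directly (reserved bits as zeros), while the conservative variant reads, modifies, and writes so the reserved bits keep their current contents. A C sketch of the two, where the volatile variable stands in for the memory-mapped lapic_tpr and the field mask is assumed from the APIC headers:

    #include <stdint.h>

    #define APIC_TPR_PRIO 0x000000ffu         /* assumed priority-field mask */

    static volatile uint32_t lapic_tpr_reg;   /* stands in for lapic_tpr */

    /* CHEAP_TPR: current silicon tolerates zeroed reserved bits */
    static void set_tpr_cheap(uint32_t prio)
    {
        lapic_tpr_reg = prio;
    }

    /* conservative read-modify-write the APIC doc asks for */
    static void set_tpr_rmw(uint32_t prio)
    {
        uint32_t v = lapic_tpr_reg;
        v &= ~APIC_TPR_PRIO;                  /* clear only the prio field */
        v |= prio;
        lapic_tpr_reg = v;
    }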
#ifdef GRAB_LOPRIO
/*
* Claim LOWest PRIOrity, i.e. attempt to grab ALL INTerrupts.
@ -48,44 +67,20 @@ _tryhits: 9938 2196d 44cc
/* location of saved TPR on stack */
#define TPR_TARGET 12(%esp)
/* we assume that the 'reserved bits' can be written with zeros */
#ifdef CHEAP_TPR
/* after 1st acquire of lock we attempt to grab all hardware INTs */
#define GRAB_HWI \
movl $ALLHWI_LEVEL, TPR_TARGET /* task prio to 'all HWI' */
#define GRAB_HWI_2 \
movl $ALLHWI_LEVEL, lapic_tpr /* task prio to 'all HWI' */
#define GRAB_HWI movl $ALLHWI_LEVEL, TPR_TARGET
#define GRAB_HWI_2 movl $ALLHWI_LEVEL, lapic_tpr /* CHEAP_TPR */
/* after last release of lock give up LOW PRIO (ie, arbitrate INTerrupts) */
#define ARB_HWI \
movl $LOPRIO_LEVEL, lapic_tpr /* task prio to 'arbitrate' */
#define ARB_HWI movl $LOPRIO_LEVEL, lapic_tpr /* CHEAP_TPR */
#else /** CHEAP_TPR */
#else /* GRAB_LOPRIO */
#define GRAB_HWI \
andl $~APIC_TPR_PRIO, TPR_TARGET /* task prio to 'all HWI' */
#define GRAB_HWI /* nop */
#define GRAB_HWI_2 /* nop */
#define ARB_HWI /* nop */
#define GRAB_HWI_2 \
andl $~APIC_TPR_PRIO, lapic_tpr /* task prio to 'all HWI' */
#define ARB_HWI \
movl lapic_tpr, %eax ; /* TPR */ \
andl $~APIC_TPR_PRIO, %eax ; /* clear TPR field */ \
orl $LOPRIO_LEVEL, %eax ; /* prio to arbitrate */ \
movl %eax, lapic_tpr ; /* set it */ \
movl (%edx), %eax /* reload %eax with lock */
#endif /** CHEAP_TPR */
#else /** GRAB_LOPRIO */
#define GRAB_HWI /* nop */
#define GRAB_HWI_2 /* nop */
#define ARB_HWI /* nop */
#endif /** GRAB_LOPRIO */
#endif /* GRAB_LOPRIO */
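GRAB_LOPRIO is about interrupt routing rather than mutual exclusion: on first acquire a CPU drops its task priority to attract ALL hardware INTs, and on last release it returns to the low arbitration priority so the APICs spread INTs across CPUs again. A sketch with level values assumed for illustration (the real constants are in the APIC headers):

    #include <stdint.h>

    #define ALLHWI_LEVEL 0x00000000u   /* assumed: accept all HW INTs */
    #define LOPRIO_LEVEL 0x00000010u   /* assumed: arbitrate with peers */

    static volatile uint32_t lapic_tpr_reg;   /* stands in for lapic_tpr */

    static void grab_hwi(void) { lapic_tpr_reg = ALLHWI_LEVEL; } /* 1st acquire */
    static void arb_hwi(void)  { lapic_tpr_reg = LOPRIO_LEVEL; } /* last release */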
.text
@ -308,22 +303,11 @@ NON_GPROF_ENTRY(get_mplock)
pushl %edx
/* block all HW INTs via Task Priority Register */
#ifdef CHEAP_TPR
pushl lapic_tpr /* save current TPR */
pushfl /* save current EFLAGS */
testl $(1<<9), (%esp) /* test EI bit */
jnz 1f /* INTs currently enabled */
movl $TPR_BLOCK_HWI, lapic_tpr
#else
movl lapic_tpr, %eax /* get current TPR */
pushl %eax /* save current TPR */
pushfl /* save current EFLAGS */
testl $(1<<9), (%esp) /* test EI bit */
jnz 1f /* INTs currently enabled */
andl $~APIC_TPR_PRIO, %eax /* clear task priority field */
orl $TPR_BLOCK_HWI, %eax /* only allow IPIs */
movl %eax, lapic_tpr
#endif /** CHEAP_TPR */
movl $TPR_BLOCK_HWI, lapic_tpr /* CHEAP_TPR */
sti /* allow IPI (and only IPI) INTs */
1:
pushl $_mp_lock
@ -337,44 +321,6 @@ NON_GPROF_ENTRY(get_mplock)
popl %eax
ret
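The get_mplock entry above saves the current TPR and EFLAGS, and only if interrupts are disabled does it raise TPR to block hardware INTs and then sti, so IPIs (and only IPIs) can be serviced while spinning; both saved values are restored after _MPgetlock returns. A self-contained C rendering of that discipline, with plain variables standing in for the hardware registers and an assumed TPR_BLOCK_HWI value:

    #include <stdint.h>

    #define EFL_IF        (1u << 9)       /* EFLAGS interrupt-enable bit */
    #define TPR_BLOCK_HWI 0x00000060u     /* assumed; see the APIC headers */

    static uint32_t lapic_tpr_reg, eflags_reg;   /* register stand-ins */
    static unsigned mp_lock;
    extern void MPgetlock(unsigned *);    /* spin loop, in mplock.s */

    void get_mplock_sketch(void)
    {
        uint32_t saved_tpr   = lapic_tpr_reg;    /* pushl lapic_tpr */
        uint32_t saved_flags = eflags_reg;       /* pushfl */

        if (!(saved_flags & EFL_IF)) {           /* INTs currently disabled? */
            lapic_tpr_reg = TPR_BLOCK_HWI;       /* block HW INTs ... */
            eflags_reg |= EFL_IF;                /* ... sti: IPIs still land */
        }
        MPgetlock(&mp_lock);

        eflags_reg    = saved_flags;             /* popfl */
        lapic_tpr_reg = saved_tpr;               /* popl lapic_tpr */
    }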
/***********************************************************************
* void get_isrlock()
* -----------------
* no registers preserved, assumed the calling ISR does!
*
* Stack (after call to _MPgetlock):
*
* &mp_lock 4(%esp)
* EFLAGS 8(%esp)
* local APIC TPR 12(%esp)
*/
NON_GPROF_ENTRY(get_isrlock)
/* block all HW INTs via Task Priority Register */
#ifdef CHEAP_TPR
pushl lapic_tpr /* save current TPR */
pushfl /* save current EFLAGS */
movl $TPR_BLOCK_HWI, lapic_tpr
#else
movl lapic_tpr, %eax /* get current TPR */
pushl %eax /* save current TPR */
pushfl /* save current EFLAGS */
andl $~APIC_TPR_PRIO, %eax /* clear task priority field */
orl $TPR_BLOCK_HWI, %eax /* only allow IPIs */
movl %eax, lapic_tpr
#endif /** CHEAP_TPR */
sti /* allow IPI (and only IPI) INTs */
1:
pushl $_mp_lock
call _MPgetlock
add $4, %esp
popfl /* restore original EFLAGS */
popl lapic_tpr /* restore TPR */
ret
/***********************************************************************
* void try_mplock()
* -----------------
@ -391,20 +337,6 @@ NON_GPROF_ENTRY(try_mplock)
popl %ecx
ret
/***********************************************************************
* void try_isrlock()
* -----------------
* no registers preserved, assumed the calling ISR does!
* reg %eax == 1 if success
*/
NON_GPROF_ENTRY(try_isrlock)
pushl $_mp_lock
call _MPtrylock
add $4, %esp
ret
/***********************************************************************
* void rel_mplock()
* -----------------
@ -423,6 +355,49 @@ NON_GPROF_ENTRY(rel_mplock)
popl %eax
ret
/***********************************************************************
* void get_isrlock()
* -----------------
* no registers preserved, assumed the calling ISR does!
*
* Stack (after call to _MPgetlock):
*
* &mp_lock 4(%esp)
* EFLAGS 8(%esp)
* local APIC TPR 12(%esp)
*/
NON_GPROF_ENTRY(get_isrlock)
/* block all HW INTs via Task Priority Register */
pushl lapic_tpr /* save current TPR */
pushfl /* save current EFLAGS */
movl $TPR_BLOCK_HWI, lapic_tpr /* CHEAP_TPR */
sti /* allow IPI (and only IPI) INTs */
pushl $_mp_lock
call _MPgetlock
add $4, %esp
popfl /* restore original EFLAGS */
popl lapic_tpr /* restore TPR */
ret
/***********************************************************************
* void try_isrlock()
* -----------------
* no registers preserved, assumed the calling ISR does!
* reg %eax == 1 if success
*/
NON_GPROF_ENTRY(try_isrlock)
pushl $_mp_lock
call _MPtrylock
add $4, %esp
ret
/***********************************************************************
* void rel_isrlock()
* -----------------
@ -436,6 +411,130 @@ NON_GPROF_ENTRY(rel_isrlock)
ret
/***********************************************************************
* FPU locks
*/
NON_GPROF_ENTRY(get_fpu_lock)
pushl lapic_tpr
pushfl
movl $TPR_BLOCK_HWI, lapic_tpr /* CHEAP_TPR */
sti
pushl $_mp_lock
call _MPgetlock
add $4, %esp
popfl
popl lapic_tpr
ret
#ifdef notneeded
NON_GPROF_ENTRY(try_fpu_lock)
pushl $_mp_lock
call _MPtrylock
add $4, %esp
ret
NON_GPROF_ENTRY(rel_fpu_lock)
pushl $_mp_lock
call _MPrellock
add $4, %esp
ret
#endif /* notneeded */
/***********************************************************************
* align locks
*/
NON_GPROF_ENTRY(get_align_lock)
pushl lapic_tpr
pushfl
movl $TPR_BLOCK_HWI, lapic_tpr /* CHEAP_TPR */
sti
pushl $_mp_lock
call _MPgetlock
add $4, %esp
popfl
popl lapic_tpr
ret
#ifdef notneeded
NON_GPROF_ENTRY(try_align_lock)
pushl $_mp_lock
call _MPtrylock
add $4, %esp
ret
NON_GPROF_ENTRY(rel_align_lock)
pushl $_mp_lock
call _MPrellock
add $4, %esp
ret
#endif /* notneeded */
/***********************************************************************
* syscall locks
*/
NON_GPROF_ENTRY(get_syscall_lock)
pushl lapic_tpr
pushfl
movl $TPR_BLOCK_HWI, lapic_tpr /* CHEAP_TPR */
sti
pushl $_mp_lock
call _MPgetlock
add $4, %esp
popfl
popl lapic_tpr
ret
#ifdef notneeded
NON_GPROF_ENTRY(try_syscall_lock)
pushl $_mp_lock
call _MPtrylock
add $4, %esp
ret
NON_GPROF_ENTRY(rel_syscall_lock)
pushl $_mp_lock
call _MPrellock
add $4, %esp
ret
#endif /* notneeded */
/***********************************************************************
* int0x80_syscall locks
*/
NON_GPROF_ENTRY(get_int0x80_syscall_lock)
pushl lapic_tpr
pushfl
movl $TPR_BLOCK_HWI, lapic_tpr /* CHEAP_TPR */
sti
pushl $_mp_lock
call _MPgetlock
add $4, %esp
popfl
popl lapic_tpr
ret
#ifdef notneeded
NON_GPROF_ENTRY(try_int0x80_syscall_lock)
pushl $_mp_lock
call _MPtrylock
add $4, %esp
ret
NON_GPROF_ENTRY(rel_int0x80_syscall_lock)
pushl $_mp_lock
call _MPrellock
add $4, %esp
ret
#endif /* notneeded */
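The four get_*_lock bodies above (fpu, align, syscall, int0x80_syscall) are byte-for-byte identical. Since the per-trap names are placeholders until real partitioning happens, one way to avoid the quadruplication would be a generator macro -- a sketch of the idea, not what this commit does:

    #include <stdint.h>

    #define EFL_IF        (1u << 9)
    #define TPR_BLOCK_HWI 0x00000060u     /* assumed; see the APIC headers */

    static uint32_t lapic_tpr_reg, eflags_reg;   /* register stand-ins */
    static unsigned mp_lock;
    extern void MPgetlock(unsigned *);

    #define DEFINE_GIANT_GETTER(name)                                  \
        void name(void)                                                \
        {                                                              \
            uint32_t tpr = lapic_tpr_reg, fl = eflags_reg; /* save */  \
            lapic_tpr_reg = TPR_BLOCK_HWI;  /* block HW INTs */        \
            eflags_reg |= EFL_IF;           /* sti: IPIs still land */ \
            MPgetlock(&mp_lock);            /* spin for giant lock */  \
            eflags_reg = fl;                /* popfl */                \
            lapic_tpr_reg = tpr;            /* popl lapic_tpr */       \
        }

    DEFINE_GIANT_GETTER(get_fpu_lock_sketch)
    DEFINE_GIANT_GETTER(get_align_lock_sketch)
    DEFINE_GIANT_GETTER(get_syscall_lock_sketch)
    DEFINE_GIANT_GETTER(get_int0x80_syscall_lock_sketch)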
/***********************************************************************
*
*/

View File

@ -1,13 +1,16 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $Id: apic_vector.s,v 1.13 1997/07/31 05:42:05 fsmp Exp $
* $Id: apic_vector.s,v 1.21 1997/08/10 20:47:53 smp Exp smp $
*/
#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h> /** PEND_INTS, various counters */
#include "i386/isa/intr_machdep.h"
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num) (1 << (irq_num))
@ -31,7 +34,9 @@
lock ; /* MP-safe */ \
btsl $(irq_num),iactive ; /* lazy masking */ \
jc 6f ; /* already active */ \
TRY_ISRLOCK(irq_num) ; /* try to get lock */ \
pushl $_mp_lock ; /* GIANT_LOCK */ \
call _MPtrylock ; /* try to get lock */ \
add $4, %esp ; \
testl %eax, %eax ; /* did we get it? */ \
jnz 8f ; /* yes, enter kernel */ \
6: ; /* active or locked */ \
@ -83,7 +88,7 @@
; \
ALIGN_TEXT ; \
1: ; \
GET_MPLOCK /* SMP Spin lock */
call _get_mplock /* SMP Spin lock */
#endif /* PEND_INTS */
@ -123,7 +128,7 @@ IDTVEC(vec_name) ; \
movl %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
GET_ISRLOCK(irq_num) ; \
call _get_isrlock ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
movl $0, lapic_eoi ; \
@ -137,7 +142,9 @@ IDTVEC(vec_name) ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
REL_ISRLOCK(irq_num) ; \
pushl $_mp_lock ; /* GIANT_LOCK */ \
call _MPrellock ; \
add $4, %esp ; \
MAYBE_POPL_ES ; \
popl %ds ; \
popl %edx ; \
@ -210,7 +217,9 @@ __CONCAT(Xresume,irq_num): ; \
/* XXX skip mcounting here to avoid double count */ \
lock ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
REL_ISRLOCK(irq_num) ; \
pushl $_mp_lock ; /* GIANT_LOCK */ \
call _MPrellock ; \
add $4, %esp ; \
popl %es ; \
popl %ds ; \
popal ; \

View File

@ -36,7 +36,7 @@
*
* @(#)ipl.s
*
* $Id: ipl.s,v 1.5 1997/07/31 05:42:06 fsmp Exp $
* $Id: ipl.s,v 1.5 1997/08/10 20:47:53 smp Exp smp $
*/
@ -137,11 +137,12 @@ doreti_stop:
nop
1:
#endif /* VM86 */
#if 0
REL_MPLOCK
#else
REL_ISRLOCK(-1)
#endif
/* release the kernel lock */
pushl $_mp_lock /* GIANT_LOCK */
call _MPrellock
add $4, %esp
.globl doreti_popl_es
doreti_popl_es:
popl %es
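doreti now releases the kernel lock by calling _MPrellock directly on _mp_lock. A counterpart to the try-acquire sketch earlier, under the same assumed word layout (owner in the high byte, recursion count in the low 24 bits):

    #include <stdatomic.h>

    #define FREE 0xffffffffu              /* assumed "unowned" value */

    static _Atomic unsigned mp_lock;

    void MPrellock_sketch(void)
    {
        /* only the owning CPU calls this, so plain stores cannot race */
        unsigned old = atomic_load(&mp_lock);

        if ((old & 0x00ffffffu) == 1)
            atomic_store(&mp_lock, FREE);     /* last release: lock goes free */
        else
            atomic_store(&mp_lock, old - 1);  /* nested release: count-- */
    }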
@ -356,4 +357,4 @@ swi_tty:
#include "i386/isa/apic_ipl.s"
#else
#include "i386/isa/icu_ipl.s"
#endif /* APIC_IO */
#endif /* APIC_IO */