The last of the encapsulation of the cpl/spl/ipending machinery into a
critical region protected by the simplelock 'cpl_lock'.

Notes:

 - this code is currently controlled on a section-by-section basis with
   defines in machine/param.h.  All sections are currently enabled.

 - this code is not as clean as I would like, but that can wait till later.

 - the "giant lock" still surrounds most instances of this "cpl region".
   I still have to do the code that arbitrates setting cpl between the
   top and bottom halves of the kernel.

 - the possibility of deadlock exists; I am committing the code at this
   point so as to exercise it and detect any such cases before the
   "giant lock" is removed.
fsmp 1997-08-24 00:05:37 +00:00
parent 07ee29d4fd
commit 9240a89ca5
18 changed files with 953 additions and 239 deletions
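
As a rough illustration of the pattern the notes above describe (not part of the committed sources), the sketch below shows the cpl critical region in plain, user-level C. The names cpl, cpl_lock, s_lock/s_unlock and ss_lock mirror the diffs that follow; the spin-lock body here is a stand-in for the real assembly primitives, and splexample() is a hypothetical spl-style routine invented purely for the example.

/*
 * User-level C sketch of the cpl_lock critical region (illustration
 * only; the real primitives are the assembly s_lock/ss_lock below).
 */
#include <stdio.h>

static volatile int cpl_lock;           /* 0 = free, 1 = held */
static volatile unsigned cpl;           /* priority level being protected */

/* stand-in for the kernel's s_lock(): spin until the lock is ours */
static void
s_lock(volatile int *lkp)
{
        while (__sync_lock_test_and_set(lkp, 1))
                while (*lkp)
                        ;               /* wait for it to look free */
}

static void
s_unlock(volatile int *lkp)
{
        __sync_lock_release(lkp);
}

/*
 * In the kernel there are two flavours: CPL_LOCK (bottom half, hardware
 * INTs already blocked) uses s_lock directly, while SCPL_LOCK (top half)
 * uses ss_lock, which first blocks hardware INTs by saving lapic_tpr in
 * the per-cpu _ss_tpr and writing TPR_BLOCK_HWI, then spins as above.
 */

/* hypothetical spl-style routine: every cpl update is a short region */
static unsigned
splexample(unsigned add_mask)
{
        unsigned old;

        s_lock(&cpl_lock);
        old = cpl;
        cpl |= add_mask;
        s_unlock(&cpl_lock);
        return (old);
}

int
main(void)
{
        unsigned old = splexample(0x10);

        printf("old cpl %#x, new cpl %#x\n", old, cpl);
        return (0);
}
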

View File

@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $Id: apic_vector.s,v 1.24 1997/08/21 04:52:30 smp Exp smp $
* $Id: apic_vector.s,v 1.27 1997/08/23 05:15:12 smp Exp smp $
*/
@ -11,6 +11,18 @@
#include "i386/isa/intr_machdep.h"
#if defined(SMP) && defined(REAL_AVCPL)
#define AVCPL_LOCK CPL_LOCK
#define AVCPL_UNLOCK CPL_UNLOCK
#else
#define AVCPL_LOCK
#define AVCPL_UNLOCK
#endif
#ifdef FAST_SIMPLELOCK
#define GET_FAST_INTR_LOCK \
@ -82,7 +94,7 @@ IDTVEC(vec_name) ; \
popl %eax ; \
iret
#else
#else /* FAST_WITHOUTCPL */
#define FAST_INTR(irq_num, vec_name) \
.text ; \
@ -93,21 +105,23 @@ IDTVEC(vec_name) ; \
pushl %edx ; \
pushl %ds ; \
MAYBE_PUSHL_ES ; \
movl $KDSEL,%eax ; \
movl %ax,%ds ; \
movl $KDSEL, %eax ; \
movl %ax, %ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
GET_FAST_INTR_LOCK ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
addl $4,%esp ; \
addl $4, %esp ; \
movl $0, lapic_eoi ; \
lock ; \
incl _cnt+V_INTR ; /* book-keeping can wait */ \
movl _intr_countp + (irq_num) * 4,%eax ; \
lock ; \
incl (%eax) ; \
movl _cpl,%eax ; /* unmasking pending HWIs or SWIs? */ \
movl _cpl, %eax ; /* unmasking pending HWIs or SWIs? */ \
notl %eax ; \
andl _ipending,%eax ; \
andl _ipending, %eax ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
@ -121,27 +135,28 @@ IDTVEC(vec_name) ; \
; \
ALIGN_TEXT ; \
2: ; \
cmpb $3,_intr_nesting_level ; /* enough stack? */ \
cmpb $3, _intr_nesting_level ; /* enough stack? */ \
jae 1b ; /* no, return */ \
movl _cpl,%eax ; \
movl _cpl, %eax ; \
/* XXX next line is probably unnecessary now. */ \
movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \
movl $HWI_MASK|SWI_MASK, _cpl ; /* limit nesting ... */ \
lock ; \
incb _intr_nesting_level ; /* ... really limit it ... */ \
sti ; /* to do this as early as possible */ \
MAYBE_POPL_ES ; /* discard most of thin frame ... */ \
popl %ecx ; /* ... original %ds ... */ \
popl %edx ; \
xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \
xchgl %eax, 4(%esp) ; /* orig %eax; save cpl */ \
pushal ; /* build fat frame (grrr) ... */ \
pushl %ecx ; /* ... actually %ds ... */ \
pushl %es ; \
movl $KDSEL,%eax ; \
movl %ax,%es ; \
movl (2+8+0)*4(%esp),%ecx ; /* %ecx from thin frame ... */ \
movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \
movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \
movl $KDSEL, %eax ; \
movl %ax, %es ; \
movl (2+8+0)*4(%esp), %ecx ; /* %ecx from thin frame ... */ \
movl %ecx, (2+6)*4(%esp) ; /* ... to fat frame ... */ \
movl (2+8+1)*4(%esp), %eax ; /* ... cpl from thin frame */ \
pushl %eax ; \
subl $4,%esp ; /* junk for unit number */ \
subl $4, %esp ; /* junk for unit number */ \
MEXITCOUNT ; \
jmp _doreti
@ -215,11 +230,11 @@ IDTVEC(vec_name) ; \
testl %eax, %eax ; /* did we get it? */ \
jz 1f ; /* no */ \
; \
CPL_LOCK ; /* MP-safe */ \
AVCPL_LOCK ; /* MP-safe */ \
testl $IRQ_BIT(irq_num), _cpl ; \
jne 2f ; \
jne 2f ; /* this INT masked */ \
orl $IRQ_BIT(irq_num), _cil ; \
CPL_UNLOCK ; \
AVCPL_UNLOCK ; \
; \
movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
@ -229,12 +244,12 @@ __CONCAT(Xresume,irq_num): ; \
movl _intr_countp + (irq_num) * 4, %eax ; \
lock ; incl (%eax) ; \
; \
CPL_LOCK ; /* MP-safe */ \
AVCPL_LOCK ; /* MP-safe */ \
movl _cpl, %eax ; \
pushl %eax ; \
orl _intr_mask + (irq_num) * 4, %eax ; \
movl %eax, _cpl ; \
CPL_UNLOCK ; \
AVCPL_UNLOCK ; \
; \
pushl _intr_unit + (irq_num) * 4 ; \
sti ; \
@ -252,16 +267,16 @@ __CONCAT(Xresume,irq_num): ; \
MASK_LEVEL_IRQ(irq_num) ; \
movl $0, lapic_eoi ; /* do the EOI */ \
; \
CPL_LOCK ; /* MP-safe */ \
AVCPL_LOCK ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
CPL_UNLOCK ; \
AVCPL_UNLOCK ; \
; \
POP_FRAME ; \
iret ; \
; \
ALIGN_TEXT ; \
2: ; /* masked by cpl */ \
CPL_UNLOCK ; \
AVCPL_UNLOCK ; \
ISR_RELLOCK ; /* XXX this is going away... */ \
jmp 1b

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.12 1997/08/21 04:53:27 smp Exp smp $
* $Id: exception.s,v 1.17 1997/08/23 05:16:26 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@ -41,6 +41,19 @@
#include <machine/asmacros.h> /* miscellaneous macros */
#include <machine/param.h>
#if defined(SMP) && defined(REAL_ECPL)
#define ECPL_LOCK SCPL_LOCK
#define ECPL_UNLOCK SCPL_UNLOCK
#else /* SMP */
#define ECPL_LOCK
#define ECPL_UNLOCK
#endif /* SMP */
#define KCSEL 0x08 /* kernel code selector */
#define KDSEL 0x10 /* kernel data selector */
#define SEL_RPL_MASK 0x0003
@ -116,6 +129,7 @@ IDTVEC(mchk)
pushl $0; TRAP(T_MCHK)
IDTVEC(rsvd)
pushl $0; TRAP(T_RESERVED)
IDTVEC(fpu)
#if NNPX > 0
/*
@ -134,20 +148,33 @@ IDTVEC(fpu)
movl %ax,%ds
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
#ifdef SMP
MPLOCKED incl _cnt+V_TRAP
FPU_LOCK
ECPL_LOCK
movl _cpl,%eax
pushl %eax /* save original cpl */
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
ECPL_UNLOCK
pushl $0 /* dummy unit to finish intr frame */
call _npxintr
#else
movl _cpl,%eax
pushl %eax
pushl $0 /* dummy unit to finish intr frame */
MPLOCKED incl _cnt+V_TRAP
FPU_LOCK
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
call _npxintr
#endif /* SMP */
incb _intr_nesting_level
MEXITCOUNT
jmp _doreti
#else /* NNPX > 0 */
pushl $0; TRAP(T_ARITHTRAP)
#endif /* NNPX > 0 */
IDTVEC(align)
TRAP(T_ALIGNFLT)
@ -163,10 +190,12 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
ALIGN_LOCK
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
MPLOCKED incl _cnt+V_TRAP
ALIGN_LOCK
ECPL_LOCK
orl $SWI_AST_MASK,_cpl
ECPL_UNLOCK
call _trap
/*
@ -174,22 +203,36 @@ calltrap:
* indirectly. For traps from user mode it was 0, and for traps
* from kernel mode Oring SWI_AST_MASK into it didn't change it.
*/
#ifndef SMP
subl %eax,%eax
#endif
testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
jne 1f
#ifdef VM86
testl $PSL_VM,TF_EFLAGS(%esp)
jne 1f
#endif /* VM86 */
#ifdef SMP
ECPL_LOCK
/* XXX will this work??? */
pushl _cpl
ECPL_UNLOCK
jmp 2f
1:
pushl $0 /* cpl to restore */
2:
#else
movl _cpl,%eax
1:
pushl %eax
#endif /* SMP */
/*
* Return via _doreti to handle ASTs. Have to change trap frame
* to interrupt frame.
*/
pushl %eax
subl $4,%esp
incb _intr_nesting_level
subl $4,%esp /* dummy unit to finish intr frame */
MPLOCKED incb _intr_nesting_level
MEXITCOUNT
jmp _doreti
@ -217,15 +260,18 @@ IDTVEC(syscall)
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
SYSCALL_LOCK
MPLOCKED incl _cnt+V_SYSCALL
SYSCALL_LOCK
ECPL_LOCK
movl $SWI_AST_MASK,_cpl
ECPL_UNLOCK
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
subl $4,%esp
subl $4,%esp /* dummy unit to finish intr frame */
movb $1,_intr_nesting_level
MEXITCOUNT
jmp _doreti
@ -244,15 +290,18 @@ IDTVEC(int0x80_syscall)
movl %ax,%es
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
ALTSYSCALL_LOCK
MPLOCKED incl _cnt+V_SYSCALL
ALTSYSCALL_LOCK
ECPL_LOCK
movl $SWI_AST_MASK,_cpl
ECPL_UNLOCK
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
subl $4,%esp
subl $4,%esp /* dummy unit to finish intr frame */
movb $1,_intr_nesting_level
MEXITCOUNT
jmp _doreti
@ -272,15 +321,17 @@ ENTRY(fork_trampoline)
call %esi /* function */
addl $4,%esp
/* cut from syscall */
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
subl $4,%esp
subl $4,%esp /* dummy unit to finish intr frame */
movb $1,_intr_nesting_level
MEXITCOUNT
jmp _doreti
/*
* Include what was once config+isa-dependent code.
* XXX it should be in a stand-alone file. It's still icu-dependent and

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.12 1997/08/21 04:53:27 smp Exp smp $
* $Id: exception.s,v 1.17 1997/08/23 05:16:26 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@ -41,6 +41,19 @@
#include <machine/asmacros.h> /* miscellaneous macros */
#include <machine/param.h>
#if defined(SMP) && defined(REAL_ECPL)
#define ECPL_LOCK SCPL_LOCK
#define ECPL_UNLOCK SCPL_UNLOCK
#else /* SMP */
#define ECPL_LOCK
#define ECPL_UNLOCK
#endif /* SMP */
#define KCSEL 0x08 /* kernel code selector */
#define KDSEL 0x10 /* kernel data selector */
#define SEL_RPL_MASK 0x0003
@ -116,6 +129,7 @@ IDTVEC(mchk)
pushl $0; TRAP(T_MCHK)
IDTVEC(rsvd)
pushl $0; TRAP(T_RESERVED)
IDTVEC(fpu)
#if NNPX > 0
/*
@ -134,20 +148,33 @@ IDTVEC(fpu)
movl %ax,%ds
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
#ifdef SMP
MPLOCKED incl _cnt+V_TRAP
FPU_LOCK
ECPL_LOCK
movl _cpl,%eax
pushl %eax /* save original cpl */
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
ECPL_UNLOCK
pushl $0 /* dummy unit to finish intr frame */
call _npxintr
#else
movl _cpl,%eax
pushl %eax
pushl $0 /* dummy unit to finish intr frame */
MPLOCKED incl _cnt+V_TRAP
FPU_LOCK
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
call _npxintr
#endif /* SMP */
incb _intr_nesting_level
MEXITCOUNT
jmp _doreti
#else /* NNPX > 0 */
pushl $0; TRAP(T_ARITHTRAP)
#endif /* NNPX > 0 */
IDTVEC(align)
TRAP(T_ALIGNFLT)
@ -163,10 +190,12 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
ALIGN_LOCK
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
MPLOCKED incl _cnt+V_TRAP
ALIGN_LOCK
ECPL_LOCK
orl $SWI_AST_MASK,_cpl
ECPL_UNLOCK
call _trap
/*
@ -174,22 +203,36 @@ calltrap:
* indirectly. For traps from user mode it was 0, and for traps
* from kernel mode Oring SWI_AST_MASK into it didn't change it.
*/
#ifndef SMP
subl %eax,%eax
#endif
testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
jne 1f
#ifdef VM86
testl $PSL_VM,TF_EFLAGS(%esp)
jne 1f
#endif /* VM86 */
#ifdef SMP
ECPL_LOCK
/* XXX will this work??? */
pushl _cpl
ECPL_UNLOCK
jmp 2f
1:
pushl $0 /* cpl to restore */
2:
#else
movl _cpl,%eax
1:
pushl %eax
#endif /* SMP */
/*
* Return via _doreti to handle ASTs. Have to change trap frame
* to interrupt frame.
*/
pushl %eax
subl $4,%esp
incb _intr_nesting_level
subl $4,%esp /* dummy unit to finish intr frame */
MPLOCKED incb _intr_nesting_level
MEXITCOUNT
jmp _doreti
@ -217,15 +260,18 @@ IDTVEC(syscall)
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
SYSCALL_LOCK
MPLOCKED incl _cnt+V_SYSCALL
SYSCALL_LOCK
ECPL_LOCK
movl $SWI_AST_MASK,_cpl
ECPL_UNLOCK
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
subl $4,%esp
subl $4,%esp /* dummy unit to finish intr frame */
movb $1,_intr_nesting_level
MEXITCOUNT
jmp _doreti
@ -244,15 +290,18 @@ IDTVEC(int0x80_syscall)
movl %ax,%es
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
ALTSYSCALL_LOCK
MPLOCKED incl _cnt+V_SYSCALL
ALTSYSCALL_LOCK
ECPL_LOCK
movl $SWI_AST_MASK,_cpl
ECPL_UNLOCK
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
subl $4,%esp
subl $4,%esp /* dummy unit to finish intr frame */
movb $1,_intr_nesting_level
MEXITCOUNT
jmp _doreti
@ -272,15 +321,17 @@ ENTRY(fork_trampoline)
call %esi /* function */
addl $4,%esp
/* cut from syscall */
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
subl $4,%esp
subl $4,%esp /* dummy unit to finish intr frame */
movb $1,_intr_nesting_level
MEXITCOUNT
jmp _doreti
/*
* Include what was once config+isa-dependent code.
* XXX it should be in a stand-alone file. It's still icu-dependent and

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.93 1997/07/20 08:37:18 bde Exp $
* $Id: locore.s,v 1.3 1997/08/23 04:10:36 smp Exp smp $
*
* originally from: locore.s, by William F. Jolitz
*
@ -96,7 +96,7 @@
.set _SMP_ioapic,_SMP_prvstart + (16 * PAGE_SIZE)
.globl _cpuid,_curproc,_curpcb,_npxproc,_runtime,_cpu_lockid
.globl _common_tss,_other_cpus
.globl _common_tss,_other_cpus,_ss_tpr
.set _cpuid,_SMP_prvpage+0
.set _curproc,_SMP_prvpage+4
.set _curpcb,_SMP_prvpage+8
@ -106,7 +106,8 @@
.set _common_tss,_SMP_prvpage+28 /* 104 bytes long, next = 132 */
.set _other_cpus,_SMP_prvpage+132 /* bitmap of available CPUs,
excluding ourself */
.set _ss_tpr,_SMP_prvpage+136
/* Fetch the .set's for the local apic */
#include "i386/i386/mp_apicdefs.s"

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.93 1997/07/20 08:37:18 bde Exp $
* $Id: locore.s,v 1.3 1997/08/23 04:10:36 smp Exp smp $
*
* originally from: locore.s, by William F. Jolitz
*
@ -96,7 +96,7 @@
.set _SMP_ioapic,_SMP_prvstart + (16 * PAGE_SIZE)
.globl _cpuid,_curproc,_curpcb,_npxproc,_runtime,_cpu_lockid
.globl _common_tss,_other_cpus
.globl _common_tss,_other_cpus,_ss_tpr
.set _cpuid,_SMP_prvpage+0
.set _curproc,_SMP_prvpage+4
.set _curpcb,_SMP_prvpage+8
@ -106,7 +106,8 @@
.set _common_tss,_SMP_prvpage+28 /* 104 bytes long, next = 132 */
.set _other_cpus,_SMP_prvpage+132 /* bitmap of available CPUs,
excluding ourself */
.set _ss_tpr,_SMP_prvpage+136
/* Fetch the .set's for the local apic */
#include "i386/i386/mp_apicdefs.s"

View File

@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: icu_ipl.s,v 1.1 1997/05/24 17:02:04 smp Exp smp $
* $Id: icu_ipl.s,v 1.2 1997/08/22 05:05:05 smp Exp smp $
*/
.data
@ -45,12 +45,79 @@ _vec:
.long vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7
.long vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15
/* interrupt mask enable (all h/w off) */
.globl _imen
_imen: .long HWI_MASK
/*
*
*/
.text
SUPERALIGN_TEXT
/*
* Interrupt priority mechanism
* -- soft splXX masks with group mechanism (cpl)
* -- h/w masks for currently active or unused interrupts (imen)
* -- ipending = active interrupts currently masked by cpl
*/
ENTRY(splz)
/*
* The caller has restored cpl and checked that (ipending & ~cpl)
* is nonzero. We have to repeat the check since if there is an
* interrupt while we're looking, _doreti processing for the
* interrupt will handle all the unmasked pending interrupts
* because we restored early. We're repeating the calculation
* of (ipending & ~cpl) anyway so that the caller doesn't have
* to pass it, so this only costs one "jne". "bsfl %ecx,%ecx"
* is undefined when %ecx is 0 so we can't rely on the secondary
* btrl tests.
*/
movl _cpl,%eax
splz_next:
/*
* We don't need any locking here. (ipending & ~cpl) cannot grow
* while we're looking at it - any interrupt will shrink it to 0.
*/
movl %eax,%ecx
notl %ecx
andl _ipending,%ecx
jne splz_unpend
ret
ALIGN_TEXT
splz_unpend:
bsfl %ecx,%ecx
btrl %ecx,_ipending
jnc splz_next
movl ihandlers(,%ecx,4),%edx
testl %edx,%edx
je splz_next /* "can't happen" */
cmpl $NHWI,%ecx
jae splz_swi
/*
* We would prefer to call the intr handler directly here but that
* doesn't work for badly behaved handlers that want the interrupt
* frame. Also, there's a problem determining the unit number.
* We should change the interface so that the unit number is not
* determined at config time.
*/
jmp *_vec(,%ecx,4)
ALIGN_TEXT
splz_swi:
cmpl $SWI_AST,%ecx
je splz_next /* "can't happen" */
pushl %eax
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
call %edx
popl %eax
movl %eax,_cpl
jmp splz_next
/*
* Fake clock interrupt(s) so that they appear to come from our caller instead
* of from here, so that system profiling works.

View File

@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: icu_ipl.s,v 1.1 1997/05/24 17:02:04 smp Exp smp $
* $Id: icu_ipl.s,v 1.2 1997/08/22 05:05:05 smp Exp smp $
*/
.data
@ -45,12 +45,79 @@ _vec:
.long vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7
.long vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15
/* interrupt mask enable (all h/w off) */
.globl _imen
_imen: .long HWI_MASK
/*
*
*/
.text
SUPERALIGN_TEXT
/*
* Interrupt priority mechanism
* -- soft splXX masks with group mechanism (cpl)
* -- h/w masks for currently active or unused interrupts (imen)
* -- ipending = active interrupts currently masked by cpl
*/
ENTRY(splz)
/*
* The caller has restored cpl and checked that (ipending & ~cpl)
* is nonzero. We have to repeat the check since if there is an
* interrupt while we're looking, _doreti processing for the
* interrupt will handle all the unmasked pending interrupts
* because we restored early. We're repeating the calculation
* of (ipending & ~cpl) anyway so that the caller doesn't have
* to pass it, so this only costs one "jne". "bsfl %ecx,%ecx"
* is undefined when %ecx is 0 so we can't rely on the secondary
* btrl tests.
*/
movl _cpl,%eax
splz_next:
/*
* We don't need any locking here. (ipending & ~cpl) cannot grow
* while we're looking at it - any interrupt will shrink it to 0.
*/
movl %eax,%ecx
notl %ecx
andl _ipending,%ecx
jne splz_unpend
ret
ALIGN_TEXT
splz_unpend:
bsfl %ecx,%ecx
btrl %ecx,_ipending
jnc splz_next
movl ihandlers(,%ecx,4),%edx
testl %edx,%edx
je splz_next /* "can't happen" */
cmpl $NHWI,%ecx
jae splz_swi
/*
* We would prefer to call the intr handler directly here but that
* doesn't work for badly behaved handlers that want the interrupt
* frame. Also, there's a problem determining the unit number.
* We should change the interface so that the unit number is not
* determined at config time.
*/
jmp *_vec(,%ecx,4)
ALIGN_TEXT
splz_swi:
cmpl $SWI_AST,%ecx
je splz_next /* "can't happen" */
pushl %eax
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
call %edx
popl %eax
movl %eax,_cpl
jmp splz_next
/*
* Fake clock interrupt(s) so that they appear to come from our caller instead
* of from here, so that system profiling works.

View File

@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $Id: apic_vector.s,v 1.24 1997/08/21 04:52:30 smp Exp smp $
* $Id: apic_vector.s,v 1.27 1997/08/23 05:15:12 smp Exp smp $
*/
@ -11,6 +11,18 @@
#include "i386/isa/intr_machdep.h"
#if defined(SMP) && defined(REAL_AVCPL)
#define AVCPL_LOCK CPL_LOCK
#define AVCPL_UNLOCK CPL_UNLOCK
#else
#define AVCPL_LOCK
#define AVCPL_UNLOCK
#endif
#ifdef FAST_SIMPLELOCK
#define GET_FAST_INTR_LOCK \
@ -82,7 +94,7 @@ IDTVEC(vec_name) ; \
popl %eax ; \
iret
#else
#else /* FAST_WITHOUTCPL */
#define FAST_INTR(irq_num, vec_name) \
.text ; \
@ -93,21 +105,23 @@ IDTVEC(vec_name) ; \
pushl %edx ; \
pushl %ds ; \
MAYBE_PUSHL_ES ; \
movl $KDSEL,%eax ; \
movl %ax,%ds ; \
movl $KDSEL, %eax ; \
movl %ax, %ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
GET_FAST_INTR_LOCK ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
addl $4,%esp ; \
addl $4, %esp ; \
movl $0, lapic_eoi ; \
lock ; \
incl _cnt+V_INTR ; /* book-keeping can wait */ \
movl _intr_countp + (irq_num) * 4,%eax ; \
lock ; \
incl (%eax) ; \
movl _cpl,%eax ; /* unmasking pending HWIs or SWIs? */ \
movl _cpl, %eax ; /* unmasking pending HWIs or SWIs? */ \
notl %eax ; \
andl _ipending,%eax ; \
andl _ipending, %eax ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
@ -121,27 +135,28 @@ IDTVEC(vec_name) ; \
; \
ALIGN_TEXT ; \
2: ; \
cmpb $3,_intr_nesting_level ; /* enough stack? */ \
cmpb $3, _intr_nesting_level ; /* enough stack? */ \
jae 1b ; /* no, return */ \
movl _cpl,%eax ; \
movl _cpl, %eax ; \
/* XXX next line is probably unnecessary now. */ \
movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \
movl $HWI_MASK|SWI_MASK, _cpl ; /* limit nesting ... */ \
lock ; \
incb _intr_nesting_level ; /* ... really limit it ... */ \
sti ; /* to do this as early as possible */ \
MAYBE_POPL_ES ; /* discard most of thin frame ... */ \
popl %ecx ; /* ... original %ds ... */ \
popl %edx ; \
xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \
xchgl %eax, 4(%esp) ; /* orig %eax; save cpl */ \
pushal ; /* build fat frame (grrr) ... */ \
pushl %ecx ; /* ... actually %ds ... */ \
pushl %es ; \
movl $KDSEL,%eax ; \
movl %ax,%es ; \
movl (2+8+0)*4(%esp),%ecx ; /* %ecx from thin frame ... */ \
movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \
movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \
movl $KDSEL, %eax ; \
movl %ax, %es ; \
movl (2+8+0)*4(%esp), %ecx ; /* %ecx from thin frame ... */ \
movl %ecx, (2+6)*4(%esp) ; /* ... to fat frame ... */ \
movl (2+8+1)*4(%esp), %eax ; /* ... cpl from thin frame */ \
pushl %eax ; \
subl $4,%esp ; /* junk for unit number */ \
subl $4, %esp ; /* junk for unit number */ \
MEXITCOUNT ; \
jmp _doreti
@ -215,11 +230,11 @@ IDTVEC(vec_name) ; \
testl %eax, %eax ; /* did we get it? */ \
jz 1f ; /* no */ \
; \
CPL_LOCK ; /* MP-safe */ \
AVCPL_LOCK ; /* MP-safe */ \
testl $IRQ_BIT(irq_num), _cpl ; \
jne 2f ; \
jne 2f ; /* this INT masked */ \
orl $IRQ_BIT(irq_num), _cil ; \
CPL_UNLOCK ; \
AVCPL_UNLOCK ; \
; \
movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
@ -229,12 +244,12 @@ __CONCAT(Xresume,irq_num): ; \
movl _intr_countp + (irq_num) * 4, %eax ; \
lock ; incl (%eax) ; \
; \
CPL_LOCK ; /* MP-safe */ \
AVCPL_LOCK ; /* MP-safe */ \
movl _cpl, %eax ; \
pushl %eax ; \
orl _intr_mask + (irq_num) * 4, %eax ; \
movl %eax, _cpl ; \
CPL_UNLOCK ; \
AVCPL_UNLOCK ; \
; \
pushl _intr_unit + (irq_num) * 4 ; \
sti ; \
@ -252,16 +267,16 @@ __CONCAT(Xresume,irq_num): ; \
MASK_LEVEL_IRQ(irq_num) ; \
movl $0, lapic_eoi ; /* do the EOI */ \
; \
CPL_LOCK ; /* MP-safe */ \
AVCPL_LOCK ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
CPL_UNLOCK ; \
AVCPL_UNLOCK ; \
; \
POP_FRAME ; \
iret ; \
; \
ALIGN_TEXT ; \
2: ; /* masked by cpl */ \
CPL_UNLOCK ; \
AVCPL_UNLOCK ; \
ISR_RELLOCK ; /* XXX this is going away... */ \
jmp 1b

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.12 1997/08/21 04:53:27 smp Exp smp $
* $Id: exception.s,v 1.17 1997/08/23 05:16:26 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@ -41,6 +41,19 @@
#include <machine/asmacros.h> /* miscellaneous macros */
#include <machine/param.h>
#if defined(SMP) && defined(REAL_ECPL)
#define ECPL_LOCK SCPL_LOCK
#define ECPL_UNLOCK SCPL_UNLOCK
#else /* SMP */
#define ECPL_LOCK
#define ECPL_UNLOCK
#endif /* SMP */
#define KCSEL 0x08 /* kernel code selector */
#define KDSEL 0x10 /* kernel data selector */
#define SEL_RPL_MASK 0x0003
@ -116,6 +129,7 @@ IDTVEC(mchk)
pushl $0; TRAP(T_MCHK)
IDTVEC(rsvd)
pushl $0; TRAP(T_RESERVED)
IDTVEC(fpu)
#if NNPX > 0
/*
@ -134,20 +148,33 @@ IDTVEC(fpu)
movl %ax,%ds
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
#ifdef SMP
MPLOCKED incl _cnt+V_TRAP
FPU_LOCK
ECPL_LOCK
movl _cpl,%eax
pushl %eax /* save original cpl */
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
ECPL_UNLOCK
pushl $0 /* dummy unit to finish intr frame */
call _npxintr
#else
movl _cpl,%eax
pushl %eax
pushl $0 /* dummy unit to finish intr frame */
MPLOCKED incl _cnt+V_TRAP
FPU_LOCK
incl _cnt+V_TRAP
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
call _npxintr
#endif /* SMP */
incb _intr_nesting_level
MEXITCOUNT
jmp _doreti
#else /* NNPX > 0 */
pushl $0; TRAP(T_ARITHTRAP)
#endif /* NNPX > 0 */
IDTVEC(align)
TRAP(T_ALIGNFLT)
@ -163,10 +190,12 @@ alltraps_with_regs_pushed:
movl %ax,%es
FAKE_MCOUNT(12*4(%esp))
calltrap:
ALIGN_LOCK
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
MPLOCKED incl _cnt+V_TRAP
ALIGN_LOCK
ECPL_LOCK
orl $SWI_AST_MASK,_cpl
ECPL_UNLOCK
call _trap
/*
@ -174,22 +203,36 @@ calltrap:
* indirectly. For traps from user mode it was 0, and for traps
* from kernel mode Oring SWI_AST_MASK into it didn't change it.
*/
#ifndef SMP
subl %eax,%eax
#endif
testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
jne 1f
#ifdef VM86
testl $PSL_VM,TF_EFLAGS(%esp)
jne 1f
#endif /* VM86 */
#ifdef SMP
ECPL_LOCK
/* XXX will this work??? */
pushl _cpl
ECPL_UNLOCK
jmp 2f
1:
pushl $0 /* cpl to restore */
2:
#else
movl _cpl,%eax
1:
pushl %eax
#endif /* SMP */
/*
* Return via _doreti to handle ASTs. Have to change trap frame
* to interrupt frame.
*/
pushl %eax
subl $4,%esp
incb _intr_nesting_level
subl $4,%esp /* dummy unit to finish intr frame */
MPLOCKED incb _intr_nesting_level
MEXITCOUNT
jmp _doreti
@ -217,15 +260,18 @@ IDTVEC(syscall)
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
FAKE_MCOUNT(12*4(%esp))
SYSCALL_LOCK
MPLOCKED incl _cnt+V_SYSCALL
SYSCALL_LOCK
ECPL_LOCK
movl $SWI_AST_MASK,_cpl
ECPL_UNLOCK
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
subl $4,%esp
subl $4,%esp /* dummy unit to finish intr frame */
movb $1,_intr_nesting_level
MEXITCOUNT
jmp _doreti
@ -244,15 +290,18 @@ IDTVEC(int0x80_syscall)
movl %ax,%es
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(12*4(%esp))
ALTSYSCALL_LOCK
MPLOCKED incl _cnt+V_SYSCALL
ALTSYSCALL_LOCK
ECPL_LOCK
movl $SWI_AST_MASK,_cpl
ECPL_UNLOCK
call _syscall
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
subl $4,%esp
subl $4,%esp /* dummy unit to finish intr frame */
movb $1,_intr_nesting_level
MEXITCOUNT
jmp _doreti
@ -272,15 +321,17 @@ ENTRY(fork_trampoline)
call %esi /* function */
addl $4,%esp
/* cut from syscall */
/*
* Return via _doreti to handle ASTs.
*/
pushl $0 /* cpl to restore */
subl $4,%esp
subl $4,%esp /* dummy unit to finish intr frame */
movb $1,_intr_nesting_level
MEXITCOUNT
jmp _doreti
/*
* Include what was once config+isa-dependent code.
* XXX it should be in a stand-alone file. It's still icu-dependent and

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.93 1997/07/20 08:37:18 bde Exp $
* $Id: locore.s,v 1.3 1997/08/23 04:10:36 smp Exp smp $
*
* originally from: locore.s, by William F. Jolitz
*
@ -96,7 +96,7 @@
.set _SMP_ioapic,_SMP_prvstart + (16 * PAGE_SIZE)
.globl _cpuid,_curproc,_curpcb,_npxproc,_runtime,_cpu_lockid
.globl _common_tss,_other_cpus
.globl _common_tss,_other_cpus,_ss_tpr
.set _cpuid,_SMP_prvpage+0
.set _curproc,_SMP_prvpage+4
.set _curpcb,_SMP_prvpage+8
@ -106,7 +106,8 @@
.set _common_tss,_SMP_prvpage+28 /* 104 bytes long, next = 132 */
.set _other_cpus,_SMP_prvpage+132 /* bitmap of available CPUs,
excluding ourself */
.set _ss_tpr,_SMP_prvpage+136
/* Fetch the .set's for the local apic */
#include "i386/i386/mp_apicdefs.s"

View File

@ -32,7 +32,7 @@
* SUCH DAMAGE.
*
* from: Steve McCanne's microtime code
* $Id: microtime.s,v 1.4 1997/08/21 04:53:27 smp Exp smp $
* $Id: microtime.s,v 1.6 1997/08/23 05:16:26 smp Exp smp $
*/
#include "opt_cpu.h"
@ -111,10 +111,18 @@ ENTRY(microtime)
movl _timer0_max_count, %edx /* prepare for 2 uses */
#ifdef APIC_IO
CPL_LOCK /* MP-safe */
#if defined(REAL_MCPL) /* XXX do we need this??? */
pushl %ecx /* s_lock destroys %eax, %ecx */
CPL_LOCK /* MP-safe, INTs disabled above */
popl %ecx /* restore %ecx */
movl _ipending, %eax
movl $0, _cpl_lock /* s_unlock would destroy %eax */
testl %eax, _mask8254 /* is soft timer interrupt pending? */
#else /* REAL_MCPL */
/** XXX FIXME: take our chances with a race, is this OK? */
movl _ipending, %eax
testl %eax, _mask8254 /* is soft timer interrupt pending? */
CPL_UNLOCK
#endif /* REAL_MCPL */
#else
testb $IRQ0, _ipending /* is soft timer interrupt pending? */
#endif /* APIC_IO */

View File

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: simplelock.s,v 1.1 1997/07/24 23:51:33 fsmp Exp $
* $Id: simplelock.s,v 1.6 1997/08/23 04:10:11 smp Exp smp $
*/
/*
@ -30,7 +30,8 @@
*/
#include <machine/asmacros.h> /* miscellaneous macros */
#include <i386/isa/intr_machdep.h>
/*
* The following implements the primitives described in i386/i386/param.h
@ -145,3 +146,50 @@ ENTRY(test_and_set)
ret
#endif /* needed */
/*
* These versions of simple_lock block hardware INTs,
* making them suitable for regions accessed by both top and bottom levels.
* This is done by saving the current value of the TPR in a per-cpu global,
* then taking the lock. On the way out the lock is released, then the
* original value of the TPR is restored.
* Because of this, they must ONLY be used for SHORT, deterministic paths!
*
* Note:
* It would appear to be "bad behaviour" to blindly store a value in
* ss_tpr, as this could destroy the previous contents. But since ss_tpr
* is a per-cpu variable, and it's fatal to attempt to acquire a simplelock
* that you already hold, we get away with it. This needs to be cleaned
* up someday...
*/
/*
* void ss_lock(struct simplelock *lkp)
*/
ENTRY(ss_lock)
movl lapic_tpr, %eax
movl $TPR_BLOCK_HWI, lapic_tpr
movl %eax, _ss_tpr
movl 4(%esp), %eax /* get the address of the lock */
movl $1, %ecx
ssetlock:
xchgl %ecx, (%eax)
testl %ecx, %ecx
jz sgotit /* it was clear, return */
swait:
cmpl $0, (%eax) /* wait to empty */
jne swait /* still set... */
jmp ssetlock /* empty again, try once more */
sgotit:
ret
/*
* void ss_unlock(struct simplelock *lkp)
*/
ENTRY(ss_unlock)
movl 4(%esp), %eax /* get the address of the lock */
movl $0, (%eax)
movl _ss_tpr, %eax
movl %eax, lapic_tpr
ret

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)param.h 5.8 (Berkeley) 6/28/91
* $Id: param.h,v 1.35 1997/08/21 05:07:56 fsmp Exp $
* $Id: param.h,v 1.14 1997/08/23 05:14:23 smp Exp $
*/
#ifndef _MACHINE_PARAM_H_
@ -145,6 +145,19 @@
#ifndef _SIMPLELOCK_H_
#define _SIMPLELOCK_H_
/*
* XXX some temp debug control of cpl locks
*/
#define REAL_ECPL /* exception.s: SCPL_LOCK/SCPL_UNLOCK */
#define REAL_ICPL /* ipl.s: CPL_LOCK/CPL_UNLOCK/FAST */
#define REAL_AICPL /* apic_ipl.s: SCPL_LOCK/SCPL_UNLOCK */
#define REAL_AVCPL /* apic_vector.s: CPL_LOCK/CPL_UNLOCK */
#define REAL_IFCPL /* ipl_funcs.c: SCPL_LOCK/SCPL_UNLOCK */
#define REAL_MCPL_NOT /* microtime.s: CPL_LOCK/movl $0,_cpl_lock */
#ifdef LOCORE
#ifdef SMP
@ -188,13 +201,22 @@
addl $4, %esp
/*
* Protects spl updates as a critical region.
* Variations of CPL_LOCK protect spl updates as a critical region.
* Items within this 'region' include:
* cpl
* cil
* ipending
* ???
*/
/*
* Bottom half routines, i.e. those already protected from INTs.
*
* Used in:
* sys/i386/i386/microtime.s (XXX currently NOT used, possible race?)
* sys/i386/isa/ipl.s: _doreti
* sys/i386/isa/apic_vector.s: _Xintr0, ..., _Xintr23
*/
#define CPL_LOCK \
pushl $_cpl_lock ; /* address of lock */ \
call _s_lock ; /* MP-safe */ \
@ -205,6 +227,23 @@
call _s_unlock ; /* MP-safe */ \
addl $4, %esp
/*
* INT safe version for top half of kernel.
*
* Used in:
* sys/i386/i386/exception.s: _Xfpu, _Xalign, _Xsyscall, _Xint0x80_syscall
* sys/i386/isa/apic_ipl.s: splz()
*/
#define SCPL_LOCK \
pushl $_cpl_lock ; \
call _ss_lock ; \
addl $4, %esp
#define SCPL_UNLOCK \
pushl $_cpl_lock ; \
call _ss_unlock ; \
addl $4, %esp
#else /* SMP */
#define MPLOCKED /* NOP */
@ -220,6 +259,22 @@
#ifdef SMP
/*
* Protects cpl/cil/ipending data as a critical region.
*
* Used in:
* sys/i386/isa/ipl_funcs.c: DO_SETBITS, softclockpending(), GENSPL,
* spl0(), splx(), splq()
*/
/* Bottom half */
#define CPL_LOCK() s_lock(&cpl_lock)
#define CPL_UNLOCK() s_unlock(&cpl_lock)
/* INT safe version for top half of kernel */
#define SCPL_LOCK() ss_lock(&cpl_lock)
#define SCPL_UNLOCK() ss_unlock(&cpl_lock)
/*
* Protects com/tty data as a critical region.
*/
@ -228,6 +283,11 @@
#else /* SMP */
#define CPL_LOCK()
#define CPL_UNLOCK()
#define SCPL_LOCK()
#define SCPL_UNLOCK()
#define COM_LOCK()
#define COM_UNLOCK()
@ -251,6 +311,8 @@ void s_lock_init __P((struct simplelock *));
void s_lock __P((struct simplelock *));
int s_lock_try __P((struct simplelock *));
void s_unlock __P((struct simplelock *));
void ss_lock __P((struct simplelock *));
void ss_unlock __P((struct simplelock *));
/* global data in mp_machdep.c */
extern struct simplelock imen_lock;

View File

@ -22,13 +22,29 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: apic_ipl.s,v 1.23 1997/08/21 04:52:30 smp Exp smp $
* $Id: apic_ipl.s,v 1.28 1997/08/23 05:15:12 smp Exp smp $
*/
#if defined(SMP) && defined(REAL_AICPL)
#define AICPL_LOCK SCPL_LOCK
#define AICPL_UNLOCK SCPL_UNLOCK
#else /* SMP */
#define AICPL_LOCK
#define AICPL_UNLOCK
#endif /* SMP */
.data
ALIGN_DATA
/* current INTerrupt level */
.globl _cil
_cil: .long 0
/* this allows us to change the 8254 APIC pin# assignment */
.globl _Xintr8254
_Xintr8254:
@ -46,31 +62,6 @@ _vec:
.long vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15
.long vec16, vec17, vec18, vec19, vec20, vec21, vec22, vec23
/* various simple locks */
.align 2 /* MUST be 32bit aligned */
#if 0
/* critical region around IO APIC */
.globl _imen_lock
_imen_lock:
.long 0
/* critical region around spl & cpl */
.globl _cpl_lock
_cpl_lock:
.long 0
/* critical region around FAST_INTR() routines */
.globl _fast_intr_lock
_fast_intr_lock:
.long 0
/* critical region around INTR() routines */
.globl _intr_lock
_intr_lock:
.long 0
#endif
/*
* Note:
* This is the UP equivalent of _imen.
@ -83,16 +74,86 @@ _intr_lock:
* MAYBE_UNMASK_IRQ
* imen_dump()
*/
.align 2 /* MUST be 32bit aligned */
.globl _apic_imen
_apic_imen:
.long HWI_MASK
/*
*
*/
.text
SUPERALIGN_TEXT
/*
* Interrupt priority mechanism
* -- soft splXX masks with group mechanism (cpl)
* -- h/w masks for currently active or unused interrupts (imen)
* -- ipending = active interrupts currently masked by cpl
*/
ENTRY(splz)
/*
* The caller has restored cpl and checked that (ipending & ~cpl)
* is nonzero. We have to repeat the check since if there is an
* interrupt while we're looking, _doreti processing for the
* interrupt will handle all the unmasked pending interrupts
* because we restored early. We're repeating the calculation
* of (ipending & ~cpl) anyway so that the caller doesn't have
* to pass it, so this only costs one "jne". "bsfl %ecx,%ecx"
* is undefined when %ecx is 0 so we can't rely on the secondary
* btrl tests.
*/
AICPL_LOCK
movl _cpl,%eax
splz_next:
/*
* We don't need any locking here. (ipending & ~cpl) cannot grow
* while we're looking at it - any interrupt will shrink it to 0.
*/
movl %eax,%ecx
notl %ecx /* set bit = unmasked level */
andl _ipending,%ecx /* set bit = unmasked pending INT */
jne splz_unpend
AICPL_UNLOCK
ret
ALIGN_TEXT
splz_unpend:
bsfl %ecx,%ecx
lock
btrl %ecx, _ipending
jnc splz_next
movl ihandlers(,%ecx,4),%edx
testl %edx,%edx
je splz_next /* "can't happen" */
cmpl $NHWI,%ecx
jae splz_swi
AICPL_UNLOCK
/*
* We would prefer to call the intr handler directly here but that
* doesn't work for badly behaved handlers that want the interrupt
* frame. Also, there's a problem determining the unit number.
* We should change the interface so that the unit number is not
* determined at config time.
*/
jmp *_vec(,%ecx,4)
ALIGN_TEXT
splz_swi:
cmpl $SWI_AST,%ecx
je splz_next /* "can't happen" */
pushl %eax
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
AICPL_UNLOCK
call %edx
AICPL_LOCK
popl %eax
movl %eax,_cpl
jmp splz_next
/*
* Fake clock interrupt(s) so that they appear to come from our caller instead
* of from here, so that system profiling works.

View File

@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $Id: apic_vector.s,v 1.24 1997/08/21 04:52:30 smp Exp smp $
* $Id: apic_vector.s,v 1.27 1997/08/23 05:15:12 smp Exp smp $
*/
@ -11,6 +11,18 @@
#include "i386/isa/intr_machdep.h"
#if defined(SMP) && defined(REAL_AVCPL)
#define AVCPL_LOCK CPL_LOCK
#define AVCPL_UNLOCK CPL_UNLOCK
#else
#define AVCPL_LOCK
#define AVCPL_UNLOCK
#endif
#ifdef FAST_SIMPLELOCK
#define GET_FAST_INTR_LOCK \
@ -82,7 +94,7 @@ IDTVEC(vec_name) ; \
popl %eax ; \
iret
#else
#else /* FAST_WITHOUTCPL */
#define FAST_INTR(irq_num, vec_name) \
.text ; \
@ -93,21 +105,23 @@ IDTVEC(vec_name) ; \
pushl %edx ; \
pushl %ds ; \
MAYBE_PUSHL_ES ; \
movl $KDSEL,%eax ; \
movl %ax,%ds ; \
movl $KDSEL, %eax ; \
movl %ax, %ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
GET_FAST_INTR_LOCK ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
addl $4,%esp ; \
addl $4, %esp ; \
movl $0, lapic_eoi ; \
lock ; \
incl _cnt+V_INTR ; /* book-keeping can wait */ \
movl _intr_countp + (irq_num) * 4,%eax ; \
lock ; \
incl (%eax) ; \
movl _cpl,%eax ; /* unmasking pending HWIs or SWIs? */ \
movl _cpl, %eax ; /* unmasking pending HWIs or SWIs? */ \
notl %eax ; \
andl _ipending,%eax ; \
andl _ipending, %eax ; \
jne 2f ; /* yes, maybe handle them */ \
1: ; \
MEXITCOUNT ; \
@ -121,27 +135,28 @@ IDTVEC(vec_name) ; \
; \
ALIGN_TEXT ; \
2: ; \
cmpb $3,_intr_nesting_level ; /* enough stack? */ \
cmpb $3, _intr_nesting_level ; /* enough stack? */ \
jae 1b ; /* no, return */ \
movl _cpl,%eax ; \
movl _cpl, %eax ; \
/* XXX next line is probably unnecessary now. */ \
movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \
movl $HWI_MASK|SWI_MASK, _cpl ; /* limit nesting ... */ \
lock ; \
incb _intr_nesting_level ; /* ... really limit it ... */ \
sti ; /* to do this as early as possible */ \
MAYBE_POPL_ES ; /* discard most of thin frame ... */ \
popl %ecx ; /* ... original %ds ... */ \
popl %edx ; \
xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \
xchgl %eax, 4(%esp) ; /* orig %eax; save cpl */ \
pushal ; /* build fat frame (grrr) ... */ \
pushl %ecx ; /* ... actually %ds ... */ \
pushl %es ; \
movl $KDSEL,%eax ; \
movl %ax,%es ; \
movl (2+8+0)*4(%esp),%ecx ; /* %ecx from thin frame ... */ \
movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \
movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \
movl $KDSEL, %eax ; \
movl %ax, %es ; \
movl (2+8+0)*4(%esp), %ecx ; /* %ecx from thin frame ... */ \
movl %ecx, (2+6)*4(%esp) ; /* ... to fat frame ... */ \
movl (2+8+1)*4(%esp), %eax ; /* ... cpl from thin frame */ \
pushl %eax ; \
subl $4,%esp ; /* junk for unit number */ \
subl $4, %esp ; /* junk for unit number */ \
MEXITCOUNT ; \
jmp _doreti
@ -215,11 +230,11 @@ IDTVEC(vec_name) ; \
testl %eax, %eax ; /* did we get it? */ \
jz 1f ; /* no */ \
; \
CPL_LOCK ; /* MP-safe */ \
AVCPL_LOCK ; /* MP-safe */ \
testl $IRQ_BIT(irq_num), _cpl ; \
jne 2f ; \
jne 2f ; /* this INT masked */ \
orl $IRQ_BIT(irq_num), _cil ; \
CPL_UNLOCK ; \
AVCPL_UNLOCK ; \
; \
movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
@ -229,12 +244,12 @@ __CONCAT(Xresume,irq_num): ; \
movl _intr_countp + (irq_num) * 4, %eax ; \
lock ; incl (%eax) ; \
; \
CPL_LOCK ; /* MP-safe */ \
AVCPL_LOCK ; /* MP-safe */ \
movl _cpl, %eax ; \
pushl %eax ; \
orl _intr_mask + (irq_num) * 4, %eax ; \
movl %eax, _cpl ; \
CPL_UNLOCK ; \
AVCPL_UNLOCK ; \
; \
pushl _intr_unit + (irq_num) * 4 ; \
sti ; \
@ -252,16 +267,16 @@ __CONCAT(Xresume,irq_num): ; \
MASK_LEVEL_IRQ(irq_num) ; \
movl $0, lapic_eoi ; /* do the EOI */ \
; \
CPL_LOCK ; /* MP-safe */ \
AVCPL_LOCK ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
CPL_UNLOCK ; \
AVCPL_UNLOCK ; \
; \
POP_FRAME ; \
iret ; \
; \
ALIGN_TEXT ; \
2: ; /* masked by cpl */ \
CPL_UNLOCK ; \
AVCPL_UNLOCK ; \
ISR_RELLOCK ; /* XXX this is going away... */ \
jmp 1b

View File

@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: icu_ipl.s,v 1.1 1997/05/24 17:02:04 smp Exp smp $
* $Id: icu_ipl.s,v 1.2 1997/08/22 05:05:05 smp Exp smp $
*/
.data
@ -45,12 +45,79 @@ _vec:
.long vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7
.long vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15
/* interrupt mask enable (all h/w off) */
.globl _imen
_imen: .long HWI_MASK
/*
*
*/
.text
SUPERALIGN_TEXT
/*
* Interrupt priority mechanism
* -- soft splXX masks with group mechanism (cpl)
* -- h/w masks for currently active or unused interrupts (imen)
* -- ipending = active interrupts currently masked by cpl
*/
ENTRY(splz)
/*
* The caller has restored cpl and checked that (ipending & ~cpl)
* is nonzero. We have to repeat the check since if there is an
* interrupt while we're looking, _doreti processing for the
* interrupt will handle all the unmasked pending interrupts
* because we restored early. We're repeating the calculation
* of (ipending & ~cpl) anyway so that the caller doesn't have
* to pass it, so this only costs one "jne". "bsfl %ecx,%ecx"
* is undefined when %ecx is 0 so we can't rely on the secondary
* btrl tests.
*/
movl _cpl,%eax
splz_next:
/*
* We don't need any locking here. (ipending & ~cpl) cannot grow
* while we're looking at it - any interrupt will shrink it to 0.
*/
movl %eax,%ecx
notl %ecx
andl _ipending,%ecx
jne splz_unpend
ret
ALIGN_TEXT
splz_unpend:
bsfl %ecx,%ecx
btrl %ecx,_ipending
jnc splz_next
movl ihandlers(,%ecx,4),%edx
testl %edx,%edx
je splz_next /* "can't happen" */
cmpl $NHWI,%ecx
jae splz_swi
/*
* We would prefer to call the intr handler directly here but that
* doesn't work for badly behaved handlers that want the interrupt
* frame. Also, there's a problem determining the unit number.
* We should change the interface so that the unit number is not
* determined at config time.
*/
jmp *_vec(,%ecx,4)
ALIGN_TEXT
splz_swi:
cmpl $SWI_AST,%ecx
je splz_next /* "can't happen" */
pushl %eax
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
call %edx
popl %eax
movl %eax,_cpl
jmp splz_next
/*
* Fake clock interrupt(s) so that they appear to come from our caller instead
* of from here, so that system profiling works.

View File

@ -36,10 +36,24 @@
*
* @(#)ipl.s
*
* $Id: ipl.s,v 1.8 1997/08/20 19:46:22 smp Exp $
* $Id: ipl.s,v 1.13 1997/08/23 05:15:12 smp Exp smp $
*/
#if defined(SMP) && defined(REAL_ICPL)
#define ICPL_LOCK CPL_LOCK
#define ICPL_UNLOCK CPL_UNLOCK
#define FAST_ICPL_UNLOCK movl $0, _cpl_lock
#else /* SMP */
#define ICPL_LOCK
#define ICPL_UNLOCK
#define FAST_ICPL_UNLOCK
#endif /* SMP */
/*
* AT/386
* Vector interrupt control section
@ -52,16 +66,6 @@
.globl _cpl
_cpl: .long HWI_MASK | SWI_MASK
/* current INTerrupt level */
.globl _cil
_cil: .long 0
#ifndef APIC_IO
/* interrupt mask enable (all h/w off) */
.globl _imen
_imen: .long HWI_MASK
#endif /* APIC_IO */
.globl _tty_imask
_tty_imask: .long 0
.globl _bio_imask
@ -75,9 +79,9 @@ _softnet_imask: .long SWI_NET_MASK
.globl _softtty_imask
_softtty_imask: .long SWI_TTY_MASK
.globl _ipending
/* pending interrupts blocked by splxxx() */
.globl _ipending
_ipending: .long 0
/* set with bits for which queue to service */
@ -115,14 +119,23 @@ doreti_next:
* so that the stack cannot pile up (the nesting level of interrupt
* handlers is limited by the number of bits in cpl).
*/
#ifdef SMP
cli /* early to prevent INT deadlock */
movl %eax, %edx /* preserve cpl while getting lock */
ICPL_LOCK
movl %edx, %eax
#endif
movl %eax,%ecx
notl %ecx
notl %ecx /* set bit = unmasked level */
#ifndef SMP
cli
andl _ipending,%ecx
#endif
andl _ipending,%ecx /* set bit = unmasked pending INT */
jne doreti_unpend
doreti_exit:
movl %eax,_cpl
decb _intr_nesting_level
FAST_ICPL_UNLOCK /* preserves %eax */
MPLOCKED decb _intr_nesting_level
MEXITCOUNT
#ifdef VM86
/*
@ -132,6 +145,7 @@ doreti_exit:
* vm86 mode. doreti_stop is a convenient place to set a breakpoint.
* When the cpl problem is solved, this code can disappear.
*/
ICPL_LOCK
cmpl $0,_cpl
je 1f
testl $PSL_VM,TF_EFLAGS(%esp)
@ -140,6 +154,7 @@ doreti_stop:
movl $0,_cpl
nop
1:
FAST_ICPL_UNLOCK /* preserves %eax */
#endif /* VM86 */
#ifdef SMP
@ -184,9 +199,17 @@ doreti_unpend:
* We won't miss any new pending interrupts because we will check
* for them again.
*/
#ifdef SMP
/* we enter with cpl locked */
bsfl %ecx, %ecx /* slow, but not worth optimizing */
btrl %ecx, _ipending
FAST_ICPL_UNLOCK /* preserves %eax */
sti /* late to prevent INT deadlock */
#else
sti
bsfl %ecx,%ecx /* slow, but not worth optimizing */
MPLOCKED btrl %ecx, _ipending
btrl %ecx,_ipending
#endif /* SMP */
jnc doreti_next /* some intr cleared memory copy */
movl ihandlers(,%ecx,4),%edx
testl %edx,%edx
@ -194,7 +217,14 @@ doreti_unpend:
cmpl $NHWI,%ecx
jae doreti_swi
cli
#ifdef SMP
pushl %eax /* preserve %eax */
ICPL_LOCK
popl _cpl
FAST_ICPL_UNLOCK
#else
movl %eax,_cpl
#endif
MEXITCOUNT
jmp %edx
@ -210,8 +240,18 @@ doreti_swi:
* interrupt frames. There are only 4 different SWIs and the HWI
* and SWI masks limit the nesting further.
*/
#ifdef SMP
orl imasks(,%ecx,4), %eax
cli /* prevent INT deadlock */
pushl %eax /* save cpl */
ICPL_LOCK
popl _cpl /* restore cpl */
FAST_ICPL_UNLOCK
sti
#else
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
#endif
call %edx
popl %eax
jmp doreti_next
@ -247,72 +287,12 @@ swi_ast_phantom:
* using by using cli, but they are unavoidable for lcall entries.
*/
cli
MPLOCKED orl $SWI_AST_PENDING, _ipending
ICPL_LOCK
orl $SWI_AST_PENDING, _ipending
/* cpl is unlocked in doreti_exit */
subl %eax,%eax
jmp doreti_exit /* SWI_AST is highest so we must be done */
/*
* Interrupt priority mechanism
* -- soft splXX masks with group mechanism (cpl)
* -- h/w masks for currently active or unused interrupts (imen)
* -- ipending = active interrupts currently masked by cpl
*/
ENTRY(splz)
/*
* The caller has restored cpl and checked that (ipending & ~cpl)
* is nonzero. We have to repeat the check since if there is an
* interrupt while we're looking, _doreti processing for the
* interrupt will handle all the unmasked pending interrupts
* because we restored early. We're repeating the calculation
* of (ipending & ~cpl) anyway so that the caller doesn't have
* to pass it, so this only costs one "jne". "bsfl %ecx,%ecx"
* is undefined when %ecx is 0 so we can't rely on the secondary
* btrl tests.
*/
movl _cpl,%eax
splz_next:
/*
* We don't need any locking here. (ipending & ~cpl) cannot grow
* while we're looking at it - any interrupt will shrink it to 0.
*/
movl %eax,%ecx
notl %ecx
andl _ipending,%ecx
jne splz_unpend
ret
ALIGN_TEXT
splz_unpend:
bsfl %ecx,%ecx
MPLOCKED btrl %ecx, _ipending
jnc splz_next
movl ihandlers(,%ecx,4),%edx
testl %edx,%edx
je splz_next /* "can't happen" */
cmpl $NHWI,%ecx
jae splz_swi
/*
* We would prefer to call the intr handler directly here but that
* doesn't work for badly behaved handlers that want the interrupt
* frame. Also, there's a problem determining the unit number.
* We should change the interface so that the unit number is not
* determined at config time.
*/
jmp *_vec(,%ecx,4)
ALIGN_TEXT
splz_swi:
cmpl $SWI_AST,%ecx
je splz_next /* "can't happen" */
pushl %eax
orl imasks(,%ecx,4),%eax
movl %eax,_cpl
call %edx
popl %eax
movl %eax,_cpl
jmp splz_next
ALIGN_TEXT
swi_net:

View File

@ -23,13 +23,14 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: ipl_funcs.c,v 1.1 1997/05/31 08:57:05 peter Exp $
* $Id: ipl_funcs.c,v 1.6 1997/08/23 23:15:19 smp Exp smp $
*/
#include <sys/types.h>
#include <sys/systm.h>
#include <machine/ipl.h>
#ifndef SMP
/*
* The volatile bitmap variables must be set atomically. This normally
* involves using a machine-dependent bit-set or `or' instruction.
@ -93,12 +94,164 @@ splx(unsigned ipl)
splz();
}
#ifdef SMP
#else /* !SMP */
#include <machine/param.h>
#include <machine/smp.h>
#if defined(REAL_IFCPL)
#define IFCPL_LOCK() SCPL_LOCK()
#define IFCPL_UNLOCK() SCPL_UNLOCK()
#else /* REAL_IFCPL */
#define IFCPL_LOCK()
#define IFCPL_UNLOCK()
#endif /* REAL_IFCPL */
/*
* The volatile bitmap variables must be set atomically. This normally
* involves using a machine-dependent bit-set or `or' instruction.
*/
#define DO_SETBITS(name, var, bits) \
void name(void) \
{ \
IFCPL_LOCK(); \
setbits(var, bits); \
IFCPL_UNLOCK(); \
}
DO_SETBITS(setdelayed, &ipending, loadandclear((unsigned *)&idelayed))
DO_SETBITS(setsoftast, &ipending, SWI_AST_PENDING)
DO_SETBITS(setsoftclock, &ipending, SWI_CLOCK_PENDING)
DO_SETBITS(setsoftnet, &ipending, SWI_NET_PENDING)
DO_SETBITS(setsofttty, &ipending, SWI_TTY_PENDING)
DO_SETBITS(schedsoftnet, &idelayed, SWI_NET_PENDING)
DO_SETBITS(schedsofttty, &idelayed, SWI_TTY_PENDING)
unsigned
softclockpending(void)
{
unsigned x;
IFCPL_LOCK();
x = ipending & SWI_CLOCK_PENDING;
IFCPL_UNLOCK();
return x;
}
#define GENSPL(name, set_cpl) \
unsigned name(void) \
{ \
unsigned x; \
\
IFCPL_LOCK(); \
x = cpl; \
/* XXX test cil */ \
set_cpl; \
IFCPL_UNLOCK(); \
\
return (x); \
}
GENSPL(splbio, cpl |= bio_imask)
GENSPL(splclock, cpl = HWI_MASK | SWI_MASK)
GENSPL(splimp, cpl |= net_imask)
GENSPL(splnet, cpl |= SWI_NET_MASK)
GENSPL(splsoftclock, cpl = SWI_CLOCK_MASK)
GENSPL(splsofttty, cpl |= SWI_TTY_MASK)
GENSPL(splstatclock, cpl |= stat_imask)
GENSPL(splvm, cpl |= net_imask | bio_imask)
/*
* This version has to check for smp_active,
* as calling simple_lock() (ie ss_lock) before then deadlocks the system.
*/
#define GENSPL2(name, set_cpl) \
unsigned name(void) \
{ \
unsigned x; \
\
if (smp_active) \
IFCPL_LOCK(); \
x = cpl; \
/* XXX test cil */ \
set_cpl; \
if (smp_active) \
IFCPL_UNLOCK(); \
\
return (x); \
}
GENSPL2(splhigh, cpl = HWI_MASK | SWI_MASK)
GENSPL2(spltty, cpl |= tty_imask)
void
spl0(void)
{
IFCPL_LOCK();
/* XXX test cil */
cpl = SWI_AST_MASK;
if (ipending & ~SWI_AST_MASK) {
IFCPL_UNLOCK();
splz();
}
else
IFCPL_UNLOCK();
}
void
splx(unsigned ipl)
{
if (smp_active)
IFCPL_LOCK();
/* XXX test cil */
cpl = ipl;
if (ipending & ~ipl) {
if (smp_active)
IFCPL_UNLOCK();
splz();
}
else
if (smp_active)
IFCPL_UNLOCK();
}
/*
* Replaces UP specific inline found in (?) pci/pci_support.c.
*
* Stefan said:
* You know, that splq() is used in the shared interrupt multiplexer, and that
* the SMP version should not have too much overhead. If it is significantly
* slower, then moving the splq() out of the loop in intr_mux() and passing in
* the logical OR of all mask values might be a better solution than the
* current code. (This logical OR could of course be pre-calculated whenever
* another shared interrupt is registered ...)
*/
intrmask_t
splq(intrmask_t mask)
{
intrmask_t tmp = cpl;
intrmask_t tmp;
IFCPL_LOCK();
tmp = cpl;
cpl |= mask;
IFCPL_UNLOCK();
return (tmp);
}
#endif
#endif /* !SMP */
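
To make the macro machinery above concrete, this is roughly what GENSPL(splbio, cpl |= bio_imask) expands to when SMP and REAL_IFCPL are defined (hand-expanded here for illustration; the commit generates it from the macro rather than spelling it out):

unsigned
splbio(void)
{
        unsigned x;

        ss_lock(&cpl_lock);     /* IFCPL_LOCK() -> SCPL_LOCK() */
        x = cpl;
        /* XXX test cil */
        cpl |= bio_imask;       /* the set_cpl argument */
        ss_unlock(&cpl_lock);   /* IFCPL_UNLOCK() -> SCPL_UNLOCK() */

        return (x);
}

splhigh() and spltty() get the same shape of body via GENSPL2, except that the lock/unlock calls are skipped until smp_active is set, since taking ss_lock earlier deadlocks the system (per the comment above).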