General cleanup of the lock pushdown code. They are grouped and enabled

from machine/smptests.h:

#define PUSHDOWN_LEVEL_1
#define PUSHDOWN_LEVEL_2
#define PUSHDOWN_LEVEL_3
#define PUSHDOWN_LEVEL_4_NOT
This commit is contained in:
Steve Passe 1997-09-07 22:04:09 +00:00
parent 01e55e4ef4
commit 20233f27f4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=29213
30 changed files with 711 additions and 398 deletions

View File

@ -1,28 +1,15 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $Id: apic_vector.s,v 1.33 1997/08/30 01:23:40 smp Exp smp $
* $Id: apic_vector.s,v 1.37 1997/09/07 19:23:45 smp Exp smp $
*/
#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h> /** various things... */
#include "i386/isa/intr_machdep.h"
#ifdef REAL_AVCPL
#define AVCPL_LOCK CPL_LOCK
#define AVCPL_UNLOCK CPL_UNLOCK
#else /* REAL_AVCPL */
#define AVCPL_LOCK
#define AVCPL_UNLOCK
#endif /* REAL_AVCPL */
#ifdef FAST_SIMPLELOCK
#define GET_FAST_INTR_LOCK \
@ -185,17 +172,17 @@ IDTVEC(vec_name) ; \
* and the EOI cycle would cause redundant INTs to occur.
*/
#define MASK_LEVEL_IRQ(irq_num) \
IMASK_LOCK ; /* into critical reg */ \
testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
jz 8f ; /* edge, don't mask */ \
IMASK_LOCK ; /* into critical reg */ \
orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
movl _ioapic, %ecx ; /* ioapic[0] addr */ \
movl $REDTBL_IDX(irq_num), (%ecx) ; /* write the index */ \
movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
orl $IOART_INTMASK, %eax ; /* set the mask */ \
movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
8: ; \
IMASK_UNLOCK
IMASK_UNLOCK ; \
8:
/*
 * Test to see if the source is currently masked, clear if so.
@ -214,10 +201,24 @@ IDTVEC(vec_name) ; \
IMASK_UNLOCK
#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call _get_isrlock
#else
#define ENLOCK \
ISR_TRYLOCK ; /* XXX this is going away... */ \
testl %eax, %eax ; /* did we get it? */ \
jz 1f
#define DELOCK ISR_RELLOCK
#define LATELOCK
#endif
#ifdef CPL_AND_CML
#define INTR(irq_num, vec_name) \
.text ; \
SUPERALIGN_TEXT ; \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
IDTVEC(vec_name) ; \
PUSH_FRAME ; \
movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
@ -228,9 +229,7 @@ IDTVEC(vec_name) ; \
btsl $(irq_num), iactive ; /* lazy masking */ \
jc 1f ; /* already active */ \
; \
ISR_TRYLOCK ; /* XXX this is going away... */ \
testl %eax, %eax ; /* did we get it? */ \
jz 1f ; /* no */ \
ENLOCK ; \
; \
AVCPL_LOCK ; /* MP-safe */ \
testl $IRQ_BIT(irq_num), _cpl ; \
@ -242,6 +241,8 @@ IDTVEC(vec_name) ; \
; \
movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
; \
/* entry point used by doreti_unpend for HWIs. */ \
__CONCAT(Xresume,irq_num): ; \
FAKE_MCOUNT(12*4(%esp)) ; /* XXX avoid dbl cnt */ \
lock ; incl _cnt+V_INTR ; /* tally interrupts */ \
@ -256,15 +257,18 @@ __CONCAT(Xresume,irq_num): ; \
AVCPL_UNLOCK ; \
; \
pushl _intr_unit + (irq_num) * 4 ; \
incl _inside_intr ; \
sti ; \
call *_intr_handler + (irq_num) * 4 ; \
cli ; \
decl _inside_intr ; \
; \
lock ; andl $~IRQ_BIT(irq_num), iactive ; \
lock ; andl $~IRQ_BIT(irq_num), _cil ; \
UNMASK_IRQ(irq_num) ; \
sti ; /* doreti repeats cli/sti */ \
MEXITCOUNT ; \
LATELOCK ; \
jmp _doreti ; \
; \
ALIGN_TEXT ; \
@ -282,14 +286,15 @@ __CONCAT(Xresume,irq_num): ; \
ALIGN_TEXT ; \
2: ; /* masked by cpl|cml */ \
AVCPL_UNLOCK ; \
ISR_RELLOCK ; /* XXX this is going away... */ \
DELOCK ; /* XXX this is going away... */ \
jmp 1b
#else /* INTR_SIMPLELOCK */
#else /* CPL_AND_CML */
#define INTR(irq_num, vec_name) \
.text ; \
SUPERALIGN_TEXT ; \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
IDTVEC(vec_name) ; \
PUSH_FRAME ; \
movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
@ -311,6 +316,8 @@ IDTVEC(vec_name) ; \
; \
movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
; \
/* entry point used by doreti_unpend for HWIs. */ \
__CONCAT(Xresume,irq_num): ; \
FAKE_MCOUNT(12*4(%esp)) ; /* XXX avoid dbl cnt */ \
lock ; incl _cnt+V_INTR ; /* tally interrupts */ \
@ -353,7 +360,7 @@ __CONCAT(Xresume,irq_num): ; \
ISR_RELLOCK ; /* XXX this is going away... */ \
jmp 1b
#endif /* INTR_SIMPLELOCK */
#endif /* CPL_AND_CML */
/*
@ -487,14 +494,26 @@ MCOUNT_LABEL(bintr)
MCOUNT_LABEL(eintr)
.data
ihandlers: /* addresses of interrupt handlers */
/* actually resumption addresses for HWI's */
/*
* Addresses of interrupt handlers.
* XresumeNN: Resumption addresses for HWIs.
*/
ihandlers:
/*
* used by:
* ipl.s: doreti_unpend
*/
.long Xresume0, Xresume1, Xresume2, Xresume3
.long Xresume4, Xresume5, Xresume6, Xresume7
.long Xresume8, Xresume9, Xresume10, Xresume11
.long Xresume12, Xresume13, Xresume14, Xresume15
.long Xresume16, Xresume17, Xresume18, Xresume19
.long Xresume20, Xresume21, Xresume22, Xresume23
/*
* used by:
* ipl.s: doreti_unpend
* apic_ipl.s: splz_unpend
*/
.long swi_tty, swi_net
.long 0, 0, 0, 0
.long _softclock, swi_ast
@ -506,6 +525,12 @@ imasks: /* masks for interrupt handlers */
.long 0, 0, 0, 0
.long SWI_CLOCK_MASK, SWI_AST_MASK
/*
* IDT vector entry points for the HWIs.
*
* used by:
* i386/isa/clock.c: setup Xintr8254
*/
.globl _ivectors
_ivectors:
.long _Xintr0, _Xintr1, _Xintr2, _Xintr3

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: swtch.s,v 1.60 1997/08/26 18:10:33 peter Exp $
* $Id: swtch.s,v 1.10 1997/09/07 21:50:13 smp Exp smp $
*/
#include "npx.h"
@ -300,6 +300,7 @@ idle_loop:
cmpl $0,_do_page_zero_idle
je 2f
/* XXX appears to cause panics */
/*
* Inside zero_idle we enable interrupts and grab the mplock
@ -311,12 +312,18 @@ idle_loop:
2:
/* enable intrs for a halt */
#ifdef SMP
movl $0, lapic_tpr /* 1st candidate for an INT */
#endif
sti
call *_hlt_vector /* wait for interrupt */
cli
jmp idle_loop
3:
#ifdef SMP
movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
#endif
call _get_mplock
cmpl $0,_whichrtqs /* real-time queue */
CROSSJUMP(jne, sw1a, je)
@ -384,8 +391,8 @@ idle_loop:
CROSSJUMPTARGET(_idle)
ENTRY(default_halt)
#ifndef SMP /* until we have a wakeup IPI */
hlt
#ifndef SMP
hlt /* XXX: until a wakeup IPI */
#endif
ret

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.42 1997/08/29 18:16:17 fsmp Exp $
* $Id: exception.s,v 1.22 1997/09/07 19:26:26 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@ -39,28 +39,23 @@
#include <machine/psl.h> /* PSL_I */
#include <machine/trap.h> /* trap codes */
#include <machine/asmacros.h>
#include <machine/smptests.h> /* INTR_SIMPLELOCK */
#ifdef SMP
#include <machine/smptests.h> /** CPL_AND_CML, REAL_ */
#else
#define ECPL_LOCK /* make these nops */
#define ECPL_UNLOCK
#define ICPL_LOCK
#define ICPL_UNLOCK
#define FAST_ICPL_UNLOCK
#define AICPL_LOCK
#define AICPL_UNLOCK
#define AVCPL_LOCK
#define AVCPL_UNLOCK
#endif /* SMP */
#include <machine/lock.h>
#ifndef SMP
#undef INTR_SIMPLELOCK /* simplifies cpp tests */
#undef REAL_ECPL
#undef REAL_ICPL
#undef REAL_AICPL
#undef REAL_AVCPL
#endif /* !SMP */
#ifdef REAL_ECPL
#define ECPL_LOCK SCPL_LOCK
#define ECPL_UNLOCK SCPL_UNLOCK
#else /* REAL_ECPL */
#define ECPL_LOCK
#define ECPL_UNLOCK
#endif /* REAL_ECPL */
#define KCSEL 0x08 /* kernel code selector */
#define KDSEL 0x10 /* kernel data selector */
@ -161,7 +156,7 @@ IDTVEC(fpu)
MPLOCKED incl _cnt+V_TRAP
FPU_LOCK
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
movl _cml,%eax
pushl %eax /* save original cml */
orl $SWI_AST_MASK,%eax
@ -171,7 +166,7 @@ IDTVEC(fpu)
pushl %eax /* save original cpl */
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
#endif /* INTR_SIMPLELOCK */
#endif /* CPL_AND_CML */
ECPL_UNLOCK
pushl $0 /* dummy unit to finish intr frame */
#else /* SMP */
@ -211,7 +206,7 @@ calltrap:
MPLOCKED incl _cnt+V_TRAP
ALIGN_LOCK
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
orl $SWI_AST_MASK,_cml
#else
orl $SWI_AST_MASK,_cpl
@ -236,11 +231,11 @@ calltrap:
#ifdef SMP
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
pushl _cml /* XXX will this work??? */
#else
pushl _cpl
#endif /* INTR_SIMPLELOCK */
#endif
ECPL_UNLOCK
jmp 2f
1:
@ -288,7 +283,7 @@ IDTVEC(syscall)
MPLOCKED incl _cnt+V_SYSCALL
SYSCALL_LOCK
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
movl $SWI_AST_MASK,_cml
#else
movl $SWI_AST_MASK,_cpl
@ -322,7 +317,7 @@ IDTVEC(int0x80_syscall)
MPLOCKED incl _cnt+V_SYSCALL
ALTSYSCALL_LOCK
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
movl $SWI_AST_MASK,_cml
#else
movl $SWI_AST_MASK,_cpl

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.42 1997/08/29 18:16:17 fsmp Exp $
* $Id: exception.s,v 1.22 1997/09/07 19:26:26 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@ -39,28 +39,23 @@
#include <machine/psl.h> /* PSL_I */
#include <machine/trap.h> /* trap codes */
#include <machine/asmacros.h>
#include <machine/smptests.h> /* INTR_SIMPLELOCK */
#ifdef SMP
#include <machine/smptests.h> /** CPL_AND_CML, REAL_ */
#else
#define ECPL_LOCK /* make these nops */
#define ECPL_UNLOCK
#define ICPL_LOCK
#define ICPL_UNLOCK
#define FAST_ICPL_UNLOCK
#define AICPL_LOCK
#define AICPL_UNLOCK
#define AVCPL_LOCK
#define AVCPL_UNLOCK
#endif /* SMP */
#include <machine/lock.h>
#ifndef SMP
#undef INTR_SIMPLELOCK /* simplifies cpp tests */
#undef REAL_ECPL
#undef REAL_ICPL
#undef REAL_AICPL
#undef REAL_AVCPL
#endif /* !SMP */
#ifdef REAL_ECPL
#define ECPL_LOCK SCPL_LOCK
#define ECPL_UNLOCK SCPL_UNLOCK
#else /* REAL_ECPL */
#define ECPL_LOCK
#define ECPL_UNLOCK
#endif /* REAL_ECPL */
#define KCSEL 0x08 /* kernel code selector */
#define KDSEL 0x10 /* kernel data selector */
@ -161,7 +156,7 @@ IDTVEC(fpu)
MPLOCKED incl _cnt+V_TRAP
FPU_LOCK
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
movl _cml,%eax
pushl %eax /* save original cml */
orl $SWI_AST_MASK,%eax
@ -171,7 +166,7 @@ IDTVEC(fpu)
pushl %eax /* save original cpl */
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
#endif /* INTR_SIMPLELOCK */
#endif /* CPL_AND_CML */
ECPL_UNLOCK
pushl $0 /* dummy unit to finish intr frame */
#else /* SMP */
@ -211,7 +206,7 @@ calltrap:
MPLOCKED incl _cnt+V_TRAP
ALIGN_LOCK
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
orl $SWI_AST_MASK,_cml
#else
orl $SWI_AST_MASK,_cpl
@ -236,11 +231,11 @@ calltrap:
#ifdef SMP
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
pushl _cml /* XXX will this work??? */
#else
pushl _cpl
#endif /* INTR_SIMPLELOCK */
#endif
ECPL_UNLOCK
jmp 2f
1:
@ -288,7 +283,7 @@ IDTVEC(syscall)
MPLOCKED incl _cnt+V_SYSCALL
SYSCALL_LOCK
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
movl $SWI_AST_MASK,_cml
#else
movl $SWI_AST_MASK,_cpl
@ -322,7 +317,7 @@ IDTVEC(int0x80_syscall)
MPLOCKED incl _cnt+V_SYSCALL
ALTSYSCALL_LOCK
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
movl $SWI_AST_MASK,_cml
#else
movl $SWI_AST_MASK,_cpl

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.95 1997/08/24 00:05:33 fsmp Exp $
* $Id: locore.s,v 1.5 1997/09/02 04:38:32 smp Exp smp $
*
* originally from: locore.s, by William F. Jolitz
*
@ -105,6 +105,7 @@
.globl _cpuid,_curproc,_curpcb,_npxproc,_runtime,_cpu_lockid
.globl _common_tss,_other_cpus,_my_idlePTD,_ss_tpr
.globl _prv_CMAP1,_prv_CMAP2,_prv_CMAP3
.globl _inside_intr
.set _cpuid,_SMP_prvpage+0 /* [0] */
.set _curproc,_SMP_prvpage+4 /* [1] */
.set _curpcb,_SMP_prvpage+8 /* [2] */
@ -118,7 +119,8 @@
.set _prv_CMAP1,_SMP_prvpage+40 /* [10] */
.set _prv_CMAP2,_SMP_prvpage+44 /* [11] */
.set _prv_CMAP3,_SMP_prvpage+48 /* [12] */
.set _common_tss,_SMP_prvpage+52 /* 102 (ie: 104) bytes long */
.set _inside_intr,_SMP_prvpage+52 /* [13] */
.set _common_tss,_SMP_prvpage+56 /* 102 (ie: 104) bytes long */
/* Fetch the .set's for the local apic */
#include "i386/i386/mp_apicdefs.s"

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.95 1997/08/24 00:05:33 fsmp Exp $
* $Id: locore.s,v 1.5 1997/09/02 04:38:32 smp Exp smp $
*
* originally from: locore.s, by William F. Jolitz
*
@ -105,6 +105,7 @@
.globl _cpuid,_curproc,_curpcb,_npxproc,_runtime,_cpu_lockid
.globl _common_tss,_other_cpus,_my_idlePTD,_ss_tpr
.globl _prv_CMAP1,_prv_CMAP2,_prv_CMAP3
.globl _inside_intr
.set _cpuid,_SMP_prvpage+0 /* [0] */
.set _curproc,_SMP_prvpage+4 /* [1] */
.set _curpcb,_SMP_prvpage+8 /* [2] */
@ -118,7 +119,8 @@
.set _prv_CMAP1,_SMP_prvpage+40 /* [10] */
.set _prv_CMAP2,_SMP_prvpage+44 /* [11] */
.set _prv_CMAP3,_SMP_prvpage+48 /* [12] */
.set _common_tss,_SMP_prvpage+52 /* 102 (ie: 104) bytes long */
.set _inside_intr,_SMP_prvpage+52 /* [13] */
.set _common_tss,_SMP_prvpage+56 /* 102 (ie: 104) bytes long */
/* Fetch the .set's for the local apic */
#include "i386/i386/mp_apicdefs.s"

View File

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.49 1997/08/31 03:17:47 fsmp Exp $
* $Id: mp_machdep.c,v 1.38 1997/09/05 20:23:34 smp Exp smp $
*/
#include "opt_smp.h"
@ -195,14 +195,18 @@ typedef struct BASETABLE_ENTRY {
*/
#define MP_BOOTADDRESS_POST 0x10
#define MP_PROBE_POST 0x11
#define MP_START_POST 0x12
#define MP_ANNOUNCE_POST 0x13
#define MPTABLE_PASS1_POST 0x14
#define MPTABLE_PASS1_POST 0x12
#define MP_START_POST 0x13
#define MP_ENABLE_POST 0x14
#define MPTABLE_PASS2_POST 0x15
#define MP_ENABLE_POST 0x16
#define START_ALL_APS_POST 0x17
#define INSTALL_AP_TRAMP_POST 0x18
#define START_AP_POST 0x19
#define START_ALL_APS_POST 0x16
#define INSTALL_AP_TRAMP_POST 0x17
#define START_AP_POST 0x18
#define MP_ANNOUNCE_POST 0x19
/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int current_postcode;
@ -1445,7 +1449,7 @@ default_mp_table(int type)
/* critical region around IO APIC, apic_imen */
struct simplelock imen_lock;
/* critical region around splxx(), cpl, cil, ipending */
/* critical region around splxx(), cpl, cml, cil, ipending */
struct simplelock cpl_lock;
/* Make FAST_INTR() routines sequential */

View File

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.49 1997/08/31 03:17:47 fsmp Exp $
* $Id: mp_machdep.c,v 1.38 1997/09/05 20:23:34 smp Exp smp $
*/
#include "opt_smp.h"
@ -195,14 +195,18 @@ typedef struct BASETABLE_ENTRY {
*/
#define MP_BOOTADDRESS_POST 0x10
#define MP_PROBE_POST 0x11
#define MP_START_POST 0x12
#define MP_ANNOUNCE_POST 0x13
#define MPTABLE_PASS1_POST 0x14
#define MPTABLE_PASS1_POST 0x12
#define MP_START_POST 0x13
#define MP_ENABLE_POST 0x14
#define MPTABLE_PASS2_POST 0x15
#define MP_ENABLE_POST 0x16
#define START_ALL_APS_POST 0x17
#define INSTALL_AP_TRAMP_POST 0x18
#define START_AP_POST 0x19
#define START_ALL_APS_POST 0x16
#define INSTALL_AP_TRAMP_POST 0x17
#define START_AP_POST 0x18
#define MP_ANNOUNCE_POST 0x19
/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int current_postcode;
@ -1445,7 +1449,7 @@ default_mp_table(int type)
/* critical region around IO APIC, apic_imen */
struct simplelock imen_lock;
/* critical region around splxx(), cpl, cil, ipending */
/* critical region around splxx(), cpl, cml, cil, ipending */
struct simplelock cpl_lock;
/* Make FAST_INTR() routines sequential */

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: swtch.s,v 1.60 1997/08/26 18:10:33 peter Exp $
* $Id: swtch.s,v 1.10 1997/09/07 21:50:13 smp Exp smp $
*/
#include "npx.h"
@ -300,6 +300,7 @@ idle_loop:
cmpl $0,_do_page_zero_idle
je 2f
/* XXX appears to cause panics */
/*
* Inside zero_idle we enable interrupts and grab the mplock
@ -311,12 +312,18 @@ idle_loop:
2:
/* enable intrs for a halt */
#ifdef SMP
movl $0, lapic_tpr /* 1st candidate for an INT */
#endif
sti
call *_hlt_vector /* wait for interrupt */
cli
jmp idle_loop
3:
#ifdef SMP
movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
#endif
call _get_mplock
cmpl $0,_whichrtqs /* real-time queue */
CROSSJUMP(jne, sw1a, je)
@ -384,8 +391,8 @@ idle_loop:
CROSSJUMPTARGET(_idle)
ENTRY(default_halt)
#ifndef SMP /* until we have a wakeup IPI */
hlt
#ifndef SMP
hlt /* XXX: until a wakeup IPI */
#endif
ret

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: cpufunc.h,v 1.2 1997/09/01 07:37:58 smp Exp smp $
* $Id: cpufunc.h,v 1.3 1997/09/05 20:20:31 smp Exp smp $
*/
/*
@ -58,17 +58,13 @@ static __inline void
disable_intr(void)
{
__asm __volatile("cli" : : : "memory");
#ifdef SMP
s_lock(&mpintr_lock);
#endif
MPINTR_LOCK();
}
static __inline void
enable_intr(void)
{
#ifdef SMP
s_unlock(&mpintr_lock);
#endif
MPINTR_UNLOCK();
__asm __volatile("sti");
}

View File

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.49 1997/08/31 03:17:47 fsmp Exp $
* $Id: mp_machdep.c,v 1.38 1997/09/05 20:23:34 smp Exp smp $
*/
#include "opt_smp.h"
@ -195,14 +195,18 @@ typedef struct BASETABLE_ENTRY {
*/
#define MP_BOOTADDRESS_POST 0x10
#define MP_PROBE_POST 0x11
#define MP_START_POST 0x12
#define MP_ANNOUNCE_POST 0x13
#define MPTABLE_PASS1_POST 0x14
#define MPTABLE_PASS1_POST 0x12
#define MP_START_POST 0x13
#define MP_ENABLE_POST 0x14
#define MPTABLE_PASS2_POST 0x15
#define MP_ENABLE_POST 0x16
#define START_ALL_APS_POST 0x17
#define INSTALL_AP_TRAMP_POST 0x18
#define START_AP_POST 0x19
#define START_ALL_APS_POST 0x16
#define INSTALL_AP_TRAMP_POST 0x17
#define START_AP_POST 0x18
#define MP_ANNOUNCE_POST 0x19
/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int current_postcode;
@ -1445,7 +1449,7 @@ default_mp_table(int type)
/* critical region around IO APIC, apic_imen */
struct simplelock imen_lock;
/* critical region around splxx(), cpl, cil, ipending */
/* critical region around splxx(), cpl, cml, cil, ipending */
struct simplelock cpl_lock;
/* Make FAST_INTR() routines sequential */

View File

@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* $Id: smp.h,v 1.30 1997/08/26 18:10:37 peter Exp $
* $Id: smp.h,v 1.27 1997/09/05 18:08:57 smp Exp smp $
*
*/
@ -167,6 +167,7 @@ extern volatile int smp_idle_loops;
/* 'private' global data in locore.s */
extern volatile u_int cpuid;
extern volatile u_int cpu_lockid;
extern int inside_intr;
extern volatile u_int other_cpus;
#endif /* !LOCORE */

View File

@ -1,28 +1,15 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $Id: apic_vector.s,v 1.33 1997/08/30 01:23:40 smp Exp smp $
* $Id: apic_vector.s,v 1.37 1997/09/07 19:23:45 smp Exp smp $
*/
#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h> /** various things... */
#include "i386/isa/intr_machdep.h"
#ifdef REAL_AVCPL
#define AVCPL_LOCK CPL_LOCK
#define AVCPL_UNLOCK CPL_UNLOCK
#else /* REAL_AVCPL */
#define AVCPL_LOCK
#define AVCPL_UNLOCK
#endif /* REAL_AVCPL */
#ifdef FAST_SIMPLELOCK
#define GET_FAST_INTR_LOCK \
@ -185,17 +172,17 @@ IDTVEC(vec_name) ; \
* and the EOI cycle would cause redundant INTs to occur.
*/
#define MASK_LEVEL_IRQ(irq_num) \
IMASK_LOCK ; /* into critical reg */ \
testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
jz 8f ; /* edge, don't mask */ \
IMASK_LOCK ; /* into critical reg */ \
orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
movl _ioapic, %ecx ; /* ioapic[0] addr */ \
movl $REDTBL_IDX(irq_num), (%ecx) ; /* write the index */ \
movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
orl $IOART_INTMASK, %eax ; /* set the mask */ \
movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
8: ; \
IMASK_UNLOCK
IMASK_UNLOCK ; \
8:
/*
 * Test to see if the source is currently masked, clear if so.
@ -214,10 +201,24 @@ IDTVEC(vec_name) ; \
IMASK_UNLOCK
#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call _get_isrlock
#else
#define ENLOCK \
ISR_TRYLOCK ; /* XXX this is going away... */ \
testl %eax, %eax ; /* did we get it? */ \
jz 1f
#define DELOCK ISR_RELLOCK
#define LATELOCK
#endif
#ifdef CPL_AND_CML
#define INTR(irq_num, vec_name) \
.text ; \
SUPERALIGN_TEXT ; \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
IDTVEC(vec_name) ; \
PUSH_FRAME ; \
movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
@ -228,9 +229,7 @@ IDTVEC(vec_name) ; \
btsl $(irq_num), iactive ; /* lazy masking */ \
jc 1f ; /* already active */ \
; \
ISR_TRYLOCK ; /* XXX this is going away... */ \
testl %eax, %eax ; /* did we get it? */ \
jz 1f ; /* no */ \
ENLOCK ; \
; \
AVCPL_LOCK ; /* MP-safe */ \
testl $IRQ_BIT(irq_num), _cpl ; \
@ -242,6 +241,8 @@ IDTVEC(vec_name) ; \
; \
movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
; \
/* entry point used by doreti_unpend for HWIs. */ \
__CONCAT(Xresume,irq_num): ; \
FAKE_MCOUNT(12*4(%esp)) ; /* XXX avoid dbl cnt */ \
lock ; incl _cnt+V_INTR ; /* tally interrupts */ \
@ -256,15 +257,18 @@ __CONCAT(Xresume,irq_num): ; \
AVCPL_UNLOCK ; \
; \
pushl _intr_unit + (irq_num) * 4 ; \
incl _inside_intr ; \
sti ; \
call *_intr_handler + (irq_num) * 4 ; \
cli ; \
decl _inside_intr ; \
; \
lock ; andl $~IRQ_BIT(irq_num), iactive ; \
lock ; andl $~IRQ_BIT(irq_num), _cil ; \
UNMASK_IRQ(irq_num) ; \
sti ; /* doreti repeats cli/sti */ \
MEXITCOUNT ; \
LATELOCK ; \
jmp _doreti ; \
; \
ALIGN_TEXT ; \
@ -282,14 +286,15 @@ __CONCAT(Xresume,irq_num): ; \
ALIGN_TEXT ; \
2: ; /* masked by cpl|cml */ \
AVCPL_UNLOCK ; \
ISR_RELLOCK ; /* XXX this is going away... */ \
DELOCK ; /* XXX this is going away... */ \
jmp 1b
#else /* INTR_SIMPLELOCK */
#else /* CPL_AND_CML */
#define INTR(irq_num, vec_name) \
.text ; \
SUPERALIGN_TEXT ; \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
IDTVEC(vec_name) ; \
PUSH_FRAME ; \
movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
@ -311,6 +316,8 @@ IDTVEC(vec_name) ; \
; \
movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
; \
/* entry point used by doreti_unpend for HWIs. */ \
__CONCAT(Xresume,irq_num): ; \
FAKE_MCOUNT(12*4(%esp)) ; /* XXX avoid dbl cnt */ \
lock ; incl _cnt+V_INTR ; /* tally interrupts */ \
@ -353,7 +360,7 @@ __CONCAT(Xresume,irq_num): ; \
ISR_RELLOCK ; /* XXX this is going away... */ \
jmp 1b
#endif /* INTR_SIMPLELOCK */
#endif /* CPL_AND_CML */
/*
@ -487,14 +494,26 @@ MCOUNT_LABEL(bintr)
MCOUNT_LABEL(eintr)
.data
ihandlers: /* addresses of interrupt handlers */
/* actually resumption addresses for HWI's */
/*
* Addresses of interrupt handlers.
* XresumeNN: Resumption addresses for HWIs.
*/
ihandlers:
/*
* used by:
* ipl.s: doreti_unpend
*/
.long Xresume0, Xresume1, Xresume2, Xresume3
.long Xresume4, Xresume5, Xresume6, Xresume7
.long Xresume8, Xresume9, Xresume10, Xresume11
.long Xresume12, Xresume13, Xresume14, Xresume15
.long Xresume16, Xresume17, Xresume18, Xresume19
.long Xresume20, Xresume21, Xresume22, Xresume23
/*
* used by:
* ipl.s: doreti_unpend
* apic_ipl.s: splz_unpend
*/
.long swi_tty, swi_net
.long 0, 0, 0, 0
.long _softclock, swi_ast
@ -506,6 +525,12 @@ imasks: /* masks for interrupt handlers */
.long 0, 0, 0, 0
.long SWI_CLOCK_MASK, SWI_AST_MASK
/*
* IDT vector entry points for the HWIs.
*
* used by:
* i386/isa/clock.c: setup Xintr8254
*/
.globl _ivectors
_ivectors:
.long _Xintr0, _Xintr1, _Xintr2, _Xintr3

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.42 1997/08/29 18:16:17 fsmp Exp $
* $Id: exception.s,v 1.22 1997/09/07 19:26:26 smp Exp smp $
*/
#include "npx.h" /* NNPX */
@ -39,28 +39,23 @@
#include <machine/psl.h> /* PSL_I */
#include <machine/trap.h> /* trap codes */
#include <machine/asmacros.h>
#include <machine/smptests.h> /* INTR_SIMPLELOCK */
#ifdef SMP
#include <machine/smptests.h> /** CPL_AND_CML, REAL_ */
#else
#define ECPL_LOCK /* make these nops */
#define ECPL_UNLOCK
#define ICPL_LOCK
#define ICPL_UNLOCK
#define FAST_ICPL_UNLOCK
#define AICPL_LOCK
#define AICPL_UNLOCK
#define AVCPL_LOCK
#define AVCPL_UNLOCK
#endif /* SMP */
#include <machine/lock.h>
#ifndef SMP
#undef INTR_SIMPLELOCK /* simplifies cpp tests */
#undef REAL_ECPL
#undef REAL_ICPL
#undef REAL_AICPL
#undef REAL_AVCPL
#endif /* !SMP */
#ifdef REAL_ECPL
#define ECPL_LOCK SCPL_LOCK
#define ECPL_UNLOCK SCPL_UNLOCK
#else /* REAL_ECPL */
#define ECPL_LOCK
#define ECPL_UNLOCK
#endif /* REAL_ECPL */
#define KCSEL 0x08 /* kernel code selector */
#define KDSEL 0x10 /* kernel data selector */
@ -161,7 +156,7 @@ IDTVEC(fpu)
MPLOCKED incl _cnt+V_TRAP
FPU_LOCK
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
movl _cml,%eax
pushl %eax /* save original cml */
orl $SWI_AST_MASK,%eax
@ -171,7 +166,7 @@ IDTVEC(fpu)
pushl %eax /* save original cpl */
orl $SWI_AST_MASK,%eax
movl %eax,_cpl
#endif /* INTR_SIMPLELOCK */
#endif /* CPL_AND_CML */
ECPL_UNLOCK
pushl $0 /* dummy unit to finish intr frame */
#else /* SMP */
@ -211,7 +206,7 @@ calltrap:
MPLOCKED incl _cnt+V_TRAP
ALIGN_LOCK
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
orl $SWI_AST_MASK,_cml
#else
orl $SWI_AST_MASK,_cpl
@ -236,11 +231,11 @@ calltrap:
#ifdef SMP
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
pushl _cml /* XXX will this work??? */
#else
pushl _cpl
#endif /* INTR_SIMPLELOCK */
#endif
ECPL_UNLOCK
jmp 2f
1:
@ -288,7 +283,7 @@ IDTVEC(syscall)
MPLOCKED incl _cnt+V_SYSCALL
SYSCALL_LOCK
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
movl $SWI_AST_MASK,_cml
#else
movl $SWI_AST_MASK,_cpl
@ -322,7 +317,7 @@ IDTVEC(int0x80_syscall)
MPLOCKED incl _cnt+V_SYSCALL
ALTSYSCALL_LOCK
ECPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
movl $SWI_AST_MASK,_cml
#else
movl $SWI_AST_MASK,_cpl

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.95 1997/08/24 00:05:33 fsmp Exp $
* $Id: locore.s,v 1.5 1997/09/02 04:38:32 smp Exp smp $
*
* originally from: locore.s, by William F. Jolitz
*
@ -105,6 +105,7 @@
.globl _cpuid,_curproc,_curpcb,_npxproc,_runtime,_cpu_lockid
.globl _common_tss,_other_cpus,_my_idlePTD,_ss_tpr
.globl _prv_CMAP1,_prv_CMAP2,_prv_CMAP3
.globl _inside_intr
.set _cpuid,_SMP_prvpage+0 /* [0] */
.set _curproc,_SMP_prvpage+4 /* [1] */
.set _curpcb,_SMP_prvpage+8 /* [2] */
@ -118,7 +119,8 @@
.set _prv_CMAP1,_SMP_prvpage+40 /* [10] */
.set _prv_CMAP2,_SMP_prvpage+44 /* [11] */
.set _prv_CMAP3,_SMP_prvpage+48 /* [12] */
.set _common_tss,_SMP_prvpage+52 /* 102 (ie: 104) bytes long */
.set _inside_intr,_SMP_prvpage+52 /* [13] */
.set _common_tss,_SMP_prvpage+56 /* 102 (ie: 104) bytes long */
/* Fetch the .set's for the local apic */
#include "i386/i386/mp_apicdefs.s"

View File

@ -32,7 +32,7 @@
* SUCH DAMAGE.
*
* from: Steve McCanne's microtime code
* $Id: microtime.s,v 1.31 1997/09/01 07:45:37 fsmp Exp $
* $Id: microtime.s,v 1.10 1997/09/07 21:50:13 smp Exp smp $
*/
#include "opt_cpu.h"
@ -44,7 +44,7 @@
#include <i386/isa/timerreg.h>
#ifdef SMP
#include <machine/smptests.h> /** USE_CLOCKLOCK */
#include <machine/smptests.h> /** USE_CLOCKLOCK, REAL_MCPL */
#endif
ENTRY(microtime)
@ -122,14 +122,14 @@ ENTRY(microtime)
movl _timer0_max_count, %edx /* prepare for 2 uses */
#ifdef APIC_IO
#if defined(REAL_MCPL) /* XXX do we need this??? */
#ifdef REAL_MCPL /* XXX do we need this??? */
pushl %ecx /* s_lock destroys %eax, %ecx */
CPL_LOCK /* MP-safe, INTs disabled above */
popl %ecx /* restore %ecx */
movl _ipending, %eax
movl $0, _cpl_lock /* s_unlock would destroy %eax */
testl %eax, _mask8254 /* is soft timer interrupt pending? */
#else /* REAL_MCPL */
#else
/** XXX FIXME: take our chances with a race, is this OK? */
movl _ipending, %eax
testl %eax, _mask8254 /* is soft timer interrupt pending? */

View File

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.49 1997/08/31 03:17:47 fsmp Exp $
* $Id: mp_machdep.c,v 1.38 1997/09/05 20:23:34 smp Exp smp $
*/
#include "opt_smp.h"
@ -195,14 +195,18 @@ typedef struct BASETABLE_ENTRY {
*/
#define MP_BOOTADDRESS_POST 0x10
#define MP_PROBE_POST 0x11
#define MP_START_POST 0x12
#define MP_ANNOUNCE_POST 0x13
#define MPTABLE_PASS1_POST 0x14
#define MPTABLE_PASS1_POST 0x12
#define MP_START_POST 0x13
#define MP_ENABLE_POST 0x14
#define MPTABLE_PASS2_POST 0x15
#define MP_ENABLE_POST 0x16
#define START_ALL_APS_POST 0x17
#define INSTALL_AP_TRAMP_POST 0x18
#define START_AP_POST 0x19
#define START_ALL_APS_POST 0x16
#define INSTALL_AP_TRAMP_POST 0x17
#define START_AP_POST 0x18
#define MP_ANNOUNCE_POST 0x19
/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int current_postcode;
@ -1445,7 +1449,7 @@ default_mp_table(int type)
/* critical region around IO APIC, apic_imen */
struct simplelock imen_lock;
/* critical region around splxx(), cpl, cil, ipending */
/* critical region around splxx(), cpl, cml, cil, ipending */
struct simplelock cpl_lock;
/* Make FAST_INTR() routines sequential */

View File

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.49 1997/08/31 03:17:47 fsmp Exp $
* $Id: mp_machdep.c,v 1.38 1997/09/05 20:23:34 smp Exp smp $
*/
#include "opt_smp.h"
@ -195,14 +195,18 @@ typedef struct BASETABLE_ENTRY {
*/
#define MP_BOOTADDRESS_POST 0x10
#define MP_PROBE_POST 0x11
#define MP_START_POST 0x12
#define MP_ANNOUNCE_POST 0x13
#define MPTABLE_PASS1_POST 0x14
#define MPTABLE_PASS1_POST 0x12
#define MP_START_POST 0x13
#define MP_ENABLE_POST 0x14
#define MPTABLE_PASS2_POST 0x15
#define MP_ENABLE_POST 0x16
#define START_ALL_APS_POST 0x17
#define INSTALL_AP_TRAMP_POST 0x18
#define START_AP_POST 0x19
#define START_ALL_APS_POST 0x16
#define INSTALL_AP_TRAMP_POST 0x17
#define START_AP_POST 0x18
#define MP_ANNOUNCE_POST 0x19
/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int current_postcode;
@ -1445,7 +1449,7 @@ default_mp_table(int type)
/* critical region around IO APIC, apic_imen */
struct simplelock imen_lock;
/* critical region around splxx(), cpl, cil, ipending */
/* critical region around splxx(), cpl, cml, cil, ipending */
struct simplelock cpl_lock;
/* Make FAST_INTR() routines sequential */

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: swtch.s,v 1.60 1997/08/26 18:10:33 peter Exp $
* $Id: swtch.s,v 1.10 1997/09/07 21:50:13 smp Exp smp $
*/
#include "npx.h"
@ -300,6 +300,7 @@ idle_loop:
cmpl $0,_do_page_zero_idle
je 2f
/* XXX appears to cause panics */
/*
* Inside zero_idle we enable interrupts and grab the mplock
@ -311,12 +312,18 @@ idle_loop:
2:
/* enable intrs for a halt */
#ifdef SMP
movl $0, lapic_tpr /* 1st candidate for an INT */
#endif
sti
call *_hlt_vector /* wait for interrupt */
cli
jmp idle_loop
3:
#ifdef SMP
movl $LOPRIO_LEVEL, lapic_tpr /* arbitrate for INTs */
#endif
call _get_mplock
cmpl $0,_whichrtqs /* real-time queue */
CROSSJUMP(jne, sw1a, je)
@ -384,8 +391,8 @@ idle_loop:
CROSSJUMPTARGET(_idle)
ENTRY(default_halt)
#ifndef SMP /* until we have a wakeup IPI */
hlt
#ifndef SMP
hlt /* XXX: until a wakeup IPI */
#endif
ret

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: cpufunc.h,v 1.2 1997/09/01 07:37:58 smp Exp smp $
* $Id: cpufunc.h,v 1.3 1997/09/05 20:20:31 smp Exp smp $
*/
/*
@ -58,17 +58,13 @@ static __inline void
disable_intr(void)
{
__asm __volatile("cli" : : : "memory");
#ifdef SMP
s_lock(&mpintr_lock);
#endif
MPINTR_LOCK();
}
static __inline void
enable_intr(void)
{
#ifdef SMP
s_unlock(&mpintr_lock);
#endif
MPINTR_UNLOCK();
__asm __volatile("sti");
}

View File

@ -22,25 +22,13 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: lock.h,v 1.4 1997/09/01 07:37:58 smp Exp smp $
* $Id: lock.h,v 1.7 1997/09/05 20:20:31 smp Exp smp $
*/
#ifndef _MACHINE_LOCK_H_
#define _MACHINE_LOCK_H_
/*
* XXX some temp debug control of cpl locks
*/
#define REAL_ECPL /* exception.s: SCPL_LOCK/SCPL_UNLOCK */
#define REAL_ICPL /* ipl.s: CPL_LOCK/CPL_UNLOCK/FAST */
#define REAL_AICPL /* apic_ipl.s: SCPL_LOCK/SCPL_UNLOCK */
#define REAL_AVCPL /* apic_vector.s: CPL_LOCK/CPL_UNLOCK */
#define REAL_IFCPL /* ipl_funcs.c: SCPL_LOCK/SCPL_UNLOCK */
#define REAL_MCPL_NOT /* microtime.s: CPL_LOCK/movl $0,_cpl_lock */
#ifdef LOCORE
@ -88,9 +76,9 @@
* Variations of CPL_LOCK protect spl updates as a critical region.
* Items within this 'region' include:
* cpl
* cml
* cil
* ipending
* ???
*/
/*
@ -148,8 +136,13 @@
/*
* Locks regions protected in UP kernel via cli/sti.
*/
#ifdef USE_MPINTRLOCK
#define MPINTR_LOCK() s_lock(&mpintr_lock)
#define MPINTR_UNLOCK() s_unlock(&mpintr_lock)
#else
#define MPINTR_LOCK()
#define MPINTR_UNLOCK()
#endif /* USE_MPINTRLOCK */
/*
* Protects cpl/cml/cil/ipending data as a critical region.
@ -163,7 +156,10 @@
#define SCPL_LOCK() ss_lock(&cpl_lock) /* INT safe: top end */
#define SCPL_UNLOCK() ss_unlock(&cpl_lock)
/* sio/cy lock */
/*
* sio/cy lock.
* XXX should rc (RISCom/8) use this?
*/
#ifdef USE_COMLOCK
#define COM_LOCK() s_lock(&com_lock)
#define COM_UNLOCK() s_unlock(&com_lock)
@ -178,7 +174,10 @@
#define COM_ENABLE_INTR() enable_intr()
#endif /* USE_COMLOCK */
/* clock hardware/struct lock */
/*
* Clock hardware/struct lock.
* XXX pcaudio and friends still need this lock installed.
*/
#ifdef USE_CLOCKLOCK
#define CLOCK_LOCK() s_lock(&clock_lock)
#define CLOCK_UNLOCK() s_unlock(&clock_lock)

View File

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.49 1997/08/31 03:17:47 fsmp Exp $
* $Id: mp_machdep.c,v 1.38 1997/09/05 20:23:34 smp Exp smp $
*/
#include "opt_smp.h"
@ -195,14 +195,18 @@ typedef struct BASETABLE_ENTRY {
*/
#define MP_BOOTADDRESS_POST 0x10
#define MP_PROBE_POST 0x11
#define MP_START_POST 0x12
#define MP_ANNOUNCE_POST 0x13
#define MPTABLE_PASS1_POST 0x14
#define MPTABLE_PASS1_POST 0x12
#define MP_START_POST 0x13
#define MP_ENABLE_POST 0x14
#define MPTABLE_PASS2_POST 0x15
#define MP_ENABLE_POST 0x16
#define START_ALL_APS_POST 0x17
#define INSTALL_AP_TRAMP_POST 0x18
#define START_AP_POST 0x19
#define START_ALL_APS_POST 0x16
#define INSTALL_AP_TRAMP_POST 0x17
#define START_AP_POST 0x18
#define MP_ANNOUNCE_POST 0x19
/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int current_postcode;
@ -1445,7 +1449,7 @@ default_mp_table(int type)
/* critical region around IO APIC, apic_imen */
struct simplelock imen_lock;
/* critical region around splxx(), cpl, cil, ipending */
/* critical region around splxx(), cpl, cml, cil, ipending */
struct simplelock cpl_lock;
/* Make FAST_INTR() routines sequential */

View File

@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* $Id: smp.h,v 1.30 1997/08/26 18:10:37 peter Exp $
* $Id: smp.h,v 1.27 1997/09/05 18:08:57 smp Exp smp $
*
*/
@ -167,6 +167,7 @@ extern volatile int smp_idle_loops;
/* 'private' global data in locore.s */
extern volatile u_int cpuid;
extern volatile u_int cpu_lockid;
extern int inside_intr;
extern volatile u_int other_cpus;
#endif /* !LOCORE */

View File

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: smptests.h,v 1.28 1997/09/01 07:37:58 smp Exp smp $
* $Id: smptests.h,v 1.33 1997/09/07 19:25:23 smp Exp smp $
*/
#ifndef _MACHINE_SMPTESTS_H_
@ -34,6 +34,75 @@
*/
/*
* Control the "giant lock" pushdown by logical steps.
*/
#define PUSHDOWN_LEVEL_1
#define PUSHDOWN_LEVEL_2
#define PUSHDOWN_LEVEL_3
#define PUSHDOWN_LEVEL_4_NOT
/*
* XXX some temp debug control of cpl locks
*/
#ifdef PUSHDOWN_LEVEL_2
#define REAL_ECPL /* exception.s: SCPL_LOCK/SCPL_UNLOCK */
#define REAL_ICPL /* ipl.s: CPL_LOCK/CPL_UNLOCK/FAST */
#define REAL_AICPL /* apic_ipl.s: SCPL_LOCK/SCPL_UNLOCK */
#define REAL_AVCPL /* apic_vector.s: CPL_LOCK/CPL_UNLOCK */
#define REAL_IFCPL /* ipl_funcs.c: SCPL_LOCK/SCPL_UNLOCK */
#endif /* PUSHDOWN_LEVEL_2 */
#define REAL_MCPL_NOT /* microtime.s: CPL_LOCK/movl $0,_cpl_lock */
/*
* The xCPL_LOCK/xCPL_UNLOCK defines control the spinlocks
* that protect cpl/cml/cil and the spl functions.
*/
#ifdef REAL_ECPL
#define ECPL_LOCK SCPL_LOCK
#define ECPL_UNLOCK SCPL_UNLOCK
#else
#define ECPL_LOCK
#define ECPL_UNLOCK
#endif /* REAL_ECPL */
#ifdef REAL_ICPL
#define ICPL_LOCK CPL_LOCK
#define ICPL_UNLOCK CPL_UNLOCK
#define FAST_ICPL_UNLOCK movl $0, _cpl_lock
#else
#define ICPL_LOCK
#define ICPL_UNLOCK
#define FAST_ICPL_UNLOCK
#endif /* REAL_ICPL */
#ifdef REAL_AICPL
#define AICPL_LOCK SCPL_LOCK
#define AICPL_UNLOCK SCPL_UNLOCK
#else
#define AICPL_LOCK
#define AICPL_UNLOCK
#endif /* REAL_AICPL */
#ifdef REAL_AVCPL
#define AVCPL_LOCK CPL_LOCK
#define AVCPL_UNLOCK CPL_UNLOCK
#else
#define AVCPL_LOCK
#define AVCPL_UNLOCK
#endif /* REAL_AVCPL */
#ifdef REAL_IFCPL
#define IFCPL_LOCK() SCPL_LOCK()
#define IFCPL_UNLOCK() SCPL_UNLOCK()
#else
#define IFCPL_LOCK()
#define IFCPL_UNLOCK()
#endif /* REAL_IFCPL */
/*
* Debug version of simple_lock. This will store the CPU id of the
* holding CPU along with the lock. When a CPU fails to get the lock
@ -48,21 +117,54 @@
* Put FAST_INTR() ISRs at an APIC priority above the regular INTs.
* Allow the mp_lock() routines to handle FAST interrupts while spinning.
*/
#ifdef PUSHDOWN_LEVEL_1
#define FAST_HI
#endif
/*
* These defines enable critical region locking of areas that were
* protected via cli/sti in the UP kernel.
*
* MPINTRLOCK protects all the generic areas.
* COMLOCK protects the sio/cy drivers.
* CLOCKLOCK protects clock hardware and data
* known to be incomplete:
* joystick lkm
* ?
*/
#ifdef PUSHDOWN_LEVEL_1
#define USE_MPINTRLOCK
#define USE_COMLOCK
#define USE_CLOCKLOCK
#endif
/*
* Regular INTerrupts without the giant lock, NOT READY YET!!!
*
#define INTR_SIMPLELOCK
*/
#ifdef PUSHDOWN_LEVEL_4
#define INTR_SIMPLELOCK
#endif
/*
* Separate the INTR() portion of cpl into another variable: cml.
*/
#ifdef PUSHDOWN_LEVEL_3
#define CPL_AND_CML
#endif
/*
* Forces spl functions to spin while waiting for safe time to change cpl.
*
#define SPL_DEBUG_POSTCODE (slows the system down noticeably)
*/
#ifdef PUSHDOWN_LEVEL_3
#define INTR_SPL
#define SPL_DEBUG
#endif
/*
@ -87,7 +189,9 @@
* So I need to restore cpl handling someday, but AFTER
* I finish making spl/cpl MP-safe.
*/
#ifdef PUSHDOWN_LEVEL_1
#define FAST_WITHOUTCPL
#endif
/*
@ -99,7 +203,9 @@
* One optimization on this would be a simple lock per DRIVER, but I'm
* not sure how to organize that yet...
*/
#ifdef PUSHDOWN_LEVEL_1
#define FAST_SIMPLELOCK
#endif
/*
@ -111,9 +217,9 @@
/*
* Send CPUSTOP IPI for stop/restart of other CPUs on DDB break.
*
#define VERBOSE_CPUSTOP_ON_DDBBREAK
*/
#define CPUSTOP_ON_DDBBREAK
#define VERBOSE_CPUSTOP_ON_DDBBREAK
/*
@ -124,12 +230,6 @@
#define GIANT_LOCK
/*
* Deal with broken smp_idleloop().
*/
#define IGNORE_IDLEPROCS
/*
* Misc. counters.
*

View File

@ -22,22 +22,10 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: apic_ipl.s,v 1.32 1997/08/29 18:39:36 smp Exp smp $
* $Id: apic_ipl.s,v 1.35 1997/09/07 19:23:45 smp Exp smp $
*/
#ifdef REAL_AICPL
#define AICPL_LOCK SCPL_LOCK
#define AICPL_UNLOCK SCPL_UNLOCK
#else /* REAL_AICPL */
#define AICPL_LOCK
#define AICPL_UNLOCK
#endif /* REAL_AICPL */
.data
ALIGN_DATA
@ -59,7 +47,15 @@ _Xintr8254:
_mask8254:
.long 0
/* */
/*
* Routines used by splz_unpend to build an interrupt frame from a
* trap frame. The _vec[] routines build the proper frame on the stack,
* then call one of _Xintr0 thru _XintrNN.
*
* used by:
* i386/isa/apic_ipl.s (this file): splz_unpend JUMPs to HWIs.
* i386/isa/clock.c: setup _vec[clock] to point at _vec8254.
*/
.globl _vec
_vec:
.long vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7
@ -111,7 +107,7 @@ ENTRY(splz)
*/
AICPL_LOCK
movl _cpl,%eax
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
orl _cml, %eax /* add cml to cpl */
#endif
splz_next:
@ -130,14 +126,19 @@ splz_next:
splz_unpend:
bsfl %ecx,%ecx
lock
btrl %ecx, _ipending
btrl %ecx, _ipending
jnc splz_next
/*
* HWIs: will JUMP thru *_vec[], see comments below.
* SWIs: setup CALL of swi_tty, swi_net, _softclock, swi_ast.
*/
movl ihandlers(,%ecx,4),%edx
testl %edx,%edx
je splz_next /* "can't happen" */
cmpl $NHWI,%ecx
jae splz_swi
AICPL_UNLOCK
/*
* We would prefer to call the intr handler directly here but that
* doesn't work for badly behaved handlers that want the interrupt
@ -146,7 +147,7 @@ splz_unpend:
* determined at config time.
*
* The vec[] routines build the proper frame on the stack,
* then call one of _Xintr0 thru _Xintr23
* then call one of _Xintr0 thru _XintrNN.
*/
jmp *_vec(,%ecx,4)
@ -169,6 +170,8 @@ splz_swi:
* of from here, so that system profiling works.
* XXX do this more generally (for all vectors; look up the C entry point).
* XXX frame bogusness stops us from just jumping to the C entry point.
* We have to clear iactive since this is an unpend call, and it will be
* set from the time of the original INT.
*/
/*

View File

@ -1,28 +1,15 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $Id: apic_vector.s,v 1.33 1997/08/30 01:23:40 smp Exp smp $
* $Id: apic_vector.s,v 1.37 1997/09/07 19:23:45 smp Exp smp $
*/
#include <machine/apic.h>
#include <machine/smp.h>
#include <machine/smptests.h> /** various things... */
#include "i386/isa/intr_machdep.h"
#ifdef REAL_AVCPL
#define AVCPL_LOCK CPL_LOCK
#define AVCPL_UNLOCK CPL_UNLOCK
#else /* REAL_AVCPL */
#define AVCPL_LOCK
#define AVCPL_UNLOCK
#endif /* REAL_AVCPL */
#ifdef FAST_SIMPLELOCK
#define GET_FAST_INTR_LOCK \
@ -185,17 +172,17 @@ IDTVEC(vec_name) ; \
* and the EOI cycle would cause redundant INTs to occur.
*/
#define MASK_LEVEL_IRQ(irq_num) \
IMASK_LOCK ; /* into critical reg */ \
testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
jz 8f ; /* edge, don't mask */ \
IMASK_LOCK ; /* into critical reg */ \
orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
movl _ioapic, %ecx ; /* ioapic[0] addr */ \
movl $REDTBL_IDX(irq_num), (%ecx) ; /* write the index */ \
movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
orl $IOART_INTMASK, %eax ; /* set the mask */ \
movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
8: ; \
IMASK_UNLOCK
IMASK_UNLOCK ; \
8:
/*
 * Test to see if the source is currently masked, clear if so.
@ -214,10 +201,24 @@ IDTVEC(vec_name) ; \
IMASK_UNLOCK
#ifdef INTR_SIMPLELOCK
#define ENLOCK
#define DELOCK
#define LATELOCK call _get_isrlock
#else
#define ENLOCK \
ISR_TRYLOCK ; /* XXX this is going away... */ \
testl %eax, %eax ; /* did we get it? */ \
jz 1f
#define DELOCK ISR_RELLOCK
#define LATELOCK
#endif
#ifdef CPL_AND_CML
#define INTR(irq_num, vec_name) \
.text ; \
SUPERALIGN_TEXT ; \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
IDTVEC(vec_name) ; \
PUSH_FRAME ; \
movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
@ -228,9 +229,7 @@ IDTVEC(vec_name) ; \
btsl $(irq_num), iactive ; /* lazy masking */ \
jc 1f ; /* already active */ \
; \
ISR_TRYLOCK ; /* XXX this is going away... */ \
testl %eax, %eax ; /* did we get it? */ \
jz 1f ; /* no */ \
ENLOCK ; \
; \
AVCPL_LOCK ; /* MP-safe */ \
testl $IRQ_BIT(irq_num), _cpl ; \
@ -242,6 +241,8 @@ IDTVEC(vec_name) ; \
; \
movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
; \
/* entry point used by doreti_unpend for HWIs. */ \
__CONCAT(Xresume,irq_num): ; \
FAKE_MCOUNT(12*4(%esp)) ; /* XXX avoid dbl cnt */ \
lock ; incl _cnt+V_INTR ; /* tally interrupts */ \
@ -256,15 +257,18 @@ __CONCAT(Xresume,irq_num): ; \
AVCPL_UNLOCK ; \
; \
pushl _intr_unit + (irq_num) * 4 ; \
incl _inside_intr ; \
sti ; \
call *_intr_handler + (irq_num) * 4 ; \
cli ; \
decl _inside_intr ; \
; \
lock ; andl $~IRQ_BIT(irq_num), iactive ; \
lock ; andl $~IRQ_BIT(irq_num), _cil ; \
UNMASK_IRQ(irq_num) ; \
sti ; /* doreti repeats cli/sti */ \
MEXITCOUNT ; \
LATELOCK ; \
jmp _doreti ; \
; \
ALIGN_TEXT ; \
@ -282,14 +286,15 @@ __CONCAT(Xresume,irq_num): ; \
ALIGN_TEXT ; \
2: ; /* masked by cpl|cml */ \
AVCPL_UNLOCK ; \
ISR_RELLOCK ; /* XXX this is going away... */ \
DELOCK ; /* XXX this is going away... */ \
jmp 1b
#else /* INTR_SIMPLELOCK */
#else /* CPL_AND_CML */
#define INTR(irq_num, vec_name) \
.text ; \
SUPERALIGN_TEXT ; \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
IDTVEC(vec_name) ; \
PUSH_FRAME ; \
movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
@ -311,6 +316,8 @@ IDTVEC(vec_name) ; \
; \
movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
; \
/* entry point used by doreti_unpend for HWIs. */ \
__CONCAT(Xresume,irq_num): ; \
FAKE_MCOUNT(12*4(%esp)) ; /* XXX avoid dbl cnt */ \
lock ; incl _cnt+V_INTR ; /* tally interrupts */ \
@ -353,7 +360,7 @@ __CONCAT(Xresume,irq_num): ; \
ISR_RELLOCK ; /* XXX this is going away... */ \
jmp 1b
#endif /* INTR_SIMPLELOCK */
#endif /* CPL_AND_CML */
/*
@ -487,14 +494,26 @@ MCOUNT_LABEL(bintr)
MCOUNT_LABEL(eintr)
.data
ihandlers: /* addresses of interrupt handlers */
/* actually resumption addresses for HWI's */
/*
* Addresses of interrupt handlers.
* XresumeNN: Resumption addresses for HWIs.
*/
ihandlers:
/*
* used by:
* ipl.s: doreti_unpend
*/
.long Xresume0, Xresume1, Xresume2, Xresume3
.long Xresume4, Xresume5, Xresume6, Xresume7
.long Xresume8, Xresume9, Xresume10, Xresume11
.long Xresume12, Xresume13, Xresume14, Xresume15
.long Xresume16, Xresume17, Xresume18, Xresume19
.long Xresume20, Xresume21, Xresume22, Xresume23
/*
* used by:
* ipl.s: doreti_unpend
* apic_ipl.s: splz_unpend
*/
.long swi_tty, swi_net
.long 0, 0, 0, 0
.long _softclock, swi_ast
@ -506,6 +525,12 @@ imasks: /* masks for interrupt handlers */
.long 0, 0, 0, 0
.long SWI_CLOCK_MASK, SWI_AST_MASK
/*
* IDT vector entry points for the HWIs.
*
* used by:
* i386/isa/clock.c: setup Xintr8254
*/
.globl _ivectors
_ivectors:
.long _Xintr0, _Xintr1, _Xintr2, _Xintr3

View File

@ -36,24 +36,10 @@
*
* @(#)ipl.s
*
* $Id: ipl.s,v 1.16 1997/08/28 09:51:32 smp Exp smp $
* $Id: ipl.s,v 1.21 1997/09/07 21:47:45 smp Exp smp $
*/
#ifdef REAL_ICPL
#define ICPL_LOCK CPL_LOCK
#define ICPL_UNLOCK CPL_UNLOCK
#define FAST_ICPL_UNLOCK movl $0, _cpl_lock
#else /* REAL_ICPL */
#define ICPL_LOCK
#define ICPL_UNLOCK
#define FAST_ICPL_UNLOCK
#endif /* REAL_ICPL */
/*
* AT/386
* Vector interrupt control section
@ -124,9 +110,10 @@ doreti_next:
movl %eax, %edx /* preserve cpl while getting lock */
ICPL_LOCK
movl %edx, %eax
doreti_next2:
#endif
movl %eax,%ecx
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
orl _cpl, %ecx /* add cpl to cml */
#endif
notl %ecx /* set bit = unmasked level */
@ -136,7 +123,7 @@ doreti_next:
andl _ipending,%ecx /* set bit = unmasked pending INT */
jne doreti_unpend
doreti_exit:
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
movl %eax, _cml
#else
movl %eax,_cpl
@ -145,8 +132,8 @@ doreti_exit:
MPLOCKED decb _intr_nesting_level
MEXITCOUNT
#ifdef VM86
#ifdef INTR_SIMPLELOCK
/* XXX INTR_SIMPLELOCK needs work */
#ifdef CPL_AND_CML
/* XXX CPL_AND_CML needs work */
#error not ready for vm86
#endif
/*
@ -170,7 +157,7 @@ doreti_stop:
#ifdef SMP
#ifdef INTR_SIMPLELOCK
/**#error code needed here to decide which lock to release, INTR or giant*/
#error code needed here to decide which lock to release, INTR or giant
#endif
/* release the kernel lock */
pushl $_mp_lock /* GIANT_LOCK */
@ -217,21 +204,28 @@ doreti_unpend:
/* we enter with cpl locked */
bsfl %ecx, %ecx /* slow, but not worth optimizing */
btrl %ecx, _ipending
jnc doreti_next2 /* some intr cleared memory copy */
cmpl $NHWI, %ecx
jae no_cil
btsl %ecx, _cil
no_cil:
FAST_ICPL_UNLOCK /* preserves %eax */
sti /* late to prevent INT deadlock */
#else
sti
bsfl %ecx,%ecx /* slow, but not worth optimizing */
btrl %ecx,_ipending
#endif /* SMP */
jnc doreti_next /* some intr cleared memory copy */
#endif /* SMP */
/*
* setup call to _Xresume0 thru _Xresume23 for hwi,
* or swi_tty, swi_net, _softclock, swi_ast for swi.
* Setup JUMP to _Xresume0 thru _Xresume23 for HWIs,
* or
* Setup CALL of swi_tty, swi_net, _softclock, swi_ast for SWIs.
*/
movl ihandlers(,%ecx,4),%edx
testl %edx,%edx
/* XXX SMP this would leave cil set: */
je doreti_next /* "can't happen" */
cmpl $NHWI,%ecx
jae doreti_swi
@ -239,7 +233,7 @@ doreti_unpend:
#ifdef SMP
pushl %eax /* preserve %eax */
ICPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
popl _cml
#else
popl _cpl
@ -268,7 +262,7 @@ doreti_swi:
cli /* prevent INT deadlock */
pushl %eax /* save cpl|cmpl */
ICPL_LOCK
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
popl _cml /* restore cml */
#else
popl _cpl /* restore cpl */
@ -293,7 +287,7 @@ swi_ast_user:
movb $0,_intr_nesting_level /* finish becoming a trap handler */
call _trap
subl %eax,%eax /* recover cpl|cml */
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
movl %eax, _cpl
#endif
movb $1,_intr_nesting_level /* for doreti_next to decrement */
@ -321,7 +315,7 @@ swi_ast_phantom:
orl $SWI_AST_PENDING, _ipending
/* cpl is unlocked in doreti_exit */
subl %eax,%eax
#ifdef INTR_SIMPLELOCK
#ifdef CPL_AND_CML
movl %eax, _cpl
#endif
jmp doreti_exit /* SWI_AST is highest so we must be done */

View File

@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: ipl_funcs.c,v 1.6 1997/08/29 18:45:23 fsmp Exp $
* $Id: ipl_funcs.c,v 1.14 1997/09/05 20:22:10 smp Exp smp $
*/
#include <sys/types.h>
@ -97,26 +97,24 @@ splx(unsigned ipl)
#else /* !SMP */
#include <machine/smp.h>
extern int bspEarly; /* XXX */
#include <machine/smptests.h>
#ifdef REAL_IFCPL
#ifndef SPL_DEBUG_POSTCODE
#undef POSTCODE
#undef POSTCODE_LO
#undef POSTCODE_HI
#define POSTCODE(X)
#define POSTCODE_LO(X)
#define POSTCODE_HI(X)
#endif /* SPL_DEBUG_POSTCODE */
#define IFCPL_LOCK() SCPL_LOCK()
#define IFCPL_UNLOCK() SCPL_UNLOCK()
#else /* REAL_IFCPL */
#define IFCPL_LOCK()
#define IFCPL_UNLOCK()
#endif /* REAL_IFCPL */
/*
* The volatile bitmap variables must be set atomically. This normally
* involves using a machine-dependent bit-set or `or' instruction.
*/
#define DO_SETBITS(name, var, bits) \
#define DO_SETBITS(name, var, bits) \
void name(void) \
{ \
IFCPL_LOCK(); \
@ -142,90 +140,185 @@ softclockpending(void)
x = ipending & SWI_CLOCK_PENDING;
IFCPL_UNLOCK();
return x;
return (x);
}
#ifdef notneeded
#define GENSPL(name, set_cpl) \
unsigned name(void) \
{ \
unsigned x; \
\
IFCPL_LOCK(); \
x = cpl; \
/* XXX test cil */ \
set_cpl; \
IFCPL_UNLOCK(); \
\
return (x); \
}
#endif /* notneeded */
/*
* This version has to check for bsp_apic_ready,
* as calling simple_lock() (ie ss_lock) before then deadlocks the system.
* A sample count of GENSPLR calls before bsp_apic_ready was set: 2193
* A sample count of GENSPL calls before bsp_apic_ready was set: 2193
*/
#define GENSPLR(name, set_cpl) \
unsigned name(void) \
#ifdef INTR_SPL
#ifdef SPL_DEBUG
#define MAXZ 100000000
#define SPIN_COUNT unsigned z = 0;
#define SPIN_SPL \
if (++z >= MAXZ) { \
bsp_apic_ready = 0; \
panic("\ncil: 0x%08x", cil); \
}
#else /* SPL_DEBUG */
#define SPIN_COUNT
#define SPIN_SPL
#endif /* SPL_DEBUG */
#endif
#ifdef INTR_SPL
#define GENSPL(NAME, OP, MODIFIER, PC) \
unsigned NAME(void) \
{ \
unsigned x, y; \
SPIN_COUNT; \
\
if (!bsp_apic_ready) { \
x = cpl; \
cpl OP MODIFIER; \
return (x); \
} \
\
for (;;) { \
IFCPL_LOCK(); /* MP-safe */ \
x = y = cpl; /* current value */ \
POSTCODE(0x20 | PC); \
if (inside_intr) \
break; /* XXX only 1 INT allowed */ \
y OP MODIFIER; /* desired value */ \
if (cil & y) { /* not now */ \
IFCPL_UNLOCK(); /* allow cil to change */ \
while (cil & y) \
SPIN_SPL \
continue; /* try again */ \
} \
break; \
} \
cpl OP MODIFIER; /* make the change */ \
IFCPL_UNLOCK(); \
\
return (x); \
}
/* NAME: OP: MODIFIER: PC: */
GENSPL(splbio, |=, bio_imask, 2)
GENSPL(splclock, =, HWI_MASK | SWI_MASK, 3)
GENSPL(splhigh, =, HWI_MASK | SWI_MASK, 4)
GENSPL(splimp, |=, net_imask, 5)
GENSPL(splnet, |=, SWI_NET_MASK, 6)
GENSPL(splsoftclock, =, SWI_CLOCK_MASK, 7)
GENSPL(splsofttty, |=, SWI_TTY_MASK, 8)
GENSPL(splstatclock, |=, stat_imask, 9)
GENSPL(spltty, |=, tty_imask, 10)
GENSPL(splvm, |=, net_imask | bio_imask, 11)
#else /* INTR_SPL */
#define GENSPL(NAME, set_cpl) \
unsigned NAME(void) \
{ \
unsigned x; \
\
if (bsp_apic_ready) \
if (!bsp_apic_ready) { \
x = cpl; \
set_cpl; \
} \
else { \
IFCPL_LOCK(); \
x = cpl; \
/* XXX test cil */ \
set_cpl; \
if (bsp_apic_ready) \
x = cpl; \
set_cpl; \
IFCPL_UNLOCK(); \
} \
\
return (x); \
}
GENSPLR(splbio, cpl |= bio_imask)
GENSPLR(splclock, cpl = HWI_MASK | SWI_MASK)
GENSPLR(splhigh, cpl = HWI_MASK | SWI_MASK)
GENSPLR(splimp, cpl |= net_imask)
GENSPLR(splnet, cpl |= SWI_NET_MASK)
GENSPLR(splsoftclock, cpl = SWI_CLOCK_MASK)
GENSPLR(splsofttty, cpl |= SWI_TTY_MASK)
GENSPLR(splstatclock, cpl |= stat_imask)
GENSPLR(spltty, cpl |= tty_imask)
GENSPLR(splvm, cpl |= net_imask | bio_imask)
GENSPL(splbio, cpl |= bio_imask)
GENSPL(splclock, cpl = HWI_MASK | SWI_MASK)
GENSPL(splhigh, cpl = HWI_MASK | SWI_MASK)
GENSPL(splimp, cpl |= net_imask)
GENSPL(splnet, cpl |= SWI_NET_MASK)
GENSPL(splsoftclock, cpl = SWI_CLOCK_MASK)
GENSPL(splsofttty, cpl |= SWI_TTY_MASK)
GENSPL(splstatclock, cpl |= stat_imask)
GENSPL(spltty, cpl |= tty_imask)
GENSPL(splvm, cpl |= net_imask | bio_imask)
#endif /* INTR_SPL */
void
spl0(void)
{
IFCPL_LOCK();
int unpend;
#ifdef INTR_SPL
SPIN_COUNT;
/* XXX test cil */
cpl = SWI_AST_MASK;
if (ipending & ~SWI_AST_MASK) {
IFCPL_UNLOCK();
splz();
for (;;) {
IFCPL_LOCK();
POSTCODE_HI(0xc);
if (cil & SWI_AST_MASK) { /* not now */
IFCPL_UNLOCK(); /* allow cil to change */
while (cil & SWI_AST_MASK)
SPIN_SPL
continue; /* try again */
}
break;
}
else
IFCPL_UNLOCK();
#else /* INTR_SPL */
IFCPL_LOCK();
#endif /* INTR_SPL */
cpl = SWI_AST_MASK;
unpend = ipending & ~SWI_AST_MASK;
IFCPL_UNLOCK();
if (unpend && !inside_intr)
splz();
}
void
splx(unsigned ipl)
{
if (bsp_apic_ready)
IFCPL_LOCK();
int unpend;
#ifdef INTR_SPL
SPIN_COUNT;
#endif
/* XXX test cil */
cpl = ipl;
if (ipending & ~ipl) {
if (bsp_apic_ready)
IFCPL_UNLOCK();
splz();
if (!bsp_apic_ready) {
cpl = ipl;
if (ipending & ~ipl)
splz();
return;
}
else
if (bsp_apic_ready)
IFCPL_UNLOCK();
#ifdef INTR_SPL
for (;;) {
IFCPL_LOCK();
POSTCODE_HI(0xe);
if (inside_intr)
break; /* XXX only 1 INT allowed */
POSTCODE_HI(0xf);
if (cil & ipl) { /* not now */
IFCPL_UNLOCK(); /* allow cil to change */
while (cil & ipl)
SPIN_SPL
continue; /* try again */
}
break;
}
#else /* INTR_SPL */
IFCPL_LOCK();
#endif /* INTR_SPL */
cpl = ipl;
unpend = ipending & ~ipl;
IFCPL_UNLOCK();
if (unpend && !inside_intr)
splz();
}
@ -243,15 +336,29 @@ splx(unsigned ipl)
intrmask_t
splq(intrmask_t mask)
{
intrmask_t tmp;
intrmask_t tmp, tmp2;
#ifdef INTR_SPL
for (;;) {
IFCPL_LOCK();
tmp = tmp2 = cpl;
tmp2 |= mask;
if (cil & tmp2) { /* not now */
IFCPL_UNLOCK(); /* allow cil to change */
while (cil & tmp2)
/* spin */ ;
continue; /* try again */
}
break;
}
cpl = tmp2;
#else /* INTR_SPL */
IFCPL_LOCK();
tmp = cpl;
cpl |= mask;
#endif /* INTR_SPL */
IFCPL_UNLOCK();
return (tmp);
}

View File

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.49 1997/08/31 03:17:47 fsmp Exp $
* $Id: mp_machdep.c,v 1.38 1997/09/05 20:23:34 smp Exp smp $
*/
#include "opt_smp.h"
@ -195,14 +195,18 @@ typedef struct BASETABLE_ENTRY {
*/
#define MP_BOOTADDRESS_POST 0x10
#define MP_PROBE_POST 0x11
#define MP_START_POST 0x12
#define MP_ANNOUNCE_POST 0x13
#define MPTABLE_PASS1_POST 0x14
#define MPTABLE_PASS1_POST 0x12
#define MP_START_POST 0x13
#define MP_ENABLE_POST 0x14
#define MPTABLE_PASS2_POST 0x15
#define MP_ENABLE_POST 0x16
#define START_ALL_APS_POST 0x17
#define INSTALL_AP_TRAMP_POST 0x18
#define START_AP_POST 0x19
#define START_ALL_APS_POST 0x16
#define INSTALL_AP_TRAMP_POST 0x17
#define START_AP_POST 0x18
#define MP_ANNOUNCE_POST 0x19
/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int current_postcode;
@ -1445,7 +1449,7 @@ default_mp_table(int type)
/* critical region around IO APIC, apic_imen */
struct simplelock imen_lock;
/* critical region around splxx(), cpl, cil, ipending */
/* critical region around splxx(), cpl, cml, cil, ipending */
struct simplelock cpl_lock;
/* Make FAST_INTR() routines sequential */

View File

@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* $Id: smp.h,v 1.30 1997/08/26 18:10:37 peter Exp $
* $Id: smp.h,v 1.27 1997/09/05 18:08:57 smp Exp smp $
*
*/
@ -167,6 +167,7 @@ extern volatile int smp_idle_loops;
/* 'private' global data in locore.s */
extern volatile u_int cpuid;
extern volatile u_int cpu_lockid;
extern int inside_intr;
extern volatile u_int other_cpus;
#endif /* !LOCORE */