When entering the apic version of the slow interrupt handler, level
interrupts are masked, and EOI is sent iff the corresponding ISR bit
is set in the local apic. If the CPU cannot obtain the interrupt
service lock (currently the global kernel lock), the interrupt is
forwarded to the CPU holding that lock.

Clock interrupts now have higher priority than other slow interrupts.
Tor Egge 1998-03-03 22:56:30 +00:00
parent 3163861c7b
commit 02c1dc3bbc
23 changed files with 1520 additions and 186 deletions
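In rough C terms, the path described above (together with the INTR macro changes in apic_vector.s below) can be sketched as follows. This is an illustrative summary only; none of these function names exist in the tree, and the iactive/retry handling is omitted.

/*
 * Illustrative sketch (not part of the commit) of the per-IRQ decision
 * made by the new slow interrupt entry code.  All names are made up.
 */
void
slow_intr_entry(int irq)
{
	if (level_triggered(irq))
		mask_irq(irq);		/* source is not cleared, so mask it */
	if (lapic_isr_bit_set(irq))
		send_eoi();		/* EOI iff the ISR bit is set */

	if (try_isr_lock()) {		/* currently the global kernel lock */
		run_handler(irq);
		unmask_irq(irq);	/* then return via doreti */
	} else {
		mark_pending(irq);	/* record it in ipending */
		if (!masked_by_cpl(irq))
			forward_irq();	/* IPI the CPU holding the lock */
	}
}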

apic_vector.s

@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $Id: apic_vector.s,v 1.25 1998/01/15 07:33:58 gibbs Exp $
* $Id: apic_vector.s,v 1.26 1998/03/03 20:55:24 tegge Exp $
*/
@ -166,6 +166,18 @@ IDTVEC(vec_name) ; \
popal ; \
addl $4+4,%esp
#define MASK_IRQ(irq_num) \
IMASK_LOCK ; /* into critical reg */ \
testl $IRQ_BIT(irq_num), _apic_imen ; \
jne 7f ; /* masked, don't mask */ \
orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
movl _ioapic, %ecx ; /* ioapic[0] addr */ \
movl $REDTBL_IDX(irq_num), (%ecx) ; /* write the index */ \
movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
orl $IOART_INTMASK, %eax ; /* set the mask */ \
movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
7: ; /* already masked */ \
IMASK_UNLOCK
/*
* Test to see whether we are handling an edge or level triggered INT.
* Level-triggered INTs must still be masked as we don't clear the source,
@ -173,36 +185,45 @@ IDTVEC(vec_name) ; \
*/
#define MASK_LEVEL_IRQ(irq_num) \
testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
jz 8f ; /* edge, don't mask */ \
IMASK_LOCK ; /* into critical reg */ \
orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
movl _ioapic, %ecx ; /* ioapic[0] addr */ \
movl $REDTBL_IDX(irq_num), (%ecx) ; /* write the index */ \
movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \
orl $IOART_INTMASK, %eax ; /* set the mask */ \
movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \
IMASK_UNLOCK ; \
8:
jz 9f ; /* edge, don't mask */ \
MASK_IRQ(irq_num) ; \
9:
#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num) \
movl _apic_isrbit_location + 8 * (irq_num), %eax ; \
movl (%eax), %eax ; \
testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
jz 9f ; /* not active */ \
movl $0, lapic_eoi ; \
APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
9:
#else
#define EOI_IRQ(irq_num) \
testl $IRQ_BIT(irq_num), lapic_isr1; \
jz 9f ; /* not active */ \
movl $0, lapic_eoi; \
APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
9:
#endif
/*
* Test to see if the source is currently masked, clear if so.
*/
#define UNMASK_IRQ(irq_num) \
IMASK_LOCK ; /* into critical reg */ \
testl $IRQ_BIT(irq_num), _apic_imen ; \
jne 7f ; /* bit set, masked */ \
testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
jz 9f ; /* edge, don't EOI */ \
movl $0, lapic_eoi ; /* should be safe */ \
jmp 9f ; /* skip unmasking */ \
7: \
je 7f ; /* bit clear, not masked */ \
andl $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */ \
movl _ioapic,%ecx ; /* ioapic[0]addr */ \
movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \
movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \
andl $~IOART_INTMASK,%eax ; /* clear the mask */ \
movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \
9: ; \
7: ; \
IMASK_UNLOCK
#ifdef INTR_SIMPLELOCK
@ -213,11 +234,75 @@ IDTVEC(vec_name) ; \
#define ENLOCK \
ISR_TRYLOCK ; /* XXX this is going away... */ \
testl %eax, %eax ; /* did we get it? */ \
jz 1f
jz 3f
#define DELOCK ISR_RELLOCK
#define LATELOCK
#endif
#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
pushf
cli
pushl $CNAME(apic_itrace_debuglock)
call _s_lock_np
addl $4, %esp
movl CNAME(apic_itrace_debugbuffer_idx), %ecx
andl $32767, %ecx
movl _cpuid, %eax
shll $8, %eax
orl 8(%esp), %eax
movw %ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
incl %ecx
andl $32767, %ecx
movl %ecx, CNAME(apic_itrace_debugbuffer_idx)
pushl $CNAME(apic_itrace_debuglock)
call _s_unlock_np
addl $4, %esp
popf
ret
#define APIC_ITRACE(name, irq_num, id) \
lock ; /* MP-safe */ \
incl CNAME(name) + (irq_num) * 4 ; \
pushl %eax ; \
pushl %ecx ; \
pushl %edx ; \
movl $(irq_num), %eax ; \
cmpl $APIC_INTR_DIAGNOSTIC_IRQ, %eax ; \
jne 7f ; \
pushl $id ; \
call log_intr_event ; \
addl $4, %esp ; \
7: ; \
popl %edx ; \
popl %ecx ; \
popl %eax
#else
#define APIC_ITRACE(name, irq_num, id) \
lock ; /* MP-safe */ \
incl CNAME(name) + (irq_num) * 4
#endif
#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13
#else
#define APIC_ITRACE(name, irq_num, id)
#endif
#ifdef CPL_AND_CML
#define INTR(irq_num, vec_name) \
@ -230,12 +315,18 @@ IDTVEC(vec_name) ; \
movl %ax, %ds ; \
movl %ax, %es ; \
; \
APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
lock ; /* MP-safe */ \
btsl $(irq_num), iactive ; /* lazy masking */ \
jc 1f ; /* already active */ \
; \
MASK_LEVEL_IRQ(irq_num) ; \
EOI_IRQ(irq_num) ; \
0: ; \
APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
ENLOCK ; \
; \
APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
AVCPL_LOCK ; /* MP-safe */ \
testl $IRQ_BIT(irq_num), _cpl ; \
jne 2f ; /* this INT masked */ \
@ -244,7 +335,6 @@ IDTVEC(vec_name) ; \
orl $IRQ_BIT(irq_num), _cil ; \
AVCPL_UNLOCK ; \
; \
;;; movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
; \
/* entry point used by doreti_unpend for HWIs. */ \
@ -263,39 +353,67 @@ __CONCAT(Xresume,irq_num): ; \
; \
pushl _intr_unit + (irq_num) * 4 ; \
incl _inside_intr ; \
APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
sti ; \
call *_intr_handler + (irq_num) * 4 ; \
cli ; \
APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
decl _inside_intr ; \
; \
lock ; andl $~IRQ_BIT(irq_num), iactive ; \
lock ; andl $~IRQ_BIT(irq_num), _cil ; \
UNMASK_IRQ(irq_num) ; \
APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ; \
sti ; /* doreti repeats cli/sti */ \
MEXITCOUNT ; \
LATELOCK ; \
jmp _doreti ; \
; \
ALIGN_TEXT ; \
1: ; /* active or locked */ \
MASK_LEVEL_IRQ(irq_num) ; \
movl $0, lapic_eoi ; /* do the EOI */ \
; \
1: ; /* active */ \
APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ; \
AVCPL_LOCK ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
AVCPL_UNLOCK ; \
; \
MASK_IRQ(irq_num) ; \
EOI_IRQ(irq_num) ; \
btsl $(irq_num), iactive ; /* still active */ \
jnc 0b ; /* retry */ \
POP_FRAME ; \
iret ; \
; \
ALIGN_TEXT ; \
2: ; /* masked by cpl|cml */ \
APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
orl $IRQ_BIT(irq_num), _ipending ; \
AVCPL_UNLOCK ; \
DELOCK ; /* XXX this is going away... */ \
jmp 1b
POP_FRAME ; \
iret ; \
ALIGN_TEXT ; \
3: ; /* other cpu has isr lock */ \
APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
AVCPL_LOCK ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
testl $IRQ_BIT(irq_num), _cpl ; \
jne 4f ; /* this INT masked */ \
testl $IRQ_BIT(irq_num), _cml ; \
jne 4f ; /* this INT masked */ \
orl $IRQ_BIT(irq_num), _cil ; \
AVCPL_UNLOCK ; \
call forward_irq ; /* forward irq to lock holder */ \
POP_FRAME ; /* and return */ \
iret ; \
ALIGN_TEXT ; \
4: ; /* blocked */ \
APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
AVCPL_UNLOCK ; \
POP_FRAME ; /* and return */ \
iret
#else /* CPL_AND_CML */
#define INTR(irq_num, vec_name) \
.text ; \
SUPERALIGN_TEXT ; \
@ -306,20 +424,25 @@ IDTVEC(vec_name) ; \
movl %ax, %ds ; \
movl %ax, %es ; \
; \
APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
lock ; /* MP-safe */ \
btsl $(irq_num), iactive ; /* lazy masking */ \
jc 1f ; /* already active */ \
; \
MASK_LEVEL_IRQ(irq_num) ; \
EOI_IRQ(irq_num) ; \
0: ; \
APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
ISR_TRYLOCK ; /* XXX this is going away... */ \
testl %eax, %eax ; /* did we get it? */ \
jz 1f ; /* no */ \
jz 3f ; /* no */ \
; \
APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
AVCPL_LOCK ; /* MP-safe */ \
testl $IRQ_BIT(irq_num), _cpl ; \
jne 2f ; /* this INT masked */ \
AVCPL_UNLOCK ; \
; \
;;; movl $0, lapic_eoi ; /* XXX too soon? */ \
incb _intr_nesting_level ; \
; \
/* entry point used by doreti_unpend for HWIs. */ \
@ -334,36 +457,60 @@ __CONCAT(Xresume,irq_num): ; \
pushl %eax ; \
orl _intr_mask + (irq_num) * 4, %eax ; \
movl %eax, _cpl ; \
andl $~IRQ_BIT(irq_num), _ipending ; \
AVCPL_UNLOCK ; \
; \
pushl _intr_unit + (irq_num) * 4 ; \
APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
sti ; \
call *_intr_handler + (irq_num) * 4 ; \
cli ; \
APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
; \
lock ; andl $~IRQ_BIT(irq_num), iactive ; \
UNMASK_IRQ(irq_num) ; \
APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ; \
sti ; /* doreti repeats cli/sti */ \
MEXITCOUNT ; \
jmp _doreti ; \
; \
ALIGN_TEXT ; \
1: ; /* active or locked */ \
MASK_LEVEL_IRQ(irq_num) ; \
movl $0, lapic_eoi ; /* do the EOI */ \
; \
1: ; /* active */ \
APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ; \
AVCPL_LOCK ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
AVCPL_UNLOCK ; \
; \
MASK_IRQ(irq_num) ; \
EOI_IRQ(irq_num) ; \
btsl $(irq_num), iactive ; /* still active */ \
jnc 0b ; /* retry */ \
POP_FRAME ; \
iret ; \
; \
iret ; /* XXX: iactive bit might be 0 now */ \
ALIGN_TEXT ; \
2: ; /* masked by cpl */ \
2: ; /* masked by cpl, leave iactive set */ \
APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
orl $IRQ_BIT(irq_num), _ipending ; \
AVCPL_UNLOCK ; \
ISR_RELLOCK ; /* XXX this is going away... */ \
jmp 1b
POP_FRAME ; \
iret ; \
ALIGN_TEXT ; \
3: ; /* other cpu has isr lock */ \
APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
AVCPL_LOCK ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
testl $IRQ_BIT(irq_num), _cpl ; \
jne 4f ; /* this INT masked */ \
AVCPL_UNLOCK ; \
call forward_irq ; /* forward irq to lock holder */ \
POP_FRAME ; /* and return */ \
iret ; \
ALIGN_TEXT ; \
4: ; /* blocked */ \
APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
AVCPL_UNLOCK ; \
POP_FRAME ; /* and return */ \
iret
#endif /* CPL_AND_CML */
@ -515,6 +662,8 @@ _Xcpuast:
movl _cpl, %eax
#endif
pushl %eax
lock
orl $SWI_AST_PENDING, _ipending
AVCPL_UNLOCK
lock
incb _intr_nesting_level
@ -522,9 +671,6 @@ _Xcpuast:
pushl $0
lock
orl $SWI_AST_PENDING, _ipending
movl _cpuid, %eax
lock
btrl %eax, _checkstate_pending_ast
@ -536,6 +682,113 @@ _Xcpuast:
POP_FRAME
iret
/*
* Executed by a CPU when it receives an XFORWARD_IRQ IPI.
*/
.text
SUPERALIGN_TEXT
.globl _Xforward_irq
_Xforward_irq:
PUSH_FRAME
movl $KDSEL, %eax
movl %ax, %ds /* use KERNEL data segment */
movl %ax, %es
movl $0, lapic_eoi /* End Of Interrupt to APIC */
FAKE_MCOUNT(12*4(%esp))
ISR_TRYLOCK
testl %eax,%eax /* Did we get the lock ? */
jz 1f /* No */
lock
incl CNAME(forward_irq_hitcnt)
cmpb $4, _intr_nesting_level
jae 2f
jmp 3f
AVCPL_LOCK
#ifdef CPL_AND_CML
movl _cml, %eax
#else
movl _cpl, %eax
#endif
pushl %eax
AVCPL_UNLOCK
lock
incb _intr_nesting_level
sti
pushl $0
MEXITCOUNT
jmp _doreti /* Handle forwarded interrupt */
4:
lock
decb _intr_nesting_level
ISR_RELLOCK
MEXITCOUNT
addl $8, %esp
POP_FRAME
iret
1:
lock
incl CNAME(forward_irq_misscnt)
call forward_irq /* Oops, we've lost the isr lock */
MEXITCOUNT
POP_FRAME
iret
2:
lock
incl CNAME(forward_irq_toodeepcnt)
3:
ISR_RELLOCK
MEXITCOUNT
POP_FRAME
iret
/*
*
*/
forward_irq:
MCOUNT
cmpl $0,_invltlb_ok
jz 4f
cmpl $0, CNAME(forward_irq_enabled)
jz 4f
movl _mp_lock,%eax
cmpl $FREE_LOCK,%eax
jne 1f
movl $0, %eax /* Pick CPU #0 if no one has lock */
1:
shrl $24,%eax
movl _cpu_num_to_apic_id(,%eax,4),%ecx
shll $24,%ecx
movl lapic_icr_hi, %eax
andl $~APIC_ID_MASK, %eax
orl %ecx, %eax
movl %eax, lapic_icr_hi
2:
movl lapic_icr_lo, %eax
andl $APIC_DELSTAT_MASK,%eax
jnz 2b
movl lapic_icr_lo, %eax
andl $APIC_RESV2_MASK, %eax
orl $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
movl %eax, lapic_icr_lo
3:
movl lapic_icr_lo, %eax
andl $APIC_DELSTAT_MASK,%eax
jnz 3b
4:
ret
/*
* Executed by a CPU when it receives an Xcpustop IPI from another CPU,
@ -702,6 +955,16 @@ _checkstate_need_ast:
.long 0
_checkstate_pending_ast:
.long 0
.globl CNAME(forward_irq_misscnt)
.globl CNAME(forward_irq_toodeepcnt)
.globl CNAME(forward_irq_hitcnt)
CNAME(forward_irq_misscnt):
.long 0
CNAME(forward_irq_hitcnt):
.long 0
CNAME(forward_irq_toodeepcnt):
.long 0
.globl _apic_pin_trigger
_apic_pin_trigger:
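The forward_irq routine above picks the IPI target straight from the high byte of the MP lock word; the receiving CPU's _Xforward_irq handler then tries the ISR lock itself and re-forwards if it lost the race. A C rendering of the target selection, with the lapic_icr_hi/lapic_icr_lo busy-wait details left out and send_fixed_ipi() invented for illustration, might look like:

/*
 * Sketch only: how forward_irq chooses which CPU to interrupt.
 */
static void
forward_irq_sketch(void)
{
	int cpu;

	if (invltlb_ok == 0 || forward_irq_enabled == 0)
		return;			/* forwarding disabled */

	if (mp_lock == FREE_LOCK)
		cpu = 0;		/* nobody holds it: pick CPU #0 */
	else
		cpu = mp_lock >> 24;	/* holder's CPU id is in the top byte */

	send_fixed_ipi(cpu_num_to_apic_id[cpu], XFORWARD_IRQ_OFFSET);
}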

mp_machdep.c

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.67 1998/03/03 20:09:14 tegge Exp $
* $Id: mp_machdep.c,v 1.68 1998/03/03 20:55:25 tegge Exp $
*/
#include "opt_smp.h"
@ -246,6 +246,35 @@ extern int nkpt;
u_int32_t cpu_apic_versions[NCPU];
u_int32_t io_apic_versions[NAPIC];
#ifdef APIC_INTR_DIAGNOSTIC
int apic_itrace_enter[32];
int apic_itrace_tryisrlock[32];
int apic_itrace_gotisrlock[32];
int apic_itrace_active[32];
int apic_itrace_masked[32];
int apic_itrace_noisrlock[32];
int apic_itrace_masked2[32];
int apic_itrace_unmask[32];
int apic_itrace_noforward[32];
int apic_itrace_leave[32];
int apic_itrace_enter2[32];
int apic_itrace_doreti[32];
int apic_itrace_splz[32];
int apic_itrace_eoi[32];
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
unsigned short apic_itrace_debugbuffer[32768];
int apic_itrace_debugbuffer_idx;
struct simplelock apic_itrace_debuglock;
#endif
#endif
#ifdef APIC_INTR_REORDER
struct {
volatile int *location;
int bit;
} apic_isrbit_location[32];
#endif
/*
* APIC ID logical/physical mapping structures.
* We oversize these to simplify boot-time config.
@ -575,6 +604,10 @@ mp_enable(u_int boot_addr)
setidt(XCPUAST_OFFSET, Xcpuast,
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
/* install an inter-CPU IPI for interrupt forwarding */
setidt(XFORWARD_IRQ_OFFSET, Xforward_irq,
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
/* install an inter-CPU IPI for CPU stop/restart */
setidt(XCPUSTOP_OFFSET, Xcpustop,
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
@ -1537,6 +1570,10 @@ init_locks(void)
/* ISR uses its own "giant lock" */
isr_lock = FREE_LOCK;
#if defined(APIC_INTR_DIAGNOSTIC) && defined(APIC_INTR_DIAGNOSTIC_IRQ)
s_lock_init((struct simplelock*)&apic_itrace_debuglock);
#endif
s_lock_init((struct simplelock*)&mpintr_lock);
s_lock_init((struct simplelock*)&mcount_lock);
@ -1995,6 +2032,11 @@ int do_page_zero_idle = 1; /* bzero pages for fun and profit in idleloop */
SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW,
&do_page_zero_idle, 0, "");
/* Is forwarding of an interrupt to the CPU holding the ISR lock enabled? */
int forward_irq_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
&forward_irq_enabled, 0, "");
/* Enable forwarding of a signal to a process running on a different CPU */
int forward_signal_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
@ -2438,3 +2480,20 @@ forward_signal(struct proc *p)
return;
}
}
#ifdef APIC_INTR_REORDER
/*
* Maintain mapping from softintr vector to isr bit in local apic.
*/
void
set_lapic_isrloc(int intr, int vector)
{
if (intr < 0 || intr > 32)
panic("set_apic_isrloc: bad intr argument: %d",intr);
if (vector < ICU_OFFSET || vector > 255)
panic("set_apic_isrloc: bad vector argument: %d",vector);
apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
apic_isrbit_location[intr].bit = (1<<(vector & 31));
}
#endif
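set_lapic_isrloc() records, for each interrupt, the address of the local apic ISR word that holds its vector's bit together with the bit mask itself. The APIC_INTR_REORDER variant of EOI_IRQ in apic_vector.s consults this table so that EOI is written only when the bit is actually set. A minimal C equivalent of that check (send_eoi() is a stand-in for the write to lapic_eoi) would be:

/*
 * Sketch of what EOI_IRQ does with the table set_lapic_isrloc() fills in.
 */
static void
eoi_if_in_service(int irq)
{
	volatile int *isr = apic_isrbit_location[irq].location;

	if (*isr & apic_isrbit_location[irq].bit)
		send_eoi();		/* ack only if this IRQ is in service */
}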

smp.h

@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* $Id: smp.h,v 1.36 1998/03/03 19:44:34 tegge Exp $
* $Id: smp.h,v 1.37 1998/03/03 20:55:23 tegge Exp $
*
*/
@ -137,6 +137,9 @@ void forward_statclock __P((int pscnt));
void forward_hardclock __P((int pscnt));
#endif /* BETTER_CLOCK */
void forward_signal __P((struct proc *));
#ifdef APIC_INTR_REORDER
void set_lapic_isrloc __P((int, int));
#endif /* APIC_INTR_REORDER */
/* global data in mpapic.c */
extern volatile lapic_t lapic;

intr_machdep.c

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)isa.c 7.2 (Berkeley) 5/13/91
* $Id: intr_machdep.c,v 1.7 1997/09/28 15:48:34 mckay Exp $
* $Id: intr_machdep.c,v 1.8 1998/02/09 06:08:30 eivind Exp $
*/
#include "opt_auto_eoi.h"
@ -444,19 +444,30 @@ icu_setup(int intr, inthand2_t *handler, void *arg, u_int *maskptr, int flags)
vector = TPR_FAST_INTS + intr;
setidt(vector, fastintr[intr],
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
/*
* XXX MULTIPLE_IOAPICSXXX
* Reprogram the vector in the IO APIC.
*/
select = (intr * 2) + IOAPIC_REDTBL0;
value = io_apic_read(0, select) & ~IOART_INTVEC;
io_apic_write(0, select, value | vector);
}
else
setidt(TPR_SLOW_INTS + intr, slowintr[intr],
else {
vector = TPR_SLOW_INTS + intr;
#ifdef APIC_INTR_REORDER
#ifdef APIC_INTR_HIGHPRI_CLOCK
/* XXX: Hack (kludge?) for more accurate clock. */
if (intr == 0 || intr == 8) {
vector = TPR_FAST_INTS + intr;
}
#endif
#endif
setidt(vector, slowintr[intr],
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
}
#ifdef APIC_INTR_REORDER
set_lapic_isrloc(intr, vector);
#endif
/*
* XXX MULTIPLE_IOAPICSXXX
* Reprogram the vector in the IO APIC.
*/
select = (intr * 2) + IOAPIC_REDTBL0;
value = io_apic_read(0, select) & ~IOART_INTVEC;
io_apic_write(0, select, value | vector);
#else
setidt(ICU_OFFSET + intr,
flags & INTR_FAST ? fastintr[intr] : slowintr[intr],
@ -505,6 +516,9 @@ icu_unset(intr, handler)
setidt(flags & INTR_FAST ? TPR_FAST_INTS + intr : TPR_SLOW_INTS + intr,
slowintr[intr], SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#else /* FAST_HI */
#ifdef APIC_INTR_REORDER
set_lapic_isrloc(intr, ICU_OFFSET + intr);
#endif
setidt(ICU_OFFSET + intr, slowintr[intr], SDT_SYS386IGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
#endif /* FAST_HI */

intr_machdep.h

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* from: @(#)isa_device.h 7.1 (Berkeley) 5/9/91
* $Id: intr_machdep.h,v 1.9 1998/02/13 06:59:22 bde Exp $
* $Id: intr_machdep.h,v 1.10 1998/03/03 20:55:24 tegge Exp $
*/
#ifndef _I386_ISA_INTR_MACHDEP_H_
@ -116,6 +116,9 @@
/* IPI to generate an additional software trap at the target CPU */
#define XCPUAST_OFFSET (ICU_OFFSET + 48)
/* IPI to signal the CPU holding the ISR lock that another IRQ has appeared */
#define XFORWARD_IRQ_OFFSET (ICU_OFFSET + 49)
/* IPI to signal CPUs to stop and wait for another CPU to restart them */
#define XCPUSTOP_OFFSET (ICU_OFFSET + 128)
@ -174,6 +177,7 @@ inthand_t
Xcpucheckstate, /* Check cpu state */
#endif
Xcpuast, /* Additional software trap on other cpu */
Xforward_irq, /* Forward irq to cpu holding ISR lock */
Xcpustop, /* CPU stops & waits for another CPU to restart it */
Xspuriousint; /* handle APIC "spurious INTs" */

mp_apicdefs.s

@ -23,23 +23,24 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_apicdefs.s,v 1.1 1997/06/22 16:03:16 peter Exp $
* $Id: mp_apicdefs.s,v 1.2 1997/10/24 14:27:23 phk Exp $
*/
.globl lapic_eoi, lapic_svr, lapic_tpr, lapic_irr1, lapic_ver
.globl lapic_icr_lo,lapic_icr_hi,lapic_isr1
/*
* Do not clutter our namespace with these unless we need them
* outside the scope of locore.s
*/
#if 0
.globl lapic_id,lapic_ver,lapic_tpr,lapic_apr,lapic_ppr,lapic_eoi
.globl lapic_ldr,lapic_dfr,lapic_svr,lapic_isr,lapic_isr0,lapic_isr1
.globl lapic_ldr,lapic_dfr,lapic_svr,lapic_isr,lapic_isr0
.globl lapic_isr2,lapic_isr3,lapic_isr4,lapic_isr5,lapic_isr6
.globl lapic_isr7,lapic_tmr,lapic_tmr0,lapic_tmr1,lapic_tmr2
.globl lapic_tmr3,lapic_tmr4,lapic_tmr5,lapic_tmr6,lapic_tmr7
.globl lapic_irr,lapic_irr0,lapic_irr1,lapic_irr2,lapic_irr3
.globl lapic_irr4,lapic_irr5,lapic_irr6,lapic_irr7,lapic_esr
.globl lapic_icr_lo,lapic_icr_hi,lapic_lvtt,lapic_pcint,lapic_lvt1
.globl lapic_lvtt,lapic_pcint,lapic_lvt1
.globl lapic_lvt2,lapic_lvt3,lapic_ticr,lapic_tccr,lapic_tdcr
#endif
.set lapic_id, _lapic + 0x020

smptests.h

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: smptests.h,v 1.27 1997/09/07 23:06:15 fsmp Exp $
* $Id: smptests.h,v 1.28 1997/12/08 22:56:49 fsmp Exp $
*/
#ifndef _MACHINE_SMPTESTS_H_
@ -250,6 +250,37 @@
*/
#define GIANT_LOCK
#ifdef APIC_IO
/*
* Enable extra counters for some selected locations in the interrupt handlers.
* Look in apic_vector.s, apic_ipl.s and ipl.s for APIC_ITRACE or
* APIC_INTR_DIAGNOSTIC.
*/
#undef APIC_INTR_DIAGNOSTIC
/*
* Add extra tracking of a specific interrupt. Look in apic_vector.s,
* apic_ipl.s and ipl.s for APIC_ITRACE and log_intr_event.
* APIC_INTR_DIAGNOSTIC must be defined for this to work.
*/
#ifdef APIC_INTR_DIAGNOSTIC
#define APIC_INTR_DIAGNOSTIC_IRQ 17
#endif
/*
* Don't assume that slow interrupt handler X is called from vector
* X + ICU_OFFSET.
*/
#define APIC_INTR_REORDER
/*
* Redirect clock interrupts to a higher priority (fast intr) vector,
* while still using the slow interrupt handler. Only effective when
* APIC_INTR_REORDER is defined.
*/
#define APIC_INTR_HIGHPRI_CLOCK
#endif /* APIC_IO */
/*
* Misc. counters.
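The diagnostic switches added above are compile-time knobs in this header; as a purely hypothetical example, tracing IRQ 11 instead of the default would mean flipping the #undef and adjusting the gated define:

#define APIC_INTR_DIAGNOSTIC		/* enable the per-location counters */
#ifdef APIC_INTR_DIAGNOSTIC
#define APIC_INTR_DIAGNOSTIC_IRQ 11	/* log events for this IRQ only */
#endif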

apic_ipl.s

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: apic_ipl.s,v 1.16 1997/09/07 22:02:28 fsmp Exp $
* $Id: apic_ipl.s,v 1.17 1997/12/15 02:18:33 tegge Exp $
*/
@ -195,6 +195,7 @@ _vec8254:
lock /* MP-safe */
andl %eax, iactive
MEXITCOUNT
APIC_ITRACE(apic_itrace_splz, 0, APIC_ITRACE_SPLZ)
movl _Xintr8254, %eax
jmp %eax /* XXX might need _Xfastintr# */
@ -212,6 +213,7 @@ vec8:
lock /* MP-safe */
andl $~IRQ_BIT(8), iactive /* lazy masking */
MEXITCOUNT
APIC_ITRACE(apic_itrace_splz, 8, APIC_ITRACE_SPLZ)
jmp _Xintr8 /* XXX might need _Xfastintr8 */
/*
@ -229,6 +231,7 @@ __CONCAT(vec,irq_num): ; \
lock ; /* MP-safe */ \
andl $~IRQ_BIT(irq_num), iactive ; /* lazy masking */ \
MEXITCOUNT ; \
APIC_ITRACE(apic_itrace_splz, irq_num, APIC_ITRACE_SPLZ) ; \
jmp __CONCAT(_Xintr,irq_num)

AVCPL_UNLOCK ; \
; \
MASK_IRQ(irq_num) ; \
EOI_IRQ(irq_num) ; \
btsl $(irq_num), iactive ; /* still active */ \
jnc 0b ; /* retry */ \
POP_FRAME ; \
iret ; \
; \
iret ; /* XXX: iactive bit might be 0 now */ \
ALIGN_TEXT ; \
2: ; /* masked by cpl */ \
2: ; /* masked by cpl, leave iactive set */ \
APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
orl $IRQ_BIT(irq_num), _ipending ; \
AVCPL_UNLOCK ; \
ISR_RELLOCK ; /* XXX this is going away... */ \
jmp 1b
POP_FRAME ; \
iret ; \
ALIGN_TEXT ; \
3: ; /* other cpu has isr lock */ \
APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
AVCPL_LOCK ; /* MP-safe */ \
orl $IRQ_BIT(irq_num), _ipending ; \
testl $IRQ_BIT(irq_num), _cpl ; \
jne 4f ; /* this INT masked */ \
AVCPL_UNLOCK ; \
call forward_irq ; /* forward irq to lock holder */ \
POP_FRAME ; /* and return */ \
iret ; \
ALIGN_TEXT ; \
4: ; /* blocked */ \
APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
AVCPL_UNLOCK ; \
POP_FRAME ; /* and return */ \
iret
#endif /* CPL_AND_CML */
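
Taken together, the INTR() macro above implements the flow sketched below for a slow interrupt. This is hedged C pseudocode of the non-CPL_AND_CML variant only; every helper is a descriptive stand-in declared so the sketch compiles, the real work being done by the assembler macros (MASK_IRQ, EOI_IRQ, ISR_TRYLOCK, ...) and by _doreti.

extern int	test_and_set_iactive(int irq);	/* btsl into iactive, old bit */
extern int	isr_trylock(void);		/* ISR_TRYLOCK, 0 on failure */
extern void	isr_unlock(void);		/* ISR_RELLOCK */
extern int	irq_masked_by_cpl(int irq);	/* IRQ_BIT(irq) & _cpl */
extern void	set_ipending(int irq);
extern void	mask_irq(int irq);		/* MASK_IRQ */
extern void	mask_level_irq(int irq);	/* MASK_LEVEL_IRQ */
extern void	eoi_if_isr_set(int irq);	/* EOI_IRQ */
extern void	forward_irq_to_lock_holder(void);	/* forward_irq */
extern void	run_handler_and_doreti(int irq);	/* Xresume path + _doreti */

void
slow_intr_entry_sketch(int irq)
{
	if (test_and_set_iactive(irq)) {	/* lazy masking: already active */
		set_ipending(irq);
		mask_irq(irq);
		eoi_if_isr_set(irq);		/* EOI only if our ISR bit is set */
		if (test_and_set_iactive(irq) == 0)
			goto retry;		/* handler finished meanwhile */
		return;				/* leave it pending and masked */
	}
	mask_level_irq(irq);			/* level sources stay masked */
	eoi_if_isr_set(irq);
retry:
	if (!isr_trylock()) {			/* another CPU holds the ISR lock */
		set_ipending(irq);
		if (!irq_masked_by_cpl(irq))
			forward_irq_to_lock_holder();	/* XFORWARD_IRQ IPI */
		return;
	}
	if (irq_masked_by_cpl(irq)) {		/* blocked by current priority */
		set_ipending(irq);
		isr_unlock();
		return;
	}
	run_handler_and_doreti(irq);		/* run driver handler, UNMASK_IRQ */
}
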
@ -515,6 +662,8 @@ _Xcpuast:
movl _cpl, %eax
#endif
pushl %eax
lock
orl $SWI_AST_PENDING, _ipending
AVCPL_UNLOCK
lock
incb _intr_nesting_level
@ -522,9 +671,6 @@ _Xcpuast:
pushl $0
lock
orl $SWI_AST_PENDING, _ipending
movl _cpuid, %eax
lock
btrl %eax, _checkstate_pending_ast
@ -536,6 +682,113 @@ _Xcpuast:
POP_FRAME
iret
/*
* Executed by a CPU when it receives an XFORWARD_IRQ IPI.
*/
.text
SUPERALIGN_TEXT
.globl _Xforward_irq
_Xforward_irq:
PUSH_FRAME
movl $KDSEL, %eax
movl %ax, %ds /* use KERNEL data segment */
movl %ax, %es
movl $0, lapic_eoi /* End Of Interrupt to APIC */
FAKE_MCOUNT(12*4(%esp))
ISR_TRYLOCK
testl %eax,%eax /* Did we get the lock ? */
jz 1f /* No */
lock
incl CNAME(forward_irq_hitcnt)
cmpb $4, _intr_nesting_level
jae 2f
jmp 3f
AVCPL_LOCK
#ifdef CPL_AND_CML
movl _cml, %eax
#else
movl _cpl, %eax
#endif
pushl %eax
AVCPL_UNLOCK
lock
incb _intr_nesting_level
sti
pushl $0
MEXITCOUNT
jmp _doreti /* Handle forwarded interrupt */
4:
lock
decb _intr_nesting_level
ISR_RELLOCK
MEXITCOUNT
addl $8, %esp
POP_FRAME
iret
1:
lock
incl CNAME(forward_irq_misscnt)
call forward_irq /* Oops, we've lost the isr lock */
MEXITCOUNT
POP_FRAME
iret
2:
lock
incl CNAME(forward_irq_toodeepcnt)
3:
ISR_RELLOCK
MEXITCOUNT
POP_FRAME
iret
/*
 * Forward an interrupt to the CPU holding the ISR lock by sending it an
 * XFORWARD_IRQ IPI (CPU #0 is used if the lock is currently free).
 */
forward_irq:
MCOUNT
cmpl $0,_invltlb_ok
jz 4f
cmpl $0, CNAME(forward_irq_enabled)
jz 4f
movl _mp_lock,%eax
cmpl $FREE_LOCK,%eax
jne 1f
movl $0, %eax /* Pick CPU #0 if no one holds the lock */
1:
shrl $24,%eax
movl _cpu_num_to_apic_id(,%eax,4),%ecx
shll $24,%ecx
movl lapic_icr_hi, %eax
andl $~APIC_ID_MASK, %eax
orl %ecx, %eax
movl %eax, lapic_icr_hi
2:
movl lapic_icr_lo, %eax
andl $APIC_DELSTAT_MASK,%eax
jnz 2b
movl lapic_icr_lo, %eax
andl $APIC_RESV2_MASK, %eax
orl $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
movl %eax, lapic_icr_lo
3:
movl lapic_icr_lo, %eax
andl $APIC_DELSTAT_MASK,%eax
jnz 3b
4:
ret
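
In hedged C form, forward_irq above does roughly the following: pick the CPU that currently holds the giant lock (CPU #0 if it is free) and send it a fixed-delivery IPI on the XFORWARD_IRQ vector through the local APIC's interrupt command register, spinning on the delivery-status bit before and after the write. The constants and helpers here are illustrative stand-ins, and the invltlb_ok / forward_irq_enabled guards are omitted.

#include <stdint.h>

#define APIC_ID_SHIFT		24
#define APIC_DELSTAT_PENDING	0x00001000u	/* ICR_LO delivery-status bit */
#define APIC_DELMODE_FIXED	0x00000000u
#define XFORWARD_IRQ_VECTOR	0x31		/* stand-in for XFORWARD_IRQ_OFFSET */

extern volatile uint32_t lapic_icr_lo, lapic_icr_hi;	/* mapped local APIC */
extern int mp_lock_owner_cpu(void);		/* -1 if the lock is free */
extern uint32_t cpu_to_apic_id(int cpu);	/* cpu_num_to_apic_id[] lookup */

static void
forward_irq_sketch(void)
{
	int cpu = mp_lock_owner_cpu();

	if (cpu < 0)
		cpu = 0;			/* nobody holds it: pick CPU #0 */

	while (lapic_icr_lo & APIC_DELSTAT_PENDING)
		;				/* previous IPI still in flight */

	lapic_icr_hi = cpu_to_apic_id(cpu) << APIC_ID_SHIFT;	/* destination */
	lapic_icr_lo = APIC_DELMODE_FIXED | XFORWARD_IRQ_VECTOR;

	while (lapic_icr_lo & APIC_DELSTAT_PENDING)
		;				/* wait for delivery to complete */
}
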
/*
* Executed by a CPU when it receives an Xcpustop IPI from another CPU,
@ -702,6 +955,16 @@ _checkstate_need_ast:
.long 0
_checkstate_pending_ast:
.long 0
.globl CNAME(forward_irq_misscnt)
.globl CNAME(forward_irq_toodeepcnt)
.globl CNAME(forward_irq_hitcnt)
CNAME(forward_irq_misscnt):
.long 0
CNAME(forward_irq_hitcnt):
.long 0
CNAME(forward_irq_toodeepcnt):
.long 0
.globl _apic_pin_trigger
_apic_pin_trigger:

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)isa.c 7.2 (Berkeley) 5/13/91
* $Id: intr_machdep.c,v 1.7 1997/09/28 15:48:34 mckay Exp $
* $Id: intr_machdep.c,v 1.8 1998/02/09 06:08:30 eivind Exp $
*/
#include "opt_auto_eoi.h"
@ -444,19 +444,30 @@ icu_setup(int intr, inthand2_t *handler, void *arg, u_int *maskptr, int flags)
vector = TPR_FAST_INTS + intr;
setidt(vector, fastintr[intr],
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
/*
* XXX MULTIPLE_IOAPICSXXX
* Reprogram the vector in the IO APIC.
*/
select = (intr * 2) + IOAPIC_REDTBL0;
value = io_apic_read(0, select) & ~IOART_INTVEC;
io_apic_write(0, select, value | vector);
}
else
setidt(TPR_SLOW_INTS + intr, slowintr[intr],
else {
vector = TPR_SLOW_INTS + intr;
#ifdef APIC_INTR_REORDER
#ifdef APIC_INTR_HIGHPRI_CLOCK
/* XXX: Hack (kludge?) for more accurate clock. */
if (intr == 0 || intr == 8) {
vector = TPR_FAST_INTS + intr;
}
#endif
#endif
setidt(vector, slowintr[intr],
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
}
#ifdef APIC_INTR_REORDER
set_lapic_isrloc(intr, vector);
#endif
/*
* XXX MULTIPLE_IOAPICSXXX
* Reprogram the vector in the IO APIC.
*/
select = (intr * 2) + IOAPIC_REDTBL0;
value = io_apic_read(0, select) & ~IOART_INTVEC;
io_apic_write(0, select, value | vector);
#else
setidt(ICU_OFFSET + intr,
flags & INTR_FAST ? fastintr[intr] : slowintr[intr],
@ -505,6 +516,9 @@ icu_unset(intr, handler)
setidt(flags & INTR_FAST ? TPR_FAST_INTS + intr : TPR_SLOW_INTS + intr,
slowintr[intr], SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#else /* FAST_HI */
#ifdef APIC_INTR_REORDER
set_lapic_isrloc(intr, ICU_OFFSET + intr);
#endif
setidt(ICU_OFFSET + intr, slowintr[intr], SDT_SYS386IGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
#endif /* FAST_HI */

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* from: @(#)isa_device.h 7.1 (Berkeley) 5/9/91
* $Id: intr_machdep.h,v 1.9 1998/02/13 06:59:22 bde Exp $
* $Id: intr_machdep.h,v 1.10 1998/03/03 20:55:24 tegge Exp $
*/
#ifndef _I386_ISA_INTR_MACHDEP_H_
@ -116,6 +116,9 @@
/* IPI to generate an additional software trap at the target CPU */
#define XCPUAST_OFFSET (ICU_OFFSET + 48)
/* IPI to signal the CPU holding the ISR lock that another IRQ has appeared */
#define XFORWARD_IRQ_OFFSET (ICU_OFFSET + 49)
/* IPI to signal CPUs to stop and wait for another CPU to restart them */
#define XCPUSTOP_OFFSET (ICU_OFFSET + 128)
@ -174,6 +177,7 @@ inthand_t
Xcpucheckstate, /* Check cpu state */
#endif
Xcpuast, /* Additional software trap on other cpu */
Xforward_irq, /* Forward irq to cpu holding ISR lock */
Xcpustop, /* CPU stops & waits for another CPU to restart it */
Xspuriousint; /* handle APIC "spurious INTs" */

@ -36,7 +36,7 @@
*
* @(#)ipl.s
*
* $Id: ipl.s,v 1.18 1997/10/13 00:01:53 fsmp Exp $
* $Id: ipl.s,v 1.19 1997/12/15 02:18:35 tegge Exp $
*/
@ -263,6 +263,9 @@ doreti_unpend:
cli
#ifdef SMP
pushl %edx /* preserve %edx */
#ifdef APIC_INTR_DIAGNOSTIC
pushl %ecx
#endif
pushl %eax /* preserve %eax */
ICPL_LOCK
#ifdef CPL_AND_CML
@ -271,11 +274,32 @@ doreti_unpend:
popl _cpl
#endif
FAST_ICPL_UNLOCK
#ifdef APIC_INTR_DIAGNOSTIC
popl %ecx
#endif
popl %edx
#else
movl %eax,_cpl
#endif
MEXITCOUNT
#ifdef APIC_INTR_DIAGNOSTIC
lock
incl CNAME(apic_itrace_doreti)(,%ecx,4)
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
cmpl $APIC_INTR_DIAGNOSTIC_IRQ,%ecx
jne 9f
pushl %eax
pushl %ecx
pushl %edx
pushl $APIC_ITRACE_DORETI
call log_intr_event
addl $4,%esp
popl %edx
popl %ecx
popl %eax
9:
#endif
#endif
jmp %edx
ALIGN_TEXT

@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.67 1998/03/03 20:09:14 tegge Exp $
* $Id: mp_machdep.c,v 1.68 1998/03/03 20:55:25 tegge Exp $
*/
#include "opt_smp.h"
@ -246,6 +246,35 @@ extern int nkpt;
u_int32_t cpu_apic_versions[NCPU];
u_int32_t io_apic_versions[NAPIC];
#ifdef APIC_INTR_DIAGNOSTIC
int apic_itrace_enter[32];
int apic_itrace_tryisrlock[32];
int apic_itrace_gotisrlock[32];
int apic_itrace_active[32];
int apic_itrace_masked[32];
int apic_itrace_noisrlock[32];
int apic_itrace_masked2[32];
int apic_itrace_unmask[32];
int apic_itrace_noforward[32];
int apic_itrace_leave[32];
int apic_itrace_enter2[32];
int apic_itrace_doreti[32];
int apic_itrace_splz[32];
int apic_itrace_eoi[32];
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
unsigned short apic_itrace_debugbuffer[32768];
int apic_itrace_debugbuffer_idx;
struct simplelock apic_itrace_debuglock;
#endif
#endif
#ifdef APIC_INTR_REORDER
struct {
volatile int *location;
int bit;
} apic_isrbit_location[32];
#endif
/*
* APIC ID logical/physical mapping structures.
* We oversize these to simplify boot-time config.
@ -575,6 +604,10 @@ mp_enable(u_int boot_addr)
setidt(XCPUAST_OFFSET, Xcpuast,
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
/* install an inter-CPU IPI for interrupt forwarding */
setidt(XFORWARD_IRQ_OFFSET, Xforward_irq,
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
/* install an inter-CPU IPI for CPU stop/restart */
setidt(XCPUSTOP_OFFSET, Xcpustop,
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
@ -1537,6 +1570,10 @@ init_locks(void)
/* ISR uses its own "giant lock" */
isr_lock = FREE_LOCK;
#if defined(APIC_INTR_DIAGNOSTIC) && defined(APIC_INTR_DIAGNOSTIC_IRQ)
s_lock_init((struct simplelock*)&apic_itrace_debuglock);
#endif
s_lock_init((struct simplelock*)&mpintr_lock);
s_lock_init((struct simplelock*)&mcount_lock);
@ -1995,6 +2032,11 @@ int do_page_zero_idle = 1; /* bzero pages for fun and profit in idleloop */
SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW,
&do_page_zero_idle, 0, "");
/* Is forwarding of an interrupt to the CPU holding the ISR lock enabled? */
int forward_irq_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
&forward_irq_enabled, 0, "");
/* Enable forwarding of a signal to a process running on a different CPU */
int forward_signal_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
@ -2438,3 +2480,20 @@ forward_signal(struct proc *p)
return;
}
}
#ifdef APIC_INTR_REORDER
/*
 * Maintain the mapping from an interrupt's vector to its ISR bit in the local APIC.
*/
void
set_lapic_isrloc(int intr, int vector)
{
if (intr < 0 || intr >= 32)
panic("set_lapic_isrloc: bad intr argument: %d",intr);
if (vector < ICU_OFFSET || vector > 255)
panic("set_lapic_isrloc: bad vector argument: %d",vector);
apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
apic_isrbit_location[intr].bit = (1<<(vector & 31));
}
#endif
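
A hedged C sketch of how the table filled in by set_lapic_isrloc() is meant to be consumed by the APIC_INTR_REORDER variant of EOI_IRQ in apic_vector.s: a load through the recorded location and a test of the recorded bit decide whether the in-service bit for this IRQ's vector is set before the EOI is written. lapic_eoi_write() is an illustrative stand-in for the "movl $0, lapic_eoi" store.

extern struct {
	volatile int *location;		/* &lapic.isrN holding this vector's bit */
	int bit;			/* mask within that register */
} apic_isrbit_location[32];

extern void lapic_eoi_write(void);

static void
eoi_if_isr_set_sketch(int irq)
{
	if (*apic_isrbit_location[irq].location & apic_isrbit_location[irq].bit)
		lapic_eoi_write();	/* ack only when our ISR bit is set */
}
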

@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* $Id: smp.h,v 1.36 1998/03/03 19:44:34 tegge Exp $
* $Id: smp.h,v 1.37 1998/03/03 20:55:23 tegge Exp $
*
*/
@ -137,6 +137,9 @@ void forward_statclock __P((int pscnt));
void forward_hardclock __P((int pscnt));
#endif /* BETTER_CLOCK */
void forward_signal __P((struct proc *));
#ifdef APIC_INTR_REORDER
void set_lapic_isrloc __P((int, int));
#endif /* APIC_INTR_REORDER */
/* global data in mpapic.c */
extern volatile lapic_t lapic;