From be6c5ef7bc636a1678c8fd766997c08bbe726a9c Mon Sep 17 00:00:00 2001 From: fsmp Date: Mon, 26 May 1997 17:58:27 +0000 Subject: [PATCH] Split vector.s into UP and SMP specific files: - vector.s <- stub called by i386/exception.s - icu_vector.s <- UP - apic_vector.s <- SMP Split icu.s into UP and SMP specific files: - ipl.s <- stub called by i386/exception.s (formerly icu.s) - icu_ipl.s <- UP - apic_ipl.s <- SMP This was done in preparation for massive changes to the SMP INTerrupt mechanisms. More fine tuning, such as merging ipl.s into exception.s, may be appropriate. --- sys/amd64/amd64/apic_vector.S | 306 ++++++++++++++++++++++++ sys/amd64/isa/atpic_vector.S | 249 ++++++++++++++++++++ sys/amd64/isa/icu_ipl.S | 108 +++++++++ sys/amd64/isa/icu_ipl.s | 108 +++++++++ sys/amd64/isa/icu_vector.S | 249 ++++++++++++++++++++ sys/amd64/isa/icu_vector.s | 249 ++++++++++++++++++++ sys/amd64/isa/vector.S | 430 ++-------------------------------- sys/amd64/isa/vector.s | 430 ++-------------------------------- sys/i386/i386/apic_vector.s | 306 ++++++++++++++++++++++++ sys/i386/isa/apic_ipl.s | 149 ++++++++++++ sys/i386/isa/apic_vector.s | 306 ++++++++++++++++++++++++ sys/i386/isa/atpic_vector.s | 249 ++++++++++++++++++++ sys/i386/isa/icu_ipl.s | 108 +++++++++ sys/i386/isa/icu_vector.s | 249 ++++++++++++++++++++ sys/i386/isa/{icu.s => ipl.s} | 195 ++------------- sys/i386/isa/vector.s | 430 ++-------------------------------- 16 files changed, 2707 insertions(+), 1414 deletions(-) create mode 100644 sys/amd64/amd64/apic_vector.S create mode 100644 sys/amd64/isa/atpic_vector.S create mode 100644 sys/amd64/isa/icu_ipl.S create mode 100644 sys/amd64/isa/icu_ipl.s create mode 100644 sys/amd64/isa/icu_vector.S create mode 100644 sys/amd64/isa/icu_vector.s create mode 100644 sys/i386/i386/apic_vector.s create mode 100644 sys/i386/isa/apic_ipl.s create mode 100644 sys/i386/isa/apic_vector.s create mode 100644 sys/i386/isa/atpic_vector.s create mode 100644 sys/i386/isa/icu_ipl.s create mode 100644 sys/i386/isa/icu_vector.s rename sys/i386/isa/{icu.s => ipl.s} (70%) diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S new file mode 100644 index 000000000000..70d3dcb002ce --- /dev/null +++ b/sys/amd64/amd64/apic_vector.S @@ -0,0 +1,306 @@ +/* + * from: vector.s, 386BSD 0.1 unknown origin + * $Id: apic_vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $ + */ + + +/* convert an absolute IRQ# into a bitmask */ +#define IRQ_BIT(irq_num) (1 << (irq_num)) + +/* make an index into the IO APIC from the IRQ# */ +#define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2)) + +/* + * 'lazy masking' code submitted by: Bruce Evans + */ +#define MAYBE_MASK_IRQ(irq_num) \ + testl $IRQ_BIT(irq_num),iactive ; /* lazy masking */ \ + je 1f ; /* NOT currently active */ \ + orl $IRQ_BIT(irq_num),_imen ; /* set the mask bit */ \ + movl _io_apic_base,%ecx ; /* io apic addr */ \ + movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \ + movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ + orl $IOART_INTMASK,%eax ; /* set the mask */ \ + movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \ + movl _apic_base, %eax ; \ + movl $0, APIC_EOI(%eax) ; \ + orl $IRQ_BIT(irq_num), _ipending ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + popl %es ; \ + popl %ds ; \ + popal ; \ + addl $4+4,%esp ; \ + iret ; \ +; \ + ALIGN_TEXT ; \ +1: ; \ + orl $IRQ_BIT(irq_num),iactive + + +#define MAYBE_UNMASK_IRQ(irq_num) \ + cli ; /* must unmask _imen and icu atomically */ \ + andl $~IRQ_BIT(irq_num),iactive ; \ + testl 
$IRQ_BIT(irq_num),_imen ; \ + je 2f ; \ + andl $~IRQ_BIT(irq_num),_imen ; /* clear mask bit */ \ + movl _io_apic_base,%ecx ; /* io apic addr */ \ + movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \ + movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ + andl $~IOART_INTMASK,%eax ; /* clear the mask */ \ + movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \ +2: ; \ + sti ; /* XXX _doreti repeats the cli/sti */ + + +/* + * Macros for interrupt interrupt entry, call to handler, and exit. + */ + +#define FAST_INTR(irq_num, vec_name) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl %eax ; /* save only call-used registers */ \ + pushl %ecx ; \ + pushl %edx ; \ + pushl %ds ; \ + MAYBE_PUSHL_ES ; \ + movl $KDSEL,%eax ; \ + movl %ax,%ds ; \ + MAYBE_MOVW_AX_ES ; \ + FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + pushl _intr_unit + (irq_num) * 4 ; \ + call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ + movl _apic_base, %eax ; \ + movl $0, APIC_EOI(%eax) ; \ + addl $4,%esp ; \ + incl _cnt+V_INTR ; /* book-keeping can wait */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; /* unmasking pending HWIs or SWIs? */ \ + notl %eax ; \ + andl _ipending,%eax ; \ + jne 2f ; /* yes, maybe handle them */ \ +1: ; \ + MEXITCOUNT ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + MAYBE_POPL_ES ; \ + popl %ds ; \ + popl %edx ; \ + popl %ecx ; \ + popl %eax ; \ + iret ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + cmpb $3,_intr_nesting_level ; /* enough stack? */ \ + jae 1b ; /* no, return */ \ + movl _cpl,%eax ; \ + /* XXX next line is probably unnecessary now. */ \ + movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \ + incb _intr_nesting_level ; /* ... really limit it ... */ \ + sti ; /* to do this as early as possible */ \ + MAYBE_POPL_ES ; /* discard most of thin frame ... */ \ + popl %ecx ; /* ... original %ds ... */ \ + popl %edx ; \ + xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \ + pushal ; /* build fat frame (grrr) ... */ \ + pushl %ecx ; /* ... actually %ds ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; \ + movl %ax,%es ; \ + movl (2+8+0)*4(%esp),%ecx ; /* %ecx from thin frame ... */ \ + movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \ + movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \ + pushl %eax ; \ + subl $4,%esp ; /* junk for unit number */ \ + MEXITCOUNT ; \ + jmp _doreti + +#define INTR(irq_num, vec_name) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl $0 ; /* dummy error code */ \ + pushl $0 ; /* dummy trap type */ \ + pushal ; \ + pushl %ds ; /* save data and extra segments ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; /* ... and reload with kernel's ... */ \ + movl %ax,%ds ; /* ... 
early for obsolete reasons */ \ + movl %ax,%es ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + MAYBE_MASK_IRQ(irq_num) ; \ + movl _apic_base, %eax ; \ + movl $0, APIC_EOI(%eax) ; \ + movl _cpl,%eax ; \ + testl $IRQ_BIT(irq_num), %eax ; \ + jne 3f ; \ + incb _intr_nesting_level ; \ +__CONCAT(Xresume,irq_num): ; \ + FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid dbl cnt */ \ + incl _cnt+V_INTR ; /* tally interrupts */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; \ + pushl %eax ; \ + pushl _intr_unit + (irq_num) * 4 ; \ + orl _intr_mask + (irq_num) * 4,%eax ; \ + movl %eax,_cpl ; \ + sti ; \ + call *_intr_handler + (irq_num) * 4 ; \ + MAYBE_UNMASK_IRQ(irq_num) ; \ + MEXITCOUNT ; \ + jmp _doreti ; \ +; \ + ALIGN_TEXT ; \ +3: ; \ + /* XXX skip mcounting here to avoid double count */ \ + orl $IRQ_BIT(irq_num), _ipending ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + popl %es ; \ + popl %ds ; \ + popal ; \ + addl $4+4,%esp ; \ + iret + + .text + SUPERALIGN_TEXT + .globl _Xinvltlb +_Xinvltlb: + pushl %eax + movl %cr3, %eax /* invalidate the TLB */ + movl %eax, %cr3 + ss /* stack segment, avoid %ds load */ + movl _apic_base, %eax + ss + movl $0, APIC_EOI(%eax) /* End Of Interrupt to APIC */ + popl %eax + iret + +MCOUNT_LABEL(bintr) + FAST_INTR(0,fastintr0) + FAST_INTR(1,fastintr1) + FAST_INTR(2,fastintr2) + FAST_INTR(3,fastintr3) + FAST_INTR(4,fastintr4) + FAST_INTR(5,fastintr5) + FAST_INTR(6,fastintr6) + FAST_INTR(7,fastintr7) + FAST_INTR(8,fastintr8) + FAST_INTR(9,fastintr9) + FAST_INTR(10,fastintr10) + FAST_INTR(11,fastintr11) + FAST_INTR(12,fastintr12) + FAST_INTR(13,fastintr13) + FAST_INTR(14,fastintr14) + FAST_INTR(15,fastintr15) + FAST_INTR(16,fastintr16) + FAST_INTR(17,fastintr17) + FAST_INTR(18,fastintr18) + FAST_INTR(19,fastintr19) + FAST_INTR(20,fastintr20) + FAST_INTR(21,fastintr21) + FAST_INTR(22,fastintr22) + FAST_INTR(23,fastintr23) + INTR(0,intr0) + INTR(1,intr1) + INTR(2,intr2) + INTR(3,intr3) + INTR(4,intr4) + INTR(5,intr5) + INTR(6,intr6) + INTR(7,intr7) + INTR(8,intr8) + INTR(9,intr9) + INTR(10,intr10) + INTR(11,intr11) + INTR(12,intr12) + INTR(13,intr13) + INTR(14,intr14) + INTR(15,intr15) + INTR(16,intr16) + INTR(17,intr17) + INTR(18,intr18) + INTR(19,intr19) + INTR(20,intr20) + INTR(21,intr21) + INTR(22,intr22) + INTR(23,intr23) +MCOUNT_LABEL(eintr) + + .data +ihandlers: /* addresses of interrupt handlers */ + /* actually resumption addresses for HWI's */ + .long Xresume0, Xresume1, Xresume2, Xresume3 + .long Xresume4, Xresume5, Xresume6, Xresume7 + .long Xresume8, Xresume9, Xresume10, Xresume11 + .long Xresume12, Xresume13, Xresume14, Xresume15 + .long Xresume16, Xresume17, Xresume18, Xresume19 + .long Xresume20, Xresume21, Xresume22, Xresume23 + .long 0, 0, 0, 0 + .long swi_tty, swi_net, _softclock, swi_ast + +imasks: /* masks for interrupt handlers */ + .space NHWI*4 /* padding; HWI masks are elsewhere */ + + .long 0, 0, 0, 0 + .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK + + .globl _ivectors +_ivectors: + .long _Xintr0, _Xintr1, _Xintr2, _Xintr3 + .long _Xintr4, _Xintr5, _Xintr6, _Xintr7 + .long _Xintr8, _Xintr9, _Xintr10, _Xintr11 + .long _Xintr12, _Xintr13, _Xintr14, _Xintr15 + .long _Xintr16, _Xintr17, _Xintr18, _Xintr19 + .long _Xintr20, _Xintr21, _Xintr22, _Xintr23 + +/* active flag for lazy masking */ +iactive: + .long 0 + + +/* + * Interrupt counters and names. The format of these and the label names + * must agree with what vmstat expects. 
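+ * (sizing gloss, my assumption rather than original text: under
+ * APIC_IO the ICU_LEN used for _intrcnt below spans IRQs 0-23 instead
+ * of the ISA-standard 16, which is why the stray-irq names at the
+ * bottom of this file run through "stray irq23".)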
The tables are indexed by device + * ids so that we don't have to move the names around as devices are + * attached. + */ +#include "vector.h" + .globl _intrcnt, _eintrcnt +_intrcnt: + .space (NR_DEVICES + ICU_LEN) * 4 +_eintrcnt: + + .globl _intrnames, _eintrnames +_intrnames: + .ascii DEVICE_NAMES + .asciz "stray irq0" + .asciz "stray irq1" + .asciz "stray irq2" + .asciz "stray irq3" + .asciz "stray irq4" + .asciz "stray irq5" + .asciz "stray irq6" + .asciz "stray irq7" + .asciz "stray irq8" + .asciz "stray irq9" + .asciz "stray irq10" + .asciz "stray irq11" + .asciz "stray irq12" + .asciz "stray irq13" + .asciz "stray irq14" + .asciz "stray irq15" + .asciz "stray irq16" + .asciz "stray irq17" + .asciz "stray irq18" + .asciz "stray irq19" + .asciz "stray irq20" + .asciz "stray irq21" + .asciz "stray irq22" + .asciz "stray irq23" +_eintrnames: + + .text diff --git a/sys/amd64/isa/atpic_vector.S b/sys/amd64/isa/atpic_vector.S new file mode 100644 index 000000000000..fe673a5f27a2 --- /dev/null +++ b/sys/amd64/isa/atpic_vector.S @@ -0,0 +1,249 @@ +/* + * from: vector.s, 386BSD 0.1 unknown origin + * $Id: icu_vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $ + */ + +/* + * modified for PC98 by Kakefuda + */ + +#ifdef PC98 +#define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */ +#else +#define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */ +#endif + +#define ICU_EOI 0x20 /* XXX - define elsewhere */ + +#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8)) +#define IRQ_BYTE(irq_num) ((irq_num) / 8) + +#ifdef AUTO_EOI_1 +#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */ +#define OUTB_ICU1 +#else +#define ENABLE_ICU1 \ + movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \ + OUTB_ICU1 /* ... to clear in service bit */ +#define OUTB_ICU1 \ + outb %al,$IO_ICU1 +#endif + +#ifdef AUTO_EOI_2 +/* + * The data sheet says no auto-EOI on slave, but it sometimes works. + */ +#define ENABLE_ICU1_AND_2 ENABLE_ICU1 +#else +#define ENABLE_ICU1_AND_2 \ + movb $ICU_EOI,%al ; /* as above */ \ + outb %al,$IO_ICU2 ; /* but do second icu first ... */ \ + OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */ +#endif + +/* + * Macros for interrupt interrupt entry, call to handler, and exit. + */ + +#define FAST_INTR(irq_num, vec_name, enable_icus) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl %eax ; /* save only call-used registers */ \ + pushl %ecx ; \ + pushl %edx ; \ + pushl %ds ; \ + MAYBE_PUSHL_ES ; \ + movl $KDSEL,%eax ; \ + movl %ax,%ds ; \ + MAYBE_MOVW_AX_ES ; \ + FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + pushl _intr_unit + (irq_num) * 4 ; \ + call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ + enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \ + addl $4,%esp ; \ + incl _cnt+V_INTR ; /* book-keeping can wait */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \ + notl %eax ; \ + andl _ipending,%eax ; \ + jne 2f ; /* yes, maybe handle them */ \ +1: ; \ + MEXITCOUNT ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + MAYBE_POPL_ES ; \ + popl %ds ; \ + popl %edx ; \ + popl %ecx ; \ + popl %eax ; \ + iret ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + cmpb $3,_intr_nesting_level ; /* is there enough stack? */ \ + jae 1b ; /* no, return */ \ + movl _cpl,%eax ; \ + /* XXX next line is probably unnecessary now. */ \ + movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \ + incb _intr_nesting_level ; /* ... really limit it ... */ \ + sti ; /* ... 
to do this as early as possible */ \ + MAYBE_POPL_ES ; /* discard most of thin frame ... */ \ + popl %ecx ; /* ... original %ds ... */ \ + popl %edx ; \ + xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \ + pushal ; /* build fat frame (grrr) ... */ \ + pushl %ecx ; /* ... actually %ds ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; \ + movl %ax,%es ; \ + movl (2+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \ + movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \ + movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \ + pushl %eax ; \ + subl $4,%esp ; /* junk for unit number */ \ + MEXITCOUNT ; \ + jmp _doreti + +#define INTR(irq_num, vec_name, icu, enable_icus, reg) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl $0 ; /* dummy error code */ \ + pushl $0 ; /* dummy trap type */ \ + pushal ; \ + pushl %ds ; /* save our data and extra segments ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \ + movl %ax,%ds ; /* ... early for obsolete reasons */ \ + movl %ax,%es ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + movb _imen + IRQ_BYTE(irq_num),%al ; \ + orb $IRQ_BIT(irq_num),%al ; \ + movb %al,_imen + IRQ_BYTE(irq_num) ; \ + outb %al,$icu+ICU_IMR_OFFSET ; \ + enable_icus ; \ + movl _cpl,%eax ; \ + testb $IRQ_BIT(irq_num),%reg ; \ + jne 2f ; \ + incb _intr_nesting_level ; \ +__CONCAT(Xresume,irq_num): ; \ + FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid double count */ \ + incl _cnt+V_INTR ; /* tally interrupts */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; \ + pushl %eax ; \ + pushl _intr_unit + (irq_num) * 4 ; \ + orl _intr_mask + (irq_num) * 4,%eax ; \ + movl %eax,_cpl ; \ + sti ; \ + call *_intr_handler + (irq_num) * 4 ; \ + cli ; /* must unmask _imen and icu atomically */ \ + movb _imen + IRQ_BYTE(irq_num),%al ; \ + andb $~IRQ_BIT(irq_num),%al ; \ + movb %al,_imen + IRQ_BYTE(irq_num) ; \ + outb %al,$icu+ICU_IMR_OFFSET ; \ + sti ; /* XXX _doreti repeats the cli/sti */ \ + MEXITCOUNT ; \ + /* We could usually avoid the following jmp by inlining some of */ \ + /* _doreti, but it's probably better to use less cache. 
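(gloss, not in the original: inlining would give each of the 16 \
INTR expansions its own copy of the _doreti epilogue; one shared \
copy behind this jmp trades a taken branch for less I-cache) \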
*/ \ + jmp _doreti ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + /* XXX skip mcounting here to avoid double count */ \ + orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + popl %es ; \ + popl %ds ; \ + popal ; \ + addl $4+4,%esp ; \ + iret + +MCOUNT_LABEL(bintr) + FAST_INTR(0,fastintr0, ENABLE_ICU1) + FAST_INTR(1,fastintr1, ENABLE_ICU1) + FAST_INTR(2,fastintr2, ENABLE_ICU1) + FAST_INTR(3,fastintr3, ENABLE_ICU1) + FAST_INTR(4,fastintr4, ENABLE_ICU1) + FAST_INTR(5,fastintr5, ENABLE_ICU1) + FAST_INTR(6,fastintr6, ENABLE_ICU1) + FAST_INTR(7,fastintr7, ENABLE_ICU1) + FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2) + FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2) + FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2) + FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2) + FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2) + FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2) + FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2) + FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2) + INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al) + INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al) + INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al) + INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al) + INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al) + INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al) + INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al) + INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al) + INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah) +MCOUNT_LABEL(eintr) + + .data +ihandlers: /* addresses of interrupt handlers */ + /* actually resumption addresses for HWI's */ + .long Xresume0, Xresume1, Xresume2, Xresume3 + .long Xresume4, Xresume5, Xresume6, Xresume7 + .long Xresume8, Xresume9, Xresume10, Xresume11 + .long Xresume12, Xresume13, Xresume14, Xresume15 + .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .long swi_tty, swi_net, _softclock, swi_ast + +imasks: /* masks for interrupt handlers */ + .space NHWI*4 /* padding; HWI masks are elsewhere */ + + .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK + +/* + * Interrupt counters and names. The format of these and the label names + * must agree with what vmstat expects. The tables are indexed by device + * ids so that we don't have to move the names around as devices are + * attached. + */ +#include "vector.h" + .globl _intrcnt, _eintrcnt +_intrcnt: + .space (NR_DEVICES + ICU_LEN) * 4 +_eintrcnt: + + .globl _intrnames, _eintrnames +_intrnames: + .ascii DEVICE_NAMES + .asciz "stray irq0" + .asciz "stray irq1" + .asciz "stray irq2" + .asciz "stray irq3" + .asciz "stray irq4" + .asciz "stray irq5" + .asciz "stray irq6" + .asciz "stray irq7" + .asciz "stray irq8" + .asciz "stray irq9" + .asciz "stray irq10" + .asciz "stray irq11" + .asciz "stray irq12" + .asciz "stray irq13" + .asciz "stray irq14" + .asciz "stray irq15" +_eintrnames: + + .text diff --git a/sys/amd64/isa/icu_ipl.S b/sys/amd64/isa/icu_ipl.S new file mode 100644 index 000000000000..3790f0f3a616 --- /dev/null +++ b/sys/amd64/isa/icu_ipl.S @@ -0,0 +1,108 @@ +/*- + * Copyright (c) 1989, 1990 William F. Jolitz. + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $Id: icu_ipl.s,v 1.1 1997/05/24 17:02:04 smp Exp smp $ + */ + + .data + ALIGN_DATA + + .globl _vec +_vec: + .long vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7 + .long vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15 + +/* + * + */ + .text + SUPERALIGN_TEXT + +/* + * Fake clock interrupt(s) so that they appear to come from our caller instead + * of from here, so that system profiling works. + * XXX do this more generally (for all vectors; look up the C entry point). + * XXX frame bogusness stops us from just jumping to the C entry point. + */ + ALIGN_TEXT +vec0: + popl %eax /* return address */ + pushfl + pushl $KCSEL + pushl %eax + cli + MEXITCOUNT + jmp _Xintr0 /* XXX might need _Xfastintr0 */ + +#ifndef PC98 + ALIGN_TEXT +vec8: + popl %eax + pushfl + pushl $KCSEL + pushl %eax + cli + MEXITCOUNT + jmp _Xintr8 /* XXX might need _Xfastintr8 */ +#endif /* PC98 */ + +/* + * The 'generic' vector stubs. + */ + +#define BUILD_VEC(irq_num) \ + ALIGN_TEXT ; \ +__CONCAT(vec,irq_num): ; \ + int $ICU_OFFSET + (irq_num) ; \ + ret + + BUILD_VEC(1) + BUILD_VEC(2) + BUILD_VEC(3) + BUILD_VEC(4) + BUILD_VEC(5) + BUILD_VEC(6) + BUILD_VEC(7) +#ifdef PC98 + BUILD_VEC(8) +#endif + BUILD_VEC(9) + BUILD_VEC(10) + BUILD_VEC(11) + BUILD_VEC(12) + BUILD_VEC(13) + BUILD_VEC(14) + BUILD_VEC(15) diff --git a/sys/amd64/isa/icu_ipl.s b/sys/amd64/isa/icu_ipl.s new file mode 100644 index 000000000000..3790f0f3a616 --- /dev/null +++ b/sys/amd64/isa/icu_ipl.s @@ -0,0 +1,108 @@ +/*- + * Copyright (c) 1989, 1990 William F. Jolitz. + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $Id: icu_ipl.s,v 1.1 1997/05/24 17:02:04 smp Exp smp $ + */ + + .data + ALIGN_DATA + + .globl _vec +_vec: + .long vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7 + .long vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15 + +/* + * + */ + .text + SUPERALIGN_TEXT + +/* + * Fake clock interrupt(s) so that they appear to come from our caller instead + * of from here, so that system profiling works. + * XXX do this more generally (for all vectors; look up the C entry point). + * XXX frame bogusness stops us from just jumping to the C entry point. + */ + ALIGN_TEXT +vec0: + popl %eax /* return address */ + pushfl + pushl $KCSEL + pushl %eax + cli + MEXITCOUNT + jmp _Xintr0 /* XXX might need _Xfastintr0 */ + +#ifndef PC98 + ALIGN_TEXT +vec8: + popl %eax + pushfl + pushl $KCSEL + pushl %eax + cli + MEXITCOUNT + jmp _Xintr8 /* XXX might need _Xfastintr8 */ +#endif /* PC98 */ + +/* + * The 'generic' vector stubs. 
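+ *
+ * (Illustration, mine rather than original source: BUILD_VEC below
+ * reflects a hardware IRQ through its IDT gate, so BUILD_VEC(3)
+ * expands to roughly
+ *
+ *	vec3:	int	$ICU_OFFSET + 3		; enter via IRQ 3's gate
+ *		ret
+ *
+ * vec0, and vec8 on non-PC98, are hand-built above instead, so that
+ * the fake clock interrupts appear to come from the caller for
+ * profiling.)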
+ */ + +#define BUILD_VEC(irq_num) \ + ALIGN_TEXT ; \ +__CONCAT(vec,irq_num): ; \ + int $ICU_OFFSET + (irq_num) ; \ + ret + + BUILD_VEC(1) + BUILD_VEC(2) + BUILD_VEC(3) + BUILD_VEC(4) + BUILD_VEC(5) + BUILD_VEC(6) + BUILD_VEC(7) +#ifdef PC98 + BUILD_VEC(8) +#endif + BUILD_VEC(9) + BUILD_VEC(10) + BUILD_VEC(11) + BUILD_VEC(12) + BUILD_VEC(13) + BUILD_VEC(14) + BUILD_VEC(15) diff --git a/sys/amd64/isa/icu_vector.S b/sys/amd64/isa/icu_vector.S new file mode 100644 index 000000000000..fe673a5f27a2 --- /dev/null +++ b/sys/amd64/isa/icu_vector.S @@ -0,0 +1,249 @@ +/* + * from: vector.s, 386BSD 0.1 unknown origin + * $Id: icu_vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $ + */ + +/* + * modified for PC98 by Kakefuda + */ + +#ifdef PC98 +#define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */ +#else +#define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */ +#endif + +#define ICU_EOI 0x20 /* XXX - define elsewhere */ + +#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8)) +#define IRQ_BYTE(irq_num) ((irq_num) / 8) + +#ifdef AUTO_EOI_1 +#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */ +#define OUTB_ICU1 +#else +#define ENABLE_ICU1 \ + movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \ + OUTB_ICU1 /* ... to clear in service bit */ +#define OUTB_ICU1 \ + outb %al,$IO_ICU1 +#endif + +#ifdef AUTO_EOI_2 +/* + * The data sheet says no auto-EOI on slave, but it sometimes works. + */ +#define ENABLE_ICU1_AND_2 ENABLE_ICU1 +#else +#define ENABLE_ICU1_AND_2 \ + movb $ICU_EOI,%al ; /* as above */ \ + outb %al,$IO_ICU2 ; /* but do second icu first ... */ \ + OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */ +#endif + +/* + * Macros for interrupt interrupt entry, call to handler, and exit. + */ + +#define FAST_INTR(irq_num, vec_name, enable_icus) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl %eax ; /* save only call-used registers */ \ + pushl %ecx ; \ + pushl %edx ; \ + pushl %ds ; \ + MAYBE_PUSHL_ES ; \ + movl $KDSEL,%eax ; \ + movl %ax,%ds ; \ + MAYBE_MOVW_AX_ES ; \ + FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + pushl _intr_unit + (irq_num) * 4 ; \ + call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ + enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \ + addl $4,%esp ; \ + incl _cnt+V_INTR ; /* book-keeping can wait */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \ + notl %eax ; \ + andl _ipending,%eax ; \ + jne 2f ; /* yes, maybe handle them */ \ +1: ; \ + MEXITCOUNT ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + MAYBE_POPL_ES ; \ + popl %ds ; \ + popl %edx ; \ + popl %ecx ; \ + popl %eax ; \ + iret ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + cmpb $3,_intr_nesting_level ; /* is there enough stack? */ \ + jae 1b ; /* no, return */ \ + movl _cpl,%eax ; \ + /* XXX next line is probably unnecessary now. */ \ + movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \ + incb _intr_nesting_level ; /* ... really limit it ... */ \ + sti ; /* ... to do this as early as possible */ \ + MAYBE_POPL_ES ; /* discard most of thin frame ... */ \ + popl %ecx ; /* ... original %ds ... */ \ + popl %edx ; \ + xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \ + pushal ; /* build fat frame (grrr) ... */ \ + pushl %ecx ; /* ... actually %ds ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; \ + movl %ax,%es ; \ + movl (2+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \ + movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \ + movl (2+8+1)*4(%esp),%eax ; /* ... 
cpl from thin frame */ \ + pushl %eax ; \ + subl $4,%esp ; /* junk for unit number */ \ + MEXITCOUNT ; \ + jmp _doreti + +#define INTR(irq_num, vec_name, icu, enable_icus, reg) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl $0 ; /* dummy error code */ \ + pushl $0 ; /* dummy trap type */ \ + pushal ; \ + pushl %ds ; /* save our data and extra segments ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \ + movl %ax,%ds ; /* ... early for obsolete reasons */ \ + movl %ax,%es ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + movb _imen + IRQ_BYTE(irq_num),%al ; \ + orb $IRQ_BIT(irq_num),%al ; \ + movb %al,_imen + IRQ_BYTE(irq_num) ; \ + outb %al,$icu+ICU_IMR_OFFSET ; \ + enable_icus ; \ + movl _cpl,%eax ; \ + testb $IRQ_BIT(irq_num),%reg ; \ + jne 2f ; \ + incb _intr_nesting_level ; \ +__CONCAT(Xresume,irq_num): ; \ + FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid double count */ \ + incl _cnt+V_INTR ; /* tally interrupts */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; \ + pushl %eax ; \ + pushl _intr_unit + (irq_num) * 4 ; \ + orl _intr_mask + (irq_num) * 4,%eax ; \ + movl %eax,_cpl ; \ + sti ; \ + call *_intr_handler + (irq_num) * 4 ; \ + cli ; /* must unmask _imen and icu atomically */ \ + movb _imen + IRQ_BYTE(irq_num),%al ; \ + andb $~IRQ_BIT(irq_num),%al ; \ + movb %al,_imen + IRQ_BYTE(irq_num) ; \ + outb %al,$icu+ICU_IMR_OFFSET ; \ + sti ; /* XXX _doreti repeats the cli/sti */ \ + MEXITCOUNT ; \ + /* We could usually avoid the following jmp by inlining some of */ \ + /* _doreti, but it's probably better to use less cache. */ \ + jmp _doreti ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + /* XXX skip mcounting here to avoid double count */ \ + orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + popl %es ; \ + popl %ds ; \ + popal ; \ + addl $4+4,%esp ; \ + iret + +MCOUNT_LABEL(bintr) + FAST_INTR(0,fastintr0, ENABLE_ICU1) + FAST_INTR(1,fastintr1, ENABLE_ICU1) + FAST_INTR(2,fastintr2, ENABLE_ICU1) + FAST_INTR(3,fastintr3, ENABLE_ICU1) + FAST_INTR(4,fastintr4, ENABLE_ICU1) + FAST_INTR(5,fastintr5, ENABLE_ICU1) + FAST_INTR(6,fastintr6, ENABLE_ICU1) + FAST_INTR(7,fastintr7, ENABLE_ICU1) + FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2) + FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2) + FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2) + FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2) + FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2) + FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2) + FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2) + FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2) + INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al) + INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al) + INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al) + INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al) + INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al) + INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al) + INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al) + INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al) + INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah) +MCOUNT_LABEL(eintr) + + .data +ihandlers: /* addresses of interrupt handlers */ + /* actually resumption addresses for HWI's */ + .long Xresume0, Xresume1, Xresume2, Xresume3 + .long Xresume4, Xresume5, 
Xresume6, Xresume7 + .long Xresume8, Xresume9, Xresume10, Xresume11 + .long Xresume12, Xresume13, Xresume14, Xresume15 + .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .long swi_tty, swi_net, _softclock, swi_ast + +imasks: /* masks for interrupt handlers */ + .space NHWI*4 /* padding; HWI masks are elsewhere */ + + .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK + +/* + * Interrupt counters and names. The format of these and the label names + * must agree with what vmstat expects. The tables are indexed by device + * ids so that we don't have to move the names around as devices are + * attached. + */ +#include "vector.h" + .globl _intrcnt, _eintrcnt +_intrcnt: + .space (NR_DEVICES + ICU_LEN) * 4 +_eintrcnt: + + .globl _intrnames, _eintrnames +_intrnames: + .ascii DEVICE_NAMES + .asciz "stray irq0" + .asciz "stray irq1" + .asciz "stray irq2" + .asciz "stray irq3" + .asciz "stray irq4" + .asciz "stray irq5" + .asciz "stray irq6" + .asciz "stray irq7" + .asciz "stray irq8" + .asciz "stray irq9" + .asciz "stray irq10" + .asciz "stray irq11" + .asciz "stray irq12" + .asciz "stray irq13" + .asciz "stray irq14" + .asciz "stray irq15" +_eintrnames: + + .text diff --git a/sys/amd64/isa/icu_vector.s b/sys/amd64/isa/icu_vector.s new file mode 100644 index 000000000000..fe673a5f27a2 --- /dev/null +++ b/sys/amd64/isa/icu_vector.s @@ -0,0 +1,249 @@ +/* + * from: vector.s, 386BSD 0.1 unknown origin + * $Id: icu_vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $ + */ + +/* + * modified for PC98 by Kakefuda + */ + +#ifdef PC98 +#define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */ +#else +#define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */ +#endif + +#define ICU_EOI 0x20 /* XXX - define elsewhere */ + +#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8)) +#define IRQ_BYTE(irq_num) ((irq_num) / 8) + +#ifdef AUTO_EOI_1 +#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */ +#define OUTB_ICU1 +#else +#define ENABLE_ICU1 \ + movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \ + OUTB_ICU1 /* ... to clear in service bit */ +#define OUTB_ICU1 \ + outb %al,$IO_ICU1 +#endif + +#ifdef AUTO_EOI_2 +/* + * The data sheet says no auto-EOI on slave, but it sometimes works. + */ +#define ENABLE_ICU1_AND_2 ENABLE_ICU1 +#else +#define ENABLE_ICU1_AND_2 \ + movb $ICU_EOI,%al ; /* as above */ \ + outb %al,$IO_ICU2 ; /* but do second icu first ... */ \ + OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */ +#endif + +/* + * Macros for interrupt interrupt entry, call to handler, and exit. + */ + +#define FAST_INTR(irq_num, vec_name, enable_icus) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl %eax ; /* save only call-used registers */ \ + pushl %ecx ; \ + pushl %edx ; \ + pushl %ds ; \ + MAYBE_PUSHL_ES ; \ + movl $KDSEL,%eax ; \ + movl %ax,%ds ; \ + MAYBE_MOVW_AX_ES ; \ + FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + pushl _intr_unit + (irq_num) * 4 ; \ + call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ + enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \ + addl $4,%esp ; \ + incl _cnt+V_INTR ; /* book-keeping can wait */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? 
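(gloss, not original: the notl/andl pair below \
computes _ipending & ~_cpl, i.e. interrupts posted while this \
handler ran that the current priority no longer masks; a nonzero \
result sends the jne to 2f to try to service them) \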
*/ \ + notl %eax ; \ + andl _ipending,%eax ; \ + jne 2f ; /* yes, maybe handle them */ \ +1: ; \ + MEXITCOUNT ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + MAYBE_POPL_ES ; \ + popl %ds ; \ + popl %edx ; \ + popl %ecx ; \ + popl %eax ; \ + iret ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + cmpb $3,_intr_nesting_level ; /* is there enough stack? */ \ + jae 1b ; /* no, return */ \ + movl _cpl,%eax ; \ + /* XXX next line is probably unnecessary now. */ \ + movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \ + incb _intr_nesting_level ; /* ... really limit it ... */ \ + sti ; /* ... to do this as early as possible */ \ + MAYBE_POPL_ES ; /* discard most of thin frame ... */ \ + popl %ecx ; /* ... original %ds ... */ \ + popl %edx ; \ + xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \ + pushal ; /* build fat frame (grrr) ... */ \ + pushl %ecx ; /* ... actually %ds ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; \ + movl %ax,%es ; \ + movl (2+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \ + movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \ + movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \ + pushl %eax ; \ + subl $4,%esp ; /* junk for unit number */ \ + MEXITCOUNT ; \ + jmp _doreti + +#define INTR(irq_num, vec_name, icu, enable_icus, reg) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl $0 ; /* dummy error code */ \ + pushl $0 ; /* dummy trap type */ \ + pushal ; \ + pushl %ds ; /* save our data and extra segments ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \ + movl %ax,%ds ; /* ... early for obsolete reasons */ \ + movl %ax,%es ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + movb _imen + IRQ_BYTE(irq_num),%al ; \ + orb $IRQ_BIT(irq_num),%al ; \ + movb %al,_imen + IRQ_BYTE(irq_num) ; \ + outb %al,$icu+ICU_IMR_OFFSET ; \ + enable_icus ; \ + movl _cpl,%eax ; \ + testb $IRQ_BIT(irq_num),%reg ; \ + jne 2f ; \ + incb _intr_nesting_level ; \ +__CONCAT(Xresume,irq_num): ; \ + FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid double count */ \ + incl _cnt+V_INTR ; /* tally interrupts */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; \ + pushl %eax ; \ + pushl _intr_unit + (irq_num) * 4 ; \ + orl _intr_mask + (irq_num) * 4,%eax ; \ + movl %eax,_cpl ; \ + sti ; \ + call *_intr_handler + (irq_num) * 4 ; \ + cli ; /* must unmask _imen and icu atomically */ \ + movb _imen + IRQ_BYTE(irq_num),%al ; \ + andb $~IRQ_BIT(irq_num),%al ; \ + movb %al,_imen + IRQ_BYTE(irq_num) ; \ + outb %al,$icu+ICU_IMR_OFFSET ; \ + sti ; /* XXX _doreti repeats the cli/sti */ \ + MEXITCOUNT ; \ + /* We could usually avoid the following jmp by inlining some of */ \ + /* _doreti, but it's probably better to use less cache. 
*/ \ + jmp _doreti ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + /* XXX skip mcounting here to avoid double count */ \ + orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + popl %es ; \ + popl %ds ; \ + popal ; \ + addl $4+4,%esp ; \ + iret + +MCOUNT_LABEL(bintr) + FAST_INTR(0,fastintr0, ENABLE_ICU1) + FAST_INTR(1,fastintr1, ENABLE_ICU1) + FAST_INTR(2,fastintr2, ENABLE_ICU1) + FAST_INTR(3,fastintr3, ENABLE_ICU1) + FAST_INTR(4,fastintr4, ENABLE_ICU1) + FAST_INTR(5,fastintr5, ENABLE_ICU1) + FAST_INTR(6,fastintr6, ENABLE_ICU1) + FAST_INTR(7,fastintr7, ENABLE_ICU1) + FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2) + FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2) + FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2) + FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2) + FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2) + FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2) + FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2) + FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2) + INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al) + INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al) + INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al) + INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al) + INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al) + INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al) + INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al) + INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al) + INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah) +MCOUNT_LABEL(eintr) + + .data +ihandlers: /* addresses of interrupt handlers */ + /* actually resumption addresses for HWI's */ + .long Xresume0, Xresume1, Xresume2, Xresume3 + .long Xresume4, Xresume5, Xresume6, Xresume7 + .long Xresume8, Xresume9, Xresume10, Xresume11 + .long Xresume12, Xresume13, Xresume14, Xresume15 + .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .long swi_tty, swi_net, _softclock, swi_ast + +imasks: /* masks for interrupt handlers */ + .space NHWI*4 /* padding; HWI masks are elsewhere */ + + .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK + +/* + * Interrupt counters and names. The format of these and the label names + * must agree with what vmstat expects. The tables are indexed by device + * ids so that we don't have to move the names around as devices are + * attached. 
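+ *
+ * (sketch, mine rather than original: vmstat walks the two tables in
+ * lockstep, so the k-th NUL-terminated name labels the k-th counter,
+ * roughly, with nintr = (_eintrcnt - _intrcnt) / 4:
+ *
+ *	for (name = _intrnames, i = 0; i < nintr; i++) {
+ *		printf("%-14s %lu\n", name, _intrcnt[i]);
+ *		name += strlen(name) + 1;	// step past the NUL
+ *	}
+ *
+ * hence "must agree with what vmstat expects" above.)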
+ */ +#include "vector.h" + .globl _intrcnt, _eintrcnt +_intrcnt: + .space (NR_DEVICES + ICU_LEN) * 4 +_eintrcnt: + + .globl _intrnames, _eintrnames +_intrnames: + .ascii DEVICE_NAMES + .asciz "stray irq0" + .asciz "stray irq1" + .asciz "stray irq2" + .asciz "stray irq3" + .asciz "stray irq4" + .asciz "stray irq5" + .asciz "stray irq6" + .asciz "stray irq7" + .asciz "stray irq8" + .asciz "stray irq9" + .asciz "stray irq10" + .asciz "stray irq11" + .asciz "stray irq12" + .asciz "stray irq13" + .asciz "stray irq14" + .asciz "stray irq15" +_eintrnames: + + .text diff --git a/sys/amd64/isa/vector.S b/sys/amd64/isa/vector.S index e20d9f54857d..1fc56685f3eb 100644 --- a/sys/amd64/isa/vector.S +++ b/sys/amd64/isa/vector.S @@ -1,6 +1,6 @@ /* * from: vector.s, 386BSD 0.1 unknown origin - * $Id: vector.s,v 1.28 1997/04/28 01:47:55 fsmp Exp $ + * $Id: vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $ */ /* @@ -8,11 +8,6 @@ */ #include "opt_auto_eoi.h" -#include "opt_smp.h" - -#if defined(SMP) -#include /* this includes */ -#endif /* SMP */ #include #ifdef PC98 @@ -21,144 +16,6 @@ #include #endif -#ifdef PC98 -#define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */ -#else -#define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */ -#endif - - -#if defined(SMP) - -#define GET_MPLOCK call _get_mplock -#define REL_MPLOCK call _rel_mplock - -#else - -#define GET_MPLOCK /* NOP get Kernel Mutex */ -#define REL_MPLOCK /* NOP release mutex */ - -#endif /* SMP */ - - -#if defined(APIC_IO) - -#define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2)) -#define IRQ_BIT(irq_num) (1 << (irq_num)) - -#define ENABLE_APIC \ - movl _apic_base, %eax ; \ - movl $0, APIC_EOI(%eax) - -#define ENABLE_ICU1 ENABLE_APIC -#define ENABLE_ICU1_AND_2 ENABLE_APIC - -#define MASK_IRQ(irq_num,icu) \ - orl $IRQ_BIT(irq_num),_imen ; /* set the mask bit */ \ - movl _io_apic_base,%ecx ; /* io apic addr */ \ - movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \ - movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ - orl $IOART_INTMASK,%eax ; /* set the mask */ \ - movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ - -#define UNMASK_IRQ(irq_num,icu) \ - andl $~IRQ_BIT(irq_num),_imen ; /* clear mask bit */ \ - movl _io_apic_base,%ecx ; /* io apic addr */ \ - movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \ - movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ - andl $~IOART_INTMASK,%eax ; /* clear the mask */ \ - movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ - -#define TEST_IRQ(irq_num,reg) \ - testl $IRQ_BIT(irq_num),%eax - -#define SET_IPENDING(irq_num) \ - orl $IRQ_BIT(irq_num),_ipending - -/* - * 'lazy masking' code submitted by: Bruce Evans - */ -#define MAYBE_MASK_IRQ(irq_num,icu) \ - testl $IRQ_BIT(irq_num),iactive ; /* lazy masking */ \ - je 1f ; /* NOT currently active */ \ - MASK_IRQ(irq_num,icu) ; \ - ENABLE_APIC ; \ - SET_IPENDING(irq_num) ; \ - REL_MPLOCK ; /* SMP release global lock */ \ - popl %es ; \ - popl %ds ; \ - popal ; \ - addl $4+4,%esp ; \ - iret ; \ -; \ - ALIGN_TEXT ; \ -1: ; \ - orl $IRQ_BIT(irq_num),iactive - -#define MAYBE_UNMASK_IRQ(irq_num,icu) \ - andl $~IRQ_BIT(irq_num),iactive ; \ - testl $IRQ_BIT(irq_num),_imen ; \ - je 3f ; \ - UNMASK_IRQ(irq_num,icu) ; \ -3: - -#else /* APIC_IO */ - -#define MASK_IRQ(irq_num,icu) \ - movb _imen + IRQ_BYTE(irq_num),%al ; \ - orb $IRQ_BIT(irq_num),%al ; \ - movb %al,_imen + IRQ_BYTE(irq_num) ; \ - outb %al,$icu+ICU_IMR_OFFSET - -#define UNMASK_IRQ(irq_num,icu) \ - movb _imen + IRQ_BYTE(irq_num),%al ; \ - andb $~IRQ_BIT(irq_num),%al ; \ - movb %al,_imen + IRQ_BYTE(irq_num) ; \ - 
outb %al,$icu+ICU_IMR_OFFSET - -#define TEST_IRQ(irq_num,reg) \ - testb $IRQ_BIT(irq_num),%reg - -#define SET_IPENDING(irq_num) \ - orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) - -#define ICU_EOI 0x20 /* XXX - define elsewhere */ - -#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8)) -#define IRQ_BYTE(irq_num) ((irq_num) / 8) - -#ifdef AUTO_EOI_1 -#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */ -#define OUTB_ICU1 -#else -#define ENABLE_ICU1 \ - movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \ - OUTB_ICU1 /* ... to clear in service bit */ -#define OUTB_ICU1 \ - outb %al,$IO_ICU1 -#endif - -#ifdef AUTO_EOI_2 -/* - * The data sheet says no auto-EOI on slave, but it sometimes works. - */ -#define ENABLE_ICU1_AND_2 ENABLE_ICU1 -#else -#define ENABLE_ICU1_AND_2 \ - movb $ICU_EOI,%al ; /* as above */ \ - outb %al,$IO_ICU2 ; /* but do second icu first ... */ \ - OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */ -#endif - -#define MAYBE_MASK_IRQ(irq_num,icu) \ - MASK_IRQ(irq_num,icu) - -#define MAYBE_UNMASK_IRQ(irq_num,icu) \ - UNMASK_IRQ(irq_num,icu) - -#endif /* APIC_IO */ - - #ifdef FAST_INTR_HANDLER_USES_ES #define ACTUALLY_PUSHED 1 #define MAYBE_MOVW_AX_ES movl %ax,%es @@ -176,6 +33,16 @@ #define MAYBE_PUSHL_ES #endif + .data + ALIGN_DATA + + .globl _intr_nesting_level +_intr_nesting_level: + .byte 0 + .space 3 + + .text + /* * Macros for interrupt interrupt entry, call to handler, and exit. * @@ -221,275 +88,8 @@ * loading segregs. */ -#define FAST_INTR(irq_num, vec_name, enable_icus) \ - .text ; \ - SUPERALIGN_TEXT ; \ -IDTVEC(vec_name) ; \ - pushl %eax ; /* save only call-used registers */ \ - pushl %ecx ; \ - pushl %edx ; \ - pushl %ds ; \ - MAYBE_PUSHL_ES ; \ - movl $KDSEL,%eax ; \ - movl %ax,%ds ; \ - MAYBE_MOVW_AX_ES ; \ - FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \ - GET_MPLOCK ; /* SMP Spin lock */ \ - pushl _intr_unit + (irq_num) * 4 ; \ - call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ - enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \ - addl $4,%esp ; \ - incl _cnt+V_INTR ; /* book-keeping can wait */ \ - movl _intr_countp + (irq_num) * 4,%eax ; \ - incl (%eax) ; \ - movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \ - notl %eax ; \ - andl _ipending,%eax ; \ - jne 2f ; /* yes, maybe handle them */ \ -1: ; \ - MEXITCOUNT ; \ - REL_MPLOCK ; /* SMP release global lock */ \ - MAYBE_POPL_ES ; \ - popl %ds ; \ - popl %edx ; \ - popl %ecx ; \ - popl %eax ; \ - iret ; \ -; \ - ALIGN_TEXT ; \ -2: ; \ - cmpb $3,_intr_nesting_level ; /* is there enough stack? */ \ - jae 1b ; /* no, return */ \ - movl _cpl,%eax ; \ - /* XXX next line is probably unnecessary now. */ \ - movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \ - incb _intr_nesting_level ; /* ... really limit it ... */ \ - sti ; /* ... to do this as early as possible */ \ - MAYBE_POPL_ES ; /* discard most of thin frame ... */ \ - popl %ecx ; /* ... original %ds ... */ \ - popl %edx ; \ - xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \ - pushal ; /* build fat frame (grrr) ... */ \ - pushl %ecx ; /* ... actually %ds ... */ \ - pushl %es ; \ - movl $KDSEL,%eax ; \ - movl %ax,%es ; \ - movl (2+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \ - movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \ - movl (2+8+1)*4(%esp),%eax ; /* ... 
cpl from thin frame */ \ - pushl %eax ; \ - subl $4,%esp ; /* junk for unit number */ \ - MEXITCOUNT ; \ - jmp _doreti - -#define INTR(irq_num, vec_name, icu, enable_icus, reg) \ - .text ; \ - SUPERALIGN_TEXT ; \ -IDTVEC(vec_name) ; \ - pushl $0 ; /* dummy error code */ \ - pushl $0 ; /* dummy trap type */ \ - pushal ; \ - pushl %ds ; /* save our data and extra segments ... */ \ - pushl %es ; \ - movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \ - movl %ax,%ds ; /* ... early for obsolete reasons */ \ - movl %ax,%es ; \ - GET_MPLOCK ; /* SMP Spin lock */ \ - MAYBE_MASK_IRQ(irq_num,icu) ; \ - enable_icus ; \ - movl _cpl,%eax ; \ - TEST_IRQ(irq_num,reg) ; \ - jne 2f ; \ - incb _intr_nesting_level ; \ -__CONCAT(Xresume,irq_num): ; \ - FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid double count */ \ - incl _cnt+V_INTR ; /* tally interrupts */ \ - movl _intr_countp + (irq_num) * 4,%eax ; \ - incl (%eax) ; \ - movl _cpl,%eax ; \ - pushl %eax ; \ - pushl _intr_unit + (irq_num) * 4 ; \ - orl _intr_mask + (irq_num) * 4,%eax ; \ - movl %eax,_cpl ; \ - sti ; \ - call *_intr_handler + (irq_num) * 4 ; \ - cli ; /* must unmask _imen and icu atomically */ \ - MAYBE_UNMASK_IRQ(irq_num,icu) ; \ - sti ; /* XXX _doreti repeats the cli/sti */ \ - MEXITCOUNT ; \ - /* We could usually avoid the following jmp by inlining some of */ \ - /* _doreti, but it's probably better to use less cache. */ \ - jmp _doreti ; \ -; \ - ALIGN_TEXT ; \ -2: ; \ - /* XXX skip mcounting here to avoid double count */ \ - SET_IPENDING(irq_num) ; \ - REL_MPLOCK ; /* SMP release global lock */ \ - popl %es ; \ - popl %ds ; \ - popal ; \ - addl $4+4,%esp ; \ - iret - -#if defined(APIC_IO) - .text - SUPERALIGN_TEXT - .globl _Xinvltlb -_Xinvltlb: - pushl %eax - movl %cr3, %eax - movl %eax, %cr3 - ss - movl _apic_base, %eax - ss - movl $0, APIC_EOI(%eax) - popl %eax - iret -#endif /* APIC_IO */ - -MCOUNT_LABEL(bintr) - FAST_INTR(0,fastintr0, ENABLE_ICU1) - FAST_INTR(1,fastintr1, ENABLE_ICU1) - FAST_INTR(2,fastintr2, ENABLE_ICU1) - FAST_INTR(3,fastintr3, ENABLE_ICU1) - FAST_INTR(4,fastintr4, ENABLE_ICU1) - FAST_INTR(5,fastintr5, ENABLE_ICU1) - FAST_INTR(6,fastintr6, ENABLE_ICU1) - FAST_INTR(7,fastintr7, ENABLE_ICU1) - FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2) - FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2) - FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2) - FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2) - FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2) - FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2) - FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2) - FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2) -#if defined(APIC_IO) - FAST_INTR(16,fastintr16, ENABLE_ICU1_AND_2) - FAST_INTR(17,fastintr17, ENABLE_ICU1_AND_2) - FAST_INTR(18,fastintr18, ENABLE_ICU1_AND_2) - FAST_INTR(19,fastintr19, ENABLE_ICU1_AND_2) - FAST_INTR(20,fastintr20, ENABLE_ICU1_AND_2) - FAST_INTR(21,fastintr21, ENABLE_ICU1_AND_2) - FAST_INTR(22,fastintr22, ENABLE_ICU1_AND_2) - FAST_INTR(23,fastintr23, ENABLE_ICU1_AND_2) -#endif /* APIC_IO */ - INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al) - INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al) - INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al) - INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al) - INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al) - INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al) - INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al) - INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al) - INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(12,intr12, IO_ICU2, 
ENABLE_ICU1_AND_2, ah) - INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah) -#if defined(APIC_IO) - INTR(16,intr16, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(17,intr17, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(18,intr18, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(19,intr19, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(20,intr20, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(21,intr21, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(22,intr22, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(23,intr23, IO_ICU2, ENABLE_ICU1_AND_2, ah) -#endif /* APIC_IO */ -MCOUNT_LABEL(eintr) - - .data -ihandlers: /* addresses of interrupt handlers */ - /* actually resumption addresses for HWI's */ - .long Xresume0, Xresume1, Xresume2, Xresume3 - .long Xresume4, Xresume5, Xresume6, Xresume7 - .long Xresume8, Xresume9, Xresume10, Xresume11 - .long Xresume12, Xresume13, Xresume14, Xresume15 -#if defined(APIC_IO) - .long Xresume16, Xresume17, Xresume18, Xresume19 - .long Xresume20, Xresume21, Xresume22, Xresume23 +#ifdef APIC_IO +#include "i386/isa/apic_vector.s" #else - .long 0, 0, 0, 0, 0, 0, 0, 0 -#endif /* APIC_IO */ - .long 0, 0, 0, 0, swi_tty, swi_net, _softclock, swi_ast - -imasks: /* masks for interrupt handlers */ - .space NHWI*4 /* padding; HWI masks are elsewhere */ - -#if !defined(APIC_IO) /* Less padding for APIC_IO, NHWI is higher */ - .long 0, 0, 0, 0, 0, 0, 0, 0 -#endif /* APIC_IO */ - .long 0, 0, 0, 0 - .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK - - .globl _intr_nesting_level -_intr_nesting_level: - .byte 0 - .space 3 - -#if defined(APIC_IO) - - .globl _ivectors -_ivectors: - .long _Xintr0, _Xintr1, _Xintr2, _Xintr3 - .long _Xintr4, _Xintr5, _Xintr6, _Xintr7 - .long _Xintr8, _Xintr9, _Xintr10, _Xintr11 - .long _Xintr12, _Xintr13, _Xintr14, _Xintr15 - .long _Xintr16, _Xintr17, _Xintr18, _Xintr19 - .long _Xintr20, _Xintr21, _Xintr22, _Xintr23 - -/* active flag for lazy masking */ -iactive: - .long 0 - -#endif /* APIC_IO */ - -/* - * Interrupt counters and names. The format of these and the label names - * must agree with what vmstat expects. The tables are indexed by device - * ids so that we don't have to move the names around as devices are - * attached. 
- */ -#include "vector.h" - .globl _intrcnt, _eintrcnt -_intrcnt: - .space (NR_DEVICES + ICU_LEN) * 4 -_eintrcnt: - - .globl _intrnames, _eintrnames -_intrnames: - .ascii DEVICE_NAMES - .asciz "stray irq0" - .asciz "stray irq1" - .asciz "stray irq2" - .asciz "stray irq3" - .asciz "stray irq4" - .asciz "stray irq5" - .asciz "stray irq6" - .asciz "stray irq7" - .asciz "stray irq8" - .asciz "stray irq9" - .asciz "stray irq10" - .asciz "stray irq11" - .asciz "stray irq12" - .asciz "stray irq13" - .asciz "stray irq14" - .asciz "stray irq15" -#if defined(APIC_IO) - .asciz "stray irq16" - .asciz "stray irq17" - .asciz "stray irq18" - .asciz "stray irq19" - .asciz "stray irq20" - .asciz "stray irq21" - .asciz "stray irq22" - .asciz "stray irq23" -#endif /* APIC_IO */ -_eintrnames: - - .text +#include "i386/isa/icu_vector.s" +#endif /* APIC_IO */ diff --git a/sys/amd64/isa/vector.s b/sys/amd64/isa/vector.s index e20d9f54857d..1fc56685f3eb 100644 --- a/sys/amd64/isa/vector.s +++ b/sys/amd64/isa/vector.s @@ -1,6 +1,6 @@ /* * from: vector.s, 386BSD 0.1 unknown origin - * $Id: vector.s,v 1.28 1997/04/28 01:47:55 fsmp Exp $ + * $Id: vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $ */ /* @@ -8,11 +8,6 @@ */ #include "opt_auto_eoi.h" -#include "opt_smp.h" - -#if defined(SMP) -#include /* this includes */ -#endif /* SMP */ #include #ifdef PC98 @@ -21,144 +16,6 @@ #include #endif -#ifdef PC98 -#define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */ -#else -#define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */ -#endif - - -#if defined(SMP) - -#define GET_MPLOCK call _get_mplock -#define REL_MPLOCK call _rel_mplock - -#else - -#define GET_MPLOCK /* NOP get Kernel Mutex */ -#define REL_MPLOCK /* NOP release mutex */ - -#endif /* SMP */ - - -#if defined(APIC_IO) - -#define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2)) -#define IRQ_BIT(irq_num) (1 << (irq_num)) - -#define ENABLE_APIC \ - movl _apic_base, %eax ; \ - movl $0, APIC_EOI(%eax) - -#define ENABLE_ICU1 ENABLE_APIC -#define ENABLE_ICU1_AND_2 ENABLE_APIC - -#define MASK_IRQ(irq_num,icu) \ - orl $IRQ_BIT(irq_num),_imen ; /* set the mask bit */ \ - movl _io_apic_base,%ecx ; /* io apic addr */ \ - movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \ - movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ - orl $IOART_INTMASK,%eax ; /* set the mask */ \ - movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ - -#define UNMASK_IRQ(irq_num,icu) \ - andl $~IRQ_BIT(irq_num),_imen ; /* clear mask bit */ \ - movl _io_apic_base,%ecx ; /* io apic addr */ \ - movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \ - movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ - andl $~IOART_INTMASK,%eax ; /* clear the mask */ \ - movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ - -#define TEST_IRQ(irq_num,reg) \ - testl $IRQ_BIT(irq_num),%eax - -#define SET_IPENDING(irq_num) \ - orl $IRQ_BIT(irq_num),_ipending - -/* - * 'lazy masking' code submitted by: Bruce Evans - */ -#define MAYBE_MASK_IRQ(irq_num,icu) \ - testl $IRQ_BIT(irq_num),iactive ; /* lazy masking */ \ - je 1f ; /* NOT currently active */ \ - MASK_IRQ(irq_num,icu) ; \ - ENABLE_APIC ; \ - SET_IPENDING(irq_num) ; \ - REL_MPLOCK ; /* SMP release global lock */ \ - popl %es ; \ - popl %ds ; \ - popal ; \ - addl $4+4,%esp ; \ - iret ; \ -; \ - ALIGN_TEXT ; \ -1: ; \ - orl $IRQ_BIT(irq_num),iactive - -#define MAYBE_UNMASK_IRQ(irq_num,icu) \ - andl $~IRQ_BIT(irq_num),iactive ; \ - testl $IRQ_BIT(irq_num),_imen ; \ - je 3f ; \ - UNMASK_IRQ(irq_num,icu) ; \ -3: - -#else /* APIC_IO */ - -#define MASK_IRQ(irq_num,icu) \ - movb _imen + 
IRQ_BYTE(irq_num),%al ; \ - orb $IRQ_BIT(irq_num),%al ; \ - movb %al,_imen + IRQ_BYTE(irq_num) ; \ - outb %al,$icu+ICU_IMR_OFFSET - -#define UNMASK_IRQ(irq_num,icu) \ - movb _imen + IRQ_BYTE(irq_num),%al ; \ - andb $~IRQ_BIT(irq_num),%al ; \ - movb %al,_imen + IRQ_BYTE(irq_num) ; \ - outb %al,$icu+ICU_IMR_OFFSET - -#define TEST_IRQ(irq_num,reg) \ - testb $IRQ_BIT(irq_num),%reg - -#define SET_IPENDING(irq_num) \ - orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) - -#define ICU_EOI 0x20 /* XXX - define elsewhere */ - -#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8)) -#define IRQ_BYTE(irq_num) ((irq_num) / 8) - -#ifdef AUTO_EOI_1 -#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */ -#define OUTB_ICU1 -#else -#define ENABLE_ICU1 \ - movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \ - OUTB_ICU1 /* ... to clear in service bit */ -#define OUTB_ICU1 \ - outb %al,$IO_ICU1 -#endif - -#ifdef AUTO_EOI_2 -/* - * The data sheet says no auto-EOI on slave, but it sometimes works. - */ -#define ENABLE_ICU1_AND_2 ENABLE_ICU1 -#else -#define ENABLE_ICU1_AND_2 \ - movb $ICU_EOI,%al ; /* as above */ \ - outb %al,$IO_ICU2 ; /* but do second icu first ... */ \ - OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */ -#endif - -#define MAYBE_MASK_IRQ(irq_num,icu) \ - MASK_IRQ(irq_num,icu) - -#define MAYBE_UNMASK_IRQ(irq_num,icu) \ - UNMASK_IRQ(irq_num,icu) - -#endif /* APIC_IO */ - - #ifdef FAST_INTR_HANDLER_USES_ES #define ACTUALLY_PUSHED 1 #define MAYBE_MOVW_AX_ES movl %ax,%es @@ -176,6 +33,16 @@ #define MAYBE_PUSHL_ES #endif + .data + ALIGN_DATA + + .globl _intr_nesting_level +_intr_nesting_level: + .byte 0 + .space 3 + + .text + /* * Macros for interrupt interrupt entry, call to handler, and exit. * @@ -221,275 +88,8 @@ * loading segregs. */ -#define FAST_INTR(irq_num, vec_name, enable_icus) \ - .text ; \ - SUPERALIGN_TEXT ; \ -IDTVEC(vec_name) ; \ - pushl %eax ; /* save only call-used registers */ \ - pushl %ecx ; \ - pushl %edx ; \ - pushl %ds ; \ - MAYBE_PUSHL_ES ; \ - movl $KDSEL,%eax ; \ - movl %ax,%ds ; \ - MAYBE_MOVW_AX_ES ; \ - FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \ - GET_MPLOCK ; /* SMP Spin lock */ \ - pushl _intr_unit + (irq_num) * 4 ; \ - call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ - enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \ - addl $4,%esp ; \ - incl _cnt+V_INTR ; /* book-keeping can wait */ \ - movl _intr_countp + (irq_num) * 4,%eax ; \ - incl (%eax) ; \ - movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \ - notl %eax ; \ - andl _ipending,%eax ; \ - jne 2f ; /* yes, maybe handle them */ \ -1: ; \ - MEXITCOUNT ; \ - REL_MPLOCK ; /* SMP release global lock */ \ - MAYBE_POPL_ES ; \ - popl %ds ; \ - popl %edx ; \ - popl %ecx ; \ - popl %eax ; \ - iret ; \ -; \ - ALIGN_TEXT ; \ -2: ; \ - cmpb $3,_intr_nesting_level ; /* is there enough stack? */ \ - jae 1b ; /* no, return */ \ - movl _cpl,%eax ; \ - /* XXX next line is probably unnecessary now. */ \ - movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \ - incb _intr_nesting_level ; /* ... really limit it ... */ \ - sti ; /* ... to do this as early as possible */ \ - MAYBE_POPL_ES ; /* discard most of thin frame ... */ \ - popl %ecx ; /* ... original %ds ... */ \ - popl %edx ; \ - xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \ - pushal ; /* build fat frame (grrr) ... */ \ - pushl %ecx ; /* ... actually %ds ... */ \ - pushl %es ; \ - movl $KDSEL,%eax ; \ - movl %ax,%es ; \ - movl (2+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \ - movl %ecx,(2+6)*4(%esp) ; /* ... 
to fat frame ... */ \ - movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \ - pushl %eax ; \ - subl $4,%esp ; /* junk for unit number */ \ - MEXITCOUNT ; \ - jmp _doreti - -#define INTR(irq_num, vec_name, icu, enable_icus, reg) \ - .text ; \ - SUPERALIGN_TEXT ; \ -IDTVEC(vec_name) ; \ - pushl $0 ; /* dummy error code */ \ - pushl $0 ; /* dummy trap type */ \ - pushal ; \ - pushl %ds ; /* save our data and extra segments ... */ \ - pushl %es ; \ - movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \ - movl %ax,%ds ; /* ... early for obsolete reasons */ \ - movl %ax,%es ; \ - GET_MPLOCK ; /* SMP Spin lock */ \ - MAYBE_MASK_IRQ(irq_num,icu) ; \ - enable_icus ; \ - movl _cpl,%eax ; \ - TEST_IRQ(irq_num,reg) ; \ - jne 2f ; \ - incb _intr_nesting_level ; \ -__CONCAT(Xresume,irq_num): ; \ - FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid double count */ \ - incl _cnt+V_INTR ; /* tally interrupts */ \ - movl _intr_countp + (irq_num) * 4,%eax ; \ - incl (%eax) ; \ - movl _cpl,%eax ; \ - pushl %eax ; \ - pushl _intr_unit + (irq_num) * 4 ; \ - orl _intr_mask + (irq_num) * 4,%eax ; \ - movl %eax,_cpl ; \ - sti ; \ - call *_intr_handler + (irq_num) * 4 ; \ - cli ; /* must unmask _imen and icu atomically */ \ - MAYBE_UNMASK_IRQ(irq_num,icu) ; \ - sti ; /* XXX _doreti repeats the cli/sti */ \ - MEXITCOUNT ; \ - /* We could usually avoid the following jmp by inlining some of */ \ - /* _doreti, but it's probably better to use less cache. */ \ - jmp _doreti ; \ -; \ - ALIGN_TEXT ; \ -2: ; \ - /* XXX skip mcounting here to avoid double count */ \ - SET_IPENDING(irq_num) ; \ - REL_MPLOCK ; /* SMP release global lock */ \ - popl %es ; \ - popl %ds ; \ - popal ; \ - addl $4+4,%esp ; \ - iret - -#if defined(APIC_IO) - .text - SUPERALIGN_TEXT - .globl _Xinvltlb -_Xinvltlb: - pushl %eax - movl %cr3, %eax - movl %eax, %cr3 - ss - movl _apic_base, %eax - ss - movl $0, APIC_EOI(%eax) - popl %eax - iret -#endif /* APIC_IO */ - -MCOUNT_LABEL(bintr) - FAST_INTR(0,fastintr0, ENABLE_ICU1) - FAST_INTR(1,fastintr1, ENABLE_ICU1) - FAST_INTR(2,fastintr2, ENABLE_ICU1) - FAST_INTR(3,fastintr3, ENABLE_ICU1) - FAST_INTR(4,fastintr4, ENABLE_ICU1) - FAST_INTR(5,fastintr5, ENABLE_ICU1) - FAST_INTR(6,fastintr6, ENABLE_ICU1) - FAST_INTR(7,fastintr7, ENABLE_ICU1) - FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2) - FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2) - FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2) - FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2) - FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2) - FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2) - FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2) - FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2) -#if defined(APIC_IO) - FAST_INTR(16,fastintr16, ENABLE_ICU1_AND_2) - FAST_INTR(17,fastintr17, ENABLE_ICU1_AND_2) - FAST_INTR(18,fastintr18, ENABLE_ICU1_AND_2) - FAST_INTR(19,fastintr19, ENABLE_ICU1_AND_2) - FAST_INTR(20,fastintr20, ENABLE_ICU1_AND_2) - FAST_INTR(21,fastintr21, ENABLE_ICU1_AND_2) - FAST_INTR(22,fastintr22, ENABLE_ICU1_AND_2) - FAST_INTR(23,fastintr23, ENABLE_ICU1_AND_2) -#endif /* APIC_IO */ - INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al) - INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al) - INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al) - INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al) - INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al) - INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al) - INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al) - INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al) - INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(11,intr11, 
IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah) -#if defined(APIC_IO) - INTR(16,intr16, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(17,intr17, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(18,intr18, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(19,intr19, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(20,intr20, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(21,intr21, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(22,intr22, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(23,intr23, IO_ICU2, ENABLE_ICU1_AND_2, ah) -#endif /* APIC_IO */ -MCOUNT_LABEL(eintr) - - .data -ihandlers: /* addresses of interrupt handlers */ - /* actually resumption addresses for HWI's */ - .long Xresume0, Xresume1, Xresume2, Xresume3 - .long Xresume4, Xresume5, Xresume6, Xresume7 - .long Xresume8, Xresume9, Xresume10, Xresume11 - .long Xresume12, Xresume13, Xresume14, Xresume15 -#if defined(APIC_IO) - .long Xresume16, Xresume17, Xresume18, Xresume19 - .long Xresume20, Xresume21, Xresume22, Xresume23 +#ifdef APIC_IO +#include "i386/isa/apic_vector.s" #else - .long 0, 0, 0, 0, 0, 0, 0, 0 -#endif /* APIC_IO */ - .long 0, 0, 0, 0, swi_tty, swi_net, _softclock, swi_ast - -imasks: /* masks for interrupt handlers */ - .space NHWI*4 /* padding; HWI masks are elsewhere */ - -#if !defined(APIC_IO) /* Less padding for APIC_IO, NHWI is higher */ - .long 0, 0, 0, 0, 0, 0, 0, 0 -#endif /* APIC_IO */ - .long 0, 0, 0, 0 - .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK - - .globl _intr_nesting_level -_intr_nesting_level: - .byte 0 - .space 3 - -#if defined(APIC_IO) - - .globl _ivectors -_ivectors: - .long _Xintr0, _Xintr1, _Xintr2, _Xintr3 - .long _Xintr4, _Xintr5, _Xintr6, _Xintr7 - .long _Xintr8, _Xintr9, _Xintr10, _Xintr11 - .long _Xintr12, _Xintr13, _Xintr14, _Xintr15 - .long _Xintr16, _Xintr17, _Xintr18, _Xintr19 - .long _Xintr20, _Xintr21, _Xintr22, _Xintr23 - -/* active flag for lazy masking */ -iactive: - .long 0 - -#endif /* APIC_IO */ - -/* - * Interrupt counters and names. The format of these and the label names - * must agree with what vmstat expects. The tables are indexed by device - * ids so that we don't have to move the names around as devices are - * attached. 
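
For anyone checking the table layout against vmstat: the _intrcnt words and the nul-separated _intrnames strings are parallel arrays bounded by the _e* end labels, which is exactly the contract the comment above describes. A minimal C sketch of a consumer, on the assumption that the symbols are reachable from C without the a.out leading underscore:

    #include <stdio.h>
    #include <string.h>

    /* parallel tables; the assembler-visible names carry a leading '_' */
    extern unsigned long intrcnt[], eintrcnt[];
    extern char intrnames[], eintrnames[];

    void
    print_intr_counts(void)
    {
            unsigned long *cnt = intrcnt;
            const char *name = intrnames;

            while (cnt < eintrcnt && name < eintrnames) {
                    printf("%-14s %lu\n", name, *cnt++);
                    name += strlen(name) + 1;   /* .asciz strings are
                                                 * nul-separated */
            }
    }
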
- */ -#include "vector.h" - .globl _intrcnt, _eintrcnt -_intrcnt: - .space (NR_DEVICES + ICU_LEN) * 4 -_eintrcnt: - - .globl _intrnames, _eintrnames -_intrnames: - .ascii DEVICE_NAMES - .asciz "stray irq0" - .asciz "stray irq1" - .asciz "stray irq2" - .asciz "stray irq3" - .asciz "stray irq4" - .asciz "stray irq5" - .asciz "stray irq6" - .asciz "stray irq7" - .asciz "stray irq8" - .asciz "stray irq9" - .asciz "stray irq10" - .asciz "stray irq11" - .asciz "stray irq12" - .asciz "stray irq13" - .asciz "stray irq14" - .asciz "stray irq15" -#if defined(APIC_IO) - .asciz "stray irq16" - .asciz "stray irq17" - .asciz "stray irq18" - .asciz "stray irq19" - .asciz "stray irq20" - .asciz "stray irq21" - .asciz "stray irq22" - .asciz "stray irq23" -#endif /* APIC_IO */ -_eintrnames: - - .text +#include "i386/isa/icu_vector.s" +#endif /* APIC_IO */ diff --git a/sys/i386/i386/apic_vector.s b/sys/i386/i386/apic_vector.s new file mode 100644 index 000000000000..70d3dcb002ce --- /dev/null +++ b/sys/i386/i386/apic_vector.s @@ -0,0 +1,306 @@ +/* + * from: vector.s, 386BSD 0.1 unknown origin + * $Id: apic_vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $ + */ + + +/* convert an absolute IRQ# into a bitmask */ +#define IRQ_BIT(irq_num) (1 << (irq_num)) + +/* make an index into the IO APIC from the IRQ# */ +#define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2)) + +/* + * 'lazy masking' code submitted by: Bruce Evans + */ +#define MAYBE_MASK_IRQ(irq_num) \ + testl $IRQ_BIT(irq_num),iactive ; /* lazy masking */ \ + je 1f ; /* NOT currently active */ \ + orl $IRQ_BIT(irq_num),_imen ; /* set the mask bit */ \ + movl _io_apic_base,%ecx ; /* io apic addr */ \ + movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \ + movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ + orl $IOART_INTMASK,%eax ; /* set the mask */ \ + movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \ + movl _apic_base, %eax ; \ + movl $0, APIC_EOI(%eax) ; \ + orl $IRQ_BIT(irq_num), _ipending ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + popl %es ; \ + popl %ds ; \ + popal ; \ + addl $4+4,%esp ; \ + iret ; \ +; \ + ALIGN_TEXT ; \ +1: ; \ + orl $IRQ_BIT(irq_num),iactive + + +#define MAYBE_UNMASK_IRQ(irq_num) \ + cli ; /* must unmask _imen and icu atomically */ \ + andl $~IRQ_BIT(irq_num),iactive ; \ + testl $IRQ_BIT(irq_num),_imen ; \ + je 2f ; \ + andl $~IRQ_BIT(irq_num),_imen ; /* clear mask bit */ \ + movl _io_apic_base,%ecx ; /* io apic addr */ \ + movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \ + movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ + andl $~IOART_INTMASK,%eax ; /* clear the mask */ \ + movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \ +2: ; \ + sti ; /* XXX _doreti repeats the cli/sti */ + + +/* + * Macros for interrupt interrupt entry, call to handler, and exit. + */ + +#define FAST_INTR(irq_num, vec_name) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl %eax ; /* save only call-used registers */ \ + pushl %ecx ; \ + pushl %edx ; \ + pushl %ds ; \ + MAYBE_PUSHL_ES ; \ + movl $KDSEL,%eax ; \ + movl %ax,%ds ; \ + MAYBE_MOVW_AX_ES ; \ + FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + pushl _intr_unit + (irq_num) * 4 ; \ + call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ + movl _apic_base, %eax ; \ + movl $0, APIC_EOI(%eax) ; \ + addl $4,%esp ; \ + incl _cnt+V_INTR ; /* book-keeping can wait */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; /* unmasking pending HWIs or SWIs? 
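
The MAYBE_MASK_IRQ/MAYBE_UNMASK_IRQ pair above is the core of the lazy masking idea: an IRQ stays unmasked in hardware until it re-fires while its own handler is still marked active in iactive; only then is the IO APIC redirection entry masked and the source parked in _ipending for _doreti. A C rendering of the masking half, with the IO APIC select/window protocol spelled out; the IOAPIC_WINDOW offset and IOART_INTMASK value are assumptions in the spirit of the era's apic headers, not quotations from them:

    #include <stdint.h>

    #define IRQ_BIT(irq)    (1u << (irq))
    #define REDTBL_IDX(irq) (0x10 + (irq) * 2) /* lo word of redir entry */
    #define IOAPIC_WINDOW   0x10               /* assumed window offset */
    #define IOART_INTMASK   0x00010000u        /* assumed mask bit */

    static volatile uint32_t *io_apic_base;    /* mapped at boot */
    static uint32_t iactive, imen, ipending;   /* mirror the .s data words */

    /* returns nonzero when the interrupt was deferred for _doreti */
    static int
    maybe_mask_irq(int irq)
    {
            if (iactive & IRQ_BIT(irq)) {      /* handler still running? */
                    imen |= IRQ_BIT(irq);      /* set the soft mask bit */
                    io_apic_base[0] = REDTBL_IDX(irq);  /* write the index */
                    io_apic_base[IOAPIC_WINDOW / 4] |= IOART_INTMASK;
                    ipending |= IRQ_BIT(irq);  /* park it for later */
                    return (1);                /* the asm EOIs and irets */
            }
            iactive |= IRQ_BIT(irq);           /* mark active, carry on */
            return (0);
    }
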
*/ \ + notl %eax ; \ + andl _ipending,%eax ; \ + jne 2f ; /* yes, maybe handle them */ \ +1: ; \ + MEXITCOUNT ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + MAYBE_POPL_ES ; \ + popl %ds ; \ + popl %edx ; \ + popl %ecx ; \ + popl %eax ; \ + iret ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + cmpb $3,_intr_nesting_level ; /* enough stack? */ \ + jae 1b ; /* no, return */ \ + movl _cpl,%eax ; \ + /* XXX next line is probably unnecessary now. */ \ + movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \ + incb _intr_nesting_level ; /* ... really limit it ... */ \ + sti ; /* to do this as early as possible */ \ + MAYBE_POPL_ES ; /* discard most of thin frame ... */ \ + popl %ecx ; /* ... original %ds ... */ \ + popl %edx ; \ + xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \ + pushal ; /* build fat frame (grrr) ... */ \ + pushl %ecx ; /* ... actually %ds ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; \ + movl %ax,%es ; \ + movl (2+8+0)*4(%esp),%ecx ; /* %ecx from thin frame ... */ \ + movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \ + movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \ + pushl %eax ; \ + subl $4,%esp ; /* junk for unit number */ \ + MEXITCOUNT ; \ + jmp _doreti + +#define INTR(irq_num, vec_name) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl $0 ; /* dummy error code */ \ + pushl $0 ; /* dummy trap type */ \ + pushal ; \ + pushl %ds ; /* save data and extra segments ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; /* ... and reload with kernel's ... */ \ + movl %ax,%ds ; /* ... early for obsolete reasons */ \ + movl %ax,%es ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + MAYBE_MASK_IRQ(irq_num) ; \ + movl _apic_base, %eax ; \ + movl $0, APIC_EOI(%eax) ; \ + movl _cpl,%eax ; \ + testl $IRQ_BIT(irq_num), %eax ; \ + jne 3f ; \ + incb _intr_nesting_level ; \ +__CONCAT(Xresume,irq_num): ; \ + FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid dbl cnt */ \ + incl _cnt+V_INTR ; /* tally interrupts */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; \ + pushl %eax ; \ + pushl _intr_unit + (irq_num) * 4 ; \ + orl _intr_mask + (irq_num) * 4,%eax ; \ + movl %eax,_cpl ; \ + sti ; \ + call *_intr_handler + (irq_num) * 4 ; \ + MAYBE_UNMASK_IRQ(irq_num) ; \ + MEXITCOUNT ; \ + jmp _doreti ; \ +; \ + ALIGN_TEXT ; \ +3: ; \ + /* XXX skip mcounting here to avoid double count */ \ + orl $IRQ_BIT(irq_num), _ipending ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + popl %es ; \ + popl %ds ; \ + popal ; \ + addl $4+4,%esp ; \ + iret + + .text + SUPERALIGN_TEXT + .globl _Xinvltlb +_Xinvltlb: + pushl %eax + movl %cr3, %eax /* invalidate the TLB */ + movl %eax, %cr3 + ss /* stack segment, avoid %ds load */ + movl _apic_base, %eax + ss + movl $0, APIC_EOI(%eax) /* End Of Interrupt to APIC */ + popl %eax + iret + +MCOUNT_LABEL(bintr) + FAST_INTR(0,fastintr0) + FAST_INTR(1,fastintr1) + FAST_INTR(2,fastintr2) + FAST_INTR(3,fastintr3) + FAST_INTR(4,fastintr4) + FAST_INTR(5,fastintr5) + FAST_INTR(6,fastintr6) + FAST_INTR(7,fastintr7) + FAST_INTR(8,fastintr8) + FAST_INTR(9,fastintr9) + FAST_INTR(10,fastintr10) + FAST_INTR(11,fastintr11) + FAST_INTR(12,fastintr12) + FAST_INTR(13,fastintr13) + FAST_INTR(14,fastintr14) + FAST_INTR(15,fastintr15) + FAST_INTR(16,fastintr16) + FAST_INTR(17,fastintr17) + FAST_INTR(18,fastintr18) + FAST_INTR(19,fastintr19) + FAST_INTR(20,fastintr20) + FAST_INTR(21,fastintr21) + FAST_INTR(22,fastintr22) + FAST_INTR(23,fastintr23) + INTR(0,intr0) + INTR(1,intr1) + INTR(2,intr2) + INTR(3,intr3) + INTR(4,intr4) + INTR(5,intr5) + 
INTR(6,intr6) + INTR(7,intr7) + INTR(8,intr8) + INTR(9,intr9) + INTR(10,intr10) + INTR(11,intr11) + INTR(12,intr12) + INTR(13,intr13) + INTR(14,intr14) + INTR(15,intr15) + INTR(16,intr16) + INTR(17,intr17) + INTR(18,intr18) + INTR(19,intr19) + INTR(20,intr20) + INTR(21,intr21) + INTR(22,intr22) + INTR(23,intr23) +MCOUNT_LABEL(eintr) + + .data +ihandlers: /* addresses of interrupt handlers */ + /* actually resumption addresses for HWI's */ + .long Xresume0, Xresume1, Xresume2, Xresume3 + .long Xresume4, Xresume5, Xresume6, Xresume7 + .long Xresume8, Xresume9, Xresume10, Xresume11 + .long Xresume12, Xresume13, Xresume14, Xresume15 + .long Xresume16, Xresume17, Xresume18, Xresume19 + .long Xresume20, Xresume21, Xresume22, Xresume23 + .long 0, 0, 0, 0 + .long swi_tty, swi_net, _softclock, swi_ast + +imasks: /* masks for interrupt handlers */ + .space NHWI*4 /* padding; HWI masks are elsewhere */ + + .long 0, 0, 0, 0 + .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK + + .globl _ivectors +_ivectors: + .long _Xintr0, _Xintr1, _Xintr2, _Xintr3 + .long _Xintr4, _Xintr5, _Xintr6, _Xintr7 + .long _Xintr8, _Xintr9, _Xintr10, _Xintr11 + .long _Xintr12, _Xintr13, _Xintr14, _Xintr15 + .long _Xintr16, _Xintr17, _Xintr18, _Xintr19 + .long _Xintr20, _Xintr21, _Xintr22, _Xintr23 + +/* active flag for lazy masking */ +iactive: + .long 0 + + +/* + * Interrupt counters and names. The format of these and the label names + * must agree with what vmstat expects. The tables are indexed by device + * ids so that we don't have to move the names around as devices are + * attached. + */ +#include "vector.h" + .globl _intrcnt, _eintrcnt +_intrcnt: + .space (NR_DEVICES + ICU_LEN) * 4 +_eintrcnt: + + .globl _intrnames, _eintrnames +_intrnames: + .ascii DEVICE_NAMES + .asciz "stray irq0" + .asciz "stray irq1" + .asciz "stray irq2" + .asciz "stray irq3" + .asciz "stray irq4" + .asciz "stray irq5" + .asciz "stray irq6" + .asciz "stray irq7" + .asciz "stray irq8" + .asciz "stray irq9" + .asciz "stray irq10" + .asciz "stray irq11" + .asciz "stray irq12" + .asciz "stray irq13" + .asciz "stray irq14" + .asciz "stray irq15" + .asciz "stray irq16" + .asciz "stray irq17" + .asciz "stray irq18" + .asciz "stray irq19" + .asciz "stray irq20" + .asciz "stray irq21" + .asciz "stray irq22" + .asciz "stray irq23" +_eintrnames: + + .text diff --git a/sys/i386/isa/apic_ipl.s b/sys/i386/isa/apic_ipl.s new file mode 100644 index 000000000000..1aec3ce9bad7 --- /dev/null +++ b/sys/i386/isa/apic_ipl.s @@ -0,0 +1,149 @@ +/* + * Copyright (c) 1997, by Steve Passe + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. The name of the developer may NOT be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $Id: apic_ipl.s,v 1.1 1997/05/24 17:02:04 smp Exp smp $ + */ + + .data + ALIGN_DATA + +/* this allows us to change the 8254 APIC pin# assignment */ + .globl _Xintr8254 +_Xintr8254: + .long _Xintr7 + +/* this allows us to change the RTC clock APIC pin# assignment */ + .globl _XintrRTC +_XintrRTC: + .long _Xintr7 + +/* used by this file, microtime.s and clock.c */ + .globl _mask8254 +_mask8254: + .long 0 + +/* used by this file and clock.c */ + .globl _maskRTC +_maskRTC: + .long 0 + +/* */ + .globl _vec +_vec: + .long vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7 + .long vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15 + .long vec16, vec17, vec18, vec19, vec20, vec21, vec22, vec23 + + +/* + * + */ + .text + SUPERALIGN_TEXT + +/* + * Fake clock interrupt(s) so that they appear to come from our caller instead + * of from here, so that system profiling works. + * XXX do this more generally (for all vectors; look up the C entry point). + * XXX frame bogusness stops us from just jumping to the C entry point. + */ + +/* + * generic vector function for 8254 clock + */ + ALIGN_TEXT + .globl _vec8254 +_vec8254: + popl %eax /* return address */ + pushfl + pushl $KCSEL + pushl %eax + cli + movl _mask8254,%eax /* lazy masking */ + notl %eax + andl %eax,iactive + MEXITCOUNT + movl _Xintr8254, %eax + jmp %eax /* XXX might need _Xfastintr# */ + +/* + * generic vector function for RTC clock + */ + ALIGN_TEXT + .globl _vecRTC +_vecRTC: + popl %eax + pushfl + pushl $KCSEL + pushl %eax + cli + movl _maskRTC,%eax /* lazy masking */ + notl %eax + andl %eax,iactive + MEXITCOUNT + movl _XintrRTC, %eax + jmp %eax /* XXX might need _Xfastintr# */ + +/* + * The 'generic' vector stubs. 
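
The _Xintr8254 and _XintrRTC words above exist so the clock pin assignments can be changed after the MP table is parsed: clock setup simply stores a different _Xintr# address, and the _vec8254/_vecRTC stubs jump through whatever is there. In C terms they are plain function pointers; a sketch, with only the names taken from the .s file:

    typedef void (*vector_fn)(void);

    extern void Xintr7(void);       /* the default pin, as in the .s */

    vector_fn Xintr8254 = Xintr7;   /* retargeted once the real 8254
                                     * pin is known */
    vector_fn XintrRTC = Xintr7;

    static void
    vec8254_tail(void)              /* the final "jmp %eax" of _vec8254 */
    {
            (*Xintr8254)();
    }
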
+ */ + +#define BUILD_VEC(irq_num) \ + ALIGN_TEXT ; \ +__CONCAT(vec,irq_num): ; \ + popl %eax ; \ + pushfl ; \ + pushl $KCSEL ; \ + pushl %eax ; \ + cli ; \ + andl $~IRQ_BIT(irq_num),iactive ; /* lazy masking */ \ + MEXITCOUNT ; \ + jmp __CONCAT(_Xintr,irq_num) + + + BUILD_VEC(0) /* NOT specific in IO APIC hardware */ + BUILD_VEC(1) + BUILD_VEC(2) + BUILD_VEC(3) + BUILD_VEC(4) + BUILD_VEC(5) + BUILD_VEC(6) + BUILD_VEC(7) + BUILD_VEC(8) /* NOT specific in IO APIC hardware */ + BUILD_VEC(9) + BUILD_VEC(10) + BUILD_VEC(11) + BUILD_VEC(12) + BUILD_VEC(13) + BUILD_VEC(14) + BUILD_VEC(15) + BUILD_VEC(16) /* 8 additional INTs in IO APIC */ + BUILD_VEC(17) + BUILD_VEC(18) + BUILD_VEC(19) + BUILD_VEC(20) + BUILD_VEC(21) + BUILD_VEC(22) + BUILD_VEC(23) + diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s new file mode 100644 index 000000000000..70d3dcb002ce --- /dev/null +++ b/sys/i386/isa/apic_vector.s @@ -0,0 +1,306 @@ +/* + * from: vector.s, 386BSD 0.1 unknown origin + * $Id: apic_vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $ + */ + + +/* convert an absolute IRQ# into a bitmask */ +#define IRQ_BIT(irq_num) (1 << (irq_num)) + +/* make an index into the IO APIC from the IRQ# */ +#define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2)) + +/* + * 'lazy masking' code submitted by: Bruce Evans + */ +#define MAYBE_MASK_IRQ(irq_num) \ + testl $IRQ_BIT(irq_num),iactive ; /* lazy masking */ \ + je 1f ; /* NOT currently active */ \ + orl $IRQ_BIT(irq_num),_imen ; /* set the mask bit */ \ + movl _io_apic_base,%ecx ; /* io apic addr */ \ + movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \ + movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ + orl $IOART_INTMASK,%eax ; /* set the mask */ \ + movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \ + movl _apic_base, %eax ; \ + movl $0, APIC_EOI(%eax) ; \ + orl $IRQ_BIT(irq_num), _ipending ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + popl %es ; \ + popl %ds ; \ + popal ; \ + addl $4+4,%esp ; \ + iret ; \ +; \ + ALIGN_TEXT ; \ +1: ; \ + orl $IRQ_BIT(irq_num),iactive + + +#define MAYBE_UNMASK_IRQ(irq_num) \ + cli ; /* must unmask _imen and icu atomically */ \ + andl $~IRQ_BIT(irq_num),iactive ; \ + testl $IRQ_BIT(irq_num),_imen ; \ + je 2f ; \ + andl $~IRQ_BIT(irq_num),_imen ; /* clear mask bit */ \ + movl _io_apic_base,%ecx ; /* io apic addr */ \ + movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \ + movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ + andl $~IOART_INTMASK,%eax ; /* clear the mask */ \ + movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ \ +2: ; \ + sti ; /* XXX _doreti repeats the cli/sti */ + + +/* + * Macros for interrupt interrupt entry, call to handler, and exit. + */ + +#define FAST_INTR(irq_num, vec_name) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl %eax ; /* save only call-used registers */ \ + pushl %ecx ; \ + pushl %edx ; \ + pushl %ds ; \ + MAYBE_PUSHL_ES ; \ + movl $KDSEL,%eax ; \ + movl %ax,%ds ; \ + MAYBE_MOVW_AX_ES ; \ + FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + pushl _intr_unit + (irq_num) * 4 ; \ + call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ + movl _apic_base, %eax ; \ + movl $0, APIC_EOI(%eax) ; \ + addl $4,%esp ; \ + incl _cnt+V_INTR ; /* book-keeping can wait */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; /* unmasking pending HWIs or SWIs? 
*/ \ + notl %eax ; \ + andl _ipending,%eax ; \ + jne 2f ; /* yes, maybe handle them */ \ +1: ; \ + MEXITCOUNT ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + MAYBE_POPL_ES ; \ + popl %ds ; \ + popl %edx ; \ + popl %ecx ; \ + popl %eax ; \ + iret ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + cmpb $3,_intr_nesting_level ; /* enough stack? */ \ + jae 1b ; /* no, return */ \ + movl _cpl,%eax ; \ + /* XXX next line is probably unnecessary now. */ \ + movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \ + incb _intr_nesting_level ; /* ... really limit it ... */ \ + sti ; /* to do this as early as possible */ \ + MAYBE_POPL_ES ; /* discard most of thin frame ... */ \ + popl %ecx ; /* ... original %ds ... */ \ + popl %edx ; \ + xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \ + pushal ; /* build fat frame (grrr) ... */ \ + pushl %ecx ; /* ... actually %ds ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; \ + movl %ax,%es ; \ + movl (2+8+0)*4(%esp),%ecx ; /* %ecx from thin frame ... */ \ + movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \ + movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \ + pushl %eax ; \ + subl $4,%esp ; /* junk for unit number */ \ + MEXITCOUNT ; \ + jmp _doreti + +#define INTR(irq_num, vec_name) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl $0 ; /* dummy error code */ \ + pushl $0 ; /* dummy trap type */ \ + pushal ; \ + pushl %ds ; /* save data and extra segments ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; /* ... and reload with kernel's ... */ \ + movl %ax,%ds ; /* ... early for obsolete reasons */ \ + movl %ax,%es ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + MAYBE_MASK_IRQ(irq_num) ; \ + movl _apic_base, %eax ; \ + movl $0, APIC_EOI(%eax) ; \ + movl _cpl,%eax ; \ + testl $IRQ_BIT(irq_num), %eax ; \ + jne 3f ; \ + incb _intr_nesting_level ; \ +__CONCAT(Xresume,irq_num): ; \ + FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid dbl cnt */ \ + incl _cnt+V_INTR ; /* tally interrupts */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; \ + pushl %eax ; \ + pushl _intr_unit + (irq_num) * 4 ; \ + orl _intr_mask + (irq_num) * 4,%eax ; \ + movl %eax,_cpl ; \ + sti ; \ + call *_intr_handler + (irq_num) * 4 ; \ + MAYBE_UNMASK_IRQ(irq_num) ; \ + MEXITCOUNT ; \ + jmp _doreti ; \ +; \ + ALIGN_TEXT ; \ +3: ; \ + /* XXX skip mcounting here to avoid double count */ \ + orl $IRQ_BIT(irq_num), _ipending ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + popl %es ; \ + popl %ds ; \ + popal ; \ + addl $4+4,%esp ; \ + iret + + .text + SUPERALIGN_TEXT + .globl _Xinvltlb +_Xinvltlb: + pushl %eax + movl %cr3, %eax /* invalidate the TLB */ + movl %eax, %cr3 + ss /* stack segment, avoid %ds load */ + movl _apic_base, %eax + ss + movl $0, APIC_EOI(%eax) /* End Of Interrupt to APIC */ + popl %eax + iret + +MCOUNT_LABEL(bintr) + FAST_INTR(0,fastintr0) + FAST_INTR(1,fastintr1) + FAST_INTR(2,fastintr2) + FAST_INTR(3,fastintr3) + FAST_INTR(4,fastintr4) + FAST_INTR(5,fastintr5) + FAST_INTR(6,fastintr6) + FAST_INTR(7,fastintr7) + FAST_INTR(8,fastintr8) + FAST_INTR(9,fastintr9) + FAST_INTR(10,fastintr10) + FAST_INTR(11,fastintr11) + FAST_INTR(12,fastintr12) + FAST_INTR(13,fastintr13) + FAST_INTR(14,fastintr14) + FAST_INTR(15,fastintr15) + FAST_INTR(16,fastintr16) + FAST_INTR(17,fastintr17) + FAST_INTR(18,fastintr18) + FAST_INTR(19,fastintr19) + FAST_INTR(20,fastintr20) + FAST_INTR(21,fastintr21) + FAST_INTR(22,fastintr22) + FAST_INTR(23,fastintr23) + INTR(0,intr0) + INTR(1,intr1) + INTR(2,intr2) + INTR(3,intr3) + INTR(4,intr4) + INTR(5,intr5) + 
INTR(6,intr6) + INTR(7,intr7) + INTR(8,intr8) + INTR(9,intr9) + INTR(10,intr10) + INTR(11,intr11) + INTR(12,intr12) + INTR(13,intr13) + INTR(14,intr14) + INTR(15,intr15) + INTR(16,intr16) + INTR(17,intr17) + INTR(18,intr18) + INTR(19,intr19) + INTR(20,intr20) + INTR(21,intr21) + INTR(22,intr22) + INTR(23,intr23) +MCOUNT_LABEL(eintr) + + .data +ihandlers: /* addresses of interrupt handlers */ + /* actually resumption addresses for HWI's */ + .long Xresume0, Xresume1, Xresume2, Xresume3 + .long Xresume4, Xresume5, Xresume6, Xresume7 + .long Xresume8, Xresume9, Xresume10, Xresume11 + .long Xresume12, Xresume13, Xresume14, Xresume15 + .long Xresume16, Xresume17, Xresume18, Xresume19 + .long Xresume20, Xresume21, Xresume22, Xresume23 + .long 0, 0, 0, 0 + .long swi_tty, swi_net, _softclock, swi_ast + +imasks: /* masks for interrupt handlers */ + .space NHWI*4 /* padding; HWI masks are elsewhere */ + + .long 0, 0, 0, 0 + .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK + + .globl _ivectors +_ivectors: + .long _Xintr0, _Xintr1, _Xintr2, _Xintr3 + .long _Xintr4, _Xintr5, _Xintr6, _Xintr7 + .long _Xintr8, _Xintr9, _Xintr10, _Xintr11 + .long _Xintr12, _Xintr13, _Xintr14, _Xintr15 + .long _Xintr16, _Xintr17, _Xintr18, _Xintr19 + .long _Xintr20, _Xintr21, _Xintr22, _Xintr23 + +/* active flag for lazy masking */ +iactive: + .long 0 + + +/* + * Interrupt counters and names. The format of these and the label names + * must agree with what vmstat expects. The tables are indexed by device + * ids so that we don't have to move the names around as devices are + * attached. + */ +#include "vector.h" + .globl _intrcnt, _eintrcnt +_intrcnt: + .space (NR_DEVICES + ICU_LEN) * 4 +_eintrcnt: + + .globl _intrnames, _eintrnames +_intrnames: + .ascii DEVICE_NAMES + .asciz "stray irq0" + .asciz "stray irq1" + .asciz "stray irq2" + .asciz "stray irq3" + .asciz "stray irq4" + .asciz "stray irq5" + .asciz "stray irq6" + .asciz "stray irq7" + .asciz "stray irq8" + .asciz "stray irq9" + .asciz "stray irq10" + .asciz "stray irq11" + .asciz "stray irq12" + .asciz "stray irq13" + .asciz "stray irq14" + .asciz "stray irq15" + .asciz "stray irq16" + .asciz "stray irq17" + .asciz "stray irq18" + .asciz "stray irq19" + .asciz "stray irq20" + .asciz "stray irq21" + .asciz "stray irq22" + .asciz "stray irq23" +_eintrnames: + + .text diff --git a/sys/i386/isa/atpic_vector.s b/sys/i386/isa/atpic_vector.s new file mode 100644 index 000000000000..fe673a5f27a2 --- /dev/null +++ b/sys/i386/isa/atpic_vector.s @@ -0,0 +1,249 @@ +/* + * from: vector.s, 386BSD 0.1 unknown origin + * $Id: icu_vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $ + */ + +/* + * modified for PC98 by Kakefuda + */ + +#ifdef PC98 +#define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */ +#else +#define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */ +#endif + +#define ICU_EOI 0x20 /* XXX - define elsewhere */ + +#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8)) +#define IRQ_BYTE(irq_num) ((irq_num) / 8) + +#ifdef AUTO_EOI_1 +#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */ +#define OUTB_ICU1 +#else +#define ENABLE_ICU1 \ + movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \ + OUTB_ICU1 /* ... to clear in service bit */ +#define OUTB_ICU1 \ + outb %al,$IO_ICU1 +#endif + +#ifdef AUTO_EOI_2 +/* + * The data sheet says no auto-EOI on slave, but it sometimes works. + */ +#define ENABLE_ICU1_AND_2 ENABLE_ICU1 +#else +#define ENABLE_ICU1_AND_2 \ + movb $ICU_EOI,%al ; /* as above */ \ + outb %al,$IO_ICU2 ; /* but do second icu first ... */ \ + OUTB_ICU1 /* ... 
then first icu (if !AUTO_EOI_1) */ +#endif + +/* + * Macros for interrupt interrupt entry, call to handler, and exit. + */ + +#define FAST_INTR(irq_num, vec_name, enable_icus) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl %eax ; /* save only call-used registers */ \ + pushl %ecx ; \ + pushl %edx ; \ + pushl %ds ; \ + MAYBE_PUSHL_ES ; \ + movl $KDSEL,%eax ; \ + movl %ax,%ds ; \ + MAYBE_MOVW_AX_ES ; \ + FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + pushl _intr_unit + (irq_num) * 4 ; \ + call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ + enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \ + addl $4,%esp ; \ + incl _cnt+V_INTR ; /* book-keeping can wait */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \ + notl %eax ; \ + andl _ipending,%eax ; \ + jne 2f ; /* yes, maybe handle them */ \ +1: ; \ + MEXITCOUNT ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + MAYBE_POPL_ES ; \ + popl %ds ; \ + popl %edx ; \ + popl %ecx ; \ + popl %eax ; \ + iret ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + cmpb $3,_intr_nesting_level ; /* is there enough stack? */ \ + jae 1b ; /* no, return */ \ + movl _cpl,%eax ; \ + /* XXX next line is probably unnecessary now. */ \ + movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \ + incb _intr_nesting_level ; /* ... really limit it ... */ \ + sti ; /* ... to do this as early as possible */ \ + MAYBE_POPL_ES ; /* discard most of thin frame ... */ \ + popl %ecx ; /* ... original %ds ... */ \ + popl %edx ; \ + xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \ + pushal ; /* build fat frame (grrr) ... */ \ + pushl %ecx ; /* ... actually %ds ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; \ + movl %ax,%es ; \ + movl (2+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \ + movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \ + movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \ + pushl %eax ; \ + subl $4,%esp ; /* junk for unit number */ \ + MEXITCOUNT ; \ + jmp _doreti + +#define INTR(irq_num, vec_name, icu, enable_icus, reg) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl $0 ; /* dummy error code */ \ + pushl $0 ; /* dummy trap type */ \ + pushal ; \ + pushl %ds ; /* save our data and extra segments ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \ + movl %ax,%ds ; /* ... 
early for obsolete reasons */ \ + movl %ax,%es ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + movb _imen + IRQ_BYTE(irq_num),%al ; \ + orb $IRQ_BIT(irq_num),%al ; \ + movb %al,_imen + IRQ_BYTE(irq_num) ; \ + outb %al,$icu+ICU_IMR_OFFSET ; \ + enable_icus ; \ + movl _cpl,%eax ; \ + testb $IRQ_BIT(irq_num),%reg ; \ + jne 2f ; \ + incb _intr_nesting_level ; \ +__CONCAT(Xresume,irq_num): ; \ + FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid double count */ \ + incl _cnt+V_INTR ; /* tally interrupts */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; \ + pushl %eax ; \ + pushl _intr_unit + (irq_num) * 4 ; \ + orl _intr_mask + (irq_num) * 4,%eax ; \ + movl %eax,_cpl ; \ + sti ; \ + call *_intr_handler + (irq_num) * 4 ; \ + cli ; /* must unmask _imen and icu atomically */ \ + movb _imen + IRQ_BYTE(irq_num),%al ; \ + andb $~IRQ_BIT(irq_num),%al ; \ + movb %al,_imen + IRQ_BYTE(irq_num) ; \ + outb %al,$icu+ICU_IMR_OFFSET ; \ + sti ; /* XXX _doreti repeats the cli/sti */ \ + MEXITCOUNT ; \ + /* We could usually avoid the following jmp by inlining some of */ \ + /* _doreti, but it's probably better to use less cache. */ \ + jmp _doreti ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + /* XXX skip mcounting here to avoid double count */ \ + orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + popl %es ; \ + popl %ds ; \ + popal ; \ + addl $4+4,%esp ; \ + iret + +MCOUNT_LABEL(bintr) + FAST_INTR(0,fastintr0, ENABLE_ICU1) + FAST_INTR(1,fastintr1, ENABLE_ICU1) + FAST_INTR(2,fastintr2, ENABLE_ICU1) + FAST_INTR(3,fastintr3, ENABLE_ICU1) + FAST_INTR(4,fastintr4, ENABLE_ICU1) + FAST_INTR(5,fastintr5, ENABLE_ICU1) + FAST_INTR(6,fastintr6, ENABLE_ICU1) + FAST_INTR(7,fastintr7, ENABLE_ICU1) + FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2) + FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2) + FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2) + FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2) + FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2) + FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2) + FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2) + FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2) + INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al) + INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al) + INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al) + INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al) + INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al) + INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al) + INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al) + INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al) + INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah) +MCOUNT_LABEL(eintr) + + .data +ihandlers: /* addresses of interrupt handlers */ + /* actually resumption addresses for HWI's */ + .long Xresume0, Xresume1, Xresume2, Xresume3 + .long Xresume4, Xresume5, Xresume6, Xresume7 + .long Xresume8, Xresume9, Xresume10, Xresume11 + .long Xresume12, Xresume13, Xresume14, Xresume15 + .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .long swi_tty, swi_net, _softclock, swi_ast + +imasks: /* masks for interrupt handlers */ + .space NHWI*4 /* padding; HWI masks are elsewhere */ + + .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK + +/* + * Interrupt counters and names. 
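
On the 8259 side the mask is two 8-bit IMRs behind I/O ports, so IRQ_BYTE selects master vs. slave and IRQ_BIT the bit within that IMR, with _imen as the in-memory soft copy. The read-modify-write sequence in the INTR macro, sketched in C; outb() stands in for the usual inline-asm helper and is assumed here:

    #include <stdint.h>

    #define IRQ_BIT(irq)    (1 << ((irq) % 8))
    #define IRQ_BYTE(irq)   ((irq) / 8)

    static uint8_t imen[2];         /* soft copy: master, slave IMR */

    extern void outb(uint16_t port, uint8_t val);   /* assumed helper */

    static void
    icu_mask_irq(int irq, uint16_t imr_port)
    {
            uint8_t m;

            m = imen[IRQ_BYTE(irq)];
            m |= IRQ_BIT(irq);      /* set the mask bit */
            imen[IRQ_BYTE(irq)] = m;
            outb(imr_port, m);      /* push it to the ICU's IMR */
    }
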
The format of these and the label names + * must agree with what vmstat expects. The tables are indexed by device + * ids so that we don't have to move the names around as devices are + * attached. + */ +#include "vector.h" + .globl _intrcnt, _eintrcnt +_intrcnt: + .space (NR_DEVICES + ICU_LEN) * 4 +_eintrcnt: + + .globl _intrnames, _eintrnames +_intrnames: + .ascii DEVICE_NAMES + .asciz "stray irq0" + .asciz "stray irq1" + .asciz "stray irq2" + .asciz "stray irq3" + .asciz "stray irq4" + .asciz "stray irq5" + .asciz "stray irq6" + .asciz "stray irq7" + .asciz "stray irq8" + .asciz "stray irq9" + .asciz "stray irq10" + .asciz "stray irq11" + .asciz "stray irq12" + .asciz "stray irq13" + .asciz "stray irq14" + .asciz "stray irq15" +_eintrnames: + + .text diff --git a/sys/i386/isa/icu_ipl.s b/sys/i386/isa/icu_ipl.s new file mode 100644 index 000000000000..3790f0f3a616 --- /dev/null +++ b/sys/i386/isa/icu_ipl.s @@ -0,0 +1,108 @@ +/*- + * Copyright (c) 1989, 1990 William F. Jolitz. + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $Id: icu_ipl.s,v 1.1 1997/05/24 17:02:04 smp Exp smp $ + */ + + .data + ALIGN_DATA + + .globl _vec +_vec: + .long vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7 + .long vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15 + +/* + * + */ + .text + SUPERALIGN_TEXT + +/* + * Fake clock interrupt(s) so that they appear to come from our caller instead + * of from here, so that system profiling works. + * XXX do this more generally (for all vectors; look up the C entry point). + * XXX frame bogusness stops us from just jumping to the C entry point. 
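
The frame surgery behind that comment: the caller reached vec0 with a plain near call, so the stub pops that return address and re-pushes it beneath a constructed EFLAGS/CS pair, leaving exactly what a hardware interrupt gate would have pushed. The real handler's eventual iret therefore resumes the original caller, and profiling charges the tick to the caller's PC instead of to the stub. What vec0 leaves on the stack, lowest address first, shown as a C struct purely for illustration:

    #include <stdint.h>

    /* the three words _Xintr0's iret will pop, top of stack first */
    struct fake_intr_frame {
            uint32_t eip;       /* caller's return address, re-pushed */
            uint32_t cs;        /* KCSEL, the kernel code selector */
            uint32_t eflags;    /* saved by pushfl */
    };
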
+ */ + ALIGN_TEXT +vec0: + popl %eax /* return address */ + pushfl + pushl $KCSEL + pushl %eax + cli + MEXITCOUNT + jmp _Xintr0 /* XXX might need _Xfastintr0 */ + +#ifndef PC98 + ALIGN_TEXT +vec8: + popl %eax + pushfl + pushl $KCSEL + pushl %eax + cli + MEXITCOUNT + jmp _Xintr8 /* XXX might need _Xfastintr8 */ +#endif /* PC98 */ + +/* + * The 'generic' vector stubs. + */ + +#define BUILD_VEC(irq_num) \ + ALIGN_TEXT ; \ +__CONCAT(vec,irq_num): ; \ + int $ICU_OFFSET + (irq_num) ; \ + ret + + BUILD_VEC(1) + BUILD_VEC(2) + BUILD_VEC(3) + BUILD_VEC(4) + BUILD_VEC(5) + BUILD_VEC(6) + BUILD_VEC(7) +#ifdef PC98 + BUILD_VEC(8) +#endif + BUILD_VEC(9) + BUILD_VEC(10) + BUILD_VEC(11) + BUILD_VEC(12) + BUILD_VEC(13) + BUILD_VEC(14) + BUILD_VEC(15) diff --git a/sys/i386/isa/icu_vector.s b/sys/i386/isa/icu_vector.s new file mode 100644 index 000000000000..fe673a5f27a2 --- /dev/null +++ b/sys/i386/isa/icu_vector.s @@ -0,0 +1,249 @@ +/* + * from: vector.s, 386BSD 0.1 unknown origin + * $Id: icu_vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $ + */ + +/* + * modified for PC98 by Kakefuda + */ + +#ifdef PC98 +#define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */ +#else +#define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */ +#endif + +#define ICU_EOI 0x20 /* XXX - define elsewhere */ + +#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8)) +#define IRQ_BYTE(irq_num) ((irq_num) / 8) + +#ifdef AUTO_EOI_1 +#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */ +#define OUTB_ICU1 +#else +#define ENABLE_ICU1 \ + movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \ + OUTB_ICU1 /* ... to clear in service bit */ +#define OUTB_ICU1 \ + outb %al,$IO_ICU1 +#endif + +#ifdef AUTO_EOI_2 +/* + * The data sheet says no auto-EOI on slave, but it sometimes works. + */ +#define ENABLE_ICU1_AND_2 ENABLE_ICU1 +#else +#define ENABLE_ICU1_AND_2 \ + movb $ICU_EOI,%al ; /* as above */ \ + outb %al,$IO_ICU2 ; /* but do second icu first ... */ \ + OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */ +#endif + +/* + * Macros for interrupt interrupt entry, call to handler, and exit. + */ + +#define FAST_INTR(irq_num, vec_name, enable_icus) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl %eax ; /* save only call-used registers */ \ + pushl %ecx ; \ + pushl %edx ; \ + pushl %ds ; \ + MAYBE_PUSHL_ES ; \ + movl $KDSEL,%eax ; \ + movl %ax,%ds ; \ + MAYBE_MOVW_AX_ES ; \ + FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + pushl _intr_unit + (irq_num) * 4 ; \ + call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ + enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \ + addl $4,%esp ; \ + incl _cnt+V_INTR ; /* book-keeping can wait */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \ + notl %eax ; \ + andl _ipending,%eax ; \ + jne 2f ; /* yes, maybe handle them */ \ +1: ; \ + MEXITCOUNT ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + MAYBE_POPL_ES ; \ + popl %ds ; \ + popl %edx ; \ + popl %ecx ; \ + popl %eax ; \ + iret ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + cmpb $3,_intr_nesting_level ; /* is there enough stack? */ \ + jae 1b ; /* no, return */ \ + movl _cpl,%eax ; \ + /* XXX next line is probably unnecessary now. */ \ + movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \ + incb _intr_nesting_level ; /* ... really limit it ... */ \ + sti ; /* ... to do this as early as possible */ \ + MAYBE_POPL_ES ; /* discard most of thin frame ... */ \ + popl %ecx ; /* ... original %ds ... 
*/ \ + popl %edx ; \ + xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \ + pushal ; /* build fat frame (grrr) ... */ \ + pushl %ecx ; /* ... actually %ds ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; \ + movl %ax,%es ; \ + movl (2+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \ + movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \ + movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \ + pushl %eax ; \ + subl $4,%esp ; /* junk for unit number */ \ + MEXITCOUNT ; \ + jmp _doreti + +#define INTR(irq_num, vec_name, icu, enable_icus, reg) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + pushl $0 ; /* dummy error code */ \ + pushl $0 ; /* dummy trap type */ \ + pushal ; \ + pushl %ds ; /* save our data and extra segments ... */ \ + pushl %es ; \ + movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \ + movl %ax,%ds ; /* ... early for obsolete reasons */ \ + movl %ax,%es ; \ + GET_MPLOCK ; /* SMP Spin lock */ \ + movb _imen + IRQ_BYTE(irq_num),%al ; \ + orb $IRQ_BIT(irq_num),%al ; \ + movb %al,_imen + IRQ_BYTE(irq_num) ; \ + outb %al,$icu+ICU_IMR_OFFSET ; \ + enable_icus ; \ + movl _cpl,%eax ; \ + testb $IRQ_BIT(irq_num),%reg ; \ + jne 2f ; \ + incb _intr_nesting_level ; \ +__CONCAT(Xresume,irq_num): ; \ + FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid double count */ \ + incl _cnt+V_INTR ; /* tally interrupts */ \ + movl _intr_countp + (irq_num) * 4,%eax ; \ + incl (%eax) ; \ + movl _cpl,%eax ; \ + pushl %eax ; \ + pushl _intr_unit + (irq_num) * 4 ; \ + orl _intr_mask + (irq_num) * 4,%eax ; \ + movl %eax,_cpl ; \ + sti ; \ + call *_intr_handler + (irq_num) * 4 ; \ + cli ; /* must unmask _imen and icu atomically */ \ + movb _imen + IRQ_BYTE(irq_num),%al ; \ + andb $~IRQ_BIT(irq_num),%al ; \ + movb %al,_imen + IRQ_BYTE(irq_num) ; \ + outb %al,$icu+ICU_IMR_OFFSET ; \ + sti ; /* XXX _doreti repeats the cli/sti */ \ + MEXITCOUNT ; \ + /* We could usually avoid the following jmp by inlining some of */ \ + /* _doreti, but it's probably better to use less cache. 
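
The middle of this INTR macro is the spl bookkeeping: the current _cpl is saved (and pushed for _doreti), widened by the handler's group mask so related sources stay soft-blocked, and only then are interrupts re-enabled around the C handler. A sketch of that sequence; enable_interrupts() is a hypothetical sti wrapper and the handler argument type is simplified:

    extern unsigned int cpl;                /* current soft priority */
    extern unsigned int intr_mask[];        /* per-IRQ group masks */
    extern void (*intr_handler[])(void *);  /* argument type simplified */
    extern void *intr_unit[];
    extern void enable_interrupts(void);    /* hypothetical sti wrapper */

    static void
    run_intr_handler(int irq)
    {
            unsigned int saved = cpl;

            cpl = saved | intr_mask[irq];   /* block this IRQ's group */
            enable_interrupts();            /* sti */
            (*intr_handler[irq])(intr_unit[irq]);
            /* the asm then unmasks the ICU with interrupts disabled and
             * tail-jumps to _doreti, which restores the saved cpl and
             * drains anything left in _ipending */
    }
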
*/ \ + jmp _doreti ; \ +; \ + ALIGN_TEXT ; \ +2: ; \ + /* XXX skip mcounting here to avoid double count */ \ + orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \ + REL_MPLOCK ; /* SMP release global lock */ \ + popl %es ; \ + popl %ds ; \ + popal ; \ + addl $4+4,%esp ; \ + iret + +MCOUNT_LABEL(bintr) + FAST_INTR(0,fastintr0, ENABLE_ICU1) + FAST_INTR(1,fastintr1, ENABLE_ICU1) + FAST_INTR(2,fastintr2, ENABLE_ICU1) + FAST_INTR(3,fastintr3, ENABLE_ICU1) + FAST_INTR(4,fastintr4, ENABLE_ICU1) + FAST_INTR(5,fastintr5, ENABLE_ICU1) + FAST_INTR(6,fastintr6, ENABLE_ICU1) + FAST_INTR(7,fastintr7, ENABLE_ICU1) + FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2) + FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2) + FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2) + FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2) + FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2) + FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2) + FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2) + FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2) + INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al) + INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al) + INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al) + INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al) + INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al) + INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al) + INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al) + INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al) + INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah) + INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah) +MCOUNT_LABEL(eintr) + + .data +ihandlers: /* addresses of interrupt handlers */ + /* actually resumption addresses for HWI's */ + .long Xresume0, Xresume1, Xresume2, Xresume3 + .long Xresume4, Xresume5, Xresume6, Xresume7 + .long Xresume8, Xresume9, Xresume10, Xresume11 + .long Xresume12, Xresume13, Xresume14, Xresume15 + .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .long swi_tty, swi_net, _softclock, swi_ast + +imasks: /* masks for interrupt handlers */ + .space NHWI*4 /* padding; HWI masks are elsewhere */ + + .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK + +/* + * Interrupt counters and names. The format of these and the label names + * must agree with what vmstat expects. The tables are indexed by device + * ids so that we don't have to move the names around as devices are + * attached. + */ +#include "vector.h" + .globl _intrcnt, _eintrcnt +_intrcnt: + .space (NR_DEVICES + ICU_LEN) * 4 +_eintrcnt: + + .globl _intrnames, _eintrnames +_intrnames: + .ascii DEVICE_NAMES + .asciz "stray irq0" + .asciz "stray irq1" + .asciz "stray irq2" + .asciz "stray irq3" + .asciz "stray irq4" + .asciz "stray irq5" + .asciz "stray irq6" + .asciz "stray irq7" + .asciz "stray irq8" + .asciz "stray irq9" + .asciz "stray irq10" + .asciz "stray irq11" + .asciz "stray irq12" + .asciz "stray irq13" + .asciz "stray irq14" + .asciz "stray irq15" +_eintrnames: + + .text diff --git a/sys/i386/isa/icu.s b/sys/i386/isa/ipl.s similarity index 70% rename from sys/i386/isa/icu.s rename to sys/i386/isa/ipl.s index 57978a810c14..25ddc3f63359 100644 --- a/sys/i386/isa/icu.s +++ b/sys/i386/isa/ipl.s @@ -34,30 +34,28 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * @(#)icu.s 7.2 (Berkeley) 5/21/91 + * @(#)ipl.s * - * $Id: icu.s,v 1.31 1997/04/28 01:08:41 fsmp Exp $ + * $Id: ipl.s,v 1.1 1997/05/24 17:02:30 smp Exp smp $ */ -#include "opt_smp.h" - /* * AT/386 * Vector interrupt control section - */ - -/* - * XXX this file should be named ipl.s. All spls are now soft and the - * only thing related to the hardware icu is that the h/w interrupt - * numbers are used without translation in the masks. */ .data + ALIGN_DATA + +/* current priority (all off) */ .globl _cpl -_cpl: .long HWI_MASK | SWI_MASK /* current priority (all off) */ +_cpl: .long HWI_MASK | SWI_MASK + +/* interrupt mask enable (all h/w off) */ .globl _imen -_imen: .long HWI_MASK /* interrupt mask enable (all h/w off) */ +_imen: .long HWI_MASK + .globl _tty_imask _tty_imask: .long 0 .globl _bio_imask @@ -65,9 +63,14 @@ _bio_imask: .long 0 .globl _net_imask _net_imask: .long 0 .globl _ipending + +/* pending interrupts blocked by splxxx() */ _ipending: .long 0 + +/* set with bits for which queue to service */ .globl _netisr -_netisr: .long 0 /* set with bits for which queue to service */ +_netisr: .long 0 + .globl _netisrs _netisrs: .long dummynetisr, dummynetisr, dummynetisr, dummynetisr @@ -79,36 +82,6 @@ _netisrs: .long dummynetisr, dummynetisr, dummynetisr, dummynetisr .long dummynetisr, dummynetisr, dummynetisr, dummynetisr -#if defined(APIC_IO) - /* this allows us to change the 8254 APIC pin# assignment */ - .globl _Xintr8254 -_Xintr8254: - .long _Xintr7 - - /* this allows us to change the RTC clock APIC pin# assignment */ - .globl _XintrRTC -_XintrRTC: - .long _Xintr7 - - /* used by this file, microtime.s and clock.c */ - .globl _mask8254 -_mask8254: - .long 0 - - /* used by this file and clock.c */ - .globl _maskRTC -_maskRTC: - .long 0 - -#endif /* APIC_IO */ - .globl _vec -_vec: - .long vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7 - .long vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15 -#if defined(APIC_IO) - .long vec16, vec17, vec18, vec19, vec20, vec21, vec22, vec23 -#endif /* APIC_IO */ - .text /* @@ -138,9 +111,7 @@ doreti_exit: movl %eax,_cpl decb _intr_nesting_level MEXITCOUNT -#ifdef SMP - call _rel_mplock -#endif /* SMP */ + REL_MPLOCK .globl doreti_popl_es doreti_popl_es: popl %es @@ -178,9 +149,7 @@ doreti_unpend: */ sti bsfl %ecx,%ecx /* slow, but not worth optimizing */ -#ifdef SMP - lock -#endif + MP_INSTR_LOCK btrl %ecx,_ipending jnc doreti_next /* some intr cleared memory copy */ movl ihandlers(,%ecx,4),%edx @@ -233,9 +202,7 @@ swi_ast_phantom: * using by using cli, but they are unavoidable for lcall entries. */ cli -#ifdef SMP - lock -#endif + MP_INSTR_LOCK orl $SWI_AST_PENDING,_ipending subl %eax,%eax jmp doreti_exit /* SWI_AST is highest so we must be done */ @@ -274,9 +241,7 @@ splz_next: ALIGN_TEXT splz_unpend: bsfl %ecx,%ecx -#ifdef SMP - lock -#endif + MP_INSTR_LOCK btrl %ecx,_ipending jnc splz_next movl ihandlers(,%ecx,4),%edx @@ -305,120 +270,6 @@ splz_swi: movl %eax,_cpl jmp splz_next -/* - * Fake clock interrupt(s) so that they appear to come from our caller instead - * of from here, so that system profiling works. - * XXX do this more generally (for all vectors; look up the C entry point). - * XXX frame bogusness stops us from just jumping to the C entry point. 
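
doreti_unpend in this hunk is the consumer of every _ipending bit the vector code sets aside: find the lowest pending source not blocked by _cpl, claim its bit atomically (MP_INSTR_LOCK supplying the lock prefix on SMP), and dispatch through ihandlers. The same loop in C, using compiler builtins for the bsfl and the locked btrl:

    extern unsigned int cpl, ipending;
    extern void (*ihandlers[32])(void);

    static void
    doreti_unpend_sketch(void)
    {
            unsigned int pend, bit;
            int n;

            while ((pend = ipending & ~cpl) != 0) {
                    n = __builtin_ffs(pend) - 1;    /* bsfl: lowest set bit */
                    bit = 1u << n;
                    /* locked btrl: claim the bit or lose the race */
                    if ((__atomic_fetch_and(&ipending, ~bit,
                        __ATOMIC_SEQ_CST) & bit) == 0)
                            continue;               /* jnc doreti_next */
                    (*ihandlers[n])();              /* resume/SWI handler */
            }
    }
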
- */ - ALIGN_TEXT -#if defined(APIC_IO) - /* generic vector function for 8254 clock */ - .globl _vec8254 -_vec8254: - popl %eax /* return address */ - pushfl -#define KCSEL 8 - pushl $KCSEL - pushl %eax - cli - movl _mask8254,%eax /* lazy masking */ - notl %eax - andl %eax,iactive - MEXITCOUNT - movl _Xintr8254, %eax - jmp %eax /* XXX might need _Xfastintr# */ -#else /* APIC_IO */ -vec0: - popl %eax /* return address */ - pushfl -#define KCSEL 8 - pushl $KCSEL - pushl %eax - cli - MEXITCOUNT - jmp _Xintr0 /* XXX might need _Xfastintr0 */ -#endif /* APIC_IO */ - -#ifndef PC98 - ALIGN_TEXT -#if defined(APIC_IO) - /* generic vector function for RTC clock */ - .globl _vecRTC -_vecRTC: - popl %eax - pushfl - pushl $KCSEL - pushl %eax - cli - movl _maskRTC,%eax /* lazy masking */ - notl %eax - andl %eax,iactive - MEXITCOUNT - movl _XintrRTC, %eax - jmp %eax /* XXX might need _Xfastintr# */ -#else -vec8: - popl %eax - pushfl - pushl $KCSEL - pushl %eax - cli - MEXITCOUNT - jmp _Xintr8 /* XXX might need _Xfastintr8 */ -#endif /* APIC_IO */ -#endif /* PC98 */ - -# if defined(APIC_IO) -#define BUILD_VEC(irq_num) \ - ALIGN_TEXT ; \ -__CONCAT(vec,irq_num): ; \ - popl %eax ; \ - pushfl ; \ - pushl $KCSEL ; \ - pushl %eax ; \ - cli ; \ - andl $~IRQ_BIT(irq_num),iactive ; /* lazy masking */ \ - MEXITCOUNT ; \ - jmp __CONCAT(_Xintr,irq_num) -# else -#define BUILD_VEC(irq_num) \ - ALIGN_TEXT ; \ -__CONCAT(vec,irq_num): ; \ - int $ICU_OFFSET + (irq_num) ; \ - ret -# endif /* APIC_IO */ - - BUILD_VEC(1) - BUILD_VEC(2) - BUILD_VEC(3) - BUILD_VEC(4) - BUILD_VEC(5) - BUILD_VEC(6) - BUILD_VEC(7) -#ifdef PC98 - BUILD_VEC(8) -#endif - BUILD_VEC(9) - BUILD_VEC(10) - BUILD_VEC(11) - BUILD_VEC(12) - BUILD_VEC(13) - BUILD_VEC(14) - BUILD_VEC(15) -#if defined(APIC_IO) - BUILD_VEC(0) /* NOT specific in IO APIC hardware */ - BUILD_VEC(8) /* NOT specific in IO APIC hardware */ - - BUILD_VEC(16) /* 8 additional INTs in IO APIC */ - BUILD_VEC(17) - BUILD_VEC(18) - BUILD_VEC(19) - BUILD_VEC(20) - BUILD_VEC(21) - BUILD_VEC(22) - BUILD_VEC(23) -#endif /* APIC_IO */ ALIGN_TEXT swi_net: @@ -461,3 +312,9 @@ swi_tty: #else ret #endif + +#ifdef APIC_IO +#include "i386/isa/apic_ipl.s" +#else +#include "i386/isa/icu_ipl.s" +#endif /* APIC_IO */ diff --git a/sys/i386/isa/vector.s b/sys/i386/isa/vector.s index e20d9f54857d..1fc56685f3eb 100644 --- a/sys/i386/isa/vector.s +++ b/sys/i386/isa/vector.s @@ -1,6 +1,6 @@ /* * from: vector.s, 386BSD 0.1 unknown origin - * $Id: vector.s,v 1.28 1997/04/28 01:47:55 fsmp Exp $ + * $Id: vector.s,v 1.2 1997/05/24 17:05:26 smp Exp smp $ */ /* @@ -8,11 +8,6 @@ */ #include "opt_auto_eoi.h" -#include "opt_smp.h" - -#if defined(SMP) -#include /* this includes */ -#endif /* SMP */ #include #ifdef PC98 @@ -21,144 +16,6 @@ #include #endif -#ifdef PC98 -#define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */ -#else -#define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */ -#endif - - -#if defined(SMP) - -#define GET_MPLOCK call _get_mplock -#define REL_MPLOCK call _rel_mplock - -#else - -#define GET_MPLOCK /* NOP get Kernel Mutex */ -#define REL_MPLOCK /* NOP release mutex */ - -#endif /* SMP */ - - -#if defined(APIC_IO) - -#define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2)) -#define IRQ_BIT(irq_num) (1 << (irq_num)) - -#define ENABLE_APIC \ - movl _apic_base, %eax ; \ - movl $0, APIC_EOI(%eax) - -#define ENABLE_ICU1 ENABLE_APIC -#define ENABLE_ICU1_AND_2 ENABLE_APIC - -#define MASK_IRQ(irq_num,icu) \ - orl $IRQ_BIT(irq_num),_imen ; /* set the mask bit */ \ - movl _io_apic_base,%ecx ; /* io apic addr */ \ - movl 
$REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \ - movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ - orl $IOART_INTMASK,%eax ; /* set the mask */ \ - movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ - -#define UNMASK_IRQ(irq_num,icu) \ - andl $~IRQ_BIT(irq_num),_imen ; /* clear mask bit */ \ - movl _io_apic_base,%ecx ; /* io apic addr */ \ - movl $REDTBL_IDX(irq_num),(%ecx) ; /* write the index */ \ - movl IOAPIC_WINDOW(%ecx),%eax ; /* current value */ \ - andl $~IOART_INTMASK,%eax ; /* clear the mask */ \ - movl %eax,IOAPIC_WINDOW(%ecx) ; /* new value */ - -#define TEST_IRQ(irq_num,reg) \ - testl $IRQ_BIT(irq_num),%eax - -#define SET_IPENDING(irq_num) \ - orl $IRQ_BIT(irq_num),_ipending - -/* - * 'lazy masking' code submitted by: Bruce Evans - */ -#define MAYBE_MASK_IRQ(irq_num,icu) \ - testl $IRQ_BIT(irq_num),iactive ; /* lazy masking */ \ - je 1f ; /* NOT currently active */ \ - MASK_IRQ(irq_num,icu) ; \ - ENABLE_APIC ; \ - SET_IPENDING(irq_num) ; \ - REL_MPLOCK ; /* SMP release global lock */ \ - popl %es ; \ - popl %ds ; \ - popal ; \ - addl $4+4,%esp ; \ - iret ; \ -; \ - ALIGN_TEXT ; \ -1: ; \ - orl $IRQ_BIT(irq_num),iactive - -#define MAYBE_UNMASK_IRQ(irq_num,icu) \ - andl $~IRQ_BIT(irq_num),iactive ; \ - testl $IRQ_BIT(irq_num),_imen ; \ - je 3f ; \ - UNMASK_IRQ(irq_num,icu) ; \ -3: - -#else /* APIC_IO */ - -#define MASK_IRQ(irq_num,icu) \ - movb _imen + IRQ_BYTE(irq_num),%al ; \ - orb $IRQ_BIT(irq_num),%al ; \ - movb %al,_imen + IRQ_BYTE(irq_num) ; \ - outb %al,$icu+ICU_IMR_OFFSET - -#define UNMASK_IRQ(irq_num,icu) \ - movb _imen + IRQ_BYTE(irq_num),%al ; \ - andb $~IRQ_BIT(irq_num),%al ; \ - movb %al,_imen + IRQ_BYTE(irq_num) ; \ - outb %al,$icu+ICU_IMR_OFFSET - -#define TEST_IRQ(irq_num,reg) \ - testb $IRQ_BIT(irq_num),%reg - -#define SET_IPENDING(irq_num) \ - orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) - -#define ICU_EOI 0x20 /* XXX - define elsewhere */ - -#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8)) -#define IRQ_BYTE(irq_num) ((irq_num) / 8) - -#ifdef AUTO_EOI_1 -#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */ -#define OUTB_ICU1 -#else -#define ENABLE_ICU1 \ - movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \ - OUTB_ICU1 /* ... to clear in service bit */ -#define OUTB_ICU1 \ - outb %al,$IO_ICU1 -#endif - -#ifdef AUTO_EOI_2 -/* - * The data sheet says no auto-EOI on slave, but it sometimes works. - */ -#define ENABLE_ICU1_AND_2 ENABLE_ICU1 -#else -#define ENABLE_ICU1_AND_2 \ - movb $ICU_EOI,%al ; /* as above */ \ - outb %al,$IO_ICU2 ; /* but do second icu first ... */ \ - OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */ -#endif - -#define MAYBE_MASK_IRQ(irq_num,icu) \ - MASK_IRQ(irq_num,icu) - -#define MAYBE_UNMASK_IRQ(irq_num,icu) \ - UNMASK_IRQ(irq_num,icu) - -#endif /* APIC_IO */ - - #ifdef FAST_INTR_HANDLER_USES_ES #define ACTUALLY_PUSHED 1 #define MAYBE_MOVW_AX_ES movl %ax,%es @@ -176,6 +33,16 @@ #define MAYBE_PUSHL_ES #endif + .data + ALIGN_DATA + + .globl _intr_nesting_level +_intr_nesting_level: + .byte 0 + .space 3 + + .text + /* * Macros for interrupt interrupt entry, call to handler, and exit. * @@ -221,275 +88,8 @@ * loading segregs. 
*/ -#define FAST_INTR(irq_num, vec_name, enable_icus) \ - .text ; \ - SUPERALIGN_TEXT ; \ -IDTVEC(vec_name) ; \ - pushl %eax ; /* save only call-used registers */ \ - pushl %ecx ; \ - pushl %edx ; \ - pushl %ds ; \ - MAYBE_PUSHL_ES ; \ - movl $KDSEL,%eax ; \ - movl %ax,%ds ; \ - MAYBE_MOVW_AX_ES ; \ - FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \ - GET_MPLOCK ; /* SMP Spin lock */ \ - pushl _intr_unit + (irq_num) * 4 ; \ - call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ - enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \ - addl $4,%esp ; \ - incl _cnt+V_INTR ; /* book-keeping can wait */ \ - movl _intr_countp + (irq_num) * 4,%eax ; \ - incl (%eax) ; \ - movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \ - notl %eax ; \ - andl _ipending,%eax ; \ - jne 2f ; /* yes, maybe handle them */ \ -1: ; \ - MEXITCOUNT ; \ - REL_MPLOCK ; /* SMP release global lock */ \ - MAYBE_POPL_ES ; \ - popl %ds ; \ - popl %edx ; \ - popl %ecx ; \ - popl %eax ; \ - iret ; \ -; \ - ALIGN_TEXT ; \ -2: ; \ - cmpb $3,_intr_nesting_level ; /* is there enough stack? */ \ - jae 1b ; /* no, return */ \ - movl _cpl,%eax ; \ - /* XXX next line is probably unnecessary now. */ \ - movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \ - incb _intr_nesting_level ; /* ... really limit it ... */ \ - sti ; /* ... to do this as early as possible */ \ - MAYBE_POPL_ES ; /* discard most of thin frame ... */ \ - popl %ecx ; /* ... original %ds ... */ \ - popl %edx ; \ - xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \ - pushal ; /* build fat frame (grrr) ... */ \ - pushl %ecx ; /* ... actually %ds ... */ \ - pushl %es ; \ - movl $KDSEL,%eax ; \ - movl %ax,%es ; \ - movl (2+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \ - movl %ecx,(2+6)*4(%esp) ; /* ... to fat frame ... */ \ - movl (2+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \ - pushl %eax ; \ - subl $4,%esp ; /* junk for unit number */ \ - MEXITCOUNT ; \ - jmp _doreti - -#define INTR(irq_num, vec_name, icu, enable_icus, reg) \ - .text ; \ - SUPERALIGN_TEXT ; \ -IDTVEC(vec_name) ; \ - pushl $0 ; /* dummy error code */ \ - pushl $0 ; /* dummy trap type */ \ - pushal ; \ - pushl %ds ; /* save our data and extra segments ... */ \ - pushl %es ; \ - movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \ - movl %ax,%ds ; /* ... early for obsolete reasons */ \ - movl %ax,%es ; \ - GET_MPLOCK ; /* SMP Spin lock */ \ - MAYBE_MASK_IRQ(irq_num,icu) ; \ - enable_icus ; \ - movl _cpl,%eax ; \ - TEST_IRQ(irq_num,reg) ; \ - jne 2f ; \ - incb _intr_nesting_level ; \ -__CONCAT(Xresume,irq_num): ; \ - FAKE_MCOUNT(12*4(%esp)) ; /* XXX late to avoid double count */ \ - incl _cnt+V_INTR ; /* tally interrupts */ \ - movl _intr_countp + (irq_num) * 4,%eax ; \ - incl (%eax) ; \ - movl _cpl,%eax ; \ - pushl %eax ; \ - pushl _intr_unit + (irq_num) * 4 ; \ - orl _intr_mask + (irq_num) * 4,%eax ; \ - movl %eax,_cpl ; \ - sti ; \ - call *_intr_handler + (irq_num) * 4 ; \ - cli ; /* must unmask _imen and icu atomically */ \ - MAYBE_UNMASK_IRQ(irq_num,icu) ; \ - sti ; /* XXX _doreti repeats the cli/sti */ \ - MEXITCOUNT ; \ - /* We could usually avoid the following jmp by inlining some of */ \ - /* _doreti, but it's probably better to use less cache. 
*/ \ - jmp _doreti ; \ -; \ - ALIGN_TEXT ; \ -2: ; \ - /* XXX skip mcounting here to avoid double count */ \ - SET_IPENDING(irq_num) ; \ - REL_MPLOCK ; /* SMP release global lock */ \ - popl %es ; \ - popl %ds ; \ - popal ; \ - addl $4+4,%esp ; \ - iret - -#if defined(APIC_IO) - .text - SUPERALIGN_TEXT - .globl _Xinvltlb -_Xinvltlb: - pushl %eax - movl %cr3, %eax - movl %eax, %cr3 - ss - movl _apic_base, %eax - ss - movl $0, APIC_EOI(%eax) - popl %eax - iret -#endif /* APIC_IO */ - -MCOUNT_LABEL(bintr) - FAST_INTR(0,fastintr0, ENABLE_ICU1) - FAST_INTR(1,fastintr1, ENABLE_ICU1) - FAST_INTR(2,fastintr2, ENABLE_ICU1) - FAST_INTR(3,fastintr3, ENABLE_ICU1) - FAST_INTR(4,fastintr4, ENABLE_ICU1) - FAST_INTR(5,fastintr5, ENABLE_ICU1) - FAST_INTR(6,fastintr6, ENABLE_ICU1) - FAST_INTR(7,fastintr7, ENABLE_ICU1) - FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2) - FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2) - FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2) - FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2) - FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2) - FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2) - FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2) - FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2) -#if defined(APIC_IO) - FAST_INTR(16,fastintr16, ENABLE_ICU1_AND_2) - FAST_INTR(17,fastintr17, ENABLE_ICU1_AND_2) - FAST_INTR(18,fastintr18, ENABLE_ICU1_AND_2) - FAST_INTR(19,fastintr19, ENABLE_ICU1_AND_2) - FAST_INTR(20,fastintr20, ENABLE_ICU1_AND_2) - FAST_INTR(21,fastintr21, ENABLE_ICU1_AND_2) - FAST_INTR(22,fastintr22, ENABLE_ICU1_AND_2) - FAST_INTR(23,fastintr23, ENABLE_ICU1_AND_2) -#endif /* APIC_IO */ - INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al) - INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al) - INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al) - INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al) - INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al) - INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al) - INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al) - INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al) - INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah) -#if defined(APIC_IO) - INTR(16,intr16, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(17,intr17, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(18,intr18, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(19,intr19, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(20,intr20, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(21,intr21, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(22,intr22, IO_ICU2, ENABLE_ICU1_AND_2, ah) - INTR(23,intr23, IO_ICU2, ENABLE_ICU1_AND_2, ah) -#endif /* APIC_IO */ -MCOUNT_LABEL(eintr) - - .data -ihandlers: /* addresses of interrupt handlers */ - /* actually resumption addresses for HWI's */ - .long Xresume0, Xresume1, Xresume2, Xresume3 - .long Xresume4, Xresume5, Xresume6, Xresume7 - .long Xresume8, Xresume9, Xresume10, Xresume11 - .long Xresume12, Xresume13, Xresume14, Xresume15 -#if defined(APIC_IO) - .long Xresume16, Xresume17, Xresume18, Xresume19 - .long Xresume20, Xresume21, Xresume22, Xresume23 +#ifdef APIC_IO +#include "i386/isa/apic_vector.s" #else - .long 0, 0, 0, 0, 0, 0, 0, 0 -#endif /* APIC_IO */ - .long 0, 0, 0, 0, swi_tty, swi_net, _softclock, swi_ast - -imasks: /* masks for interrupt handlers */ - .space NHWI*4 /* padding; HWI masks are elsewhere */ - -#if !defined(APIC_IO) /* Less padding for APIC_IO, NHWI is higher */ - 
.long 0, 0, 0, 0, 0, 0, 0, 0 -#endif /* APIC_IO */ - .long 0, 0, 0, 0 - .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CLOCK_MASK, SWI_AST_MASK - - .globl _intr_nesting_level -_intr_nesting_level: - .byte 0 - .space 3 - -#if defined(APIC_IO) - - .globl _ivectors -_ivectors: - .long _Xintr0, _Xintr1, _Xintr2, _Xintr3 - .long _Xintr4, _Xintr5, _Xintr6, _Xintr7 - .long _Xintr8, _Xintr9, _Xintr10, _Xintr11 - .long _Xintr12, _Xintr13, _Xintr14, _Xintr15 - .long _Xintr16, _Xintr17, _Xintr18, _Xintr19 - .long _Xintr20, _Xintr21, _Xintr22, _Xintr23 - -/* active flag for lazy masking */ -iactive: - .long 0 - -#endif /* APIC_IO */ - -/* - * Interrupt counters and names. The format of these and the label names - * must agree with what vmstat expects. The tables are indexed by device - * ids so that we don't have to move the names around as devices are - * attached. - */ -#include "vector.h" - .globl _intrcnt, _eintrcnt -_intrcnt: - .space (NR_DEVICES + ICU_LEN) * 4 -_eintrcnt: - - .globl _intrnames, _eintrnames -_intrnames: - .ascii DEVICE_NAMES - .asciz "stray irq0" - .asciz "stray irq1" - .asciz "stray irq2" - .asciz "stray irq3" - .asciz "stray irq4" - .asciz "stray irq5" - .asciz "stray irq6" - .asciz "stray irq7" - .asciz "stray irq8" - .asciz "stray irq9" - .asciz "stray irq10" - .asciz "stray irq11" - .asciz "stray irq12" - .asciz "stray irq13" - .asciz "stray irq14" - .asciz "stray irq15" -#if defined(APIC_IO) - .asciz "stray irq16" - .asciz "stray irq17" - .asciz "stray irq18" - .asciz "stray irq19" - .asciz "stray irq20" - .asciz "stray irq21" - .asciz "stray irq22" - .asciz "stray irq23" -#endif /* APIC_IO */ -_eintrnames: - - .text +#include "i386/isa/icu_vector.s" +#endif /* APIC_IO */
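
A few notes on the mechanisms this patch splits out, each with a short
illustrative C sketch. All C below is a model, not code from the tree:
identifiers either mirror the assembly symbols by name or are
hypothetical helpers, and locking/atomicity is ignored unless noted.

The ipl.s hunks keep all spl state in software: _cpl is a bitmask of
the interrupt classes currently blocked and _ipending collects
interrupts that arrived while blocked. doreti_unpend and splz_unpend
pick the lowest pending unmasked bit with bsfl, claim it with btrl
(lock-prefixed on SMP, hence MP_INSTR_LOCK), and enter its handler.
A single-CPU model of that replay loop, with hypothetical splx_model
and ihandlers declarations:

    #include <stdint.h>

    typedef void (*ihandler_t)(void);

    extern volatile uint32_t cpl;       /* blocked interrupt classes */
    extern volatile uint32_t ipending;  /* deferred by splxxx()      */
    extern ihandler_t ihandlers[32];    /* HWI resume points + SWIs  */

    /* Lower the priority, then replay everything that was deferred
     * while its class was blocked (cf. doreti_unpend/splz_unpend). */
    void
    splx_model(uint32_t new_cpl)
    {
            uint32_t ready;

            cpl = new_cpl;
            while ((ready = ipending & ~cpl) != 0) {
                    int irq = __builtin_ctz(ready); /* bsfl          */
                    ipending &= ~(1u << irq);       /* btrl          */
                    ihandlers[irq]();               /* service it    */
            }
    }

On SMP the btrl needs the lock prefix because other CPUs may be
setting bits in _ipending concurrently; the C version would need an
atomic test-and-clear at that point.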
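In the APIC_IO case, MASK_IRQ/UNMASK_IRQ go through the IO APIC's two
memory-mapped registers: an index register at the base address and a
32-bit data window 0x10 bytes above it. REDTBL_IDX(irq) selects the
low dword of the pin's redirection entry, and bit 16 of that dword
(IOART_INTMASK) masks the pin. A hypothetical MMIO sketch, assuming
io_apic_base was mapped at probe time:

    #include <stdint.h>

    #define REDTBL_IDX(irq) (0x10 + (irq) * 2) /* low dword of entry */
    #define IOART_INTMASK   0x00010000         /* mask bit           */

    extern volatile uint32_t *io_apic_base;

    static void
    ioapic_set_mask(int irq, int masked)
    {
            volatile uint32_t *index  = io_apic_base;
            volatile uint32_t *window = io_apic_base + 0x10 / 4;
            uint32_t lo;

            *index = REDTBL_IDX(irq);       /* write the index       */
            lo = *window;                   /* current value         */
            if (masked)
                    lo |= IOART_INTMASK;    /* set the mask          */
            else
                    lo &= ~IOART_INTMASK;   /* clear the mask        */
            *window = lo;                   /* new value             */
    }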
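The "lazy masking" trick (MAYBE_MASK_IRQ and the iactive word) avoids
touching those slow IO APIC registers on every interrupt: the pin is
only masked when an IRQ fires while its previous instance is still
being serviced. In that case the stub masks the pin, sends the EOI,
records the IRQ in _ipending, and irets; the unmask happens later in
MAYBE_UNMASK_IRQ once the first instance drains. A model, reusing the
ioapic_set_mask() sketch above plus a hypothetical apic_eoi():

    #include <stdint.h>

    extern volatile uint32_t iactive;  /* IRQs being serviced now    */
    extern volatile uint32_t imen;     /* IRQs masked at the APIC    */
    extern volatile uint32_t ipending;
    extern void ioapic_set_mask(int irq, int masked);
    extern void apic_eoi(void);

    /* Returns 1 if the interrupt was deferred (caller just irets).  */
    static int
    maybe_mask_irq_model(int irq)
    {
            uint32_t bit = 1u << irq;

            if (iactive & bit) {         /* re-entered while active  */
                    imen |= bit;
                    ioapic_set_mask(irq, 1);
                    apic_eoi();          /* let other vectors in     */
                    ipending |= bit;     /* service it later         */
                    return (1);
            }
            iactive |= bit;              /* fast path: only a flag   */
            return (0);
    }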
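The !APIC_IO variants keep a shadow of both 8259 interrupt mask
registers in _imen and write only the byte that changed out to the
controller; ICU_IMR_OFFSET is 1 on AT hardware and 2 on PC98. With
the standard AT base ports (0x20 and 0xa0, the usual values of
IO_ICU1 and IO_ICU2), the same operation in C looks like:

    #include <stdint.h>

    #define IO_ICU1         0x20    /* master 8259 base port         */
    #define IO_ICU2         0xa0    /* slave 8259 base port          */
    #define ICU_IMR_OFFSET  1       /* IMR is at base + 1 on AT      */

    static uint16_t imen = 0xffff;  /* shadow IMR, all h/w masked    */

    static inline void
    outb(uint16_t port, uint8_t val)
    {
            __asm__ __volatile__("outb %0, %1" : : "a"(val), "Nd"(port));
    }

    static void
    icu_set_mask(int irq, int masked)
    {
            if (masked)
                    imen |= 1u << irq;
            else
                    imen &= ~(1u << irq);
            if (irq < 8)
                    outb(IO_ICU1 + ICU_IMR_OFFSET, imen & 0xff);
            else
                    outb(IO_ICU2 + ICU_IMR_OFFSET, imen >> 8);
    }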
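Stripped of the frame juggling, the INTR() macro implements the
policy below. This is a heavily simplified model: the real code also
tracks _intr_nesting_level, converts thin frames to fat ones, and
reaches _doreti with a jmp rather than a call.

    #include <stdint.h>

    extern volatile uint32_t cpl, ipending;
    extern uint32_t intr_mask[];           /* per-IRQ spl mask       */
    extern void   (*intr_handler[])(void *);
    extern void    *intr_unit[];
    extern void     maybe_mask_irq(int), maybe_unmask_irq(int);
    extern void     send_eoi(int), doreti(uint32_t);

    void
    intr_model(int irq)
    {
            uint32_t saved_cpl;

            maybe_mask_irq(irq);        /* lazy masking (APIC case)  */
            send_eoi(irq);              /* ENABLE_ICU* / APIC EOI    */

            if (cpl & (1u << irq)) {    /* class blocked right now?  */
                    ipending |= 1u << irq; /* SET_IPENDING           */
                    return;                /* plain iret             */
            }

            saved_cpl = cpl;
            cpl |= intr_mask[irq];      /* raise the priority        */
            /* sti: nested interrupts allowed at the new level */
            intr_handler[irq](intr_unit[irq]);
            /* cli: unmask _imen and the icu atomically */
            maybe_unmask_irq(irq);
            doreti(saved_cpl);          /* restore cpl, replay       */
    }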
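_Xinvltlb, now kept with the SMP vectors, is the TLB-shootdown IPI
handler. On the i386-class processors this code targets, writing %cr3
with its own value invalidates the whole TLB (there are no global
pages to preserve yet), so each CPU that takes the IPI effectively
does the following and then writes 0 to the local APIC EOI register:

    /* Flush the TLB by rewriting %cr3 with itself. */
    static inline void
    invltlb(void)
    {
            unsigned long cr3;

            __asm__ __volatile__("movl %%cr3, %0" : "=r"(cr3));
            __asm__ __volatile__("movl %0, %%cr3" : : "r"(cr3) : "memory");
    }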
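Finally, the _intrcnt/_intrnames pair that moved with the vectors has
a layout contract with vmstat(8): a flat array of counters between
_intrcnt and _eintrcnt, and one NUL-terminated name per counter
packed back to back between _intrnames and _eintrnames, in the same
order. A toy userland illustration of walking that layout:

    #include <stdio.h>
    #include <string.h>

    /* Stand-ins for the kernel tables, which are really sized       */
    /* (NR_DEVICES + ICU_LEN) and bumped at interrupt time.           */
    static unsigned long intrcnt[3];
    static const char intrnames[] =
        "stray irq0\0stray irq1\0stray irq2";

    int
    main(void)
    {
            const char *name = intrnames;
            size_t i;

            for (i = 0; i < sizeof(intrcnt) / sizeof(intrcnt[0]); i++) {
                    printf("%-12s %lu\n", name, intrcnt[i]);
                    name += strlen(name) + 1;  /* names are packed    */
            }
            return (0);
    }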