/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD$
 */

#include
#include

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 * Trap-frame and dummy-frame push/pop helpers used by the interrupt
 * entry points below.
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;		/* 8 ints */				\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define PUSH_DUMMY							\
	pushfl ;		/* eflags */				\
	pushl	%cs ;		/* cs */				\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	subl	$11*4,%esp ;

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp

#define POP_DUMMY							\
	addl	$16*4,%esp

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)						\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	ICU_UNLOCK

/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't mask */		\
	MASK_IRQ(irq_num) ;						\
9:

#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;			/* not active */		\
	movl	$0, lapic+LA_EOI ;					\
9:
#else
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f ;			/* not active */		\
	movl	$0, lapic+LA_EOI;					\
9:
#endif

/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;	/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	ICU_UNLOCK

/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs have to be unmasked.
 */
#define UNMASK_LEVEL_IRQ(irq_num)					\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't unmask */	\
	UNMASK_IRQ(irq_num) ;						\
9:

/*
 * Macros for interrupt entry, call to handler, and exit.
 */
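/*
 * For reference, the fast-interrupt path that FAST_INTR() below expands
 * to can be read as the following C-like sketch.  Illustrative only, not
 * compiled; the names follow the assembly symbols used below:
 *
 *	if (curthread->td_critnest != 0) {
 *		int_pending = 1;		// defer: note the source
 *		fpending |= IRQ_BIT(irq_num);
 *		MASK_LEVEL_IRQ(irq_num);	// level INTs stay masked
 *		EOI;
 *	} else {
 *		curthread->td_critnest++;
 *		curthread->td_intr_nesting_level++;
 *		intr_handler[irq_num](intr_unit[irq_num]);
 *		EOI;
 *		cnt.v_intr++;
 *		(*intr_countp[irq_num])++;
 *		curthread->td_critnest--;
 *		if (int_pending)
 *			i386_unpend();		// replay deferred sources
 *		curthread->td_intr_nesting_level--;
 *	}
 *	// both paths leave through doreti
 */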
#define FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;					\
	je	1f ;							\
;									\
	movl	$1,PCPU(INT_PENDING) ;					\
	orl	$IRQ_BIT(irq_num),PCPU(FPENDING) ;			\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic+LA_EOI ;					\
	jmp	10f ;							\
1: ;									\
	incl	TD_CRITNEST(%ebx) ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ; /* do the work ASAP */	\
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;					\
	lock ;								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	decl	TD_CRITNEST(%ebx) ;					\
	cmpl	$0,PCPU(INT_PENDING) ;					\
	je	2f ;							\
;									\
	call	i386_unpend ;						\
2: ;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti

/*
 * Restart a fast interrupt that was held up by a critical section.
 * This routine is called from unpend().  unpend() ensures we are
 * in a critical section and deals with the interrupt nesting level
 * for us.  If we previously masked the irq, we have to unmask it.
 *
 * We have a choice.  We can regenerate the irq using the 'int'
 * instruction or we can create a dummy frame and call the interrupt
 * handler directly.  I've chosen to use the dummy-frame method.
 */
#define FAST_UNPEND(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
;									\
	pushl	%ebp ;							\
	movl	%esp, %ebp ;						\
	PUSH_DUMMY ;							\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ; /* do the work ASAP */	\
	addl	$4, %esp ;						\
	lock ;								\
	incl	cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	UNMASK_LEVEL_IRQ(irq_num) ;					\
	POP_DUMMY ;							\
	popl	%ebp ;							\
	ret ;

/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
;									\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;					\
	je	1f ;							\
	movl	$1,PCPU(INT_PENDING) ;					\
	orl	$IRQ_BIT(irq_num),PCPU(IPENDING) ;			\
	jmp	10f ;							\
1: ;									\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
;									\
	FAKE_MCOUNT(13*4(%esp)) ;	/* XXX avoid dbl cnt */		\
	cmpl	$0,PCPU(INT_PENDING) ;					\
	je	9f ;							\
	call	i386_unpend ;						\
9: ;									\
	pushl	$irq_num;		/* pass the IRQ */		\
	call	sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(spuriousint)

	/* No EOI cycle used here */

	iret

#ifdef SMP
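/*
 * The three TLB shootdown handlers below share one shape.  A rough
 * C-like sketch of the receiving side (illustrative only; the initiating
 * CPU is assumed to spin on smp_tlb_wait, which is not shown in this
 * file -- the handlers merely increment the counter):
 *
 *	load kernel %ds;
 *	invalidate:  reload %cr3, invlpg a single page, or invlpg the
 *	    [smp_tlb_addr1, smp_tlb_addr2) range in PAGE_SIZE steps;
 *	lapic->eoi = 0;
 *	atomically increment smp_tlb_wait;	// ack seen by the sender
 *	iret;
 */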
/*
 * Global address space TLB shootdown.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(invltlb)
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds
#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_gbl(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%eax
	iret

/*
 * Single page TLB shootdown
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(invlpg)
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds
#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_pg(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %eax
	invlpg	(%eax)			/* invalidate single page */

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%eax
	iret

/*
 * Page range TLB shootdown.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(invlrng)
	pushl	%eax
	pushl	%edx
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds
#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_rng(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %edx
	movl	smp_tlb_addr2, %eax
1:	invlpg	(%edx)			/* invalidate single page */
	addl	$PAGE_SIZE, %edx
	cmpl	%eax, %edx
	jb	1b

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait

	popl	%ds
	popl	%edx
	popl	%eax
	iret

/*
 * Forward hardclock to another CPU.  Pushes a clockframe and calls
 * forwarded_hardclock().
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(hardclock)
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$1,PCPU(SPENDING);
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	pushl	$0		/* XXX convert trapframe to clockframe */
	call	forwarded_hardclock
	addl	$4, %esp	/* XXX convert clockframe to trapframe */
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti

/*
 * Forward statclock to another CPU.  Pushes a clockframe and calls
 * forwarded_statclock().
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(statclock)
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)
	je	1f
	movl	$1,PCPU(INT_PENDING)
	orl	$2,PCPU(SPENDING);
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	pushl	$0		/* XXX convert trapframe to clockframe */
	call	forwarded_statclock
	addl	$4, %esp	/* XXX convert clockframe to trapframe */
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 * The other CPU has already executed aston() or need_resched() on our
 * current process, so we simply need to ack the interrupt and return
 * via doreti to run ast().
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(cpuast)
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MEXITCOUNT
	jmp	doreti
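/*
 * The forwarded clock IPIs above defer themselves much as FAST_INTR()
 * does.  Illustrative C-like sketch only; names follow the assembly,
 * and SPENDING bit 0 marks a deferred hardclock, bit 1 a deferred
 * statclock:
 *
 *	if (curthread->td_critnest != 0) {
 *		int_pending = 1;
 *		spending |= bit;	// 1 for hardclock, 2 for statclock
 *	} else {
 *		curthread->td_intr_nesting_level++;
 *		forwarded_hardclock(0);	// or forwarded_statclock(0);
 *					// dummy arg, see XXX comments above
 *		curthread->td_intr_nesting_level--;
 *	}
 *	// exit through doreti
 */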
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(cpustop)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%es
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, CNAME(stopped_cpus)	/* stopped_cpus |= (1<<id) */