Remove remaining bits of old interrupt and APIC code.

This commit is contained in:
John Baldwin 2003-11-03 22:51:25 +00:00
parent 05354a6e42
commit 892f579780
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=122010
7 changed files with 0 additions and 2239 deletions

View File

@ -1,769 +0,0 @@
/*-
* Copyright (c) 1996, by Steve Passe
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. The name of the developer may NOT be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <machine/cpufunc.h>
#include <machine/smptests.h> /** TEST_TEST1 */
#include <machine/smp.h>
#include <machine/mpapic.h>
#include <machine/segments.h>
#include <i386/isa/intr_machdep.h> /* Xspuriousint() */
#include <i386/isa/icu.h> /* apic_imen */
/* EISA Edge/Level trigger control registers */
#define ELCR0 0x4d0 /* eisa irq 0-7 */
#define ELCR1 0x4d1 /* eisa irq 8-15 */
/*
 * Pointers to the pmapped (memory-mapped) IO APIC register windows,
 * indexed by IO APIC number.  Dereferenced by io_apic_read()/write().
 */
#if defined(APIC_IO)
volatile ioapic_t **ioapic;
#endif	/* APIC_IO */
/*
 * Enable the calling CPU's local APIC and configure its interrupt
 * sources: LINT0/LINT1 local vectors, task priority, and the
 * spurious-interrupt vector.
 */
void
apic_initialize(void)
{
	u_int	temp;

	/*
	 * Setup LVT1 (LINT0) as ExtINT on the BSP so legacy 8259 output
	 * is serviced; all other CPUs mask ExtInts so only one CPU
	 * processes them.
	 */
	temp = lapic.lvt_lint0;
	temp &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM);
	if (PCPU_GET(cpuid) == 0)
		temp |= 0x00000700;	/* process ExtInts */
	else
		temp |= 0x00010700;	/* mask ExtInts */
	lapic.lvt_lint0 = temp;

	/* setup LVT2 (LINT1) as NMI, masked till later... */
	temp = lapic.lvt_lint1;
	temp &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM);
	temp |= 0x00010400;	/* masked, edge trigger, active hi */
	lapic.lvt_lint1 = temp;

	/* accept all interrupt priorities: clear the TPR priority field */
	temp = lapic.tpr;
	temp &= ~APIC_TPR_PRIO;
	lapic.tpr = temp;

	/* enable the local APIC via the spurious-vector register (SVR) */
	temp = lapic.svr;
	temp |= APIC_SVR_SWEN;	/* software enable APIC */
	temp &= ~APIC_SVR_FOCUS;	/* enable 'focus processor' */

	/*
	 * Set the 'spurious INT' vector.  The APIC_SVR_VEC_FIX bits of
	 * the vector field are fixed, so the chosen vector must already
	 * have them set — verified here, panic otherwise.
	 */
	if ((XSPURIOUSINT_OFFSET & APIC_SVR_VEC_FIX) != APIC_SVR_VEC_FIX)
		panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
	temp &= ~APIC_SVR_VEC_PROG;	/* clear (programmable) vector field */
	temp |= (XSPURIOUSINT_OFFSET & APIC_SVR_VEC_PROG);

#if defined(TEST_TEST1)
	if (PCPU_GET(cpuid) == GUARD_CPU) {
		temp &= ~APIC_SVR_SWEN;	/* software DISABLE APIC */
	}
#endif	/** TEST_TEST1 */

	lapic.svr = temp;
}
/*
 * Dump the interesting local APIC registers of the calling CPU
 * (LINT0/LINT1 local vectors, TPR, SVR), tagged with 'str'.
 */
void
apic_dump(char* str)
{
	printf("SMP: CPU%d %s:\n", PCPU_GET(cpuid), str);
	printf("     lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
	    lapic.lvt_lint0, lapic.lvt_lint1, lapic.tpr, lapic.svr);
}
#if defined(APIC_IO)

/*
 * IO APIC code,
 */

/* number of ISA interrupt pins (pins 0..15 of the first IO APIC) */
#define IOAPIC_ISA_INTS		16

/* redirection entry count: MAXREDIR field of the version register + 1 */
#define REDIRCNT_IOAPIC(A) \
	((int)((io_apic_versions[(A)] & IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) + 1)

static int trigger(int apic, int pin, u_int32_t * flags);
static void polarity(int apic, int pin, u_int32_t * flags, int level);

/* default redirection entry: masked, physical dest, lowest-priority delivery */
#define DEFAULT_FLAGS		\
	((u_int32_t)		\
	 (IOART_INTMSET |	\
	  IOART_DESTPHY |	\
	  IOART_DELLOPRI))

/* ISA default additionally forces edge trigger and active-high polarity */
#define DEFAULT_ISA_FLAGS	\
	((u_int32_t)		\
	 (IOART_INTMSET |	\
	  IOART_TRGREDG |	\
	  IOART_INTAHI |	\
	  IOART_DESTPHY |	\
	  IOART_DELLOPRI))
/*
 * Program the hardware ID of IO APIC 'apic' to 'id'.
 * The ID register is read back after the write; if the new value did
 * not stick, the IO APIC cannot be controlled and we panic.
 */
void
io_apic_set_id(int apic, int id)
{
	u_int32_t reg;

	reg = io_apic_read(apic, IOAPIC_ID);	/* current contents */
	if (((reg & APIC_ID_MASK) >> 24) == id)
		return;				/* already correct */

	printf("Changing APIC ID for IO APIC #%d"
	       " from %d to %d on chip\n",
	       apic, ((reg & APIC_ID_MASK) >> 24), id);

	reg &= ~APIC_ID_MASK;			/* clear the ID field */
	reg |= (id << 24);			/* splice in the new ID */
	io_apic_write(apic, IOAPIC_ID, reg);

	/* re-read and verify the write took effect */
	reg = io_apic_read(apic, IOAPIC_ID);
	if (((reg & APIC_ID_MASK) >> 24) != id)
		panic("can't control IO APIC #%d ID, reg: 0x%08x",
		      apic, reg);
}
int
io_apic_get_id(int apic)
{
return (io_apic_read(apic, IOAPIC_ID) & APIC_ID_MASK) >> 24;
}
/*
 * Setup the IO APIC.
 */
extern int apic_pin_trigger;	/* 'opaque' */

/*
 * Program one redirection-table entry of IO APIC 'apic' for 'pin':
 * first park it masked with a sane vector, then (if it is a vectored
 * interrupt with a known bus) program trigger mode, polarity and the
 * final IDT vector.
 */
void
io_apic_setup_intpin(int apic, int pin)
{
	int bus, bustype, irq;
	u_char select;		/* the select register is 8 bits */
	u_int32_t flags;	/* the window register is 32 bits */
	u_int32_t target;	/* the window register is 32 bits */
	u_int32_t vector;	/* the window register is 32 bits */
	int level;
	register_t crit;

	target = IOART_DEST;

	/* each pin has two registers; REDTBL0 is the base */
	select = pin * 2 + IOAPIC_REDTBL0;	/* register */

	/*
	 * Always disable interrupts, and by default map
	 * pin X to IRQX because the disable doesn't stick
	 * and the uninitialized vector will get translated
	 * into a panic.
	 *
	 * This is correct for IRQs 1 and 3-15.  In the other cases,
	 * any robust driver will handle the spurious interrupt, and
	 * the effective NOP beats a panic.
	 *
	 * A dedicated "bogus interrupt" entry in the IDT would
	 * be a nicer hack, although some one should find out
	 * why some systems are generating interrupts when they
	 * shouldn't and stop the carnage.
	 */
	vector = NRSVIDT + pin;			/* IDT vec */

	/* read-modify-write under icu_lock with local interrupts off */
	crit = intr_disable();
	mtx_lock_spin(&icu_lock);
	io_apic_write(apic, select,
	    (io_apic_read(apic, select) & ~IOART_INTMASK
	    & ~0xff)|IOART_INTMSET|vector);
	mtx_unlock_spin(&icu_lock);
	intr_restore(crit);

	/* we only deal with vectored INTs here */
	if (apic_int_type(apic, pin) != 0)
		return;

	irq = apic_irq(apic, pin);
	if (irq < 0)
		return;

	/* determine the bus type for this pin */
	bus = apic_src_bus_id(apic, pin);
	if (bus == -1)
		return;
	bustype = apic_bus_type(bus);

	if ((bustype == ISA) &&
	    (pin < IOAPIC_ISA_INTS) &&
	    (irq == pin) &&
	    (apic_polarity(apic, pin) == 0x1) &&
	    (apic_trigger(apic, pin) == 0x3)) {
		/*
		 * A broken BIOS might describe some ISA
		 * interrupts as active-high level-triggered.
		 * Use default ISA flags for those interrupts.
		 */
		flags = DEFAULT_ISA_FLAGS;
	} else {
		/*
		 * Program polarity and trigger mode according to
		 * interrupt entry.
		 */
		flags = DEFAULT_FLAGS;
		level = trigger(apic, pin, &flags);
		if (level == 1)
			apic_pin_trigger |= (1 << irq);	/* record level mode */
		polarity(apic, pin, &flags, level);
	}

	/* program the appropriate registers */
	if (apic != 0 || pin != irq)
		printf("IOAPIC #%d intpin %d -> irq %d\n",
		       apic, pin, irq);
	vector = NRSVIDT + irq;			/* IDT vec */
	crit = intr_disable();
	mtx_lock_spin(&icu_lock);
	io_apic_write(apic, select, flags | vector);
	io_apic_write(apic, select + 1, target);
	mtx_unlock_spin(&icu_lock);
	intr_restore(crit);
}
/*
 * Program every redirection-table pin of IO APIC 'apic'.
 * The first IO APIC additionally resets the global level-trigger
 * bookkeeping.  Returns 0 (GOOD status) unconditionally.
 */
int
io_apic_setup(int apic)
{
	int npin, idx;

	if (apic == 0)
		apic_pin_trigger = 0;	/* default to edge-triggered */

	npin = REDIRCNT_IOAPIC(apic);	/* pins in APIC */
	printf("Programming %d pins in IOAPIC #%d\n", npin, apic);
	for (idx = 0; idx < npin; idx++)
		io_apic_setup_intpin(apic, idx);

	/* return GOOD status */
	return 0;
}
#undef DEFAULT_ISA_FLAGS
#undef DEFAULT_FLAGS
#define DEFAULT_EXTINT_FLAGS \
((u_int32_t) \
(IOART_INTMSET | \
IOART_TRGREDG | \
IOART_INTAHI | \
IOART_DESTPHY | \
IOART_DELLOPRI))
/*
 * Setup the source of External INTerrupts.
 *
 * Programs IO APIC 'apic' pin 'intr' with the ExtINT default flags if
 * the MP table marks the pin as type 3 (ExtINT); returns 0 on success,
 * -1 if the pin is not an ExtINT source.
 *
 * NOTE(review): unlike io_apic_setup_intpin(), these register writes
 * are not protected by icu_lock — presumably only called during
 * single-threaded early startup; confirm before reusing elsewhere.
 */
int
ext_int_setup(int apic, int intr)
{
	u_char select;		/* the select register is 8 bits */
	u_int32_t flags;	/* the window register is 32 bits */
	u_int32_t target;	/* the window register is 32 bits */
	u_int32_t vector;	/* the window register is 32 bits */

	if (apic_int_type(apic, intr) != 3)
		return -1;

	target = IOART_DEST;
	select = IOAPIC_REDTBL0 + (2 * intr);	/* two registers per pin */
	vector = NRSVIDT + intr;		/* IDT vector */
	flags = DEFAULT_EXTINT_FLAGS;

	io_apic_write(apic, select, flags | vector);
	io_apic_write(apic, select + 1, target);

	return 0;
}
#undef DEFAULT_EXTINT_FLAGS
/*
 * Set the trigger mode for an IO APIC pin.
 *
 * Returns 1 for level-triggered, 0 for edge-triggered, and updates the
 * IOART_TRGRLVL bit in *flags to match.  An explicit MP-table trigger
 * entry wins; a "conforms" entry (0x00) falls back to the convention
 * of the source bus.  Inconsistent table data panics.
 */
static int
trigger(int apic, int pin, u_int32_t * flags)
{
	int id;
	int eirq;
	int level;
	static int intcontrol = -1;	/* cached EISA ELCR contents */

	switch (apic_trigger(apic, pin)) {

	case 0x00:		/* conforms: decided by bus type below */
		break;

	case 0x01:		/* explicit edge */
		*flags &= ~IOART_TRGRLVL;	/* *flags |= IOART_TRGREDG */
		return 0;

	case 0x03:		/* explicit level */
		*flags |= IOART_TRGRLVL;
		return 1;

	case -1:
	default:
		goto bad;
	}

	if ((id = apic_src_bus_id(apic, pin)) == -1)
		goto bad;

	switch (apic_bus_type(id)) {
	case ISA:
		*flags &= ~IOART_TRGRLVL;	/* *flags |= IOART_TRGREDG; */
		return 0;

	case EISA:
		eirq = apic_src_bus_irq(apic, pin);

		if (eirq < 0 || eirq > 15) {
			printf("EISA IRQ %d?!?!\n", eirq);
			goto bad;
		}

		/* read and cache both ELCR registers on first use */
		if (intcontrol == -1) {
			intcontrol = inb(ELCR1) << 8;
			intcontrol |= inb(ELCR0);
			printf("EISA INTCONTROL = %08x\n", intcontrol);
		}

		/* Use ELCR settings to determine level or edge mode */
		level = (intcontrol >> eirq) & 1;

		/*
		 * Note that on older Neptune chipset based systems, any
		 * pci interrupts often show up here and in the ELCR as well
		 * as level sensitive interrupts attributed to the EISA bus.
		 */

		if (level)
			*flags |= IOART_TRGRLVL;
		else
			*flags &= ~IOART_TRGRLVL;

		return level;

	case PCI:
		*flags |= IOART_TRGRLVL;	/* PCI convention: level */
		return 1;

	case -1:
	default:
		goto bad;
	}

bad:
	panic("bad APIC IO INT flags");
}
/*
 * Set the polarity value for an IO APIC pin.
 *
 * Updates the IOART_INTALO bit in *flags: an explicit MP-table polarity
 * entry wins; a "conforms" entry (0x00) falls back to the convention of
 * the source bus.  Inconsistent table data panics.  'level' is accepted
 * for symmetry with trigger() but is not consulted.
 */
static void
polarity(int apic, int pin, u_int32_t * flags, int level)
{
	int busid;

	switch (apic_polarity(apic, pin)) {
	case 0x01:		/* explicit active-high */
		*flags &= ~IOART_INTALO;	/* *flags |= IOART_INTAHI */
		return;
	case 0x03:		/* explicit active-low */
		*flags |= IOART_INTALO;
		return;
	case 0x00:		/* conforms: decided by bus type below */
		break;
	case -1:
	default:
		goto bad;
	}

	busid = apic_src_bus_id(apic, pin);
	if (busid == -1)
		goto bad;

	switch (apic_bus_type(busid)) {
	case ISA:
		*flags &= ~IOART_INTALO;	/* ISA convention: active high */
		return;
	case EISA:
		/* polarity converter always gives active high */
		*flags &= ~IOART_INTALO;
		return;
	case PCI:
		*flags |= IOART_INTALO;		/* PCI convention: active low */
		return;
	case -1:
	default:
		goto bad;
	}

bad:
	panic("bad APIC IO INT flags");
}
/*
 * Print the list of enabled hardware interrupts followed by the raw
 * apic_imen mask.  A clear bit in apic_imen means the INT is enabled.
 */
void
imen_dump(void)
{
	int irq;

	printf("SMP: enabled INTs: ");
	for (irq = 0; irq < NHWI; irq++) {
		if ((apic_imen & (1 << irq)) == 0)
			printf("%d, ", irq);
	}
	printf("apic_imen: 0x%08x\n", apic_imen);
}
/*
 * Inter Processor Interrupt functions.
 */

/*
 * Send APIC IPI 'vector' to 'destType' via 'deliveryMode'.
 *
 *  destType is 1 of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
 *  vector is any valid SYSTEM INT vector
 *  delivery_mode is 1 of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
 */
#define DETECT_DEADLOCK
int
apic_ipi(int dest_type, int vector, int delivery_mode)
{
	u_long  icr_lo;		/* new ICR low word to write */

#if defined(DETECT_DEADLOCK)
#define MAX_SPIN1	10000000
#define MAX_SPIN2	1000
	int     x;

	/* "lazy delivery", ie we only barf if they stack up on us... */
	for (x = MAX_SPIN1; x; --x) {
		if ((lapic.icr_lo & APIC_DELSTAT_MASK) == 0)
			break;	/* previous IPI delivered, OK to send */
	}
	if (x == 0)
		panic("apic_ipi was stuck");
#endif  /* DETECT_DEADLOCK */

	/* build IRC_LOW, preserving the reserved bits */
	icr_lo = (lapic.icr_lo & APIC_ICRLO_RESV_MASK)
	    | dest_type | delivery_mode | vector;

	/* write APIC ICR: this triggers the IPI */
	lapic.icr_lo = icr_lo;

	/* wait for pending status end */
#if defined(DETECT_DEADLOCK)
	for (x = MAX_SPIN2; x; --x) {
		if ((lapic.icr_lo & APIC_DELSTAT_MASK) == 0)
			break;
	}
#ifdef needsattention
/*
 * XXX FIXME:
 *      The above loop waits for the message to actually be delivered.
 *      It breaks out after an arbitrary timeout on the theory that it eventually
 *      will be delivered and we will catch a real failure on the next entry to
 *      this function, which would panic().
 *      We could skip this wait entirely, EXCEPT it probably protects us from
 *      other "less robust" routines that assume the message was delivered and
 *      acted upon when this function returns.  TLB shootdowns are one such
 *      "less robust" function.
 */
	if (x == 0)
		printf("apic_ipi might be stuck\n");
#endif
#undef MAX_SPIN2
#undef MAX_SPIN1
#else
	/* without deadlock detection: spin until delivery completes */
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
#endif  /* DETECT_DEADLOCK */

	/** XXX FIXME: return result */
	return 0;
}
/*
 * Send APIC IPI 'vector' to the single CPU 'cpu' via 'delivery_mode'.
 * The destination is programmed into ICR_HI; interrupts are disabled
 * across the two-register ICR write so the pair is not torn by a
 * nested IPI from an interrupt handler.
 */
static int
apic_ipi_singledest(int cpu, int vector, int delivery_mode)
{
	u_long  icr_lo;
	u_long  icr_hi;
	u_long  eflags;

#if defined(DETECT_DEADLOCK)
#define MAX_SPIN1	10000000
#define MAX_SPIN2	1000
	int     x;

	/* "lazy delivery", ie we only barf if they stack up on us... */
	for (x = MAX_SPIN1; x; --x) {
		if ((lapic.icr_lo & APIC_DELSTAT_MASK) == 0)
			break;	/* previous IPI delivered, OK to send */
	}
	if (x == 0)
		panic("apic_ipi was stuck");
#endif  /* DETECT_DEADLOCK */

	eflags = read_eflags();
	disable_intr();

	/* program the destination field with the target CPU's APIC ID */
	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
	icr_hi |= (CPU_TO_ID(cpu) << 24);
	lapic.icr_hi = icr_hi;

	/* build IRC_LOW, preserving the reserved bits */
	icr_lo = (lapic.icr_lo & APIC_ICRLO_RESV_MASK)
	    | APIC_DEST_DESTFLD | delivery_mode | vector;

	/* write APIC ICR: this triggers the IPI */
	lapic.icr_lo = icr_lo;
	write_eflags(eflags);

	/* wait for pending status end */
#if defined(DETECT_DEADLOCK)
	for (x = MAX_SPIN2; x; --x) {
		if ((lapic.icr_lo & APIC_DELSTAT_MASK) == 0)
			break;
	}
#ifdef needsattention
/*
 * XXX FIXME:
 *      The above loop waits for the message to actually be delivered.
 *      It breaks out after an arbitrary timeout on the theory that it eventually
 *      will be delivered and we will catch a real failure on the next entry to
 *      this function, which would panic().
 *      We could skip this wait entirely, EXCEPT it probably protects us from
 *      other "less robust" routines that assume the message was delivered and
 *      acted upon when this function returns.  TLB shootdowns are one such
 *      "less robust" function.
 */
	if (x == 0)
		printf("apic_ipi might be stuck\n");
#endif
#undef MAX_SPIN2
#undef MAX_SPIN1
#else
	/* without deadlock detection: spin until delivery completes */
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
#endif  /* DETECT_DEADLOCK */

	/** XXX FIXME: return result */
	return 0;
}
/*
 * Send APIC IPI 'vector' to 'target's via 'delivery_mode'.
 *
 *   target contains a bitfield with a bit set for selected APICs.
 *   vector is any valid SYSTEM INT vector
 *   delivery_mode is 1 of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
 *
 * Returns -1 if the target mask exceeds the 15 supported CPUs,
 * otherwise a bitmask of CPUs for which the send failed.
 */
int
selected_apic_ipi(u_int target, int vector, int delivery_mode)
{
	int cpu;
	int failed;

	if (target & ~0x7fff)
		return -1;	/* only 15 targets allowed */

	failed = 0;
	for (cpu = 0; cpu <= 14; cpu++) {
		if ((target & (1 << cpu)) == 0)
			continue;
		/* send the IPI to this CPU */
		if (apic_ipi_singledest(cpu, vector, delivery_mode) == -1)
			failed |= (1 << cpu);
	}
	return failed;
}
#endif /* APIC_IO */
/*
* Timer code, in development...
* - suggested by rgrimes@gndrsh.aac.dev.com
*/
/** XXX FIXME: temp hack till we can determin bus clock */
#ifndef BUS_CLOCK
#define BUS_CLOCK 66000000
#define bus_clock() 66000000
#endif
#if defined(READY)
int acquire_apic_timer(void);
int release_apic_timer(void);
/*
 * Acquire the APIC timer for exclusive use.
 * Currently a stub that always reports success; the panic branch is
 * placeholder code for real arbitration.
 */
int
acquire_apic_timer(void)
{
#if 1
	return 0;	/* stub: always succeeds */
#else
	/** XXX FIXME: make this really do something */
	panic("APIC timer in use when attempting to aquire");
#endif
}
/*
 * Return the APIC timer.
 * Currently a stub that always reports success; the panic branch is
 * placeholder code for real arbitration.
 */
int
release_apic_timer(void)
{
#if 1
	return 0;	/* stub: always succeeds */
#else
	/** XXX FIXME: make this really do something */
	panic("APIC timer was already released");
#endif
}
#endif /* READY */
/*
 * Load a 'downcount time' in uSeconds.
 * Programs the local APIC timer as a one-shot countdown with its
 * interrupt masked; read_apic_timer() polls the remaining count.
 */
void
set_apic_timer(int value)
{
	u_long  lvtt;
	long    ticks_per_microsec;

	/*
	 * Calculate divisor and count from value:
	 *
	 *  timeBase == CPU bus clock divisor == [1,2,4,8,16,32,64,128]
	 *  value == time in uS
	 *
	 * NOTE(review): despite the range above, only divide-by-1
	 * (APIC_TDCR_1) is ever programmed here.
	 */
	lapic.dcr_timer = APIC_TDCR_1;
	ticks_per_microsec = bus_clock() / 1000000;

	/* configure timer as one-shot */
	lvtt = lapic.lvt_timer;
	lvtt &= ~(APIC_LVTT_VECTOR | APIC_LVTT_DS | APIC_LVTT_M | APIC_LVTT_TM);
	lvtt |= APIC_LVTT_M;			/* no INT, one-shot */
	lapic.lvt_timer = lvtt;

	/* write the initial count; this starts the countdown */
	lapic.icr_timer = value * ticks_per_microsec;
}
/*
 * Read remaining time in timer.
 * Returns the raw current-count register, i.e. remaining ticks rather
 * than remaining microseconds (see the FIXME below).
 */
int
read_apic_timer(void)
{
#if 0
	/** XXX FIXME: we need to return the actual remaining time,
         *         for now we just return the remaining count.
         */
#else
	return lapic.ccr_timer;
#endif
}
/*
 * Spin-style delay: load 'count' microseconds into the APIC one-shot
 * timer, then busy-wait until the countdown drains to zero.
 */
void
u_sleep(int count)
{
	set_apic_timer(count);
	for (;;) {
		if (read_apic_timer() == 0)
			break;
	}
}
/*
* IOAPIC access helper functions.
*/
u_int
io_apic_read(int idx, int reg)
{
volatile ioapic_t *apic;
apic = ioapic[idx];
apic->ioregsel = reg;
return apic->iowin;
}
/*
 * Write 'value' to register 'reg' of IO APIC 'idx' via the indirect
 * register-select / data-window protocol.
 */
void
io_apic_write(int idx, int reg, u_int value)
{
	volatile ioapic_t *ioa = ioapic[idx];

	ioa->ioregsel = reg;	/* select the register... */
	ioa->iowin = value;	/* ...then write it through the window */
}

View File

@ -1,120 +0,0 @@
/*-
* Copyright (c) 1997, by Steve Passe
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. The name of the developer may NOT be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
	.data
	ALIGN_DATA

/*
 * Note:
 *	This is the UP equivalent of _imen.
 *	It is OPAQUE, and must NOT be accessed directly.
 *	It MUST be accessed along with the IO APIC as a 'critical region'.
 *	Accessed by:
 *		INTREN()
 *		INTRDIS()
 *		imen_dump()
 */
	.p2align 2				/* MUST be 32bit aligned */
	.globl apic_imen
apic_imen:
	.long	HWI_MASK			/* all HW ints start masked */
.text
SUPERALIGN_TEXT
/******************************************************************************
 * XXX FIXME: figure out where these belong.
 */

/* this nonsense is to verify that masks ALWAYS have 1 and only 1 bit set */
#define QUALIFY_MASKS_NOT

#ifdef QUALIFY_MASKS
/* Clears the bit at %ecx in %eax; panics if any other bit remains set. */
#define QUALIFY_MASK		\
	btrl	%ecx, %eax ;	\
	andl	%eax, %eax ;	\
	jz	1f ;		\
	pushl	$bad_mask ;	\
	call	panic ;		\
1:

bad_mask:	.asciz	"bad mask"
#else
#define QUALIFY_MASK
#endif
/*
 * MP-safe function to enable (unmask) ONE INT.
 * The passed arg is a 32bit u_int MASK.
 * It clears the associated bit in apic_imen.
 * It clears the mask bit of the associated IO APIC register.
 */
ENTRY(INTREN)
	movl	4(%esp), %eax		/* mask into %eax */
	bsfl	%eax, %ecx		/* get pin index */
	btrl	%ecx, apic_imen		/* clear bit: update apic_imen */
	QUALIFY_MASK
	shll	$4, %ecx		/* int_to_apicintpin[] is 16 bytes/entry */
	movl	CNAME(int_to_apicintpin) + 8(%ecx), %edx	/* ioapic addr */
	movl	CNAME(int_to_apicintpin) + 12(%ecx), %ecx	/* register index */
	testl	%edx, %edx		/* no IO APIC mapping? then done */
	jz	1f
	movl	%ecx, (%edx)		/* write the target register index */
	movl	IOAPIC_WINDOW(%edx), %eax	/* read the target register data */
	andl	$~IOART_INTMASK, %eax	/* clear mask bit */
	movl	%eax, IOAPIC_WINDOW(%edx)	/* write the APIC register data */
1:
	ret
/*
 * MP-safe function to disable (mask) ONE INT.
 * The passed arg is a 32bit u_int MASK.
 * It sets the associated bit in apic_imen.
 * It sets the mask bit of the associated IO APIC register.
 */
ENTRY(INTRDIS)
	movl	4(%esp), %eax		/* mask into %eax */
	bsfl	%eax, %ecx		/* get pin index */
	btsl	%ecx, apic_imen		/* set bit: update apic_imen */
	QUALIFY_MASK
	shll	$4, %ecx		/* int_to_apicintpin[] is 16 bytes/entry */
	movl	CNAME(int_to_apicintpin) + 8(%ecx), %edx	/* ioapic addr */
	movl	CNAME(int_to_apicintpin) + 12(%ecx), %ecx	/* register index */
	testl	%edx, %edx		/* no IO APIC mapping? then done */
	jz	1f
	movl	%ecx, (%edx)		/* write the target register index */
	movl	IOAPIC_WINDOW(%edx), %eax	/* read the target register data */
	orl	$IOART_INTMASK, %eax	/* set mask bit */
	movl	%eax, IOAPIC_WINDOW(%edx)	/* write the APIC register data */
1:
	ret

View File

@ -1,673 +0,0 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $FreeBSD$
*/
#include <machine/apic.h>
#include <machine/smp.h>
/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num) (1 << (irq_num))
/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2))
/*
 * Stack-frame helpers shared by the interrupt entry points below.
 */

/* Build a full trapframe: dummy error/trap codes, all GPRs, %ds/%es/%fs. */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;		/* 8 ints */				\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

/* Fake an interrupt frame for FAST_UNPEND; caller eip sits at 12(%esp). */
#define PUSH_DUMMY							\
	pushfl ;		/* eflags */				\
	pushl	%cs ;		/* cs */				\
	pushl	12(%esp) ;	/* original caller eip */		\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	subl	$11*4,%esp ;	/* skip over the GPR/segment area */

/* Unwind PUSH_FRAME and discard the dummy error/trap words. */
#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp

/* Unwind PUSH_DUMMY (16 words). */
#define POP_DUMMY							\
	addl	$16*4,%esp
/*
 * Byte offsets of the IO APIC address and redirection-register index
 * fields inside int_to_apicintpin[irq_num] (16 bytes per entry).
 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

/*
 * Mask 'irq_num' in its IO APIC under icu_lock; a no-op if apic_imen
 * already records the IRQ as masked.
 */
#define MASK_IRQ(irq_num)						\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	ICU_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't mask */		\
	MASK_IRQ(irq_num) ;						\
9:
/*
 * Issue an EOI to the local APIC, but only if the IRQ's in-service bit
 * is actually set (avoids EOIing someone else's interrupt).
 */
#ifdef APIC_INTR_REORDER
/* indirect: per-IRQ pointer/bit pair precomputed in apic_isrbit_location */
#define EOI_IRQ(irq_num)						\
	movl	apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;			/* not active */		\
	movl	$0, lapic+LA_EOI ;					\
9:
#else
/* direct: test the IRQ's bit in the local APIC ISR register */
#define EOI_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), lapic+LA_ISR1;			\
	jz	9f	;		/* not active */		\
	movl	$0, lapic+LA_EOI;					\
9:
#endif
/*
 * Test to see if the source is currently masked, clear if so.
 * Runs under icu_lock; a no-op if apic_imen records the IRQ as unmasked.
 */
#define UNMASK_IRQ(irq_num)						\
	ICU_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	ICU_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs have to be unmasked.
 */
#define UNMASK_LEVEL_IRQ(irq_num)					\
	testl	$IRQ_BIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't unmask */	\
	UNMASK_IRQ(irq_num) ;						\
9:
/*
 * Macros for interrupt entry, call to handler, and exit.
 *
 * FAST_INTR: entry point for an INTR_FAST handler.  If the current
 * thread is in a critical section the interrupt is deferred (recorded
 * in FPENDING and, for level-triggered sources, masked); otherwise the
 * handler runs immediately at elevated critnest.
 */
#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;		/* reload kernel segments */	\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;	/* in a critical section? */	\
	je	1f ;							\
;									\
	movl	$1,PCPU(INT_PENDING) ;	/* defer: mark pending */	\
	orl	$IRQ_BIT(irq_num),PCPU(FPENDING) ;			\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movl	$0, lapic+LA_EOI ;					\
	jmp	10f ;							\
1: ;									\
	incl	TD_CRITNEST(%ebx) ;					\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	movl	$0, lapic+LA_EOI ;					\
	lock ; 								\
	incl	cnt+V_INTR ;		/* book-keeping can wait */	\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	decl	TD_CRITNEST(%ebx) ;					\
	cmpl	$0,PCPU(INT_PENDING) ;	/* run deferred interrupts */	\
	je	2f ;							\
;									\
	call	i386_unpend ;						\
2: ;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti
/*
 * Restart a fast interrupt that was held up by a critical section.
 * This routine is called from unpend().  unpend() ensures we are
 * in a critical section and deals with the interrupt nesting level
 * for us.  If we previously masked the irq, we have to unmask it.
 *
 * We have a choice.  We can regenerate the irq using the 'int'
 * instruction or we can create a dummy frame and call the interrupt
 * handler directly.  I've chosen to use the dummy-frame method.
 *
 * NOTE(review): the trailing backslash after 'ret' continues the
 * definition into the following comment block, which cpp reduces to a
 * space — harmless, but sloppy.
 */
#define	FAST_UNPEND(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
;									\
	pushl	%ebp ;							\
	movl	%esp, %ebp ;						\
	PUSH_DUMMY ;		/* fake an interrupt frame */		\
	pushl	intr_unit + (irq_num) * 4 ;				\
	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */	\
	addl	$4, %esp ;						\
	lock ; 								\
	incl	cnt+V_INTR ;		/* book-keeping can wait */	\
	movl	intr_countp + (irq_num) * 4, %eax ;			\
	lock ; 								\
	incl	(%eax) ;						\
	UNMASK_LEVEL_IRQ(irq_num) ;					\
	POP_DUMMY ;							\
	popl	%ebp ;							\
	ret ;								\
/*
 * Slow, threaded interrupts.
 *
 * The source is masked (if level-triggered) and EOIed immediately, then
 * sched_ithd() schedules the handler thread.  In a critical section the
 * IRQ is merely recorded in IPENDING for later replay.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs via _vec[]. */		\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;	/* e.g. CLKINTR_PENDING for IRQ 0 */	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
;									\
	movl	PCPU(CURTHREAD),%ebx ;					\
	cmpl	$0,TD_CRITNEST(%ebx) ;	/* in a critical section? */	\
	je	1f ;							\
	movl	$1,PCPU(INT_PENDING) ;	/* defer: mark pending */	\
	orl	$IRQ_BIT(irq_num),PCPU(IPENDING) ;			\
	jmp	10f ;							\
1: ;									\
	incl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
;	 								\
	FAKE_MCOUNT(13*4(%esp)) ;	/* XXX avoid dbl cnt */		\
	cmpl	$0,PCPU(INT_PENDING) ;	/* run deferred ints first */	\
	je	9f ;							\
	call	i386_unpend ;						\
9: ;									\
	pushl	$irq_num;		/* pass the IRQ */		\
	call	sched_ithd ;		/* schedule handler thread */	\
	addl	$4, %esp ;		/* discard the parameter */	\
;									\
	decl	TD_INTR_NESTING_LEVEL(%ebx) ;				\
10: ;									\
	MEXITCOUNT ;							\
	jmp	doreti
/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(spuriousint)

	/* No EOI cycle used here: simply return to the interrupted code */

	iret
#ifdef SMP
/*
 * Global address space TLB shootdown.
 * Reloads %cr3 to flush the whole TLB, EOIs, and signals completion
 * by incrementing smp_tlb_wait for the initiating CPU.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(invltlb)
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_gbl(,%eax,4)	/* per-CPU shootdown statistics */
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait		/* tell initiator we are done */

	popl	%ds
	popl	%eax
	iret
/*
 * Single page TLB shootdown.
 * Invalidates the page at smp_tlb_addr1, EOIs, and signals completion
 * via smp_tlb_wait.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(invlpg)
	pushl	%eax
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_pg(,%eax,4)	/* per-CPU shootdown statistics */
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %eax
	invlpg	(%eax)			/* invalidate single page */

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait		/* tell initiator we are done */

	popl	%ds
	popl	%eax
	iret
/*
 * Page range TLB shootdown.
 * Invalidates each page in [smp_tlb_addr1, smp_tlb_addr2), EOIs, and
 * signals completion via smp_tlb_wait.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(invlrng)
	pushl	%eax
	pushl	%edx
	pushl	%ds
	movl	$KDSEL, %eax		/* Kernel data selector */
	mov	%ax, %ds

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax		/* Private space selector */
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	incl	xhits_rng(,%eax,4)	/* per-CPU shootdown statistics */
#endif /* COUNT_XINVLTLB_HITS */

	movl	smp_tlb_addr1, %edx	/* %edx = start, %eax = end */
	movl	smp_tlb_addr2, %eax
1:	invlpg	(%edx)			/* invalidate single page */
	addl	$PAGE_SIZE, %edx
	cmpl	%eax, %edx
	jb	1b

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	lock
	incl	smp_tlb_wait		/* tell initiator we are done */

	popl	%ds
	popl	%edx
	popl	%eax
	iret
/*
 * Forward hardclock to another CPU.  Pushes a clockframe and calls
 * forwarded_hardclock().  In a critical section the event is deferred
 * via bit 0 of SPENDING instead.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(hardclock)
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)	/* in a critical section? */
	je	1f
	movl	$1,PCPU(INT_PENDING)	/* defer: bit 0 of SPENDING */
	orl	$1,PCPU(SPENDING);
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	pushl	$0		/* XXX convert trapframe to clockframe */
	call	forwarded_hardclock
	addl	$4, %esp	/* XXX convert clockframe to trapframe */
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti
/*
 * Forward statclock to another CPU.  Pushes a clockframe and calls
 * forwarded_statclock().  In a critical section the event is deferred
 * via bit 1 of SPENDING instead.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(statclock)
	PUSH_FRAME
	movl	$KDSEL, %eax	/* reload with kernel's data segment */
	mov	%ax, %ds
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	movl	PCPU(CURTHREAD),%ebx
	cmpl	$0,TD_CRITNEST(%ebx)	/* in a critical section? */
	je	1f
	movl	$1,PCPU(INT_PENDING)	/* defer: bit 1 of SPENDING */
	orl	$2,PCPU(SPENDING);
	jmp	10f
1:
	incl	TD_INTR_NESTING_LEVEL(%ebx)
	pushl	$0		/* XXX convert trapframe to clockframe */
	call	forwarded_statclock
	addl	$4, %esp	/* XXX convert clockframe to trapframe */
	decl	TD_INTR_NESTING_LEVEL(%ebx)
10:
	MEXITCOUNT
	jmp	doreti
/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU,
 *
 * The other CPU has already executed aston() or need_resched() on our
 * current process, so we simply need to ack the interrupt and return
 * via doreti to run ast().
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(cpuast)
	PUSH_FRAME
	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MEXITCOUNT
	jmp	doreti			/* doreti will run ast() */
/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 *  - Optionally runs cpustop_restartfunc exactly once on resume.
 */
	.text
	SUPERALIGN_TEXT
IDTVEC(cpustop)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%ds			/* save current data segment */
	pushl	%es
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	mov	%ax, %es
	movl	$KPSEL, %eax
	mov	%ax, %fs

	movl	$0, lapic+LA_EOI	/* End Of Interrupt to APIC */

	/* save our context into stoppcbs[cpuid] */
	movl	PCPU(CPUID), %eax
	imull	$PCB_SIZE, %eax
	leal	CNAME(stoppcbs)(%eax), %eax
	pushl	%eax
	call	CNAME(savectx)		/* Save process context */
	addl	$4, %esp

	movl	PCPU(CPUID), %eax

	lock
	btsl	%eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
	btl	%eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
	jnc	1b

	lock
	btrl	%eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
	lock
	btrl	%eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */

	test	%eax, %eax		/* only CPU 0 runs restartfunc */
	jnz	2f

	movl	CNAME(cpustop_restartfunc), %eax
	test	%eax, %eax
	jz	2f
	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%eax
2:
	popl	%fs
	popl	%es
	popl	%ds			/* restore previous data segment */
	popl	%edx
	popl	%ecx
	popl	%eax
	movl	%ebp, %esp
	popl	%ebp
	iret
#endif /* SMP */
/*
 * Instantiate the interrupt entry points for IRQs 0-31.
 * MCOUNT_LABEL(bintr)/MCOUNT_LABEL(eintr) bracket the range so the
 * profiler can attribute time spent in interrupt stubs.
 */
MCOUNT_LABEL(bintr)

/* Fast (INTR_FAST) interrupt entry points. */
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)
	FAST_INTR(24,fastintr24)
	FAST_INTR(25,fastintr25)
	FAST_INTR(26,fastintr26)
	FAST_INTR(27,fastintr27)
	FAST_INTR(28,fastintr28)
	FAST_INTR(29,fastintr29)
	FAST_INTR(30,fastintr30)
	FAST_INTR(31,fastintr31)

/* IRQ 0 additionally records a pending clock interrupt. */
#define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
	INTR(24,intr24,)
	INTR(25,intr25,)
	INTR(26,intr26,)
	INTR(27,intr27,)
	INTR(28,intr28,)
	INTR(29,intr29,)
	INTR(30,intr30,)
	INTR(31,intr31,)

/* Deferred-fast-interrupt replay entry points, called via i386_unpend(). */
	FAST_UNPEND(0,fastunpend0)
	FAST_UNPEND(1,fastunpend1)
	FAST_UNPEND(2,fastunpend2)
	FAST_UNPEND(3,fastunpend3)
	FAST_UNPEND(4,fastunpend4)
	FAST_UNPEND(5,fastunpend5)
	FAST_UNPEND(6,fastunpend6)
	FAST_UNPEND(7,fastunpend7)
	FAST_UNPEND(8,fastunpend8)
	FAST_UNPEND(9,fastunpend9)
	FAST_UNPEND(10,fastunpend10)
	FAST_UNPEND(11,fastunpend11)
	FAST_UNPEND(12,fastunpend12)
	FAST_UNPEND(13,fastunpend13)
	FAST_UNPEND(14,fastunpend14)
	FAST_UNPEND(15,fastunpend15)
	FAST_UNPEND(16,fastunpend16)
	FAST_UNPEND(17,fastunpend17)
	FAST_UNPEND(18,fastunpend18)
	FAST_UNPEND(19,fastunpend19)
	FAST_UNPEND(20,fastunpend20)
	FAST_UNPEND(21,fastunpend21)
	FAST_UNPEND(22,fastunpend22)
	FAST_UNPEND(23,fastunpend23)
	FAST_UNPEND(24,fastunpend24)
	FAST_UNPEND(25,fastunpend25)
	FAST_UNPEND(26,fastunpend26)
	FAST_UNPEND(27,fastunpend27)
	FAST_UNPEND(28,fastunpend28)
	FAST_UNPEND(29,fastunpend29)
	FAST_UNPEND(30,fastunpend30)
	FAST_UNPEND(31,fastunpend31)
MCOUNT_LABEL(eintr)
#ifdef SMP
/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
.text
SUPERALIGN_TEXT
IDTVEC(rendezvous)
PUSH_FRAME
movl $KDSEL, %eax
mov %ax, %ds /* use KERNEL data segment */
mov %ax, %es
movl $KPSEL, %eax
mov %ax, %fs /* per-CPU private segment */
call smp_rendezvous_action
/* EOI is sent only after the rendezvous action has completed. */
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
POP_FRAME
iret
/*
 * Clean up when we lose out on the lazy context switch optimization.
 * ie: when we are about to release a PTD but a cpu is still borrowing it.
 */
SUPERALIGN_TEXT
IDTVEC(lazypmap)
PUSH_FRAME
movl $KDSEL, %eax
mov %ax, %ds /* use KERNEL data segment */
mov %ax, %es
movl $KPSEL, %eax
mov %ax, %fs /* per-CPU private segment */
call pmap_lazyfix_action
/* EOI is sent only after the pmap fixup has completed. */
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
POP_FRAME
iret
#endif /* SMP */
.data
.globl apic_pin_trigger
/*
 * Bitmap of IO APIC pin trigger modes.
 * NOTE(review): presumably a set bit means level-triggered — the code
 * that populates this word is not visible here; confirm against the
 * IO APIC configuration code before relying on this.
 */
apic_pin_trigger:
.long 0
.text

View File

@ -1,81 +0,0 @@
/*-
* Copyright (c) 1989, 1990 William F. Jolitz.
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
.data
ALIGN_DATA
/* interrupt mask enable (all h/w off) */
/* Shadow of the two 8259 IMRs: bit N set => IRQ N masked.  INTREN
   clears bits here, INTRDIS sets them; starts with everything masked. */
.globl imen
imen: .long HWI_MASK
.text
SUPERALIGN_TEXT
/*
 * INTREN(mask): enable the IRQs whose bits are set in the argument by
 * clearing them in imen and rewriting the affected 8259 IMR byte(s).
 * NOTE(review): imen is elsewhere documented as protected by icu_lock;
 * callers presumably hold it — confirm.
 */
ENTRY(INTREN)
movl 4(%esp), %eax /* mask of IRQs to enable */
movl %eax, %ecx /* keep a copy to see which ICUs are affected */
notl %eax
andl %eax, imen /* imen &= ~mask */
movl imen, %eax /* reload the updated mask */
testb %cl, %cl /* any of IRQs 0-7 requested? */
je 1f
outb %al, $(IO_ICU1 + ICU_IMR_OFFSET) /* update master IMR */
1:
testb %ch, %ch /* any of IRQs 8-15 requested? */
je 2f
shrl $8, %eax /* slave's byte of imen */
outb %al, $(IO_ICU2 + ICU_IMR_OFFSET) /* update slave IMR */
2:
ret
/*
 * INTRDIS(mask): disable the IRQs whose bits are set in the argument by
 * setting them in imen and rewriting the affected 8259 IMR byte(s).
 * Mirror image of INTREN above; same locking expectations apply.
 */
ENTRY(INTRDIS)
movl 4(%esp), %eax /* mask of IRQs to disable */
movl %eax, %ecx /* keep a copy to see which ICUs are affected */
orl %eax, imen /* imen |= mask */
movl imen, %eax /* reload the updated mask */
testb %cl, %cl /* any of IRQs 0-7 requested? */
je 1f
outb %al, $(IO_ICU1 + ICU_IMR_OFFSET) /* update master IMR */
1:
testb %ch, %ch /* any of IRQs 8-15 requested? */
je 2f
shrl $8, %eax /* slave's byte of imen */
outb %al, $(IO_ICU2 + ICU_IMR_OFFSET) /* update slave IMR */
2:
ret

View File

@ -1,253 +0,0 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $FreeBSD$
*/
/*
 * Helpers for addressing the 16-bit imen mask: IRQ_BYTE/IRQ_BIT select
 * the per-ICU byte and the bit within it; IRQ_LBIT gives the bit in a
 * 32-bit pending word.
 */
#define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
#define IRQ_LBIT(irq_num) (1 << (irq_num))
#define IRQ_BYTE(irq_num) ((irq_num) >> 3)
#ifdef AUTO_EOI_1
#define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
#define OUTB_ICU1
#else
#define ENABLE_ICU1 \
movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
OUTB_ICU1 /* ... to clear in service bit */
#define OUTB_ICU1 \
outb %al,$IO_ICU1
#endif
#ifdef AUTO_EOI_2
/*
 * The data sheet says no auto-EOI on slave, but it sometimes works.
 */
#define ENABLE_ICU1_AND_2 ENABLE_ICU1
#else
#define ENABLE_ICU1_AND_2 \
movb $ICU_EOI,%al ; /* as above */ \
outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */
#endif
/* Build the trap frame expected on entry to doreti. */
#define PUSH_FRAME \
pushl $0 ; /* dummy error code */ \
pushl $0 ; /* dummy trap type */ \
pushal ; /* 8 ints */ \
pushl %ds ; /* save data and extra segments ... */ \
pushl %es ; \
pushl %fs
/* Build a fake frame so FAST_UNPEND can call a handler directly. */
#define PUSH_DUMMY \
pushfl ; /* eflags */ \
pushl %cs ; /* cs */ \
pushl 12(%esp) ; /* original caller eip */ \
pushl $0 ; /* dummy error code */ \
pushl $0 ; /* dummy trap type */ \
subl $11*4,%esp
#define POP_FRAME \
popl %fs ; \
popl %es ; \
popl %ds ; \
popal ; \
addl $4+4,%esp
#define POP_DUMMY \
addl $16*4,%esp
/* Set/clear irq_num in imen and push the byte to the ICU's IMR.
   Both macros clobber %al. */
#define MASK_IRQ(icu, irq_num) \
movb imen + IRQ_BYTE(irq_num),%al ; \
orb $IRQ_BIT(irq_num),%al ; \
movb %al,imen + IRQ_BYTE(irq_num) ; \
outb %al,$icu+ICU_IMR_OFFSET
#define UNMASK_IRQ(icu, irq_num) \
movb imen + IRQ_BYTE(irq_num),%al ; \
andb $~IRQ_BIT(irq_num),%al ; \
movb %al,imen + IRQ_BYTE(irq_num) ; \
outb %al,$icu+ICU_IMR_OFFSET
/*
 * Macros for interrupt entry, call to handler, and exit.
 *
 * FAST_INTR: entry point for a fast interrupt handler.  If the current
 * thread is inside a critical section, the IRQ is masked and marked
 * pending (INT_PENDING/FPENDING) so i386_unpend() can replay it later;
 * otherwise the handler is invoked directly at interrupt time.
 */
#define FAST_INTR(irq_num, vec_name, icu, enable_icus) \
.text ; \
SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
PUSH_FRAME ; \
mov $KDSEL,%ax ; \
mov %ax,%ds ; \
mov %ax,%es ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
movl PCPU(CURTHREAD),%ebx ; \
cmpl $0,TD_CRITNEST(%ebx) ; \
je 1f ; /* not in a critical section */ \
; \
movl $1,PCPU(INT_PENDING) ; \
orl $IRQ_LBIT(irq_num),PCPU(FPENDING) ; \
MASK_IRQ(icu, irq_num) ; \
enable_icus ; \
jmp 10f ; \
1: ; \
incl TD_CRITNEST(%ebx) ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
pushl intr_unit + (irq_num) * 4 ; \
call *intr_handler + (irq_num) * 4 ; \
addl $4,%esp ; \
enable_icus ; \
incl cnt+V_INTR ; /* book-keeping can wait */ \
movl intr_countp + (irq_num) * 4,%eax ; \
incl (%eax) ; \
decl TD_CRITNEST(%ebx) ; \
cmpl $0,PCPU(INT_PENDING) ; \
je 2f ; \
; \
call i386_unpend ; \
2: ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
10: ; \
MEXITCOUNT ; \
jmp doreti
/*
 * Restart a fast interrupt that was held up by a critical section.
 * This routine is called from unpend().  unpend() ensures we are
 * in a critical section and deals with the interrupt nesting level
 * for us.  If we previously masked the irq, we have to unmask it.
 *
 * We have a choice.  We can regenerate the irq using the 'int'
 * instruction or we can create a dummy frame and call the interrupt
 * handler directly.  I've chosen to use the dummy-frame method.
 *
 * NOTE(review): UNMASK_IRQ is applied unconditionally here; if the IRQ
 * was not masked this rewrites the IMR with an unchanged value, which
 * appears harmless.
 */
#define FAST_UNPEND(irq_num, vec_name, icu) \
.text ; \
SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
; \
pushl %ebp ; \
movl %esp, %ebp ; \
PUSH_DUMMY ; \
pushl intr_unit + (irq_num) * 4 ; \
call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
addl $4, %esp ; \
incl cnt+V_INTR ; /* book-keeping can wait */ \
movl intr_countp + (irq_num) * 4,%eax ; \
incl (%eax) ; \
UNMASK_IRQ(icu, irq_num) ; \
POP_DUMMY ; \
popl %ebp ; \
ret
/*
 * Slow, threaded interrupts.
 *
 * The IRQ is masked and EOIed immediately, then sched_ithd() is called
 * to wake the interrupt thread.  If the current thread is in a critical
 * section the IRQ is only marked pending (IPENDING) for later replay.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define INTR(irq_num, vec_name, icu, enable_icus, maybe_extra_ipending) \
.text ; \
SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
PUSH_FRAME ; \
mov $KDSEL,%ax ; /* load kernel ds, es and fs */ \
mov %ax,%ds ; \
mov %ax,%es ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
; \
maybe_extra_ipending ; \
MASK_IRQ(icu, irq_num) ; \
enable_icus ; \
; \
movl PCPU(CURTHREAD),%ebx ; \
cmpl $0,TD_CRITNEST(%ebx) ; \
je 1f ; /* not in a critical section */ \
movl $1,PCPU(INT_PENDING); \
orl $IRQ_LBIT(irq_num),PCPU(IPENDING) ; \
jmp 10f ; \
1: ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
cmpl $0,PCPU(INT_PENDING) ; \
je 9f ; \
call i386_unpend ; \
9: ; \
pushl $irq_num; /* pass the IRQ */ \
call sched_ithd ; \
addl $4, %esp ; /* discard the parameter */ \
; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
10: ; \
MEXITCOUNT ; \
jmp doreti
MCOUNT_LABEL(bintr) /* profiling: start of interrupt entry points */
/* IRQs 0-7 live on the master 8259, IRQs 8-15 on the slave. */
FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
/* IRQ 0 is the clock; record a pending clock tick as early as possible. */
#define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
/* Threaded interrupts */
INTR(0,intr0, IO_ICU1, ENABLE_ICU1, CLKINTR_PENDING)
INTR(1,intr1, IO_ICU1, ENABLE_ICU1,)
INTR(2,intr2, IO_ICU1, ENABLE_ICU1,)
INTR(3,intr3, IO_ICU1, ENABLE_ICU1,)
INTR(4,intr4, IO_ICU1, ENABLE_ICU1,)
INTR(5,intr5, IO_ICU1, ENABLE_ICU1,)
INTR(6,intr6, IO_ICU1, ENABLE_ICU1,)
INTR(7,intr7, IO_ICU1, ENABLE_ICU1,)
INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2,)
INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2,)
/* Entry points used to replay fast interrupts deferred by a critical section. */
FAST_UNPEND(0,fastunpend0, IO_ICU1)
FAST_UNPEND(1,fastunpend1, IO_ICU1)
FAST_UNPEND(2,fastunpend2, IO_ICU1)
FAST_UNPEND(3,fastunpend3, IO_ICU1)
FAST_UNPEND(4,fastunpend4, IO_ICU1)
FAST_UNPEND(5,fastunpend5, IO_ICU1)
FAST_UNPEND(6,fastunpend6, IO_ICU1)
FAST_UNPEND(7,fastunpend7, IO_ICU1)
FAST_UNPEND(8,fastunpend8, IO_ICU2)
FAST_UNPEND(9,fastunpend9, IO_ICU2)
FAST_UNPEND(10,fastunpend10, IO_ICU2)
FAST_UNPEND(11,fastunpend11, IO_ICU2)
FAST_UNPEND(12,fastunpend12, IO_ICU2)
FAST_UNPEND(13,fastunpend13, IO_ICU2)
FAST_UNPEND(14,fastunpend14, IO_ICU2)
FAST_UNPEND(15,fastunpend15, IO_ICU2)
MCOUNT_LABEL(eintr) /* profiling: end of interrupt entry points */

View File

@ -1,251 +0,0 @@
/*-
* Copyright (c) 1991 The Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _I386_ISA_INTR_MACHDEP_H_
#define _I386_ISA_INTR_MACHDEP_H_
/*
* Low level interrupt code.
*/
#ifdef _KERNEL
#if defined(SMP) || defined(APIC_IO)
/*
* XXX FIXME: rethink location for all IPI vectors.
*/
/*
APIC TPR priority vector levels:
0xff (255) +-------------+
| | 15 (IPIs: Xspuriousint)
0xf0 (240) +-------------+
| | 14
0xe0 (224) +-------------+
| | 13
0xd0 (208) +-------------+
| | 12
0xc0 (192) +-------------+
| | 11
0xb0 (176) +-------------+
| | 10 (IPIs: Xcpustop)
0xa0 (160) +-------------+
| | 9 (IPIs: Xinvltlb)
0x90 (144) +-------------+
| | 8 (linux/BSD syscall, IGNORE FAST HW INTS)
0x80 (128) +-------------+
| | 7 (FAST_INTR 16-23)
0x70 (112) +-------------+
| | 6 (FAST_INTR 0-15)
0x60 (96) +-------------+
| | 5 (IGNORE HW INTS)
0x50 (80) +-------------+
| | 4 (2nd IO APIC)
0x40 (64) +------+------+
| | | 3 (upper APIC hardware INTs: PCI)
0x30 (48) +------+------+
| | 2 (start of hardware INTs: ISA)
0x20 (32) +-------------+
| | 1 (exceptions, traps, etc.)
0x10 (16) +-------------+
| | 0 (exceptions, traps, etc.)
0x00 (0) +-------------+
*/
/* IDT vector base for regular (aka. slow) and fast interrupts */
#define TPR_SLOW_INTS 0x20
#define TPR_FAST_INTS 0x60
/* XXX note that the AST interrupt is at 0x50 */
/* blocking values for local APIC Task Priority Register */
#define TPR_BLOCK_HWI 0x4f /* hardware INTs */
#define TPR_IGNORE_HWI 0x5f /* ignore INTs */
#define TPR_BLOCK_FHWI 0x7f /* hardware FAST INTs */
#define TPR_IGNORE_FHWI 0x8f /* ignore FAST INTs */
#define TPR_BLOCK_XINVLTLB 0x9f /* block up through the TLB shootdown IPIs */
#define TPR_BLOCK_XCPUSTOP 0xaf /* block up through Xcpustop */
#define TPR_BLOCK_ALL 0xff /* all INTs */
#ifdef TEST_TEST1
/* put a 'fake' HWI in top of APIC prio 0x3x, 32 + 31 = 63 = 0x3f */
#define XTEST1_OFFSET (ICU_OFFSET + 31)
#endif /** TEST_TEST1 */
/* TLB shootdowns */
#define XINVLTLB_OFFSET (ICU_OFFSET + 112) /* 0x90 */
#define XINVLPG_OFFSET (ICU_OFFSET + 113) /* 0x91 */
#define XINVLRNG_OFFSET (ICU_OFFSET + 114) /* 0x92 */
/* inter-cpu clock handling */
#define XHARDCLOCK_OFFSET (ICU_OFFSET + 120) /* 0x98 */
#define XSTATCLOCK_OFFSET (ICU_OFFSET + 121) /* 0x99 */
/* inter-CPU rendezvous */
#define XRENDEZVOUS_OFFSET (ICU_OFFSET + 122) /* 0x9A */
/* lazy pmap release */
#define XLAZYPMAP_OFFSET (ICU_OFFSET + 123) /* 0x9B */
/* IPI to generate an additional software trap at the target CPU */
/* XXX in the middle of the interrupt range, overlapping IRQ48 */
#define XCPUAST_OFFSET (ICU_OFFSET + 48) /* 0x50 */
/* IPI to signal CPUs to stop and wait for another CPU to restart them */
#define XCPUSTOP_OFFSET (ICU_OFFSET + 128) /* 0xA0 */
/*
 * Note: this vector MUST be xxxx1111, 32 + 223 = 255 = 0xff:
 */
#define XSPURIOUSINT_OFFSET (ICU_OFFSET + 223)
#endif /* SMP || APIC_IO */
#ifdef LOCORE
/*
 * Protects the IO APIC, 8259 PIC, imen, and apic_imen
 */
#define ICU_LOCK MTX_LOCK_SPIN(icu_lock, 0)
#define ICU_UNLOCK MTX_UNLOCK_SPIN(icu_lock)
#else /* LOCORE */
/*
 * Type of the first (asm) part of an interrupt handler.
 */
typedef void inthand_t(u_int cs, u_int ef, u_int esp, u_int ss);
/* Type of an entry point used to replay a deferred fast interrupt. */
typedef void unpendhand_t(void);
/* All asm entry points are exported with an 'X' prefix, e.g. Xintr0. */
#define IDTVEC(name) __CONCAT(X,name)
/* Per-IRQ tables, indexed by IRQ number. */
extern u_long *intr_countp[]; /* pointers into intrcnt[] */
extern driver_intr_t *intr_handler[]; /* C entry points of intr handlers */
extern struct ithd *ithds[]; /* interrupt threads */
extern void *intr_unit[]; /* cookies to pass to intr handlers */
extern struct mtx icu_lock;
inthand_t
IDTVEC(fastintr0), IDTVEC(fastintr1),
IDTVEC(fastintr2), IDTVEC(fastintr3),
IDTVEC(fastintr4), IDTVEC(fastintr5),
IDTVEC(fastintr6), IDTVEC(fastintr7),
IDTVEC(fastintr8), IDTVEC(fastintr9),
IDTVEC(fastintr10), IDTVEC(fastintr11),
IDTVEC(fastintr12), IDTVEC(fastintr13),
IDTVEC(fastintr14), IDTVEC(fastintr15);
inthand_t
IDTVEC(intr0), IDTVEC(intr1), IDTVEC(intr2), IDTVEC(intr3),
IDTVEC(intr4), IDTVEC(intr5), IDTVEC(intr6), IDTVEC(intr7),
IDTVEC(intr8), IDTVEC(intr9), IDTVEC(intr10), IDTVEC(intr11),
IDTVEC(intr12), IDTVEC(intr13), IDTVEC(intr14), IDTVEC(intr15);
unpendhand_t
IDTVEC(fastunpend0), IDTVEC(fastunpend1), IDTVEC(fastunpend2),
IDTVEC(fastunpend3), IDTVEC(fastunpend4), IDTVEC(fastunpend5),
IDTVEC(fastunpend6), IDTVEC(fastunpend7), IDTVEC(fastunpend8),
IDTVEC(fastunpend9), IDTVEC(fastunpend10), IDTVEC(fastunpend11),
IDTVEC(fastunpend12), IDTVEC(fastunpend13), IDTVEC(fastunpend14),
IDTVEC(fastunpend15), IDTVEC(fastunpend16), IDTVEC(fastunpend17),
IDTVEC(fastunpend18), IDTVEC(fastunpend19), IDTVEC(fastunpend20),
IDTVEC(fastunpend21), IDTVEC(fastunpend22), IDTVEC(fastunpend23),
IDTVEC(fastunpend24), IDTVEC(fastunpend25), IDTVEC(fastunpend26),
IDTVEC(fastunpend27), IDTVEC(fastunpend28), IDTVEC(fastunpend29),
IDTVEC(fastunpend30), IDTVEC(fastunpend31);
#if defined(SMP) || defined(APIC_IO)
inthand_t
IDTVEC(fastintr16), IDTVEC(fastintr17),
IDTVEC(fastintr18), IDTVEC(fastintr19),
IDTVEC(fastintr20), IDTVEC(fastintr21),
IDTVEC(fastintr22), IDTVEC(fastintr23),
IDTVEC(fastintr24), IDTVEC(fastintr25),
IDTVEC(fastintr26), IDTVEC(fastintr27),
IDTVEC(fastintr28), IDTVEC(fastintr29),
IDTVEC(fastintr30), IDTVEC(fastintr31);
inthand_t
IDTVEC(intr16), IDTVEC(intr17), IDTVEC(intr18), IDTVEC(intr19),
IDTVEC(intr20), IDTVEC(intr21), IDTVEC(intr22), IDTVEC(intr23),
IDTVEC(intr24), IDTVEC(intr25), IDTVEC(intr26), IDTVEC(intr27),
IDTVEC(intr28), IDTVEC(intr29), IDTVEC(intr30), IDTVEC(intr31);
inthand_t
Xinvltlb, /* TLB shootdowns - global */
Xinvlpg, /* TLB shootdowns - 1 page */
Xinvlrng, /* TLB shootdowns - page range */
Xhardclock, /* Forward hardclock() */
Xstatclock, /* Forward statclock() */
Xcpuast, /* Additional software trap on other cpu */
Xcpustop, /* CPU stops & waits for another CPU to restart it */
Xspuriousint, /* handle APIC "spurious INTs" */
Xrendezvous, /* handle CPU rendezvous */
Xlazypmap; /* handle lazy pmap release */
#ifdef TEST_TEST1
inthand_t
Xtest1; /* 'fake' HWI at top of APIC prio 0x3x, 32+31 = 0x3f */
#endif /** TEST_TEST1 */
#endif /* SMP || APIC_IO */
#ifdef APIC_IO
/*
* This is to accommodate "mixed-mode" programming for
* motherboards that don't connect the 8254 to the IO APIC.
*/
#define AUTO_EOI_1 1
#endif
/*
 * NOTE(review): sized as 1 + ICU_LEN + 2 * ICU_LEN; the breakdown of the
 * three groups is defined where the intrnames table is generated — confirm
 * there before changing.
 */
#define NR_INTRNAMES (1 + ICU_LEN + 2 * ICU_LEN)
/* ICU/ISA interrupt support routines (implemented in isa machdep code). */
void isa_defaultirq(void);
int isa_nmi(int cd);
int icu_setup(int intr, driver_intr_t *func, void *arg, int flags);
int icu_unset(int intr, driver_intr_t *handler);
void icu_reinit(void);
/*
 * WARNING: These are internal functions and not to be used by device drivers!
 * They are subject to change without notice.
 */
int inthand_add(const char *name, int irq, driver_intr_t handler, void *arg,
enum intr_type flags, void **cookiep);
int inthand_remove(void *cookie);
void sched_ithd(void *dummy);
void call_fast_unpend(int irq);
#endif /* LOCORE */
#endif /* _KERNEL */
#endif /* !_I386_ISA_INTR_MACHDEP_H_ */

View File

@ -1,92 +0,0 @@
/*-
* Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Berkeley Software Design Inc's name may not be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* From BSDI: intr.c,v 1.6.2.5 1999/07/06 19:16:52 cp Exp
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* Interrupt thread code. */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>
/*
 * NOTE(review): struct int_entropy is not referenced anywhere in this
 * file — it looks like a leftover; confirm no other references before
 * removing.
 */
struct int_entropy {
struct proc *p;
int irq;
};
/* Number of stray interrupts seen per IRQ; logging stops at MAX_STRAY_LOG. */
static u_int straycount[ICU_LEN];
#define MAX_STRAY_LOG 5
/*
* Schedule a heavyweight interrupt process. This function is called
* from the interrupt handlers Xintr<num>.
*/
void
sched_ithd(void *cookie)
{
int irq = (int) cookie; /* IRQ we're handling */
struct ithd *ithd = ithds[irq]; /* and the process that does it */
int error;
/* This used to be in icu_vector.s */
/*
* We count software interrupts when we process them. The
* code here follows previous practice, but there's an
* argument for counting hardware interrupts when they're
* processed too.
*/
atomic_add_long(intr_countp[irq], 1); /* one more for this IRQ */
atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */
/*
* Schedule the interrupt thread to run if needed and switch to it
* if we schedule it if !cold.
*/
error = ithread_schedule(ithd, !cold);
/*
* Log stray interrupts.
*/
if (error == EINVAL)
if (straycount[irq] < MAX_STRAY_LOG) {
printf("stray irq %d\n", irq);
if (++straycount[irq] == MAX_STRAY_LOG)
printf(
"got %d stray irq %d's: not logging anymore\n",
MAX_STRAY_LOG, irq);
}
}