- Heavyweight interrupt threads on the alpha for device I/O interrupts.

- Make software interrupts (SWIs) almost completely MI, and divorce them
  entirely from the x86 hardware interrupt code.
  - The ihandlers array is now gone.  Instead, there is a MI shandlers array
    that just contains SWI handlers.
  - Most of the former machine/ipl.h files have moved to a new sys/ipl.h.
- Stub out all the spl*() functions on all architectures.

Submitted by:	dfr
This commit is contained in:
jhb 2000-10-05 23:09:57 +00:00
parent d3d06a3e7c
commit 71938e9fcd
65 changed files with 862 additions and 1044 deletions

View File

@ -200,7 +200,7 @@ configure(void *dummy)
* Now we're ready to handle (pending) interrupts.
* XXX this is slightly misplaced.
*/
spl0();
alpha_pal_swpipl(ALPHA_PSL_IPL_0);
cold = 0;
}

View File

@ -45,7 +45,11 @@
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/interrupt.h>
#include <sys/ipl.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/unistd.h>
#include <machine/reg.h>
#include <machine/frame.h>
@ -77,6 +81,8 @@ void (*perf_irq)(unsigned long, struct trapframe *) = dummy_perf;
static u_int schedclk2;
static void ithd_loop(void *);
static driver_intr_t alpha_clock_interrupt;
void
interrupt(a0, a1, a2, framep)
@ -115,18 +121,7 @@ interrupt(a0, a1, a2, framep)
}
mtx_enter(&Giant, MTX_DEF);
cnt.v_intr++;
#ifdef EVCNT_COUNTERS
clock_intr_evcnt.ev_count++;
#else
intrcnt[INTRCNT_CLOCK]++;
#endif
if (platform.clockintr){
(*platform.clockintr)(framep);
/* divide hz (1024) by 8 to get stathz (128) */
if((++schedclk2 & 0x7) == 0)
statclock((struct clockframe *)framep);
}
alpha_clock_interrupt(framep);
mtx_exit(&Giant, MTX_DEF);
break;
@ -331,61 +326,299 @@ LIST_HEAD(alpha_intr_list, alpha_intr);
struct alpha_intr {
LIST_ENTRY(alpha_intr) list; /* chain handlers in this hash bucket */
int vector; /* vector to match */
driver_intr_t *intr; /* handler function */
void *arg; /* argument to handler */
struct ithd *ithd; /* interrupt thread */
volatile long *cntp; /* interrupt counter */
void (*disable)(int); /* disable source */
void (*enable)(int); /* enable source */
};
static struct alpha_intr_list alpha_intr_hash[31];
int alpha_setup_intr(int vector, driver_intr_t *intr, void *arg,
void **cookiep, volatile long *cntp)
int
alpha_setup_intr(const char *name, int vector, driver_intr_t *handler,
void *arg, int pri, void **cookiep, volatile long *cntp,
void (*disable)(int), void (*enable)(int))
{
int h = HASHVEC(vector);
struct alpha_intr *i;
int s;
struct intrec *head, *idesc;
struct ithd *ithd;
struct proc *p;
int s, errcode;
i = malloc(sizeof(struct alpha_intr), M_DEVBUF, M_NOWAIT);
if (!i)
/* First, check for an existing hash table entry for this vector. */
for (i = LIST_FIRST(&alpha_intr_hash[h]); i && i->vector != vector;
i = LIST_NEXT(i, list))
; /* nothing */
if (i == NULL) {
/* None was found, so create an entry. */
i = malloc(sizeof(struct alpha_intr), M_DEVBUF, M_NOWAIT);
if (i == NULL)
return ENOMEM;
i->vector = vector;
i->ithd = NULL;
i->cntp = cntp;
i->disable = disable;
i->enable = enable;
s = splhigh();
LIST_INSERT_HEAD(&alpha_intr_hash[h], i, list);
splx(s);
}
/* Second, create the interrupt thread if needed. */
ithd = i->ithd;
if (ithd == NULL || ithd->it_ih == NULL) {
/* first handler for this vector */
if (ithd == NULL) {
ithd = malloc(sizeof(struct ithd), M_DEVBUF, M_WAITOK);
if (ithd == NULL)
return ENOMEM;
bzero(ithd, sizeof(struct ithd));
ithd->irq = vector;
ithd->it_md = i;
i->ithd = ithd;
}
/* Create a kernel thread if needed. */
if (ithd->it_proc == NULL) {
errcode = kthread_create(ithd_loop, NULL, &p,
RFSTOPPED | RFHIGHPID, "intr: %s", name);
if (errcode)
panic(
"alpha_setup_intr: Can't create interrupt thread");
p->p_rtprio.type = RTP_PRIO_ITHREAD;
p->p_stat = SWAIT; /* we're idle */
/* Put in linkages. */
ithd->it_proc = p;
p->p_ithd = ithd;
} else
snprintf(ithd->it_proc->p_comm, MAXCOMLEN, "intr%03x: %s",
vector, name);
p->p_rtprio.prio = pri;
} else {
p = ithd->it_proc;
if (strlen(p->p_comm) + strlen(name) < MAXCOMLEN) {
strcat(p->p_comm, " ");
strcat(p->p_comm, name);
} else if (strlen(p->p_comm) == MAXCOMLEN)
p->p_comm[MAXCOMLEN - 1] = '+';
else
strcat(p->p_comm, "+");
}
/* Third, setup the interrupt descriptor for this handler. */
idesc = malloc(sizeof (struct intrec), M_DEVBUF, M_WAITOK);
if (idesc == NULL)
return ENOMEM;
i->vector = vector;
i->intr = intr;
i->arg = arg;
i->cntp = cntp;
bzero(idesc, sizeof(struct intrec));
s = splhigh();
LIST_INSERT_HEAD(&alpha_intr_hash[h], i, list);
splx(s);
idesc->handler = handler;
idesc->argument = arg;
idesc->name = malloc(strlen(name) + 1, M_DEVBUF, M_WAITOK);
if (idesc->name == NULL) {
free(idesc, M_DEVBUF);
return(NULL);
}
strcpy(idesc->name, name);
*cookiep = i;
/* Fourth, add our handler to the end of the ithread's handler list. */
head = ithd->it_ih;
if (head) {
while (head->next != NULL)
head = head->next;
head->next = idesc;
} else
ithd->it_ih = idesc;
*cookiep = idesc;
return 0;
}
int alpha_teardown_intr(void *cookie)
int
alpha_teardown_intr(void *cookie)
{
struct alpha_intr *i = cookie;
struct intrec *idesc = cookie;
struct ithd *ithd;
struct intrec *head;
#if 0
struct alpha_intr *i;
int s;
#endif
/* First, detach ourself from our interrupt thread. */
ithd = idesc->ithd;
KASSERT(ithd != NULL, ("idesc without an interrupt thread"));
head = ithd->it_ih;
if (head == idesc)
ithd->it_ih = idesc->next;
else {
while (head != NULL && head->next != idesc)
head = head->next;
if (head == NULL)
return (-1); /* couldn't find ourself */
head->next = idesc->next;
}
free(idesc, M_DEVBUF);
/* XXX - if the ithd has no handlers left, we should remove it */
#if 0
s = splhigh();
LIST_REMOVE(i, list);
splx(s);
free(i, M_DEVBUF);
#endif
return 0;
}
void
alpha_dispatch_intr(void *frame, unsigned long vector)
{
struct alpha_intr *i;
volatile long *cntp;
int h = HASHVEC(vector);
for (i = LIST_FIRST(&alpha_intr_hash[h]); i; i = LIST_NEXT(i, list))
if (i->vector == vector) {
if ((cntp = i->cntp) != NULL)
(*cntp) ++;
i->intr(i->arg);
}
struct alpha_intr *i;
struct ithd *ithd; /* our interrupt thread */
/*
* Walk the hash bucket for this vector looking for this vector's
* interrupt thread.
*/
for (i = LIST_FIRST(&alpha_intr_hash[h]); i && i->vector != vector;
i = LIST_NEXT(i, list))
; /* nothing */
if (i == NULL)
return; /* no ithread for this vector */
ithd = i->ithd;
KASSERT(ithd != NULL, ("interrupt vector without a thread"));
/*
* As an optomization until we have kthread_cancel(), if an ithread
* has no handlers, don't schedule it to run.
*/
if (ithd->it_ih == NULL)
return;
atomic_add_long(i->cntp, 1);
CTR3(KTR_INTR, "sched_ithd pid %d(%s) need=%d",
ithd->it_proc->p_pid, ithd->it_proc->p_comm, ithd->it_need);
/*
* Set it_need so that if the thread is already running but close
* to done, it will do another go-round. Then get the sched lock
* and see if the thread is on whichkqs yet. If not, put it on
* there. In any case, kick everyone so that if the new thread
* is higher priority than their current thread, it gets run now.
*/
ithd->it_need = 1;
mtx_enter(&sched_lock, MTX_SPIN);
if (ithd->it_proc->p_stat == SWAIT) {
/* not on the run queue and not running */
CTR1(KTR_INTR, "alpha_dispatch_intr: setrunqueue %d",
ithd->it_proc->p_pid);
alpha_mb(); /* XXX - ??? */
ithd->it_proc->p_stat = SRUN;
setrunqueue(ithd->it_proc);
aston();
} else {
CTR3(KTR_INTR, "alpha_dispatch_intr: %d: it_need %d, state %d",
ithd->it_proc->p_pid, ithd->it_need, ithd->it_proc->p_stat);
}
if (i->disable)
i->disable(i->vector);
mtx_exit(&sched_lock, MTX_SPIN);
need_resched();
}
/*
 * Main loop of an interrupt thread.  While it_need is set, walk the
 * thread's handler list and run each handler (taking Giant around any
 * handler not marked INTR_MPSAFE), then block in SWAIT via mi_switch()
 * until alpha_dispatch_intr() marks the thread runnable again.
 * The "dummy" argument is unused; the ithread context is recovered
 * from curproc->p_ithd.
 */
void
ithd_loop(void *dummy)
{
struct ithd *ithd; /* our thread context */
struct intrec *ih; /* list of handlers */
struct alpha_intr *i; /* interrupt source */
ithd = curproc->p_ithd;
i = ithd->it_md;
/*
 * As long as we have interrupts outstanding, go through the
 * list of handlers, giving each one a go at it.
 */
for (;;) {
CTR3(KTR_INTR, "ithd_loop pid %d(%s) need=%d",
ithd->it_proc->p_pid, ithd->it_proc->p_comm, ithd->it_need);
/*
 * NOTE(review): it_need is cleared here without sched_lock;
 * correctness relies on the alpha_wmb() below plus the
 * re-check of it_need under sched_lock before sleeping.
 */
while (ithd->it_need) {
/*
 * Service interrupts. If another interrupt
 * arrives while we are running, they will set
 * it_need to denote that we should make
 * another pass.
 */
ithd->it_need = 0;
alpha_wmb(); /* push out "it_need=0" */
for (ih = ithd->it_ih; ih != NULL; ih = ih->next) {
CTR5(KTR_INTR,
"ithd_loop pid %d ih=%p: %p(%p) flg=%x",
ithd->it_proc->p_pid, (void *)ih,
(void *)ih->handler, ih->argument,
ih->flags);
if ((ih->flags & INTR_MPSAFE) == 0)
mtx_enter(&Giant, MTX_DEF);
ih->handler(ih->argument);
if ((ih->flags & INTR_MPSAFE) == 0)
mtx_exit(&Giant, MTX_DEF);
}
/*
 * Reenable the source to give it a chance to
 * set it_need again.
 */
if (i->enable)
i->enable(i->vector);
}
/*
 * Processed all our interrupts. Now get the sched
 * lock. This may take a while and it_need may get
 * set again, so we have to check it again.
 */
mtx_enter(&sched_lock, MTX_SPIN);
if (!ithd->it_need) {
ithd->it_proc->p_stat = SWAIT; /* we're idle */
CTR1(KTR_INTR, "ithd_loop pid %d: done",
ithd->it_proc->p_pid);
mi_switch();
CTR1(KTR_INTR, "ithd_loop pid %d: resumed",
ithd->it_proc->p_pid);
}
mtx_exit(&sched_lock, MTX_SPIN);
}
}
/*
 * Clock device interrupt handler (a driver_intr_t, called from
 * interrupt() with the trap frame as its argument).  Bumps the
 * interrupt counters, forwards the tick to the platform clock routine,
 * and derives a statclock() tick from every 8th hardclock tick.
 */
static void
alpha_clock_interrupt(void *framep)
{
cnt.v_intr++;
#ifdef EVCNT_COUNTERS
clock_intr_evcnt.ev_count++;
#else
intrcnt[INTRCNT_CLOCK]++;
#endif
if (platform.clockintr){
(*platform.clockintr)((struct trapframe *)framep);
/* divide hz (1024) by 8 to get stathz (128) */
if((++schedclk2 & 0x7) == 0)
statclock((struct clockframe *)framep);
}
}

View File

@ -50,7 +50,7 @@ unsigned int tty_imask; /* XXX */
static void swi_net(void);
void (*netisrs[32]) __P((void));
swihand_t *ihandlers[32] = { /* software interrupts */
swihand_t *shandlers[NSWI] = { /* software interrupts */
swi_null, swi_net, swi_null, swi_null,
swi_null, softclock, swi_null, swi_null,
swi_null, swi_null, swi_null, swi_null,
@ -62,45 +62,6 @@ swihand_t *ihandlers[32] = { /* software interrupts */
};
u_int32_t netisr;
u_int32_t ipending;
u_int32_t idelayed;
#define getcpl() (alpha_pal_rdps() & ALPHA_PSL_IPL_MASK)
static void atomic_setbit(u_int32_t* p, u_int32_t bit)
{
u_int32_t temp;
__asm__ __volatile__ (
"1:\tldl_l %0,%2\n\t" /* load current mask value, asserting lock */
"or %3,%0,%0\n\t" /* add our bits */
"stl_c %0,%1\n\t" /* attempt to store */
"beq %0,2f\n\t" /* if the store failed, spin */
"br 3f\n" /* it worked, exit */
"2:\tbr 1b\n" /* *p not updated, loop */
"3:\tmb\n" /* it worked */
: "=&r"(temp), "=m" (*p)
: "m"(*p), "r"(bit)
: "memory");
}
static u_int32_t atomic_readandclear(u_int32_t* p)
{
u_int32_t v, temp;
__asm__ __volatile__ (
"wmb\n" /* ensure pending writes have drained */
"1:\tldl_l %0,%3\n\t" /* load current value, asserting lock */
"ldiq %1,0\n\t" /* value to store */
"stl_c %1,%2\n\t" /* attempt to store */
"beq %1,2f\n\t" /* if the store failed, spin */
"br 3f\n" /* it worked, exit */
"2:\tbr 1b\n" /* *p not updated, loop */
"3:\tmb\n" /* it worked */
: "=&r"(v), "=&r"(temp), "=m" (*p)
: "m"(*p)
: "memory");
return v;
}
void
swi_null()
@ -118,7 +79,7 @@ swi_generic()
static void
swi_net()
{
u_int32_t bits = atomic_readandclear(&netisr);
u_int32_t bits = atomic_readandclear_32(&netisr);
int i;
for (i = 0; i < 32; i++) {
@ -127,118 +88,3 @@ swi_net()
bits >>= 1;
}
}
/*
 * Drain and dispatch pending software interrupts.  Repeatedly performs
 * an atomic read-and-clear of the ipending bitmask and invokes the
 * handler registered in ihandlers[] for every set bit, holding Giant
 * and tracking the per-CPU interrupt nesting level throughout.
 */
void
do_sir()
{
u_int32_t pend;
int i;
mtx_enter(&Giant, MTX_DEF);
atomic_add_int(&PCPU_GET(intr_nesting_level), 1);
/* run the handlers at IPL SOFT (splsoft() drops the IPL to SOFT) */
splsoft();
while ((pend = atomic_readandclear(&ipending)) != 0) {
for (i = 0; pend && i < 32; i++) {
if (pend & (1 << i)) {
/* swi_generic entries are demultiplexed via swi_dispatcher() */
if (ihandlers[i] == swi_generic)
swi_dispatcher(i);
else
ihandlers[i]();
pend &= ~(1 << i);
}
}
}
atomic_subtract_int(&PCPU_GET(intr_nesting_level), 1);
mtx_exit(&Giant, MTX_DEF);
}
#define GENSET(name, ptr, bit) \
\
void name(void) \
{ \
atomic_setbit(ptr, bit); \
}
GENSET(setdelayed, &ipending, atomic_readandclear(&idelayed))
GENSET(setsofttty, &ipending, 1 << SWI_TTY)
GENSET(setsoftnet, &ipending, 1 << SWI_NET)
GENSET(setsoftcamnet, &ipending, 1 << SWI_CAMNET)
GENSET(setsoftcambio, &ipending, 1 << SWI_CAMBIO)
GENSET(setsoftvm, &ipending, 1 << SWI_VM)
GENSET(setsofttq, &ipending, 1 << SWI_TQ)
GENSET(setsoftclock, &ipending, 1 << SWI_CLOCK)
GENSET(schedsofttty, &idelayed, 1 << SWI_TTY)
GENSET(schedsoftnet, &idelayed, 1 << SWI_NET)
GENSET(schedsoftcamnet, &idelayed, 1 << SWI_CAMNET)
GENSET(schedsoftcambio, &idelayed, 1 << SWI_CAMBIO)
GENSET(schedsoftvm, &idelayed, 1 << SWI_VM)
GENSET(schedsofttq, &idelayed, 1 << SWI_TQ)
GENSET(schedsoftclock, &idelayed, 1 << SWI_CLOCK)
#ifdef INVARIANT_SUPPORT
#define SPLASSERT_IGNORE 0
#define SPLASSERT_LOG 1
#define SPLASSERT_PANIC 2
static int splassertmode = SPLASSERT_LOG;
SYSCTL_INT(_kern, OID_AUTO, splassertmode, CTLFLAG_RW,
&splassertmode, 0, "Set the mode of SPLASSERT");
static void
init_splassertmode(void *ignored)
{
TUNABLE_INT_FETCH("kern.splassertmode", 0, splassertmode);
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_splassertmode, NULL);
static void
splassertfail(char *str, const char *msg, char *name, int level)
{
switch (splassertmode) {
case SPLASSERT_IGNORE:
break;
case SPLASSERT_LOG:
printf(str, msg, name, level);
printf("\n");
break;
case SPLASSERT_PANIC:
panic(str, msg, name, level);
break;
}
}
#define GENSPLASSERT(name, pri) \
void \
name##assert(const char *msg) \
{ \
u_int cpl; \
\
cpl = getcpl(); \
if (cpl < ALPHA_PSL_IPL_##pri) \
splassertfail("%s: not %s, cpl == %#x", \
msg, __XSTRING(name) + 3, cpl); \
}
#else
#define GENSPLASSERT(name, pri)
#endif
GENSPLASSERT(splbio, IO)
GENSPLASSERT(splcam, IO)
GENSPLASSERT(splclock, CLOCK)
GENSPLASSERT(splhigh, HIGH)
GENSPLASSERT(splimp, IO)
GENSPLASSERT(splnet, IO)
GENSPLASSERT(splsoftcam, SOFT)
GENSPLASSERT(splsoftcambio, SOFT) /* XXX no corresponding spl for alpha */
GENSPLASSERT(splsoftcamnet, SOFT) /* XXX no corresponding spl for alpha */
GENSPLASSERT(splsoftclock, SOFT)
GENSPLASSERT(splsofttty, SOFT) /* XXX no corresponding spl for alpha */
GENSPLASSERT(splsoftvm, SOFT)
GENSPLASSERT(splsofttq, SOFT)
GENSPLASSERT(splstatclock, CLOCK)
GENSPLASSERT(spltty, IO)
GENSPLASSERT(splvm, IO)

View File

@ -114,8 +114,6 @@ LEAF(cpu_switch, 1)
mov a0, s0 /* save old curproc */
mov a1, s1 /* save old U-area */
ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
call_pal PAL_OSF1_swpipl
sw1:
br pv, Lcs1
Lcs1: LDGP(pv)
@ -197,9 +195,6 @@ Lcs7:
stl t1, sched_lock+MTX_RECURSE /* restore lock */
ldq t1, GD_CURPROC(globalp)
stq t1, sched_lock+MTX_LOCK
ldq a0, U_PCB_CONTEXT+(8 * 8)(t0) /* restore ipl */
and a0, ALPHA_PSL_IPL_MASK, a0
call_pal PAL_OSF1_swpipl
ldiq v0, 1 /* possible ret to savectx() */
RET
@ -230,7 +225,6 @@ LEAF(switch_trampoline, 0)
* exception_return: return from trap, exception, or syscall
*/
IMPORT(ipending, 4)
IMPORT(astpending, 4)
LEAF(exception_return, 1) /* XXX should be NESTED */
@ -241,14 +235,6 @@ Ler1: LDGP(pv)
and s1, ALPHA_PSL_IPL_MASK, t0 /* look at the saved IPL */
bne t0, Lrestoreregs /* != 0: can't do AST or SIR */
/* see if we can do an SIR */
ldl t1, ipending /* SIR pending? */
beq t1, Lchkast /* no, try an AST*/
/* We've got a SIR. */
CALL(do_sir) /* do the SIR; lowers IPL */
Lchkast:
and s1, ALPHA_PSL_USERMODE, t0 /* are we returning to user? */
beq t0, Lrestoreregs /* no: just return */

View File

@ -154,7 +154,6 @@ int badaddr __P((void *, size_t));
int badaddr_read __P((void *, size_t, void *));
void child_return __P((struct proc *p));
u_int64_t console_restart __P((u_int64_t, u_int64_t, u_int64_t));
void do_sir __P((void));
void dumpconf __P((void));
void exception_return __P((void)); /* MAGIC */
void frametoreg __P((struct trapframe *, struct reg *));

View File

@ -75,7 +75,9 @@ extern struct platform {
void (*pci_intr_map) __P((void *));
void (*pci_intr_disable) __P((int));
void (*pci_intr_enable) __P((int));
int (*pci_setup_ide_intr) __P((int chan, void (*fn)(void*), void *arg));
int (*pci_setup_ide_intr) __P((struct device *dev,
struct device *child,
int chan, void (*fn)(void*), void *arg));
int (*isa_setup_intr) __P((struct device *, struct device *,
struct resource *, int, void *, void *, void **));
int (*isa_teardown_intr) __P((struct device *, struct device *,

View File

@ -29,8 +29,10 @@
#ifndef _MACHINE_INTR_H_
#define _MACHINE_INTR_H_
int alpha_setup_intr(int vector, driver_intr_t *intr, void *arg,
void **cookiep, volatile long *cntp);
int alpha_setup_intr(const char *name, int vector,
driver_intr_t *handle, void *arg, int pri,
void **cookiep, volatile long *cntp,
void (*disable)(int), void (*enable)(int));
int alpha_teardown_intr(void *cookie);
void alpha_dispatch_intr(void *frame, unsigned long vector);

View File

@ -32,100 +32,8 @@
#include <machine/cpu.h> /* for pal inlines */
/*
* Software interrupt bit numbers
*/
#define SWI_TTY 0
#define SWI_NET 1
#define SWI_CAMNET 2
#define SWI_CAMBIO 3
#define SWI_VM 4
#define SWI_CLOCK 5
#define SWI_TQ 6
#define NSWI 32
#define NHWI 0
extern u_int32_t ipending;
#define getcpl() (alpha_pal_rdps() & ALPHA_PSL_IPL_MASK)
#define SPLDOWN(name, pri) \
\
static __inline int name(void) \
{ \
int s; \
s = alpha_pal_swpipl(ALPHA_PSL_IPL_##pri); \
return s; \
}
SPLDOWN(splsoftclock, SOFT)
SPLDOWN(splsoft, SOFT)
#define SPLUP(name, pri) \
\
static __inline int name(void) \
{ \
int cpl = getcpl(); \
if (ALPHA_PSL_IPL_##pri > cpl) { \
int s = alpha_pal_swpipl(ALPHA_PSL_IPL_##pri); \
return s; \
} else \
return cpl; \
}
SPLUP(splsoftcam, SOFT)
SPLUP(splsoftnet, SOFT)
SPLUP(splsoftvm, SOFT)
SPLUP(splsofttq, SOFT)
SPLUP(splnet, IO)
SPLUP(splbio, IO)
SPLUP(splcam, IO)
SPLUP(splimp, IO)
SPLUP(spltty, IO)
SPLUP(splvm, IO)
SPLUP(splclock, CLOCK)
SPLUP(splstatclock, CLOCK)
SPLUP(splhigh, HIGH)
static __inline void
spl0(void)
{
if (ipending)
do_sir(); /* lowers ipl to SOFT */
alpha_pal_swpipl(ALPHA_PSL_IPL_0);
}
static __inline void
splx(int s)
{
if (s)
alpha_pal_swpipl(s);
else
spl0();
}
extern void setdelayed(void);
extern void setsofttty(void);
extern void setsoftnet(void);
extern void setsoftcamnet(void);
extern void setsoftcambio(void);
extern void setsoftvm(void);
extern void setsofttq(void);
extern void setsoftclock(void);
extern void schedsofttty(void);
extern void schedsoftnet(void);
extern void schedsoftcamnet(void);
extern void schedsoftcambio(void);
extern void schedsoftvm(void);
extern void schedsofttq(void);
extern void schedsoftclock(void);
#if 0
/* XXX bogus */
extern unsigned cpl; /* current priority level mask */
#endif
#define HWHI 0
/*
* Interprocessor interrupts for SMP.
@ -142,4 +50,4 @@ void smp_ipi_all_but_self(u_int64_t ipi);
void smp_ipi_self(u_int64_t ipi);
void smp_handle_ipi(struct trapframe *frame);
#endif /* !_MACHINE_MD_VAR_H_ */
#endif /* !_MACHINE_IPL_H_ */

View File

@ -60,10 +60,12 @@ void alpha_register_pci_scsi __P((int bus, int slot, struct cam_sim *sim));
#ifdef _SYS_BUS_H_
struct resource *alpha_platform_alloc_ide_intr(int chan);
int alpha_platform_release_ide_intr(int chan, struct resource *res);
int alpha_platform_setup_ide_intr(struct resource *res,
int alpha_platform_setup_ide_intr(struct device *dev,
struct resource *res,
driver_intr_t *fn, void *arg,
void **cookiep);
int alpha_platform_teardown_ide_intr(struct resource *res, void *cookie);
int alpha_platform_teardown_ide_intr(struct device *dev,
struct resource *res, void *cookie);
int alpha_platform_pci_setup_intr(device_t dev, device_t child,
struct resource *irq, int flags,
driver_intr_t *intr, void *arg,

View File

@ -33,7 +33,9 @@
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/interrupt.h>
#include <isa/isareg.h>
#include <isa/isavar.h>
@ -315,9 +317,12 @@ isa_setup_intr(device_t dev, device_t child,
ii->arg = arg;
ii->irq = irq->r_start;
error = alpha_setup_intr(0x800 + (irq->r_start << 4),
isa_handle_intr, ii, &ii->ih,
&intrcnt[INTRCNT_ISA_IRQ + irq->r_start]);
error = alpha_setup_intr(
device_get_nameunit(child ? child : dev),
0x800 + (irq->r_start << 4), isa_handle_intr, ii,
ithread_priority(flags), &ii->ih,
&intrcnt[INTRCNT_ISA_IRQ + irq->r_start],
NULL, NULL);
if (error) {
free(ii, M_DEVBUF);
return error;

View File

@ -33,7 +33,9 @@
#include <sys/malloc.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/interrupt.h>
#include <machine/swiz.h>
#include <machine/intr.h>
@ -256,12 +258,74 @@ mcpcia_disable_intr(struct mcpcia_softc *sc, int irq)
alpha_mb();
}
/*
 * Disable (mask) the MCPCIA interrupt source for "vector" by clearing
 * its bit in the owning bridge's INT_MASK0 register.  The NCR SCSI
 * vector is special-cased; all other vectors map back to an (irq, mid)
 * pair derived from the vector number.
 */
static void
mcpcia_disable_intr_vec(int vector)
{
int gid, mid, irq;
vm_offset_t p;
/* NOTE(review): debug printf left in an interrupt path; remove? */
printf("D<%03x>", vector);
if (vector == MCPCIA_VEC_NCR) {
mid = 5;
irq = 16;
} else {
irq = ((vector - 0x900) >> 4) - 8;
if (irq < 32)
mid = 4;
else {
/* irqs 32 and up live in the mid 5 bridge's mask register */
irq -= 32;
mid = 5;
}
}
gid = MCBUS_GID_FROM_INSTANCE(0);
/* build the MCBUS I/O-space address of this bridge's INT_MASK0 */
p = (MCBUS_IOSPACE |
(((u_int64_t) gid) << MCBUS_GID_SHIFT) |
(((u_int64_t) mid) << MCBUS_MID_SHIFT) |
MCPCIA_PCI_BRIDGE |
_MCPCIA_INT_MASK0);
alpha_mb();
REGVAL(p) &= ~(1 << irq);
alpha_mb();
}
/*
 * Enable (unmask) the MCPCIA interrupt source for "vector" by setting
 * its bit in the owning bridge's INT_MASK0 register.  Mirrors
 * mcpcia_disable_intr_vec(): the NCR SCSI vector is special-cased and
 * all other vectors map back to an (irq, mid) pair.
 */
static void
mcpcia_enable_intr_vec(int vector)
{
	int gid, mid, irq;
	vm_offset_t p;

	/* NOTE(review): debug printf left in an interrupt path; remove? */
	printf("E<%03x>", vector);
	if (vector == MCPCIA_VEC_NCR) {
		mid = 5;
		irq = 16;
	} else {
		irq = ((vector - 0x900) >> 4) - 8;
		if (irq < 32)
			mid = 4;
		else {
			/*
			 * Rebase the irq into the mid 5 bridge's mask
			 * register, exactly as the disable routine does.
			 * Without this, "1 << irq" shifts by >= 32
			 * (undefined behavior) and the wrong mask bit
			 * is computed.
			 */
			irq -= 32;
			mid = 5;
		}
	}
	gid = MCBUS_GID_FROM_INSTANCE(0);
	/* build the MCBUS I/O-space address of this bridge's INT_MASK0 */
	p = (MCBUS_IOSPACE |
	    (((u_int64_t) gid) << MCBUS_GID_SHIFT) |
	    (((u_int64_t) mid) << MCBUS_MID_SHIFT) |
	    MCPCIA_PCI_BRIDGE |
	    _MCPCIA_INT_MASK0);
	alpha_mb();
	REGVAL(p) |= (1 << irq);
	alpha_mb();
}
static int
mcpcia_setup_intr(device_t dev, device_t child, struct resource *ir, int flags,
driver_intr_t *intr, void *arg, void **cp)
{
struct mcpcia_softc *sc = MCPCIA_SOFTC(dev);
int slot, mid, gid, birq, irq, error, intpin, h;
int slot, mid, gid, birq, irq, error, intpin, h, pri;
intpin = pci_get_intpin(child);
if (intpin == 0) {
@ -290,7 +354,7 @@ mcpcia_setup_intr(device_t dev, device_t child, struct resource *ir, int flags,
} else if (slot >= 2 && slot <= 5) {
irq = (slot - 2) * 4;
} else {
device_printf(child, "wierd slot number (%d); can't make irq\n",
device_printf(child, "weird slot number (%d); can't make irq\n",
slot);
return (ENXIO);
}
@ -310,7 +374,10 @@ mcpcia_setup_intr(device_t dev, device_t child, struct resource *ir, int flags,
((intpin - 1) * MCPCIA_VECWIDTH_PER_INTPIN);
}
birq = irq + INTRCNT_KN300_IRQ;
error = alpha_setup_intr(h, intr, arg, cp, &intrcnt[birq]);
pri = ithread_priority(flags);
error = alpha_setup_intr(device_get_nameunit(child ? child : dev), h,
intr, arg, pri, cp, &intrcnt[birq],
mcpcia_disable_intr_vec, mcpcia_enable_intr_vec);
if (error)
return error;
mcpcia_enable_intr(sc, irq);

View File

@ -62,7 +62,9 @@
#include <sys/malloc.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/interrupt.h>
#include <alpha/pci/apecsreg.h>
#include <alpha/pci/apecsvar.h>
@ -334,6 +336,20 @@ apecs_release_resource(device_t bus, device_t child, int type, int rid,
return pci_release_resource(bus, child, type, rid, r);
}
/*
 * Mask the interrupt source behind "vector".  Vectors are handed out
 * as 0x900 + (irq << 4) in apecs_setup_intr(), so undo that mapping
 * and defer to the platform-specific disable hook.
 */
static void
apecs_disable_intr(int vector)
{
	platform.pci_intr_disable((vector - 0x900) >> 4);
}
/*
 * Unmask the interrupt source behind "vector".  Inverse of
 * apecs_disable_intr(): recover the irq from the 0x900 + (irq << 4)
 * vector encoding and defer to the platform-specific enable hook.
 */
static void
apecs_enable_intr(int vector)
{
	platform.pci_intr_enable((vector - 0x900) >> 4);
}
static int
apecs_setup_intr(device_t dev, device_t child,
struct resource *irq, int flags,
@ -353,9 +369,11 @@ apecs_setup_intr(device_t dev, device_t child,
if (error)
return error;
error = alpha_setup_intr(0x900 + (irq->r_start << 4),
intr, arg, cookiep,
&intrcnt[INTRCNT_EB64PLUS_IRQ + irq->r_start]);
error = alpha_setup_intr(device_get_nameunit(child ? child : dev),
0x900 + (irq->r_start << 4), intr, arg,
ithread_priority(flags), cookiep,
&intrcnt[INTRCNT_EB64PLUS_IRQ + irq->r_start],
apecs_disable_intr, apecs_enable_intr);
if (error)
return error;

View File

@ -98,7 +98,9 @@
#include <sys/malloc.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/interrupt.h>
#include <alpha/pci/ciareg.h>
#include <alpha/pci/ciavar.h>
@ -380,6 +382,7 @@ static int
cia_probe(device_t dev)
{
uintptr_t use_bwx = 1;
device_t child;
if (cia0)
return ENXIO;
@ -428,9 +431,9 @@ cia_probe(device_t dev)
}
}
device_add_child(dev, "pcib", 0);
device_set_ivars(dev, (void *)use_bwx);
child = device_add_child(dev, "pcib", 0);
chipset_bwx = use_bwx = (use_bwx == (uintptr_t) 1);
device_set_ivars(child, (void *)use_bwx);
return 0;
}
@ -511,6 +514,20 @@ cia_attach(device_t dev)
return 0;
}
/*
 * Disable the PCI interrupt behind "vector".  Vectors are assigned as
 * 0x900 + (irq << 4) (see cia_setup_intr), so invert that mapping and
 * hand the irq to the platform-specific disable hook.
 */
static void
cia_disable_intr(int vector)
{
int irq = (vector - 0x900) >> 4;
platform.pci_intr_disable(irq);
}
/*
 * Re-enable the PCI interrupt behind "vector"; inverse of
 * cia_disable_intr() using the same 0x900 + (irq << 4) encoding.
 */
static void
cia_enable_intr(int vector)
{
int irq = (vector - 0x900) >> 4;
platform.pci_intr_enable(irq);
}
static int
cia_setup_intr(device_t dev, device_t child,
struct resource *irq, int flags,
@ -522,9 +539,12 @@ cia_setup_intr(device_t dev, device_t child,
if (error)
return error;
error = alpha_setup_intr(0x900 + (irq->r_start << 4),
intr, arg, cookiep,
&intrcnt[INTRCNT_EB164_IRQ + irq->r_start]);
error = alpha_setup_intr(
device_get_nameunit(child ? child : dev),
0x900 + (irq->r_start << 4), intr, arg,
ithread_priority(flags), cookiep,
&intrcnt[INTRCNT_EB164_IRQ + irq->r_start],
cia_disable_intr, cia_enable_intr);
if (error)
return error;

View File

@ -31,6 +31,7 @@
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/interrupt.h>
@ -97,17 +98,19 @@ alpha_platform_release_ide_intr(int chan, struct resource *res)
}
int
alpha_platform_setup_ide_intr(struct resource *res,
alpha_platform_setup_ide_intr(device_t dev,
struct resource *res,
driver_intr_t *fn, void *arg,
void **cookiep)
{
return isa_setup_intr(0, 0, res, INTR_TYPE_BIO, fn, arg, cookiep);
return isa_setup_intr(0, dev, res, INTR_TYPE_BIO, fn, arg, cookiep);
}
int
alpha_platform_teardown_ide_intr(struct resource *res, void *cookie)
alpha_platform_teardown_ide_intr(device_t dev,
struct resource *res, void *cookie)
{
return isa_teardown_intr(0, 0, res, cookie);
return isa_teardown_intr(0, dev, res, cookie);
}
#else
struct resource *

View File

@ -37,7 +37,9 @@
#include <sys/malloc.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/interrupt.h>
#include <alpha/pci/t2reg.h>
#include <alpha/pci/t2var.h>
@ -326,6 +328,36 @@ static const char irq_to_mask[40] = {
0, 1, 2, 3, 4, 5, 6, 7 /* PCI 0-7 XXX */
};
/*
 * Mask a T2 interrupt source.  The mask bit number is recovered from
 * the vector (assumed 0x900 + (bit << 4), matching the other chipsets
 * -- TODO confirm against the vector assignment in t2_setup_intr),
 * the shadow copy of the ICU mask bits is updated, and the byte
 * covering that bit is written back to the appropriate slave ICU.
 */
static void
t2_disable_intr(int vector)
{
int mask = (vector - 0x900) >> 4;
t2_shadow_mask |= (1UL << mask);
/* each slave ICU holds 8 mask bits: 0-7, 8-15, and 16 up */
if (mask <= 7)
outb(SLAVE0_ICU, t2_shadow_mask);
else if (mask <= 15)
outb(SLAVE1_ICU, t2_shadow_mask >> 8);
else
outb(SLAVE2_ICU, t2_shadow_mask >> 16);
}
/*
 * Unmask a T2 interrupt source: clear the bit in the shadow mask and
 * write back the byte covering it to the appropriate slave ICU.
 * Inverse of t2_disable_intr().
 */
static void
t2_enable_intr(int vector)
{
int mask = (vector - 0x900) >> 4;
t2_shadow_mask &= ~(1UL << mask);
/* each slave ICU holds 8 mask bits: 0-7, 8-15, and 16 up */
if (mask <= 7)
outb(SLAVE0_ICU, t2_shadow_mask);
else if (mask <= 15)
outb(SLAVE1_ICU, t2_shadow_mask >> 8);
else
outb(SLAVE2_ICU, t2_shadow_mask >> 16);
}
static int
t2_setup_intr(device_t dev, device_t child,
struct resource *irq, int flags,
@ -340,9 +372,10 @@ t2_setup_intr(device_t dev, device_t child,
if (error)
return error;
error = alpha_setup_intr(vector,
intr, arg, cookiep,
&intrcnt[irq->r_start]);
error = alpha_setup_intr(device_get_nameunit(child ? child : dev),
vector, intr, arg, ithread_priority(flags), cookiep,
&intrcnt[irq->r_start],
t2_disable_intr, t2_enable_intr);
if (error)
return error;

View File

@ -34,7 +34,9 @@
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <pci/pcivar.h>
@ -302,6 +304,20 @@ tsunami_attach(device_t dev)
return 0;
}
/*
 * Mask the hardware source behind an interrupt vector.  Vectors are
 * assigned as 0x900 + (irq << 4) in tsunami_setup_intr(), so invert
 * that mapping and hand the irq to the platform disable hook.
 */
static void
tsunami_disable_intr_vec(int vector)
{
	platform.pci_intr_disable((vector - 0x900) >> 4);
}
/*
 * Unmask the hardware source behind an interrupt vector; inverse of
 * tsunami_disable_intr_vec() using the same 0x900 + (irq << 4)
 * vector encoding.
 */
static void
tsunami_enable_intr_vec(int vector)
{
	platform.pci_intr_enable((vector - 0x900) >> 4);
}
static int
tsunami_setup_intr(device_t dev, device_t child,
struct resource *irq, int flags,
@ -313,9 +329,11 @@ tsunami_setup_intr(device_t dev, device_t child,
if (error)
return error;
error = alpha_setup_intr(0x900 + (irq->r_start << 4),
intr, arg, cookiep,
&intrcnt[INTRCNT_EB164_IRQ + irq->r_start]);
error = alpha_setup_intr(device_get_nameunit(child ? child : dev),
0x900 + (irq->r_start << 4), intr, arg,
ithread_priority(flags), cookiep,
&intrcnt[INTRCNT_EB164_IRQ + irq->r_start],
tsunami_disable_intr_vec, tsunami_enable_intr_vec);
if (error)
return error;

View File

@ -66,7 +66,9 @@
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/interrupt.h>
#include <machine/swiz.h>
#include <machine/intr.h>
@ -377,7 +379,7 @@ dwlpx_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
driver_intr_t *intr, void *arg, void **cookiep)
{
struct dwlpx_softc *sc = DWLPX_SOFTC(dev);
int slot, ionode, hose, error, vector, intpin;
int slot, ionode, hose, error, vector, intpin, pri;
error = rman_activate_resource(irq);
if (error)
@ -389,8 +391,9 @@ dwlpx_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
hose = sc->bushose & 0x3;
vector = DWLPX_MVEC(ionode, hose, slot);
error = alpha_setup_intr(vector, intr, arg, cookiep,
&intrcnt[INTRCNT_KN8AE_IRQ]);
pri = ithread_priority(flags);
error = alpha_setup_intr(device_get_nameunit(child ? child : dev),
vector, intr, arg, pri, cookiep, &intrcnt[INTRCNT_KN8AE_IRQ], NULL, NULL);
if (error)
return error;
dwlpx_enadis_intr(vector, intpin, 1);

View File

@ -628,30 +628,6 @@ _Xrendezvous:
.data
/*
* Addresses of interrupt handlers.
* XresumeNN: Resumption addresses for HWIs.
*/
.globl _ihandlers
_ihandlers:
/*
* used by:
* ipl.s: doreti_unpend
*/
.long Xresume0, Xresume1, Xresume2, Xresume3
.long Xresume4, Xresume5, Xresume6, Xresume7
.long Xresume8, Xresume9, Xresume10, Xresume11
.long Xresume12, Xresume13, Xresume14, Xresume15
.long Xresume16, Xresume17, Xresume18, Xresume19
.long Xresume20, Xresume21, Xresume22, Xresume23
/*
* used by:
* ipl.s: doreti_unpend
* apic_ipl.s: splz_unpend
*/
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _softclock
#if 0
/* active flag for lazy masking */
iactive:

View File

@ -42,7 +42,6 @@
#include <sys/rtprio.h>
#include <machine/asmacros.h>
#include <machine/ipl.h>
#ifdef SMP
#include <machine/pmap.h>

View File

@ -36,7 +36,7 @@
#include "npx.h"
#include <machine/asmacros.h>
#include <machine/ipl.h>
#include <sys/ipl.h>
#include <machine/lock.h>
#include <machine/mutex.h>
#include <machine/psl.h>

View File

@ -36,7 +36,7 @@
#include "npx.h"
#include <machine/asmacros.h>
#include <machine/ipl.h>
#include <sys/ipl.h>
#include <machine/lock.h>
#include <machine/mutex.h>
#include <machine/psl.h>

View File

@ -136,7 +136,7 @@ SYSCTL_INT(_hw,HW_FLOATINGPT, floatingpoint,
"Floatingpoint instructions executed in hardware");
#ifndef SMP
static u_int npx0_imask = SWI_LOW_MASK;
static u_int npx0_imask = 0;
static struct gate_descriptor npx_idt_probeintr;
static int npx_intrno;
static volatile u_int npx_intrs_while_probing;

View File

@ -42,7 +42,6 @@
#include <sys/rtprio.h>
#include <machine/asmacros.h>
#include <machine/ipl.h>
#ifdef SMP
#include <machine/pmap.h>

View File

@ -130,9 +130,7 @@ static void setup_8254_mixed_mode __P((void));
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
volatile u_int idelayed;
int statclock_disable;
u_int stat_imask = SWI_LOW_MASK;
#ifndef TIMER_FREQ
#define TIMER_FREQ 1193182
#endif
@ -143,9 +141,6 @@ int tsc_is_broken;
int wall_cmos_clock; /* wall CMOS clock assumed if != 0 */
static int beeping = 0;
#if 0
static u_int clk_imask = HWI_MASK | SWI_MASK;
#endif
static const u_char daysinmonth[] = {31,28,31,30,31,30,31,31,30,31,30,31};
static u_int hardclock_max_count;
static u_int32_t i8254_lastcount;
@ -1005,7 +1000,6 @@ cpu_initclocks()
* flag which would normally cause the RTC to generate
* interrupts.
*/
stat_imask = HWI_MASK | SWI_MASK;
rtc_statusb = RTCSB_24HR;
} else {
/* Setting stathz to nonzero early helps avoid races. */

View File

@ -95,6 +95,7 @@ u_long kvtop __P((void *addr));
void setidt __P((int idx, alias_for_inthand_t *func, int typ, int dpl,
int selec));
void swi_vm __P((void));
void swi_net __P((void));
void userconfig __P((void));
int user_dbreg_trap __P((void));
int vm_page_zero_idle __P((void));

View File

@ -211,16 +211,3 @@ MCOUNT_LABEL(bintr)
INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
MCOUNT_LABEL(eintr)
.data
.globl _ihandlers
_ihandlers: /* addresses of interrupt handlers */
/* actually resumption addresses for HWI's */
.long Xresume0, Xresume1, Xresume2, Xresume3
.long Xresume4, Xresume5, Xresume6, Xresume7
.long Xresume8, Xresume9, Xresume10, Xresume11
.long Xresume12, Xresume13, Xresume14, Xresume15
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _softclock
.text

View File

@ -130,9 +130,7 @@ static void setup_8254_mixed_mode __P((void));
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
volatile u_int idelayed;
int statclock_disable;
u_int stat_imask = SWI_LOW_MASK;
#ifndef TIMER_FREQ
#define TIMER_FREQ 1193182
#endif
@ -143,9 +141,6 @@ int tsc_is_broken;
int wall_cmos_clock; /* wall CMOS clock assumed if != 0 */
static int beeping = 0;
#if 0
static u_int clk_imask = HWI_MASK | SWI_MASK;
#endif
static const u_char daysinmonth[] = {31,28,31,30,31,30,31,31,30,31,30,31};
static u_int hardclock_max_count;
static u_int32_t i8254_lastcount;
@ -1005,7 +1000,6 @@ cpu_initclocks()
* flag which would normally cause the RTC to generate
* interrupts.
*/
stat_imask = HWI_MASK | SWI_MASK;
rtc_statusb = RTCSB_24HR;
} else {
/* Setting stathz to nonzero early helps avoid races. */

View File

@ -211,16 +211,3 @@ MCOUNT_LABEL(bintr)
INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
MCOUNT_LABEL(eintr)
.data
.globl _ihandlers
_ihandlers: /* addresses of interrupt handlers */
/* actually resumption addresses for HWI's */
.long Xresume0, Xresume1, Xresume2, Xresume3
.long Xresume4, Xresume5, Xresume6, Xresume7
.long Xresume8, Xresume9, Xresume10, Xresume11
.long Xresume12, Xresume13, Xresume14, Xresume15
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _softclock
.text

View File

@ -211,16 +211,3 @@ MCOUNT_LABEL(bintr)
INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
MCOUNT_LABEL(eintr)
.data
.globl _ihandlers
_ihandlers: /* addresses of interrupt handlers */
/* actually resumption addresses for HWI's */
.long Xresume0, Xresume1, Xresume2, Xresume3
.long Xresume4, Xresume5, Xresume6, Xresume7
.long Xresume8, Xresume9, Xresume10, Xresume11
.long Xresume12, Xresume13, Xresume14, Xresume15
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _softclock
.text

View File

@ -90,13 +90,12 @@
#endif
/*
* Per-interrupt data. We consider the soft interrupt to be a special
* case, so these arrays have NHWI + NSWI entries, not ICU_LEN.
* Per-interrupt data.
*/
u_long *intr_countp[NHWI + NSWI]; /* pointers to interrupt counters */
driver_intr_t *intr_handler[NHWI + NSWI]; /* first level interrupt handler */
struct ithd *ithds[NHWI + NSWI]; /* real interrupt handler */
void *intr_unit[NHWI + NSWI];
u_long *intr_countp[ICU_LEN]; /* pointers to interrupt counters */
driver_intr_t *intr_handler[ICU_LEN]; /* first level interrupt handler */
struct ithd *ithds[ICU_LEN]; /* real interrupt handler */
void *intr_unit[ICU_LEN];
static inthand_t *fastintr[ICU_LEN] = {
&IDTVEC(fastintr0), &IDTVEC(fastintr1),

View File

@ -214,8 +214,6 @@ int icu_setup __P((int intr, driver_intr_t *func, void *arg,
int flags));
int icu_unset __P((int intr, driver_intr_t *handler));
intrmask_t splq __P((intrmask_t mask));
/*
* WARNING: These are internal functions and not to be used by device drivers!
* They are subject to change without notice.

View File

@ -87,15 +87,9 @@
#include <machine/mutex.h>
#include <sys/ktr.h>
#include <machine/cpu.h>
#if 0
#include <ddb/ddb.h>
#endif
u_long softintrcnt [NSWI];
static u_int straycount[NHWI];
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
#define MAX_STRAY_LOG 5
/*
@ -115,8 +109,7 @@ sched_ithd(void *cookie)
* argument for counting hardware interrupts when they're
* processed too.
*/
if (irq < NHWI) /* real interrupt, */
atomic_add_long(intr_countp[irq], 1); /* one more for this IRQ */
atomic_add_long(intr_countp[irq], 1); /* one more for this IRQ */
atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */
/*
@ -124,47 +117,19 @@ sched_ithd(void *cookie)
* this IRQ, log it as a stray interrupt.
*/
if (ir == NULL || ir->it_proc == NULL) {
if (irq < NHWI) {
if (straycount[irq] < MAX_STRAY_LOG) {
printf("stray irq %d\n", irq);
if (++straycount[irq] == MAX_STRAY_LOG)
printf("got %d stray irq %d's: "
"not logging anymore\n",
MAX_STRAY_LOG, irq);
}
return;
if (straycount[irq] < MAX_STRAY_LOG) {
printf("stray irq %d\n", irq);
if (++straycount[irq] == MAX_STRAY_LOG)
printf(
"got %d stray irq %d's: not logging anymore\n",
MAX_STRAY_LOG, irq);
}
panic("sched_ithd: ithds[%d] == NULL", irq);
return;
}
CTR3(KTR_INTR, "sched_ithd pid %d(%s) need=%d",
ir->it_proc->p_pid, ir->it_proc->p_comm, ir->it_need);
#if 0
/*
* If we are in the debugger, we can't use interrupt threads to
* process interrupts since the threads are scheduled. Instead,
* call the interrupt handlers directly. This should be able to
* go away once we have light-weight interrupt handlers.
*/
if (db_active) {
struct intrec *ih; /* and our interrupt handler chain */
#if 0
membar_unlock(); /* push out "it_need=0" */
#endif
for (ih = ir->it_ih; ih != NULL; ih = ih->next) {
if ((ih->flags & INTR_MPSAFE) == 0)
mtx_enter(&Giant, MTX_DEF);
ih->handler(ih->argument);
if ((ih->flags & INTR_MPSAFE) == 0)
mtx_exit(&Giant, MTX_DEF);
}
INTREN (1 << ir->irq); /* reset the mask bit */
return;
}
#endif
/*
* Set it_need so that if the thread is already running but close
* to done, it will do another go-round. Then get the sched lock
@ -183,18 +148,13 @@ sched_ithd(void *cookie)
aston();
}
else {
if (irq < NHWI && (irq & 7) != 0)
CTR3(KTR_INTR, "sched_ithd %d: it_need %d, state %d",
ir->it_proc->p_pid,
ir->it_need,
ir->it_proc->p_stat );
}
mtx_exit(&sched_lock, MTX_SPIN);
#if 0
aston(); /* ??? check priorities first? */
#else
need_resched();
#endif
}
/*
@ -266,113 +226,3 @@ ithd_loop(void *dummy)
mtx_exit(&sched_lock, MTX_SPIN);
}
}
/*
* Start soft interrupt thread.
*/
void
start_softintr(void *dummy)
{
int error;
struct proc *p;
struct ithd *softintr; /* descriptor for the "IRQ" */
struct intrec *idesc; /* descriptor for this handler */
char *name = "sintr"; /* name for idesc */
int i;
if (ithds[SOFTINTR]) { /* we already have a thread */
printf("start_softintr: already running");
return;
}
/* first handler for this irq. */
softintr = malloc(sizeof (struct ithd), M_DEVBUF, M_WAITOK);
if (softintr == NULL)
panic ("Can't create soft interrupt thread");
bzero(softintr, sizeof(struct ithd));
softintr->irq = SOFTINTR;
ithds[SOFTINTR] = softintr;
error = kthread_create(intr_soft, NULL, &p,
RFSTOPPED | RFHIGHPID, "softinterrupt");
if (error)
panic("start_softintr: kthread_create error %d\n", error);
p->p_rtprio.type = RTP_PRIO_ITHREAD;
p->p_rtprio.prio = PI_SOFT; /* soft interrupt */
p->p_stat = SWAIT; /* we're idle */
p->p_flag |= P_NOLOAD;
/* Put in linkages. */
softintr->it_proc = p;
p->p_ithd = softintr; /* reverse link */
idesc = malloc(sizeof (struct intrec), M_DEVBUF, M_WAITOK);
if (idesc == NULL)
panic ("Can't create soft interrupt thread");
bzero(idesc, sizeof (struct intrec));
idesc->ithd = softintr;
idesc->name = malloc(strlen(name) + 1, M_DEVBUF, M_WAITOK);
if (idesc->name == NULL)
panic ("Can't create soft interrupt thread");
strcpy(idesc->name, name);
for (i = NHWI; i < NHWI + NSWI; i++)
intr_countp[i] = &softintrcnt [i - NHWI];
}
/*
* Software interrupt process code.
*/
void
intr_soft(void *dummy)
{
int i;
struct ithd *me; /* our thread context */
me = curproc->p_ithd; /* point to myself */
/* Main loop */
for (;;) {
#if 0
CTR3(KTR_INTR, "intr_soft pid %d(%s) need=%d",
me->it_proc->p_pid, me->it_proc->p_comm,
me->it_need);
#endif
/*
* Service interrupts. If another interrupt arrives
* while we are running, they will set it_need to
* denote that we should make another pass.
*/
me->it_need = 0;
while ((i = ffs(spending))) {
i--;
atomic_add_long(intr_countp[i], 1);
spending &= ~ (1 << i);
mtx_enter(&Giant, MTX_DEF);
if (ihandlers[i] == swi_generic)
swi_dispatcher(i);
else
(ihandlers[i])();
mtx_exit(&Giant, MTX_DEF);
}
/*
* Processed all our interrupts. Now get the sched
* lock. This may take a while and it_need may get
* set again, so we have to check it again.
*/
mtx_enter(&sched_lock, MTX_SPIN);
if (!me->it_need) {
#if 0
CTR1(KTR_INTR, "intr_soft pid %d: done",
me->it_proc->p_pid);
#endif
me->it_proc->p_stat = SWAIT; /* we're idle */
mi_switch();
#if 0
CTR1(KTR_INTR, "intr_soft pid %d: resumed",
me->it_proc->p_pid);
#endif
}
mtx_exit(&sched_lock, MTX_SPIN);
}
}

View File

@ -90,13 +90,12 @@
#endif
/*
* Per-interrupt data. We consider the soft interrupt to be a special
* case, so these arrays have NHWI + NSWI entries, not ICU_LEN.
* Per-interrupt data.
*/
u_long *intr_countp[NHWI + NSWI]; /* pointers to interrupt counters */
driver_intr_t *intr_handler[NHWI + NSWI]; /* first level interrupt handler */
struct ithd *ithds[NHWI + NSWI]; /* real interrupt handler */
void *intr_unit[NHWI + NSWI];
u_long *intr_countp[ICU_LEN]; /* pointers to interrupt counters */
driver_intr_t *intr_handler[ICU_LEN]; /* first level interrupt handler */
struct ithd *ithds[ICU_LEN]; /* real interrupt handler */
void *intr_unit[ICU_LEN];
static inthand_t *fastintr[ICU_LEN] = {
&IDTVEC(fastintr0), &IDTVEC(fastintr1),

View File

@ -136,7 +136,7 @@ SYSCTL_INT(_hw,HW_FLOATINGPT, floatingpoint,
"Floatingpoint instructions executed in hardware");
#ifndef SMP
static u_int npx0_imask = SWI_LOW_MASK;
static u_int npx0_imask = 0;
static struct gate_descriptor npx_idt_probeintr;
static int npx_intrno;
static volatile u_int npx_intrs_while_probing;

View File

@ -46,7 +46,7 @@
#endif
#include <machine/clock.h>
#include <machine/ipl.h>
#include <sys/ipl.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>

View File

@ -666,7 +666,7 @@ ata_pci_setup_intr(device_t dev, device_t child, struct resource *irq,
{
if (ATA_MASTERDEV(dev)) {
#ifdef __alpha__
return alpha_platform_setup_ide_intr(irq, intr, arg, cookiep);
return alpha_platform_setup_ide_intr(child, irq, intr, arg, cookiep);
#else
return BUS_SETUP_INTR(device_get_parent(dev), child, irq,
flags, intr, arg, cookiep);
@ -683,7 +683,7 @@ ata_pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
{
if (ATA_MASTERDEV(dev)) {
#ifdef __alpha__
return alpha_platform_teardown_ide_intr(irq, cookie);
return alpha_platform_teardown_ide_intr(child, irq, cookie);
#else
return BUS_TEARDOWN_INTR(device_get_parent(dev), child, irq, cookie);
#endif

View File

@ -64,6 +64,7 @@
#include <sys/dkstat.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/ipl.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
@ -83,7 +84,6 @@
#include <machine/lock.h>
#include <machine/clock.h>
#include <machine/ipl.h>
#ifndef SMP
#include <machine/lock.h>
#endif

View File

@ -628,30 +628,6 @@ _Xrendezvous:
.data
/*
* Addresses of interrupt handlers.
* XresumeNN: Resumption addresses for HWIs.
*/
.globl _ihandlers
_ihandlers:
/*
* used by:
* ipl.s: doreti_unpend
*/
.long Xresume0, Xresume1, Xresume2, Xresume3
.long Xresume4, Xresume5, Xresume6, Xresume7
.long Xresume8, Xresume9, Xresume10, Xresume11
.long Xresume12, Xresume13, Xresume14, Xresume15
.long Xresume16, Xresume17, Xresume18, Xresume19
.long Xresume20, Xresume21, Xresume22, Xresume23
/*
* used by:
* ipl.s: doreti_unpend
* apic_ipl.s: splz_unpend
*/
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _softclock
#if 0
/* active flag for lazy masking */
iactive:

View File

@ -36,7 +36,7 @@
#include "npx.h"
#include <machine/asmacros.h>
#include <machine/ipl.h>
#include <sys/ipl.h>
#include <machine/lock.h>
#include <machine/mutex.h>
#include <machine/psl.h>

View File

@ -42,7 +42,6 @@
#include <sys/rtprio.h>
#include <machine/asmacros.h>
#include <machine/ipl.h>
#ifdef SMP
#include <machine/pmap.h>

View File

@ -130,9 +130,7 @@ static void setup_8254_mixed_mode __P((void));
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
volatile u_int idelayed;
int statclock_disable;
u_int stat_imask = SWI_LOW_MASK;
#ifndef TIMER_FREQ
#define TIMER_FREQ 1193182
#endif
@ -143,9 +141,6 @@ int tsc_is_broken;
int wall_cmos_clock; /* wall CMOS clock assumed if != 0 */
static int beeping = 0;
#if 0
static u_int clk_imask = HWI_MASK | SWI_MASK;
#endif
static const u_char daysinmonth[] = {31,28,31,30,31,30,31,31,30,31,30,31};
static u_int hardclock_max_count;
static u_int32_t i8254_lastcount;
@ -1005,7 +1000,6 @@ cpu_initclocks()
* flag which would normally cause the RTC to generate
* interrupts.
*/
stat_imask = HWI_MASK | SWI_MASK;
rtc_statusb = RTCSB_24HR;
} else {
/* Setting stathz to nonzero early helps avoid races. */

View File

@ -225,7 +225,6 @@
#define _get_syscall_lock get_syscall_lock
#define _Giant Giant
#define _idle idle
#define _ihandlers ihandlers
#define _imen imen
#define _imen_lock imen_lock
#define _in_vm86call in_vm86call
@ -309,6 +308,7 @@
#define _svr4_szsigcode svr4_szsigcode
#define _swi_dispatcher swi_dispatcher
#define _swi_generic swi_generic
#define _swi_net swi_net
#define _swi_null swi_null
#define _swi_vm swi_vm
#define _syscall2 syscall2

View File

@ -42,87 +42,12 @@
#include <i386/isa/icu_ipl.h>
#endif
/*
* Software interrupt level. We treat the software interrupt as a
* single interrupt at a fictive hardware interrupt level.
*/
#define SOFTINTR (NHWI + 0)
/*
* Software interrupt bit numbers in priority order. The priority only
* determines which swi will be dispatched next; a higher priority swi
* may be dispatched when a nested h/w interrupt handler returns.
*
* XXX FIXME: There's no longer a relation between the SWIs and the
* HWIs, so it makes more sense for these values to start at 0, but
* there's lots of code which expects them to start at NHWI.
*/
#define SWI_TTY (NHWI + 0)
#define SWI_NET (NHWI + 1)
#define SWI_CAMNET (NHWI + 2)
#define SWI_CAMBIO (NHWI + 3)
#define SWI_VM (NHWI + 4)
#define SWI_TQ (NHWI + 5)
#define SWI_CLOCK (NHWI + 6)
#define NSWI 7
/*
* Corresponding interrupt-pending bits for ipending.
*/
#define SWI_TTY_PENDING (1 << SWI_TTY)
#define SWI_NET_PENDING (1 << SWI_NET)
#define SWI_CAMNET_PENDING (1 << SWI_CAMNET)
#define SWI_CAMBIO_PENDING (1 << SWI_CAMBIO)
#define SWI_VM_PENDING (1 << SWI_VM)
#define SWI_TQ_PENDING (1 << SWI_TQ)
#define SWI_CLOCK_PENDING (1 << SWI_CLOCK)
/*
* Corresponding interrupt-disable masks for cpl. The ordering is now by
* inclusion (where each mask is considered as a set of bits). Everything
* except SWI_CLOCK_MASK includes SWI_LOW_MASK so that softclock() and low
* priority swi's don't run while other swi handlers are running and timeout
* routines can call swi handlers. SWI_TTY_MASK includes SWI_NET_MASK in
* case tty interrupts are processed at splsofttty() for a tty that is in
* SLIP or PPP line discipline (this is weaker than merging net_imask with
* tty_imask in isa.c - splimp() must mask hard and soft tty interrupts, but
* spltty() apparently only needs to mask soft net interrupts).
*/
#define SWI_TTY_MASK (SWI_TTY_PENDING | SWI_LOW_MASK | SWI_NET_MASK)
#define SWI_CAMNET_MASK (SWI_CAMNET_PENDING | SWI_LOW_MASK)
#define SWI_CAMBIO_MASK (SWI_CAMBIO_PENDING | SWI_LOW_MASK)
#define SWI_NET_MASK (SWI_NET_PENDING | SWI_LOW_MASK)
#define SWI_VM_MASK (SWI_VM_PENDING | SWI_LOW_MASK)
#define SWI_TQ_MASK (SWI_TQ_PENDING | SWI_LOW_MASK)
#define SWI_CLOCK_MASK SWI_CLOCK_PENDING
#define SWI_LOW_MASK (SWI_TQ_PENDING | SWI_CLOCK_MASK)
#define SWI_MASK (~HWI_MASK)
/*
* astpending bits
*/
#define AST_PENDING 0x00000001
#define AST_RESCHED 0x00000002
#ifndef LOCORE
/*
* cpl is preserved by interrupt handlers so it is effectively nonvolatile.
* ipending and idelayed are changed by interrupt handlers so they are
* volatile.
*/
#ifdef notyet /* in <sys/interrupt.h> until pci drivers stop hacking on them */
extern unsigned bio_imask; /* group of interrupts masked with splbio() */
#endif
extern volatile unsigned idelayed; /* interrupts to become pending */
extern volatile unsigned spending; /* pending software interrupts */
#ifdef notyet /* in <sys/systm.h> until pci drivers stop hacking on them */
extern unsigned net_imask; /* group of interrupts masked with splimp() */
extern unsigned stat_imask; /* interrupts masked with splstatclock() */
extern unsigned tty_imask; /* group of interrupts masked with spltty() */
#endif
#endif /* !LOCORE */
#endif /* !_MACHINE_IPL_H_ */

View File

@ -95,6 +95,7 @@ u_long kvtop __P((void *addr));
void setidt __P((int idx, alias_for_inthand_t *func, int typ, int dpl,
int selec));
void swi_vm __P((void));
void swi_net __P((void));
void userconfig __P((void));
int user_dbreg_trap __P((void));
int vm_page_zero_idle __P((void));

View File

@ -628,30 +628,6 @@ _Xrendezvous:
.data
/*
* Addresses of interrupt handlers.
* XresumeNN: Resumption addresses for HWIs.
*/
.globl _ihandlers
_ihandlers:
/*
* used by:
* ipl.s: doreti_unpend
*/
.long Xresume0, Xresume1, Xresume2, Xresume3
.long Xresume4, Xresume5, Xresume6, Xresume7
.long Xresume8, Xresume9, Xresume10, Xresume11
.long Xresume12, Xresume13, Xresume14, Xresume15
.long Xresume16, Xresume17, Xresume18, Xresume19
.long Xresume20, Xresume21, Xresume22, Xresume23
/*
* used by:
* ipl.s: doreti_unpend
* apic_ipl.s: splz_unpend
*/
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _softclock
#if 0
/* active flag for lazy masking */
iactive:

View File

@ -211,16 +211,3 @@ MCOUNT_LABEL(bintr)
INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
MCOUNT_LABEL(eintr)
.data
.globl _ihandlers
_ihandlers: /* addresses of interrupt handlers */
/* actually resumption addresses for HWI's */
.long Xresume0, Xresume1, Xresume2, Xresume3
.long Xresume4, Xresume5, Xresume6, Xresume7
.long Xresume8, Xresume9, Xresume10, Xresume11
.long Xresume12, Xresume13, Xresume14, Xresume15
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _softclock
.text

View File

@ -130,9 +130,7 @@ static void setup_8254_mixed_mode __P((void));
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
volatile u_int idelayed;
int statclock_disable;
u_int stat_imask = SWI_LOW_MASK;
#ifndef TIMER_FREQ
#define TIMER_FREQ 1193182
#endif
@ -143,9 +141,6 @@ int tsc_is_broken;
int wall_cmos_clock; /* wall CMOS clock assumed if != 0 */
static int beeping = 0;
#if 0
static u_int clk_imask = HWI_MASK | SWI_MASK;
#endif
static const u_char daysinmonth[] = {31,28,31,30,31,30,31,31,30,31,30,31};
static u_int hardclock_max_count;
static u_int32_t i8254_lastcount;
@ -1005,7 +1000,6 @@ cpu_initclocks()
* flag which would normally cause the RTC to generate
* interrupts.
*/
stat_imask = HWI_MASK | SWI_MASK;
rtc_statusb = RTCSB_24HR;
} else {
/* Setting stathz to nonzero early helps avoid races. */

View File

@ -211,16 +211,3 @@ MCOUNT_LABEL(bintr)
INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
MCOUNT_LABEL(eintr)
.data
.globl _ihandlers
_ihandlers: /* addresses of interrupt handlers */
/* actually resumption addresses for HWI's */
.long Xresume0, Xresume1, Xresume2, Xresume3
.long Xresume4, Xresume5, Xresume6, Xresume7
.long Xresume8, Xresume9, Xresume10, Xresume11
.long Xresume12, Xresume13, Xresume14, Xresume15
.long _swi_null, swi_net, _swi_null, _swi_null
.long _swi_vm, _swi_null, _softclock
.text

View File

@ -90,13 +90,12 @@
#endif
/*
* Per-interrupt data. We consider the soft interrupt to be a special
* case, so these arrays have NHWI + NSWI entries, not ICU_LEN.
* Per-interrupt data.
*/
u_long *intr_countp[NHWI + NSWI]; /* pointers to interrupt counters */
driver_intr_t *intr_handler[NHWI + NSWI]; /* first level interrupt handler */
struct ithd *ithds[NHWI + NSWI]; /* real interrupt handler */
void *intr_unit[NHWI + NSWI];
u_long *intr_countp[ICU_LEN]; /* pointers to interrupt counters */
driver_intr_t *intr_handler[ICU_LEN]; /* first level interrupt handler */
struct ithd *ithds[ICU_LEN]; /* real interrupt handler */
void *intr_unit[ICU_LEN];
static inthand_t *fastintr[ICU_LEN] = {
&IDTVEC(fastintr0), &IDTVEC(fastintr1),

View File

@ -214,8 +214,6 @@ int icu_setup __P((int intr, driver_intr_t *func, void *arg,
int flags));
int icu_unset __P((int intr, driver_intr_t *handler));
intrmask_t splq __P((intrmask_t mask));
/*
* WARNING: These are internal functions and not to be used by device drivers!
* They are subject to change without notice.

View File

@ -54,19 +54,19 @@
/* current priority (all off) */
.globl _tty_imask
_tty_imask: .long SWI_TTY_MASK
_tty_imask: .long 0
.globl _bio_imask
_bio_imask: .long SWI_CLOCK_MASK | SWI_CAMBIO_MASK
_bio_imask: .long 0
.globl _net_imask
_net_imask: .long SWI_NET_MASK | SWI_CAMNET_MASK
_net_imask: .long 0
.globl _cam_imask
_cam_imask: .long SWI_CAMBIO_MASK | SWI_CAMNET_MASK
_cam_imask: .long 0
.globl _soft_imask
_soft_imask: .long SWI_MASK
_soft_imask: .long 0
.globl _softnet_imask
_softnet_imask: .long SWI_NET_MASK
_softnet_imask: .long 0
.globl _softtty_imask
_softtty_imask: .long SWI_TTY_MASK
_softtty_imask: .long 0
/* pending software interrupts */
.globl _spending
@ -173,7 +173,9 @@ doreti_ast:
jmp doreti_next
ALIGN_TEXT
swi_net:
.globl _swi_net
.type _swi_net,@function
_swi_net:
MCOUNT
bsfl _netisr,%eax
je swi_net_done

View File

@ -29,70 +29,11 @@
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <machine/ipl.h>
#include <sys/proc.h>
#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>
#include <sys/ipl.h>
#include <sys/interrupt.h>
#include <machine/md_var.h>
/*
* Bits in the ipending bitmap variable must be set atomically because
* ipending may be manipulated by interrupts or other cpu's without holding
* any locks.
*
* Note: setbits uses a locked or, making simple cases MP safe.
*/
#define DO_SETBITS(name, var, bits) \
void name(void) \
{ \
atomic_set_int(var, bits); \
sched_ithd((void *) SOFTINTR); \
}
#define DO_SETBITS_AND_NO_MORE(name, var, bits) \
void name(void) \
{ \
atomic_set_int(var, bits); \
}
DO_SETBITS(setdelayed, &spending, loadandclear(&idelayed))
DO_SETBITS(setsoftcamnet,&spending, SWI_CAMNET_PENDING)
DO_SETBITS(setsoftcambio,&spending, SWI_CAMBIO_PENDING)
DO_SETBITS(setsoftclock, &spending, SWI_CLOCK_PENDING)
DO_SETBITS(setsoftnet, &spending, SWI_NET_PENDING)
DO_SETBITS(setsofttty, &spending, SWI_TTY_PENDING)
DO_SETBITS(setsoftvm, &spending, SWI_VM_PENDING)
DO_SETBITS(setsofttq, &spending, SWI_TQ_PENDING)
DO_SETBITS_AND_NO_MORE(schedsofttty, &idelayed, SWI_TTY_PENDING)
unsigned
softclockpending(void)
{
return (spending & SWI_CLOCK_PENDING);
}
/*
* Dummy spl calls. The only reason for these is to not break
* all the code which expects to call them.
*/
void spl0 (void) {}
void splx (intrmask_t x) {}
intrmask_t splq(intrmask_t mask) {return 0; }
intrmask_t splbio(void) {return 0; }
intrmask_t splcam(void) {return 0; }
intrmask_t splclock(void) {return 0; }
intrmask_t splhigh(void) {return 0; }
intrmask_t splimp(void) {return 0; }
intrmask_t splnet(void) {return 0; }
intrmask_t splsoftcam(void) {return 0; }
intrmask_t splsoftcambio(void) {return 0; }
intrmask_t splsoftcamnet(void) {return 0; }
intrmask_t splsoftclock(void) {return 0; }
intrmask_t splsofttty(void) {return 0; }
intrmask_t splsoftvm(void) {return 0; }
intrmask_t splsofttq(void) {return 0; }
intrmask_t splstatclock(void) {return 0; }
intrmask_t spltty(void) {return 0; }
intrmask_t splvm(void) {return 0; }
swihand_t *shandlers[NSWI] = { /* software interrupts */
swi_null, swi_net, swi_null, swi_null,
swi_vm, swi_null, softclock
};

View File

@ -87,15 +87,9 @@
#include <machine/mutex.h>
#include <sys/ktr.h>
#include <machine/cpu.h>
#if 0
#include <ddb/ddb.h>
#endif
u_long softintrcnt [NSWI];
static u_int straycount[NHWI];
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
#define MAX_STRAY_LOG 5
/*
@ -115,8 +109,7 @@ sched_ithd(void *cookie)
* argument for counting hardware interrupts when they're
* processed too.
*/
if (irq < NHWI) /* real interrupt, */
atomic_add_long(intr_countp[irq], 1); /* one more for this IRQ */
atomic_add_long(intr_countp[irq], 1); /* one more for this IRQ */
atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */
/*
@ -124,47 +117,19 @@ sched_ithd(void *cookie)
* this IRQ, log it as a stray interrupt.
*/
if (ir == NULL || ir->it_proc == NULL) {
if (irq < NHWI) {
if (straycount[irq] < MAX_STRAY_LOG) {
printf("stray irq %d\n", irq);
if (++straycount[irq] == MAX_STRAY_LOG)
printf("got %d stray irq %d's: "
"not logging anymore\n",
MAX_STRAY_LOG, irq);
}
return;
if (straycount[irq] < MAX_STRAY_LOG) {
printf("stray irq %d\n", irq);
if (++straycount[irq] == MAX_STRAY_LOG)
printf(
"got %d stray irq %d's: not logging anymore\n",
MAX_STRAY_LOG, irq);
}
panic("sched_ithd: ithds[%d] == NULL", irq);
return;
}
CTR3(KTR_INTR, "sched_ithd pid %d(%s) need=%d",
ir->it_proc->p_pid, ir->it_proc->p_comm, ir->it_need);
#if 0
/*
* If we are in the debugger, we can't use interrupt threads to
* process interrupts since the threads are scheduled. Instead,
* call the interrupt handlers directly. This should be able to
* go away once we have light-weight interrupt handlers.
*/
if (db_active) {
struct intrec *ih; /* and our interrupt handler chain */
#if 0
membar_unlock(); /* push out "it_need=0" */
#endif
for (ih = ir->it_ih; ih != NULL; ih = ih->next) {
if ((ih->flags & INTR_MPSAFE) == 0)
mtx_enter(&Giant, MTX_DEF);
ih->handler(ih->argument);
if ((ih->flags & INTR_MPSAFE) == 0)
mtx_exit(&Giant, MTX_DEF);
}
INTREN (1 << ir->irq); /* reset the mask bit */
return;
}
#endif
/*
* Set it_need so that if the thread is already running but close
* to done, it will do another go-round. Then get the sched lock
@ -183,18 +148,13 @@ sched_ithd(void *cookie)
aston();
}
else {
if (irq < NHWI && (irq & 7) != 0)
CTR3(KTR_INTR, "sched_ithd %d: it_need %d, state %d",
ir->it_proc->p_pid,
ir->it_need,
ir->it_proc->p_stat );
}
mtx_exit(&sched_lock, MTX_SPIN);
#if 0
aston(); /* ??? check priorities first? */
#else
need_resched();
#endif
}
/*
@ -266,113 +226,3 @@ ithd_loop(void *dummy)
mtx_exit(&sched_lock, MTX_SPIN);
}
}
/*
* Start soft interrupt thread.
*/
void
start_softintr(void *dummy)
{
int error;
struct proc *p;
struct ithd *softintr; /* descriptor for the "IRQ" */
struct intrec *idesc; /* descriptor for this handler */
char *name = "sintr"; /* name for idesc */
int i;
if (ithds[SOFTINTR]) { /* we already have a thread */
printf("start_softintr: already running");
return;
}
/* first handler for this irq. */
softintr = malloc(sizeof (struct ithd), M_DEVBUF, M_WAITOK);
if (softintr == NULL)
panic ("Can't create soft interrupt thread");
bzero(softintr, sizeof(struct ithd));
softintr->irq = SOFTINTR;
ithds[SOFTINTR] = softintr;
error = kthread_create(intr_soft, NULL, &p,
RFSTOPPED | RFHIGHPID, "softinterrupt");
if (error)
panic("start_softintr: kthread_create error %d\n", error);
p->p_rtprio.type = RTP_PRIO_ITHREAD;
p->p_rtprio.prio = PI_SOFT; /* soft interrupt */
p->p_stat = SWAIT; /* we're idle */
p->p_flag |= P_NOLOAD;
/* Put in linkages. */
softintr->it_proc = p;
p->p_ithd = softintr; /* reverse link */
idesc = malloc(sizeof (struct intrec), M_DEVBUF, M_WAITOK);
if (idesc == NULL)
panic ("Can't create soft interrupt thread");
bzero(idesc, sizeof (struct intrec));
idesc->ithd = softintr;
idesc->name = malloc(strlen(name) + 1, M_DEVBUF, M_WAITOK);
if (idesc->name == NULL)
panic ("Can't create soft interrupt thread");
strcpy(idesc->name, name);
for (i = NHWI; i < NHWI + NSWI; i++)
intr_countp[i] = &softintrcnt [i - NHWI];
}
/*
* Software interrupt process code.
*/
void
intr_soft(void *dummy)
{
int i;
struct ithd *me; /* our thread context */
me = curproc->p_ithd; /* point to myself */
/* Main loop */
for (;;) {
#if 0
CTR3(KTR_INTR, "intr_soft pid %d(%s) need=%d",
me->it_proc->p_pid, me->it_proc->p_comm,
me->it_need);
#endif
/*
* Service interrupts. If another interrupt arrives
* while we are running, they will set it_need to
* denote that we should make another pass.
*/
me->it_need = 0;
while ((i = ffs(spending))) {
i--;
atomic_add_long(intr_countp[i], 1);
spending &= ~ (1 << i);
mtx_enter(&Giant, MTX_DEF);
if (ihandlers[i] == swi_generic)
swi_dispatcher(i);
else
(ihandlers[i])();
mtx_exit(&Giant, MTX_DEF);
}
/*
* Processed all our interrupts. Now get the sched
* lock. This may take a while and it_need may get
* set again, so we have to check it again.
*/
mtx_enter(&sched_lock, MTX_SPIN);
if (!me->it_need) {
#if 0
CTR1(KTR_INTR, "intr_soft pid %d: done",
me->it_proc->p_pid);
#endif
me->it_proc->p_stat = SWAIT; /* we're idle */
mi_switch();
#if 0
CTR1(KTR_INTR, "intr_soft pid %d: resumed",
me->it_proc->p_pid);
#endif
}
mtx_exit(&sched_lock, MTX_SPIN);
}
}

View File

@ -90,13 +90,12 @@
#endif
/*
* Per-interrupt data. We consider the soft interrupt to be a special
* case, so these arrays have NHWI + NSWI entries, not ICU_LEN.
* Per-interrupt data.
*/
u_long *intr_countp[NHWI + NSWI]; /* pointers to interrupt counters */
driver_intr_t *intr_handler[NHWI + NSWI]; /* first level interrupt handler */
struct ithd *ithds[NHWI + NSWI]; /* real interrupt handler */
void *intr_unit[NHWI + NSWI];
u_long *intr_countp[ICU_LEN]; /* pointers to interrupt counters */
driver_intr_t *intr_handler[ICU_LEN]; /* first level interrupt handler */
struct ithd *ithds[ICU_LEN]; /* real interrupt handler */
void *intr_unit[ICU_LEN];
static inthand_t *fastintr[ICU_LEN] = {
&IDTVEC(fastintr0), &IDTVEC(fastintr1),

View File

@ -136,7 +136,7 @@ SYSCTL_INT(_hw,HW_FLOATINGPT, floatingpoint,
"Floatingpoint instructions executed in hardware");
#ifndef SMP
static u_int npx0_imask = SWI_LOW_MASK;
static u_int npx0_imask = 0;
static struct gate_descriptor npx_idt_probeintr;
static int npx_intrno;
static volatile u_int npx_intrs_while_probing;

View File

@ -130,9 +130,7 @@ static void setup_8254_mixed_mode __P((void));
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
volatile u_int idelayed;
int statclock_disable;
u_int stat_imask = SWI_LOW_MASK;
#ifndef TIMER_FREQ
#define TIMER_FREQ 1193182
#endif
@ -143,9 +141,6 @@ int tsc_is_broken;
int wall_cmos_clock; /* wall CMOS clock assumed if != 0 */
static int beeping = 0;
#if 0
static u_int clk_imask = HWI_MASK | SWI_MASK;
#endif
static const u_char daysinmonth[] = {31,28,31,30,31,30,31,31,30,31,30,31};
static u_int hardclock_max_count;
static u_int32_t i8254_lastcount;
@ -1005,7 +1000,6 @@ cpu_initclocks()
* flag which would normally cause the RTC to generate
* interrupts.
*/
stat_imask = HWI_MASK | SWI_MASK;
rtc_statusb = RTCSB_24HR;
} else {
/* Setting stathz to nonzero early helps avoid races. */

View File

@ -64,6 +64,7 @@
#include <sys/dkstat.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/ipl.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
@ -83,7 +84,6 @@
#include <machine/lock.h>
#include <machine/clock.h>
#include <machine/ipl.h>
#ifndef SMP
#include <machine/lock.h>
#endif

View File

@ -16,6 +16,7 @@
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/ipl.h>
#include <sys/kthread.h>
#include <sys/queue.h>
#include <sys/eventhandler.h>
@ -27,7 +28,6 @@
#endif
#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/mutex.h>
#include <machine/smp.h>
@ -103,6 +103,5 @@ idle_proc(void *dummy)
mtx_enter(&sched_lock, MTX_SPIN);
mi_switch();
mtx_exit(&sched_lock, MTX_SPIN);
spl0();
}
}

View File

@ -32,11 +32,18 @@
#include <sys/bus.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <machine/ipl.h>
#include <sys/ipl.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/mutex.h>
struct swilist {
swihand_t *sl_handler;
@ -44,6 +51,13 @@ struct swilist {
};
static struct swilist swilists[NSWI];
u_long softintr_count[NSWI];
static struct proc *softithd;
volatile u_int sdelayed;
volatile u_int spending;
static void start_softintr(void *);
static void intr_soft(void *);
void
register_swi(intr, handler)
@ -53,18 +67,18 @@ register_swi(intr, handler)
struct swilist *slp, *slq;
int s;
if (intr < NHWI || intr >= NHWI + NSWI)
if (intr < 0 || intr >= NSWI)
panic("register_swi: bad intr %d", intr);
if (handler == swi_generic || handler == swi_null)
panic("register_swi: bad handler %p", (void *)handler);
slp = &swilists[intr - NHWI];
slp = &swilists[intr];
s = splhigh();
if (ihandlers[intr] == swi_null)
ihandlers[intr] = handler;
if (shandlers[intr] == swi_null)
shandlers[intr] = handler;
else {
if (slp->sl_next == NULL) {
slp->sl_handler = ihandlers[intr];
ihandlers[intr] = swi_generic;
slp->sl_handler = shandlers[intr];
shandlers[intr] = swi_generic;
}
slq = malloc(sizeof(*slq), M_DEVBUF, M_NOWAIT);
if (slq == NULL)
@ -84,7 +98,7 @@ swi_dispatcher(intr)
{
struct swilist *slp;
slp = &swilists[intr - NHWI];
slp = &swilists[intr];
do {
(*slp->sl_handler)();
slp = slp->sl_next;
@ -99,21 +113,21 @@ unregister_swi(intr, handler)
struct swilist *slfoundpred, *slp, *slq;
int s;
if (intr < NHWI || intr >= NHWI + NSWI)
if (intr < 0 || intr >= NSWI)
panic("unregister_swi: bad intr %d", intr);
if (handler == swi_generic || handler == swi_null)
panic("unregister_swi: bad handler %p", (void *)handler);
slp = &swilists[intr - NHWI];
slp = &swilists[intr];
s = splhigh();
if (ihandlers[intr] == handler)
ihandlers[intr] = swi_null;
if (shandlers[intr] == handler)
shandlers[intr] = swi_null;
else if (slp->sl_next != NULL) {
slfoundpred = NULL;
for (slq = slp->sl_next; slq != NULL;
slp = slq, slq = slp->sl_next)
if (slq->sl_handler == handler)
slfoundpred = slp;
slp = &swilists[intr - NHWI];
slp = &swilists[intr];
if (slfoundpred != NULL) {
slq = slfoundpred->sl_next;
slfoundpred->sl_next = slq->sl_next;
@ -125,7 +139,7 @@ unregister_swi(intr, handler)
free(slq, M_DEVBUF);
}
if (slp->sl_next == NULL)
ihandlers[intr] = slp->sl_handler;
shandlers[intr] = slp->sl_handler;
}
splx(s);
}
@ -167,3 +181,197 @@ ithread_priority(flags)
return pri;
}
/*
* Schedule the soft interrupt handler thread.
*/
void
sched_softintr(void)
{
	atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */

	/*
	 * The soft interrupt thread is created at SI_SUB_SOFTINTR time by
	 * start_softintr(); a swi scheduled before then is fatal.
	 */
	if (softithd == NULL)
		panic("soft interrupt scheduled too early");

	CTR3(KTR_INTR, "sched_softintr pid %d(%s) spending=0x%x",
	    softithd->p_pid, softithd->p_comm, spending);

	/*
	 * Get the sched lock and see if the thread is on whichkqs yet.
	 * If not, put it on there.  In any case, kick everyone so that if
	 * the new thread is higher priority than their current thread, it
	 * gets run now.
	 */
	mtx_enter(&sched_lock, MTX_SPIN);
	if (softithd->p_stat == SWAIT) { /* not on run queue */
		CTR1(KTR_INTR, "sched_softintr: setrunqueue %d",
		    softithd->p_pid);
		/* membar_lock(); */
		softithd->p_stat = SRUN;
		setrunqueue(softithd);
		aston();
	}
	mtx_exit(&sched_lock, MTX_SPIN);
#if 0
	aston();		/* ??? check priorities first? */
#else
	need_resched();
#endif
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
/*
* Start soft interrupt thread.
*/
/*
 * Create the soft interrupt kthread (run once from SYSINIT).
 */
static void
start_softintr(dummy)
	void *dummy;
{
	int error;

	/* Only one soft interrupt thread is ever created. */
	if (softithd != NULL) {	/* we already have a thread */
		printf("start_softintr: already running");
		return;
	}

	/* RFSTOPPED keeps the new thread off the run queue until scheduled. */
	error = kthread_create(intr_soft, NULL, &softithd,
	    RFSTOPPED | RFHIGHPID, "softinterrupt");
	if (error)
		panic("start_softintr: kthread_create error %d\n", error);

	softithd->p_rtprio.type = RTP_PRIO_ITHREAD;
	softithd->p_rtprio.prio = PI_SOFT;	/* soft interrupt */
	softithd->p_stat = SWAIT;		/* we're idle */
	softithd->p_flag |= P_NOLOAD;		/* don't affect the load average */
}
/*
* Software interrupt process code.
*/
/*
 * Main loop of the soft interrupt thread: drain the spending bitmap,
 * dispatching one handler per set bit, then sleep (SWAIT) until
 * sched_softintr() puts us back on the run queue.
 */
static void
intr_soft(dummy)
	void *dummy;
{
	int i;
	u_int pend;

	/* Main loop */
	for (;;) {
		CTR3(KTR_INTR, "intr_soft pid %d(%s) spending=0x%x",
		    curproc->p_pid, curproc->p_comm, spending);

		/*
		 * Service interrupts.  If another interrupt arrives
		 * while we are running, they will set spending to
		 * denote that we should make another pass.
		 */
		pend = atomic_readandclear_int(&spending);
		while ((i = ffs(pend))) {
			i--;		/* ffs() numbers bits from 1 */
			atomic_add_long(&softintr_count[i], 1);
			pend &= ~ (1 << i);
			/* Handlers still expect to run under Giant. */
			mtx_enter(&Giant, MTX_DEF);
			if (shandlers[i] == swi_generic)
				swi_dispatcher(i);
			else
				(shandlers[i])();
			mtx_exit(&Giant, MTX_DEF);
		}
		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and spending may get
		 * set again, so we have to check it again.
		 */
		mtx_enter(&sched_lock, MTX_SPIN);
		if (spending == 0) {
			CTR1(KTR_INTR, "intr_soft pid %d: done",
			    curproc->p_pid);
			curproc->p_stat = SWAIT; /* we're idle */
			mi_switch();
			CTR1(KTR_INTR, "intr_soft pid %d: resumed",
			    curproc->p_pid);
		}
		mtx_exit(&sched_lock, MTX_SPIN);
	}
}
/*
* Bits in the spending bitmap variable must be set atomically because
* spending may be manipulated by interrupts or other cpu's without holding
* any locks.
*
* Note: setbits uses a locked or, making simple cases MP safe.
*/
/* Mark the given bits pending and immediately schedule the swi thread. */
#define DO_SETBITS(name, var, bits) \
void name(void) \
{ \
	atomic_set_int(var, bits); \
	sched_softintr(); \
}

/*
 * Mark the bits only; a later setdelayed() call transfers them to
 * spending and does the actual scheduling.
 */
#define DO_SETBITS_AND_NO_MORE(name, var, bits) \
void name(void) \
{ \
	atomic_set_int(var, bits); \
}

/* setsoft*(): request a software interrupt and kick the swi thread. */
DO_SETBITS(setsoftcamnet,&spending, SWI_CAMNET_PENDING)
DO_SETBITS(setsoftcambio,&spending, SWI_CAMBIO_PENDING)
DO_SETBITS(setsoftclock, &spending, SWI_CLOCK_PENDING)
DO_SETBITS(setsoftnet,   &spending, SWI_NET_PENDING)
DO_SETBITS(setsofttty,   &spending, SWI_TTY_PENDING)
DO_SETBITS(setsoftvm,    &spending, SWI_VM_PENDING)
DO_SETBITS(setsofttq,    &spending, SWI_TQ_PENDING)

/* schedsoft*(): record a deferred swi in sdelayed for setdelayed(). */
DO_SETBITS_AND_NO_MORE(schedsoftcamnet, &sdelayed, SWI_CAMNET_PENDING)
DO_SETBITS_AND_NO_MORE(schedsoftcambio, &sdelayed, SWI_CAMBIO_PENDING)
DO_SETBITS_AND_NO_MORE(schedsoftnet, &sdelayed, SWI_NET_PENDING)
DO_SETBITS_AND_NO_MORE(schedsofttty, &sdelayed, SWI_TTY_PENDING)
DO_SETBITS_AND_NO_MORE(schedsoftvm, &sdelayed, SWI_VM_PENDING)
DO_SETBITS_AND_NO_MORE(schedsofttq, &sdelayed, SWI_TQ_PENDING)
/*
 * Move any deferred swi bits from sdelayed into spending and, if there
 * were any, schedule the soft interrupt thread to service them.
 */
void
setdelayed(void)
{
	int bits;

	if ((bits = atomic_readandclear_int(&sdelayed)) == 0)
		return;
	atomic_set_int(&spending, bits);
	sched_softintr();
}
/*
 * Report whether a clock software interrupt is currently pending.
 */
intrmask_t
softclockpending(void)
{
	intrmask_t clockpend;

	clockpend = spending & SWI_CLOCK_PENDING;
	return (clockpend);
}
/*
 * Dummy spl calls.  The only reason for these is to not break
 * all the code which expects to call them.  With interrupts now
 * handled by dedicated threads rather than masked via processor
 * priority levels, every spl*() is a no-op and splq() and friends
 * report an empty mask.
 */
void spl0 (void) {}
void splx (intrmask_t x) {}
intrmask_t splq(intrmask_t mask) { return 0; }
intrmask_t splbio(void) { return 0; }
intrmask_t splcam(void) { return 0; }
intrmask_t splclock(void) { return 0; }
intrmask_t splhigh(void) { return 0; }
intrmask_t splimp(void) { return 0; }
intrmask_t splnet(void) { return 0; }
intrmask_t splsoftcam(void) { return 0; }
intrmask_t splsoftcambio(void) { return 0; }
intrmask_t splsoftcamnet(void) { return 0; }
intrmask_t splsoftclock(void) { return 0; }
intrmask_t splsofttty(void) { return 0; }
intrmask_t splsoftvm(void) { return 0; }
intrmask_t splsofttq(void) { return 0; }
intrmask_t splstatclock(void) { return 0; }
intrmask_t spltty(void) { return 0; }
intrmask_t splvm(void) { return 0; }

View File

@ -33,8 +33,8 @@
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <sys/interrupt.h>
#include <sys/ipl.h>
#include <sys/malloc.h>
#include <machine/ipl.h>
MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

View File

@ -60,10 +60,12 @@ void alpha_register_pci_scsi __P((int bus, int slot, struct cam_sim *sim));
#ifdef _SYS_BUS_H_
struct resource *alpha_platform_alloc_ide_intr(int chan);
int alpha_platform_release_ide_intr(int chan, struct resource *res);
int alpha_platform_setup_ide_intr(struct resource *res,
int alpha_platform_setup_ide_intr(struct device *dev,
struct resource *res,
driver_intr_t *fn, void *arg,
void **cookiep);
int alpha_platform_teardown_ide_intr(struct resource *res, void *cookie);
int alpha_platform_teardown_ide_intr(struct device *dev,
struct resource *res, void *cookie);
int alpha_platform_pci_setup_intr(device_t dev, device_t child,
struct resource *irq, int flags,
driver_intr_t *intr, void *arg,

View File

@ -49,13 +49,14 @@ struct intrec {
typedef void swihand_t __P((void));
extern swihand_t *shandlers[];
void register_swi __P((int intr, swihand_t *handler));
void swi_dispatcher __P((int intr));
swihand_t swi_generic;
swihand_t swi_null;
void unregister_swi __P((int intr, swihand_t *handler));
int ithread_priority __P((int flags));
extern swihand_t *ihandlers[];
void sched_softintr __P((void));
#endif

76
sys/sys/ipl.h Normal file
View File

@ -0,0 +1,76 @@
/*-
* Copyright (c) 1993 The Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_IPL_H_
#define _SYS_IPL_H_

#include <machine/ipl.h>

/*
 * Software interrupt bit numbers in priority order.  The priority only
 * determines which swi will be dispatched next; a higher priority swi
 * may be dispatched when a nested h/w interrupt handler returns.
 */
#define SWI_TTY		0
#define SWI_NET		1
#define SWI_CAMNET	2
#define SWI_CAMBIO	3
#define SWI_VM		4
#define SWI_TQ		5
#define SWI_CLOCK	6

/*
 * Corresponding interrupt-pending bits for the spending bitmap.
 */
#define SWI_TTY_PENDING		(1 << SWI_TTY)
#define SWI_NET_PENDING		(1 << SWI_NET)
#define SWI_CAMNET_PENDING	(1 << SWI_CAMNET)
#define SWI_CAMBIO_PENDING	(1 << SWI_CAMBIO)
#define SWI_VM_PENDING		(1 << SWI_VM)
#define SWI_TQ_PENDING		(1 << SWI_TQ)
#define SWI_CLOCK_PENDING	(1 << SWI_CLOCK)

#ifndef LOCORE

/*
 * spending and sdelayed are changed by interrupt handlers so they are
 * volatile.
 */
extern volatile u_int sdelayed;	/* interrupts to become pending */
extern volatile u_int spending;	/* pending software interrupts */

#endif /* !LOCORE */
#endif /* !_SYS_IPL_H_ */

View File

@ -380,6 +380,7 @@ struct ithd {
int it_cnt; /* number of schedule events */
#endif
void *it_md; /* hook for MD interrupt code */
};
#ifdef _KERNEL

View File

@ -204,12 +204,6 @@ struct callout_handle timeout __P((timeout_t *, void *, int));
void untimeout __P((timeout_t *, void *, struct callout_handle));
/* Interrupt management */
/*
* For the alpha arch, some of these functions are static __inline, and
* the others should be.
*/
#ifdef __i386__
void setdelayed __P((void));
void setsoftast __P((void));
void setsoftcambio __P((void));
@ -244,10 +238,10 @@ intrmask_t splstatclock __P((void));
intrmask_t spltty __P((void));
intrmask_t splvm __P((void));
void splx __P((intrmask_t ipl));
intrmask_t splq __P((intrmask_t ipl));
void splz __P((void));
#endif /* __i386__ */
#if defined(__alpha__) || defined(__ia64__)
#if defined(__ia64__)
#include <machine/ipl.h>
#endif