Reorganize the interrupt handling code a bit to make a few things cleaner

and increase flexibility to allow various different approaches to be tried
in the future.
- Split struct ithd up into two pieces.  struct intr_event holds the list
  of interrupt handlers associated with interrupt sources.
  struct intr_thread contains the data relative to an interrupt thread.
  Currently we still provide a 1:1 relationship of events to threads
  with the exception that events only have an associated thread if there
  is at least one threaded interrupt handler attached to the event.  This
  means that on x86 we no longer have 4 bazillion interrupt threads with
  no handlers.  It also means that interrupt events with only INTR_FAST
  handlers no longer have an associated thread either.
- Renamed struct intrhand to struct intr_handler to follow the struct
  intr_foo naming convention.  This did require renaming the powerpc
  MD struct intr_handler to struct ppc_intr_handler.
- INTR_FAST no longer implies INTR_EXCL on all architectures except for
  powerpc.  This means that multiple INTR_FAST handlers can attach to the
  same interrupt and that INTR_FAST and non-INTR_FAST handlers can attach
  to the same interrupt.  Sharing INTR_FAST handlers may not always be
  desirable, but having sio(4) and uhci(4) fight over an IRQ isn't fun
  either.  Drivers can always still use INTR_EXCL to ask for an interrupt
  exclusively.  The way this sharing works is that when an interrupt
  comes in, all the INTR_FAST handlers are executed first, and if any
  threaded handlers exist, the interrupt thread is scheduled afterwards.
  This type of layout also makes it possible to investigate using interrupt
  filters à la OS X where the filter determines whether or not its companion
  threaded handler should run.
- Aside from the INTR_FAST changes above, the impact on MD interrupt code
  is mostly just 's/ithread/intr_event/'.
- A new MI ddb command 'show intrs' walks the list of interrupt events
  dumping their state.  It also has a '/v' verbose switch which dumps
  info about all of the handlers attached to each event.
- We currently don't destroy an interrupt thread when the last threaded
  handler is removed because it would suck for things like ppbus(8)'s
  braindead behavior.  The code is present, though, it is just under
  #if 0 for now.
- Move the code to actually execute the threaded handlers for an interrupt
  event into a separate function so that ithread_loop() becomes more
  readable.  Previously this code was all in the middle of ithread_loop()
  and indented halfway across the screen.
- Made struct intr_thread private to kern_intr.c and replaced td_ithd
  with a thread private flag TDP_ITHREAD.
- In statclock, check curthread against idlethread directly rather than
  curthread's proc against idlethread's proc. (Not really related to intr
  changes)

Tested on:	alpha, amd64, i386, sparc64
Tested on:	arm, ia64 (older version of patch by cognet and marcel)
This commit is contained in:
John Baldwin 2005-10-25 19:48:48 +00:00
parent eb2893ec18
commit e0f66ef861
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=151658
22 changed files with 800 additions and 586 deletions

View File

@ -325,8 +325,9 @@ LIST_HEAD(alpha_intr_list, alpha_intr);
struct alpha_intr {
LIST_ENTRY(alpha_intr) list; /* chain handlers in this hash bucket */
uintptr_t vector; /* vector to match */
struct ithd *ithd; /* interrupt thread */
struct intr_event *ie; /* interrupt event structure */
volatile long *cntp; /* interrupt counter */
void (*disable)(uintptr_t);
};
static struct mtx alpha_intr_hash_lock;
@ -338,7 +339,7 @@ static void
ithds_init(void *dummy)
{
mtx_init(&alpha_intr_hash_lock, "ithread table lock", NULL, MTX_SPIN);
mtx_init(&alpha_intr_hash_lock, "intr table", NULL, MTX_SPIN);
}
SYSINIT(ithds_init, SI_SUB_INTR, SI_ORDER_SECOND, ithds_init, NULL);
@ -371,8 +372,9 @@ alpha_setup_intr(const char *name, uintptr_t vector, driver_intr_t handler, void
return ENOMEM;
i->vector = vector;
i->cntp = cntp;
errcode = ithread_create(&i->ithd, vector, 0, disable, enable,
"intr:");
i->disable = disable;
errcode = intr_event_create(&i->ie, (void *)vector, 0,
(void (*)(void *))enable, "intr:");
if (errcode) {
free(i, M_DEVBUF);
return errcode;
@ -384,44 +386,49 @@ alpha_setup_intr(const char *name, uintptr_t vector, driver_intr_t handler, void
}
/* Second, add this handler. */
return (ithread_add_handler(i->ithd, name, handler, arg,
ithread_priority(flags), flags, cookiep));
return (intr_event_add_handler(i->ie, name, handler, arg,
intr_priority(flags), flags, cookiep));
}
int
alpha_teardown_intr(void *cookie)
{
return (ithread_remove_handler(cookie));
return (intr_event_remove_handler(cookie));
}
/*
* XXX: Alpha doesn't count stray interrupts like some of the other archs.
*/
void
alpha_dispatch_intr(void *frame, unsigned long vector)
{
int h = HASHVEC(vector);
struct alpha_intr *i;
struct ithd *ithd; /* our interrupt thread */
struct intrhand *ih;
int error;
struct intr_event *ie;
struct intr_handler *ih;
int error, thread;
/*
* Walk the hash bucket for this vector looking for this vector's
* interrupt thread.
* interrupt structure.
*/
for (i = LIST_FIRST(&alpha_intr_hash[h]); i && i->vector != vector;
i = LIST_NEXT(i, list))
; /* nothing */
if (i == NULL)
return; /* no ithread for this vector */
ithd = i->ithd;
KASSERT(ithd != NULL, ("interrupt vector without a thread"));
/* No interrupt structure for this vector. */
if (i == NULL)
return;
ie = i->ie;
KASSERT(ie != NULL, ("interrupt structure without an event"));
/*
* As an optimization, if an ithread has no handlers, don't
* As an optimization, if an event has no handlers, don't
* schedule it to run.
*/
if (TAILQ_EMPTY(&ithd->it_handlers))
if (TAILQ_EMPTY(&ie->ie_handlers))
return;
atomic_add_long(i->cntp, 1);
@ -433,25 +440,32 @@ alpha_dispatch_intr(void *frame, unsigned long vector)
*/
sched_pin();
/*
* Handle a fast interrupt if there is no actual thread for this
* interrupt by calling the handler directly without Giant. Note
* that this means that any fast interrupt handler must be MP safe.
*/
ih = TAILQ_FIRST(&ithd->it_handlers);
if ((ih->ih_flags & IH_FAST) != 0) {
/* Execute all fast interrupt handlers directly. */
thread = 0;
critical_enter();
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
if (!(ih->ih_flags & IH_FAST)) {
thread = 1;
continue;
}
CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
ih->ih_handler, ih->ih_argument, ih->ih_name);
ih->ih_handler(ih->ih_argument);
}
critical_exit();
} else {
if (ithd->it_disable) {
/*
* If the ithread needs to run, disable the source and schedule the
* thread.
*/
if (thread) {
if (i->disable) {
CTR1(KTR_INTR,
"alpha_dispatch_intr: disabling vector 0x%x",
i->vector);
ithd->it_disable(ithd->it_vector);
i->disable(i->vector);
}
error = ithread_schedule(ithd);
error = intr_event_schedule_thread(ie);
KASSERT(error == 0, ("got an impossible stray interrupt"));
}
sched_unpin();

View File

@ -389,14 +389,14 @@ isa_teardown_intr(device_t dev, device_t child,
struct resource *irq, void *cookie)
{
struct isa_intr *ii = cookie;
struct intrhand *ih, *handler = (struct intrhand *)ii->ih;
struct ithd *ithread = handler->ih_ithread;
struct intr_handler *ih, *handler = (struct intr_handler *)ii->ih;
struct intr_event *ie = handler->ih_event;
int num_handlers = 0;
mtx_lock(&ithread->it_lock);
TAILQ_FOREACH(ih, &ithread->it_handlers, ih_next)
mtx_lock(&ie->ie_lock);
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
num_handlers++;
mtx_unlock(&ithread->it_lock);
mtx_unlock(&ie->ie_lock);
/*
* Only disable the interrupt in hardware if there are no

View File

@ -57,7 +57,7 @@
#define MAX_STRAY_LOG 5
typedef void (*mask_fn)(uintptr_t vector);
typedef void (*mask_fn)(void *);
static int intrcnt_index;
static struct intsrc *interrupt_sources[NUM_IO_INTS];
@ -81,15 +81,14 @@ intr_register_source(struct intsrc *isrc)
vector = isrc->is_pic->pic_vector(isrc);
if (interrupt_sources[vector] != NULL)
return (EEXIST);
error = ithread_create(&isrc->is_ithread, (uintptr_t)isrc, 0,
(mask_fn)isrc->is_pic->pic_disable_source,
error = intr_event_create(&isrc->is_event, isrc, 0,
(mask_fn)isrc->is_pic->pic_enable_source, "irq%d:", vector);
if (error)
return (error);
mtx_lock_spin(&intr_table_lock);
if (interrupt_sources[vector] != NULL) {
mtx_unlock_spin(&intr_table_lock);
ithread_destroy(isrc->is_ithread);
intr_event_destroy(isrc->is_event);
return (EEXIST);
}
intrcnt_register(isrc);
@ -115,8 +114,8 @@ intr_add_handler(const char *name, int vector, driver_intr_t handler,
isrc = intr_lookup_source(vector);
if (isrc == NULL)
return (EINVAL);
error = ithread_add_handler(isrc->is_ithread, name, handler, arg,
ithread_priority(flags), flags, cookiep);
error = intr_event_add_handler(isrc->is_event, name, handler, arg,
intr_priority(flags), flags, cookiep);
if (error == 0) {
intrcnt_updatename(isrc);
isrc->is_pic->pic_enable_intr(isrc);
@ -130,7 +129,7 @@ intr_remove_handler(void *cookie)
{
int error;
error = ithread_remove_handler(cookie);
error = intr_event_remove_handler(cookie);
#ifdef XXX
if (error == 0)
intrcnt_updatename(/* XXX */);
@ -153,12 +152,11 @@ void
intr_execute_handlers(struct intsrc *isrc, struct intrframe *iframe)
{
struct thread *td;
struct ithd *it;
struct intrhand *ih;
int error, vector;
struct intr_event *ie;
struct intr_handler *ih;
int error, vector, thread;
td = curthread;
td->td_intr_nesting_level++;
/*
* We count software interrupts when we process them. The
@ -169,11 +167,7 @@ intr_execute_handlers(struct intsrc *isrc, struct intrframe *iframe)
(*isrc->is_count)++;
PCPU_LAZY_INC(cnt.v_intr);
it = isrc->is_ithread;
if (it == NULL)
ih = NULL;
else
ih = TAILQ_FIRST(&it->it_handlers);
ie = isrc->is_event;
/*
* XXX: We assume that IRQ 0 is only used for the ISA timer
@ -183,40 +177,12 @@ intr_execute_handlers(struct intsrc *isrc, struct intrframe *iframe)
if (vector == 0)
clkintr_pending = 1;
if (ih != NULL && ih->ih_flags & IH_FAST) {
/*
* Execute fast interrupt handlers directly.
* To support clock handlers, if a handler registers
* with a NULL argument, then we pass it a pointer to
* a trapframe as its argument.
*/
critical_enter();
TAILQ_FOREACH(ih, &it->it_handlers, ih_next) {
MPASS(ih->ih_flags & IH_FAST);
CTR3(KTR_INTR, "%s: executing handler %p(%p)",
__func__, ih->ih_handler,
ih->ih_argument == NULL ? iframe :
ih->ih_argument);
if (ih->ih_argument == NULL)
ih->ih_handler(iframe);
else
ih->ih_handler(ih->ih_argument);
}
isrc->is_pic->pic_eoi_source(isrc);
error = 0;
critical_exit();
} else {
/*
* For stray and threaded interrupts, we mask and EOI the
* source.
* For stray interrupts, mask and EOI the source, bump the
* stray count, and log the condition.
*/
if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) {
isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
if (ih == NULL)
error = EINVAL;
else
error = ithread_schedule(it);
}
if (error == EINVAL) {
(*isrc->is_straycount)++;
if (*isrc->is_straycount < MAX_STRAY_LOG)
log(LOG_ERR, "stray irq%d\n", vector);
@ -225,6 +191,46 @@ intr_execute_handlers(struct intsrc *isrc, struct intrframe *iframe)
"too many stray irq %d's: not logging anymore\n",
vector);
}
/*
* Execute fast interrupt handlers directly.
* To support clock handlers, if a handler registers
* with a NULL argument, then we pass it a pointer to
* an intrframe as its argument.
*/
td->td_intr_nesting_level++;
thread = 0;
critical_enter();
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
if (!(ih->ih_flags & IH_FAST)) {
thread = 1;
continue;
}
CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
ih->ih_handler, ih->ih_argument == NULL ? iframe :
ih->ih_argument, ih->ih_name);
if (ih->ih_argument == NULL)
ih->ih_handler(iframe);
else
ih->ih_handler(ih->ih_argument);
}
/*
* If there are any threaded handlers that need to run,
* mask the source as well as sending it an EOI. Otherwise,
* just send it an EOI but leave it unmasked.
*/
if (thread)
isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
else
isrc->is_pic->pic_eoi_source(isrc);
critical_exit();
/* Schedule the ithread if needed. */
if (thread) {
error = intr_event_schedule_thread(ie);
KASSERT(error == 0, ("bad stray interrupt"));
}
td->td_intr_nesting_level--;
}
@ -266,7 +272,7 @@ static void
intrcnt_updatename(struct intsrc *is)
{
intrcnt_setname(is->is_ithread->it_td->td_proc->p_comm, is->is_index);
intrcnt_setname(is->is_event->ie_fullname, is->is_index);
}
static void
@ -275,7 +281,7 @@ intrcnt_register(struct intsrc *is)
char straystr[MAXCOMLEN + 1];
/* mtx_assert(&intr_table_lock, MA_OWNED); */
KASSERT(is->is_ithread != NULL, ("%s: isrc with no ithread", __func__));
KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__));
is->is_index = intrcnt_index;
intrcnt_index += 2;
snprintf(straystr, MAXCOMLEN + 1, "stray irq%d",
@ -325,6 +331,6 @@ DB_SHOW_COMMAND(irqs, db_show_irqs)
db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
for (i = 0; i < NUM_IO_INTS && !quit; i++, isrc++)
if (*isrc != NULL)
db_dump_ithread((*isrc)->is_ithread, verbose);
db_dump_intr_event((*isrc)->is_event, verbose);
}
#endif

View File

@ -75,7 +75,7 @@ enum {
*/
struct intsrc {
struct pic *is_pic;
struct ithd *is_ithread;
struct intr_event *is_event;
u_long *is_count;
u_long *is_straycount;
u_int is_index;

View File

@ -472,10 +472,10 @@ atpic_handle_intr(void *cookie, struct intrframe iframe)
isrc = &atintrs[vec].at_intsrc;
/*
* If we don't have an ithread, see if this is a spurious
* If we don't have an event, see if this is a spurious
* interrupt.
*/
if (isrc->is_ithread == NULL && (vec == 7 || vec == 15)) {
if (isrc->is_event == NULL && (vec == 7 || vec == 15)) {
int port, isr;
/*

View File

@ -50,7 +50,7 @@ __FBSDID("$FreeBSD$");
#include <machine/intr.h>
#include <machine/cpu.h>
static struct ithd *ithreads[NIRQ];
static struct intr_event *intr_events[NIRQ];
static int intrcnt_tab[NIRQ];
static int intrcnt_index = 0;
static int last_printed = 0;
@ -61,18 +61,18 @@ void
arm_setup_irqhandler(const char *name, void (*hand)(void*), void *arg,
int irq, int flags, void **cookiep)
{
struct ithd *cur_ith;
struct intr_event *event;
int error;
if (irq < 0 || irq >= NIRQ)
return;
cur_ith = ithreads[irq];
if (cur_ith == NULL) {
error = ithread_create(&cur_ith, irq, 0, arm_mask_irq,
arm_unmask_irq, "intr%d:", irq);
event = intr_events[irq];
if (event == NULL) {
error = intr_event_create(&event, (void *)irq, 0,
(void (*)(void *))arm_unmask_irq, "intr%d:", irq);
if (error)
return;
ithreads[irq] = cur_ith;
intr_events[irq] = event;
last_printed +=
snprintf(intrnames + last_printed,
MAXCOMLEN + 1,
@ -82,14 +82,14 @@ arm_setup_irqhandler(const char *name, void (*hand)(void*), void *arg,
intrcnt_index++;
}
ithread_add_handler(cur_ith, name, hand, arg,
ithread_priority(flags), flags, cookiep);
intr_event_add_handler(event, name, hand, arg,
intr_priority(flags), flags, cookiep);
}
int
arm_remove_irqhandler(void *cookie)
{
return (ithread_remove_handler(cookie));
return (intr_event_remove_handler(cookie));
}
void dosoftints(void);
@ -101,28 +101,34 @@ dosoftints(void)
void
arm_handler_execute(void *frame, int irqnb)
{
struct ithd *ithd;
int i;
struct intrhand *ih;
struct intr_event *event;
struct intr_handler *ih;
struct thread *td = curthread;
int i, thread;
td->td_intr_nesting_level++;
while ((i = arm_get_next_irq()) != -1) {
arm_mask_irq(i);
intrcnt[intrcnt_tab[i]]++;
ithd = ithreads[i];
if (!ithd)
event = intr_events[i];
if (!event || TAILQ_EMPTY(&event->ie_handlers))
continue;
ih = TAILQ_FIRST(&ithd->it_handlers);
if (ih && ih->ih_flags & IH_FAST) {
TAILQ_FOREACH(ih, &ithd->it_handlers,
ih_next) {
/* Execute fast handlers. */
thread = 0;
TAILQ_FOREACH(ih, &event->ie_handlers, ih_next) {
if (!(ih->ih_flags & IH_FAST))
thread = 1;
else
ih->ih_handler(ih->ih_argument ?
ih->ih_argument : frame);
}
/* Schedule thread if needed. */
if (thread)
intr_event_schedule_thread(event);
else
arm_unmask_irq(i);
} else if (ih)
ithread_schedule(ithd);
}
td->td_intr_nesting_level--;
}

View File

@ -1055,9 +1055,9 @@ determined_type: ;
printf("\n");
if (sio_fast_ih == NULL) {
swi_add(&tty_ithd, "sio", siopoll, NULL, SWI_TTY, 0,
swi_add(&tty_intr_event, "sio", siopoll, NULL, SWI_TTY, 0,
&sio_fast_ih);
swi_add(&clk_ithd, "sio", siopoll, NULL, SWI_CLOCK, 0,
swi_add(&clk_intr_event, "sio", siopoll, NULL, SWI_CLOCK, 0,
&sio_slow_ih);
}

View File

@ -254,7 +254,7 @@ sunkbd_attach(struct uart_softc *sc)
#endif
sunkbd_enable(&sunkbd_softc.sc_kbd);
swi_add(&tty_ithd, uart_driver_name, sunkbd_uart_intr,
swi_add(&tty_intr_event, uart_driver_name, sunkbd_uart_intr,
&sunkbd_softc, SWI_TTY, INTR_TYPE_TTY, &sc->sc_softih);
sc->sc_opened = 1;

View File

@ -377,7 +377,7 @@ uart_tty_attach(struct uart_softc *sc)
ttyconsolemode(tp, 0);
}
swi_add(&tty_ithd, uart_driver_name, uart_tty_intr, sc, SWI_TTY,
swi_add(&tty_intr_event, uart_driver_name, uart_tty_intr, sc, SWI_TTY,
INTR_TYPE_TTY, &sc->sc_softih);
ttycreate(tp, TS_CALLOUT, "u%r", unit);
@ -392,7 +392,7 @@ int uart_tty_detach(struct uart_softc *sc)
tp = sc->sc_u.u_tty.tp;
tp->t_pps = NULL;
ttygone(tp);
ithread_remove_handler(sc->sc_softih);
intr_event_remove_handler(sc->sc_softih);
ttyfree(tp);
return (0);

View File

@ -57,7 +57,7 @@
#define MAX_STRAY_LOG 5
typedef void (*mask_fn)(uintptr_t vector);
typedef void (*mask_fn)(void *);
static int intrcnt_index;
static struct intsrc *interrupt_sources[NUM_IO_INTS];
@ -81,15 +81,14 @@ intr_register_source(struct intsrc *isrc)
vector = isrc->is_pic->pic_vector(isrc);
if (interrupt_sources[vector] != NULL)
return (EEXIST);
error = ithread_create(&isrc->is_ithread, (uintptr_t)isrc, 0,
(mask_fn)isrc->is_pic->pic_disable_source,
error = intr_event_create(&isrc->is_event, isrc, 0,
(mask_fn)isrc->is_pic->pic_enable_source, "irq%d:", vector);
if (error)
return (error);
mtx_lock_spin(&intr_table_lock);
if (interrupt_sources[vector] != NULL) {
mtx_unlock_spin(&intr_table_lock);
ithread_destroy(isrc->is_ithread);
intr_event_destroy(isrc->is_event);
return (EEXIST);
}
intrcnt_register(isrc);
@ -115,8 +114,8 @@ intr_add_handler(const char *name, int vector, driver_intr_t handler,
isrc = intr_lookup_source(vector);
if (isrc == NULL)
return (EINVAL);
error = ithread_add_handler(isrc->is_ithread, name, handler, arg,
ithread_priority(flags), flags, cookiep);
error = intr_event_add_handler(isrc->is_event, name, handler, arg,
intr_priority(flags), flags, cookiep);
if (error == 0) {
intrcnt_updatename(isrc);
isrc->is_pic->pic_enable_intr(isrc);
@ -130,7 +129,7 @@ intr_remove_handler(void *cookie)
{
int error;
error = ithread_remove_handler(cookie);
error = intr_event_remove_handler(cookie);
#ifdef XXX
if (error == 0)
intrcnt_updatename(/* XXX */);
@ -153,12 +152,11 @@ void
intr_execute_handlers(struct intsrc *isrc, struct intrframe *iframe)
{
struct thread *td;
struct ithd *it;
struct intrhand *ih;
int error, vector;
struct intr_event *ie;
struct intr_handler *ih;
int error, vector, thread;
td = curthread;
td->td_intr_nesting_level++;
/*
* We count software interrupts when we process them. The
@ -169,11 +167,7 @@ intr_execute_handlers(struct intsrc *isrc, struct intrframe *iframe)
(*isrc->is_count)++;
PCPU_LAZY_INC(cnt.v_intr);
it = isrc->is_ithread;
if (it == NULL)
ih = NULL;
else
ih = TAILQ_FIRST(&it->it_handlers);
ie = isrc->is_event;
/*
* XXX: We assume that IRQ 0 is only used for the ISA timer
@ -183,40 +177,12 @@ intr_execute_handlers(struct intsrc *isrc, struct intrframe *iframe)
if (vector == 0)
clkintr_pending = 1;
if (ih != NULL && ih->ih_flags & IH_FAST) {
/*
* Execute fast interrupt handlers directly.
* To support clock handlers, if a handler registers
* with a NULL argument, then we pass it a pointer to
* a trapframe as its argument.
*/
critical_enter();
TAILQ_FOREACH(ih, &it->it_handlers, ih_next) {
MPASS(ih->ih_flags & IH_FAST);
CTR3(KTR_INTR, "%s: executing handler %p(%p)",
__func__, ih->ih_handler,
ih->ih_argument == NULL ? iframe :
ih->ih_argument);
if (ih->ih_argument == NULL)
ih->ih_handler(iframe);
else
ih->ih_handler(ih->ih_argument);
}
isrc->is_pic->pic_eoi_source(isrc);
error = 0;
critical_exit();
} else {
/*
* For stray and threaded interrupts, we mask and EOI the
* source.
* For stray interrupts, mask and EOI the source, bump the
* stray count, and log the condition.
*/
if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) {
isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
if (ih == NULL)
error = EINVAL;
else
error = ithread_schedule(it);
}
if (error == EINVAL) {
(*isrc->is_straycount)++;
if (*isrc->is_straycount < MAX_STRAY_LOG)
log(LOG_ERR, "stray irq%d\n", vector);
@ -225,6 +191,46 @@ intr_execute_handlers(struct intsrc *isrc, struct intrframe *iframe)
"too many stray irq %d's: not logging anymore\n",
vector);
}
/*
* Execute fast interrupt handlers directly.
* To support clock handlers, if a handler registers
* with a NULL argument, then we pass it a pointer to
* an intrframe as its argument.
*/
td->td_intr_nesting_level++;
thread = 0;
critical_enter();
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
if (!(ih->ih_flags & IH_FAST)) {
thread = 1;
continue;
}
CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
ih->ih_handler, ih->ih_argument == NULL ? iframe :
ih->ih_argument, ih->ih_name);
if (ih->ih_argument == NULL)
ih->ih_handler(iframe);
else
ih->ih_handler(ih->ih_argument);
}
/*
* If there are any threaded handlers that need to run,
* mask the source as well as sending it an EOI. Otherwise,
* just send it an EOI but leave it unmasked.
*/
if (thread)
isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
else
isrc->is_pic->pic_eoi_source(isrc);
critical_exit();
/* Schedule the ithread if needed. */
if (thread) {
error = intr_event_schedule_thread(ie);
KASSERT(error == 0, ("bad stray interrupt"));
}
td->td_intr_nesting_level--;
}
@ -266,7 +272,7 @@ static void
intrcnt_updatename(struct intsrc *is)
{
intrcnt_setname(is->is_ithread->it_td->td_proc->p_comm, is->is_index);
intrcnt_setname(is->is_event->ie_fullname, is->is_index);
}
static void
@ -275,7 +281,7 @@ intrcnt_register(struct intsrc *is)
char straystr[MAXCOMLEN + 1];
/* mtx_assert(&intr_table_lock, MA_OWNED); */
KASSERT(is->is_ithread != NULL, ("%s: isrc with no ithread", __func__));
KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__));
is->is_index = intrcnt_index;
intrcnt_index += 2;
snprintf(straystr, MAXCOMLEN + 1, "stray irq%d",
@ -325,6 +331,6 @@ DB_SHOW_COMMAND(irqs, db_show_irqs)
db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
for (i = 0; i < NUM_IO_INTS && !quit; i++, isrc++)
if (*isrc != NULL)
db_dump_ithread((*isrc)->is_ithread, verbose);
db_dump_intr_event((*isrc)->is_event, verbose);
}
#endif

View File

@ -75,7 +75,7 @@ enum {
*/
struct intsrc {
struct pic *is_pic;
struct ithd *is_ithread;
struct intr_event *is_event;
u_long *is_count;
u_long *is_straycount;
u_int is_index;

View File

@ -535,10 +535,10 @@ atpic_handle_intr(struct intrframe iframe)
isrc = &atintrs[iframe.if_vec].at_intsrc;
/*
* If we don't have an ithread, see if this is a spurious
* If we don't have an event, see if this is a spurious
* interrupt.
*/
if (isrc->is_ithread == NULL &&
if (isrc->is_event == NULL &&
(iframe.if_vec == 7 || iframe.if_vec == 15)) {
int port, isr;

View File

@ -254,7 +254,7 @@ interrupt(u_int64_t vector, struct trapframe *tf)
#define IA64_HARDWARE_IRQ_BASE 0x20
struct ia64_intr {
struct ithd *ithd; /* interrupt thread */
struct intr_event *event; /* interrupt event */
volatile long *cntp; /* interrupt counter */
};
@ -268,7 +268,7 @@ static void
ithds_init(void *dummy)
{
mtx_init(&ia64_intrs_lock, "ithread table lock", NULL, MTX_SPIN);
mtx_init(&ia64_intrs_lock, "intr table", NULL, MTX_SPIN);
}
SYSINIT(ithds_init, SI_SUB_INTR, SI_ORDER_SECOND, ithds_init, NULL);
@ -291,7 +291,7 @@ ia64_setup_intr(const char *name, int irq, driver_intr_t handler, void *arg,
{
struct ia64_intr *i;
int errcode;
int vector = irq + IA64_HARDWARE_IRQ_BASE;
intptr_t vector = irq + IA64_HARDWARE_IRQ_BASE;
char *intrname;
/*
@ -321,8 +321,8 @@ ia64_setup_intr(const char *name, int irq, driver_intr_t handler, void *arg,
memset(intrname, ' ', INTRNAME_LEN - 1);
bcopy(name, intrname, strlen(name));
}
errcode = ithread_create(&i->ithd, vector, 0, 0,
ia64_send_eoi, "intr:");
errcode = intr_event_create(&i->event, (void *)vector, 0,
(void (*)(void *))ia64_send_eoi, "intr:");
if (errcode) {
free(i, M_DEVBUF);
return errcode;
@ -334,8 +334,8 @@ ia64_setup_intr(const char *name, int irq, driver_intr_t handler, void *arg,
}
/* Second, add this handler. */
errcode = ithread_add_handler(i->ithd, name, handler, arg,
ithread_priority(flags), flags, cookiep);
errcode = intr_event_add_handler(i->event, name, handler, arg,
intr_priority(flags), flags, cookiep);
if (errcode)
return errcode;
@ -346,53 +346,60 @@ int
ia64_teardown_intr(void *cookie)
{
return (ithread_remove_handler(cookie));
return (intr_event_remove_handler(cookie));
}
void
ia64_dispatch_intr(void *frame, unsigned long vector)
{
struct ia64_intr *i;
struct ithd *ithd; /* our interrupt thread */
struct intrhand *ih;
int error;
struct intr_event *ie; /* our interrupt event */
struct intr_handler *ih;
int error, thread;
/*
* Find the interrupt thread for this vector.
*/
i = ia64_intrs[vector];
if (i == NULL)
return; /* no ithread for this vector */
return; /* no event for this vector */
if (i->cntp)
atomic_add_long(i->cntp, 1);
ithd = i->ithd;
KASSERT(ithd != NULL, ("interrupt vector without a thread"));
ie = i->event;
KASSERT(ie != NULL, ("interrupt vector without an event"));
/*
* As an optimization, if an ithread has no handlers, don't
* As an optimization, if an event has no handlers, don't
* schedule it to run.
*/
if (TAILQ_EMPTY(&ithd->it_handlers))
if (TAILQ_EMPTY(&ie->ie_handlers))
return;
/*
* Handle a fast interrupt if there is no actual thread for this
* interrupt by calling the handler directly without Giant. Note
* Execute all fast interrupt handlers directly without Giant. Note
* that this means that any fast interrupt handler must be MP safe.
*/
ih = TAILQ_FIRST(&ithd->it_handlers);
if ((ih->ih_flags & IH_FAST) != 0) {
thread = 0;
critical_enter();
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
if (!(ih->ih_flags & IH_FAST)) {
thread = 1;
continue;
}
CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
ih->ih_handler, ih->ih_argument, ih->ih_name);
ih->ih_handler(ih->ih_argument);
ia64_send_eoi(vector);
critical_exit();
return;
}
critical_exit();
error = ithread_schedule(ithd);
if (thread) {
error = intr_event_schedule_thread(ie);
KASSERT(error == 0, ("got an impossible stray interrupt"));
} else
ia64_send_eoi(vector);
}
#ifdef DDB

View File

@ -439,7 +439,8 @@ statclock(frame)
* so that we know how much of its real time was spent
* in ``non-process'' (i.e., interrupt) work.
*/
if ((td->td_ithd != NULL) || td->td_intr_nesting_level >= 2) {
if ((td->td_pflags & TDP_ITHREAD) ||
td->td_intr_nesting_level >= 2) {
p->p_rux.rux_iticks++;
cp_time[CP_INTR]++;
} else {
@ -447,7 +448,7 @@ statclock(frame)
thread_statclock(0);
td->td_sticks++;
p->p_rux.rux_sticks++;
if (p != PCPU_GET(idlethread)->td_proc)
if (td != PCPU_GET(idlethread))
cp_time[CP_SYS]++;
else
cp_time[CP_IDLE]++;

View File

@ -58,13 +58,26 @@ __FBSDID("$FreeBSD$");
#include <ddb/db_sym.h>
#endif
struct int_entropy {
struct proc *proc;
uintptr_t vector;
/*
* Describe an interrupt thread. There is one of these per interrupt event.
*/
struct intr_thread {
struct intr_event *it_event;
struct thread *it_thread; /* Kernel thread. */
int it_flags; /* (j) IT_* flags. */
int it_need; /* Needs service. */
};
struct ithd *clk_ithd;
struct ithd *tty_ithd;
/* Interrupt thread flags kept in it_flags */
#define IT_DEAD 0x000001 /* Thread is waiting to exit. */
struct intr_entropy {
struct thread *td;
uintptr_t event;
};
struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void *softclock_ih;
void *vm_ih;
@ -75,13 +88,21 @@ TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
&intr_storm_threshold, 0,
"Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
TAILQ_HEAD_INITIALIZER(event_list);
static void intr_event_update(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#ifdef notyet
static void ithread_destroy(struct intr_thread *ithread);
#endif
static void ithread_execute_handlers(struct proc *p, struct intr_event *ie);
static void ithread_loop(void *);
static void ithread_update(struct ithd *);
static void ithread_update(struct intr_thread *ithd);
static void start_softintr(void *);
u_char
ithread_priority(enum intr_type flags)
intr_priority(enum intr_type flags)
{
u_char pri;
@ -115,153 +136,206 @@ ithread_priority(enum intr_type flags)
break;
default:
/* We didn't specify an interrupt level. */
panic("ithread_priority: no interrupt type in flags");
panic("intr_priority: no interrupt type in flags");
}
return pri;
}
/*
* Regenerate the name (p_comm) and priority for a threaded interrupt thread.
* Update an ithread based on the associated intr_event.
*/
static void
ithread_update(struct ithd *ithd)
ithread_update(struct intr_thread *ithd)
{
struct intrhand *ih;
struct intr_event *ie;
struct thread *td;
struct proc *p;
int missed;
u_char pri;
mtx_assert(&ithd->it_lock, MA_OWNED);
td = ithd->it_td;
if (td == NULL)
return;
p = td->td_proc;
ie = ithd->it_event;
td = ithd->it_thread;
strlcpy(p->p_comm, ithd->it_name, sizeof(p->p_comm));
ithd->it_flags &= ~IT_ENTROPY;
/* Determine the overall priority of this event. */
if (TAILQ_EMPTY(&ie->ie_handlers))
pri = PRI_MAX_ITHD;
else
pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
ih = TAILQ_FIRST(&ithd->it_handlers);
if (ih == NULL) {
/* Update name and priority. */
strlcpy(td->td_proc->p_comm, ie->ie_fullname,
sizeof(td->td_proc->p_comm));
mtx_lock_spin(&sched_lock);
sched_prio(td, PRI_MAX_ITHD);
sched_prio(td, pri);
mtx_unlock_spin(&sched_lock);
return;
}
mtx_lock_spin(&sched_lock);
sched_prio(td, ih->ih_pri);
mtx_unlock_spin(&sched_lock);
/*
* Regenerate the full name of an interrupt event and update its priority.
*/
static void
intr_event_update(struct intr_event *ie)
{
struct intr_handler *ih;
char *last;
int missed, space;
/* Start off with no entropy and just the name of the event. */
mtx_assert(&ie->ie_lock, MA_OWNED);
strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
ie->ie_flags &= ~IE_ENTROPY;
missed = 0;
TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
if (strlen(p->p_comm) + strlen(ih->ih_name) + 1 <
sizeof(p->p_comm)) {
strcat(p->p_comm, " ");
strcat(p->p_comm, ih->ih_name);
space = 1;
/* Run through all the handlers updating values. */
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
sizeof(ie->ie_fullname)) {
strcat(ie->ie_fullname, " ");
strcat(ie->ie_fullname, ih->ih_name);
space = 0;
} else
missed++;
if (ih->ih_flags & IH_ENTROPY)
ithd->it_flags |= IT_ENTROPY;
ie->ie_flags |= IE_ENTROPY;
}
/*
* If the handler names were too long, add +'s to indicate missing
* names. If we run out of room and still have +'s to add, change
* the last character from a + to a *.
*/
last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
while (missed-- > 0) {
if (strlen(p->p_comm) + 1 == sizeof(p->p_comm)) {
if (p->p_comm[sizeof(p->p_comm) - 2] == '+')
p->p_comm[sizeof(p->p_comm) - 2] = '*';
else
p->p_comm[sizeof(p->p_comm) - 2] = '+';
if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
if (*last == '+') {
*last = '*';
break;
} else
strcat(p->p_comm, "+");
*last = '+';
} else if (space) {
strcat(ie->ie_fullname, " +");
space = 0;
} else
strcat(ie->ie_fullname, "+");
}
CTR2(KTR_INTR, "%s: updated %s", __func__, p->p_comm);
/*
* If this event has an ithread, update it's priority and
* name.
*/
if (ie->ie_thread != NULL)
ithread_update(ie->ie_thread);
CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}
int
ithread_create(struct ithd **ithread, uintptr_t vector, int flags,
void (*disable)(uintptr_t), void (*enable)(uintptr_t), const char *fmt, ...)
intr_event_create(struct intr_event **event, void *source, int flags,
void (*enable)(void *), const char *fmt, ...)
{
struct ithd *ithd;
struct thread *td;
struct proc *p;
int error;
struct intr_event *ie;
va_list ap;
/* The only valid flag during creation is IT_SOFT. */
if ((flags & ~IT_SOFT) != 0)
/* The only valid flag during creation is IE_SOFT. */
if ((flags & ~IE_SOFT) != 0)
return (EINVAL);
ithd = malloc(sizeof(struct ithd), M_ITHREAD, M_WAITOK | M_ZERO);
ithd->it_vector = vector;
ithd->it_disable = disable;
ithd->it_enable = enable;
ithd->it_flags = flags;
TAILQ_INIT(&ithd->it_handlers);
mtx_init(&ithd->it_lock, "ithread", NULL, MTX_DEF);
ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
ie->ie_source = source;
ie->ie_enable = enable;
ie->ie_flags = flags;
TAILQ_INIT(&ie->ie_handlers);
mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
va_start(ap, fmt);
vsnprintf(ithd->it_name, sizeof(ithd->it_name), fmt, ap);
vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
va_end(ap);
error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
0, "%s", ithd->it_name);
if (error) {
mtx_destroy(&ithd->it_lock);
free(ithd, M_ITHREAD);
return (error);
}
td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */
mtx_lock_spin(&sched_lock);
td->td_ksegrp->kg_pri_class = PRI_ITHD;
td->td_priority = PRI_MAX_ITHD;
TD_SET_IWAIT(td);
mtx_unlock_spin(&sched_lock);
ithd->it_td = td;
td->td_ithd = ithd;
if (ithread != NULL)
*ithread = ithd;
CTR2(KTR_INTR, "%s: created %s", __func__, ithd->it_name);
strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
mtx_pool_lock(mtxpool_sleep, &event_list);
TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
mtx_pool_unlock(mtxpool_sleep, &event_list);
if (event != NULL)
*event = ie;
CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
return (0);
}
int
ithread_destroy(struct ithd *ithread)
intr_event_destroy(struct intr_event *ie)
{
struct thread *td;
if (ithread == NULL)
return (EINVAL);
td = ithread->it_td;
mtx_lock(&ithread->it_lock);
if (!TAILQ_EMPTY(&ithread->it_handlers)) {
mtx_unlock(&ithread->it_lock);
return (EINVAL);
mtx_lock(&ie->ie_lock);
if (!TAILQ_EMPTY(&ie->ie_handlers)) {
mtx_unlock(&ie->ie_lock);
return (EBUSY);
}
ithread->it_flags |= IT_DEAD;
mtx_pool_lock(mtxpool_sleep, &event_list);
TAILQ_REMOVE(&event_list, ie, ie_list);
mtx_pool_unlock(mtxpool_sleep, &event_list);
mtx_unlock(&ie->ie_lock);
mtx_destroy(&ie->ie_lock);
free(ie, M_ITHREAD);
return (0);
}
/*
 * Create the backing kernel thread for an interrupt event.  The thread is
 * created stopped (RFSTOPPED) and parked in the IWAIT state; it only runs
 * once intr_event_schedule_thread() puts it on the run queue.  A creation
 * failure here is fatal: interrupt delivery cannot work without it.
 */
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	struct proc *p;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
	error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
	    0, "%s", name);
	if (error)
		panic("kthread_create() failed with %d", error);
	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
	/* Mark the thread as an ithread and leave it waiting for interrupts. */
	mtx_lock_spin(&sched_lock);
	td->td_ksegrp->kg_pri_class = PRI_ITHD;
	TD_SET_IWAIT(td);
	mtx_unlock_spin(&sched_lock);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#ifdef notyet
/*
 * Kill an interrupt thread that is no longer needed.  Marks it IT_DEAD and
 * kicks it out of its interrupt wait so that ithread_loop() notices the
 * flag, frees the struct intr_thread, and exits.
 */
static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	/* NOTE(review): assumes it_event is set — confirm against callers. */
	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	mtx_lock_spin(&sched_lock);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		setrunqueue(td, SRQ_INTR);
	}
	mtx_unlock_spin(&sched_lock);
}
#endif
int
ithread_add_handler(struct ithd* ithread, const char *name,
intr_event_add_handler(struct intr_event *ie, const char *name,
driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
void **cookiep)
{
struct intrhand *ih, *temp_ih;
struct intr_handler *ih, *temp_ih;
struct intr_thread *it;
if (ithread == NULL || name == NULL || handler == NULL)
if (ie == NULL || name == NULL || handler == NULL)
return (EINVAL);
ih = malloc(sizeof(struct intrhand), M_ITHREAD, M_WAITOK | M_ZERO);
/* Allocate and populate an interrupt handler structure. */
ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
ih->ih_handler = handler;
ih->ih_argument = arg;
ih->ih_name = name;
ih->ih_ithread = ithread;
ih->ih_event = ie;
ih->ih_pri = pri;
if (flags & INTR_FAST)
ih->ih_flags = IH_FAST;
@ -272,68 +346,96 @@ ithread_add_handler(struct ithd* ithread, const char *name,
if (flags & INTR_ENTROPY)
ih->ih_flags |= IH_ENTROPY;
mtx_lock(&ithread->it_lock);
if ((flags & INTR_EXCL) != 0 && !TAILQ_EMPTY(&ithread->it_handlers))
goto fail;
if (!TAILQ_EMPTY(&ithread->it_handlers)) {
temp_ih = TAILQ_FIRST(&ithread->it_handlers);
if (temp_ih->ih_flags & IH_EXCLUSIVE)
goto fail;
if ((ih->ih_flags & IH_FAST) && !(temp_ih->ih_flags & IH_FAST))
goto fail;
if (!(ih->ih_flags & IH_FAST) && (temp_ih->ih_flags & IH_FAST))
goto fail;
}
TAILQ_FOREACH(temp_ih, &ithread->it_handlers, ih_next)
if (temp_ih->ih_pri > ih->ih_pri)
break;
if (temp_ih == NULL)
TAILQ_INSERT_TAIL(&ithread->it_handlers, ih, ih_next);
else
TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
ithread_update(ithread);
mtx_unlock(&ithread->it_lock);
if (cookiep != NULL)
*cookiep = ih;
CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
ithread->it_name);
return (0);
fail:
mtx_unlock(&ithread->it_lock);
/* We can only have one exclusive handler in a event. */
mtx_lock(&ie->ie_lock);
if (!TAILQ_EMPTY(&ie->ie_handlers)) {
if ((flags & INTR_EXCL) ||
(TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
mtx_unlock(&ie->ie_lock);
free(ih, M_ITHREAD);
return (EINVAL);
}
}
/* Add the new handler to the event in priority order. */
TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
if (temp_ih->ih_pri > ih->ih_pri)
break;
}
if (temp_ih == NULL)
TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
else
TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
intr_event_update(ie);
/* Create a thread if we need one. */
while (ie->ie_thread == NULL && !(flags & INTR_FAST)) {
if (ie->ie_flags & IE_ADDING_THREAD)
msleep(ie, &ie->ie_lock, curthread->td_priority,
"ithread", 0);
else {
ie->ie_flags |= IE_ADDING_THREAD;
mtx_unlock(&ie->ie_lock);
it = ithread_create("intr: newborn");
mtx_lock(&ie->ie_lock);
ie->ie_flags &= ~IE_ADDING_THREAD;
ie->ie_thread = it;
it->it_event = ie;
ithread_update(it);
wakeup(ie);
}
}
CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
ie->ie_name);
mtx_unlock(&ie->ie_lock);
if (cookiep != NULL)
*cookiep = ih;
return (0);
}
int
ithread_remove_handler(void *cookie)
intr_event_remove_handler(void *cookie)
{
struct intrhand *handler = (struct intrhand *)cookie;
struct ithd *ithread;
struct intr_handler *handler = (struct intr_handler *)cookie;
struct intr_event *ie;
#ifdef INVARIANTS
struct intrhand *ih;
struct intr_handler *ih;
#endif
#ifdef notyet
int dead;
#endif
if (handler == NULL)
return (EINVAL);
ithread = handler->ih_ithread;
KASSERT(ithread != NULL,
("interrupt handler \"%s\" has a NULL interrupt thread",
ie = handler->ih_event;
KASSERT(ie != NULL,
("interrupt handler \"%s\" has a NULL interrupt event",
handler->ih_name));
mtx_lock(&ie->ie_lock);
CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
ithread->it_name);
mtx_lock(&ithread->it_lock);
ie->ie_name);
#ifdef INVARIANTS
TAILQ_FOREACH(ih, &ithread->it_handlers, ih_next)
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
if (ih == handler)
goto ok;
mtx_unlock(&ithread->it_lock);
panic("interrupt handler \"%s\" not found in interrupt thread \"%s\"",
ih->ih_name, ithread->it_name);
mtx_unlock(&ie->ie_lock);
panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
ih->ih_name, ie->ie_name);
ok:
#endif
/*
* If there is no ithread, then just remove the handler and return.
* XXX: Note that an INTR_FAST handler might be running on another
* CPU!
*/
if (ie->ie_thread == NULL) {
TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
mtx_unlock(&ie->ie_lock);
free(handler, M_ITHREAD);
return (0);
}
/*
* If the interrupt thread is already running, then just mark this
* handler as being dead and let the ithread do the actual removal.
@ -343,7 +445,7 @@ ithread_remove_handler(void *cookie)
* thread do it.
*/
mtx_lock_spin(&sched_lock);
if (!TD_AWAITING_INTR(ithread->it_td) && !cold) {
if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
handler->ih_flags |= IH_DEAD;
/*
@ -351,22 +453,42 @@ ithread_remove_handler(void *cookie)
* again and remove this handler if it has already passed
* it on the list.
*/
ithread->it_need = 1;
ie->ie_thread->it_need = 1;
} else
TAILQ_REMOVE(&ithread->it_handlers, handler, ih_next);
TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
mtx_unlock_spin(&sched_lock);
if ((handler->ih_flags & IH_DEAD) != 0)
msleep(handler, &ithread->it_lock, PUSER, "itrmh", 0);
ithread_update(ithread);
mtx_unlock(&ithread->it_lock);
while (handler->ih_flags & IH_DEAD)
msleep(handler, &ie->ie_lock, curthread->td_priority, "iev_rmh",
0);
intr_event_update(ie);
#ifdef notyet
/*
* XXX: This could be bad in the case of ppbus(8). Also, I think
* this could lead to races of stale data when servicing an
* interrupt.
*/
dead = 1;
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
if (!(ih->ih_flags & IH_FAST)) {
dead = 0;
break;
}
}
if (dead) {
ithread_destroy(ie->ie_thread);
ie->ie_thread = NULL;
}
#endif
mtx_unlock(&ie->ie_lock);
free(handler, M_ITHREAD);
return (0);
}
int
ithread_schedule(struct ithd *ithread)
intr_event_schedule_thread(struct intr_event *ie)
{
struct int_entropy entropy;
struct intr_entropy entropy;
struct intr_thread *it;
struct thread *td;
struct thread *ctd;
struct proc *p;
@ -374,43 +496,45 @@ ithread_schedule(struct ithd *ithread)
/*
* If no ithread or no handlers, then we have a stray interrupt.
*/
if ((ithread == NULL) || TAILQ_EMPTY(&ithread->it_handlers))
if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
ie->ie_thread == NULL)
return (EINVAL);
ctd = curthread;
td = ithread->it_td;
it = ie->ie_thread;
td = it->it_thread;
p = td->td_proc;
/*
* If any of the handlers for this ithread claim to be good
* sources of entropy, then gather some.
*/
if (harvest.interrupt && ithread->it_flags & IT_ENTROPY) {
if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
p->p_pid, p->p_comm);
entropy.vector = ithread->it_vector;
entropy.proc = ctd->td_proc;
entropy.event = (uintptr_t)ie;
entropy.td = ctd;
random_harvest(&entropy, sizeof(entropy), 2, 0,
RANDOM_INTERRUPT);
}
KASSERT(p != NULL, ("ithread %s has no process", ithread->it_name));
CTR4(KTR_INTR, "%s: pid %d: (%s) need = %d",
__func__, p->p_pid, p->p_comm, ithread->it_need);
KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
/*
* Set it_need to tell the thread to keep running if it is already
* running. Then, grab sched_lock and see if we actually need to
* put this thread on the runqueue.
*/
ithread->it_need = 1;
it->it_need = 1;
mtx_lock_spin(&sched_lock);
if (TD_AWAITING_INTR(td)) {
CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
p->p_comm);
TD_CLR_IWAIT(td);
setrunqueue(td, SRQ_INTR);
} else {
CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",
__func__, p->p_pid, ithread->it_need, td->td_state);
CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
__func__, p->p_pid, p->p_comm, it->it_need, td->td_state);
}
mtx_unlock_spin(&sched_lock);
@ -418,49 +542,47 @@ ithread_schedule(struct ithd *ithread)
}
/*
 * Add a handler to a software interrupt event, creating the backing event
 * first if the caller did not supply one.  INTR_FAST and INTR_ENTROPY are
 * meaningless for software interrupts and are rejected.  On success the
 * newly created event (if any) is returned via *eventp and the handler
 * cookie via *cookiep.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & (INTR_FAST | INTR_ENTROPY))
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		/* A caller-supplied event must itself be a software event. */
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, NULL,
		    "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	/* XXKSE.. think of a better way to get separate queues */
	return (intr_event_add_handler(ie, name, handler, arg,
	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
}
/*
* Schedule a software interrupt thread.
*/
void
swi_sched(void *cookie, int flags)
{
struct intrhand *ih = (struct intrhand *)cookie;
struct ithd *it = ih->ih_ithread;
struct intr_handler *ih = (struct intr_handler *)cookie;
struct intr_event *ie = ih->ih_event;
int error;
PCPU_LAZY_INC(cnt.v_intr);
CTR3(KTR_INTR, "swi_sched pid %d(%s) need=%d",
it->it_td->td_proc->p_pid, it->it_td->td_proc->p_comm, it->it_need);
CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
ih->ih_need);
/*
* Set ih_need for this handler so that if the ithread is already
@ -469,30 +591,111 @@ swi_sched(void *cookie, int flags)
*/
atomic_store_rel_int(&ih->ih_need, 1);
if (!(flags & SWI_DELAY)) {
error = ithread_schedule(it);
error = intr_event_schedule_thread(ie);
KASSERT(error == 0, ("stray software interrupt"));
}
}
/*
 * Run every threaded handler attached to an interrupt event, then apply
 * interrupt-storm throttling and re-enable the interrupt source.  Called
 * from ithread_loop() with no locks held; handlers themselves are invoked
 * without ie_lock, taking Giant around non-MPSAFE handlers.
 */
static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {

		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Fast handlers are handled in primary interrupt context. */
		if (ih->ih_flags & IH_FAST)
			continue;

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
		    ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold) {
		/* Warn only once per storm to avoid flooding the console. */
		if (ie->ie_warned == 0) {
			printf(
	"Interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
			ie->ie_warned = 1;
		}
		tsleep(&ie->ie_count, curthread->td_priority, "istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_enable != NULL)
		ie->ie_enable(ie->ie_source);
}
/*
* This is the main code for interrupt threads.
*/
static void
ithread_loop(void *arg)
{
struct ithd *ithd; /* our thread context */
struct intrhand *ih; /* and our interrupt handler chain */
struct intr_thread *ithd;
struct intr_event *ie;
struct thread *td;
struct proc *p;
int count, warned;
td = curthread;
p = td->td_proc;
ithd = (struct ithd *)arg; /* point to myself */
KASSERT(ithd->it_td == td && td->td_ithd == ithd,
ithd = (struct intr_thread *)arg;
KASSERT(ithd->it_thread == td,
("%s: ithread and proc linkage out of sync", __func__));
count = 0;
warned = 0;
ie = ithd->it_event;
ie->ie_count = 0;
/*
* As long as we have interrupts outstanding, go through the
@ -503,80 +706,26 @@ ithread_loop(void *arg)
* If we are an orphaned thread, then just die.
*/
if (ithd->it_flags & IT_DEAD) {
CTR3(KTR_INTR, "%s: pid %d: (%s) exiting", __func__,
CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
p->p_pid, p->p_comm);
td->td_ithd = NULL;
mtx_destroy(&ithd->it_lock);
free(ithd, M_ITHREAD);
kthread_exit(0);
}
CTR4(KTR_INTR, "%s: pid %d: (%s) need=%d", __func__,
p->p_pid, p->p_comm, ithd->it_need);
/*
* Service interrupts. If another interrupt arrives while
* we are running, it will set it_need to note that we
* should make another pass.
*/
while (ithd->it_need) {
/*
* Service interrupts. If another interrupt
* arrives while we are running, they will set
* it_need to denote that we should make
* another pass.
* This might need a full read and write barrier
* to make sure that this write posts before any
* of the memory or device accesses in the
* handlers.
*/
atomic_store_rel_int(&ithd->it_need, 0);
if (!(ithd->it_flags & IT_SOFT))
THREAD_NO_SLEEPING();
restart:
TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
if (ithd->it_flags & IT_SOFT && !ih->ih_need)
continue;
atomic_store_rel_int(&ih->ih_need, 0);
CTR6(KTR_INTR,
"%s: pid %d ih=%p: %p(%p) flg=%x", __func__,
p->p_pid, (void *)ih,
(void *)ih->ih_handler, ih->ih_argument,
ih->ih_flags);
if ((ih->ih_flags & IH_DEAD) != 0) {
mtx_lock(&ithd->it_lock);
TAILQ_REMOVE(&ithd->it_handlers, ih,
ih_next);
wakeup(ih);
mtx_unlock(&ithd->it_lock);
goto restart;
}
if ((ih->ih_flags & IH_MPSAFE) == 0)
mtx_lock(&Giant);
ih->ih_handler(ih->ih_argument);
if ((ih->ih_flags & IH_MPSAFE) == 0)
mtx_unlock(&Giant);
}
if (!(ithd->it_flags & IT_SOFT))
THREAD_SLEEPING_OK();
/*
* Interrupt storm handling:
*
* If this interrupt source is currently storming,
* then throttle it to only fire the handler once
* per clock tick.
*
* If this interrupt source is not currently
* storming, but the number of back to back
* interrupts exceeds the storm threshold, then
* enter storming mode.
*/
if (intr_storm_threshold != 0 &&
count >= intr_storm_threshold) {
if (!warned) {
printf(
"Interrupt storm detected on \"%s\"; throttling interrupt source\n",
p->p_comm);
warned = 1;
}
tsleep(&count, td->td_priority, "istorm", 1);
} else
count++;
if (ithd->it_enable != NULL)
ithd->it_enable(ithd->it_vector);
ithread_execute_handlers(p, ie);
}
WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
mtx_assert(&Giant, MA_NOTOWNED);
@ -587,12 +736,10 @@ ithread_loop(void *arg)
* set again, so we have to check it again.
*/
mtx_lock_spin(&sched_lock);
if (!ithd->it_need) {
if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
TD_SET_IWAIT(td);
count = 0;
CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
ie->ie_count = 0;
mi_switch(SW_VOL, NULL);
CTR2(KTR_INTR, "%s: pid %d: resumed", __func__, p->p_pid);
}
mtx_unlock_spin(&sched_lock);
}
@ -603,7 +750,7 @@ ithread_loop(void *arg)
* Dump details about an interrupt handler
*/
static void
db_dump_intrhand(struct intrhand *ih)
db_dump_intrhand(struct intr_handler *ih)
{
int comma;
@ -686,41 +833,42 @@ db_dump_intrhand(struct intrhand *ih)
}
/*
* Dump details about an interrupt event.
*/
void
db_dump_ithread(struct ithd *ithd, int handlers)
db_dump_intr_event(struct intr_event *ie, int handlers)
{
struct proc *p;
struct intrhand *ih;
struct intr_handler *ih;
struct intr_thread *it;
int comma;
if (ithd->it_td != NULL) {
p = ithd->it_td->td_proc;
db_printf("%s (pid %d)", p->p_comm, p->p_pid);
} else
db_printf("%s: (no thread)", ithd->it_name);
if ((ithd->it_flags & (IT_SOFT | IT_ENTROPY | IT_DEAD)) != 0 ||
ithd->it_need) {
db_printf("%s ", ie->ie_fullname);
it = ie->ie_thread;
if (it != NULL)
db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
else
db_printf("(no thread)");
if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
(it != NULL && it->it_need)) {
db_printf(" {");
comma = 0;
if (ithd->it_flags & IT_SOFT) {
if (ie->ie_flags & IE_SOFT) {
db_printf("SOFT");
comma = 1;
}
if (ithd->it_flags & IT_ENTROPY) {
if (ie->ie_flags & IE_ENTROPY) {
if (comma)
db_printf(", ");
db_printf("ENTROPY");
comma = 1;
}
if (ithd->it_flags & IT_DEAD) {
if (ie->ie_flags & IE_ADDING_THREAD) {
if (comma)
db_printf(", ");
db_printf("DEAD");
db_printf("ADDING_THREAD");
comma = 1;
}
if (ithd->it_need) {
if (it != NULL && it->it_need) {
if (comma)
db_printf(", ");
db_printf("NEED");
@ -730,9 +878,28 @@ db_dump_ithread(struct ithd *ithd, int handlers)
db_printf("\n");
if (handlers)
TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next)
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
db_dump_intrhand(ih);
}
/*
 * DDB command: 'show intr' walks the global event_list dumping each
 * interrupt event.  The '/v' modifier also dumps each event's handlers;
 * '/a' includes events that currently have no handlers attached.
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int quit, all, verbose;

	quit = 0;
	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		/* By default skip events with no handlers. */
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
	}
}
#endif /* DDB */
/*
@ -743,12 +910,12 @@ start_softintr(void *dummy)
{
struct proc *p;
if (swi_add(&clk_ithd, "clock", softclock, NULL, SWI_CLOCK,
if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK,
INTR_MPSAFE, &softclock_ih) ||
swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
panic("died while creating standard software ithreads");
p = clk_ithd->it_td->td_proc;
p = clk_intr_event->ie_thread->it_thread->td_proc;
PROC_LOCK(p);
p->p_flag |= P_NOLOAD;
PROC_UNLOCK(p);

View File

@ -376,7 +376,6 @@ static struct witness_order_list_entry order_lists[] = {
{ "ng_worklist", &lock_class_mtx_spin },
{ "taskqueue_fast", &lock_class_mtx_spin },
{ "intr table", &lock_class_mtx_spin },
{ "ithread table lock", &lock_class_mtx_spin },
{ "sleepq chain", &lock_class_mtx_spin },
{ "sched lock", &lock_class_mtx_spin },
{ "turnstile chain", &lock_class_mtx_spin },

View File

@ -30,12 +30,12 @@
typedef void ih_func_t(void *);
struct ithd;
struct intr_event;
struct intr_handler {
struct ppc_intr_handler {
ih_func_t *ih_func;
void *ih_arg;
struct ithd *ih_ithd;
struct intr_event *ih_event;
u_int ih_irq;
u_int ih_flags;
u_int ih_index;

View File

@ -85,7 +85,7 @@ MALLOC_DEFINE(M_INTR, "intr", "interrupt handler data");
static int intr_initialized = 0;
static u_int intr_nirq;
static struct intr_handler *intr_handlers;
static struct ppc_intr_handler *intr_handlers;
static struct mtx intr_table_lock;
@ -100,7 +100,7 @@ static void (*irq_enable)(uintptr_t);
static void (*irq_disable)(uintptr_t);
static void intrcnt_setname(const char *name, int index);
static void intrcnt_updatename(struct intr_handler *ih);
static void intrcnt_updatename(struct ppc_intr_handler *ih);
static void
intrcnt_setname(const char *name, int index)
@ -110,18 +110,18 @@ intrcnt_setname(const char *name, int index)
}
static void
intrcnt_updatename(struct intr_handler *ih)
intrcnt_updatename(struct ppc_intr_handler *ih)
{
intrcnt_setname(ih->ih_ithd->it_td->td_proc->p_comm, ih->ih_index);
intrcnt_setname(ih->ih_event->ie_fullname, ih->ih_index);
}
static void
intrcnt_register(struct intr_handler *ih)
intrcnt_register(struct ppc_intr_handler *ih)
{
char straystr[MAXCOMLEN + 1];
KASSERT(ih->ih_ithd != NULL,
("%s: intr_handler with no ithread", __func__));
KASSERT(ih->ih_event != NULL,
("%s: ppc_intr_handler with no event", __func__));
ih->ih_index = intrcnt_index;
intrcnt_index += 2;
@ -145,7 +145,7 @@ intr_init(void (*handler)(void), int nirq, void (*irq_e)(uintptr_t),
intr_initialized++;
intr_nirq = nirq;
intr_handlers = malloc(nirq * sizeof(struct intr_handler), M_INTR,
intr_handlers = malloc(nirq * sizeof(struct ppc_intr_handler), M_INTR,
M_NOWAIT|M_ZERO);
if (intr_handlers == NULL)
panic("intr_init: unable to allocate interrupt handler array");
@ -172,7 +172,7 @@ intr_init(void (*handler)(void), int nirq, void (*irq_e)(uintptr_t),
irq_enable = irq_e;
irq_disable = irq_d;
mtx_init(&intr_table_lock, "ithread table lock", NULL, MTX_SPIN);
mtx_init(&intr_table_lock, "intr table", NULL, MTX_SPIN);
}
void
@ -195,10 +195,10 @@ int
inthand_add(const char *name, u_int irq, void (*handler)(void *), void *arg,
int flags, void **cookiep)
{
struct intr_handler *ih;
struct ithd *ithd, *orphan;
struct ppc_intr_handler *ih;
struct intr_event *event, *orphan;
int error = 0;
int created_ithd = 0;
int created_event = 0;
/*
* Work around a race where more than one CPU may be registering
@ -206,30 +206,33 @@ inthand_add(const char *name, u_int irq, void (*handler)(void *), void *arg,
*/
ih = &intr_handlers[irq];
mtx_lock_spin(&intr_table_lock);
ithd = ih->ih_ithd;
event = ih->ih_event;
mtx_unlock_spin(&intr_table_lock);
if (ithd == NULL) {
error = ithread_create(&ithd, irq, 0, irq_disable,
irq_enable, "irq%d:", irq);
if (event == NULL) {
error = intr_event_create(&event, (void *)irq, 0,
(void (*)(void *))irq_enable, "irq%d:", irq);
if (error)
return (error);
mtx_lock_spin(&intr_table_lock);
if (ih->ih_ithd == NULL) {
ih->ih_ithd = ithd;
created_ithd++;
if (ih->ih_event == NULL) {
ih->ih_event = event;
created_event++;
mtx_unlock_spin(&intr_table_lock);
} else {
orphan = ithd;
ithd = ih->ih_ithd;
orphan = event;
event = ih->ih_event;
mtx_unlock_spin(&intr_table_lock);
ithread_destroy(orphan);
intr_event_destroy(orphan);
}
}
error = ithread_add_handler(ithd, name, handler, arg,
ithread_priority(flags), flags, cookiep);
/* XXX: Should probably fix support for multiple FAST. */
if (flags & INTR_FAST)
flags |= INTR_EXCL;
error = intr_event_add_handler(event, name, handler, arg,
intr_priority(flags), flags, cookiep);
if ((flags & INTR_FAST) == 0 || error) {
intr_setup(irq, sched_ithd, ih, flags);
@ -250,17 +253,17 @@ inthand_add(const char *name, u_int irq, void (*handler)(void *), void *arg,
int
inthand_remove(u_int irq, void *cookie)
{
struct intr_handler *ih;
struct ppc_intr_handler *ih;
int error;
error = ithread_remove_handler(cookie);
error = intr_event_remove_handler(cookie);
if (error == 0) {
ih = &intr_handlers[irq];
mtx_lock_spin(&intr_table_lock);
if (ih->ih_ithd == NULL) {
if (ih->ih_event == NULL) {
intr_setup(irq, intr_stray_handler, ih, 0);
} else {
intr_setup(irq, sched_ithd, ih, 0);
@ -286,9 +289,9 @@ intr_handle(u_int irq)
static void
intr_stray_handler(void *cookie)
{
struct intr_handler *ih;
struct ppc_intr_handler *ih;
ih = (struct intr_handler *)cookie;
ih = (struct ppc_intr_handler *)cookie;
if (*intr_handlers[ih->ih_irq].ih_straycount < MAX_STRAY_LOG) {
printf("stray irq %d\n", ih->ih_irq);
@ -303,12 +306,12 @@ intr_stray_handler(void *cookie)
static void
sched_ithd(void *cookie)
{
struct intr_handler *ih;
struct ppc_intr_handler *ih;
int error;
ih = (struct intr_handler *)cookie;
ih = (struct ppc_intr_handler *)cookie;
error = ithread_schedule(ih->ih_ithd);
error = intr_event_schedule_thread(ih->ih_event);
if (error == EINVAL)
intr_stray_handler(ih);

View File

@ -67,7 +67,7 @@ struct intr_request {
struct intr_vector {
	iv_func_t	*iv_func;	/* Vector dispatch function. */
	void		*iv_arg;	/* Argument passed to iv_func. */
	struct intr_event *iv_event;	/* Event for this vector, if any. */
	u_int		iv_pri;
	u_int		iv_vec;
};

View File

@ -227,38 +227,44 @@ void
intr_init2()
{

	/* Initialize the table lock exactly once. */
	mtx_init(&intr_table_lock, "intr table", NULL, MTX_SPIN);
}
static void
intr_execute_handlers(void *cookie)
{
struct intr_vector *iv;
struct ithd *ithd;
struct intrhand *ih;
int error;
struct intr_event *ie;
struct intr_handler *ih;
int error, thread;
iv = cookie;
ithd = iv->iv_ithd;
if (ithd == NULL)
ih = NULL;
else
ih = TAILQ_FIRST(&ithd->it_handlers);
if (ih != NULL && ih->ih_flags & IH_FAST) {
/* Execute fast interrupt handlers directly. */
TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
MPASS(ih->ih_flags & IH_FAST &&
ih->ih_argument != NULL);
CTR3(KTR_INTR, "%s: executing handler %p(%p)",
__func__, ih->ih_handler, ih->ih_argument);
ih->ih_handler(ih->ih_argument);
}
ie = iv->iv_event;
if (ie == NULL) {
intr_stray_vector(iv);
return;
}
/* Execute fast interrupt handlers directly. */
thread = 0;
TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
if (!(ih->ih_flags & IH_FAST)) {
thread = 1;
continue;
}
MPASS(ih->ih_flags & IH_FAST && ih->ih_argument != NULL);
CTR3(KTR_INTR, "%s: executing handler %p(%p)", __func__,
ih->ih_handler, ih->ih_argument);
ih->ih_handler(ih->ih_argument);
}
/* Schedule a heavyweight interrupt process. */
error = ithread_schedule(ithd);
if (thread)
error = intr_event_schedule_thread(ie);
else if (TAILQ_EMPTY(&ie->ie_handlers))
error = EINVAL;
else
error = 0;
if (error == EINVAL)
intr_stray_vector(iv);
}
@ -268,8 +274,8 @@ inthand_add(const char *name, int vec, void (*handler)(void *), void *arg,
int flags, void **cookiep)
{
struct intr_vector *iv;
struct ithd *ithd; /* descriptor for the IRQ */
struct ithd *orphan;
struct intr_event *ie; /* descriptor for the IRQ */
struct intr_event *orphan;
int errcode;
/*
@ -278,27 +284,27 @@ inthand_add(const char *name, int vec, void (*handler)(void *), void *arg,
*/
iv = &intr_vectors[vec];
mtx_lock_spin(&intr_table_lock);
ithd = iv->iv_ithd;
ie = iv->iv_event;
mtx_unlock_spin(&intr_table_lock);
if (ithd == NULL) {
errcode = ithread_create(&ithd, vec, 0, NULL, NULL, "vec%d:",
vec);
if (ie == NULL) {
errcode = intr_event_create(&ie, (void *)(intptr_t)vec, 0, NULL,
"vec%d:", vec);
if (errcode)
return (errcode);
mtx_lock_spin(&intr_table_lock);
if (iv->iv_ithd == NULL) {
iv->iv_ithd = ithd;
if (iv->iv_event == NULL) {
iv->iv_event = ie;
mtx_unlock_spin(&intr_table_lock);
} else {
orphan = ithd;
ithd = iv->iv_ithd;
orphan = ie;
ie = iv->iv_event;
mtx_unlock_spin(&intr_table_lock);
ithread_destroy(orphan);
intr_event_destroy(orphan);
}
}
errcode = ithread_add_handler(ithd, name, handler, arg,
ithread_priority(flags), flags, cookiep);
errcode = intr_event_add_handler(ie, name, handler, arg,
intr_priority(flags), flags, cookiep);
if (errcode)
return (errcode);
@ -307,7 +313,7 @@ inthand_add(const char *name, int vec, void (*handler)(void *), void *arg,
intr_stray_count[vec] = 0;
intrcnt_updatename(vec, ithd->it_td->td_proc->p_comm, 0);
intrcnt_updatename(vec, ie->ie_fullname, 0);
return (0);
}
@ -318,15 +324,16 @@ inthand_remove(int vec, void *cookie)
struct intr_vector *iv;
int error;
error = ithread_remove_handler(cookie);
error = intr_event_remove_handler(cookie);
if (error == 0) {
/*
* XXX: maybe this should be done regardless of whether
* ithread_remove_handler() succeeded?
* intr_event_remove_handler() succeeded?
* XXX: aren't the PIL's backwards below?
*/
iv = &intr_vectors[vec];
mtx_lock_spin(&intr_table_lock);
if (iv->iv_ithd == NULL)
if (iv->iv_event == NULL)
intr_setup(PIL_ITHREAD, intr_fast, vec,
intr_stray_vector, iv);
else

View File

@ -32,20 +32,23 @@
#include <sys/_lock.h>
#include <sys/_mutex.h>
struct intr_event;
struct intr_thread;
/*
* Describe a hardware interrupt handler.
*
* Multiple interrupt handlers for a specific event can be chained
* together.
*/
struct intr_handler {
	driver_intr_t	*ih_handler;	/* Handler function. */
	void		*ih_argument;	/* Argument to pass to handler. */
	int		 ih_flags;	/* IH_* flags below. */
	const char	*ih_name;	/* Name of handler. */
	struct intr_event *ih_event;	/* Event we are connected to. */
	int		 ih_need;	/* Needs service. */
	TAILQ_ENTRY(intr_handler) ih_next; /* Next handler for this event. */
	u_char		 ih_pri;	/* Priority of this handler. */
};
@ -57,29 +60,26 @@ struct intrhand {
#define IH_MPSAFE 0x80000000 /* Handler does not need Giant. */
/*
* Describe an interrupt event.  An event holds a list of handlers and is
* the unit that interrupt sources attach to; it may or may not have an
* associated interrupt thread (ie_thread), depending on whether any
* threaded (non-INTR_FAST) handlers are attached.
*/
struct ithd {
struct mtx it_lock;
struct thread *it_td; /* Interrupt process. */
LIST_ENTRY(ithd) it_list; /* All interrupt threads. */
TAILQ_HEAD(, intrhand) it_handlers; /* Interrupt handlers. */
struct ithd *it_interrupted; /* Who we interrupted. */
void (*it_disable)(uintptr_t); /* Enable interrupt source. */
void (*it_enable)(uintptr_t); /* Disable interrupt source. */
void *it_md; /* Hook for MD interrupt code. */
int it_flags; /* Interrupt-specific flags. */
int it_need; /* Needs service. */
uintptr_t it_vector;
char it_name[MAXCOMLEN + 1];
struct intr_event {
TAILQ_ENTRY(intr_event) ie_list;
TAILQ_HEAD(, intr_handler) ie_handlers; /* Interrupt handlers. */
char ie_name[MAXCOMLEN]; /* Individual event name. */
char ie_fullname[MAXCOMLEN];
struct mtx ie_lock;
void *ie_source; /* Cookie used by MD code. */
struct intr_thread *ie_thread; /* Thread we are connected to. */
void (*ie_enable)(void *);
int ie_flags;
int ie_count; /* Loop counter. */
int ie_warned; /* Warned about interrupt storm. */
};
/* Interrupt thread flags kept in it_flags */
#define IT_SOFT 0x000001 /* Software interrupt. */
#define IT_ENTROPY 0x000002 /* Interrupt is an entropy source. */
#define IT_DEAD 0x000004 /* Thread is waiting to exit. */
/* Interrupt event flags kept in ie_flags. */
#define IE_SOFT 0x000001 /* Software interrupt. */
#define IE_ENTROPY 0x000002 /* Interrupt is an entropy source. */
#define IE_ADDING_THREAD 0x000004 /* Currently building an ithread. */
/* Flags to pass to sched_swi. */
#define SWI_DELAY 0x2
@ -97,8 +97,8 @@ struct ithd {
#define SWI_TQ 6
#define SWI_TQ_GIANT 6
extern struct ithd *tty_ithd;
extern struct ithd *clk_ithd;
extern struct intr_event *tty_intr_event;
extern struct intr_event *clk_intr_event;
extern void *softclock_ih;
extern void *vm_ih;
@ -109,19 +109,19 @@ extern u_long intrcnt[]; /* counts for for each device and stray */
extern char intrnames[]; /* string table containing device names */
#ifdef DDB
void db_dump_ithread(struct ithd *ithd, int handlers);
void db_dump_intr_event(struct intr_event *ie, int handlers);
#endif
int ithread_create(struct ithd **ithread, uintptr_t vector, int flags,
void (*disable)(uintptr_t), void (*enable)(uintptr_t),
const char *fmt, ...) __printflike(6, 7);
int ithread_destroy(struct ithd *ithread);
u_char ithread_priority(enum intr_type flags);
int ithread_add_handler(struct ithd *ithread, const char *name,
u_char intr_priority(enum intr_type flags);
int intr_event_add_handler(struct intr_event *ie, const char *name,
driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
void **cookiep);
int ithread_remove_handler(void *cookie);
int ithread_schedule(struct ithd *ithread);
int swi_add(struct ithd **ithdp, const char *name,
int intr_event_create(struct intr_event **event, void *source,
int flags, void (*enable)(void *), const char *fmt, ...)
__printflike(5, 6);
int intr_event_destroy(struct intr_event *ie);
int intr_event_remove_handler(void *cookie);
int intr_event_schedule_thread(struct intr_event *ie);
int swi_add(struct intr_event **eventp, const char *name,
driver_intr_t handler, void *arg, int pri, enum intr_type flags,
void **cookiep);
void swi_sched(void *cookie, int flags);

View File

@ -150,7 +150,6 @@ struct pargs {
* either lock is sufficient for read access, but both locks must be held
* for write access.
*/
struct ithd;
struct kg_sched;
struct nlminfo;
struct kaioinfo;
@ -270,7 +269,6 @@ struct thread {
volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */
short td_locks; /* (k) DEBUG: lockmgr count of locks. */
struct turnstile *td_blocked; /* (j) Lock process is blocked on. */
struct ithd *td_ithd; /* (b) For interrupt threads only. */
const char *td_lockname; /* (j) Name of lock blocked on. */
LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */
struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
@ -372,7 +370,7 @@ struct thread {
#define TDP_SA 0x00000080 /* A scheduler activation based thread. */
#define TDP_NOSLEEPING 0x00000100 /* Thread is not allowed to sleep on a sq. */
#define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */
#define TDP_UNUSED10 0x00000400 /* --available -- */
#define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */
#define TDP_CAN_UNBIND 0x00000800 /* Only temporarily bound. */
#define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */
#define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */