/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_MACHINE_INTR_MACHDEP_H_
#define	_MACHINE_INTR_MACHDEP_H_
/* Interrupt Receive State Register: dispatch unit is busy. */
#define	IRSR_BUSY	(1 << 5)

#define	PIL_MAX		(1 << 4)	/* number of processor interrupt levels */
#define	IV_MAX		(1 << 11)	/* number of interrupt vectors */

#define	IR_FREE		(PIL_MAX * 2)

#define	IH_SHIFT	PTR_SHIFT	/* log2(sizeof(ih_func_t *)) */
#define	IQE_SHIFT	5		/* log2(sizeof(struct intr_queue_entry)) */
#define	IV_SHIFT	6		/* log2(sizeof(struct intr_vector)) */

/*
 * Interrupt priority levels (PILs).
 *
 * NOTE(review): PIL_PREEMPT is deliberately the lowest real priority, just
 * above stray interrupts.  SPARC v9 allows interrupts of a higher level to
 * nest on top of the current one; if a preemption IPI could interrupt e.g.
 * a PIL_ITHREAD handler which is in turn nested by PIL_TICK, execution could
 * (with SCHED_ULE) migrate to another CPU and tl1_ret() would then "restore"
 * an inappropriate PIL on a CPU that never took the original interrupt,
 * blocking lower-level interrupts there until the correct PIL happens to be
 * restored.  Keeping PIL_PREEMPT at the bottom means preemption IPIs can
 * only interrupt stray interrupts, which is harmless.
 *
 * PIL_STOP (also used for IPI_STOP_HARD; SPARC v9 has no real NMI
 * equivalent) sits just below the hardwired PIL_TICK so it has a chance to
 * interrupt as much as possible.
 */
#define	PIL_LOW		1	/* stray interrupts */
#define	PIL_PREEMPT	2	/* preempt idle thread CPU IPI */
#define	PIL_ITHREAD	3	/* interrupts that use ithreads */
#define	PIL_RENDEZVOUS	4	/* SMP rendezvous IPI */
#define	PIL_AST		5	/* asynchronous trap IPI */
#define	PIL_HARDCLOCK	6	/* hardclock broadcast */
#define	PIL_FILTER	11	/* filter interrupts */
#define	PIL_BRIDGE	12	/* bridge interrupts */
#define	PIL_STOP	13	/* stop CPU IPI */
#define	PIL_TICK	14	/* tick interrupts */
#ifndef	LOCORE

/* MD interrupt flag: handler is for a bridge (PIL_BRIDGE) interrupt. */
#define	INTR_BRIDGE	INTR_MD1

struct trapframe;

/* Trap-level interrupt handler (indexed by PIL via intr_handlers[]). */
typedef	void ih_func_t(struct trapframe *);
/* Interrupt vector handler, called with the driver-supplied argument. */
typedef	void iv_func_t(void *);
2002-05-25 02:39:28 +00:00
|
|
|
struct intr_request {
|
|
|
|
struct intr_request *ir_next;
|
|
|
|
iv_func_t *ir_func;
|
|
|
|
void *ir_arg;
|
|
|
|
u_int ir_vec;
|
|
|
|
u_int ir_pri;
|
2001-08-10 04:48:48 +00:00
|
|
|
};
|
|
|
|
|
/*
 * Operations provided by an interrupt controller driver; each callback
 * receives the controller-private argument registered alongside it
 * (see intr_controller_register()).
 */
struct intr_controller {
	void	(*ic_enable)(void *);	/* enable the interrupt source */
	void	(*ic_disable)(void *);	/* disable the interrupt source */
	void	(*ic_assign)(void *);	/* assign the interrupt to a CPU */
	void	(*ic_clear)(void *);	/* clear/ack the interrupt */
};
|
2001-09-05 05:18:35 +00:00
|
|
|
struct intr_vector {
|
2001-10-20 16:03:41 +00:00
|
|
|
iv_func_t *iv_func;
|
2001-08-10 04:48:48 +00:00
|
|
|
void *iv_arg;
|
2007-09-06 19:16:30 +00:00
|
|
|
const struct intr_controller *iv_ic;
|
|
|
|
void *iv_icarg;
|
Reorganize the interrupt handling code a bit to make a few things cleaner
and increase flexibility to allow various different approaches to be tried
in the future.
- Split struct ithd up into two pieces. struct intr_event holds the list
of interrupt handlers associated with interrupt sources.
struct intr_thread contains the data relative to an interrupt thread.
Currently we still provide a 1:1 relationship of events to threads
with the exception that events only have an associated thread if there
is at least one threaded interrupt handler attached to the event. This
means that on x86 we no longer have 4 bazillion interrupt threads with
no handlers. It also means that interrupt events with only INTR_FAST
handlers no longer have an associated thread either.
- Renamed struct intrhand to struct intr_handler to follow the struct
intr_foo naming convention. This did require renaming the powerpc
MD struct intr_handler to struct ppc_intr_handler.
- INTR_FAST no longer implies INTR_EXCL on all architectures except for
powerpc. This means that multiple INTR_FAST handlers can attach to the
same interrupt and that INTR_FAST and non-INTR_FAST handlers can attach
to the same interrupt. Sharing INTR_FAST handlers may not always be
desirable, but having sio(4) and uhci(4) fight over an IRQ isn't fun
either. Drivers can always still use INTR_EXCL to ask for an interrupt
exclusively. The way this sharing works is that when an interrupt
comes in, all the INTR_FAST handlers are executed first, and if any
threaded handlers exist, the interrupt thread is scheduled afterwards.
This type of layout also makes it possible to investigate using interrupt
filters ala OS X where the filter determines whether or not its companion
threaded handler should run.
- Aside from the INTR_FAST changes above, the impact on MD interrupt code
is mostly just 's/ithread/intr_event/'.
- A new MI ddb command 'show intrs' walks the list of interrupt events
dumping their state. It also has a '/v' verbose switch which dumps
info about all of the handlers attached to each event.
- We currently don't destroy an interrupt thread when the last threaded
handler is removed because it would suck for things like ppbus(8)'s
braindead behavior. The code is present, though, it is just under
#if 0 for now.
- Move the code to actually execute the threaded handlers for an interrrupt
event into a separate function so that ithread_loop() becomes more
readable. Previously this code was all in the middle of ithread_loop()
and indented halfway across the screen.
- Made struct intr_thread private to kern_intr.c and replaced td_ithd
with a thread private flag TDP_ITHREAD.
- In statclock, check curthread against idlethread directly rather than
curthread's proc against idlethread's proc. (Not really related to intr
changes)
Tested on: alpha, amd64, i386, sparc64
Tested on: arm, ia64 (older version of patch by cognet and marcel)
2005-10-25 19:48:48 +00:00
|
|
|
struct intr_event *iv_event;
|
2001-08-10 04:48:48 +00:00
|
|
|
u_int iv_pri;
|
2001-10-20 16:03:41 +00:00
|
|
|
u_int iv_vec;
|
2007-09-06 19:16:30 +00:00
|
|
|
u_int iv_mid;
|
|
|
|
u_int iv_refcnt;
|
|
|
|
u_int iv_pad[2];
|
2001-08-10 04:48:48 +00:00
|
|
|
};
|
|
|
|
|
2001-12-29 06:57:55 +00:00
|
|
|
extern ih_func_t *intr_handlers[];
|
2001-08-10 04:48:48 +00:00
|
|
|
extern struct intr_vector intr_vectors[];
|
|
|
|
|
2012-05-10 15:23:20 +00:00
|
|
|
#ifdef SMP
|
2012-10-09 12:22:43 +00:00
|
|
|
void intr_add_cpu(u_int cpu);
|
2012-05-10 15:17:21 +00:00
|
|
|
#endif
|
2012-10-09 12:22:43 +00:00
|
|
|
int intr_bind(int vec, u_char cpu);
|
2009-12-24 15:43:37 +00:00
|
|
|
int intr_describe(int vec, void *ih, const char *descr);
|
2001-08-10 04:48:48 +00:00
|
|
|
void intr_setup(int level, ih_func_t *ihf, int pri, iv_func_t *ivf,
|
2007-01-19 11:15:34 +00:00
|
|
|
void *iva);
|
2002-02-13 16:36:44 +00:00
|
|
|
void intr_init1(void);
|
|
|
|
void intr_init2(void);
|
2007-09-06 19:16:30 +00:00
|
|
|
int intr_controller_register(int vec, const struct intr_controller *ic,
|
|
|
|
void *icarg);
|
2007-02-23 12:19:07 +00:00
|
|
|
int inthand_add(const char *name, int vec, int (*filt)(void *),
|
2007-09-06 19:16:30 +00:00
|
|
|
void (*handler)(void *), void *arg, int flags, void **cookiep);
|
2001-10-12 16:06:41 +00:00
|
|
|
int inthand_remove(int vec, void *cookie);
|
2001-08-10 04:48:48 +00:00
|
|
|
|
2002-09-28 03:06:35 +00:00
|
|
|
ih_func_t intr_fast;
|
2001-08-10 04:48:48 +00:00
|
|
|
|
2007-01-19 11:15:34 +00:00
|
|
|
#endif /* !LOCORE */
|
|
|
|
|
|
|
|
#endif /* !_MACHINE_INTR_MACHDEP_H_ */
|