freebsd-dev/sys/kern/subr_intr.c
Svatopluk Kraus 169e6abd8f Mark other parts of interrupt framework as INTR_SOLO option specific.
Note that the isrc_arg member of struct intr_irqsrc is used only for
INTR_SOLO and IPI filters. Keep this in mind if IPI filters and their
arguments are ever stored in another place.

This option could become unusable very soon if interrupt controller
implementations are not written with it in mind.
2016-03-01 10:57:29 +00:00


/*-
* Copyright (c) 2012-2014 Jakub Wojciech Klama <jceel@FreeBSD.org>.
* Copyright (c) 2015 Svatopluk Kraus
* Copyright (c) 2015 Michal Meloun
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* New-style Interrupt Framework
*
* TODO: - to support IPI (PPI) enabling on other CPUs if already started
* - to complete things for removable PICs
*/
#include "opt_ddb.h"
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <machine/atomic.h>
#include <machine/intr.h>
#include <machine/cpu.h>
#include <machine/smp.h>
#include <machine/stdarg.h>
#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif
#ifdef DDB
#include <ddb/ddb.h>
#endif
#include "pic_if.h"
#define INTRNAME_LEN (2*MAXCOMLEN + 1)
#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__); \
printf(fmt, ##args); } while (0)
#else
#define debugf(fmt, args...)
#endif
MALLOC_DECLARE(M_INTRNG);
MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");
/* Main interrupt handler called from assembler -> 'hidden' from C code. */
void intr_irq_handler(struct trapframe *tf);
/* Root interrupt controller stuff. */
static struct intr_irqsrc *irq_root_isrc;
device_t intr_irq_root_dev;
static intr_irq_filter_t *irq_root_filter;
static void *irq_root_arg;
static u_int irq_root_ipicount;
/* Interrupt controller definition. */
struct intr_pic {
SLIST_ENTRY(intr_pic) pic_next;
intptr_t pic_xref; /* hardware identification */
device_t pic_dev;
};
static struct mtx pic_list_lock;
static SLIST_HEAD(, intr_pic) pic_list;
static struct intr_pic *pic_lookup(device_t dev, intptr_t xref);
/* Interrupt source definition. */
static struct mtx isrc_table_lock;
static struct intr_irqsrc *irq_sources[NIRQ];
u_int irq_next_free;
#define IRQ_INVALID nitems(irq_sources)
#ifdef SMP
static boolean_t irq_assign_cpu = FALSE;
#endif
/*
* - 2 counters for each I/O interrupt.
* - MAXCPU counters for each IPI for SMP.
*/
#ifdef SMP
#define INTRCNT_COUNT (NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
#else
#define INTRCNT_COUNT (NIRQ * 2)
#endif
/* Data for MI statistics reporting. */
u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);
static u_int intrcnt_index;
/*
* Interrupt framework initialization routine.
*/
static void
intr_irq_init(void *dummy __unused)
{
SLIST_INIT(&pic_list);
mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);
mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
}
SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);
static void
intrcnt_setname(const char *name, int index)
{
snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
INTRNAME_LEN - 1, name);
}
/*
* Update name for interrupt source with interrupt event.
*/
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{
/* QQQ: What about stray counter name? */
mtx_assert(&isrc_table_lock, MA_OWNED);
intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}
/*
* Virtualization for interrupt source interrupt counter increment.
*/
static inline void
isrc_increment_count(struct intr_irqsrc *isrc)
{
/*
* XXX - It should be atomic for PPI interrupts. It was proven that
* the loss is easily measurable for timer PPI interrupts.
*/
isrc->isrc_count[0]++;
/*atomic_add_long(&isrc->isrc_count[0], 1);*/
}
/*
* Virtualization for interrupt source interrupt stray counter increment.
*/
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{
isrc->isrc_count[1]++;
}
/*
* Virtualization for interrupt source interrupt name update.
*/
static void
isrc_update_name(struct intr_irqsrc *isrc, const char *name)
{
char str[INTRNAME_LEN];
mtx_assert(&isrc_table_lock, MA_OWNED);
if (name != NULL) {
snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
intrcnt_setname(str, isrc->isrc_index);
snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
name);
intrcnt_setname(str, isrc->isrc_index + 1);
} else {
snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
intrcnt_setname(str, isrc->isrc_index);
snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
intrcnt_setname(str, isrc->isrc_index + 1);
}
}
/*
* Virtualization for interrupt source interrupt counters setup.
*/
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
u_int index;
/*
* XXX - it does not work well with removable controllers and
* interrupt sources !!!
*/
index = atomic_fetchadd_int(&intrcnt_index, 2);
isrc->isrc_index = index;
isrc->isrc_count = &intrcnt[index];
isrc_update_name(isrc, NULL);
}
#ifdef SMP
/*
* Virtualization for interrupt source IPI counters setup.
*/
u_long *
intr_ipi_setup_counters(const char *name)
{
u_int index, i;
char str[INTRNAME_LEN];
index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
for (i = 0; i < MAXCPU; i++) {
snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
intrcnt_setname(str, index + i);
}
return (&intrcnt[index]);
}
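/*
 * A hedged note on intended use (caller code, not part of this file): the
 * returned array is expected to be indexed by CPU id when the corresponding
 * IPI arrives, along the lines of
 *
 *	counters = intr_ipi_setup_counters("rendezvous");
 *	...
 *	counters[PCPU_GET(cpuid)]++;
 *
 * The "rendezvous" name is purely illustrative.
 */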
#endif
/*
* Main interrupt dispatch handler. It's called straight
* from the assembler, where the CPU interrupt is served.
*/
void
intr_irq_handler(struct trapframe *tf)
{
struct trapframe * oldframe;
struct thread * td;
KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));
PCPU_INC(cnt.v_intr);
critical_enter();
td = curthread;
oldframe = td->td_intr_frame;
td->td_intr_frame = tf;
irq_root_filter(irq_root_arg);
td->td_intr_frame = oldframe;
critical_exit();
}
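/*
 * For illustration only, a hedged sketch of how architecture glue is
 * expected to enter the dispatcher above; arch_irq_entry() is an assumed
 * name, not a function of this file:
 *
 *	void
 *	arch_irq_entry(struct trapframe *tf)
 *	{
 *		intr_irq_handler(tf);
 *	}
 *
 * The root filter installed by intr_pic_claim_root() then decodes the
 * hardware state and calls intr_irq_dispatch() for each pending source.
 */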
/*
* Interrupt controller dispatch function for interrupts. It should
* be called straight from the interrupt controller, once the associated
* interrupt source is learned.
*/
void
intr_irq_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{
KASSERT(isrc != NULL, ("%s: no source", __func__));
isrc_increment_count(isrc);
#ifdef INTR_SOLO
if (isrc->isrc_filter != NULL) {
int error;
error = isrc->isrc_filter(isrc->isrc_arg, tf);
PIC_POST_FILTER(isrc->isrc_dev, isrc);
if (error == FILTER_HANDLED)
return;
} else
#endif
if (isrc->isrc_event != NULL) {
if (intr_event_handle(isrc->isrc_event, tf) == 0)
return;
}
isrc_increment_straycount(isrc);
PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc);
device_printf(isrc->isrc_dev, "stray irq <%s> disabled\n",
isrc->isrc_name);
}
/*
* Allocate interrupt source.
*/
static struct intr_irqsrc *
isrc_alloc(u_int type, u_int extsize)
{
struct intr_irqsrc *isrc;
isrc = malloc(sizeof(*isrc) + extsize, M_INTRNG, M_WAITOK | M_ZERO);
isrc->isrc_irq = IRQ_INVALID; /* just to be safe */
isrc->isrc_type = type;
isrc->isrc_nspc_type = INTR_IRQ_NSPC_NONE;
isrc->isrc_trig = INTR_TRIGGER_CONFORM;
isrc->isrc_pol = INTR_POLARITY_CONFORM;
CPU_ZERO(&isrc->isrc_cpu);
return (isrc);
}
/*
* Free interrupt source.
*/
static void
isrc_free(struct intr_irqsrc *isrc)
{
free(isrc, M_INTRNG);
}
void
intr_irq_set_name(struct intr_irqsrc *isrc, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
va_end(ap);
}
/*
* Allocate a unique interrupt number (resource handle) for an interrupt
* source.
*
* There are various possible strategies for allocating a free interrupt
* number (resource handle) for a new interrupt source.
*
* 1. Handles are always allocated forward, so handles are not recycled
* immediately. However, if only one free handle is left, it gets reused
* constantly...
*/
static int
isrc_alloc_irq_locked(struct intr_irqsrc *isrc)
{
u_int maxirqs, irq;
mtx_assert(&isrc_table_lock, MA_OWNED);
maxirqs = nitems(irq_sources);
if (irq_next_free >= maxirqs)
return (ENOSPC);
for (irq = irq_next_free; irq < maxirqs; irq++) {
if (irq_sources[irq] == NULL)
goto found;
}
for (irq = 0; irq < irq_next_free; irq++) {
if (irq_sources[irq] == NULL)
goto found;
}
irq_next_free = maxirqs;
return (ENOSPC);
found:
isrc->isrc_irq = irq;
irq_sources[irq] = isrc;
intr_irq_set_name(isrc, "irq%u", irq);
isrc_setup_counters(isrc);
irq_next_free = irq + 1;
if (irq_next_free >= maxirqs)
irq_next_free = 0;
return (0);
}
#ifdef notyet
/*
* Free unique interrupt number (resource handle) from interrupt source.
*/
static int
isrc_free_irq(struct intr_irqsrc *isrc)
{
u_int maxirqs;
mtx_assert(&isrc_table_lock, MA_NOTOWNED);
maxirqs = nitems(irq_sources);
if (isrc->isrc_irq >= maxirqs)
return (EINVAL);
mtx_lock(&isrc_table_lock);
if (irq_sources[isrc->isrc_irq] != isrc) {
mtx_unlock(&isrc_table_lock);
return (EINVAL);
}
irq_sources[isrc->isrc_irq] = NULL;
isrc->isrc_irq = IRQ_INVALID; /* just to be safe */
mtx_unlock(&isrc_table_lock);
return (0);
}
#endif
/*
* Lookup interrupt source by interrupt number (resource handle).
*/
static struct intr_irqsrc *
isrc_lookup(u_int irq)
{
if (irq < nitems(irq_sources))
return (irq_sources[irq]);
return (NULL);
}
/*
* Lookup interrupt source by namespace description.
*/
static struct intr_irqsrc *
isrc_namespace_lookup(device_t dev, uint16_t type, uint16_t num)
{
u_int irq;
struct intr_irqsrc *isrc;
mtx_assert(&isrc_table_lock, MA_OWNED);
for (irq = 0; irq < nitems(irq_sources); irq++) {
isrc = irq_sources[irq];
if (isrc != NULL && isrc->isrc_dev == dev &&
isrc->isrc_nspc_type == type && isrc->isrc_nspc_num == num)
return (isrc);
}
return (NULL);
}
/*
* Map an interrupt source described by namespace into the framework. If such
* a mapping does not exist, create it. Return the unique interrupt number
* (resource handle) associated with the mapped interrupt source.
*/
u_int
intr_namespace_map_irq(device_t dev, uint16_t type, uint16_t num)
{
struct intr_irqsrc *isrc, *new_isrc;
int error;
new_isrc = isrc_alloc(INTR_ISRCT_NAMESPACE, 0);
mtx_lock(&isrc_table_lock);
isrc = isrc_namespace_lookup(dev, type, num);
if (isrc != NULL) {
mtx_unlock(&isrc_table_lock);
isrc_free(new_isrc);
return (isrc->isrc_irq); /* already mapped */
}
error = isrc_alloc_irq_locked(new_isrc);
if (error != 0) {
mtx_unlock(&isrc_table_lock);
isrc_free(new_isrc);
return (IRQ_INVALID); /* no space left */
}
new_isrc->isrc_dev = dev;
new_isrc->isrc_nspc_type = type;
new_isrc->isrc_nspc_num = num;
mtx_unlock(&isrc_table_lock);
return (new_isrc->isrc_irq);
}
#ifdef FDT
/*
* Lookup interrupt source by FDT description.
*/
static struct intr_irqsrc *
isrc_fdt_lookup(intptr_t xref, pcell_t *cells, u_int ncells)
{
u_int irq, cellsize;
struct intr_irqsrc *isrc;
mtx_assert(&isrc_table_lock, MA_OWNED);
cellsize = ncells * sizeof(*cells);
for (irq = 0; irq < nitems(irq_sources); irq++) {
isrc = irq_sources[irq];
if (isrc != NULL && isrc->isrc_type == INTR_ISRCT_FDT &&
isrc->isrc_xref == xref && isrc->isrc_ncells == ncells &&
memcmp(isrc->isrc_cells, cells, cellsize) == 0)
return (isrc);
}
return (NULL);
}
/*
* Map an interrupt source described by FDT data into the framework. If such
* a mapping does not exist, create it. Return the unique interrupt number
* (resource handle) associated with the mapped interrupt source.
*/
u_int
intr_fdt_map_irq(phandle_t node, pcell_t *cells, u_int ncells)
{
struct intr_irqsrc *isrc, *new_isrc;
u_int cellsize;
intptr_t xref;
int error;
xref = (intptr_t)node; /* It's so simple for now. */
cellsize = ncells * sizeof(*cells);
new_isrc = isrc_alloc(INTR_ISRCT_FDT, cellsize);
mtx_lock(&isrc_table_lock);
isrc = isrc_fdt_lookup(xref, cells, ncells);
if (isrc != NULL) {
mtx_unlock(&isrc_table_lock);
isrc_free(new_isrc);
return (isrc->isrc_irq); /* already mapped */
}
error = isrc_alloc_irq_locked(new_isrc);
if (error != 0) {
mtx_unlock(&isrc_table_lock);
isrc_free(new_isrc);
return (IRQ_INVALID); /* no space left */
}
new_isrc->isrc_xref = xref;
new_isrc->isrc_ncells = ncells;
memcpy(new_isrc->isrc_cells, cells, cellsize);
mtx_unlock(&isrc_table_lock);
return (new_isrc->isrc_irq);
}
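/*
 * A minimal usage sketch (illustrative, not part of this file): a bus
 * driver that has parsed an "interrupts" specifier from FDT would map it
 * to a resource handle roughly as
 *
 *	u_int irq;
 *
 *	irq = intr_fdt_map_irq(iparent, cells, ncells);
 *	if (irq == IRQ_INVALID)
 *		return (ENXIO);
 *
 * where iparent, cells, and ncells come from the parsed property; the
 * returned handle is what is later passed to intr_irq_add_handler().
 */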
#endif
/*
* Register interrupt source into interrupt controller.
*/
static int
isrc_register(struct intr_irqsrc *isrc)
{
struct intr_pic *pic;
boolean_t is_percpu;
int error;
if (isrc->isrc_flags & INTR_ISRCF_REGISTERED)
return (0);
if (isrc->isrc_dev == NULL) {
pic = pic_lookup(NULL, isrc->isrc_xref);
if (pic == NULL || pic->pic_dev == NULL)
return (ESRCH);
isrc->isrc_dev = pic->pic_dev;
}
error = PIC_REGISTER(isrc->isrc_dev, isrc, &is_percpu);
if (error != 0)
return (error);
mtx_lock(&isrc_table_lock);
isrc->isrc_flags |= INTR_ISRCF_REGISTERED;
if (is_percpu)
isrc->isrc_flags |= INTR_ISRCF_PERCPU;
isrc_update_name(isrc, NULL);
mtx_unlock(&isrc_table_lock);
return (0);
}
#ifdef INTR_SOLO
/*
* Set up a filter for an interrupt source.
*/
static int
isrc_setup_filter(struct intr_irqsrc *isrc, const char *name,
intr_irq_filter_t *filter, void *arg, void **cookiep)
{
if (filter == NULL)
return (EINVAL);
mtx_lock(&isrc_table_lock);
/*
* Make sure that we do not mix the two ways
* in which we handle interrupt sources.
*/
if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
mtx_unlock(&isrc_table_lock);
return (EBUSY);
}
isrc->isrc_filter = filter;
isrc->isrc_arg = arg;
isrc_update_name(isrc, name);
mtx_unlock(&isrc_table_lock);
*cookiep = isrc;
return (0);
}
#endif
/*
* Interrupt source pre_ithread method for MI interrupt framework.
*/
static void
intr_isrc_pre_ithread(void *arg)
{
struct intr_irqsrc *isrc = arg;
PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
}
/*
* Interrupt source post_ithread method for MI interrupt framework.
*/
static void
intr_isrc_post_ithread(void *arg)
{
struct intr_irqsrc *isrc = arg;
PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
}
/*
* Interrupt source post_filter method for MI interrupt framework.
*/
static void
intr_isrc_post_filter(void *arg)
{
struct intr_irqsrc *isrc = arg;
PIC_POST_FILTER(isrc->isrc_dev, isrc);
}
/*
* Interrupt source assign_cpu method for MI interrupt framework.
*/
static int
intr_isrc_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
struct intr_irqsrc *isrc = arg;
int error;
if (isrc->isrc_dev != intr_irq_root_dev)
return (EINVAL);
mtx_lock(&isrc_table_lock);
if (cpu == NOCPU) {
CPU_ZERO(&isrc->isrc_cpu);
isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
} else {
CPU_SETOF(cpu, &isrc->isrc_cpu);
isrc->isrc_flags |= INTR_ISRCF_BOUND;
}
/*
* In the NOCPU case, it's up to the PIC to either leave the ISRC on the
* same CPU, re-balance it to another CPU, or enable it on more CPUs.
* Either way, the PIC is expected to change isrc_cpu appropriately to
* keep us well informed if the call is successful.
*/
if (irq_assign_cpu) {
error = PIC_BIND(isrc->isrc_dev, isrc);
if (error) {
CPU_ZERO(&isrc->isrc_cpu);
mtx_unlock(&isrc_table_lock);
return (error);
}
}
mtx_unlock(&isrc_table_lock);
return (0);
#else
return (EOPNOTSUPP);
#endif
}
/*
* Create interrupt event for interrupt source.
*/
static int
isrc_event_create(struct intr_irqsrc *isrc)
{
struct intr_event *ie;
int error;
error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter,
intr_isrc_assign_cpu, "%s:", isrc->isrc_name);
if (error)
return (error);
mtx_lock(&isrc_table_lock);
/*
* Make sure that we do not mix the two ways
* in which we handle interrupt sources. Let the contested event win.
*/
#ifdef INTR_SOLO
if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
#else
if (isrc->isrc_event != NULL) {
#endif
mtx_unlock(&isrc_table_lock);
intr_event_destroy(ie);
return (isrc->isrc_event != NULL ? EBUSY : 0);
}
isrc->isrc_event = ie;
mtx_unlock(&isrc_table_lock);
return (0);
}
#ifdef notyet
/*
* Destroy interrupt event for interrupt source.
*/
static void
isrc_event_destroy(struct intr_irqsrc *isrc)
{
struct intr_event *ie;
mtx_lock(&isrc_table_lock);
ie = isrc->isrc_event;
isrc->isrc_event = NULL;
mtx_unlock(&isrc_table_lock);
if (ie != NULL)
intr_event_destroy(ie);
}
#endif
/*
* Add handler to interrupt source.
*/
static int
isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
driver_filter_t filter, driver_intr_t handler, void *arg,
enum intr_type flags, void **cookiep)
{
int error;
if (isrc->isrc_event == NULL) {
error = isrc_event_create(isrc);
if (error)
return (error);
}
error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
arg, intr_priority(flags), flags, cookiep);
if (error == 0) {
mtx_lock(&isrc_table_lock);
intrcnt_updatename(isrc);
mtx_unlock(&isrc_table_lock);
}
return (error);
}
/*
* Lookup interrupt controller locked.
*/
static struct intr_pic *
pic_lookup_locked(device_t dev, intptr_t xref)
{
struct intr_pic *pic;
mtx_assert(&pic_list_lock, MA_OWNED);
SLIST_FOREACH(pic, &pic_list, pic_next) {
if (pic->pic_xref != xref)
continue;
if (pic->pic_xref != 0 || pic->pic_dev == dev)
return (pic);
}
return (NULL);
}
/*
* Lookup interrupt controller.
*/
static struct intr_pic *
pic_lookup(device_t dev, intptr_t xref)
{
struct intr_pic *pic;
mtx_lock(&pic_list_lock);
pic = pic_lookup_locked(dev, xref);
mtx_unlock(&pic_list_lock);
return (pic);
}
/*
* Create interrupt controller.
*/
static struct intr_pic *
pic_create(device_t dev, intptr_t xref)
{
struct intr_pic *pic;
mtx_lock(&pic_list_lock);
pic = pic_lookup_locked(dev, xref);
if (pic != NULL) {
mtx_unlock(&pic_list_lock);
return (pic);
}
pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
if (pic == NULL) {
mtx_unlock(&pic_list_lock);
return (NULL);
}
pic->pic_xref = xref;
pic->pic_dev = dev;
SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
mtx_unlock(&pic_list_lock);
return (pic);
}
#ifdef notyet
/*
* Destroy interrupt controller.
*/
static void
pic_destroy(device_t dev, intptr_t xref)
{
struct intr_pic *pic;
mtx_lock(&pic_list_lock);
pic = pic_lookup_locked(dev, xref);
if (pic == NULL) {
mtx_unlock(&pic_list_lock);
return;
}
SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
mtx_unlock(&pic_list_lock);
free(pic, M_INTRNG);
}
#endif
/*
* Register interrupt controller.
*/
int
intr_pic_register(device_t dev, intptr_t xref)
{
struct intr_pic *pic;
pic = pic_create(dev, xref);
if (pic == NULL)
return (ENOMEM);
if (pic->pic_dev != dev)
return (EINVAL); /* XXX it could be many things. */
debugf("PIC %p registered for %s <xref %x>\n", pic,
device_get_nameunit(dev), xref);
return (0);
}
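/*
 * A hedged sketch of expected usage (driver code, not part of this file):
 * an FDT-enumerated PIC driver would register itself from its attach
 * method, with foo_pic_attach() being an assumed name:
 *
 *	static int
 *	foo_pic_attach(device_t dev)
 *	{
 *		intptr_t xref = (intptr_t)ofw_bus_get_node(dev);
 *
 *		...
 *		return (intr_pic_register(dev, xref));
 *	}
 */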
/*
* Unregister interrupt controller.
*/
int
intr_pic_unregister(device_t dev, intptr_t xref)
{
panic("%s: not implemented", __func__);
}
/*
* Mark interrupt controller (itself) as a root one.
*
* Note that only an interrupt controller can really know its position
* in the interrupt controllers' tree. So the root PIC must claim itself
* as the root.
*
* In FDT case, according to ePAPR approved version 1.1 from 08 April 2011,
* page 30:
* "The root of the interrupt tree is determined when traversal
* of the interrupt tree reaches an interrupt controller node without
* an interrupts property and thus no explicit interrupt parent."
*/
int
intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
void *arg, u_int ipicount)
{
int error;
u_int rootirq;
if (pic_lookup(dev, xref) == NULL) {
device_printf(dev, "not registered\n");
return (EINVAL);
}
if (filter == NULL) {
device_printf(dev, "filter missing\n");
return (EINVAL);
}
/*
* Only one interrupt controller can be on the root for now.
* Note that we further suppose that there is no threaded interrupt
* routine (handler) on the root. See intr_irq_handler().
*/
if (intr_irq_root_dev != NULL) {
device_printf(dev, "another root already set\n");
return (EBUSY);
}
rootirq = intr_namespace_map_irq(device_get_parent(dev), 0, 0);
if (rootirq == IRQ_INVALID) {
device_printf(dev, "failed to map an irq for the root pic\n");
return (ENOMEM);
}
/* Look up the isrc created by the mapping above. */
irq_root_isrc = isrc_lookup(rootirq);
/* XXX "register" with the PIC. We are the "pic" here, so fake it. */
irq_root_isrc->isrc_flags |= INTR_ISRCF_REGISTERED;
error = intr_irq_add_handler(device_get_parent(dev),
(void*)filter, NULL, arg, rootirq, INTR_TYPE_CLK, NULL);
if (error != 0) {
device_printf(dev, "failed to install root pic handler\n");
return (error);
}
intr_irq_root_dev = dev;
irq_root_filter = filter;
irq_root_arg = arg;
irq_root_ipicount = ipicount;
debugf("irq root set to %s\n", device_get_nameunit(dev));
return (0);
}
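/*
 * For illustration (assumed driver code, not part of this file): a root
 * PIC registers first and then claims the root, with foo_pic_filter(),
 * sc, and FOO_IPI_COUNT being placeholders:
 *
 *	intr_pic_register(dev, xref);
 *	error = intr_pic_claim_root(dev, xref, foo_pic_filter, sc,
 *	    FOO_IPI_COUNT);
 *
 * where foo_pic_filter() reads the pending interrupt number from hardware
 * and hands each decoded source to intr_irq_dispatch().
 */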
int
intr_irq_add_handler(device_t dev, driver_filter_t filt, driver_intr_t hand,
void *arg, u_int irq, int flags, void **cookiep)
{
const char *name;
struct intr_irqsrc *isrc;
int error;
name = device_get_nameunit(dev);
#ifdef INTR_SOLO
/*
* Standard handling is done through the MI interrupt framework. However,
* some interrupts could request solely their own special handling. This
* non-standard handling can be used for interrupt controllers without a
* handler (filter only), so when interrupt controllers are chained, the
* MI interrupt framework is called only in the leaf controller.
*
* Note that the root interrupt controller routine is served as well,
* however in intr_irq_handler(), i.e. the main system dispatch routine.
*/
if (flags & INTR_SOLO && hand != NULL) {
debugf("irq %u cannot solo on %s\n", irq, name);
return (EINVAL);
}
#endif
isrc = isrc_lookup(irq);
if (isrc == NULL) {
debugf("irq %u without source on %s\n", irq, name);
return (EINVAL);
}
error = isrc_register(isrc);
if (error != 0) {
debugf("irq %u map error %d on %s\n", irq, error, name);
return (error);
}
#ifdef INTR_SOLO
if (flags & INTR_SOLO) {
error = isrc_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
arg, cookiep);
debugf("irq %u setup filter error %d on %s\n", irq, error,
name);
} else
#endif
{
error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
cookiep);
debugf("irq %u add handler error %d on %s\n", irq, error, name);
}
if (error != 0)
return (error);
mtx_lock(&isrc_table_lock);
isrc->isrc_handlers++;
if (isrc->isrc_handlers == 1) {
PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
}
mtx_unlock(&isrc_table_lock);
return (0);
}
int
intr_irq_remove_handler(device_t dev, u_int irq, void *cookie)
{
struct intr_irqsrc *isrc;
int error;
isrc = isrc_lookup(irq);
if (isrc == NULL || isrc->isrc_handlers == 0)
return (EINVAL);
#ifdef INTR_SOLO
if (isrc->isrc_filter != NULL) {
if (isrc != cookie)
return (EINVAL);
mtx_lock(&isrc_table_lock);
isrc->isrc_filter = NULL;
isrc->isrc_arg = NULL;
isrc->isrc_handlers = 0;
PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc);
PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
isrc_update_name(isrc, NULL);
mtx_unlock(&isrc_table_lock);
return (0);
}
#endif
if (isrc != intr_handler_source(cookie))
return (EINVAL);
error = intr_event_remove_handler(cookie);
if (error == 0) {
mtx_lock(&isrc_table_lock);
isrc->isrc_handlers--;
if (isrc->isrc_handlers == 0) {
PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc);
PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
}
intrcnt_updatename(isrc);
mtx_unlock(&isrc_table_lock);
}
return (error);
}
int
intr_irq_config(u_int irq, enum intr_trigger trig, enum intr_polarity pol)
{
struct intr_irqsrc *isrc;
isrc = isrc_lookup(irq);
if (isrc == NULL)
return (EINVAL);
if (isrc->isrc_handlers != 0)
return (EBUSY); /* interrupt is enabled (active) */
/*
* Once an interrupt is enabled, we do not change its configuration.
* A controller PIC_ENABLE_INTR() method is called when an interrupt
* is going to be enabled. In this method, a controller should setup
* the interrupt according to saved configuration parameters.
*/
isrc->isrc_trig = trig;
isrc->isrc_pol = pol;
return (0);
}
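/*
 * A hedged example of the intended ordering (values illustrative): the
 * configuration must be set before the first handler is added, so that
 * PIC_ENABLE_INTR() sees the saved trigger/polarity:
 *
 *	error = intr_irq_config(irq, INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH);
 *	if (error == 0)
 *		error = intr_irq_add_handler(dev, filt, hand, arg, irq,
 *		    INTR_TYPE_MISC, &cookie);
 */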
int
intr_irq_describe(u_int irq, void *cookie, const char *descr)
{
struct intr_irqsrc *isrc;
int error;
isrc = isrc_lookup(irq);
if (isrc == NULL || isrc->isrc_handlers == 0)
return (EINVAL);
#ifdef INTR_SOLO
if (isrc->isrc_filter != NULL) {
if (isrc != cookie)
return (EINVAL);
mtx_lock(&isrc_table_lock);
isrc_update_name(isrc, descr);
mtx_unlock(&isrc_table_lock);
return (0);
}
#endif
error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
if (error == 0) {
mtx_lock(&isrc_table_lock);
intrcnt_updatename(isrc);
mtx_unlock(&isrc_table_lock);
}
return (error);
}
#ifdef SMP
int
intr_irq_bind(u_int irq, int cpu)
{
struct intr_irqsrc *isrc;
isrc = isrc_lookup(irq);
if (isrc == NULL || isrc->isrc_handlers == 0)
return (EINVAL);
#ifdef INTR_SOLO
if (isrc->isrc_filter != NULL)
return (intr_isrc_assign_cpu(isrc, cpu));
#endif
return (intr_event_bind(isrc->isrc_event, cpu));
}
/*
* Return the CPU that the next interrupt source should use.
* For now, it just returns the next CPU in round-robin order.
*/
u_int
intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
{
if (!irq_assign_cpu || mp_ncpus == 1)
return (PCPU_GET(cpuid));
do {
last_cpu++;
if (last_cpu > mp_maxid)
last_cpu = 0;
} while (!CPU_ISSET(last_cpu, cpumask));
return (last_cpu);
}
/*
* Distribute all the interrupt sources among the available
* CPUs once the APs have been launched.
*/
static void
intr_irq_shuffle(void *arg __unused)
{
struct intr_irqsrc *isrc;
u_int i;
if (mp_ncpus == 1)
return;
mtx_lock(&isrc_table_lock);
irq_assign_cpu = TRUE;
for (i = 0; i < NIRQ; i++) {
isrc = irq_sources[i];
if (isrc == NULL || isrc->isrc_handlers == 0 ||
isrc->isrc_flags & INTR_ISRCF_PERCPU)
continue;
if (isrc->isrc_event != NULL &&
isrc->isrc_flags & INTR_ISRCF_BOUND &&
isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
panic("%s: CPU inconsistency", __func__);
if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
CPU_ZERO(&isrc->isrc_cpu); /* start again */
/*
* We are in a wicked position here if the following call fails
* for a bound ISRC. The best thing we can do is to clear
* isrc_cpu so that the inconsistency with ie_cpu will be detectable.
*/
if (PIC_BIND(isrc->isrc_dev, isrc) != 0)
CPU_ZERO(&isrc->isrc_cpu);
}
mtx_unlock(&isrc_table_lock);
}
SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);
#else
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{
return (PCPU_GET(cpuid));
}
#endif
void dosoftints(void);
void
dosoftints(void)
{
}
#ifdef SMP
/*
* Init interrupt controller on another CPU.
*/
void
intr_pic_init_secondary(void)
{
/*
* QQQ: Only root PIC is aware of other CPUs ???
*/
KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
//mtx_lock(&isrc_table_lock);
PIC_INIT_SECONDARY(intr_irq_root_dev);
//mtx_unlock(&isrc_table_lock);
}
#endif
#ifdef DDB
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
u_int i;
u_long irqsum;
struct intr_irqsrc *isrc;
for (irqsum = 0, i = 0; i < NIRQ; i++) {
isrc = irq_sources[i];
if (isrc == NULL)
continue;
db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
isrc->isrc_name, isrc->isrc_cpu.__bits[0],
isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "",
isrc->isrc_count[0]);
irqsum += isrc->isrc_count[0];
}
db_printf("irq total %u\n", irqsum);
}
#endif