- Add the interrupt vector number to intr_event_create so MI code can
  look up hard interrupt events by number.  Ignore the irq# for soft
  interrupts.
- Add support to cpuset for binding hardware interrupts.  This has the
  side effect of binding any ithread associated with the hard interrupt.
  Per restrictions imposed by MD code we can presently only bind
  interrupts to a single cpu.  Interrupts can be 'unbound' by binding
  them to all cpus.

Reviewed by:	jhb
Sponsored by:	Nokia
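For reference, a minimal userland sketch of the new binding path (the irq
number 16 and cpu 2 are arbitrary placeholders, not part of this commit):
binding goes through cpuset_setaffinity(2) with the new CPU_WHICH_IRQ
selector, and a mask naming all cpus unbinds.

	#include <sys/param.h>
	#include <sys/cpuset.h>

	#include <err.h>

	int
	main(void)
	{
		cpuset_t mask;

		/* Bind irq 16 to cpu 2; MD code currently allows one cpu only. */
		CPU_ZERO(&mask);
		CPU_SET(2, &mask);
		if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_IRQ, 16,
		    sizeof(mask), &mask) != 0)
			err(1, "cpuset_setaffinity");
		return (0);
	}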
@@ -139,7 +139,7 @@ intr_register_source(struct intsrc *isrc)
 	vector = isrc->is_pic->pic_vector(isrc);
 	if (interrupt_sources[vector] != NULL)
 		return (EEXIST);
-	error = intr_event_create(&isrc->is_event, isrc, 0,
+	error = intr_event_create(&isrc->is_event, isrc, 0, vector,
 	    intr_disable_src, (mask_fn)isrc->is_pic->pic_enable_source,
 	    (mask_fn)isrc->is_pic->pic_eoi_source, intr_assign_cpu, "irq%d:",
 	    vector);
@@ -70,7 +70,7 @@ arm_setup_irqhandler(const char *name, driver_filter_t *filt,
 		return;
 	event = intr_events[irq];
 	if (event == NULL) {
-		error = intr_event_create(&event, (void *)irq, 0,
+		error = intr_event_create(&event, (void *)irq, 0, irq,
 		    (mask_fn)arm_mask_irq, (mask_fn)arm_unmask_irq,
 		    NULL, NULL, "intr%d:", irq);
 		if (error)
@@ -130,7 +130,7 @@ intr_register_source(struct intsrc *isrc)
 	vector = isrc->is_pic->pic_vector(isrc);
 	if (interrupt_sources[vector] != NULL)
 		return (EEXIST);
-	error = intr_event_create(&isrc->is_event, isrc, 0,
+	error = intr_event_create(&isrc->is_event, isrc, 0, vector,
 	    intr_disable_src, (mask_fn)isrc->is_pic->pic_enable_source,
 	    (mask_fn)isrc->is_pic->pic_eoi_source, intr_assign_cpu, "irq%d:",
 	    vector);
@@ -340,7 +340,7 @@ ia64_setup_intr(const char *name, int irq, driver_filter_t filter,
 		return (ENOMEM);
 
 	error = intr_event_create(&i->event, (void *)(uintptr_t)vector,
-	    0, ia64_intr_mask, ia64_intr_unmask, ia64_intr_eoi,
+	    0, vector, ia64_intr_mask, ia64_intr_unmask, ia64_intr_eoi,
 	    NULL, "irq%u:", irq);
 	if (error) {
 		free(i, M_DEVBUF);
@@ -412,6 +412,8 @@ cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
 			return (0);
 		}
 		return (ESRCH);
+	case CPU_WHICH_IRQ:
+		return (0);
 	default:
 		return (EINVAL);
 	}
@@ -760,6 +762,8 @@ cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
 		break;
 	case CPU_WHICH_CPUSET:
 		break;
+	case CPU_WHICH_IRQ:
+		return (EINVAL);
 	}
 	switch (uap->level) {
 	case CPU_LEVEL_ROOT:
@@ -820,6 +824,9 @@ cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
 		break;
 	case CPU_WHICH_CPUSET:
 		break;
+	case CPU_WHICH_IRQ:
+		error = EINVAL;
+		goto out;
 	}
 	if (uap->level == CPU_LEVEL_ROOT)
 		nset = cpuset_refroot(set);
@@ -845,6 +852,9 @@ cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
 		case CPU_WHICH_CPUSET:
 			CPU_COPY(&set->cs_mask, mask);
 			break;
+		case CPU_WHICH_IRQ:
+			error = intr_getaffinity(uap->id, mask);
+			break;
 		}
 		break;
 	default:
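The read side is symmetric.  Below is a sketch of a hypothetical userland
helper (not part of this commit; it assumes the includes from the earlier
example) that classifies an irq as bound using only the syscall surface
added here.  Note that intr_getaffinity reports the full root set for an
unbound irq, so this heuristic misreads on a single-cpu root set.

	static int
	irq_is_bound(int irq)
	{
		cpuset_t mask;
		int n, set;

		if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_IRQ, irq,
		    sizeof(mask), &mask) != 0)
			return (-1);
		set = 0;
		for (n = 0; n < CPU_SETSIZE; n++)
			if (CPU_ISSET(n, &mask))
				set++;
		/* Exactly one cpu in the mask means the irq is bound. */
		return (set == 1);
	}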
@@ -921,6 +931,9 @@ cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
 		break;
 	case CPU_WHICH_CPUSET:
 		break;
+	case CPU_WHICH_IRQ:
+		error = EINVAL;
+		goto out;
 	}
 	if (uap->level == CPU_LEVEL_ROOT)
 		nset = cpuset_refroot(set);
@@ -946,6 +959,9 @@ cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
 				cpuset_rel(set);
 			}
 			break;
+		case CPU_WHICH_IRQ:
+			error = intr_setaffinity(uap->id, mask);
+			break;
 		default:
 			error = EINVAL;
 			break;
@@ -28,11 +28,11 @@
 __FBSDID("$FreeBSD$");
 
 #include "opt_ddb.h"
-#include "opt_intr_filter.h"
 
 #include <sys/param.h>
 #include <sys/bus.h>
 #include <sys/conf.h>
+#include <sys/cpuset.h>
 #include <sys/rtprio.h>
 #include <sys/systm.h>
 #include <sys/interrupt.h>
@@ -92,6 +92,8 @@ SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
     "Number of consecutive interrupts before storm protection is enabled");
 static TAILQ_HEAD(, intr_event) event_list =
     TAILQ_HEAD_INITIALIZER(event_list);
+static struct mtx event_lock;
+MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
 
 static void	intr_event_update(struct intr_event *ie);
 #ifdef INTR_FILTER
@@ -244,7 +246,7 @@ intr_event_update(struct intr_event *ie)
 }
 
 int
-intr_event_create(struct intr_event **event, void *source,int flags,
+intr_event_create(struct intr_event **event, void *source,int flags, int irq,
     void (*pre_ithread)(void *), void (*post_ithread)(void *),
     void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
     const char *fmt, ...)
@@ -262,6 +264,7 @@ intr_event_create(struct intr_event **event, void *source,int flags,
 	ie->ie_post_filter = post_filter;
 	ie->ie_assign_cpu = assign_cpu;
 	ie->ie_flags = flags;
+	ie->ie_irq = irq;
 	ie->ie_cpu = NOCPU;
 	TAILQ_INIT(&ie->ie_handlers);
 	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
@@ -270,9 +273,9 @@ intr_event_create(struct intr_event **event, void *source,int flags,
 	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
 	va_end(ap);
 	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
-	mtx_pool_lock(mtxpool_sleep, &event_list);
+	mtx_lock(&event_lock);
 	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
-	mtx_pool_unlock(mtxpool_sleep, &event_list);
+	mtx_unlock(&event_lock);
 	if (event != NULL)
 		*event = ie;
 	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
@@ -290,7 +293,8 @@ intr_event_create(struct intr_event **event, void *source,int flags,
 int
 intr_event_bind(struct intr_event *ie, u_char cpu)
 {
-	struct thread *td;
+	cpuset_t mask;
+	lwpid_t id;
 	int error;
 
 	/* Need a CPU to bind to. */
@@ -299,28 +303,97 @@ intr_event_bind(struct intr_event *ie, u_char cpu)
 
 	if (ie->ie_assign_cpu == NULL)
 		return (EOPNOTSUPP);
-
-	/* Don't allow a bind request if the interrupt is already bound. */
+	/*
+	 * If we have any ithreads try to set their mask first since this
+	 * can fail.
+	 */
 	mtx_lock(&ie->ie_lock);
-	if (ie->ie_cpu != NOCPU && cpu != NOCPU) {
+	if (ie->ie_thread != NULL) {
+		CPU_ZERO(&mask);
+		if (cpu == NOCPU)
+			CPU_COPY(cpuset_root, &mask);
+		else
+			CPU_SET(cpu, &mask);
+		id = ie->ie_thread->it_thread->td_tid;
 		mtx_unlock(&ie->ie_lock);
-		return (EBUSY);
-	}
-	mtx_unlock(&ie->ie_lock);
+		error = cpuset_setthread(id, &mask);
+		if (error)
+			return (error);
+	} else
+		mtx_unlock(&ie->ie_lock);
 	error = ie->ie_assign_cpu(ie->ie_source, cpu);
 	if (error)
 		return (error);
 	mtx_lock(&ie->ie_lock);
-	if (ie->ie_thread != NULL)
-		td = ie->ie_thread->it_thread;
-	else
-		td = NULL;
-	if (td != NULL)
-		thread_lock(td);
 	ie->ie_cpu = cpu;
-	if (td != NULL)
-		thread_unlock(td);
 	mtx_unlock(&ie->ie_lock);
 
 	return (error);
 }
 
+static struct intr_event *
+intr_lookup(int irq)
+{
+	struct intr_event *ie;
+
+	mtx_lock(&event_lock);
+	TAILQ_FOREACH(ie, &event_list, ie_list)
+		if (ie->ie_irq == irq &&
+		    (ie->ie_flags & IE_SOFT) == 0 &&
+		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
+			break;
+	mtx_unlock(&event_lock);
+	return (ie);
+}
+
+int
+intr_setaffinity(int irq, void *m)
+{
+	struct intr_event *ie;
+	cpuset_t *mask;
+	u_char cpu;
+	int error;
+	int n;
+
+	mask = m;
+	error = 0;
+	cpu = NOCPU;
+	/*
+	 * If we're setting all cpus we can unbind.  Otherwise make sure
+	 * only one cpu is in the set.
+	 */
+	if (CPU_CMP(cpuset_root, mask)) {
+		for (n = 0; n < CPU_SETSIZE; n++) {
+			if (!CPU_ISSET(n, mask))
+				continue;
+			if (cpu != NOCPU)
+				return (EINVAL);
+			cpu = (u_char)n;
+		}
+	}
+	ie = intr_lookup(irq);
+	if (ie == NULL)
+		return (ESRCH);
+	intr_event_bind(ie, cpu);
+	return (error);
+}
+
+int
+intr_getaffinity(int irq, void *m)
+{
+	struct intr_event *ie;
+	cpuset_t *mask;
+
+	mask = m;
+	ie = intr_lookup(irq);
+	if (ie == NULL)
+		return (ESRCH);
+	CPU_ZERO(mask);
+	mtx_lock(&ie->ie_lock);
+	if (ie->ie_cpu == NOCPU)
+		CPU_COPY(cpuset_root, mask);
+	else
+		CPU_SET(ie->ie_cpu, mask);
+	mtx_unlock(&ie->ie_lock);
+	return (0);
+}
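In-kernel consumers compose the new MI entry points the same way.  A
hypothetical sketch (example_rebind_irq is illustrative only, not part of
the commit): rebind a hard interrupt to a single cpu, or hand back the
full root set to unbind it.

	static int
	example_rebind_irq(int irq, u_char cpu)
	{
		cpuset_t mask;

		CPU_ZERO(&mask);
		if (cpu == NOCPU)
			CPU_COPY(cpuset_root, &mask);	/* all cpus == unbind */
		else
			CPU_SET(cpu, &mask);
		return (intr_setaffinity(irq, &mask));
	}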
@@ -329,14 +402,14 @@ int
 intr_event_destroy(struct intr_event *ie)
 {
 
+	mtx_lock(&event_lock);
 	mtx_lock(&ie->ie_lock);
 	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
 		mtx_unlock(&ie->ie_lock);
+		mtx_unlock(&event_lock);
 		return (EBUSY);
 	}
-	mtx_pool_lock(mtxpool_sleep, &event_list);
 	TAILQ_REMOVE(&event_list, ie, ie_list);
-	mtx_pool_unlock(mtxpool_sleep, &event_list);
 #ifndef notyet
 	if (ie->ie_thread != NULL) {
 		ithread_destroy(ie->ie_thread);
@@ -344,6 +417,7 @@ intr_event_destroy(struct intr_event *ie)
 	}
 #endif
 	mtx_unlock(&ie->ie_lock);
+	mtx_unlock(&event_lock);
 	mtx_destroy(&ie->ie_lock);
 	free(ie, M_ITHREAD);
 	return (0);
@@ -914,7 +988,7 @@ swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
 		if (!(ie->ie_flags & IE_SOFT))
 			return (EINVAL);
 	} else {
-		error = intr_event_create(&ie, NULL, IE_SOFT,
+		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
 		    NULL, NULL, NULL, NULL, "swi%d:", pri);
 		if (error)
 			return (error);
@@ -1106,7 +1180,6 @@ ithread_loop(void *arg)
 	struct intr_event *ie;
 	struct thread *td;
 	struct proc *p;
-	u_char cpu;
 
 	td = curthread;
 	p = td->td_proc;
@@ -1115,7 +1188,6 @@ ithread_loop(void *arg)
 	    ("%s: ithread and proc linkage out of sync", __func__));
 	ie = ithd->it_event;
 	ie->ie_count = 0;
-	cpu = NOCPU;
 
 	/*
 	 * As long as we have interrupts outstanding, go through the
@@ -1161,21 +1233,6 @@ ithread_loop(void *arg)
 			ie->ie_count = 0;
 			mi_switch(SW_VOL, NULL);
 		}
-
-#ifdef SMP
-		/*
-		 * Ensure we are bound to the correct CPU.  We can't
-		 * move ithreads until SMP is running however, so just
-		 * leave interrupts on the boot CPU during boot.
-		 */
-		if (ie->ie_cpu != cpu && smp_started) {
-			cpu = ie->ie_cpu;
-			if (cpu == NOCPU)
-				sched_unbind(td);
-			else
-				sched_bind(td, cpu);
-		}
-#endif
 		thread_unlock(td);
 	}
 }
@@ -1276,7 +1333,6 @@ ithread_loop(void *arg)
 	struct thread *td;
 	struct proc *p;
 	int priv;
-	u_char cpu;
 
 	td = curthread;
 	p = td->td_proc;
@@ -1287,7 +1343,6 @@ ithread_loop(void *arg)
 	    ("%s: ithread and proc linkage out of sync", __func__));
 	ie = ithd->it_event;
 	ie->ie_count = 0;
-	cpu = NOCPU;
 
 	/*
 	 * As long as we have interrupts outstanding, go through the
@@ -1336,21 +1391,6 @@ ithread_loop(void *arg)
 			ie->ie_count = 0;
 			mi_switch(SW_VOL, NULL);
 		}
-
-#ifdef SMP
-		/*
-		 * Ensure we are bound to the correct CPU.  We can't
-		 * move ithreads until SMP is running however, so just
-		 * leave interrupts on the boot CPU during boot.
-		 */
-		if (!priv && ie->ie_cpu != cpu && smp_started) {
-			cpu = ie->ie_cpu;
-			if (cpu == NOCPU)
-				sched_unbind(td);
-			else
-				sched_bind(td, cpu);
-		}
-#endif
 		thread_unlock(td);
 	}
 }
@@ -1580,8 +1620,6 @@ db_dump_intr_event(struct intr_event *ie, int handlers)
 		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
 	else
 		db_printf("(no thread)");
-	if (ie->ie_cpu != NOCPU)
-		db_printf(" (CPU %d)", ie->ie_cpu);
 	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
 	    (it != NULL && it->it_need)) {
 		db_printf(" {");
@@ -226,7 +226,7 @@ powerpc_setup_intr(const char *name, u_int irq, driver_filter_t filter,
 		return (ENOMEM);
 
 	if (i->event == NULL) {
-		error = intr_event_create(&i->event, (void *)irq, 0,
+		error = intr_event_create(&i->event, (void *)irq, 0, irq,
 		    powerpc_intr_mask, powerpc_intr_unmask, powerpc_intr_eoi,
 		    NULL, "irq%u:", irq);
 		if (error)
@@ -282,7 +282,7 @@ intr_controller_register(int vec, const struct intr_controller *ic,
 	 * CPU as long as the source of a level sensitive interrupt is
 	 * not cleared.
 	 */
-	error = intr_event_create(&ie, iv, 0, NULL, intr_enable_eoi,
+	error = intr_event_create(&ie, iv, 0, vec, NULL, intr_enable_eoi,
 	    intr_enable_eoi, NULL, "vec%d:", vec);
 	if (error != 0)
 		return (error);
@@ -307,8 +307,8 @@ inthand_add(const char *name, int vec, driver_filter_t *filt,
 	ie = iv->iv_event;
 	mtx_unlock_spin(&intr_table_lock);
 	if (ie == NULL) {
-		errcode = intr_event_create(&ie, (void *)(intptr_t)vec, 0, NULL,
-		    intr_enable, intr_enable, NULL, "vec%d:", vec);
+		errcode = intr_event_create(&ie, (void *)(intptr_t)vec, 0, vec,
+		    NULL, intr_enable, intr_enable, NULL, "vec%d:", vec);
 		if (errcode)
 			return (errcode);
 		mtx_lock_spin(&intr_table_lock);
@@ -131,6 +131,7 @@ typedef struct _cpuset {
 #define	CPU_WHICH_TID		1	/* Specifies a thread id. */
 #define	CPU_WHICH_PID		2	/* Specifies a process id. */
 #define	CPU_WHICH_CPUSET	3	/* Specifies a set id. */
+#define	CPU_WHICH_IRQ		4	/* Specifies an irq #. */
 
 /*
  * Reserved cpuset identifiers.
@@ -107,6 +107,7 @@ struct intr_event {
 	int		ie_count;	/* Loop counter. */
 	int		ie_warncnt;	/* Rate-check interrupt storm warns. */
 	struct timeval	ie_warntm;
+	int		ie_irq;		/* Physical irq number if !SOFT. */
 	u_char		ie_cpu;		/* CPU this event is bound to. */
 };
 
@@ -151,14 +152,16 @@ int	intr_event_add_handler(struct intr_event *ie, const char *name,
 	    u_char pri, enum intr_type flags, void **cookiep);
 int	intr_event_bind(struct intr_event *ie, u_char cpu);
 int	intr_event_create(struct intr_event **event, void *source,
-	    int flags, void (*pre_ithread)(void *),
+	    int flags, int irq, void (*pre_ithread)(void *),
 	    void (*post_ithread)(void *), void (*post_filter)(void *),
 	    int (*assign_cpu)(void *, u_char), const char *fmt, ...)
-	    __printflike(8, 9);
+	    __printflike(9, 10);
 int	intr_event_destroy(struct intr_event *ie);
 int	intr_event_handle(struct intr_event *ie, struct trapframe *frame);
 int	intr_event_remove_handler(void *cookie);
+int	intr_getaffinity(int irq, void *mask);
 void	*intr_handler_source(void *cookie);
+int	intr_setaffinity(int irq, void *mask);
 int	swi_add(struct intr_event **eventp, const char *name,
 	    driver_intr_t handler, void *arg, int pri, enum intr_type flags,
 	    void **cookiep);
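An MD platform adopting the widened prototype would now pass its irq and,
where the PIC supports rerouting, an assign_cpu callback.  A sketch with
placeholder names (everything prefixed example_ is hypothetical, not part
of this commit):

	static void	example_pre_ithread(void *arg);
	static void	example_post_ithread(void *arg);
	static void	example_post_filter(void *arg);
	static int	example_pic_route(void *arg, u_char cpu);

	static int
	example_assign_cpu(void *arg, u_char cpu)
	{

		/* Reprogram the (made-up) PIC; cpu == NOCPU restores "any cpu". */
		return (example_pic_route(arg, cpu));
	}

	static int
	example_register(void *source, int irq, struct intr_event **eventp)
	{

		return (intr_event_create(eventp, source, 0, irq,
		    example_pre_ithread, example_post_ithread, example_post_filter,
		    example_assign_cpu, "irq%d:", irq));
	}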