2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
1997-05-26 14:37:43 +00:00
|
|
|
* Copyright (c) 1997, Stefan Esser <se@freebsd.org>
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice unmodified, this list of conditions, and the following
|
|
|
|
* disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
1998-06-11 07:23:59 +00:00
|
|
|
|
2003-10-24 21:05:30 +00:00
|
|
|
#include "opt_ddb.h"
|
|
|
|
|
1998-11-10 09:16:29 +00:00
|
|
|
#include <sys/param.h>
|
2000-09-13 18:33:25 +00:00
|
|
|
#include <sys/bus.h>
|
2003-02-14 13:10:40 +00:00
|
|
|
#include <sys/conf.h>
|
2000-09-13 18:33:25 +00:00
|
|
|
#include <sys/rtprio.h>
|
1997-05-26 14:37:43 +00:00
|
|
|
#include <sys/systm.h>
|
1997-06-02 08:19:06 +00:00
|
|
|
#include <sys/interrupt.h>
|
2000-10-05 23:09:57 +00:00
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/kthread.h>
|
|
|
|
#include <sys/ktr.h>
|
2004-06-05 18:27:28 +00:00
|
|
|
#include <sys/limits.h>
|
2001-03-28 09:17:56 +00:00
|
|
|
#include <sys/lock.h>
|
2000-10-05 23:09:57 +00:00
|
|
|
#include <sys/malloc.h>
|
2000-10-20 07:58:15 +00:00
|
|
|
#include <sys/mutex.h>
|
2000-10-05 23:09:57 +00:00
|
|
|
#include <sys/proc.h>
|
2001-02-20 10:25:29 +00:00
|
|
|
#include <sys/random.h>
|
2001-02-09 17:42:43 +00:00
|
|
|
#include <sys/resourcevar.h>
|
2004-12-30 20:29:58 +00:00
|
|
|
#include <sys/sched.h>
|
2001-06-01 13:23:28 +00:00
|
|
|
#include <sys/sysctl.h>
|
2000-10-05 23:09:57 +00:00
|
|
|
#include <sys/unistd.h>
|
|
|
|
#include <sys/vmmeter.h>
|
|
|
|
#include <machine/atomic.h>
|
|
|
|
#include <machine/cpu.h>
|
2000-10-25 05:19:40 +00:00
|
|
|
#include <machine/md_var.h>
|
2001-02-09 17:42:43 +00:00
|
|
|
#include <machine/stdarg.h>
|
2003-10-24 21:05:30 +00:00
|
|
|
#ifdef DDB
|
|
|
|
#include <ddb/ddb.h>
|
|
|
|
#include <ddb/db_sym.h>
|
|
|
|
#endif
|
1997-05-26 14:37:43 +00:00
|
|
|
|
2001-02-20 10:25:29 +00:00
|
|
|
/*
 * Sample fed to random_harvest() when an interrupt thread with the
 * IT_ENTROPY flag is scheduled (see ithread_schedule()): the process
 * that was current when the interrupt arrived plus the vector number.
 */
struct int_entropy {
	struct proc *proc;	/* curthread's process at harvest time */
	uintptr_t vector;	/* interrupt vector number */
};
|
|
|
|
|
2001-02-09 17:42:43 +00:00
|
|
|
/* Well-known interrupt threads and SWI handler cookies (set elsewhere). */
struct ithd *clk_ithd;		/* presumably the clock ithread — named only */
struct ithd *tty_ithd;		/* presumably the tty ithread — named only */
void *softclock_ih;		/* softclock handler cookie */
void *vm_ih;			/* VM handler cookie */

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

/*
 * Number of back-to-back interrupts after which an interrupt source is
 * treated as "storming" and throttled by ithread_loop().  A value of 0
 * disables storm detection entirely.  Run-time tunable via the
 * hw.intr_storm_threshold sysctl/tunable.
 */
static int intr_storm_threshold = 500;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");

/* Forward declarations. */
static void	ithread_loop(void *);
static void	ithread_update(struct ithd *);
static void	start_softintr(void *);
|
2004-04-16 20:25:40 +00:00
|
|
|
|
2001-02-09 17:42:43 +00:00
|
|
|
u_char
|
|
|
|
ithread_priority(enum intr_type flags)
|
2000-09-13 18:33:25 +00:00
|
|
|
{
|
2001-02-09 17:42:43 +00:00
|
|
|
u_char pri;
|
2000-09-13 18:33:25 +00:00
|
|
|
|
2001-02-09 17:42:43 +00:00
|
|
|
flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
|
2001-06-16 22:42:19 +00:00
|
|
|
INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
|
2000-09-13 18:33:25 +00:00
|
|
|
switch (flags) {
|
2001-02-09 17:42:43 +00:00
|
|
|
case INTR_TYPE_TTY:
|
2000-09-13 18:33:25 +00:00
|
|
|
pri = PI_TTYLOW;
|
|
|
|
break;
|
|
|
|
case INTR_TYPE_BIO:
|
|
|
|
/*
|
|
|
|
* XXX We need to refine this. BSD/OS distinguishes
|
|
|
|
* between tape and disk priorities.
|
|
|
|
*/
|
|
|
|
pri = PI_DISK;
|
|
|
|
break;
|
|
|
|
case INTR_TYPE_NET:
|
|
|
|
pri = PI_NET;
|
|
|
|
break;
|
|
|
|
case INTR_TYPE_CAM:
|
|
|
|
pri = PI_DISK; /* XXX or PI_CAM? */
|
|
|
|
break;
|
2001-06-16 22:42:19 +00:00
|
|
|
case INTR_TYPE_AV: /* Audio/video */
|
|
|
|
pri = PI_AV;
|
|
|
|
break;
|
2001-02-09 17:42:43 +00:00
|
|
|
case INTR_TYPE_CLK:
|
|
|
|
pri = PI_REALTIME;
|
|
|
|
break;
|
2000-09-13 18:33:25 +00:00
|
|
|
case INTR_TYPE_MISC:
|
|
|
|
pri = PI_DULL; /* don't care */
|
|
|
|
break;
|
|
|
|
default:
|
2001-02-09 17:42:43 +00:00
|
|
|
/* We didn't specify an interrupt level. */
|
2000-09-13 18:33:25 +00:00
|
|
|
panic("ithread_priority: no interrupt type in flags");
|
|
|
|
}
|
|
|
|
|
|
|
|
return pri;
|
|
|
|
}
|
|
|
|
|
2001-02-09 17:42:43 +00:00
|
|
|
/*
 * Regenerate the name (p_comm) and priority for a threaded interrupt thread.
 *
 * Called with the ithread's it_lock held.  Rebuilds p_comm from the
 * ithread name plus as many handler names as fit, sets the thread's
 * priority to that of the highest-priority (first) handler, and
 * recomputes the IT_ENTROPY flag from the handlers.
 */
static void
ithread_update(struct ithd *ithd)
{
	struct intrhand *ih;
	struct thread *td;
	struct proc *p;
	int missed;		/* handler names that did not fit in p_comm */

	mtx_assert(&ithd->it_lock, MA_OWNED);
	td = ithd->it_td;
	/* No backing kernel thread yet: nothing to rename or reprioritize. */
	if (td == NULL)
		return;
	p = td->td_proc;

	/* Start the name over from the bare ithread name. */
	strlcpy(p->p_comm, ithd->it_name, sizeof(p->p_comm));
	/* Recomputed from the handler list below. */
	ithd->it_flags &= ~IT_ENTROPY;

	ih = TAILQ_FIRST(&ithd->it_handlers);
	if (ih == NULL) {
		/* No handlers left: park at the lowest ithread priority. */
		mtx_lock_spin(&sched_lock);
		sched_prio(td, PRI_MAX_ITHD);
		mtx_unlock_spin(&sched_lock);
		return;
	}
	/* The list is priority-ordered, so the head's priority governs. */
	mtx_lock_spin(&sched_lock);
	sched_prio(td, ih->ih_pri);
	mtx_unlock_spin(&sched_lock);
	missed = 0;
	TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
		/* Append " <name>" only if it fits with the NUL. */
		if (strlen(p->p_comm) + strlen(ih->ih_name) + 1 <
		    sizeof(p->p_comm)) {
			strcat(p->p_comm, " ");
			strcat(p->p_comm, ih->ih_name);
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ithd->it_flags |= IT_ENTROPY;
	}
	/*
	 * Indicate the omitted handlers with one '+' each; if even the
	 * '+'s no longer fit, collapse the last one to a '*'.
	 */
	while (missed-- > 0) {
		if (strlen(p->p_comm) + 1 == sizeof(p->p_comm)) {
			if (p->p_comm[sizeof(p->p_comm) - 2] == '+')
				p->p_comm[sizeof(p->p_comm) - 2] = '*';
			else
				p->p_comm[sizeof(p->p_comm) - 2] = '+';
		} else
			strcat(p->p_comm, "+");
	}
	CTR2(KTR_INTR, "%s: updated %s", __func__, p->p_comm);
}
|
|
|
|
|
|
|
|
/*
 * Create a new interrupt thread for `vector'.
 *
 * Allocates and initializes a struct ithd, names it with the
 * printf-style `fmt' arguments, and spawns a kernel thread running
 * ithread_loop() for it.  The new thread is created stopped
 * (RFSTOPPED) and left in the interrupt-wait state; it first runs when
 * ithread_schedule() puts it on the run queue.  On success the ithd is
 * optionally returned via `*ithread'.  Returns 0 or an errno value.
 */
int
ithread_create(struct ithd **ithread, uintptr_t vector, int flags,
    void (*disable)(uintptr_t), void (*enable)(uintptr_t), const char *fmt, ...)
{
	struct ithd *ithd;
	struct thread *td;
	struct proc *p;
	int error;
	va_list ap;

	/* The only valid flag during creation is IT_SOFT. */
	if ((flags & ~IT_SOFT) != 0)
		return (EINVAL);

	ithd = malloc(sizeof(struct ithd), M_ITHREAD, M_WAITOK | M_ZERO);
	ithd->it_vector = vector;
	ithd->it_disable = disable;
	ithd->it_enable = enable;
	ithd->it_flags = flags;
	TAILQ_INIT(&ithd->it_handlers);
	mtx_init(&ithd->it_lock, "ithread", NULL, MTX_DEF);

	/* Format the thread name from the caller's arguments. */
	va_start(ap, fmt);
	vsnprintf(ithd->it_name, sizeof(ithd->it_name), fmt, ap);
	va_end(ap);

	error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
	    0, "%s", ithd->it_name);
	if (error) {
		/* Undo the allocation; nothing else references ithd yet. */
		mtx_destroy(&ithd->it_lock);
		free(ithd, M_ITHREAD);
		return (error);
	}
	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
	/* Start in the ithread priority class, waiting for an interrupt. */
	mtx_lock_spin(&sched_lock);
	td->td_ksegrp->kg_pri_class = PRI_ITHD;
	td->td_priority = PRI_MAX_ITHD;
	TD_SET_IWAIT(td);
	mtx_unlock_spin(&sched_lock);
	/* Cross-link the ithd and its backing thread. */
	ithd->it_td = td;
	td->td_ithd = ithd;
	if (ithread != NULL)
		*ithread = ithd;
	CTR2(KTR_INTR, "%s: created %s", __func__, ithd->it_name);
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Tear down an interrupt thread that has no handlers left.
 *
 * Marks the ithread IT_DEAD and, if its thread is parked waiting for an
 * interrupt, makes it runnable so ithread_loop() can notice the flag
 * and free the ithd itself.  Fails with EINVAL if handlers are still
 * attached.
 */
int
ithread_destroy(struct ithd *ithread)
{

	struct thread *td;
	if (ithread == NULL)
		return (EINVAL);

	td = ithread->it_td;
	mtx_lock(&ithread->it_lock);
	/* Refuse to kill a thread that still services handlers. */
	if (!TAILQ_EMPTY(&ithread->it_handlers)) {
		mtx_unlock(&ithread->it_lock);
		return (EINVAL);
	}
	ithread->it_flags |= IT_DEAD;
	mtx_lock_spin(&sched_lock);
	if (TD_AWAITING_INTR(td)) {
		/* Wake it so ithread_loop() sees IT_DEAD and exits. */
		TD_CLR_IWAIT(td);
		setrunqueue(td, SRQ_INTR);
	}
	mtx_unlock_spin(&sched_lock);
	mtx_unlock(&ithread->it_lock);
	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_name);
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Attach a handler to an interrupt thread.
 *
 * Allocates an intrhand, translates INTR_* flags to IH_* flags, and
 * inserts it into the ithread's handler list in priority order.
 * Fast and non-fast handlers may not be mixed on one thread, and an
 * exclusive handler may not share a thread at all.  On success the
 * handler cookie is optionally returned via `*cookiep'.  Returns 0 or
 * EINVAL.
 */
int
ithread_add_handler(struct ithd* ithread, const char *name,
    driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
    void **cookiep)
{
	struct intrhand *ih, *temp_ih;

	if (ithread == NULL || name == NULL || handler == NULL)
		return (EINVAL);

	ih = malloc(sizeof(struct intrhand), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_ithread = ithread;
	ih->ih_pri = pri;
	/* FAST and EXCL are mutually exclusive; FAST wins if both given. */
	if (flags & INTR_FAST)
		ih->ih_flags = IH_FAST;
	else if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	mtx_lock(&ithread->it_lock);
	/* An exclusive handler must be the only one on the thread. */
	if ((flags & INTR_EXCL) != 0 && !TAILQ_EMPTY(&ithread->it_handlers))
		goto fail;
	if (!TAILQ_EMPTY(&ithread->it_handlers)) {
		temp_ih = TAILQ_FIRST(&ithread->it_handlers);
		if (temp_ih->ih_flags & IH_EXCLUSIVE)
			goto fail;
		/* Fast and non-fast handlers may not share a thread. */
		if ((ih->ih_flags & IH_FAST) && !(temp_ih->ih_flags & IH_FAST))
			goto fail;
		if (!(ih->ih_flags & IH_FAST) && (temp_ih->ih_flags & IH_FAST))
			goto fail;
	}

	/* Keep the list sorted by ascending ih_pri (best priority first). */
	TAILQ_FOREACH(temp_ih, &ithread->it_handlers, ih_next)
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ithread->it_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	/* Refresh the thread's name and priority for the new handler set. */
	ithread_update(ithread);
	mtx_unlock(&ithread->it_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ithread->it_name);
	return (0);

fail:
	mtx_unlock(&ithread->it_lock);
	free(ih, M_ITHREAD);
	return (EINVAL);
}
|
|
|
|
|
|
|
|
int
|
|
|
|
ithread_remove_handler(void *cookie)
|
|
|
|
{
|
|
|
|
struct intrhand *handler = (struct intrhand *)cookie;
|
|
|
|
struct ithd *ithread;
|
|
|
|
#ifdef INVARIANTS
|
|
|
|
struct intrhand *ih;
|
|
|
|
#endif
|
|
|
|
|
2001-02-20 10:25:29 +00:00
|
|
|
if (handler == NULL)
|
2001-02-09 17:42:43 +00:00
|
|
|
return (EINVAL);
|
2001-02-22 00:23:56 +00:00
|
|
|
ithread = handler->ih_ithread;
|
|
|
|
KASSERT(ithread != NULL,
|
2001-02-20 10:25:29 +00:00
|
|
|
("interrupt handler \"%s\" has a NULL interrupt thread",
|
|
|
|
handler->ih_name));
|
2001-12-10 05:40:12 +00:00
|
|
|
CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
|
2001-02-22 02:14:08 +00:00
|
|
|
ithread->it_name);
|
2001-05-17 22:43:26 +00:00
|
|
|
mtx_lock(&ithread->it_lock);
|
2001-02-09 17:42:43 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
TAILQ_FOREACH(ih, &ithread->it_handlers, ih_next)
|
2001-02-20 10:25:29 +00:00
|
|
|
if (ih == handler)
|
|
|
|
goto ok;
|
2001-05-17 22:43:26 +00:00
|
|
|
mtx_unlock(&ithread->it_lock);
|
2001-02-20 10:25:29 +00:00
|
|
|
panic("interrupt handler \"%s\" not found in interrupt thread \"%s\"",
|
|
|
|
ih->ih_name, ithread->it_name);
|
|
|
|
ok:
|
2001-02-09 17:42:43 +00:00
|
|
|
#endif
|
2001-02-22 02:18:32 +00:00
|
|
|
/*
|
|
|
|
* If the interrupt thread is already running, then just mark this
|
|
|
|
* handler as being dead and let the ithread do the actual removal.
|
2004-01-13 22:55:46 +00:00
|
|
|
*
|
|
|
|
* During a cold boot while cold is set, msleep() does not sleep,
|
|
|
|
* so we have to remove the handler here rather than letting the
|
|
|
|
* thread do it.
|
2001-02-22 02:18:32 +00:00
|
|
|
*/
|
|
|
|
mtx_lock_spin(&sched_lock);
|
2004-01-13 22:55:46 +00:00
|
|
|
if (!TD_AWAITING_INTR(ithread->it_td) && !cold) {
|
2001-02-22 02:18:32 +00:00
|
|
|
handler->ih_flags |= IH_DEAD;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ensure that the thread will process the handler list
|
|
|
|
* again and remove this handler if it has already passed
|
|
|
|
* it on the list.
|
|
|
|
*/
|
|
|
|
ithread->it_need = 1;
|
2001-05-17 22:43:26 +00:00
|
|
|
} else
|
2001-02-22 02:18:32 +00:00
|
|
|
TAILQ_REMOVE(&ithread->it_handlers, handler, ih_next);
|
|
|
|
mtx_unlock_spin(&sched_lock);
|
2001-05-17 22:43:26 +00:00
|
|
|
if ((handler->ih_flags & IH_DEAD) != 0)
|
|
|
|
msleep(handler, &ithread->it_lock, PUSER, "itrmh", 0);
|
|
|
|
ithread_update(ithread);
|
|
|
|
mtx_unlock(&ithread->it_lock);
|
|
|
|
free(handler, M_ITHREAD);
|
2001-02-09 17:42:43 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2001-02-20 10:25:29 +00:00
|
|
|
/*
 * Schedule an interrupt thread to run in response to an interrupt.
 *
 * Optionally harvests entropy from the event, sets it_need so a running
 * thread makes another pass, and puts the thread on the run queue if it
 * is parked waiting for an interrupt.  Returns EINVAL for a stray
 * interrupt (no ithread or no handlers), otherwise 0.
 */
int
ithread_schedule(struct ithd *ithread)
{
	struct int_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if ((ithread == NULL) || TAILQ_EMPTY(&ithread->it_handlers))
		return (EINVAL);

	ctd = curthread;
	td = ithread->it_td;
	p = td->td_proc;
	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ithread->it_flags & IT_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, p->p_comm);
		entropy.vector = ithread->it_vector;
		entropy.proc = ctd->td_proc;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ithread->it_name));
	CTR4(KTR_INTR, "%s: pid %d: (%s) need = %d",
	    __func__, p->p_pid, p->p_comm, ithread->it_need);

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, grab sched_lock and see if we actually need to
	 * put this thread on the runqueue.
	 */
	ithread->it_need = 1;
	mtx_lock_spin(&sched_lock);
	if (TD_AWAITING_INTR(td)) {
		/* Thread is parked: wake it and put it on the run queue. */
		CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
		TD_CLR_IWAIT(td);
		setrunqueue(td, SRQ_INTR);
	} else {
		/* Already running/queued; it_need above is sufficient. */
		CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",
		    __func__, p->p_pid, ithread->it_need, td->td_state);
	}
	mtx_unlock_spin(&sched_lock);

	return (0);
}
|
|
|
|
|
2001-02-09 17:42:43 +00:00
|
|
|
int
|
|
|
|
swi_add(struct ithd **ithdp, const char *name, driver_intr_t handler,
|
|
|
|
void *arg, int pri, enum intr_type flags, void **cookiep)
|
2000-10-25 05:19:40 +00:00
|
|
|
{
|
|
|
|
struct ithd *ithd;
|
2001-02-09 17:42:43 +00:00
|
|
|
int error;
|
2000-10-25 05:19:40 +00:00
|
|
|
|
2001-02-20 10:25:29 +00:00
|
|
|
if (flags & (INTR_FAST | INTR_ENTROPY))
|
|
|
|
return (EINVAL);
|
|
|
|
|
2000-10-25 05:19:40 +00:00
|
|
|
ithd = (ithdp != NULL) ? *ithdp : NULL;
|
|
|
|
|
2001-02-20 10:25:29 +00:00
|
|
|
if (ithd != NULL) {
|
|
|
|
if ((ithd->it_flags & IT_SOFT) == 0)
|
|
|
|
return(EINVAL);
|
|
|
|
} else {
|
2001-02-09 17:42:43 +00:00
|
|
|
error = ithread_create(&ithd, pri, IT_SOFT, NULL, NULL,
|
|
|
|
"swi%d:", pri);
|
2000-10-25 05:19:40 +00:00
|
|
|
if (error)
|
2001-02-09 17:42:43 +00:00
|
|
|
return (error);
|
|
|
|
|
2000-10-25 05:19:40 +00:00
|
|
|
if (ithdp != NULL)
|
|
|
|
*ithdp = ithd;
|
2000-10-05 23:09:57 +00:00
|
|
|
}
|
2001-02-12 00:20:08 +00:00
|
|
|
return (ithread_add_handler(ithd, name, handler, arg,
|
|
|
|
(pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
|
2004-09-05 02:09:54 +00:00
|
|
|
/* XXKSE.. think of a better way to get separate queues */
|
2000-10-05 23:09:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Schedule a heavyweight software interrupt process.
 *
 * `cookie' is the handler cookie returned by swi_add().  Marks the
 * handler as needing service; unless SWI_DELAY is set in `flags', the
 * backing interrupt thread is scheduled immediately via
 * ithread_schedule().
 */
void
swi_sched(void *cookie, int flags)
{
	struct intrhand *ih = (struct intrhand *)cookie;
	struct ithd *it = ih->ih_ithread;
	int error;

	/* Account this as an interrupt in the per-CPU statistics. */
	PCPU_LAZY_INC(cnt.v_intr);

	CTR3(KTR_INTR, "swi_sched pid %d(%s) need=%d",
		it->it_td->td_proc->p_pid, it->it_td->td_proc->p_comm, it->it_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);
	if (!(flags & SWI_DELAY)) {
		error = ithread_schedule(it);
		/* Cannot fail: the handler list is non-empty by design. */
		KASSERT(error == 0, ("stray software interrupt"));
	}
}
|
|
|
|
|
|
|
|
/*
 * This is the main code for interrupt threads.
 *
 * Runs forever as the body of each interrupt thread created by
 * ithread_create().  While it_need is set, it walks the handler list
 * and invokes each handler (taking Giant around non-MPSAFE ones),
 * unlinks handlers marked IH_DEAD, applies interrupt-storm throttling,
 * and re-enables the interrupt source.  When idle it parks in the
 * interrupt-wait state until ithread_schedule() wakes it.  Exits (and
 * frees the ithd) when IT_DEAD is set by ithread_destroy().
 */
static void
ithread_loop(void *arg)
{
	struct ithd *ithd;		/* our thread context */
	struct intrhand *ih;		/* and our interrupt handler chain */
	struct thread *td;
	struct proc *p;
	int count, warned, storming;	/* storm-detection state */

	td = curthread;
	p = td->td_proc;
	ithd = (struct ithd *)arg;	/* point to myself */
	KASSERT(ithd->it_td == td && td->td_ithd == ithd,
	    ("%s: ithread and proc linkage out of sync", __func__));
	count = 0;
	warned = 0;
	storming = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d: (%s) exiting", __func__,
			    p->p_pid, p->p_comm);
			td->td_ithd = NULL;
			mtx_destroy(&ithd->it_lock);
			free(ithd, M_ITHREAD);
			kthread_exit(0);
		}

		CTR4(KTR_INTR, "%s: pid %d: (%s) need=%d", __func__,
		    p->p_pid, p->p_comm, ithd->it_need);
		while (ithd->it_need) {
			/*
			 * Service interrupts.  If another interrupt
			 * arrives while we are running, they will set
			 * it_need to denote that we should make
			 * another pass.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
restart:
			TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
				/* Soft handlers run only when flagged. */
				if (ithd->it_flags & IT_SOFT && !ih->ih_need)
					continue;
				atomic_store_rel_int(&ih->ih_need, 0);
				CTR6(KTR_INTR,
				    "%s: pid %d ih=%p: %p(%p) flg=%x", __func__,
				    p->p_pid, (void *)ih,
				    (void *)ih->ih_handler, ih->ih_argument,
				    ih->ih_flags);

				/*
				 * Unlink a handler that
				 * ithread_remove_handler() marked dead,
				 * wake the remover, and rescan since the
				 * list changed under the iterator.
				 */
				if ((ih->ih_flags & IH_DEAD) != 0) {
					mtx_lock(&ithd->it_lock);
					TAILQ_REMOVE(&ithd->it_handlers, ih,
					    ih_next);
					wakeup(ih);
					mtx_unlock(&ithd->it_lock);
					goto restart;
				}
				/* Non-MPSAFE handlers run under Giant. */
				if ((ih->ih_flags & IH_MPSAFE) == 0)
					mtx_lock(&Giant);
				ih->ih_handler(ih->ih_argument);
				if ((ih->ih_flags & IH_MPSAFE) == 0)
					mtx_unlock(&Giant);
			}

			/*
			 * Interrupt storm handling:
			 *
			 * If this interrupt source is currently storming,
			 * then throttle it to only fire the handler once
			 * per clock tick.
			 *
			 * If this interrupt source is not currently
			 * storming, but the number of back to back
			 * interrupts exceeds the storm threshold, then
			 * enter storming mode.
			 */
			if (!storming && intr_storm_threshold != 0 &&
			    count >= intr_storm_threshold) {
				if (!warned) {
					printf(
	"Interrupt storm detected on \"%s\"; throttling interrupt source\n",
					    p->p_comm);
					warned = 1;
				}
				storming = 1;
			}
			if (storming)
				tsleep(&count, td->td_priority, "istorm", 1);
			else
				count++;

			if (ithd->it_enable != NULL)
				ithd->it_enable(ithd->it_vector);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		mtx_lock_spin(&sched_lock);
		if (!ithd->it_need) {
			/* Park until the next ithread_schedule(). */
			TD_SET_IWAIT(td);
			count = 0;
			storming = 0;
			CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
			mi_switch(SW_VOL, NULL);
			CTR2(KTR_INTR, "%s: pid %d: resumed", __func__, p->p_pid);
		}
		mtx_unlock_spin(&sched_lock);
	}
}
|
|
|
|
|
2003-10-24 21:05:30 +00:00
|
|
|
#ifdef DDB
|
|
|
|
/*
|
|
|
|
* Dump details about an interrupt handler
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
db_dump_intrhand(struct intrhand *ih)
|
|
|
|
{
|
|
|
|
int comma;
|
|
|
|
|
|
|
|
db_printf("\t%-10s ", ih->ih_name);
|
|
|
|
switch (ih->ih_pri) {
|
|
|
|
case PI_REALTIME:
|
|
|
|
db_printf("CLK ");
|
|
|
|
break;
|
|
|
|
case PI_AV:
|
|
|
|
db_printf("AV ");
|
|
|
|
break;
|
|
|
|
case PI_TTYHIGH:
|
|
|
|
case PI_TTYLOW:
|
|
|
|
db_printf("TTY ");
|
|
|
|
break;
|
|
|
|
case PI_TAPE:
|
|
|
|
db_printf("TAPE");
|
|
|
|
break;
|
|
|
|
case PI_NET:
|
|
|
|
db_printf("NET ");
|
|
|
|
break;
|
|
|
|
case PI_DISK:
|
|
|
|
case PI_DISKLOW:
|
|
|
|
db_printf("DISK");
|
|
|
|
break;
|
|
|
|
case PI_DULL:
|
|
|
|
db_printf("DULL");
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
if (ih->ih_pri >= PI_SOFT)
|
|
|
|
db_printf("SWI ");
|
|
|
|
else
|
|
|
|
db_printf("%4u", ih->ih_pri);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
db_printf(" ");
|
|
|
|
db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
|
|
|
|
db_printf("(%p)", ih->ih_argument);
|
|
|
|
if (ih->ih_need ||
|
|
|
|
(ih->ih_flags & (IH_FAST | IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
|
|
|
|
IH_MPSAFE)) != 0) {
|
|
|
|
db_printf(" {");
|
|
|
|
comma = 0;
|
|
|
|
if (ih->ih_flags & IH_FAST) {
|
|
|
|
db_printf("FAST");
|
|
|
|
comma = 1;
|
|
|
|
}
|
|
|
|
if (ih->ih_flags & IH_EXCLUSIVE) {
|
|
|
|
if (comma)
|
|
|
|
db_printf(", ");
|
|
|
|
db_printf("EXCL");
|
|
|
|
comma = 1;
|
|
|
|
}
|
|
|
|
if (ih->ih_flags & IH_ENTROPY) {
|
|
|
|
if (comma)
|
|
|
|
db_printf(", ");
|
|
|
|
db_printf("ENTROPY");
|
|
|
|
comma = 1;
|
|
|
|
}
|
|
|
|
if (ih->ih_flags & IH_DEAD) {
|
|
|
|
if (comma)
|
|
|
|
db_printf(", ");
|
|
|
|
db_printf("DEAD");
|
|
|
|
comma = 1;
|
|
|
|
}
|
|
|
|
if (ih->ih_flags & IH_MPSAFE) {
|
|
|
|
if (comma)
|
|
|
|
db_printf(", ");
|
|
|
|
db_printf("MPSAFE");
|
|
|
|
comma = 1;
|
|
|
|
}
|
|
|
|
if (ih->ih_need) {
|
|
|
|
if (comma)
|
|
|
|
db_printf(", ");
|
|
|
|
db_printf("NEED");
|
|
|
|
}
|
|
|
|
db_printf("}");
|
|
|
|
}
|
|
|
|
db_printf("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Dump details about an ithread
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
db_dump_ithread(struct ithd *ithd, int handlers)
|
|
|
|
{
|
|
|
|
struct proc *p;
|
|
|
|
struct intrhand *ih;
|
|
|
|
int comma;
|
|
|
|
|
|
|
|
if (ithd->it_td != NULL) {
|
|
|
|
p = ithd->it_td->td_proc;
|
|
|
|
db_printf("%s (pid %d)", p->p_comm, p->p_pid);
|
|
|
|
} else
|
|
|
|
db_printf("%s: (no thread)", ithd->it_name);
|
|
|
|
if ((ithd->it_flags & (IT_SOFT | IT_ENTROPY | IT_DEAD)) != 0 ||
|
|
|
|
ithd->it_need) {
|
|
|
|
db_printf(" {");
|
|
|
|
comma = 0;
|
|
|
|
if (ithd->it_flags & IT_SOFT) {
|
|
|
|
db_printf("SOFT");
|
|
|
|
comma = 1;
|
|
|
|
}
|
|
|
|
if (ithd->it_flags & IT_ENTROPY) {
|
|
|
|
if (comma)
|
|
|
|
db_printf(", ");
|
|
|
|
db_printf("ENTROPY");
|
|
|
|
comma = 1;
|
|
|
|
}
|
|
|
|
if (ithd->it_flags & IT_DEAD) {
|
|
|
|
if (comma)
|
|
|
|
db_printf(", ");
|
|
|
|
db_printf("DEAD");
|
|
|
|
comma = 1;
|
|
|
|
}
|
|
|
|
if (ithd->it_need) {
|
|
|
|
if (comma)
|
|
|
|
db_printf(", ");
|
|
|
|
db_printf("NEED");
|
|
|
|
}
|
|
|
|
db_printf("}");
|
|
|
|
}
|
|
|
|
db_printf("\n");
|
|
|
|
|
|
|
|
if (handlers)
|
|
|
|
TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next)
|
|
|
|
db_dump_intrhand(ih);
|
|
|
|
}
|
|
|
|
#endif /* DDB */
|
|
|
|
|
2000-10-05 23:09:57 +00:00
|
|
|
/*
|
2000-10-25 05:19:40 +00:00
|
|
|
* Start standard software interrupt threads
|
2000-10-05 23:09:57 +00:00
|
|
|
*/
|
2000-10-25 05:19:40 +00:00
|
|
|
static void
|
2001-02-09 17:42:43 +00:00
|
|
|
start_softintr(void *dummy)
|
2000-10-25 05:19:40 +00:00
|
|
|
{
|
2003-04-17 22:02:47 +00:00
|
|
|
struct proc *p;
|
2001-02-09 17:42:43 +00:00
|
|
|
|
2002-09-22 05:56:41 +00:00
|
|
|
if (swi_add(&clk_ithd, "clock", softclock, NULL, SWI_CLOCK,
|
2001-02-09 17:42:43 +00:00
|
|
|
INTR_MPSAFE, &softclock_ih) ||
|
2003-07-01 16:00:38 +00:00
|
|
|
swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
|
2001-02-09 17:42:43 +00:00
|
|
|
panic("died while creating standard software ithreads");
|
2001-02-20 10:25:29 +00:00
|
|
|
|
2003-04-17 22:02:47 +00:00
|
|
|
p = clk_ithd->it_td->td_proc;
|
|
|
|
PROC_LOCK(p);
|
|
|
|
p->p_flag |= P_NOLOAD;
|
|
|
|
PROC_UNLOCK(p);
|
2000-10-05 23:09:57 +00:00
|
|
|
}
|
2001-02-09 17:42:43 +00:00
|
|
|
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
|
2000-10-05 23:09:57 +00:00
|
|
|
|
2001-06-01 13:23:28 +00:00
|
|
|
/*
|
|
|
|
* Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
|
|
|
|
* The data for this machine dependent, and the declarations are in machine
|
|
|
|
* dependent code. The layout of intrnames and intrcnt however is machine
|
|
|
|
* independent.
|
|
|
|
*
|
|
|
|
* We do not know the length of intrcnt and intrnames at compile time, so
|
|
|
|
* calculate things at run time.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
|
|
|
|
req));
|
|
|
|
}
|
|
|
|
|
|
|
|
SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
|
|
|
|
NULL, 0, sysctl_intrnames, "", "Interrupt Names");
|
|
|
|
|
|
|
|
static int
|
|
|
|
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
return (sysctl_handle_opaque(oidp, intrcnt,
|
|
|
|
(char *)eintrcnt - (char *)intrcnt, req));
|
|
|
|
}
|
|
|
|
|
|
|
|
SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
|
|
|
|
NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
|
2003-10-24 21:05:30 +00:00
|
|
|
|
|
|
|
#ifdef DDB
|
|
|
|
/*
|
|
|
|
* DDB command to dump the interrupt statistics.
|
|
|
|
*/
|
|
|
|
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
|
|
|
|
{
|
|
|
|
u_long *i;
|
|
|
|
char *cp;
|
|
|
|
int quit;
|
|
|
|
|
|
|
|
cp = intrnames;
|
2004-11-01 22:15:15 +00:00
|
|
|
db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
|
2003-10-24 21:05:30 +00:00
|
|
|
for (i = intrcnt, quit = 0; i != eintrcnt && !quit; i++) {
|
|
|
|
if (*cp == '\0')
|
|
|
|
break;
|
|
|
|
if (*i != 0)
|
|
|
|
db_printf("%s\t%lu\n", cp, *i);
|
|
|
|
cp += strlen(cp) + 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|