/*
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/ipl.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>

#include <net/netisr.h>		/* prototype for legacy_setsoftnet */
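
/*
 * Software interrupt handlers and interrupt thread pointers.
 */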
struct intrhand *net_ih;
struct intrhand *vm_ih;
struct intrhand *softclock_ih;
struct ithd *clk_ithd;
struct ithd *tty_ithd;

static void start_softintr(void *);
static void swi_net(void *);
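
/*
 * Map an interrupt type, given as the INTR_TYPE_* (and INTR_FAST) bits in
 * flags, to the base priority used for its interrupt thread.  The
 * INTR_MPSAFE bit is ignored here; an unknown type is a fatal error.
 */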
int
ithread_priority(flags)
        int flags;
{
        int pri;

        flags &= ~INTR_MPSAFE;
        switch (flags) {
        case INTR_TYPE_TTY:             /* keyboard or parallel port */
                pri = PI_TTYLOW;
                break;
        case (INTR_TYPE_TTY | INTR_FAST):       /* sio */
                pri = PI_TTYHIGH;
                break;
        case INTR_TYPE_BIO:
                /*
                 * XXX We need to refine this.  BSD/OS distinguishes
                 * between tape and disk priorities.
                 */
                pri = PI_DISK;
                break;
        case INTR_TYPE_NET:
                pri = PI_NET;
                break;
        case INTR_TYPE_CAM:
                pri = PI_DISK;          /* XXX or PI_CAM? */
                break;
        case INTR_TYPE_MISC:
                pri = PI_DULL;          /* don't care */
                break;
        /* We didn't specify an interrupt level. */
        default:
                panic("ithread_priority: no interrupt type in flags");
        }

        return pri;
}

void sithd_loop(void *);

struct intrhand *
sinthand_add(const char *name, struct ithd **ithdp, driver_intr_t handler,
            void *arg, int pri, int flags)
{
        struct proc *p;
        struct ithd *ithd;
        struct intrhand *ih;
        struct intrhand *this_ih;

        ithd = (ithdp != NULL) ? *ithdp : NULL;

        if (ithd == NULL) {
                int error;

                ithd = malloc(sizeof (struct ithd), M_DEVBUF,
                    M_WAITOK | M_ZERO);
                error = kthread_create(sithd_loop, NULL, &p,
                    RFSTOPPED | RFHIGHPID, "swi%d: %s", pri, name);
                if (error)
                        panic("sinthand_add: Can't create interrupt thread");
                ithd->it_proc = p;
                p->p_ithd = ithd;
                p->p_rtprio.type = RTP_PRIO_ITHREAD;
                p->p_rtprio.prio = pri + PI_SOFT;       /* soft interrupt */
                p->p_stat = SWAIT;                      /* we're idle */
                /* XXX - some hacks are _really_ gross */
                if (pri == SWI_CLOCK)
                        p->p_flag |= P_NOLOAD;
                if (ithdp != NULL)
                        *ithdp = ithd;
        }
        this_ih = malloc(sizeof (struct intrhand), M_DEVBUF,
            M_WAITOK | M_ZERO);
        this_ih->ih_handler = handler;
        this_ih->ih_argument = arg;
        this_ih->ih_flags = flags;
        this_ih->ih_ithd = ithd;
        this_ih->ih_name = malloc(strlen(name) + 1, M_DEVBUF, M_WAITOK);
        strcpy(this_ih->ih_name, name);
        if ((ih = ithd->it_ih)) {
                while (ih->ih_next != NULL)
                        ih = ih->ih_next;
                ih->ih_next = this_ih;
        } else
                ithd->it_ih = this_ih;
        return (this_ih);
}
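
/*
 * Usage sketch (illustrative only; "foo", foo_swi_handler and foo_softc
 * are made-up names, and SWI_TQ is just one possible priority).  A driver
 * registers a soft interrupt handler once and later asks for it to run:
 *
 *      static struct intrhand *foo_ih;
 *
 *      static void
 *      foo_swi_handler(void *arg)
 *      {
 *              struct foo_softc *sc = arg;
 *              ... process work queued by the hardware interrupt ...
 *      }
 *
 *      foo_ih = sinthand_add("foo", NULL, foo_swi_handler, sc, SWI_TQ, 0);
 *      ...
 *      sched_swi(foo_ih, SWI_NOSWITCH);
 */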

/*
 * Schedule a heavyweight software interrupt process.
 */
void
sched_swi(struct intrhand *ih, int flag)
{
        struct ithd *it = ih->ih_ithd;  /* and the process that does it */
        struct proc *p = it->it_proc;

        atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */

        CTR3(KTR_INTR, "sched_swi pid %d(%s) need=%d",
            p->p_pid, p->p_comm, it->it_need);

        /*
         * Set it_need so that if the thread is already running but close
         * to done, it will do another go-round.  Then get the sched lock
         * and see if the thread is on whichkqs yet.  If not, put it on
         * there.  In any case, kick everyone so that if the new thread
         * is higher priority than their current thread, it gets run now.
         */
        ih->ih_need = 1;
        if (!(flag & SWI_DELAY)) {
                it->it_need = 1;
                mtx_enter(&sched_lock, MTX_SPIN);
                if (p->p_stat == SWAIT) {       /* not on run queue */
                        CTR1(KTR_INTR, "sched_swi: setrunqueue %d", p->p_pid);
                        /* membar_lock(); */
                        p->p_stat = SRUN;
                        setrunqueue(p);
                        aston();
                } else {
                        CTR3(KTR_INTR, "sched_swi %d: it_need %d, state %d",
                            p->p_pid, it->it_need, p->p_stat);
                }
                mtx_exit(&sched_lock, MTX_SPIN);
                need_resched();
        }
}

/*
 * This is the main code for soft interrupt threads.
 */
void
sithd_loop(void *dummy)
{
        struct ithd *it;                /* our thread context */
        struct intrhand *ih;            /* and our interrupt handler chain */

        struct proc *p = curproc;
        it = p->p_ithd;                 /* point to myself */

        /*
         * As long as we have interrupts outstanding, go through the
         * list of handlers, giving each one a go at it.
         */
        for (;;) {
                CTR3(KTR_INTR, "sithd_loop pid %d(%s) need=%d",
                    p->p_pid, p->p_comm, it->it_need);
                while (it->it_need) {
                        /*
                         * Service interrupts.  If another interrupt
                         * arrives while we are running, it will set
                         * it_need to denote that we should make
                         * another pass.
                         */
                        it->it_need = 0;
                        for (ih = it->it_ih; ih != NULL; ih = ih->ih_next) {
                                if (!ih->ih_need)
                                        continue;
                                ih->ih_need = 0;
                                CTR5(KTR_INTR,
                                    "sithd_loop pid %d ih=%p: %p(%p) flg=%x",
                                    p->p_pid, (void *)ih,
                                    (void *)ih->ih_handler, ih->ih_argument,
                                    ih->ih_flags);

                                if ((ih->ih_flags & INTR_MPSAFE) == 0)
                                        mtx_enter(&Giant, MTX_DEF);
                                ih->ih_handler(ih->ih_argument);
                                if ((ih->ih_flags & INTR_MPSAFE) == 0)
                                        mtx_exit(&Giant, MTX_DEF);
                        }
                }

                /*
                 * Processed all our interrupts.  Now get the sched
                 * lock.  This may take a while and it_need may get
                 * set again, so we have to check it again.
                 */
                mtx_assert(&Giant, MA_NOTOWNED);
                mtx_enter(&sched_lock, MTX_SPIN);
                if (!it->it_need) {
                        p->p_stat = SWAIT;      /* we're idle */
                        CTR1(KTR_INTR, "sithd_loop pid %d: done", p->p_pid);
                        mi_switch();
                        CTR1(KTR_INTR, "sithd_loop pid %d: resumed", p->p_pid);
                }
                mtx_exit(&sched_lock, MTX_SPIN);
        }
}

SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)

/*
 * Start standard software interrupt threads.
 */
static void
start_softintr(dummy)
        void *dummy;
{
        net_ih = sinthand_add("net", NULL, swi_net, NULL, SWI_NET, 0);
        softclock_ih = sinthand_add("clock", &clk_ithd, softclock, NULL,
            SWI_CLOCK, INTR_MPSAFE);
        vm_ih = sinthand_add("vm", NULL, swi_vm, NULL, SWI_VM, 0);
}
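
/*
 * Legacy entry point (declared in <net/netisr.h>): schedule the network
 * software interrupt thread.
 */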
void
legacy_setsoftnet()
{
        sched_swi(net_ih, SWI_NOSWITCH);
}

/*
 * XXX: This should really be in the network code somewhere and installed
 * via a SI_SUB_SOFTINTR, SI_ORDER_MIDDLE sysinit.
 */
void (*netisrs[32]) __P((void));
u_int netisr;

int
register_netisr(num, handler)
        int num;
        netisr_t *handler;
{

        if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))) {
                printf("register_netisr: bad isr number: %d\n", num);
                return (EINVAL);
        }
        netisrs[num] = handler;
        return (0);
}

int
unregister_netisr(num)
        int num;
{

        if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))) {
                printf("unregister_netisr: bad isr number: %d\n", num);
                return (EINVAL);
        }
        netisrs[num] = NULL;
        return (0);
}
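
/*
 * Illustrative sketch (the IP names below are the usual example, not
 * something defined in this file): a protocol registers its handler once
 * and later requests service by setting its bit in `netisr' and scheduling
 * the net swi, normally through the schednetisr() macro in <net/netisr.h>:
 *
 *      register_netisr(NETISR_IP, ipintr);
 *      ...
 *      schednetisr(NETISR_IP);         (sets the bit, then setsoftnet())
 *
 * swi_net() below then reads and clears the pending bits and runs each
 * registered handler in turn.
 */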

static void
swi_net(void *dummy)
{
        u_int bits;
        int i;

        bits = atomic_readandclear_int(&netisr);
        while ((i = ffs(bits)) != 0) {
                i--;
                netisrs[i]();
                bits &= ~(1 << i);
        }
}

/*
 * Dummy spl calls.  The only reason for these is to not break
 * all the code which expects to call them.
 */
void spl0(void) {}
void splx(intrmask_t x) {}
intrmask_t splq(intrmask_t mask) { return 0; }
intrmask_t splbio(void) { return 0; }
intrmask_t splcam(void) { return 0; }
intrmask_t splclock(void) { return 0; }
intrmask_t splhigh(void) { return 0; }
intrmask_t splimp(void) { return 0; }
intrmask_t splnet(void) { return 0; }
intrmask_t splsoftcam(void) { return 0; }
intrmask_t splsoftcambio(void) { return 0; }
intrmask_t splsoftcamnet(void) { return 0; }
intrmask_t splsoftclock(void) { return 0; }
intrmask_t splsofttty(void) { return 0; }
intrmask_t splsoftvm(void) { return 0; }
intrmask_t splsofttq(void) { return 0; }
intrmask_t splstatclock(void) { return 0; }
intrmask_t spltty(void) { return 0; }
intrmask_t splvm(void) { return 0; }