Revert the critical section implementation to disable interrupts via
cli/sti now that we support many more than 32 interrupt sources.
John Baldwin 2003-11-03 21:06:54 +00:00
parent 8d8dbef5ac
commit eb2a2211ff
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=121977
2 changed files with 9 additions and 145 deletions
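
The limitation the commit message refers to is visible in the removed code below: pending fast, threaded, and soft interrupts were each recorded in a single 32-bit per-CPU bitmask (fpending, ipending, spending) and drained with bsfl(), which can only distinguish bit positions 0 through 31. The following stand-alone sketch (hypothetical names, not kernel code) shows where that 32-source ceiling comes from:

/*
 * Sketch of a 32-bit pending bitmask in the style of the removed
 * fpending/ipending/spending fields.  All names here are hypothetical;
 * only the width of the mask matters.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t pending;                /* one bit per interrupt source */

static void
defer_irq(int irq)
{
        assert(irq >= 0 && irq < 32);   /* sources >= 32 cannot be recorded */
        pending |= 1u << irq;
}

static void
replay_deferred(void)
{
        while (pending != 0) {
                /* __builtin_ctz (GCC/Clang) plays the role of bsfl() */
                int irq = __builtin_ctz(pending);

                pending &= ~(1u << irq);
                printf("replaying deferred irq %d\n", irq);
        }
}

int
main(void)
{
        defer_irq(1);
        defer_irq(9);
        replay_deferred();
        return (0);
}

Once APIC-based configurations expose well over 32 interrupt sources, every such mask (and its assembly-level counterparts) would have to be widened; reverting to plain cli/sti sidesteps the problem, since disabling interrupts is independent of how many sources exist.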

sys/i386/i386/critical.c

@@ -30,144 +30,17 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <machine/clock.h>
#include <machine/critical.h>
#ifdef SMP
#include <machine/privatespace.h>
#include <machine/smp.h>
#else
/*
 * XXX this mess to get sched_ithd() and call_fast_unpend()
 */
#include <sys/bus.h>
#include <machine/apic.h>
#include <machine/frame.h>
#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>
#endif

void i386_unpend(void);         /* NOTE: not static, called from assembly */
/*
 * cpu_unpend() - called from critical_exit() inline after quick
 * interrupt-pending check.
 */
void
cpu_unpend(void)
{
        register_t eflags;
        struct thread *td;

        td = curthread;
        eflags = intr_disable();
        if (PCPU_GET(int_pending)) {
                ++td->td_intr_nesting_level;
                i386_unpend();
                --td->td_intr_nesting_level;
        }
        intr_restore(eflags);
}
/*
 * cpu_critical_fork_exit() - cleanup after fork
 *
 * Enable interrupts in the saved copy of eflags.
 */
void
cpu_critical_fork_exit(void)
{
        enable_intr();
}
/*
 * Called from cpu_unpend or called from the assembly vector code
 * to process any interrupts which may have occurred while we were in
 * a critical section.
 *
 *      - interrupts must be disabled
 *      - td_critnest must be 0
 *      - td_intr_nesting_level must be incremented by the caller
 *
 * NOT STATIC (called from assembly)
 */
void
i386_unpend(void)
{
        struct clockframe frame;

        frame.cf_cs = SEL_KPL;
        frame.cf_eip = (register_t)i386_unpend;
        frame.cf_eflags = PSL_KERNEL;
        KASSERT(curthread->td_critnest == 0, ("unpend critnest != 0"));
        KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled1"));
        curthread->td_critnest = 1;
        for (;;) {
                u_int32_t mask;
                int irq;

                /*
                 * Fast interrupts have priority
                 */
                if ((mask = PCPU_GET(fpending)) != 0) {
                        irq = bsfl(mask);
                        PCPU_SET(fpending, mask & ~(1 << irq));
                        call_fast_unpend(irq);
                        KASSERT((read_eflags() & PSL_I) == 0,
                            ("unpend interrupts enabled2 %d", irq));
                        continue;
                }

                /*
                 * Threaded interrupts come next
                 */
                if ((mask = PCPU_GET(ipending)) != 0) {
                        irq = bsfl(mask);
                        PCPU_SET(ipending, mask & ~(1 << irq));
                        sched_ithd((void *)irq);
                        KASSERT((read_eflags() & PSL_I) == 0,
                            ("unpend interrupts enabled3 %d", irq));
                        continue;
                }

                /*
                 * Software interrupts and delayed IPIs are last
                 *
                 * XXX give the bits #defined names.  see also
                 * isa/xxx_vector.s
                 */
                if ((mask = PCPU_GET(spending)) != 0) {
                        irq = bsfl(mask);
                        PCPU_SET(spending, mask & ~(1 << irq));
                        switch (irq) {
                        case 0:         /* bit 0 - hardclock */
                                hardclock_process(&frame);
                                break;
                        case 1:         /* bit 1 - statclock */
                                if (profprocs != 0)
                                        profclock(&frame);
                                if (pscnt == psdiv)
                                        statclock(&frame);
                                break;
                        }
                        KASSERT((read_eflags() & PSL_I) == 0,
                            ("unpend interrupts enabled4 %d", irq));
                        continue;
                }
                break;
        }

        /*
         * Interrupts are still disabled, we can safely clear int_pending
         * and td_critnest.
         */
        KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled5"));
        PCPU_SET(int_pending, 0);
        curthread->td_critnest = 0;
        curthread->td_md.md_savecrit = read_eflags() | PSL_I;
}
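
The header below supplies the machine-dependent half of the critical section API. For context, the machine-independent wrappers maintain td_critnest and invoke the MD hooks only on the outermost transition; a rough sketch follows (the contemporary kern_switch.c code is not part of this diff, so treat names and details as approximate):

/*
 * Approximate MI-side sketch: the MD hooks run once per outermost
 * critical section, so nesting costs only an increment/decrement.
 */
void
critical_enter(void)
{
        struct thread *td;

        td = curthread;
        if (td->td_critnest == 0)
                cpu_critical_enter();   /* outermost entry: save eflags, cli */
        td->td_critnest++;
}

void
critical_exit(void)
{
        struct thread *td;

        td = curthread;
        KASSERT(td->td_critnest > 0, ("critical_exit: td_critnest == 0"));
        if (td->td_critnest == 1) {
                td->td_critnest = 0;
                cpu_critical_exit();    /* outermost exit: restore eflags */
        } else
                td->td_critnest--;
}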

sys/i386/include/critical.h

@@ -44,7 +44,6 @@ __BEGIN_DECLS
/*
 * Prototypes - see <arch>/<arch>/critical.c
 */
void cpu_unpend(void);
void cpu_critical_fork_exit(void);
#ifdef __GNUC__
@@ -59,7 +58,11 @@ void cpu_critical_fork_exit(void);
 * However, as a side effect any interrupts occurring while td_critnest
 * is non-zero will be deferred.
 */
#define cpu_critical_enter()
static __inline void
cpu_critical_enter(void)
{
        curthread->td_md.md_savecrit = intr_disable();
}
/*
 * cpu_critical_exit:
@@ -75,19 +78,7 @@ void cpu_critical_fork_exit(void);
static __inline void
cpu_critical_exit(void)
{
        /*
         * We may have to schedule pending interrupts.  Create
         * conditions similar to an interrupt context and call
         * unpend().
         *
         * note: we do this even if we are in an interrupt
         * nesting level.  Deep nesting is protected by
         * critical_*() and if we conditionalized it then we
         * would have to check int_pending again whenever
         * we decrement td_intr_nesting_level to 0.
         */
        if (PCPU_GET(int_pending))
                cpu_unpend();
        intr_restore(curthread->td_md.md_savecrit);
}
#else /* !__GNUC__ */
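
A closing note on the helpers the new inlines rely on: intr_disable() and intr_restore() are what ultimately map cpu_critical_enter()/cpu_critical_exit() onto cli/sti. On i386 they amount to the following (a sketch in the style of machine/cpufunc.h, which this diff does not show):

/*
 * intr_disable() saves EFLAGS and executes cli; intr_restore() writes
 * the saved EFLAGS back, re-enabling interrupts only if PSL_I was set
 * when the critical section was entered.
 */
static __inline register_t
intr_disable(void)
{
        register_t eflags;

        eflags = read_eflags();
        disable_intr();                 /* cli */
        return (eflags);
}

static __inline void
intr_restore(register_t eflags)
{
        write_eflags(eflags);           /* restores PSL_I as saved */
}

Because the saved EFLAGS value is restored rather than interrupts being unconditionally re-enabled, a critical section entered with interrupts already disabled exits with them still disabled, which is what makes the scheme safe to nest under the td_critnest accounting above.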