freebsd-dev/sys/amd64/isa/intr_machdep.h

/*-
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _I386_ISA_INTR_MACHDEP_H_
#define _I386_ISA_INTR_MACHDEP_H_
/*
 * Low level interrupt code.
 */
#ifdef _KERNEL
#if defined(SMP) || defined(APIC_IO)
/*
 * XXX FIXME: rethink location for all IPI vectors.
 */
/*
    APIC TPR priority vector levels:

        0xff (255) +-------------+
                   |             | 15 (IPIs: Xspuriousint)
        0xf0 (240) +-------------+
                   |             | 14
        0xe0 (224) +-------------+
                   |             | 13
        0xd0 (208) +-------------+
                   |             | 12
        0xc0 (192) +-------------+
                   |             | 11
        0xb0 (176) +-------------+
                   |             | 10 (IPIs: Xcpustop)
        0xa0 (160) +-------------+
                   |             |  9 (IPIs: Xinvltlb)
        0x90 (144) +-------------+
                   |             |  8 (linux/BSD syscall, IGNORE FAST HW INTS)
        0x80 (128) +-------------+
                   |             |  7 (FAST_INTR 16-23)
        0x70 (112) +-------------+
                   |             |  6 (FAST_INTR 0-15)
        0x60 (96)  +-------------+
                   |             |  5 (IGNORE HW INTS)
        0x50 (80)  +-------------+
                   |             |  4 (2nd IO APIC)
        0x40 (64)  +------+------+
                   |      |      |  3 (upper APIC hardware INTs: PCI)
        0x30 (48)  +------+------+
                   |             |  2 (start of hardware INTs: ISA)
        0x20 (32)  +-------------+
                   |             |  1 (exceptions, traps, etc.)
        0x10 (16)  +-------------+
                   |             |  0 (exceptions, traps, etc.)
        0x00 (0)   +-------------+
 */
/* IDT vector base for regular (aka. slow) and fast interrupts */
#define TPR_SLOW_INTS		0x20
#define TPR_FAST_INTS		0x60
/* XXX note that the AST interrupt is at 0x50 */

/* blocking values for local APIC Task Priority Register */
#define TPR_BLOCK_HWI		0x4f	/* hardware INTs */
#define TPR_IGNORE_HWI		0x5f	/* ignore INTs */
#define TPR_BLOCK_FHWI		0x7f	/* hardware FAST INTs */
#define TPR_IGNORE_FHWI		0x8f	/* ignore FAST INTs */
#define TPR_BLOCK_XINVLTLB	0x9f	/* TLB shootdown IPIs */
#define TPR_BLOCK_XCPUSTOP	0xaf	/* CPU stop IPIs */
#define TPR_BLOCK_ALL		0xff	/* all INTs */
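
/*
 * Usage sketch (illustrative only, not part of this header): a class of
 * interrupts is deferred by raising the local APIC Task Priority Register
 * to one of the TPR_* values above; every vector whose priority class is
 * at or below the written value is held pending until the TPR is lowered
 * again.  Assuming the memory-mapped local APIC is visible as the global
 * `lapic' structure:
 *
 *	u_int saved_tpr = lapic.tpr;
 *
 *	lapic.tpr = TPR_BLOCK_HWI;	... slow hardware INTs now held off
 *	... critical work ...
 *	lapic.tpr = saved_tpr;		... pending vectors deliver again
 */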
#ifdef TEST_TEST1
/* put a 'fake' HWI in top of APIC prio 0x3x, 32 + 31 = 63 = 0x3f */
#define XTEST1_OFFSET (ICU_OFFSET + 31)
#endif /** TEST_TEST1 */
/* TLB shootdowns */
#define XINVLTLB_OFFSET		(ICU_OFFSET + 112)	/* 0x90 */
#define XINVLPG_OFFSET		(ICU_OFFSET + 113)	/* 0x91 */
#define XINVLRNG_OFFSET		(ICU_OFFSET + 114)	/* 0x92 */

/* inter-cpu clock handling */
#define XHARDCLOCK_OFFSET	(ICU_OFFSET + 120)	/* 0x98 */
#define XSTATCLOCK_OFFSET	(ICU_OFFSET + 121)	/* 0x99 */

/* inter-CPU rendezvous */
#define XRENDEZVOUS_OFFSET	(ICU_OFFSET + 122)	/* 0x9A */

/* IPI to generate an additional software trap at the target CPU */
/* XXX in the middle of the interrupt range, overlapping IRQ48 */
#define XCPUAST_OFFSET		(ICU_OFFSET + 48)	/* 0x50 */

/* IPI to signal CPUs to stop and wait for another CPU to restart them */
#define XCPUSTOP_OFFSET		(ICU_OFFSET + 128)	/* 0xA0 */
/*
 * Note: this vector MUST be xxxx1111, 32 + 223 = 255 = 0xff:
 */
#define XSPURIOUSINT_OFFSET	(ICU_OFFSET + 223)
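
/*
 * Wiring sketch (illustrative; mirrors how the MP startup code installs
 * these handlers, and is not itself part of this header): each IPI entry
 * point is placed in the IDT at its offset above with setidt(), e.g. for
 * global TLB shootdowns:
 *
 *	setidt(XINVLTLB_OFFSET, Xinvltlb,
 *	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
 *
 * SDT_SYS386IGT, SEL_KPL and GSEL() come from <machine/segments.h>.
 */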
#endif /* SMP || APIC_IO */
#ifdef LOCORE
/*
 * Protects the IO APIC, 8259 PIC, imen, and apic_imen
 */
#define ICU_LOCK	MTX_LOCK_SPIN(icu_lock, 0)
#define ICU_UNLOCK	MTX_UNLOCK_SPIN(icu_lock)
#else /* LOCORE */
/*
 * Type of the first (asm) part of an interrupt handler.
 */
typedef void inthand_t __P((u_int cs, u_int ef, u_int esp, u_int ss));
typedef void unpendhand_t __P((void));
#define IDTVEC(name) __CONCAT(X,name)
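/*
 * Example: IDTVEC(fastintr0) expands to the symbol Xfastintr0.  The
 * assembly stubs define these X-prefixed labels, so the macro lets the
 * C declarations below and the asm code name entry points identically.
 */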
extern u_long *intr_countp[]; /* pointers into intrcnt[] */
extern driver_intr_t *intr_handler[]; /* C entry points of intr handlers */
extern struct ithd *ithds[];
extern void *intr_unit[]; /* cookies to pass to intr handlers */
extern struct mtx icu_lock;
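
/*
 * Locking sketch (illustrative only): C code that manipulates the shared
 * mask state takes icu_lock through the spin-mutex API, matching the
 * asm-side ICU_LOCK/ICU_UNLOCK macros above:
 *
 *	mtx_lock_spin(&icu_lock);
 *	... update imen or apic_imen, reprogram the mask registers ...
 *	mtx_unlock_spin(&icu_lock);
 *
 * Mainline code must only acquire icu_lock with interrupts hard-disabled,
 * since the interrupt-deferral path takes it from interrupt context.
 */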
inthand_t
	IDTVEC(fastintr0), IDTVEC(fastintr1),
	IDTVEC(fastintr2), IDTVEC(fastintr3),
	IDTVEC(fastintr4), IDTVEC(fastintr5),
	IDTVEC(fastintr6), IDTVEC(fastintr7),
	IDTVEC(fastintr8), IDTVEC(fastintr9),
	IDTVEC(fastintr10), IDTVEC(fastintr11),
	IDTVEC(fastintr12), IDTVEC(fastintr13),
	IDTVEC(fastintr14), IDTVEC(fastintr15);
inthand_t
	IDTVEC(intr0), IDTVEC(intr1), IDTVEC(intr2), IDTVEC(intr3),
	IDTVEC(intr4), IDTVEC(intr5), IDTVEC(intr6), IDTVEC(intr7),
	IDTVEC(intr8), IDTVEC(intr9), IDTVEC(intr10), IDTVEC(intr11),
	IDTVEC(intr12), IDTVEC(intr13), IDTVEC(intr14), IDTVEC(intr15);
unpendhand_t
	IDTVEC(fastunpend0), IDTVEC(fastunpend1), IDTVEC(fastunpend2),
	IDTVEC(fastunpend3), IDTVEC(fastunpend4), IDTVEC(fastunpend5),
	IDTVEC(fastunpend6), IDTVEC(fastunpend7), IDTVEC(fastunpend8),
	IDTVEC(fastunpend9), IDTVEC(fastunpend10), IDTVEC(fastunpend11),
	IDTVEC(fastunpend12), IDTVEC(fastunpend13), IDTVEC(fastunpend14),
	IDTVEC(fastunpend15), IDTVEC(fastunpend16), IDTVEC(fastunpend17),
	IDTVEC(fastunpend18), IDTVEC(fastunpend19), IDTVEC(fastunpend20),
	IDTVEC(fastunpend21), IDTVEC(fastunpend22), IDTVEC(fastunpend23),
	IDTVEC(fastunpend24), IDTVEC(fastunpend25), IDTVEC(fastunpend26),
	IDTVEC(fastunpend27), IDTVEC(fastunpend28), IDTVEC(fastunpend29),
	IDTVEC(fastunpend30), IDTVEC(fastunpend31);
#if defined(SMP) || defined(APIC_IO)
inthand_t
	IDTVEC(fastintr16), IDTVEC(fastintr17),
	IDTVEC(fastintr18), IDTVEC(fastintr19),
	IDTVEC(fastintr20), IDTVEC(fastintr21),
	IDTVEC(fastintr22), IDTVEC(fastintr23),
	IDTVEC(fastintr24), IDTVEC(fastintr25),
	IDTVEC(fastintr26), IDTVEC(fastintr27),
	IDTVEC(fastintr28), IDTVEC(fastintr29),
	IDTVEC(fastintr30), IDTVEC(fastintr31);
inthand_t
	IDTVEC(intr16), IDTVEC(intr17), IDTVEC(intr18), IDTVEC(intr19),
	IDTVEC(intr20), IDTVEC(intr21), IDTVEC(intr22), IDTVEC(intr23),
	IDTVEC(intr24), IDTVEC(intr25), IDTVEC(intr26), IDTVEC(intr27),
	IDTVEC(intr28), IDTVEC(intr29), IDTVEC(intr30), IDTVEC(intr31);
inthand_t
	Xinvltlb,	/* TLB shootdowns - global */
	Xinvlpg,	/* TLB shootdowns - 1 page */
	Xinvlrng,	/* TLB shootdowns - page range */
	Xhardclock,	/* Forward hardclock() */
	Xstatclock,	/* Forward statclock() */
	Xcpuast,	/* Additional software trap on other cpu */
	Xcpustop,	/* CPU stops & waits for another CPU to restart it */
	Xspuriousint,	/* handle APIC "spurious INTs" */
	Xrendezvous;	/* handle CPU rendezvous */
#ifdef TEST_TEST1
inthand_t
	Xtest1;		/* 'fake' HWI at top of APIC prio 0x3x, 32+31 = 0x3f */
#endif /** TEST_TEST1 */
#endif /* SMP || APIC_IO */
#ifdef PC98
#define ICU_IMR_OFFSET		2	/* IO_ICU{1,2} + 2 */
#define ICU_SLAVEID		7
#else
#define ICU_IMR_OFFSET		1	/* IO_ICU{1,2} + 1 */
#define ICU_SLAVEID		2
#endif
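
/*
 * Illustrative sketch (not part of this header): IRQs 0-7 live on the
 * master 8259 and IRQs 8-15 on the slave, which is cascaded through pin
 * ICU_SLAVEID.  Masking an IRQ sets its bit in the global mask imen and
 * rewrites the relevant Interrupt Mask Register byte at ICU_IMR_OFFSET:
 *
 *	imen |= (1 << irq);
 *	if (irq < 8)
 *		outb(IO_ICU1 + ICU_IMR_OFFSET, imen & 0xff);
 *	else
 *		outb(IO_ICU2 + ICU_IMR_OFFSET, (imen >> 8) & 0xff);
 *
 * IO_ICU1/IO_ICU2 are the ICU base ports from the isa headers; imen is
 * the mask word maintained by the ICU support code.
 */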
#ifdef APIC_IO
/*
 * This is to accommodate "mixed-mode" programming for
 * motherboards that don't connect the 8254 to the IO APIC.
 */
#define AUTO_EOI_1 1
#endif
#define NR_INTRNAMES (1 + ICU_LEN + 2 * ICU_LEN)
void	isa_defaultirq __P((void));
int	isa_nmi __P((int cd));
int	icu_setup __P((int intr, driver_intr_t *func, void *arg,
		       int flags));
int	icu_unset __P((int intr, driver_intr_t *handler));
void	icu_reinit(void);
/*
 * WARNING: These are internal functions and not to be used by device drivers!
 * They are subject to change without notice.
 */
int	inthand_add(const char *name, int irq, driver_intr_t handler, void *arg,
	    enum intr_type flags, void **cookiep);
int	inthand_remove(void *cookie);
void	sched_ithd(void *dummy);
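
/*
 * Usage sketch for the internal API above (illustrative only; the device
 * name, handler and softc are hypothetical):
 *
 *	void *cookie;
 *	int error;
 *
 *	error = inthand_add("mydev", irq, mydev_intr, sc,
 *	    INTR_TYPE_MISC, &cookie);
 *	if (error)
 *		return (error);
 *	...
 *	inthand_remove(cookie);		... at detach time
 */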
void	call_fast_unpend(int irq);
#endif /* LOCORE */
#endif /* _KERNEL */
#endif /* !_I386_ISA_INTR_MACHDEP_H_ */