From 182da8209d4cda12e2d510e06f9e4b516171f9b6 Mon Sep 17 00:00:00 2001
From: Matthew Dillon <dillon@FreeBSD.org>
Date: Mon, 1 Apr 2002 23:51:23 +0000
Subject: [PATCH] Stage-2 commit of the critical*() code.

This re-inlines cpu_critical_enter() and cpu_critical_exit() and moves
associated critical prototypes into their own header file,
<arch>/<arch>/critical.h, which is only included by the three MI source
files that need it.

Back out and re-apply improperly committed syntactical cleanups made to
files that were still under active development.  Back out improperly
committed program structure changes that moved localized declarations
to the top of two procedures.  Partially re-apply one of the program
structure changes to move 'mask' into an intermediate block rather than
in three separate sub-blocks to make the code more readable.

Re-integrate bug fixes that Jake made to the sparc64 code.

Note: In general, developers should not gratuitously move declarations
out of sub-blocks.  They are where they are for reasons of structure,
grouping, readability, compiler-localizability, and to avoid
developer-introduced bugs similar to several found in recent years in
the VFS and VM code.

Reviewed by:	jake
---
 sys/alpha/alpha/critical.c     |  19 +----
 sys/alpha/include/cpufunc.h    |   5 --
 sys/alpha/include/critical.h   |  72 +++++++++++++++++++++
 sys/amd64/amd64/apic_vector.S  |   4 +-
 sys/amd64/amd64/critical.c     |  93 ++++++---------------------
 sys/amd64/include/cpufunc.h    |   4 --
 sys/amd64/include/critical.h   | 111 +++++++++++++++++++++++++++++++++
 sys/amd64/isa/atpic_vector.S   |   4 +-
 sys/amd64/isa/icu_vector.S     |   4 +-
 sys/amd64/isa/icu_vector.s     |   4 +-
 sys/i386/i386/apic_vector.s    |   4 +-
 sys/i386/i386/critical.c       |  93 ++++++---------------------
 sys/i386/include/cpufunc.h     |   4 --
 sys/i386/include/critical.h    | 111 +++++++++++++++++++++++++++++++++
 sys/i386/isa/apic_vector.s     |   4 +-
 sys/i386/isa/atpic_vector.s    |   4 +-
 sys/i386/isa/icu_vector.s      |   4 +-
 sys/ia64/ia64/critical.c       |  19 +----
 sys/ia64/include/cpufunc.h     |   5 --
 sys/ia64/include/critical.h    |  73 ++++++++++++++++++++++
 sys/kern/kern_fork.c           |   1 +
 sys/kern/kern_proc.c           |   1 +
 sys/kern/kern_switch.c         |   1 +
 sys/powerpc/include/cpufunc.h  |   6 --
 sys/powerpc/include/critical.h |  76 ++++++++++++++++++++++
 sys/powerpc/powerpc/critical.c |  20 ------
 sys/sparc64/include/cpufunc.h  |   5 --
 sys/sparc64/include/critical.h |  75 ++++++++++++++++++++++
 sys/sparc64/sparc64/critical.c |  21 -------
 29 files changed, 577 insertions(+), 270 deletions(-)
 create mode 100644 sys/alpha/include/critical.h
 create mode 100644 sys/amd64/include/critical.h
 create mode 100644 sys/i386/include/critical.h
 create mode 100644 sys/ia64/include/critical.h
 create mode 100644 sys/powerpc/include/critical.h
 create mode 100644 sys/sparc64/include/critical.h

diff --git a/sys/alpha/alpha/critical.c b/sys/alpha/alpha/critical.c
index 0bf9e9b0f641..c7e705d89c5e 100644
--- a/sys/alpha/alpha/critical.c
+++ b/sys/alpha/alpha/critical.c
@@ -18,24 +18,7 @@
 #include
 #include
 #include
-
-void
-cpu_critical_enter(void)
-{
-	struct thread *td;
-
-	td = curthread;
-	td->td_md.md_savecrit = intr_disable();
-}
-
-void
-cpu_critical_exit(void)
-{
-	struct thread *td;
-
-	td = curthread;
-	intr_restore(td->td_md.md_savecrit);
-}
+#include <machine/critical.h>
 
 /*
  * cpu_critical_fork_exit() - cleanup after fork
diff --git a/sys/alpha/include/cpufunc.h b/sys/alpha/include/cpufunc.h
index 95d7e1030b7e..05bdbd4a345a 100644
--- a/sys/alpha/include/cpufunc.h
+++ b/sys/alpha/include/cpufunc.h
@@ -59,11 +59,6 @@ intr_restore(register_t ipl)
 	alpha_pal_swpipl(ipl);
 }
 
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
-void cpu_critical_fork_exit(void);
-void cpu_thread_link(struct thread *td);
-
 #endif /* _KERNEL */
 
 #endif /* !_MACHINE_CPUFUNC_H_ */
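(A note for orientation before the new headers: the MI wrappers that
drive these hooks are implied by the transition comments repeated in
each header below: cpu_critical_enter() runs on the 0->1 transition of
td_critnest before the increment, cpu_critical_exit() on the 1->0
transition after the decrement.  A minimal sketch consistent with those
comments; the real critical_enter()/critical_exit() live in
sys/kern/kern_switch.c and are not part of this diff:

	void
	critical_enter(void)
	{
		struct thread *td = curthread;

		if (td->td_critnest == 0)
			cpu_critical_enter();	/* 0->1, pre-increment */
		td->td_critnest++;
	}

	void
	critical_exit(void)
	{
		struct thread *td = curthread;

		td->td_critnest--;
		if (td->td_critnest == 0)
			cpu_critical_exit();	/* 1->0, post-decrement */
	}
)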
diff --git a/sys/alpha/include/critical.h b/sys/alpha/include/critical.h
new file mode 100644
index 000000000000..dc5119ce47d7
--- /dev/null
+++ b/sys/alpha/include/critical.h
@@ -0,0 +1,72 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon.  This code is distributed under
+ * the BSD copyright, /usr/src/COPYRIGHT.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ *	cpu_critical_enter()		- inlined
+ *	cpu_critical_exit()		- inlined
+ *	cpu_critical_fork_exit()	- prototyped
+ *	cpu_thread_link()		- prototyped
+ *	related support functions residing
+ *	in <arch>/<arch>/critical.c	- prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CRITICAL_H_
+#define _MACHINE_CRITICAL_H_
+
+__BEGIN_DECLS
+
+/*
+ * Prototypes - see <arch>/<arch>/critical.c
+ */
+void cpu_critical_fork_exit(void);
+void cpu_thread_link(struct thread *td);
+
+#ifdef __GNUC__
+
+/*
+ * cpu_critical_enter:
+ *
+ *	This routine is called from critical_enter() on the 0->1 transition
+ *	of td_critnest, prior to it being incremented to 1.
+ */
+static __inline void
+cpu_critical_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	td->td_md.md_savecrit = intr_disable();
+}
+
+/*
+ * cpu_critical_exit:
+ *
+ *	This routine is called from critical_exit() on a 1->0 transition
+ *	of td_critnest, after it has been decremented to 0.  We are
+ *	exiting the last critical section.
+ */
+static __inline void
+cpu_critical_exit(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	intr_restore(td->td_md.md_savecrit);
+}
+
+#else /* !__GNUC__ */
+
+void cpu_critical_enter(void);
+void cpu_critical_exit(void);
+
+#endif /* __GNUC__ */
+
+__END_DECLS
+
+#endif /* !_MACHINE_CRITICAL_H_ */
+
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index f0e649727f54..34cc9f2122ae 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -155,7 +155,7 @@ IDTVEC(vec_name) ;				\
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	2f ;					\
 ;							\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 2: ;							\
 	decl	TD_INTR_NESTING_LEVEL(%ebx) ;		\
 10: ;							\
@@ -232,7 +232,7 @@ IDTVEC(vec_name) ;				\
 	FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	9f ;					\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 9: ;							\
 	pushl	$irq_num; /* pass the IRQ */		\
 	call	sched_ithd ;				\
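(The "call i386_unpend" renames above are the visible half of the
new-style scheme the i386/amd64 critical.c rewrite below depends on:
while td_critnest is non-zero an interrupt is not dispatched, it is
only recorded per-CPU, and the INT_PENDING check replays it later.  An
illustrative sketch of the interrupt-entry decision; the authoritative
version is the vector assembly, and set_pending()/dispatch_int() are
hypothetical stand-ins for the real per-CPU bookkeeping:

	static void
	interrupt_entry_sketch(struct thread *td, int irq)
	{
		if (td->td_critnest != 0) {
			/* defer: critical_exit()/cpu_unpend() replays it */
			set_pending(irq);	/* hypothetical */
			return;
		}
		dispatch_int(irq);		/* hypothetical */
	}
)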
diff --git a/sys/amd64/amd64/critical.c b/sys/amd64/amd64/critical.c
index 61f1a1fd8da7..4a983e413d36 100644
--- a/sys/amd64/amd64/critical.c
+++ b/sys/amd64/amd64/critical.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <machine/critical.h>
 
 #ifdef SMP
 #include
@@ -30,7 +31,7 @@
 #include
 #endif
 
-void unpend(void);	/* note: not static (called from assembly) */
+void i386_unpend(void);	/* NOTE: not static, called from assembly */
 
 /*
  * Instrument our ability to run critical sections with interrupts
@@ -43,81 +44,23 @@ SYSCTL_INT(_debug, OID_AUTO, critical_mode, CTLFLAG_RW, &critical_mode, 0, "");
 
 /*
- * cpu_critical_enter:
- *
- *	This routine is called from critical_enter() on the 0->1 transition
- *	of td_critnest, prior to it being incremented to 1.
- *
- *	If old-style critical section handling (critical_mode == 0), we
- *	disable interrupts.
- *
- *	If new-style critical section handling (criticla_mode != 0), we
- *	do not have to do anything.  However, as a side effect any
- *	interrupts occuring while td_critnest is non-zero will be
- *	deferred.
+ * cpu_unpend() - called from critical_exit() inline after quick
+ * interrupt-pending check.
  */
 void
-cpu_critical_enter(void)
-{
-	struct thread *td;
-
-	if (critical_mode == 0) {
-		td = curthread;
-		td->td_md.md_savecrit = intr_disable();
-	}
-}
-
-/*
- * cpu_critical_exit:
- *
- *	This routine is called from critical_exit() on a 1->0 transition
- *	of td_critnest, after it has been decremented to 0.  We are
- *	exiting the last critical section.
- *
- *	If td_critnest is -1 this is the 'new' critical_enter()/exit()
- *	code (the default critical_mode=1) and we do not have to do
- *	anything unless PCPU_GET(int_pending) is non-zero.
- *
- *	Note that the td->critnest (1->0) transition interrupt race against
- *	our int_pending/unpend() check below is handled by the interrupt
- *	code for us, so we do not have to do anything fancy.
- *
- *	Otherwise td_critnest contains the saved hardware interrupt state
- *	and will be restored.  Since interrupts were hard-disabled there
- *	will be no pending interrupts to dispatch (the 'original' code).
- */
-void
-cpu_critical_exit(void)
+cpu_unpend(void)
 {
 	register_t eflags;
 	struct thread *td;
 
 	td = curthread;
-	if (td->td_md.md_savecrit != (register_t)-1) {
-		intr_restore(td->td_md.md_savecrit);
-		td->td_md.md_savecrit = (register_t)-1;
-	} else {
-		/*
-		 * We may have to schedule pending interrupts.  Create
-		 * conditions similar to an interrupt context and call
-		 * unpend().
-		 *
-		 * note: we do this even if we are in an interrupt
-		 * nesting level.  Deep nesting is protected by
-		 * critical_*() and if we conditionalized it then we
-		 * would have to check int_pending again whenever
-		 * we decrement td_intr_nesting_level to 0.
-		 */
-		if (PCPU_GET(int_pending)) {
-			eflags = intr_disable();
-			if (PCPU_GET(int_pending)) {
-				++td->td_intr_nesting_level;
-				unpend();
-				--td->td_intr_nesting_level;
-			}
-			intr_restore(eflags);
-		}
+	eflags = intr_disable();
+	if (PCPU_GET(int_pending)) {
+		++td->td_intr_nesting_level;
+		i386_unpend();
+		--td->td_intr_nesting_level;
 	}
+	intr_restore(eflags);
 }
 
 /*
@@ -147,24 +90,26 @@ cpu_thread_link(struct thread *td)
 }
 
 /*
- * Called from cpu_critical_exit() or called from the assembly vector code
+ * Called from cpu_unpend() or called from the assembly vector code
  * to process any interrupts which may have occured while we were in
  * a critical section.
  *
  *	- interrupts must be disabled
  *	- td_critnest must be 0
  *	- td_intr_nesting_level must be incremented by the caller
+ *
+ * NOT STATIC (called from assembly)
  */
 void
-unpend(void)
+i386_unpend(void)
 {
-	int irq;
-	u_int32_t mask;
-
 	KASSERT(curthread->td_critnest == 0, ("unpend critnest != 0"));
 	KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled1"));
 	curthread->td_critnest = 1;
 	for (;;) {
+		u_int32_t mask;
+		int irq;
+
 		/*
 		 * Fast interrupts have priority
 		 */
@@ -207,7 +152,7 @@ unpend(void)
 		case 1:		/* bit 1 - statclock */
 			mtx_lock_spin(&sched_lock);
 			statclock_process(curthread->td_kse,
-			    (register_t)unpend, 0);
+			    (register_t)i386_unpend, 0);
 			mtx_unlock_spin(&sched_lock);
 			break;
 		}
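(The per-iteration mask/irq pair and the "fast interrupts have
priority" ordering in i386_unpend() above follow the usual
find-lowest-bit-and-clear idiom.  A self-contained sketch of one
dispatch round, with 'pending' standing in for whichever per-CPU mask
the real loop consults:

	#include <sys/types.h>
	#include <machine/atomic.h>

	/* Return the lowest pending IRQ and clear it, or -1 if none. */
	static __inline int
	unpend_one(volatile u_int *pending)
	{
		u_int mask;
		int irq;

		mask = *pending;
		if (mask == 0)
			return (-1);
		irq = ffs(mask) - 1;		/* lowest set bit first */
		atomic_clear_int(pending, 1 << irq);
		return (irq);
	}
)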
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index 2b0abcc6aae6..d57839aab8de 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -624,10 +624,6 @@ u_int	rcr0(void);
 u_int	rcr3(void);
 u_int	rcr4(void);
 void	reset_dbregs(void);
-void	cpu_critical_enter(void);
-void	cpu_critical_exit(void);
-void	cpu_critical_fork_exit(void);
-void	cpu_thread_link(struct thread *td);
 
 __END_DECLS
diff --git a/sys/amd64/include/critical.h b/sys/amd64/include/critical.h
new file mode 100644
index 000000000000..6d142921927e
--- /dev/null
+++ b/sys/amd64/include/critical.h
@@ -0,0 +1,111 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon.  This code is distributed under
+ * the BSD copyright, /usr/src/COPYRIGHT.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ *	cpu_critical_enter()		- inlined
+ *	cpu_critical_exit()		- inlined
+ *	cpu_critical_fork_exit()	- prototyped
+ *	cpu_thread_link()		- prototyped
+ *	related support functions residing
+ *	in <arch>/<arch>/critical.c	- prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CRITICAL_H_
+#define _MACHINE_CRITICAL_H_
+
+__BEGIN_DECLS
+
+extern int critical_mode;
+
+/*
+ * Prototypes - see <arch>/<arch>/critical.c
+ */
+void cpu_unpend(void);
+void cpu_critical_fork_exit(void);
+void cpu_thread_link(struct thread *td);
+
+#ifdef __GNUC__
+
+/*
+ * cpu_critical_enter:
+ *
+ *	This routine is called from critical_enter() on the 0->1 transition
+ *	of td_critnest, prior to it being incremented to 1.
+ *
+ *	If old-style critical section handling (critical_mode == 0), we
+ *	disable interrupts.
+ *
+ *	If new-style critical section handling (critical_mode != 0), we
+ *	do not have to do anything.  However, as a side effect any
+ *	interrupts occurring while td_critnest is non-zero will be
+ *	deferred.
+ */
+static __inline void
+cpu_critical_enter(void)
+{
+	if (critical_mode == 0) {
+		struct thread *td = curthread;
+		td->td_md.md_savecrit = intr_disable();
+	}
+}
+
+/*
+ * cpu_critical_exit:
+ *
+ *	This routine is called from critical_exit() on a 1->0 transition
+ *	of td_critnest, after it has been decremented to 0.  We are
+ *	exiting the last critical section.
+ *
+ *	If td_md.md_savecrit is -1 this is the 'new' critical_enter()/exit()
+ *	code (the default critical_mode=1) and we do not have to do
+ *	anything unless PCPU_GET(int_pending) is non-zero.
+ *
+ *	Note that the td_critnest (1->0) transition interrupt race against
+ *	our int_pending/unpend() check below is handled by the interrupt
+ *	code for us, so we do not have to do anything fancy.
+ *
+ *	Otherwise td_md.md_savecrit contains the saved hardware interrupt
+ *	state and will be restored.  Since interrupts were hard-disabled
+ *	there will be no pending interrupts to dispatch (the 'original'
+ *	code).
+ */
+static __inline void
+cpu_critical_exit(void)
+{
+	struct thread *td = curthread;
+
+	if (td->td_md.md_savecrit != (register_t)-1) {
+		intr_restore(td->td_md.md_savecrit);
+		td->td_md.md_savecrit = (register_t)-1;
+	} else {
+		/*
+		 * We may have to schedule pending interrupts.  Create
+		 * conditions similar to an interrupt context and call
+		 * cpu_unpend().
+		 *
+		 * note: we do this even if we are in an interrupt
+		 * nesting level.  Deep nesting is protected by
+		 * critical_*() and if we conditionalized it then we
+		 * would have to check int_pending again whenever
+		 * we decrement td_intr_nesting_level to 0.
+		 */
+		if (PCPU_GET(int_pending))
+			cpu_unpend();
+	}
+}
+
+#else /* !__GNUC__ */
+
+void cpu_critical_enter(void);
+void cpu_critical_exit(void);
+
+#endif /* __GNUC__ */
+
+__END_DECLS
+
+#endif /* !_MACHINE_CRITICAL_H_ */
+
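(The md_savecrit != -1 test in cpu_critical_exit() above only works if
every thread starts out with the -1 sentinel installed.
cpu_thread_link() is prototyped in the header but its body is outside
this diff; presumably it does the equivalent of the following, which is
an assumption and not taken from the patch:

	void
	cpu_thread_link(struct thread *td)
	{
		/* hypothetical: mark "no saved interrupt state" */
		td->td_md.md_savecrit = (register_t)-1;
	}
)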
diff --git a/sys/amd64/isa/atpic_vector.S b/sys/amd64/isa/atpic_vector.S
index be0e1059afcb..de369c179f51 100644
--- a/sys/amd64/isa/atpic_vector.S
+++ b/sys/amd64/isa/atpic_vector.S
@@ -125,7 +125,7 @@ IDTVEC(vec_name) ;				\
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	2f ;					\
 ;							\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 2: ;							\
 	decl	TD_INTR_NESTING_LEVEL(%ebx) ;		\
 10: ;							\
@@ -197,7 +197,7 @@ IDTVEC(vec_name) ;				\
 	FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	9f ;					\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 9: ;							\
 	pushl	$irq_num; /* pass the IRQ */		\
 	call	sched_ithd ;				\
diff --git a/sys/amd64/isa/icu_vector.S b/sys/amd64/isa/icu_vector.S
index be0e1059afcb..de369c179f51 100644
--- a/sys/amd64/isa/icu_vector.S
+++ b/sys/amd64/isa/icu_vector.S
@@ -125,7 +125,7 @@ IDTVEC(vec_name) ;				\
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	2f ;					\
 ;							\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 2: ;							\
 	decl	TD_INTR_NESTING_LEVEL(%ebx) ;		\
 10: ;							\
@@ -197,7 +197,7 @@ IDTVEC(vec_name) ;				\
 	FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	9f ;					\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 9: ;							\
 	pushl	$irq_num; /* pass the IRQ */		\
 	call	sched_ithd ;				\
diff --git a/sys/amd64/isa/icu_vector.s b/sys/amd64/isa/icu_vector.s
index be0e1059afcb..de369c179f51 100644
--- a/sys/amd64/isa/icu_vector.s
+++ b/sys/amd64/isa/icu_vector.s
@@ -125,7 +125,7 @@ IDTVEC(vec_name) ;				\
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	2f ;					\
 ;							\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 2: ;							\
 	decl	TD_INTR_NESTING_LEVEL(%ebx) ;		\
 10: ;							\
@@ -197,7 +197,7 @@ IDTVEC(vec_name) ;				\
 	FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	9f ;					\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 9: ;							\
 	pushl	$irq_num; /* pass the IRQ */		\
 	call	sched_ithd ;				\
diff --git a/sys/i386/i386/apic_vector.s b/sys/i386/i386/apic_vector.s
index f0e649727f54..34cc9f2122ae 100644
--- a/sys/i386/i386/apic_vector.s
+++ b/sys/i386/i386/apic_vector.s
@@ -155,7 +155,7 @@ IDTVEC(vec_name) ;				\
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	2f ;					\
 ;							\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 2: ;							\
 	decl	TD_INTR_NESTING_LEVEL(%ebx) ;		\
 10: ;							\
@@ -232,7 +232,7 @@ IDTVEC(vec_name) ;				\
 	FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	9f ;					\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 9: ;							\
 	pushl	$irq_num; /* pass the IRQ */		\
 	call	sched_ithd ;				\
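(All of the vector-file hunks in this patch make the same two-line
change, so it is worth stating once what the assembly does on the
interrupt-return path.  In rough C terms; the macro text above is
authoritative:

	static __inline void
	intr_return_check(void)
	{
		/* cmpl $0,PCPU(INT_PENDING); je skip; call i386_unpend */
		if (PCPU_GET(int_pending))
			i386_unpend();	/* replay deferred interrupts */
	}
)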
diff --git a/sys/i386/i386/critical.c b/sys/i386/i386/critical.c
index 61f1a1fd8da7..4a983e413d36 100644
--- a/sys/i386/i386/critical.c
+++ b/sys/i386/i386/critical.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <machine/critical.h>
 
 #ifdef SMP
 #include
@@ -30,7 +31,7 @@
 #include
 #endif
 
-void unpend(void);	/* note: not static (called from assembly) */
+void i386_unpend(void);	/* NOTE: not static, called from assembly */
 
 /*
  * Instrument our ability to run critical sections with interrupts
@@ -43,81 +44,23 @@ SYSCTL_INT(_debug, OID_AUTO, critical_mode, CTLFLAG_RW, &critical_mode, 0, "");
 
 /*
- * cpu_critical_enter:
- *
- *	This routine is called from critical_enter() on the 0->1 transition
- *	of td_critnest, prior to it being incremented to 1.
- *
- *	If old-style critical section handling (critical_mode == 0), we
- *	disable interrupts.
- *
- *	If new-style critical section handling (criticla_mode != 0), we
- *	do not have to do anything.  However, as a side effect any
- *	interrupts occuring while td_critnest is non-zero will be
- *	deferred.
+ * cpu_unpend() - called from critical_exit() inline after quick
+ * interrupt-pending check.
  */
 void
-cpu_critical_enter(void)
-{
-	struct thread *td;
-
-	if (critical_mode == 0) {
-		td = curthread;
-		td->td_md.md_savecrit = intr_disable();
-	}
-}
-
-/*
- * cpu_critical_exit:
- *
- *	This routine is called from critical_exit() on a 1->0 transition
- *	of td_critnest, after it has been decremented to 0.  We are
- *	exiting the last critical section.
- *
- *	If td_critnest is -1 this is the 'new' critical_enter()/exit()
- *	code (the default critical_mode=1) and we do not have to do
- *	anything unless PCPU_GET(int_pending) is non-zero.
- *
- *	Note that the td->critnest (1->0) transition interrupt race against
- *	our int_pending/unpend() check below is handled by the interrupt
- *	code for us, so we do not have to do anything fancy.
- *
- *	Otherwise td_critnest contains the saved hardware interrupt state
- *	and will be restored.  Since interrupts were hard-disabled there
- *	will be no pending interrupts to dispatch (the 'original' code).
- */
-void
-cpu_critical_exit(void)
+cpu_unpend(void)
 {
 	register_t eflags;
 	struct thread *td;
 
 	td = curthread;
-	if (td->td_md.md_savecrit != (register_t)-1) {
-		intr_restore(td->td_md.md_savecrit);
-		td->td_md.md_savecrit = (register_t)-1;
-	} else {
-		/*
-		 * We may have to schedule pending interrupts.  Create
-		 * conditions similar to an interrupt context and call
-		 * unpend().
-		 *
-		 * note: we do this even if we are in an interrupt
-		 * nesting level.  Deep nesting is protected by
-		 * critical_*() and if we conditionalized it then we
-		 * would have to check int_pending again whenever
-		 * we decrement td_intr_nesting_level to 0.
-		 */
-		if (PCPU_GET(int_pending)) {
-			eflags = intr_disable();
-			if (PCPU_GET(int_pending)) {
-				++td->td_intr_nesting_level;
-				unpend();
-				--td->td_intr_nesting_level;
-			}
-			intr_restore(eflags);
-		}
+	eflags = intr_disable();
+	if (PCPU_GET(int_pending)) {
+		++td->td_intr_nesting_level;
+		i386_unpend();
+		--td->td_intr_nesting_level;
 	}
+	intr_restore(eflags);
 }
 
 /*
@@ -147,24 +90,26 @@ cpu_thread_link(struct thread *td)
 }
 
 /*
- * Called from cpu_critical_exit() or called from the assembly vector code
+ * Called from cpu_unpend() or called from the assembly vector code
  * to process any interrupts which may have occured while we were in
  * a critical section.
 *
 *	- interrupts must be disabled
 *	- td_critnest must be 0
 *	- td_intr_nesting_level must be incremented by the caller
+ *
+ * NOT STATIC (called from assembly)
  */
 void
-unpend(void)
+i386_unpend(void)
 {
-	int irq;
-	u_int32_t mask;
-
 	KASSERT(curthread->td_critnest == 0, ("unpend critnest != 0"));
 	KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled1"));
 	curthread->td_critnest = 1;
 	for (;;) {
+		u_int32_t mask;
+		int irq;
+
 		/*
 		 * Fast interrupts have priority
 		 */
@@ -207,7 +152,7 @@ unpend(void)
 		case 1:		/* bit 1 - statclock */
 			mtx_lock_spin(&sched_lock);
 			statclock_process(curthread->td_kse,
-			    (register_t)unpend, 0);
+			    (register_t)i386_unpend, 0);
 			mtx_unlock_spin(&sched_lock);
 			break;
 		}
diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h
index 2b0abcc6aae6..d57839aab8de 100644
--- a/sys/i386/include/cpufunc.h
+++ b/sys/i386/include/cpufunc.h
@@ -624,10 +624,6 @@ u_int	rcr0(void);
 u_int	rcr3(void);
 u_int	rcr4(void);
 void	reset_dbregs(void);
-void	cpu_critical_enter(void);
-void	cpu_critical_exit(void);
-void	cpu_critical_fork_exit(void);
-void	cpu_thread_link(struct thread *td);
 
 __END_DECLS
diff --git a/sys/i386/include/critical.h b/sys/i386/include/critical.h
new file mode 100644
index 000000000000..6d142921927e
--- /dev/null
+++ b/sys/i386/include/critical.h
@@ -0,0 +1,111 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon.  This code is distributed under
+ * the BSD copyright, /usr/src/COPYRIGHT.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ *	cpu_critical_enter()		- inlined
+ *	cpu_critical_exit()		- inlined
+ *	cpu_critical_fork_exit()	- prototyped
+ *	cpu_thread_link()		- prototyped
+ *	related support functions residing
+ *	in <arch>/<arch>/critical.c	- prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CRITICAL_H_
+#define _MACHINE_CRITICAL_H_
+
+__BEGIN_DECLS
+
+extern int critical_mode;
+
+/*
+ * Prototypes - see <arch>/<arch>/critical.c
+ */
+void cpu_unpend(void);
+void cpu_critical_fork_exit(void);
+void cpu_thread_link(struct thread *td);
+
+#ifdef __GNUC__
+
+/*
+ * cpu_critical_enter:
+ *
+ *	This routine is called from critical_enter() on the 0->1 transition
+ *	of td_critnest, prior to it being incremented to 1.
+ *
+ *	If old-style critical section handling (critical_mode == 0), we
+ *	disable interrupts.
+ *
+ *	If new-style critical section handling (critical_mode != 0), we
+ *	do not have to do anything.  However, as a side effect any
+ *	interrupts occurring while td_critnest is non-zero will be
+ *	deferred.
+ */
+static __inline void
+cpu_critical_enter(void)
+{
+	if (critical_mode == 0) {
+		struct thread *td = curthread;
+		td->td_md.md_savecrit = intr_disable();
+	}
+}
+
+/*
+ * cpu_critical_exit:
+ *
+ *	This routine is called from critical_exit() on a 1->0 transition
+ *	of td_critnest, after it has been decremented to 0.  We are
+ *	exiting the last critical section.
+ *
+ *	If td_md.md_savecrit is -1 this is the 'new' critical_enter()/exit()
+ *	code (the default critical_mode=1) and we do not have to do
+ *	anything unless PCPU_GET(int_pending) is non-zero.
+ *
+ *	Note that the td_critnest (1->0) transition interrupt race against
+ *	our int_pending/unpend() check below is handled by the interrupt
+ *	code for us, so we do not have to do anything fancy.
+ *
+ *	Otherwise td_md.md_savecrit contains the saved hardware interrupt
+ *	state and will be restored.  Since interrupts were hard-disabled
+ *	there will be no pending interrupts to dispatch (the 'original'
+ *	code).
+ */
+static __inline void
+cpu_critical_exit(void)
+{
+	struct thread *td = curthread;
+
+	if (td->td_md.md_savecrit != (register_t)-1) {
+		intr_restore(td->td_md.md_savecrit);
+		td->td_md.md_savecrit = (register_t)-1;
+	} else {
+		/*
+		 * We may have to schedule pending interrupts.  Create
+		 * conditions similar to an interrupt context and call
+		 * cpu_unpend().
+		 *
+		 * note: we do this even if we are in an interrupt
+		 * nesting level.  Deep nesting is protected by
+		 * critical_*() and if we conditionalized it then we
+		 * would have to check int_pending again whenever
+		 * we decrement td_intr_nesting_level to 0.
+		 */
+		if (PCPU_GET(int_pending))
+			cpu_unpend();
+	}
+}
+
+#else /* !__GNUC__ */
+
+void cpu_critical_enter(void);
+void cpu_critical_exit(void);
+
+#endif /* __GNUC__ */
+
+__END_DECLS
+
+#endif /* !_MACHINE_CRITICAL_H_ */
+
diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s
index f0e649727f54..34cc9f2122ae 100644
--- a/sys/i386/isa/apic_vector.s
+++ b/sys/i386/isa/apic_vector.s
@@ -155,7 +155,7 @@ IDTVEC(vec_name) ;				\
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	2f ;					\
 ;							\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 2: ;							\
 	decl	TD_INTR_NESTING_LEVEL(%ebx) ;		\
 10: ;							\
@@ -232,7 +232,7 @@ IDTVEC(vec_name) ;				\
 	FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	9f ;					\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 9: ;							\
 	pushl	$irq_num; /* pass the IRQ */		\
 	call	sched_ithd ;				\
diff --git a/sys/i386/isa/atpic_vector.s b/sys/i386/isa/atpic_vector.s
index be0e1059afcb..de369c179f51 100644
--- a/sys/i386/isa/atpic_vector.s
+++ b/sys/i386/isa/atpic_vector.s
@@ -125,7 +125,7 @@ IDTVEC(vec_name) ;				\
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	2f ;					\
 ;							\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 2: ;							\
 	decl	TD_INTR_NESTING_LEVEL(%ebx) ;		\
 10: ;							\
@@ -197,7 +197,7 @@ IDTVEC(vec_name) ;				\
 	FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	9f ;					\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 9: ;							\
 	pushl	$irq_num; /* pass the IRQ */		\
 	call	sched_ithd ;				\
diff --git a/sys/i386/isa/icu_vector.s b/sys/i386/isa/icu_vector.s
index be0e1059afcb..de369c179f51 100644
--- a/sys/i386/isa/icu_vector.s
+++ b/sys/i386/isa/icu_vector.s
@@ -125,7 +125,7 @@ IDTVEC(vec_name) ;				\
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	2f ;					\
 ;							\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 2: ;							\
 	decl	TD_INTR_NESTING_LEVEL(%ebx) ;		\
 10: ;							\
@@ -197,7 +197,7 @@ IDTVEC(vec_name) ;				\
 	FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
 	cmpl	$0,PCPU(INT_PENDING) ;			\
 	je	9f ;					\
-	call	unpend ;				\
+	call	i386_unpend ;				\
 9: ;							\
 	pushl	$irq_num; /* pass the IRQ */		\
 	call	sched_ithd ;				\
diff --git a/sys/ia64/ia64/critical.c b/sys/ia64/ia64/critical.c
index 1698c0323a66..5272b803883d 100644
--- a/sys/ia64/ia64/critical.c
+++ b/sys/ia64/ia64/critical.c
@@ -18,24 +18,7 @@
 #include
 #include
 #include
-
-void
-cpu_critical_enter(void)
-{
-	struct thread *td;
-
-	td = curthread;
-	td->td_md.md_savecrit = intr_disable();
-}
-
-void
-cpu_critical_exit(void)
-{
-	struct thread *td;
-
-	td = curthread;
-	intr_restore(td->td_md.md_savecrit);
-}
+#include <machine/critical.h>
 
 /*
  * cpu_critical_fork_exit() - cleanup after fork
diff --git a/sys/ia64/include/cpufunc.h b/sys/ia64/include/cpufunc.h
index e7cf818021fc..5569ab0cdbb4 100644
--- a/sys/ia64/include/cpufunc.h
+++ b/sys/ia64/include/cpufunc.h
@@ -300,11 +300,6 @@ intr_restore(critical_t psr)
 	__asm __volatile ("mov psr.l=%0;; srlz.d" :: "r" (psr));
 }
 
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
-void cpu_critical_fork_exit(void);
-void cpu_thread_link(struct thread *td);
-
 #endif /* _KERNEL */
 
 #endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/ia64/include/critical.h b/sys/ia64/include/critical.h
new file mode 100644
index 000000000000..265edabccf44
--- /dev/null
+++ b/sys/ia64/include/critical.h
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon.  This code is distributed under
+ * the BSD copyright, /usr/src/COPYRIGHT.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ *	cpu_critical_enter()		- inlined
+ *	cpu_critical_exit()		- inlined
+ *	cpu_critical_fork_exit()	- prototyped
+ *	cpu_thread_link()		- prototyped
+ *	related support functions residing
+ *	in <arch>/<arch>/critical.c	- prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CRITICAL_H_
+#define _MACHINE_CRITICAL_H_
+
+__BEGIN_DECLS
+
+/*
+ * Prototypes - see <arch>/<arch>/critical.c
+ */
+void cpu_critical_fork_exit(void);
+void cpu_thread_link(struct thread *td);
+
+#ifdef __GNUC__
+
+/*
+ * cpu_critical_enter:
+ *
+ *	This routine is called from critical_enter() on the 0->1 transition
+ *	of td_critnest, prior to it being incremented to 1.
+ */
+static __inline void
+cpu_critical_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	td->td_md.md_savecrit = intr_disable();
+}
+
+/*
+ * cpu_critical_exit:
+ *
+ *	This routine is called from critical_exit() on a 1->0 transition
+ *	of td_critnest, after it has been decremented to 0.  We are
+ *	exiting the last critical section.
+ */
+static __inline void
+cpu_critical_exit(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	intr_restore(td->td_md.md_savecrit);
+}
+
+
+#else /* !__GNUC__ */
+
+void cpu_critical_enter(void);
+void cpu_critical_exit(void);
+
+#endif /* __GNUC__ */
+
+__END_DECLS
+
+#endif /* !_MACHINE_CRITICAL_H_ */
+
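(The alpha and ia64 inlines both use the save/restore pattern visible
in the cpufunc.h context above: intr_disable() returns the previous
machine state and intr_restore() reinstates exactly that state, which
is what lets critical sections nest under interrupt-disabled callers.
A small usage sketch; critical_t matches the ia64 intr_restore()
prototype shown above:

	static void
	nesting_example(void)
	{
		critical_t saved;

		saved = intr_disable();	/* fine if already disabled */
		/* ... work that must not be interrupted ... */
		intr_restore(saved);	/* prior state, not forced "on" */
	}
)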
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index ddd962a66375..8782007bf48e 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -70,6 +70,7 @@
 #include
 
 #include
+#include <machine/critical.h>
 
 static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index a1620bc19268..7e62effd9541 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -54,6 +54,7 @@
 #include
 #include
 #include
+#include <machine/critical.h>
 
 MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
 MALLOC_DEFINE(M_SESSION, "session", "session header");
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index ccfb1149bfb5..1b33f2838ca0 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -34,6 +34,7 @@
 #include
 #include
 #include
+#include <machine/critical.h>
 
 /*
  * Global run queue.
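(These three MI files are the only consumers of the new header;
everything else in the kernel keeps going through the MI API.  A
hypothetical caller, for contrast: note that it includes no machine
header at all:

	#include <sys/param.h>
	#include <sys/systm.h>

	static void
	percpu_consumer_sketch(void)
	{
		critical_enter();	/* defers or masks local interrupts */
		/* ... touch per-CPU data safely ... */
		critical_exit();	/* replays anything deferred */
	}
)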
diff --git a/sys/powerpc/include/cpufunc.h b/sys/powerpc/include/cpufunc.h
index b788aa6935aa..429e83bd7ec7 100644
--- a/sys/powerpc/include/cpufunc.h
+++ b/sys/powerpc/include/cpufunc.h
@@ -132,12 +132,6 @@ powerpc_get_pcpup(void)
 	return(ret);
 }
 
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
-void cpu_critical_fork_exit(void);
-void cpu_thread_link(struct thread *td);
-
-
 #endif /* _KERNEL */
 
 #endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/powerpc/include/critical.h b/sys/powerpc/include/critical.h
new file mode 100644
index 000000000000..08184bd58208
--- /dev/null
+++ b/sys/powerpc/include/critical.h
@@ -0,0 +1,76 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon.  This code is distributed under
+ * the BSD copyright, /usr/src/COPYRIGHT.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ *	cpu_critical_enter()		- inlined
+ *	cpu_critical_exit()		- inlined
+ *	cpu_critical_fork_exit()	- prototyped
+ *	cpu_thread_link()		- prototyped
+ *	related support functions residing
+ *	in <arch>/<arch>/critical.c	- prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CRITICAL_H_
+#define _MACHINE_CRITICAL_H_
+
+__BEGIN_DECLS
+
+/*
+ * Prototypes - see <arch>/<arch>/critical.c
+ */
+void cpu_critical_fork_exit(void);
+void cpu_thread_link(struct thread *td);
+
+#ifdef __GNUC__
+
+/*
+ * cpu_critical_enter:
+ *
+ *	This routine is called from critical_enter() on the 0->1 transition
+ *	of td_critnest, prior to it being incremented to 1.
+ */
+
+static __inline void
+cpu_critical_enter(void)
+{
+	u_int msr;
+	struct thread *td = curthread;
+
+	msr = mfmsr();
+	td->td_md.md_savecrit = msr;
+	msr &= ~(PSL_EE | PSL_RI);
+	mtmsr(msr);
+}
+
+/*
+ * cpu_critical_exit:
+ *
+ *	This routine is called from critical_exit() on a 1->0 transition
+ *	of td_critnest, after it has been decremented to 0.  We are
+ *	exiting the last critical section.
+ */
+static __inline void
+cpu_critical_exit(void)
+{
+	struct thread *td = curthread;
+
+	mtmsr(td->td_md.md_savecrit);
+}
+
+
+#else /* !__GNUC__ */
+
+void cpu_critical_enter(void);
+void cpu_critical_exit(void);
+
+#endif /* __GNUC__ */
+
+__END_DECLS
+
+#endif /* !_MACHINE_CRITICAL_H_ */
+
diff --git a/sys/powerpc/powerpc/critical.c b/sys/powerpc/powerpc/critical.c
index cb800627dc3a..873498cd7624 100644
--- a/sys/powerpc/powerpc/critical.c
+++ b/sys/powerpc/powerpc/critical.c
@@ -19,26 +19,6 @@
 #include
 #include
 
-void
-cpu_critical_enter(void)
-{
-	u_int msr;
-	struct thread *td = curthread;
-
-	msr = mfmsr();
-	td->td_md.md_savecrit = msr;
-	msr &= ~(PSL_EE | PSL_RI);
-	mtmsr(msr);
-}
-
-void
-cpu_critical_exit(void)
-{
-	struct thread *td = curthread;
-
-	mtmsr(td->td_md.md_savecrit);
-}
-
 /*
  * cpu_critical_fork_exit() - cleanup after fork
  */
diff --git a/sys/sparc64/include/cpufunc.h b/sys/sparc64/include/cpufunc.h
index 4a3dd7ae6e3d..86e7ae526089 100644
--- a/sys/sparc64/include/cpufunc.h
+++ b/sys/sparc64/include/cpufunc.h
@@ -224,9 +224,4 @@ ffs(int mask)
 #undef LDNC_GEN
 #undef STNC_GEN
 
-void cpu_critical_enter(void);
-void cpu_critical_exit(void);
-void cpu_critical_fork_exit(void);
-void cpu_thread_link(struct thread *td);
-
 #endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/sparc64/include/critical.h b/sys/sparc64/include/critical.h
new file mode 100644
index 000000000000..eee9914adc12
--- /dev/null
+++ b/sys/sparc64/include/critical.h
@@ -0,0 +1,75 @@
+/*-
+ * Copyright (c) 2002 Matthew Dillon.  This code is distributed under
+ * the BSD copyright, /usr/src/COPYRIGHT.
+ *
+ * This file contains prototypes and high-level inlines related to
+ * machine-level critical function support:
+ *
+ *	cpu_critical_enter()		- inlined
+ *	cpu_critical_exit()		- inlined
+ *	cpu_critical_fork_exit()	- prototyped
+ *	cpu_thread_link()		- prototyped
+ *	related support functions residing
+ *	in <arch>/<arch>/critical.c	- prototyped
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CRITICAL_H_
+#define _MACHINE_CRITICAL_H_
+
+__BEGIN_DECLS
+
+/*
+ * Prototypes - see <arch>/<arch>/critical.c
+ */
+void cpu_critical_fork_exit(void);
+void cpu_thread_link(struct thread *td);
+
+#ifdef __GNUC__
+
+/*
+ * cpu_critical_enter:
+ *
+ *	This routine is called from critical_enter() on the 0->1 transition
+ *	of td_critnest, prior to it being incremented to 1.
+ */
+static __inline void
+cpu_critical_enter(void)
+{
+	struct thread *td;
+	critical_t pil;
+
+	td = curthread;
+	pil = rdpr(pil);
+	wrpr(pil, 0, 14);
+	td->td_md.md_savecrit = pil;
+}
+
+
+/*
+ * cpu_critical_exit:
+ *
+ *	This routine is called from critical_exit() on a 1->0 transition
+ *	of td_critnest, after it has been decremented to 0.  We are
+ *	exiting the last critical section.
+ */
+static __inline void
+cpu_critical_exit(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	wrpr(pil, td->td_md.md_savecrit, 0);
+}
+
+#else /* !__GNUC__ */
+
+void cpu_critical_enter(void);
+void cpu_critical_exit(void);
+
+#endif /* __GNUC__ */
+
+__END_DECLS
+
+#endif /* !_MACHINE_CRITICAL_H_ */
diff --git a/sys/sparc64/sparc64/critical.c b/sys/sparc64/sparc64/critical.c
index 5815eeb49788..6be10aa53b44 100644
--- a/sys/sparc64/sparc64/critical.c
+++ b/sys/sparc64/sparc64/critical.c
@@ -19,27 +19,6 @@
 #include
 #include
 
-void
-cpu_critical_enter(void)
-{
-	struct thread *td;
-	critical_t pil;
-
-	td = curthread;
-	pil = rdpr(pil);
-	wrpr(pil, 0, 14);
-	td->td_md.md_savecrit = pil;
-}
-
-void
-cpu_critical_exit(void)
-{
-	struct thread *td;
-
-	td = curthread;
-	wrpr(pil, td->td_md.md_savecrit, 0);
-}
-
 /*
  * cpu_critical_fork_exit() - cleanup after fork
  */