Remove the critmode sysctl — the new method for critical_enter/exit (already
the default) is now the only method for i386.

Remove the paraphernalia that supported critmode.  Remove td_critnest, clean
up the assembly, and clean up (mostly remove) the old junk from
cpu_critical_enter() and cpu_critical_exit().
This commit is contained in:
Matthew Dillon 2002-07-10 20:15:58 +00:00
parent b8e7c5a8e1
commit 50b6a55512
11 changed files with 42 additions and 178 deletions

View File

@ -33,16 +33,6 @@
void i386_unpend(void); /* NOTE: not static, called from assembly */
/*
* Instrument our ability to run critical sections with interrupts
* enabled. Default is 1 (enabled). The value can be changed on the
* fly, at any time. If set to 0 the original interrupt disablement
* will be used for critical sections.
*/
int critical_mode = 1;
SYSCTL_INT(_debug, OID_AUTO, critical_mode,
CTLFLAG_RW, &critical_mode, 0, "");
/*
* cpu_unpend() - called from critical_exit() inline after quick
* interrupt-pending check.
@ -66,8 +56,8 @@ cpu_unpend(void)
/*
* cpu_critical_fork_exit() - cleanup after fork
*
* For i386 we do not have to do anything, td_critnest and
* td_savecrit are handled by the fork trampoline code.
* For i386 we do not have to do anything, td_critnest is
* handled by the fork trampoline code.
*/
void
cpu_critical_fork_exit(void)
@ -77,16 +67,12 @@ cpu_critical_fork_exit(void)
/*
* cpu_thread_link() - thread linkup, initialize machine-dependant fields
*
* (copy code originally in kern/kern_proc.c). XXX we actually
* don't have to initialize this field but it's probably a good
* idea for the moment for debugging's sake. The field is only
* valid when td_critnest is non-zero.
* There are currently no machine-dependant fields that require
* initialization.
*/
void
cpu_thread_link(struct thread *td)
{
td->td_md.md_savecrit = 0;
}
/*

View File

@ -225,23 +225,7 @@ ENTRY(fork_trampoline)
pushl %esi /* function */
movl PCPU(CURTHREAD),%ebx /* setup critnest */
movl $1,TD_CRITNEST(%ebx)
/*
* Initialize md_savecrit based on critical_mode. If critical_mode
* is enabled (new/1) savecrit is basically not used but must
* be initialized to -1 so we know it isn't used in
* cpu_critical_exit(). If critical_mode is disabled (old/0)
* the eflags to restore must be saved in md_savecrit.
*/
cmpl $0,critical_mode
jne 1f
pushfl
popl TD_MD+MD_SAVECRIT(%ebx)
orl $PSL_I,TD_MD+MD_SAVECRIT(%ebx)
jmp 2f
1:
movl $-1,TD_MD+MD_SAVECRIT(%ebx)
sti /* enable interrupts */
2:
call fork_exit
addl $12,%esp
/* cut from syscall */

View File

@ -225,23 +225,7 @@ ENTRY(fork_trampoline)
pushl %esi /* function */
movl PCPU(CURTHREAD),%ebx /* setup critnest */
movl $1,TD_CRITNEST(%ebx)
/*
* Initialize md_savecrit based on critical_mode. If critical_mode
* is enabled (new/1) savecrit is basically not used but must
* be initialized to -1 so we know it isn't used in
* cpu_critical_exit(). If critical_mode is disabled (old/0)
* the eflags to restore must be saved in md_savecrit.
*/
cmpl $0,critical_mode
jne 1f
pushfl
popl TD_MD+MD_SAVECRIT(%ebx)
orl $PSL_I,TD_MD+MD_SAVECRIT(%ebx)
jmp 2f
1:
movl $-1,TD_MD+MD_SAVECRIT(%ebx)
sti /* enable interrupts */
2:
call fork_exit
addl $12,%esp
/* cut from syscall */

View File

@ -94,7 +94,6 @@ ASSYM(TD_MD, offsetof(struct thread, td_md));
ASSYM(P_MD, offsetof(struct proc, p_md));
ASSYM(MD_LDT, offsetof(struct mdproc, md_ldt));
ASSYM(MD_SAVECRIT, offsetof(struct mdthread, md_savecrit));
ASSYM(KE_FLAGS, offsetof(struct kse, ke_flags));

View File

@ -20,8 +20,6 @@
__BEGIN_DECLS
extern int critical_mode;
/*
* Prototypes - see <arch>/<arch>/critical.c
*/
@ -37,22 +35,11 @@ void cpu_thread_link(struct thread *td);
* This routine is called from critical_enter() on the 0->1 transition
* of td_critnest, prior to it being incremented to 1.
*
* If old-style critical section handling (critical_mode == 0), we
* disable interrupts.
*
* If new-style critical section handling (criticla_mode != 0), we
* do not have to do anything. However, as a side effect any
* interrupts occuring while td_critnest is non-zero will be
* deferred.
* If new-style critical section handling we do not have to do anything.
* However, as a side effect any interrupts occuring while td_critnest
* is non-zero will be deferred.
*/
static __inline void
cpu_critical_enter(void)
{
if (critical_mode == 0) {
struct thread *td = curthread;
td->td_md.md_savecrit = intr_disable();
}
}
#define cpu_critical_enter()
/*
* cpu_critical_exit:
@ -61,41 +48,26 @@ cpu_critical_enter(void)
* of td_critnest, after it has been decremented to 0. We are
* exiting the last critical section.
*
* If td_critnest is -1 this is the 'new' critical_enter()/exit()
* code (the default critical_mode=1) and we do not have to do
* anything unless PCPU_GET(int_pending) is non-zero.
*
* Note that the td->critnest (1->0) transition interrupt race against
* our int_pending/unpend() check below is handled by the interrupt
* code for us, so we do not have to do anything fancy.
*
* Otherwise td_critnest contains the saved hardware interrupt state
* and will be restored. Since interrupts were hard-disabled there
* will be no pending interrupts to dispatch (the 'original' code).
*/
static __inline void
cpu_critical_exit(void)
{
struct thread *td = curthread;
if (td->td_md.md_savecrit != (register_t)-1) {
intr_restore(td->td_md.md_savecrit);
td->td_md.md_savecrit = (register_t)-1;
} else {
/*
* We may have to schedule pending interrupts. Create
* conditions similar to an interrupt context and call
* unpend().
*
* note: we do this even if we are in an interrupt
* nesting level. Deep nesting is protected by
* critical_*() and if we conditionalized it then we
* would have to check int_pending again whenever
* we decrement td_intr_nesting_level to 0.
*/
if (PCPU_GET(int_pending))
cpu_unpend();
}
/*
* We may have to schedule pending interrupts. Create
* conditions similar to an interrupt context and call
* unpend().
*
* note: we do this even if we are in an interrupt
* nesting level. Deep nesting is protected by
* critical_*() and if we conditionalized it then we
* would have to check int_pending again whenever
* we decrement td_intr_nesting_level to 0.
*/
if (PCPU_GET(int_pending))
cpu_unpend();
}
#else /* !__GNUC__ */

View File

@ -51,7 +51,6 @@ struct proc_ldt {
* Machine-dependent part of the proc structure for i386.
*/
struct mdthread {
register_t md_savecrit;
};
struct mdproc {

View File

@ -33,16 +33,6 @@
void i386_unpend(void); /* NOTE: not static, called from assembly */
/*
* Instrument our ability to run critical sections with interrupts
* enabled. Default is 1 (enabled). The value can be changed on the
* fly, at any time. If set to 0 the original interrupt disablement
* will be used for critical sections.
*/
int critical_mode = 1;
SYSCTL_INT(_debug, OID_AUTO, critical_mode,
CTLFLAG_RW, &critical_mode, 0, "");
/*
* cpu_unpend() - called from critical_exit() inline after quick
* interrupt-pending check.
@ -66,8 +56,8 @@ cpu_unpend(void)
/*
* cpu_critical_fork_exit() - cleanup after fork
*
* For i386 we do not have to do anything, td_critnest and
* td_savecrit are handled by the fork trampoline code.
* For i386 we do not have to do anything, td_critnest is
* handled by the fork trampoline code.
*/
void
cpu_critical_fork_exit(void)
@ -77,16 +67,12 @@ cpu_critical_fork_exit(void)
/*
* cpu_thread_link() - thread linkup, initialize machine-dependant fields
*
* (copy code originally in kern/kern_proc.c). XXX we actually
* don't have to initialize this field but it's probably a good
* idea for the moment for debugging's sake. The field is only
* valid when td_critnest is non-zero.
* There are currently no machine-dependant fields that require
* initialization.
*/
void
cpu_thread_link(struct thread *td)
{
td->td_md.md_savecrit = 0;
}
/*

View File

@ -225,23 +225,7 @@ ENTRY(fork_trampoline)
pushl %esi /* function */
movl PCPU(CURTHREAD),%ebx /* setup critnest */
movl $1,TD_CRITNEST(%ebx)
/*
* Initialize md_savecrit based on critical_mode. If critical_mode
* is enabled (new/1) savecrit is basically not used but must
* be initialized to -1 so we know it isn't used in
* cpu_critical_exit(). If critical_mode is disabled (old/0)
* the eflags to restore must be saved in md_savecrit.
*/
cmpl $0,critical_mode
jne 1f
pushfl
popl TD_MD+MD_SAVECRIT(%ebx)
orl $PSL_I,TD_MD+MD_SAVECRIT(%ebx)
jmp 2f
1:
movl $-1,TD_MD+MD_SAVECRIT(%ebx)
sti /* enable interrupts */
2:
call fork_exit
addl $12,%esp
/* cut from syscall */

View File

@ -94,7 +94,6 @@ ASSYM(TD_MD, offsetof(struct thread, td_md));
ASSYM(P_MD, offsetof(struct proc, p_md));
ASSYM(MD_LDT, offsetof(struct mdproc, md_ldt));
ASSYM(MD_SAVECRIT, offsetof(struct mdthread, md_savecrit));
ASSYM(KE_FLAGS, offsetof(struct kse, ke_flags));

View File

@ -20,8 +20,6 @@
__BEGIN_DECLS
extern int critical_mode;
/*
* Prototypes - see <arch>/<arch>/critical.c
*/
@ -37,22 +35,11 @@ void cpu_thread_link(struct thread *td);
* This routine is called from critical_enter() on the 0->1 transition
* of td_critnest, prior to it being incremented to 1.
*
* If old-style critical section handling (critical_mode == 0), we
* disable interrupts.
*
* If new-style critical section handling (criticla_mode != 0), we
* do not have to do anything. However, as a side effect any
* interrupts occuring while td_critnest is non-zero will be
* deferred.
* If new-style critical section handling we do not have to do anything.
* However, as a side effect any interrupts occuring while td_critnest
* is non-zero will be deferred.
*/
static __inline void
cpu_critical_enter(void)
{
if (critical_mode == 0) {
struct thread *td = curthread;
td->td_md.md_savecrit = intr_disable();
}
}
#define cpu_critical_enter()
/*
* cpu_critical_exit:
@ -61,41 +48,26 @@ cpu_critical_enter(void)
* of td_critnest, after it has been decremented to 0. We are
* exiting the last critical section.
*
* If td_critnest is -1 this is the 'new' critical_enter()/exit()
* code (the default critical_mode=1) and we do not have to do
* anything unless PCPU_GET(int_pending) is non-zero.
*
* Note that the td->critnest (1->0) transition interrupt race against
* our int_pending/unpend() check below is handled by the interrupt
* code for us, so we do not have to do anything fancy.
*
* Otherwise td_critnest contains the saved hardware interrupt state
* and will be restored. Since interrupts were hard-disabled there
* will be no pending interrupts to dispatch (the 'original' code).
*/
static __inline void
cpu_critical_exit(void)
{
struct thread *td = curthread;
if (td->td_md.md_savecrit != (register_t)-1) {
intr_restore(td->td_md.md_savecrit);
td->td_md.md_savecrit = (register_t)-1;
} else {
/*
* We may have to schedule pending interrupts. Create
* conditions similar to an interrupt context and call
* unpend().
*
* note: we do this even if we are in an interrupt
* nesting level. Deep nesting is protected by
* critical_*() and if we conditionalized it then we
* would have to check int_pending again whenever
* we decrement td_intr_nesting_level to 0.
*/
if (PCPU_GET(int_pending))
cpu_unpend();
}
/*
* We may have to schedule pending interrupts. Create
* conditions similar to an interrupt context and call
* unpend().
*
* note: we do this even if we are in an interrupt
* nesting level. Deep nesting is protected by
* critical_*() and if we conditionalized it then we
* would have to check int_pending again whenever
* we decrement td_intr_nesting_level to 0.
*/
if (PCPU_GET(int_pending))
cpu_unpend();
}
#else /* !__GNUC__ */

View File

@ -51,7 +51,6 @@ struct proc_ldt {
* Machine-dependent part of the proc structure for i386.
*/
struct mdthread {
register_t md_savecrit;
};
struct mdproc {