Divorce critical sections from spinlocks. Critical sections as denoted by

critical_enter() and critical_exit() are now solely a mechanism for
deferring kernel preemptions.  They no longer have any effect on
interrupts.  This means that standalone critical sections are now very
cheap as they are simply unlocked integer increments and decrements for the
common case.

Spin mutexes now use a separate KPI implemented in MD code: spinlock_enter()
and spinlock_exit().  This KPI is responsible for providing whatever MD
guarantees are needed to ensure that a thread holding a spin lock won't
be preempted by any other code that will try to lock the same lock.  For
now all archs continue to block interrupts in a "spinlock section" as they
did formerly in all critical sections.  Note that I've also taken this
opportunity to push a few things into MD code rather than MI.  For example,
critical_fork_exit() no longer exists.  Instead, MD code ensures that new
threads have the correct state when they are created.  Also, we no longer
try to fixup the idlethreads for APs in MI code.  Instead, each arch sets
the initial curthread and adjusts the state of the idle thread it borrows
in order to perform the initial context switch.

This change is largely a big NOP, but the cleaner separation it provides
will allow for more efficient alternative locking schemes in other parts
of the kernel (bare critical sections rather than per-CPU spin mutexes
for per-CPU data for example).

Reviewed by:	grehan, cognet, arch@, others
Tested on:	i386, alpha, sparc64, powerpc, arm, possibly more
This commit is contained in:
John Baldwin 2005-04-04 21:53:56 +00:00
parent 426494536e
commit c6a37e8413
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=144637
61 changed files with 408 additions and 996 deletions

View File

@ -1,57 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pcpu.h>
#include <sys/eventhandler.h> /* XX */
#include <sys/ktr.h> /* XX */
#include <sys/signalvar.h>
#include <sys/sysproto.h> /* XX */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <machine/critical.h>
/*
* cpu_critical_fork_exit() - cleanup after fork
*
*/
void
cpu_critical_fork_exit(void)
{
struct thread *td;
td = curthread;
td->td_md.md_savecrit = ALPHA_PSL_IPL_0;
}

View File

@ -2397,3 +2397,27 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
pcpu->pc_idlepcb.apcb_ptbr = thread0.td_pcb->pcb_hw.apcb_ptbr;
pcpu->pc_current_asngen = 1;
}
/*
 * MD spin-lock entry hook: on the outermost acquisition, block
 * interrupts by raising the IPL and save the previous level; then
 * bump the per-thread nesting count and defer preemption.
 * The IPL is raised *before* the count is incremented so there is
 * never a window where md_spinlock_count != 0 with interrupts on.
 */
void
spinlock_enter(void)
{
struct thread *td;
td = curthread;
/* Outermost acquisition: save and raise the IPL. */
if (td->td_md.md_spinlock_count == 0)
td->td_md.md_saved_ipl = intr_disable();
td->td_md.md_spinlock_count++;
critical_enter();
}
/*
 * MD spin-lock exit hook: undo spinlock_enter() in reverse order —
 * leave the critical section, drop the nesting count, and restore
 * the saved IPL only when the outermost spin lock is released.
 */
void
spinlock_exit(void)
{
struct thread *td;
td = curthread;
critical_exit();
td->td_md.md_spinlock_count--;
/* Outermost release: restore the interrupt level saved on entry. */
if (td->td_md.md_spinlock_count == 0)
intr_restore(td->td_md.md_saved_ipl);
}

View File

@ -144,6 +144,10 @@ smp_init_secondary(void)
/* Clear userland thread pointer. */
alpha_pal_wrunique(0);
/* Initialize curthread. */
KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
PCPU_SET(curthread, PCPU_GET(idlethread));
/*
* Point interrupt/exception vectors to our own.
*/
@ -205,11 +209,24 @@ smp_init_secondary(void)
while (smp_started == 0)
; /* nothing */
/* ok, now grab sched_lock and enter the scheduler */
mtx_lock_spin(&sched_lock);
/*
* Correct spinlock nesting. The idle thread context that we are
* borrowing was created so that it would start out with a single
* spin lock (sched_lock) held in fork_trampoline(). Since we've
* explicitly acquired locks in this function, the nesting count
* is now 2 rather than 1. Since we are nested, calling
* spinlock_exit() will simply adjust the counts without allowing
* spin lock using code to interrupt us.
*/
spinlock_exit();
KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
binuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);
/* ok, now grab sched_lock and enter the scheduler */
mtx_lock_spin(&sched_lock);
cpu_throw(NULL, choosethread()); /* doesn't return */
panic("scheduler returned us to %s", __func__);

View File

@ -202,6 +202,10 @@ cpu_fork(td1, p2, td2, flags)
*/
td2->td_md.md_kernnest = 1;
#endif
/* Setup to release sched_lock in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
td2->td_md.md_saved_ipl = ALPHA_PSL_IPL_0;
}
/*
@ -319,6 +323,10 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
*/
td->td_md.md_kernnest = 1;
#endif
/* Setup to release sched_lock in fork_exit(). */
td->td_md.md_spinlock_count = 1;
td->td_md.md_saved_ipl = ALPHA_PSL_IPL_0;
}
void

View File

@ -1,88 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file contains prototypes and high-level inlines related to
* machine-level critical function support:
*
* cpu_critical_enter() - inlined
* cpu_critical_exit() - inlined
* cpu_critical_fork_exit() - prototyped
* related support functions residing
* in <arch>/<arch>/critical.c - prototyped
*
* $FreeBSD$
*/
#ifndef _MACHINE_CRITICAL_H_
#define _MACHINE_CRITICAL_H_
__BEGIN_DECLS
/*
* Prototypes - see <arch>/<arch>/critical.c
*/
void cpu_critical_fork_exit(void);
#ifdef __CC_SUPPORTS___INLINE
/*
* cpu_critical_enter:
*
* This routine is called from critical_enter() on the 0->1 transition
* of td_critnest, prior to it being incremented to 1.
*/
static __inline void
cpu_critical_enter(struct thread *td)
{
td->td_md.md_savecrit = intr_disable();
}
/*
* cpu_critical_exit:
*
* This routine is called from critical_exit() on a 1->0 transition
* of td_critnest, after it has been decremented to 0. We are
* exiting the last critical section.
*/
static __inline void
cpu_critical_exit(struct thread *td)
{
intr_restore(td->td_md.md_savecrit);
}
#else /* !__CC_SUPPORTS___INLINE */
void cpu_critical_enter(struct thread *td);
void cpu_critical_exit(struct thread *td);
#endif /* __CC_SUPPORTS___INLINE */
__END_DECLS
#endif /* !_MACHINE_CRITICAL_H_ */

View File

@ -52,7 +52,8 @@ struct mdthread {
u_int64_t md_hae; /* user HAE register value */
void *osf_sigtramp; /* user-level signal trampoline */
u_int md_kernnest; /* nesting level in the kernel */
register_t md_savecrit; /* save PSL for critical section */
register_t md_saved_ipl; /* save IPL for critical section */
u_int md_spinlock_count;
};
#define MDP_UAC_NOPRINT 0x0010 /* Don't print unaligned traps */

View File

@ -1,48 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <machine/critical.h>
#include <machine/psl.h>
/*
* cpu_critical_fork_exit() - cleanup after fork
*
* Enable interrupts in the saved copy of eflags.
*/
void
cpu_critical_fork_exit(void)
{
curthread->td_md.md_savecrit = read_rflags() | PSL_I;
}

View File

@ -1302,6 +1302,30 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
pcpu->pc_acpi_id = 0xffffffff;
}
/*
 * MD spin-lock entry hook: on the outermost acquisition, disable
 * interrupts and save the previous flags; then bump the per-thread
 * nesting count and defer preemption.
 * Interrupts are disabled *before* the count is incremented so there
 * is never a window where md_spinlock_count != 0 with interrupts on.
 */
void
spinlock_enter(void)
{
struct thread *td;
td = curthread;
/* Outermost acquisition: save and clear the interrupt flag. */
if (td->td_md.md_spinlock_count == 0)
td->td_md.md_saved_flags = intr_disable();
td->td_md.md_spinlock_count++;
critical_enter();
}
/*
 * MD spin-lock exit hook: undo spinlock_enter() in reverse order —
 * leave the critical section, drop the nesting count, and restore
 * the saved flags only when the outermost spin lock is released.
 */
void
spinlock_exit(void)
{
struct thread *td;
td = curthread;
critical_exit();
td->td_md.md_spinlock_count--;
/* Outermost release: restore the interrupt state saved on entry. */
if (td->td_md.md_spinlock_count == 0)
intr_restore(td->td_md.md_saved_flags);
}
/*
* Construct a PCB from a trapframe. This is called from kdb_trap() where
* we want to start a backtrace from the function that caused us to enter

View File

@ -452,6 +452,10 @@ init_secondary(void)
panic("cpuid mismatch! boom!!");
}
/* Initialize curthread. */
KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
PCPU_SET(curthread, PCPU_GET(idlethread));
mtx_lock_spin(&ap_boot_mtx);
/* Init local apic for irq's */
@ -490,6 +494,18 @@ init_secondary(void)
/* ok, now grab sched_lock and enter the scheduler */
mtx_lock_spin(&sched_lock);
/*
* Correct spinlock nesting. The idle thread context that we are
* borrowing was created so that it would start out with a single
* spin lock (sched_lock) held in fork_trampoline(). Since we've
* explicitly acquired locks in this function, the nesting count
* is now 2 rather than 1. Since we are nested, calling
* spinlock_exit() will simply adjust the counts without allowing
* spin lock using code to interrupt us.
*/
spinlock_exit();
KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
binuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);

View File

@ -163,6 +163,10 @@ cpu_fork(td1, p2, td2, flags)
* pcb2->pcb_[fg]sbase: cloned above
*/
/* Setup to release sched_lock in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
/*
* Now, cpu_switch() can schedule the new process.
* pcb_rsp is loaded pointing to the cpu_switch() stack frame
@ -294,6 +298,10 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
* pcb2->pcb_onfault: cloned above (always NULL here?).
* pcb2->pcb_[fg]sbase: cloned above
*/
/* Setup to release sched_lock in fork_exit(). */
td->td_md.md_spinlock_count = 1;
td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}
/*

View File

@ -1,87 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file contains prototypes and high-level inlines related to
* machine-level critical function support:
*
* cpu_critical_enter() - inlined
* cpu_critical_exit() - inlined
* cpu_critical_fork_exit() - prototyped
* related support functions residing
* in <arch>/<arch>/critical.c - prototyped
*
* $FreeBSD$
*/
#ifndef _MACHINE_CRITICAL_H_
#define _MACHINE_CRITICAL_H_
__BEGIN_DECLS
/*
* Prototypes - see <arch>/<arch>/critical.c
*/
void cpu_critical_fork_exit(void);
#ifdef __CC_SUPPORTS___INLINE
/*
* cpu_critical_enter:
*
* This routine is called from critical_enter() on the 0->1 transition
* of td_critnest, prior to it being incremented to 1.
*/
static __inline void
cpu_critical_enter(struct thread *td)
{
td->td_md.md_savecrit = intr_disable();
}
/*
* cpu_critical_exit:
*
* This routine is called from critical_exit() on a 1->0 transition
* of td_critnest, after it has been decremented to 0. We are
* exiting the last critical section.
*/
static __inline void
cpu_critical_exit(struct thread *td)
{
intr_restore(td->td_md.md_savecrit);
}
#else /* !__CC_SUPPORTS___INLINE */
void cpu_critical_enter(struct thread *td);
void cpu_critical_exit(struct thread *td);
#endif /* __CC_SUPPORTS___INLINE */
__END_DECLS
#endif /* !_MACHINE_CRITICAL_H_ */

View File

@ -37,7 +37,8 @@
* Machine-dependent part of the proc structure for AMD64.
*/
struct mdthread {
register_t md_savecrit;
int md_spinlock_count; /* (k) */
register_t md_saved_flags; /* (k) */
};
struct mdproc {

View File

@ -1,52 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <machine/clock.h>
#include <machine/critical.h>
/*
* cpu_critical_fork_exit() - cleanup after fork
*/
void
cpu_critical_fork_exit(void)
{
curthread->td_md.md_savecrit = __set_cpsr_c(0, 0) &~ I32_bit;
}

View File

@ -390,6 +390,30 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}
/*
 * MD spin-lock entry hook: on the outermost acquisition, mask both
 * IRQs and FIQs and save the previous CPSR; then bump the per-thread
 * nesting count and defer preemption.
 * Interrupts are masked *before* the count is incremented so there
 * is never a window where md_spinlock_count != 0 with interrupts on.
 */
void
spinlock_enter(void)
{
struct thread *td;
td = curthread;
/* Outermost acquisition: save CPSR and mask IRQ/FIQ. */
if (td->td_md.md_spinlock_count == 0)
td->td_md.md_saved_cspr = disable_interrupts(I32_bit | F32_bit);
td->td_md.md_spinlock_count++;
critical_enter();
}
/*
 * MD spin-lock exit hook: undo spinlock_enter() in reverse order —
 * leave the critical section, drop the nesting count, and restore
 * the saved CPSR only when the outermost spin lock is released.
 */
void
spinlock_exit(void)
{
struct thread *td;
td = curthread;
critical_exit();
td->td_md.md_spinlock_count--;
/* Outermost release: restore the interrupt masks saved on entry. */
if (td->td_md.md_spinlock_count == 0)
restore_interrupts(td->td_md.md_saved_cspr);
}
/*
* Clear registers on exec
*/

View File

@ -129,6 +129,10 @@ cpu_fork(register struct thread *td1, register struct proc *p2,
tf->tf_r0 = 0;
tf->tf_r1 = 0;
pcb2->un_32.pcb32_sp = (u_int)sf;
/* Setup to release sched_lock in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
td2->td_md.md_saved_cspr = 0;
}
void
@ -263,6 +267,10 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
td->td_pcb->un_32.pcb32_sp = (u_int)sf;
td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + td->td_kstack_pages
* PAGE_SIZE + USPACE_UNDEF_STACK_TOP;
/* Setup to release sched_lock in fork_exit(). */
td->td_md.md_spinlock_count = 1;
td->td_md.md_saved_cspr = 0;
}
/*

View File

@ -1,54 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file contains prototypes and high-level inlines related to
* machine-level critical function support:
*
* cpu_critical_enter() - inlined
* cpu_critical_exit() - inlined
* cpu_critical_fork_exit() - prototyped
* related support functions residing
* in <arch>/<arch>/critical.c - prototyped
*
* $FreeBSD$
*/
#ifndef MACHINE_CRITICAL_H
#define MACHINE_CRITICAL_H
void cpu_critical_fork_exit(void);
static __inline void
cpu_critical_enter(struct thread *td)
{
td->td_md.md_savecrit = disable_interrupts(I32_bit | F32_bit);
}
static __inline void
cpu_critical_exit(struct thread *td)
{
restore_interrupts(td->td_md.md_savecrit);
}
#endif

View File

@ -46,7 +46,8 @@ struct md_utrap {
};
struct mdthread {
register_t md_savecrit;
int md_spinlock_count; /* (k) */
register_t md_saved_cspr; /* (k) */
int md_ptrace_instr;
int md_ptrace_addr;
void *md_tp;

View File

@ -43,7 +43,6 @@ alpha/alpha/busspace.c standard
alpha/alpha/clock.c standard
alpha/alpha/clock_if.m standard
alpha/alpha/cpuconf.c standard
alpha/alpha/critical.c standard
alpha/alpha/db_disasm.c optional ddb
alpha/alpha/db_interface.c optional ddb
alpha/alpha/db_trace.c optional ddb

View File

@ -78,7 +78,6 @@ amd64/amd64/autoconf.c standard
amd64/amd64/bios.c standard
amd64/amd64/busdma_machdep.c standard
amd64/amd64/cpu_switch.S standard
amd64/amd64/critical.c standard
amd64/amd64/db_disasm.c optional ddb
amd64/amd64/db_interface.c optional ddb
amd64/amd64/db_trace.c optional ddb

View File

@ -12,7 +12,6 @@ arm/arm/cpufunc_asm.S standard
arm/arm/cpufunc_asm_sa1.S standard
arm/arm/cpufunc_asm_armv4.S standard
arm/arm/cpufunc_asm_sa11x0.S standard
arm/arm/critical.c standard
arm/arm/db_disasm.c optional ddb
arm/arm/db_interface.c optional ddb
arm/arm/db_trace.c optional ddb

View File

@ -241,7 +241,6 @@ i386/i386/autoconf.c standard
i386/i386/bios.c standard
i386/i386/bioscall.s standard
i386/i386/busdma_machdep.c standard
i386/i386/critical.c standard
i386/i386/db_disasm.c optional ddb
i386/i386/db_interface.c optional ddb
i386/i386/db_trace.c optional ddb

View File

@ -90,7 +90,6 @@ ia64/ia64/busdma_machdep.c standard
ia64/ia64/clock.c standard
ia64/ia64/clock_if.m standard
ia64/ia64/context.S standard
ia64/ia64/critical.c standard
ia64/ia64/db_interface.c optional ddb
ia64/ia64/db_trace.c optional ddb
ia64/ia64/dump_machdep.c standard

View File

@ -162,7 +162,6 @@ i386/i386/autoconf.c standard
i386/i386/bios.c standard
i386/i386/bioscall.s standard
i386/i386/busdma_machdep.c standard
i386/i386/critical.c standard
i386/i386/db_disasm.c optional ddb
i386/i386/db_interface.c optional ddb
i386/i386/db_trace.c optional ddb

View File

@ -36,7 +36,6 @@ powerpc/powerpc/clock.c standard
powerpc/powerpc/copyinout.c standard
powerpc/powerpc/copystr.c standard
powerpc/powerpc/cpu.c standard
powerpc/powerpc/critical.c standard
powerpc/powerpc/elf_machdep.c standard
powerpc/powerpc/fpu.c standard
powerpc/powerpc/fuswintr.c standard

View File

@ -73,7 +73,6 @@ sparc64/sparc64/cache.c standard
sparc64/sparc64/cheetah.c standard
sparc64/sparc64/clock.c standard
sparc64/sparc64/counter.c standard
sparc64/sparc64/critical.c standard
sparc64/sparc64/db_disasm.c optional ddb
sparc64/sparc64/db_interface.c optional ddb
sparc64/sparc64/db_trace.c optional ddb

View File

@ -71,6 +71,7 @@ struct pci_ids {
static struct pci_ids pci_ids[] = {
{ 0x100812b9, "3COM PCI FaxModem", 0x10 },
{ 0x2000131f, "CyberSerial (1-port) 16550", 0x10 },
{ 0x65851282, "Davicom 56PDV PCI Modem", 0x10 },
{ 0x01101407, "Koutech IOFLEX-2S PCI Dual Port Serial", 0x10 },
{ 0x01111407, "Koutech IOFLEX-2S PCI Dual Port Serial", 0x10 },
{ 0x048011c1, "Lucent kermit based PCI Modem", 0x14 },

View File

@ -1,48 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <machine/critical.h>
#include <machine/psl.h>
/*
* cpu_critical_fork_exit() - cleanup after fork
*
* Enable interrupts in the saved copy of eflags.
*/
void
cpu_critical_fork_exit(void)
{
curthread->td_md.md_savecrit = read_eflags() | PSL_I;
}

View File

@ -2219,6 +2219,30 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
pcpu->pc_acpi_id = 0xffffffff;
}
/*
 * MD spin-lock entry hook: on the outermost acquisition, disable
 * interrupts and save the previous flags; then bump the per-thread
 * nesting count and defer preemption.
 * Interrupts are disabled *before* the count is incremented so there
 * is never a window where md_spinlock_count != 0 with interrupts on.
 */
void
spinlock_enter(void)
{
struct thread *td;
td = curthread;
/* Outermost acquisition: save and clear the interrupt flag. */
if (td->td_md.md_spinlock_count == 0)
td->td_md.md_saved_flags = intr_disable();
td->td_md.md_spinlock_count++;
critical_enter();
}
/*
 * MD spin-lock exit hook: undo spinlock_enter() in reverse order —
 * leave the critical section, drop the nesting count, and restore
 * the saved flags only when the outermost spin lock is released.
 */
void
spinlock_exit(void)
{
struct thread *td;
td = curthread;
critical_exit();
td->td_md.md_spinlock_count--;
/* Outermost release: restore the interrupt state saved on entry. */
if (td->td_md.md_spinlock_count == 0)
intr_restore(td->td_md.md_saved_flags);
}
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL)

View File

@ -432,6 +432,7 @@ cpu_mp_announce(void)
void
init_secondary(void)
{
vm_offset_t addr;
int gsel_tss;
int x, myid;
u_int cr0;
@ -489,7 +490,8 @@ init_secondary(void)
/* BSP may have changed PTD while we were waiting */
invltlb();
pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);
for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
invlpg(addr);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
lidt(&r_idt);
@ -513,6 +515,10 @@ init_secondary(void)
panic("cpuid mismatch! boom!!");
}
/* Initialize curthread. */
KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
PCPU_SET(curthread, PCPU_GET(idlethread));
mtx_lock_spin(&ap_boot_mtx);
/* Init local apic for irq's */
@ -551,6 +557,18 @@ init_secondary(void)
/* ok, now grab sched_lock and enter the scheduler */
mtx_lock_spin(&sched_lock);
/*
* Correct spinlock nesting. The idle thread context that we are
* borrowing was created so that it would start out with a single
* spin lock (sched_lock) held in fork_trampoline(). Since we've
* explicitly acquired locks in this function, the nesting count
* is now 2 rather than 1. Since we are nested, calling
* spinlock_exit() will simply adjust the counts without allowing
* spin lock using code to interrupt us.
*/
spinlock_exit();
KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
binuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);

View File

@ -257,6 +257,10 @@ cpu_fork(td1, p2, td2, flags)
}
mtx_unlock_spin(&sched_lock);
/* Setup to release sched_lock in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
/*
* Now, cpu_switch() can schedule the new process.
* pcb_esp is loaded pointing to the cpu_switch() stack frame
@ -423,6 +427,10 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
* pcb2->pcb_ext: cleared below.
*/
pcb2->pcb_ext = NULL;
/* Setup to release sched_lock in fork_exit(). */
td->td_md.md_spinlock_count = 1;
td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}
/*

View File

@ -1,87 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file contains prototypes and high-level inlines related to
* machine-level critical function support:
*
* cpu_critical_enter() - inlined
* cpu_critical_exit() - inlined
* cpu_critical_fork_exit() - prototyped
* related support functions residing
* in <arch>/<arch>/critical.c - prototyped
*
* $FreeBSD$
*/
#ifndef _MACHINE_CRITICAL_H_
#define _MACHINE_CRITICAL_H_
__BEGIN_DECLS
/*
* Prototypes - see <arch>/<arch>/critical.c
*/
void cpu_critical_fork_exit(void);
#ifdef __CC_SUPPORTS___INLINE
/*
* cpu_critical_enter:
*
* This routine is called from critical_enter() on the 0->1 transition
* of td_critnest, prior to it being incremented to 1.
*/
static __inline void
cpu_critical_enter(struct thread *td)
{
td->td_md.md_savecrit = intr_disable();
}
/*
* cpu_critical_exit:
*
* This routine is called from critical_exit() on a 1->0 transition
* of td_critnest, after it has been decremented to 0. We are
* exiting the last critical section.
*/
static __inline void
cpu_critical_exit(struct thread *td)
{
intr_restore(td->td_md.md_savecrit);
}
#else /* !__CC_SUPPORTS___INLINE */
void cpu_critical_enter(struct thread *td);
void cpu_critical_exit(struct thread *td);
#endif /* __CC_SUPPORTS___INLINE */
__END_DECLS
#endif /* !_MACHINE_CRITICAL_H_ */

View File

@ -47,7 +47,8 @@ struct proc_ldt {
* Machine-dependent part of the proc structure for i386.
*/
struct mdthread {
register_t md_savecrit;
int md_spinlock_count; /* (k) */
register_t md_saved_flags; /* (k) */
};
struct mdproc {

View File

@ -1,55 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pcpu.h>
#include <sys/eventhandler.h> /* XX */
#include <sys/ktr.h> /* XX */
#include <sys/signalvar.h>
#include <sys/sysproto.h> /* XX */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <machine/critical.h>
/*
* cpu_critical_fork_exit() - cleanup after fork
*/
/*
 * cpu_critical_fork_exit() - cleanup after fork.
 *
 * Record an interrupt-enabled PSR image (PSR.i set) as the saved
 * critical-section state for the newly forked thread, so that leaving
 * its first critical section re-enables interrupts.
 */
void
cpu_critical_fork_exit(void)
{
	struct thread *td = curthread;

	td->td_md.md_savecrit = (ia64_get_psr() | IA64_PSR_I);
}

View File

@ -400,6 +400,30 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
pcpu->pc_acpi_id = cpuid;
}
/*
 * Enter a spinlock section.  On the outermost acquisition, disable
 * interrupts and stash the previous state so spinlock_exit() can
 * restore it; always defer preemption via critical_enter().
 */
void
spinlock_enter(void)
{
	struct thread *ctd = curthread;

	if (ctd->td_md.md_spinlock_count == 0)
		ctd->td_md.md_saved_intr = intr_disable();
	ctd->td_md.md_spinlock_count++;
	critical_enter();
}
/*
 * Leave a spinlock section.  Re-allow preemption first; when the
 * outermost section is exited, restore the interrupt state that
 * spinlock_enter() saved.
 */
void
spinlock_exit(void)
{
	struct thread *ctd = curthread;

	critical_exit();
	if (--ctd->td_md.md_spinlock_count == 0)
		intr_restore(ctd->td_md.md_saved_intr);
}
void
map_pal_code(void)
{

View File

@ -111,17 +111,33 @@ ia64_ap_startup(void)
ia64_mca_save_state(SAL_INFO_MCA);
ia64_mca_save_state(SAL_INFO_CMC);
/* Initialize curthread. */
KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
PCPU_SET(curthread, PCPU_GET(idlethread));
ap_awake++;
while (!smp_started)
/* spin */;
CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid));
mtx_lock_spin(&sched_lock);
/*
* Correct spinlock nesting. The idle thread context that we are
* borrowing was created so that it would start out with a single
* spin lock (sched_lock) held in fork_trampoline(). Since we've
* explicitly acquired locks in this function, the nesting count
* is now 2 rather than 1. Since we are nested, calling
* spinlock_exit() will simply adjust the counts without allowing
* spin lock using code to interrupt us.
*/
spinlock_exit();
KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
binuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);
mtx_lock_spin(&sched_lock);
ia64_set_tpr(0);
/* kick off the clock on this AP */

View File

@ -158,6 +158,10 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
pcb->pcb_special.sp = (uintptr_t)tf - 16;
pcb->pcb_special.rp = FDESC_FUNC(fork_trampoline);
cpu_set_fork_handler(td, (void (*)(void*))fork_return, td);
/* Setup to release sched_lock in fork_exit(). */
td->td_md.md_spinlock_count = 1;
td->td_md.md_saved_intr = 1;
}
void
@ -271,6 +275,10 @@ cpu_fork(struct thread *td1, struct proc *p2 __unused, struct thread *td2,
td2->td_pcb->pcb_special.sp = (uintptr_t)stackp - 16;
td2->td_pcb->pcb_special.rp = FDESC_FUNC(fork_trampoline);
cpu_set_fork_handler(td2, (void (*)(void*))fork_return, td2);
/* Setup to release sched_lock in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
td2->td_md.md_saved_intr = 1;
}
/*

View File

@ -1,89 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file contains prototypes and high-level inlines related to
* machine-level critical function support:
*
* cpu_critical_enter() - inlined
* cpu_critical_exit() - inlined
* cpu_critical_fork_exit() - prototyped
* related support functions residing
* in <arch>/<arch>/critical.c - prototyped
*
* $FreeBSD$
*/
#ifndef _MACHINE_CRITICAL_H_
#define _MACHINE_CRITICAL_H_
__BEGIN_DECLS
/*
* Prototypes - see <arch>/<arch>/critical.c
*/
void cpu_critical_fork_exit(void);
#ifdef __CC_SUPPORTS___INLINE
/*
* cpu_critical_enter:
*
* This routine is called from critical_enter() on the 0->1 transition
* of td_critnest, prior to it being incremented to 1.
*/
static __inline void
cpu_critical_enter(struct thread *td)
{
/* Disable interrupts, saving the prior state for cpu_critical_exit(). */
td->td_md.md_savecrit = intr_disable();
}
/*
* cpu_critical_exit:
*
* This routine is called from critical_exit() on a 1->0 transition
* of td_critnest, after it has been decremented to 0. We are
* exiting the last critical section.
*/
static __inline void
cpu_critical_exit(struct thread *td)
{
/* Restore the interrupt state saved by cpu_critical_enter(). */
intr_restore(td->td_md.md_savecrit);
}
#else /* !__CC_SUPPORTS___INLINE */
/* Out-of-line fallbacks for compilers without __inline support. */
void cpu_critical_enter(struct thread *td);
void cpu_critical_exit(struct thread *td);
#endif /* __CC_SUPPORTS___INLINE */
__END_DECLS
#endif /* !_MACHINE_CRITICAL_H_ */

View File

@ -30,7 +30,8 @@
#define _MACHINE_PROC_H_
struct mdthread {
register_t md_savecrit;
int md_spinlock_count; /* (k) */
register_t md_saved_intr; /* (k) */
};
struct mdproc {

View File

@ -72,7 +72,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <machine/critical.h>
#ifndef _SYS_SYSPROTO_H_
struct fork_args {
@ -764,7 +763,6 @@ fork_exit(callout, arg, frame)
sched_lock.mtx_lock = (uintptr_t)td;
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
cpu_critical_fork_exit();
CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",
td, td->td_sched, p->p_pid, p->p_comm);

View File

@ -65,10 +65,6 @@ idle_setup(void *dummy)
error = kthread_create(idle_proc, NULL, &p,
RFSTOPPED | RFHIGHPID, 0, "idle: cpu%d", pc->pc_cpuid);
pc->pc_idlethread = FIRST_THREAD_IN_PROC(p);
if (pc->pc_curthread == NULL) {
pc->pc_curthread = pc->pc_idlethread;
pc->pc_idlethread->td_critnest = 0;
}
#else
error = kthread_create(idle_proc, NULL, &p,
RFSTOPPED | RFHIGHPID, 0, "idle");

View File

@ -586,7 +586,7 @@ _mtx_lock_spin(struct mtx *m, struct thread *td, int opts, const char *file,
break;
/* Give interrupts a chance while we spin. */
critical_exit();
spinlock_exit();
while (m->mtx_lock != MTX_UNOWNED) {
if (i++ < 10000000) {
cpu_spinwait();
@ -605,7 +605,7 @@ _mtx_lock_spin(struct mtx *m, struct thread *td, int opts, const char *file,
}
cpu_spinwait();
}
critical_enter();
spinlock_enter();
}
if (LOCK_LOG_TEST(&m->mtx_object, opts))

View File

@ -63,7 +63,6 @@ __FBSDID("$FreeBSD$");
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <machine/critical.h>
MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");

View File

@ -105,7 +105,6 @@ __FBSDID("$FreeBSD$");
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif
@ -581,8 +580,6 @@ critical_enter(void)
struct thread *td;
td = curthread;
if (td->td_critnest == 0)
cpu_critical_enter(td);
td->td_critnest++;
CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
(long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
@ -610,7 +607,6 @@ critical_exit(void)
}
#endif
td->td_critnest = 0;
cpu_critical_exit(td);
} else {
td->td_critnest--;
}

View File

@ -2280,6 +2280,30 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
}
/*
 * Enter a spinlock section.  The first (outermost) acquisition disables
 * interrupts and records the previous flags; nested acquisitions only
 * bump the count.  Preemption is always deferred via critical_enter().
 */
void
spinlock_enter(void)
{
	struct thread *ctd = curthread;

	if (ctd->td_md.md_spinlock_count == 0)
		ctd->td_md.md_saved_flags = intr_disable();
	ctd->td_md.md_spinlock_count++;
	critical_enter();
}
/*
 * Leave a spinlock section.  Drop the critical section first; on the
 * outermost exit, restore the flags saved by spinlock_enter().
 */
void
spinlock_exit(void)
{
	struct thread *ctd = curthread;

	critical_exit();
	if (--ctd->td_md.md_spinlock_count == 0)
		intr_restore(ctd->td_md.md_saved_flags);
}
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL)

View File

@ -1,6 +0,0 @@
/*-
* This file is in the public domain.
*/
/* $FreeBSD$ */
#include <i386/critical.h>

View File

@ -2280,6 +2280,30 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
}
/*
 * Enter a spinlock section: disable interrupts on the outermost
 * acquisition (saving the prior flags for spinlock_exit()) and defer
 * preemption with critical_enter().
 */
void
spinlock_enter(void)
{
	struct thread *ctd = curthread;

	if (ctd->td_md.md_spinlock_count == 0)
		ctd->td_md.md_saved_flags = intr_disable();
	ctd->td_md.md_spinlock_count++;
	critical_enter();
}
/*
 * Leave a spinlock section: re-allow preemption, then restore the
 * saved interrupt flags once the outermost section is exited.
 */
void
spinlock_exit(void)
{
	struct thread *ctd = curthread;

	critical_exit();
	if (--ctd->td_md.md_spinlock_count == 0)
		intr_restore(ctd->td_md.md_saved_flags);
}
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL)

View File

@ -907,6 +907,30 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
}
/*
 * Enter a spinlock section.  On the outermost acquisition, disable
 * interrupts and save the previous MSR so spinlock_exit() can restore
 * it; preemption is deferred via critical_enter().
 */
void
spinlock_enter(void)
{
	struct thread *ctd = curthread;

	if (ctd->td_md.md_spinlock_count == 0)
		ctd->td_md.md_saved_msr = intr_disable();
	ctd->td_md.md_spinlock_count++;
	critical_enter();
}
/*
 * Leave a spinlock section.  Drop the critical section first; when the
 * outermost section is exited, restore the MSR saved by spinlock_enter().
 */
void
spinlock_exit(void)
{
	struct thread *ctd = curthread;

	critical_exit();
	if (--ctd->td_md.md_spinlock_count == 0)
		intr_restore(ctd->td_md.md_saved_msr);
}
/*
* kcopy(const void *src, void *dst, size_t len);
*

View File

@ -154,6 +154,10 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
pcb->pcb_lr = (register_t)fork_trampoline;
pcb->pcb_usr = kernel_pmap->pm_sr[USER_SR];
/* Setup to release sched_lock in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
td2->td_md.md_saved_msr = PSL_KERNSET;
/*
* Now cpu_switch() can schedule the new process.
*/
@ -322,6 +326,10 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
pcb2->pcb_sp = (register_t)cf;
pcb2->pcb_lr = (register_t)fork_trampoline;
pcb2->pcb_usr = kernel_pmap->pm_sr[USER_SR];
/* Setup to release sched_lock in fork_exit(). */
td->td_md.md_spinlock_count = 1;
td->td_md.md_saved_msr = PSL_KERNSET;
}
void

View File

@ -1,90 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file contains prototypes and high-level inlines related to
* machine-level critical function support:
*
* cpu_critical_enter() - inlined
* cpu_critical_exit() - inlined
* cpu_critical_fork_exit() - prototyped
* related support functions residing
* in <arch>/<arch>/critical.c - prototyped
*
* $FreeBSD$
*/
#ifndef _MACHINE_CRITICAL_H_
#define _MACHINE_CRITICAL_H_
__BEGIN_DECLS
/*
* Prototypes - see <arch>/<arch>/critical.c
*/
void cpu_critical_fork_exit(void);
#ifdef __CC_SUPPORTS___INLINE
/*
* cpu_critical_enter:
*
* This routine is called from critical_enter() on the 0->1 transition
* of td_critnest, prior to it being incremented to 1.
*/
static __inline void
cpu_critical_enter(struct thread *td)
{
td->td_md.md_savecrit = intr_disable();
}
/*
* cpu_critical_exit:
*
* This routine is called from critical_exit() on a 1->0 transition
* of td_critnest, after it has been decremented to 0. We are
* exiting the last critical section.
*/
static __inline void
cpu_critical_exit(struct thread *td)
{
intr_restore(td->td_md.md_savecrit);
}
#else /* !__CC_SUPPORTS___INLINE */
void cpu_critical_enter(struct thread *td);
void cpu_critical_exit(struct thread *td);
#endif /* __CC_SUPPORTS___INLINE */
__END_DECLS
#endif /* !_MACHINE_CRITICAL_H_ */

View File

@ -39,7 +39,8 @@
* Machine-dependent part of the proc structure
*/
struct mdthread {
register_t md_savecrit;
int md_spinlock_count; /* (k) */
register_t md_saved_msr; /* (k) */
};
struct mdproc {

View File

@ -1,44 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <machine/critical.h>
/*
* cpu_critical_fork_exit() - cleanup after fork
*/
void
cpu_critical_fork_exit(void)
{
struct thread *td = curthread;
td->td_md.md_savecrit = (mfmsr() | PSL_EE | PSL_RI);
}

View File

@ -907,6 +907,30 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
}
/*
 * Enter a spinlock section: the outermost acquisition disables
 * interrupts and records the prior MSR; nested acquisitions just bump
 * the per-thread count.  Preemption is deferred via critical_enter().
 */
void
spinlock_enter(void)
{
	struct thread *ctd = curthread;

	if (ctd->td_md.md_spinlock_count == 0)
		ctd->td_md.md_saved_msr = intr_disable();
	ctd->td_md.md_spinlock_count++;
	critical_enter();
}
/*
 * Leave a spinlock section: re-allow preemption, then restore the
 * saved MSR once the outermost section is exited.
 */
void
spinlock_exit(void)
{
	struct thread *ctd = curthread;

	critical_exit();
	if (--ctd->td_md.md_spinlock_count == 0)
		intr_restore(ctd->td_md.md_saved_msr);
}
/*
* kcopy(const void *src, void *dst, size_t len);
*

View File

@ -154,6 +154,10 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
pcb->pcb_lr = (register_t)fork_trampoline;
pcb->pcb_usr = kernel_pmap->pm_sr[USER_SR];
/* Setup to release sched_lock in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
td2->td_md.md_saved_msr = PSL_KERNSET;
/*
* Now cpu_switch() can schedule the new process.
*/
@ -322,6 +326,10 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
pcb2->pcb_sp = (register_t)cf;
pcb2->pcb_lr = (register_t)fork_trampoline;
pcb2->pcb_usr = kernel_pmap->pm_sr[USER_SR];
/* Setup to release sched_lock in fork_exit(). */
td->td_md.md_spinlock_count = 1;
td->td_md.md_saved_msr = PSL_KERNSET;
}
void

View File

@ -1,91 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file contains prototypes and high-level inlines related to
* machine-level critical function support:
*
* cpu_critical_enter() - inlined
* cpu_critical_exit() - inlined
* cpu_critical_fork_exit() - prototyped
* related support functions residing
* in <arch>/<arch>/critical.c - prototyped
*
* $FreeBSD$
*/
#ifndef _MACHINE_CRITICAL_H_
#define _MACHINE_CRITICAL_H_
__BEGIN_DECLS
/*
* Prototypes - see <arch>/<arch>/critical.c
*/
void cpu_critical_fork_exit(void);
#ifdef __CC_SUPPORTS___INLINE
/*
* cpu_critical_enter:
*
* This routine is called from critical_enter() on the 0->1 transition
* of td_critnest, prior to it being incremented to 1.
*/
static __inline void
cpu_critical_enter(struct thread *td)
{
critical_t pil;
/* Save the current processor interrupt level, then raise it to 14. */
pil = rdpr(pil);
wrpr(pil, 0, 14);
td->td_md.md_savecrit = pil;
}
/*
* cpu_critical_exit:
*
* This routine is called from critical_exit() on a 1->0 transition
* of td_critnest, after it has been decremented to 0. We are
* exiting the last critical section.
*/
static __inline void
cpu_critical_exit(struct thread *td)
{
/* Drop the PIL back to the level saved by cpu_critical_enter(). */
wrpr(pil, td->td_md.md_savecrit, 0);
}
#else /* !__CC_SUPPORTS___INLINE */
void cpu_critical_enter(struct thread *td);
void cpu_critical_exit(struct thread *td);
#endif /* __CC_SUPPORTS___INLINE */
__END_DECLS
#endif /* !_MACHINE_CRITICAL_H_ */

View File

@ -42,7 +42,8 @@ struct md_utrap {
};
struct mdthread {
register_t md_savecrit;
int md_spinlock_count; /* (k) */
register_t md_saved_pil; /* (k) */
};
struct mdproc {

View File

@ -1,55 +0,0 @@
/*-
* Copyright (c) 2002 Matthew Dillon. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pcpu.h>
#include <sys/eventhandler.h> /* XX */
#include <sys/ktr.h> /* XX */
#include <sys/signalvar.h>
#include <sys/sysproto.h> /* XX */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <machine/critical.h>
/*
* cpu_critical_fork_exit() - cleanup after fork
*/
/*
 * cpu_critical_fork_exit() - cleanup after fork.
 *
 * Reset the saved critical-section state for the newly forked thread.
 */
void
cpu_critical_fork_exit(void)
{
	struct thread *td = curthread;

	td->td_md.md_savecrit = 0;
}

View File

@ -232,6 +232,32 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
}
}
/*
 * Enter a spinlock section.  On the outermost acquisition, save the
 * current processor interrupt level and raise it to 14 so spin lock
 * users cannot interrupt us; preemption is deferred via critical_enter().
 */
void
spinlock_enter(void)
{
	struct thread *ctd = curthread;

	if (ctd->td_md.md_spinlock_count == 0) {
		ctd->td_md.md_saved_pil = rdpr(pil);
		wrpr(pil, 0, 14);
	}
	ctd->td_md.md_spinlock_count++;
	critical_enter();
}
/*
 * Leave a spinlock section.  Drop the critical section first; when the
 * outermost section is exited, lower the PIL back to the level saved
 * by spinlock_enter().
 */
void
spinlock_exit(void)
{
	struct thread *ctd = curthread;

	critical_exit();
	if (--ctd->td_md.md_spinlock_count == 0)
		wrpr(pil, ctd->td_md.md_saved_pil, 0);
}
unsigned
tick_get_timecount(struct timecounter *tc)
{

View File

@ -309,9 +309,7 @@ cpu_mp_unleash(void *v)
continue;
KASSERT(pc->pc_idlethread != NULL,
("cpu_mp_unleash: idlethread"));
KASSERT(pc->pc_curthread == pc->pc_idlethread,
("cpu_mp_unleash: curthread"));
pc->pc_curthread = pc->pc_idlethread;
pc->pc_curpcb = pc->pc_curthread->td_pcb;
for (i = 0; i < PCPU_PAGES; i++) {
va = pc->pc_addr + i * PAGE_SIZE;
@ -347,6 +345,7 @@ cpu_mp_bootstrap(struct pcpu *pc)
tick_start_ap();
smp_cpus++;
KASSERT(curthread != NULL, ("cpu_mp_bootstrap: curthread"));
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
@ -356,11 +355,11 @@ cpu_mp_bootstrap(struct pcpu *pc)
while (csa->csa_count != 0)
;
binuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);
/* ok, now grab sched_lock and enter the scheduler */
mtx_lock_spin(&sched_lock);
spinlock_exit();
binuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);
cpu_throw(NULL, choosethread()); /* doesn't return */
}

View File

@ -170,6 +170,10 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
fr->fr_local[2] = (u_long)tf;
pcb->pcb_pc = (u_long)fork_trampoline - 8;
pcb->pcb_sp = (u_long)fr - SPOFF;
/* Setup to release sched_lock in fork_exit(). */
td->td_md.md_spinlock_count = 1;
td->td_md.md_saved_pil = 0;
}
void
@ -281,6 +285,10 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
pcb2->pcb_sp = (u_long)fp - SPOFF;
pcb2->pcb_pc = (u_long)fork_trampoline - 8;
/* Setup to release sched_lock in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
td2->td_md.md_saved_pil = 0;
/*
* Now, cpu_switch() can schedule the new process.
*/

View File

@ -196,6 +196,8 @@ extern struct lock_class lock_class_mtx_sleep;
extern struct lock_class lock_class_mtx_spin;
extern struct lock_class lock_class_sx;
void spinlock_enter(void);
void spinlock_exit(void);
void witness_init(struct lock_object *);
void witness_destroy(struct lock_object *);
int witness_defineorder(struct lock_object *, struct lock_object *);

View File

@ -167,7 +167,7 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
#define _get_spin_lock(mp, tid, opts, file, line) do { \
struct thread *_tid = (tid); \
\
critical_enter(); \
spinlock_enter(); \
if (!_obtain_lock((mp), _tid)) { \
if ((mp)->mtx_lock == (uintptr_t)_tid) \
(mp)->mtx_recurse++; \
@ -179,7 +179,7 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
#define _get_spin_lock(mp, tid, opts, file, line) do { \
struct thread *_tid = (tid); \
\
critical_enter(); \
spinlock_enter(); \
if ((mp)->mtx_lock == (uintptr_t)_tid) \
(mp)->mtx_recurse++; \
else { \
@ -207,8 +207,8 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
* Since spin locks are not _too_ common, inlining this code is not too big
* a deal.
*
* Since we always perform a critical_enter() when attempting to acquire a
* spin lock, we need to always perform a matching critical_exit() when
* Since we always perform a spinlock_enter() when attempting to acquire a
* spin lock, we need to always perform a matching spinlock_exit() when
* releasing a spin lock. This includes the recursion cases.
*/
#ifndef _rel_spin_lock
@ -218,7 +218,7 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
(mp)->mtx_recurse--; \
else \
_release_lock_quick((mp)); \
critical_exit(); \
spinlock_exit(); \
} while (0)
#else /* SMP */
#define _rel_spin_lock(mp) do { \
@ -226,7 +226,7 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
(mp)->mtx_recurse--; \
else \
(mp)->mtx_lock = MTX_UNOWNED; \
critical_exit(); \
spinlock_exit(); \
} while (0)
#endif /* SMP */
#endif