/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD$
 */
#ifndef _MACHINE_MUTEX_H_
#define	_MACHINE_MUTEX_H_

#ifndef LOCORE

#ifdef _KERNEL

/* PSL_I and friends, used by the interrupt-state assertions below. */
#include <machine/psl.h>

/* Global locks */
extern struct mtx	clock_lock;
/*
 * Debugging
 *
 * The assertion strings below are defined exactly once (when this header
 * is included from kern_mutex.c, which defines _KERN_MUTEX_C_) and are
 * referenced as externs everywhere else, so the MPASS2() macros can share
 * one copy of each message.
 */
#ifdef MUTEX_DEBUG

#ifdef _KERN_MUTEX_C_
char	STR_IEN[] = "fl & PSL_I";
char	STR_IDIS[] = "!(fl & PSL_I)";
char	STR_SIEN[] = "mpp->mtx_saveintr & PSL_I";
#else	/* _KERN_MUTEX_C_ */
extern	char STR_IEN[];
extern	char STR_IDIS[];
extern	char STR_SIEN[];
#endif	/* _KERN_MUTEX_C_ */
#endif	/* MUTEX_DEBUG */
/*
 * Interrupt-state assertions: interrupts enabled, interrupts disabled, and
 * "interrupts were enabled when this spin mutex saved them" respectively.
 */
#define	ASS_IEN		MPASS2(read_eflags() & PSL_I, STR_IEN)
#define	ASS_IDIS	MPASS2((read_eflags() & PSL_I) == 0, STR_IDIS)
#define	ASS_SIEN(mpp)	MPASS2((mpp)->mtx_saveintr & PSL_I, STR_SIEN)

/* Blocking is only legal while interrupts are enabled. */
#define	mtx_legal2block()	(read_eflags() & PSL_I)

/* Actually release mtx_lock quickly assuming that we own it */
#define	_release_lock_quick(mp)						\
	atomic_cmpset_rel_int(&(mp)->mtx_lock, (mp)->mtx_lock, MTX_UNOWNED)
/*
 * Assembly macros (for internal use only)
 *------------------------------------------------------------------------------
 */

#define	_V(x)	__STRING(x)

#if 0
/* #ifndef I386_CPU */

/*
 * For 486 and newer processors.
 *
 * NOTE(review): this whole section is currently disabled with "#if 0";
 * the matching "#endif" below still carries the historical I386_CPU tag.
 */

/* Get a sleep lock, deal with recursion inline. */
#define	_getlock_sleep(mtxp, tid, type) ({				\
	int	_res;							\
									\
	__asm __volatile (						\
"	movl	$" _V(MTX_UNOWNED) ",%%eax;"	/* Unowned cookie */	\
"	" MPLOCKED ""							\
"	cmpxchgl %3,%1;"			/* Try */		\
"	jz	1f;"				/* Got it */		\
"	andl	$" _V(MTX_FLAGMASK) ",%%eax;"	/* turn off spec bits */ \
"	cmpl	%%eax,%3;"			/* already have it? */	\
"	je	2f;"				/* yes, recurse */	\
"	pushl	%4;"							\
"	pushl	%5;"							\
"	call	mtx_enter_hard;"					\
"	addl	$8,%%esp;"						\
"	jmp	1f;"							\
"2:"									\
"	" MPLOCKED ""							\
"	orl	$" _V(MTX_RECURSE) ",%1;"				\
"	incl	%2;"							\
"1:"									\
"#	getlock_sleep"							\
	: "=&a" (_res),			/* 0 (dummy output) */		\
	  "+m" (mtxp->mtx_lock),	/* 1 */				\
	  "+m" (mtxp->mtx_recurse)	/* 2 */				\
	: "r" (tid),			/* 3 (input) */			\
	  "gi" (type),			/* 4 */				\
	  "g" (mtxp)			/* 5 */				\
	: "memory", "ecx", "edx"	/* used */ );			\
})

/* Get a spin lock, handle recursion inline (as the less common case) */
#define	_getlock_spin_block(mtxp, tid, type) ({				\
	int	_res;							\
									\
	__asm __volatile (						\
"	pushfl;"							\
"	cli;"								\
"	movl	$" _V(MTX_UNOWNED) ",%%eax;"	/* Unowned cookie */	\
"	" MPLOCKED ""							\
"	cmpxchgl %3,%1;"			/* Try */		\
"	jz	2f;"				/* got it */		\
"	pushl	%4;"							\
"	pushl	%5;"							\
"	call	mtx_enter_hard;" /* mtx_enter_hard(mtxp, type, oflags) */ \
"	addl	$12,%%esp;"						\
"	jmp	1f;"							\
"2:	popl	%2;"				/* save flags */	\
"1:"									\
"#	getlock_spin_block"						\
	: "=&a" (_res),			/* 0 (dummy output) */		\
	  "+m" (mtxp->mtx_lock),	/* 1 */				\
	  "=m" (mtxp->mtx_saveintr)	/* 2 */				\
	: "r" (tid),			/* 3 (input) */			\
	  "gi" (type),			/* 4 */				\
	  "g" (mtxp)			/* 5 */				\
	: "memory", "ecx", "edx"	/* used */ );			\
})

/*
 * Get a lock without any recursion handling. Calls the hard enter function if
 * we can't get it inline.
 */
#define	_getlock_norecurse(mtxp, tid, type) ({				\
	int	_res;							\
									\
	__asm __volatile (						\
"	movl	$" _V(MTX_UNOWNED) ",%%eax;"	/* Unowned cookie */	\
"	" MPLOCKED ""							\
"	cmpxchgl %2,%1;"			/* Try */		\
"	jz	1f;"				/* got it */		\
"	pushl	%3;"							\
"	pushl	%4;"							\
"	call	mtx_enter_hard;"	/* mtx_enter_hard(mtxp, type) */ \
"	addl	$8,%%esp;"						\
"1:"									\
"#	getlock_norecurse"						\
	: "=&a" (_res),			/* 0 (dummy output) */		\
	  "+m" (mtxp->mtx_lock)		/* 1 */				\
	: "r" (tid),			/* 2 (input) */			\
	  "gi" (type),			/* 3 */				\
	  "g" (mtxp)			/* 4 */				\
	: "memory", "ecx", "edx"	/* used */ );			\
})

/*
 * Release a sleep lock assuming we haven't recursed on it, recursion is handled
 * in the hard function.
 */
#define	_exitlock_norecurse(mtxp, tid, type) ({				\
	int	_tid = (int)(tid);					\
									\
	__asm __volatile (						\
"	" MPLOCKED ""							\
"	cmpxchgl %4,%0;"			/* try easy rel */	\
"	jz	1f;"				/* released! */		\
"	pushl	%2;"							\
"	pushl	%3;"							\
"	call	mtx_exit_hard;"						\
"	addl	$8,%%esp;"						\
"1:"									\
"#	exitlock_norecurse"						\
	: "+m" (mtxp->mtx_lock),	/* 0 */				\
	  "+a" (_tid)			/* 1 */				\
	: "gi" (type),			/* 2 (input) */			\
	  "g" (mtxp),			/* 3 */				\
	  "r" (MTX_UNOWNED)		/* 4 */				\
	: "memory", "ecx", "edx"	/* used */ );			\
})

/*
 * Release a sleep lock when its likely we recursed (the code to
 * deal with simple recursion is inline).
 */
#define	_exitlock(mtxp, tid, type) ({					\
	int	_tid = (int)(tid);					\
									\
	__asm __volatile (						\
"	" MPLOCKED ""							\
"	cmpxchgl %5,%0;"			/* try easy rel */	\
"	jz	1f;"				/* released! */		\
"	testl	$" _V(MTX_RECURSE) ",%%eax;"	/* recursed? */		\
"	jnz	3f;"				/* handle recursion */	\
	/* Lock not recursed and contested: do the hard way */		\
"	pushl	%3;"							\
"	pushl	%4;"							\
"	call	mtx_exit_hard;"		/* mtx_exit_hard(mtxp,type) */	\
"	addl	$8,%%esp;"						\
"	jmp	1f;"							\
	/* lock recursed, lower recursion level */			\
"3:	decl	%1;"				/* one less level */	\
"	jnz	1f;"				/* still recursed, done */ \
"	lock; andl $~" _V(MTX_RECURSE) ",%0;"	/* turn off recurse flag */ \
"1:"									\
"#	exitlock"							\
	: "+m" (mtxp->mtx_lock),	/* 0 */				\
	  "+m" (mtxp->mtx_recurse),	/* 1 */				\
	  "+a" (_tid)			/* 2 */				\
	: "gi" (type),			/* 3 (input) */			\
	  "g" (mtxp),			/* 4 */				\
	  "r" (MTX_UNOWNED)		/* 5 */				\
	: "memory", "ecx", "edx"	/* used */ );			\
})

/*
 * Release a spin lock (with possible recursion).
 *
 * We use cmpxchgl to clear lock (instead of simple store) to flush posting
 * buffers and make the change visible to other CPU's.
 */
#define	_exitlock_spin(mtxp) ({						\
	int	_res;							\
									\
	__asm __volatile (						\
"	movl	%1,%2;"							\
"	decl	%2;"							\
"	js	1f;"							\
"	movl	%2,%1;"							\
"	jmp	2f;"							\
"1:	movl	%0,%2;"							\
"	movl	$ " _V(MTX_UNOWNED) ",%%ecx;"				\
"	pushl	%3;"							\
"	" MPLOCKED ""							\
"	cmpxchgl %%ecx,%0;"						\
"	popfl;"								\
"2:"									\
"#	exitlock_spin"							\
	: "+m" (mtxp->mtx_lock),	/* 0 */				\
	  "+m" (mtxp->mtx_recurse),	/* 1 */				\
	  "=&a" (_res)			/* 2 */				\
	: "g" (mtxp->mtx_saveintr)	/* 3 */				\
	: "memory", "ecx"		/* used */ );			\
})

#endif	/* I386_CPU */
#endif	/* _KERNEL */

#else	/* !LOCORE */
/*
 * Simple assembly macros to get and release spin locks.
 *
 * WITNESS_ENTER/WITNESS_EXIT call into the witness code (with MTX_SPIN and
 * two zero arguments) only when the lock's debug structure actually has a
 * witness attached; otherwise they fall straight through to the local
 * label "1:".  Both expect a scratch register in "reg".
 */

#ifdef WITNESS
#define	WITNESS_ENTER(lck, reg)						\
	movl	lck+MTX_DEBUG,reg;					\
	cmpl	$0,MTXD_WITNESS(reg);					\
	jz	1f;							\
	pushl	$0;							\
	pushl	$0;							\
	pushl	$MTX_SPIN;						\
	pushl	$lck;							\
	call	witness_enter;						\
	addl	$0x10,%esp;						\
1:

#define	WITNESS_EXIT(lck, reg)						\
	movl	lck+MTX_DEBUG,reg;					\
	cmpl	$0,MTXD_WITNESS(reg);					\
	jz	1f;							\
	pushl	$0;							\
	pushl	$0;							\
	pushl	$MTX_SPIN;						\
	pushl	$lck;							\
	call	witness_exit;						\
	addl	$0x10,%esp;						\
1:

#else
#define	WITNESS_ENTER(lck, reg)
#define	WITNESS_EXIT(lck, reg)
#endif
#if defined(I386_CPU)

/*
 * 386 has no cmpxchg: take the lock by disabling interrupts and storing
 * curproc directly, remembering the previous interrupt state in the mutex.
 */
#define	MTX_ENTER(lck, reg)						\
	movl	_curproc,reg;						\
	pushfl;								\
	cli;								\
	movl	reg,lck+MTX_LOCK;					\
	popl	lck+MTX_SAVEINTR;					\
	WITNESS_ENTER(lck, reg)

#define	MTX_EXIT(lck, reg)						\
	WITNESS_EXIT(lck, reg)						\
	pushl	lck+MTX_SAVEINTR;					\
	movl	$ MTX_UNOWNED,lck+MTX_LOCK;				\
	popfl;

#else	/* I386_CPU */
/*
 * 486+ variant: spin with cmpxchg (interrupts disabled) until the lock is
 * moved from MTX_UNOWNED to curproc, then restore the saved interrupt state
 * into the mutex.
 */
#define	MTX_ENTER(lck, reg)						\
	movl	_curproc,reg;						\
	pushfl;								\
	cli;								\
9:	movl	$ MTX_UNOWNED,%eax;					\
	MPLOCKED							\
	cmpxchgl reg,lck+MTX_LOCK;					\
	jnz	9b;							\
	popl	lck+MTX_SAVEINTR;					\
	WITNESS_ENTER(lck, reg)
2000-09-07 01:33:02 +00:00
|
|
|
|
|
|
|
/* Must use locked bus op (cmpxchg) when setting to unowned (barrier) */
|
2000-12-08 08:49:36 +00:00
|
|
|
#define MTX_EXIT(lck, reg) \
|
2000-12-12 03:49:58 +00:00
|
|
|
WITNESS_EXIT(lck, reg) \
|
- Make the mutex code almost completely machine independent. This greatly
  reduces the maintenance load for the mutex code. The only MD portions
of the mutex code are in machine/mutex.h now, which include the assembly
macros for handling mutexes as well as optionally overriding the mutex
micro-operations. For example, we use optimized micro-ops on the x86
platform #ifndef I386_CPU.
- Change the behavior of the SMP_DEBUG kernel option. In the new code,
mtx_assert() only depends on INVARIANTS, allowing other kernel developers
  to have working mutex assertions without having to include all of the
mutex debugging code. The SMP_DEBUG kernel option has been renamed to
MUTEX_DEBUG and now just controls extra mutex debugging code.
- Abolish the ugly mtx_f hack. Instead, we dynamically allocate
  separate mtx_debug structures on the fly in mtx_init, except for mutexes
that are initiated very early in the boot process. These mutexes
are declared using a special MUTEX_DECLARE() macro, and use a new
flag MTX_COLD when calling mtx_init. This is still somewhat hackish,
but it is less evil than the mtx_f filler struct, and the mtx struct is
now the same size with and without mutex debugging code.
- Add some micro-micro-operation macros for doing the actual atomic
operations on the mutex mtx_lock field to make it easier for other archs
to override/optimize mutex ops if needed. These new tiny ops also clean
up the code in some places by replacing long atomic operation function
calls that spanned 2-3 lines with a short 1-line macro call.
- Don't call mi_switch() from mtx_enter_hard() when we block while trying
to obtain a sleep mutex. Calling mi_switch() would bogusly release
Giant before switching to the next process. Instead, inline most of the
code from mi_switch() in the mtx_enter_hard() function. Note that when
we finally kill Giant we can back this out and go back to calling
mi_switch().
2000-10-20 07:26:37 +00:00
|
|
|
pushl lck+MTX_SAVEINTR; \
|
2000-09-07 01:33:02 +00:00
|
|
|
movl lck+MTX_LOCK,%eax; \
|
|
|
|
movl $ MTX_UNOWNED,reg; \
|
|
|
|
MPLOCKED \
|
|
|
|
cmpxchgl reg,lck+MTX_LOCK; \
|
2000-12-08 08:49:36 +00:00
|
|
|
popfl;
|
2000-09-07 01:33:02 +00:00
|
|
|
|
2000-12-08 08:49:36 +00:00
|
|
|
#define MTX_ENTER_WITH_RECURSION(lck, reg) \
|
2000-12-04 12:38:03 +00:00
|
|
|
pushf; \
|
|
|
|
cli; \
|
2000-09-07 01:33:02 +00:00
|
|
|
movl lck+MTX_LOCK,%eax; \
|
2000-09-24 23:34:21 +00:00
|
|
|
cmpl _curproc,%eax; \
|
2000-12-04 12:38:03 +00:00
|
|
|
jne 7f; \
|
|
|
|
incl lck+MTX_RECURSE; \
|
2000-09-07 01:33:02 +00:00
|
|
|
jmp 8f; \
|
2000-12-04 12:38:03 +00:00
|
|
|
7: movl $ MTX_UNOWNED,%eax; \
|
2000-09-07 01:33:02 +00:00
|
|
|
MPLOCKED \
|
|
|
|
cmpxchgl reg,lck+MTX_LOCK; \
|
2000-12-08 19:53:37 +00:00
|
|
|
jnz 7b; \
|
- Make the mutex code almost completely machine independent. This greatly
  reduces the maintenance load for the mutex code. The only MD portions
of the mutex code are in machine/mutex.h now, which include the assembly
macros for handling mutexes as well as optionally overriding the mutex
micro-operations. For example, we use optimized micro-ops on the x86
platform #ifndef I386_CPU.
- Change the behavior of the SMP_DEBUG kernel option. In the new code,
mtx_assert() only depends on INVARIANTS, allowing other kernel developers
  to have working mutex assertions without having to include all of the
mutex debugging code. The SMP_DEBUG kernel option has been renamed to
MUTEX_DEBUG and now just controls extra mutex debugging code.
- Abolish the ugly mtx_f hack. Instead, we dynamically allocate
  separate mtx_debug structures on the fly in mtx_init, except for mutexes
that are initiated very early in the boot process. These mutexes
are declared using a special MUTEX_DECLARE() macro, and use a new
flag MTX_COLD when calling mtx_init. This is still somewhat hackish,
but it is less evil than the mtx_f filler struct, and the mtx struct is
now the same size with and without mutex debugging code.
- Add some micro-micro-operation macros for doing the actual atomic
operations on the mutex mtx_lock field to make it easier for other archs
to override/optimize mutex ops if needed. These new tiny ops also clean
up the code in some places by replacing long atomic operation function
calls that spanned 2-3 lines with a short 1-line macro call.
- Don't call mi_switch() from mtx_enter_hard() when we block while trying
to obtain a sleep mutex. Calling mi_switch() would bogusly release
Giant before switching to the next process. Instead, inline most of the
code from mi_switch() in the mtx_enter_hard() function. Note that when
we finally kill Giant we can back this out and go back to calling
mi_switch().
2000-10-20 07:26:37 +00:00
|
|
|
popl lck+MTX_SAVEINTR; \
|
2000-12-04 12:38:03 +00:00
|
|
|
jmp 9f; \
|
2000-09-24 23:34:21 +00:00
|
|
|
8: add $4,%esp; \
|
2000-12-12 03:49:58 +00:00
|
|
|
9: WITNESS_ENTER(lck, reg)
|
2000-09-07 01:33:02 +00:00
|
|
|
|
2000-12-08 08:49:36 +00:00
|
|
|
#define MTX_EXIT_WITH_RECURSION(lck, reg) \
|
2000-12-12 03:49:58 +00:00
|
|
|
WITNESS_EXIT(lck, reg) \
|
2000-09-24 23:34:21 +00:00
|
|
|
movl lck+MTX_RECURSE,%eax; \
|
2000-09-22 04:30:33 +00:00
|
|
|
decl %eax; \
|
2000-12-04 12:38:03 +00:00
|
|
|
js 8f; \
|
2000-09-24 23:34:21 +00:00
|
|
|
movl %eax,lck+MTX_RECURSE; \
|
2000-12-04 12:38:03 +00:00
|
|
|
jmp 9f; \
|
|
|
|
8: pushl lck+MTX_SAVEINTR; \
|
2000-11-13 18:39:18 +00:00
|
|
|
movl lck+MTX_LOCK,%eax; \
|
2000-09-07 01:33:02 +00:00
|
|
|
movl $ MTX_UNOWNED,reg; \
|
|
|
|
MPLOCKED \
|
|
|
|
cmpxchgl reg,lck+MTX_LOCK; \
|
2000-12-04 12:38:03 +00:00
|
|
|
popf; \
|
|
|
|
9:
|
2000-09-07 01:33:02 +00:00
|
|
|
|
2000-09-08 21:48:06 +00:00
|
|
|
#endif /* I386_CPU */
|
|
|
|
#endif /* !LOCORE */
|
|
|
|
#endif /* __MACHINE_MUTEX_H */
|