/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD$
 */
/* Multiple-inclusion guard for the machine-dependent mutex header. */
#ifndef _MACHINE_MUTEX_H_
#define _MACHINE_MUTEX_H_

/*
 * The C half of this header; the !LOCORE branch below provides the
 * assembler macros for .S files (LOCORE is defined by locore/asm code).
 */
#ifndef LOCORE

#ifdef _KERNEL

/*
 * Debugging
 */
#ifdef MUTEX_DEBUG

/*
 * Assertion-message strings shared by the ASS_* macros below.
 * They are defined exactly once, in the translation unit that defines
 * _KERN_MUTEX_C_ (presumably kern_mutex.c — confirm), and declared
 * extern everywhere else to avoid duplicating the string data.
 */
#ifdef _KERN_MUTEX_C_
char STR_IEN[] = "ps & IPL != IPL_HIGH";
char STR_IDIS[] = "ps & IPL == IPL_HIGH";
char STR_SIEN[] = "mpp->mtx_saveintr != IPL_HIGH";
#else	/* _KERN_MUTEX_C_ */
extern char STR_IEN[];
extern char STR_IDIS[];
extern char STR_SIEN[];
#endif	/* _KERN_MUTEX_C_ */

#endif	/* MUTEX_DEBUG */

/*
 * Interrupt-state assertion macros built on MPASS2(), comparing the
 * current processor status IPL field (alpha_pal_rdps()) against
 * ALPHA_PSL_IPL_HIGH, or checking the IPL saved in the mutex.
 * NOTE(review): the polarity of ASS_IEN/ASS_IDIS relative to their
 * message strings looks surprising — confirm against the MI callers
 * before changing anything here.
 */
#define ASS_IEN		MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK)	\
			    == ALPHA_PSL_IPL_HIGH, STR_IEN)
#define ASS_IDIS	MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK)	\
			    != ALPHA_PSL_IPL_HIGH, STR_IDIS)
#define ASS_SIEN(mpp)	MPASS2((mpp)->mtx_saveintr			\
			    != ALPHA_PSL_IPL_HIGH, STR_SIEN)
/*
 * Tell the MI mutex code whether the current IPL permits blocking on
 * a sleep mutex.  NOTE(review): as written this is true when the IPL
 * equals ALPHA_PSL_IPL_HIGH — confirm the intended sense against the
 * machine-independent consumers of mtx_legal2block().
 */
#define mtx_legal2block()	\
	((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) == ALPHA_PSL_IPL_HIGH)

/*
 * Assembly macros (for internal use only)
 *--------------------------------------------------------------------------
 */

/* Stringify a macro argument (presumably for inline-asm operands;
 * no in-file user is visible in this chunk). */
#define _V(x)	__STRING(x)

/*
 * Get a spin lock, handle recursion inline (as the less common case).
 *
 * Raises the IPL to ALPHA_PSL_IPL_HIGH before attempting the lock.
 * Fast path: _obtain_lock() succeeds, a memory barrier orders the
 * acquisition, and the previous IPL is stashed in mtx_saveintr so the
 * release path can restore it.  Contended/recursive path: the saved
 * IPL is handed to mtx_enter_hard(), which deals with the slow path.
 */
#define _getlock_spin_block(mp, tid, type) do {				\
	u_int _ipl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);		\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _ipl);	\
	else {								\
		alpha_mb();						\
		(mp)->mtx_saveintr = _ipl;				\
	}								\
} while (0)

#endif	/* _KERNEL */

#else	/* !LOCORE */

/*
 * Simple assembly macros to get and release non-recursive spin locks
 *
 * MTX_ENTER: raise the IPL via the swpipl PALcode call, then spin in a
 * ldq_l/stq_c (load-locked/store-conditional) loop until the lock word
 * goes from MTX_UNOWNED to the current proc.  The prior IPL (returned
 * by swpipl in v0 — PALcode convention, confirm) is stored in the
 * mutex's saveintr field for MTX_EXIT.  Clobbers a0 and a1.
 */
#define MTX_ENTER(lck)				\
	ldiq	a0, ALPHA_PSL_IPL_HIGH;		\
	call_pal PAL_OSF1_swpipl;		\
1:	ldq_l	a0, lck+MTX_LOCK;		\
	cmpeq	a0, MTX_UNOWNED, a1;		\
	beq	a1, 1b;				\
	ldq	a0, PC_CURPROC(globalp);	\
	stq_c	a0, lck+MTX_LOCK;		\
	beq	a0, 1b;				\
	mb;					\
	stl	v0, lck+MTX_SAVEINTR
/*
 * MTX_EXIT: release a spin lock taken with MTX_ENTER.  The memory
 * barrier orders the critical section before the release store, the
 * lock word is reset to MTX_UNOWNED, and the IPL saved at enter time
 * is restored through the swpipl PALcode call.  Clobbers a0.
 */
#define MTX_EXIT(lck)				\
	mb;					\
	ldiq	a0, MTX_UNOWNED;		\
	stq	a0, lck+MTX_LOCK;		\
	ldl	a0, lck+MTX_SAVEINTR;	\
	call_pal PAL_OSF1_swpipl

#endif	/* !LOCORE */

/* Comment fixed to match the actual guard macro (_MACHINE_MUTEX_H_);
 * it previously read __MACHINE_MUTEX_H. */
#endif	/* _MACHINE_MUTEX_H_ */