From c0016138302575371346a4f1f21c21640b64ef0e Mon Sep 17 00:00:00 2001
From: peter
Date: Thu, 21 Jul 2005 22:35:02 +0000
Subject: [PATCH] Like on i386, bypass lock prefix for atomic ops on !SMP kernels.

---
 sys/amd64/include/atomic.h | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h
index c737a22e9b1a..be3a20d6a131 100644
--- a/sys/amd64/include/atomic.h
+++ b/sys/amd64/include/atomic.h
@@ -152,6 +152,31 @@ atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
 	return (res);
 }
 
+#if defined(_KERNEL) && !defined(SMP)
+
+/*
+ * We assume that a = b will do atomic loads and stores.  However, on a
+ * PentiumPro or higher, reads may pass writes, so for that case we have
+ * to use a serializing instruction (i.e. with LOCK) to do the load in
+ * SMP kernels.  For UP kernels, however, the cache of the single processor
+ * is always consistent, so we don't need any memory barriers.
+ */
+#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
+static __inline u_##TYPE				\
+atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
+{							\
+	return (*p);					\
+}							\
+							\
+static __inline void					\
+atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{							\
+	*p = v;						\
+}							\
+struct __hack
+
+#else /* defined(SMP) */
+
 #define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
 static __inline u_##TYPE				\
 atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
@@ -179,6 +204,8 @@ atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
 }							\
 struct __hack
 
+#endif /* SMP */
+
 #endif /* KLD_MODULE || !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */
 
 ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v);
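
For reference, a minimal sketch of what the new !SMP branch expands to for one
type.  The int instantiation shown here is illustrative; the header's actual
ATOMIC_STORE_LOAD(TYPE, LOP, SOP) invocations are outside this hunk, and the
LOP/SOP arguments are simply unused in the UP case.

/*
 * Hypothetical expansion of ATOMIC_STORE_LOAD(int, ...) when _KERNEL is
 * defined and SMP is not.
 */
static __inline u_int
atomic_load_acq_int(volatile u_int *p)
{
	return (*p);		/* plain volatile load, no LOCK'd instruction */
}

static __inline void
atomic_store_rel_int(volatile u_int *p, u_int v)
{
	*p = v;			/* plain volatile store */
}

On a uniprocessor there is no other CPU that could observe loads passing
writes, so the acquire load and release store degenerate to plain volatile
accesses and the cost of the serializing LOCK-prefixed instruction is avoided,
matching what the i386 atomic.h already does.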