- Fix memory barriers in atomic operations so that the barriers are always
  "inside" of locked regions.  That is, an acquire atomic operation will
  always enforce a memory barrier after the atomic operation and a release
  operation will always enforce a memory barrier before the atomic
  operation (see the sketch after this list).
- Explicitly use 'mb' instead of 'wmb' in release atomic operations.  The
  'wmb' memory barrier is not strong enough to guarantee coherence with
  other processors.  This is effectively a nop since alpha_wmb() actually
  performs a 'mb' and not a 'wmb', but I wanted the code to be more
  correct since at some point in the future alpha_wmb()'s implementation
  may switch to being a real 'wmb'.
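
Editor's note: the following is a minimal, portable C11 sketch of the
ordering this commit adopts; it is not the FreeBSD alpha code.  mb() is a
stand-in for alpha_mb(), and lock_acquire()/lock_release() are illustrative
names only.  The acquire side issues its barrier after the atomic operation
and the release side issues a full barrier before it, so both barriers land
inside the locked region.

    #include <stdatomic.h>

    /* Stand-in for alpha_mb(): a full (read and write) memory barrier. */
    static inline void
    mb(void)
    {
            atomic_thread_fence(memory_order_seq_cst);
    }

    static inline void
    lock_acquire(atomic_int *l)
    {
            /* Spin until the lock word is ours. */
            while (atomic_exchange_explicit(l, 1, memory_order_relaxed) != 0)
                    ;       /* spin */
            /*
             * Acquire: barrier *after* the atomic operation, so loads and
             * stores in the critical section cannot be hoisted above the
             * point where the lock is taken.
             */
            mb();
    }

    static inline void
    lock_release(atomic_int *l)
    {
            /*
             * Release: full barrier *before* the atomic operation (a
             * store-only 'wmb' would only order stores), so the critical
             * section cannot sink below the unlock.
             */
            mb();
            atomic_store_explicit(l, 0, memory_order_relaxed);
    }

With the old placement (barrier before the acquire operation and after the
release operation), accesses from the critical section could drift past the
lock word update itself, which is what the message above means by the
barriers ending up outside the locked region.
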
John Baldwin 2001-04-17 02:50:05 +00:00
parent 85eba1489b
commit 2bec909c3d
Notes (svn2git 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=75565

@@ -256,29 +256,29 @@ static __inline u_int64_t atomic_readandclear_64(volatile u_int64_t *addr)
 static __inline void \
 atomic_##NAME##_acq_##WIDTH(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
 { \
-	alpha_mb(); \
 	atomic_##NAME##_##WIDTH(p, v); \
+	/* alpha_mb(); */ \
 } \
 \
 static __inline void \
 atomic_##NAME##_rel_##WIDTH(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
 { \
-	atomic_##NAME##_##WIDTH(p, v); \
-	alpha_wmb(); \
-} \
-\
-static __inline void \
-atomic_##NAME##_acq_##TYPE(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
-{ \
 	alpha_mb(); \
 	atomic_##NAME##_##WIDTH(p, v); \
 } \
 \
 static __inline void \
-atomic_##NAME##_rel_##TYPE(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
+atomic_##NAME##_acq_##TYPE(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
 { \
 	atomic_##NAME##_##WIDTH(p, v); \
-	alpha_wmb(); \
+	/* alpha_mb(); */ \
+} \
+\
+static __inline void \
+atomic_##NAME##_rel_##TYPE(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
+{ \
+	alpha_mb(); \
+	atomic_##NAME##_##WIDTH(p, v); \
 }
 ATOMIC_ACQ_REL(set, 8, char)
@@ -307,28 +307,34 @@ ATOMIC_ACQ_REL(subtract, 64, long)
 static __inline u_##TYPE \
 atomic_load_acq_##WIDTH(volatile u_##TYPE *p) \
 { \
+	u_##TYPE v; \
+\
+	v = *p; \
 	alpha_mb(); \
-	return (*p); \
+	return (v); \
 } \
 \
 static __inline void \
 atomic_store_rel_##WIDTH(volatile u_##TYPE *p, u_##TYPE v)\
 { \
+	alpha_mb(); \
 	*p = v; \
-	alpha_wmb(); \
 } \
 static __inline u_##TYPE \
 atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
 { \
+	u_##TYPE v; \
+\
+	v = *p; \
 	alpha_mb(); \
-	return (*p); \
+	return (v); \
 } \
 \
 static __inline void \
 atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
 { \
+	alpha_mb(); \
 	*p = v; \
-	alpha_wmb(); \
 }
 ATOMIC_STORE_LOAD(char, 8)
@@ -408,35 +414,35 @@ atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
 static __inline u_int32_t
 atomic_cmpset_acq_32(volatile u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
 {
+	int retval;
+	retval = atomic_cmpset_32(p, cmpval, newval);
 	alpha_mb();
-	return (atomic_cmpset_32(p, cmpval, newval));
+	return (retval);
 }
 static __inline u_int32_t
 atomic_cmpset_rel_32(volatile u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
 {
-	int retval;
-	retval = atomic_cmpset_32(p, cmpval, newval);
-	alpha_wmb();
-	return (retval);
+	alpha_mb();
+	return (atomic_cmpset_32(p, cmpval, newval));
 }
 static __inline u_int64_t
 atomic_cmpset_acq_64(volatile u_int64_t *p, u_int64_t cmpval, u_int64_t newval)
 {
+	int retval;
+	retval = atomic_cmpset_64(p, cmpval, newval);
 	alpha_mb();
-	return (atomic_cmpset_64(p, cmpval, newval));
+	return (retval);
 }
 static __inline u_int64_t
 atomic_cmpset_rel_64(volatile u_int64_t *p, u_int64_t cmpval, u_int64_t newval)
 {
-	int retval;
-	retval = atomic_cmpset_64(p, cmpval, newval);
-	alpha_wmb();
-	return (retval);
+	alpha_mb();
+	return (atomic_cmpset_64(p, cmpval, newval));
 }
 #define atomic_cmpset_acq_int atomic_cmpset_acq_32