On PowerPC 64bit, the linux-compat mb() definition is implemented with
the lwsync instruction, which does not provide a Store/Load barrier.
Fix this by using the "full" sync barrier for mb().

atomic_store_rel() does not need a full barrier, so change the mb() call
there to the lwsync instruction when not hitting the known CPU errata
(i.e. on 32bit).  Provide a powerpc_lwsync() helper to isolate the
lwsync/sync compile-time selection, and use it in atomic_store_rel()
and several other places which duplicated the code.

Noted by:	alc
Reviewed and tested by:	nwhitehorn
Sponsored by:	The FreeBSD Foundation
This commit is contained in:
Konstantin Belousov 2015-11-24 09:13:21 +00:00
parent ff6b30b9fa
commit 0b39ffb35f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=291242

View File

@ -48,7 +48,7 @@
*/
#ifdef __powerpc64__
/*
 * mb() must be a full Store/Load barrier.  On PowerPC only the
 * heavyweight "sync" orders stores against subsequent loads; "lwsync"
 * orders every other pair but NOT Store/Load, so it must not be used
 * for mb().  The read/write/release barriers below only need the
 * orderings lwsync does provide.
 */
#define mb()	__asm __volatile("sync" : : : "memory")
#define rmb()	__asm __volatile("lwsync" : : : "memory")
#define wmb()	__asm __volatile("lwsync" : : : "memory")
#define __ATOMIC_REL()	__asm __volatile("lwsync" : : : "memory")
@ -61,6 +61,17 @@
#define __ATOMIC_ACQ() __asm __volatile("isync" : : : "memory")
#endif
/*
 * Emit the cheapest barrier that gives acquire/release ordering:
 * "lwsync" where it works, the full "sync" on 32-bit parts whose
 * lwsync is known to be broken (Book-E erratum).
 */
static __inline void
powerpc_lwsync(void)
{

#ifndef __powerpc64__
	__asm __volatile("sync" : : : "memory");
#else
	__asm __volatile("lwsync" : : : "memory");
#endif
}
/*
* atomic_add(p, v)
* { *p += v; }
@ -506,7 +517,8 @@ atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	/* Release store: prior accesses must be visible before	\
	 * the store; lwsync (or sync on 32bit) is sufficient,	\
	 * a full mb() is not required. */			\
	powerpc_lwsync();					\
	*p = v;							\
}
@ -734,34 +746,21 @@ static __inline void
atomic_thread_fence_acq(void)
{

	/*
	 * Acquire fence.  See above comment about lwsync being broken
	 * on Book-E: powerpc_lwsync() picks lwsync or full sync.
	 */
	powerpc_lwsync();
}
static __inline void
atomic_thread_fence_rel(void)
{

	/*
	 * Release fence: lwsync orders prior loads and stores before
	 * subsequent stores; 32-bit parts fall back to full sync.
	 */
	powerpc_lwsync();
}
static __inline void
atomic_thread_fence_acq_rel(void)
{

	/*
	 * Combined acquire+release fence; lwsync gives every ordering
	 * except Store/Load, which an acq_rel fence does not require.
	 */
	powerpc_lwsync();
}
static __inline void