Unconditionally use locked addition of zero to the top of the stack for
memory barriers on i386. It works as a serializing instruction on all IA32 CPUs. The alternative solution of using {s,l,m}fence requires run-time checking of the presence of the corresponding SSE or SSE2 extensions, and possibly boot-time patching of the kernel text. Suggested by: many
This commit is contained in:
parent
aeb325719a
commit
2640173120
@@ -32,20 +32,9 @@
|
||||
#error this file needs sys/cdefs.h as a prerequisite
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(I686_CPU)
|
||||
#define mb() __asm__ __volatile__ ("mfence;": : :"memory")
|
||||
#define wmb() __asm__ __volatile__ ("sfence;": : :"memory")
|
||||
#define rmb() __asm__ __volatile__ ("lfence;": : :"memory")
|
||||
#else
|
||||
/*
|
||||
* do we need a serializing instruction?
|
||||
*/
|
||||
#define mb()
|
||||
#define wmb()
|
||||
#define rmb()
|
||||
#endif
|
||||
|
||||
#define mb() __asm __volatile("lock;addl $0,(%esp)")
|
||||
#define wmb() __asm __volatile("lock;addl $0,(%esp)")
|
||||
#define rmb() __asm __volatile("lock;addl $0,(%esp)")
|
||||
|
||||
/*
|
||||
* Various simple operations on memory, each of which is atomic in the
|
||||
|
Loading…
Reference in New Issue
Block a user