Use private cache line for the locked nop in *mb() on i386.

Suggested by:	alc
Reviewed by:	alc, bde
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
This commit is contained in:
Konstantin Belousov 2015-07-30 00:13:20 +00:00
parent dd5b64258f
commit 48cae112b5
2 changed files with 41 additions and 34 deletions

View File

@@ -111,8 +111,8 @@ _Static_assert(OFFSETOF_CURTHREAD == offsetof(struct pcpu, pc_curthread),
"OFFSETOF_CURTHREAD does not correspond with offset of pc_curthread.");
_Static_assert(OFFSETOF_CURPCB == offsetof(struct pcpu, pc_curpcb),
"OFFSETOF_CURPCB does not correspond with offset of pc_curpcb.");
_Static_assert(OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
"OFFSETOF_MONINORBUF does not correspond with offset of pc_monitorbuf.");
/*
 * Verify that the open-coded __OFFSETOF_MONITORBUF constant in
 * machine/atomic.h matches the actual layout of struct pcpu.
 * (Message typo fixed: "MONINORBUF" -> "MONITORBUF".)
 */
_Static_assert(__OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
"__OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");
static void cpu_reset_real(void);
#ifdef SMP

View File

@@ -37,9 +37,31 @@
#include <machine/specialreg.h>
#endif
#define mb() __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define wmb() __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define rmb() __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#ifndef __OFFSETOF_MONITORBUF
/*
* __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
*
* The open-coded number is used instead of the symbolic expression to
* avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
* An assertion in i386/vm_machdep.c ensures that the value is correct.
*/
#define __OFFSETOF_MONITORBUF 0x180
/*
 * Kernel store/load barrier ("mb, kernel").  Issues a locked add of
 * zero to the per-CPU pc_monitorbuf word, addressed through the %fs
 * per-CPU segment at __OFFSETOF_MONITORBUF.  Using this private
 * per-CPU cache line avoids the cache-line traffic that a locked
 * access to the (shared) stack top would cause.
 */
static __inline void
__mbk(void)
{
/*
 * "+m" marks the dummy word as read and written; the "memory" and
 * "cc" clobbers prevent the compiler from reordering across the
 * barrier (the locked add also clobbers EFLAGS).
 */
__asm __volatile("lock; addl $0,%%fs:%0"
: "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}
static __inline void
__mbu(void)
{
__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif
/*
* Various simple operations on memory, each of which is atomic in the
@@ -246,40 +268,15 @@ atomic_testandset_int(volatile u_int *p, u_int v)
* reordering accesses in a way that violates the semantics of acquire
* and release.
*/
#if defined(_KERNEL)
/*
* OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
*
* The open-coded number is used instead of the symbolic expression to
* avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
* An assertion in i386/vm_machdep.c ensures that the value is correct.
*/
#define OFFSETOF_MONITORBUF 0x180
#if defined(SMP)
static __inline void
__storeload_barrier(void)
{
__asm __volatile("lock; addl $0,%%fs:%0"
: "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#define __storeload_barrier() __mbk()
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{
__compiler_membar();
}
#define __storeload_barrier() __compiler_membar()
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{
__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#define __storeload_barrier() __mbu()
#endif /* _KERNEL*/
#define ATOMIC_LOAD(TYPE) \
@@ -776,4 +773,14 @@ u_long atomic_swap_long(volatile u_long *p, u_long v);
#endif /* !WANT_FUNCTIONS */
#if defined(_KERNEL)
/*
 * In the kernel all three barriers expand to the same locked add on
 * the private per-CPU monitor-buffer cache line (see __mbk() above);
 * a lock-prefixed instruction acts as a full fence on x86, so one
 * primitive serves mb/wmb/rmb alike.
 */
#define mb() __mbk()
#define wmb() __mbk()
#define rmb() __mbk()
#else
/* Userland: fall back to the locked add on the stack top. */
#define mb() __mbu()
#define wmb() __mbu()
#define rmb() __mbu()
#endif
#endif /* !_MACHINE_ATOMIC_H_ */