Use atomic_interrupt_fence() instead of bare __compiler_membar()

for the places which definitely use membar to sync with interrupt handlers.

The libc and rtld uses of __compiler_membar() seem to want proper compiler
barriers and are left unchanged.

The barrier in sched_unpin_lite() after the td_pinned decrement does not seem
to be needed, so it is removed rather than converted.

Reviewed by:	markj
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D28956
Konstantin Belousov 2021-02-27 01:54:17 +02:00
parent 1d9ba697f9
commit b5449c92b4
5 changed files with 24 additions and 27 deletions
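For reference, a minimal sketch of the primitives involved, assuming the usual definitions in machine/atomic.h and sys/cdefs.h (the exact spellings below are an assumption, not part of this diff):

/*
 * Sketch only.  An interrupt (or IPI) handler runs on the same CPU as the
 * code it interrupts and observes it in program order, so no hardware
 * fence is required; only compiler reordering must be prevented.
 * atomic_interrupt_fence() is therefore expected to expand to a compiler
 * barrier on every architecture, but unlike a bare __compiler_membar()
 * it names the intent: ordering against interrupt handlers on the same
 * CPU, not against other CPUs.
 */
#define	atomic_interrupt_fence()	__compiler_membar()

/* A plain compiler barrier; it emits no instructions. */
#define	__compiler_membar()	__asm __volatile(" " : : : "memory")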


@@ -366,7 +366,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	 * Check to see if the IPI granted us the lock after all. The load of
 	 * rmp_flags must happen after the tracker is removed from the list.
 	 */
-	__compiler_membar();
+	atomic_interrupt_fence();
 	if (tracker->rmp_flags) {
 		/* Just add back tracker - we hold the lock. */
 		rm_tracker_add(pc, tracker);
@@ -448,7 +448,7 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	td->td_critnest++;	/* critical_enter(); */
-	__compiler_membar();
+	atomic_interrupt_fence();
 	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
@@ -456,7 +456,7 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	sched_pin();
-	__compiler_membar();
+	atomic_interrupt_fence();
 	td->td_critnest--;
@@ -873,17 +873,15 @@ db_show_rm(const struct lock_object *lock)
  * Concurrent writers take turns taking the lock while going off cpu. If this is
  * of concern for your usecase, this is not the right primitive.
  *
- * Neither rms_rlock nor rms_runlock use fences. Instead compiler barriers are
- * inserted to prevert reordering of generated code. Execution ordering is
- * provided with the use of an IPI handler.
+ * Neither rms_rlock nor rms_runlock use thread fences. Instead interrupt
+ * fences are inserted to ensure ordering with the code executed in the IPI
+ * handler.
  *
  * No attempt is made to track which CPUs read locked at least once,
  * consequently write locking sends IPIs to all of them. This will become a
  * problem at some point. The easiest way to lessen it is to provide a bitmap.
  */
-#define	rms_int_membar()	__compiler_membar()
 #define	RMS_NOOWNER	((void *)0x1)
 #define	RMS_TRANSIENT	((void *)0x2)
 #define	RMS_FLAGMASK	0xf
@@ -1030,14 +1028,14 @@ rms_rlock(struct rmslock *rms)
 	critical_enter();
 	pcpu = rms_int_pcpu(rms);
 	rms_int_influx_enter(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	if (__predict_false(rms->writers > 0)) {
 		rms_rlock_fallback(rms);
 		return;
 	}
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_readers_inc(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_influx_exit(rms, pcpu);
 	critical_exit();
 }
@@ -1052,15 +1050,15 @@ rms_try_rlock(struct rmslock *rms)
 	critical_enter();
 	pcpu = rms_int_pcpu(rms);
 	rms_int_influx_enter(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	if (__predict_false(rms->writers > 0)) {
 		rms_int_influx_exit(rms, pcpu);
 		critical_exit();
 		return (0);
 	}
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_readers_inc(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_influx_exit(rms, pcpu);
 	critical_exit();
 	return (1);
@@ -1092,14 +1090,14 @@ rms_runlock(struct rmslock *rms)
 	critical_enter();
 	pcpu = rms_int_pcpu(rms);
 	rms_int_influx_enter(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	if (__predict_false(rms->writers > 0)) {
 		rms_runlock_fallback(rms);
 		return;
 	}
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_readers_dec(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_influx_exit(rms, pcpu);
 	critical_exit();
 }
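
To make the role of each fence in the read-lock fast path explicit, here is the same sequence as above with annotations added; this is an illustration, not code from the commit, and the per-CPU type name is assumed:

static __inline void
rms_rlock_annotated(struct rmslock *rms)
{
	struct rmslock_pcpu *pcpu;	/* per-CPU state; type name assumed */

	critical_enter();			/* no preemption or migration */
	pcpu = rms_int_pcpu(rms);
	rms_int_influx_enter(rms, pcpu);	/* tell the writer's IPI handler we are in flux */
	atomic_interrupt_fence();		/* influx store not reordered after the writers check */
	if (__predict_false(rms->writers > 0)) {
		rms_rlock_fallback(rms);	/* writer pending: take the slow path */
		return;
	}
	atomic_interrupt_fence();		/* writers check not reordered after the increment */
	rms_int_readers_inc(rms, pcpu);
	atomic_interrupt_fence();		/* increment visible before influx is cleared */
	rms_int_influx_exit(rms, pcpu);
	critical_exit();
}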


@@ -38,7 +38,7 @@ sched_pin_lite(struct thread_lite *td)
 	KASSERT((struct thread *)td == curthread, ("sched_pin called on non curthread"));
 	td->td_pinned++;
-	__compiler_membar();
+	atomic_interrupt_fence();
 }
 
 static __inline void
@@ -47,9 +47,8 @@ sched_unpin_lite(struct thread_lite *td)
 	KASSERT((struct thread *)td == curthread, ("sched_unpin called on non curthread"));
 	KASSERT(td->td_pinned > 0, ("sched_unpin called on non pinned thread"));
-	__compiler_membar();
+	atomic_interrupt_fence();
 	td->td_pinned--;
-	__compiler_membar();
 }
 #endif
 #endif


@@ -1091,7 +1091,7 @@ void resume_all_fs(void);
 	_mpcpu = vfs_mount_pcpu(mp);				\
 	MPASS(mpcpu->mntp_thread_in_ops == 0);			\
 	_mpcpu->mntp_thread_in_ops = 1;				\
-	__compiler_membar();					\
+	atomic_interrupt_fence();				\
 	if (__predict_false(mp->mnt_vfs_ops > 0)) {		\
 		vfs_op_thread_exit_crit(mp, _mpcpu);		\
 		_retval_crit = false;				\
@@ -1111,7 +1111,7 @@ void resume_all_fs(void);
 #define vfs_op_thread_exit_crit(mp, _mpcpu) do {		\
 	MPASS(_mpcpu == vfs_mount_pcpu(mp));			\
 	MPASS(_mpcpu->mntp_thread_in_ops == 1);			\
-	__compiler_membar();					\
+	atomic_interrupt_fence();				\
 	_mpcpu->mntp_thread_in_ops = 0;				\
 } while (0)


@@ -173,13 +173,13 @@ static __inline void
 sched_pin(void)
 {
 	curthread->td_pinned++;
-	__compiler_membar();
+	atomic_interrupt_fence();
 }
 
 static __inline void
 sched_unpin(void)
 {
-	__compiler_membar();
+	atomic_interrupt_fence();
 	curthread->td_pinned--;
 }


@@ -284,7 +284,7 @@ critical_enter(void)
 	td = (struct thread_lite *)curthread;
 	td->td_critnest++;
-	__compiler_membar();
+	atomic_interrupt_fence();
 }
 
 static __inline void
@@ -295,9 +295,9 @@ critical_exit(void)
 	td = (struct thread_lite *)curthread;
 	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
-	__compiler_membar();
+	atomic_interrupt_fence();
 	td->td_critnest--;
-	__compiler_membar();
+	atomic_interrupt_fence();
 	if (__predict_false(td->td_owepreempt))
 		critical_exit_preempt();
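
As a closing note on why critical_exit() keeps its trailing fence while sched_unpin_lite() loses one, a hedged reading of the two paths (reasoning paraphrased from the commit message, not stated in the diff itself):

static __inline void
critical_exit_sketch(struct thread_lite *td)
{
	atomic_interrupt_fence();	/* critical-section work completes before the decrement */
	td->td_critnest--;
	atomic_interrupt_fence();	/* decrement ordered before the td_owepreempt check below */
	if (__predict_false(td->td_owepreempt))
		critical_exit_preempt();
}

static __inline void
sched_unpin_lite_sketch(struct thread_lite *td)
{
	atomic_interrupt_fence();	/* pinned work completes before the decrement */
	td->td_pinned--;
	/*
	 * Nothing follows the decrement that an interrupt handler could
	 * observe out of order, so no trailing fence appears to be needed.
	 */
}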