mtx: switch to fcmpset

The lock value found by a failed fcmpset is passed down to the slow-path
locking routines, sparing them a re-read of the lock word and thus reducing
cacheline accesses.
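
For illustration, a standalone C11 sketch of the two styles (an analogue, not
the kernel code; the names lock_word, acquire_cmpset and acquire_fcmpset are
invented here). atomic_compare_exchange_strong writes the found value back
through its "expected" pointer on failure, just as fcmpset does, so the caller
can hand that value to the slow path instead of re-reading the lock word:

#include <stdatomic.h>
#include <stdint.h>

#define	MTX_UNOWNED	0	/* lock word value when nobody owns the lock */

/* cmpset style: a failed attempt reports nothing, forcing a re-read. */
static int
acquire_cmpset(_Atomic uintptr_t *lock_word, uintptr_t tid)
{
	uintptr_t v;

	for (;;) {
		v = atomic_load(lock_word);
		if (v != MTX_UNOWNED)
			return (0);	/* owned: caller takes the slow path */
		if (atomic_compare_exchange_strong(lock_word, &v, tid))
			return (1);
		/* CAS failed; loop back and load the lock word again */
	}
}

/*
 * fcmpset style: a failed CAS deposits the found value in *vp, so the
 * slow path can consume it without touching the cacheline again.
 */
static int
acquire_fcmpset(_Atomic uintptr_t *lock_word, uintptr_t *vp, uintptr_t tid)
{
	*vp = MTX_UNOWNED;
	return (atomic_compare_exchange_strong(lock_word, vp, tid));
}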

The mtx_unlock slow path grows an explicit check for a regular (unrecursed,
uncontested) unlock. On ll/sc architectures the inline cmpset can fail
spuriously, so the slow path may be entered even though the lock could have
been released by the inline primitive; it now retries the plain release first.
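
Continuing the same standalone analogue (release_inline and release_slow are
invented names; atomic_compare_exchange_weak stands in for an ll/sc-backed
cmpset, which may likewise fail spuriously):

/* Inline fast path: may fail spuriously on ll/sc-style hardware. */
static int
release_inline(_Atomic uintptr_t *lock_word, uintptr_t tid)
{
	uintptr_t v = tid;

	/* weak CAS can fail even when *lock_word == tid */
	return (atomic_compare_exchange_weak(lock_word, &v,
	    (uintptr_t)MTX_UNOWNED));
}

/* Slow path: first re-check for a regular unlock the fast path missed. */
static void
release_slow(_Atomic uintptr_t *lock_word, uintptr_t tid)
{
	uintptr_t v = atomic_load(lock_word);

	if (v == tid) {
		/* regular unlock: retry what the inline primitive missed */
		if (atomic_compare_exchange_strong(lock_word, &v,
		    (uintptr_t)MTX_UNOWNED))
			return;
	}
	/* recursion and waiter hand-off handling would follow here */
}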

Discussed with:	jhb
Tested by:	pho (previous version)
mjg 2017-02-05 03:26:34 +00:00
parent 68b1554284
commit d593add0a5
2 changed files with 36 additions and 31 deletions

sys/kern/kern_mutex.c

@@ -455,12 +455,11 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
  * sleep waiting for it), or if we need to recurse on it.
  */
 void
-__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
+__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
     const char *file, int line)
 {
 	struct mtx *m;
 	struct turnstile *ts;
-	uintptr_t v;
 #ifdef ADAPTIVE_MUTEXES
 	volatile struct thread *owner;
 #endif
@@ -489,7 +488,6 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
 	lock_delay_arg_init(&lda, NULL);
 #endif
 	m = mtxlock2mtx(c);
-	v = MTX_READ_VALUE(m);
 
 	if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) {
 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
@@ -520,9 +518,8 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
 	for (;;) {
 		if (v == MTX_UNOWNED) {
-			if (_mtx_obtain_lock(m, tid))
+			if (_mtx_obtain_lock_fetch(m, &v, tid))
 				break;
-			v = MTX_READ_VALUE(m);
 			continue;
 		}
 #ifdef KDTRACE_HOOKS
@@ -674,12 +671,11 @@ _mtx_lock_spin_failed(struct mtx *m)
  * is handled inline.
  */
 void
-_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
-    const char *file, int line)
+_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
+    int opts, const char *file, int line)
 {
 	struct mtx *m;
 	struct lock_delay_arg lda;
-	uintptr_t v;
 #ifdef LOCK_PROFILING
 	int contested = 0;
 	uint64_t waittime = 0;
@@ -706,12 +702,10 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
 #ifdef KDTRACE_HOOKS
 	spin_time -= lockstat_nsecs(&m->lock_object);
 #endif
-	v = MTX_READ_VALUE(m);
 	for (;;) {
 		if (v == MTX_UNOWNED) {
-			if (_mtx_obtain_lock(m, tid))
+			if (_mtx_obtain_lock_fetch(m, &v, tid))
 				break;
-			v = MTX_READ_VALUE(m);
 			continue;
 		}
 		/* Give interrupts a chance while we spin. */
@@ -796,14 +790,11 @@ retry:
 			m->lock_object.lo_name, file, line));
 	WITNESS_CHECKORDER(&m->lock_object,
 	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
-	v = MTX_READ_VALUE(m);
 	for (;;) {
-		if (v == MTX_UNOWNED) {
-			if (_mtx_obtain_lock(m, tid))
-				break;
-			v = MTX_READ_VALUE(m);
+		if (_mtx_obtain_lock_fetch(m, &v, tid))
+			break;
+		if (v == MTX_UNOWNED)
 			continue;
-		}
 		if (v == tid) {
 			m->mtx_recurse++;
 			break;
@@ -896,11 +887,18 @@ __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
 {
 	struct mtx *m;
 	struct turnstile *ts;
+	uintptr_t v;
 
 	if (SCHEDULER_STOPPED())
 		return;
 
 	m = mtxlock2mtx(c);
+	v = MTX_READ_VALUE(m);
+	if (v == (uintptr_t)curthread) {
+		if (_mtx_release_lock(m, (uintptr_t)curthread))
+			return;
+	}
 
 	if (mtx_recursed(m)) {
 		if (--(m->mtx_recurse) == 0)

sys/sys/mutex.h

@@ -98,13 +98,13 @@ void	mtx_sysinit(void *arg);
 int	_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
 	    int line);
 void	mutex_init(void);
-void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
-	    const char *file, int line);
+void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
+	    int opts, const char *file, int line);
 void	__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file,
 	    int line);
 #ifdef SMP
-void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
-	    const char *file, int line);
+void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
+	    int opts, const char *file, int line);
 #endif
 void	__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
 	    int line);
@@ -140,13 +140,13 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
 	_mtx_destroy(&(m)->mtx_lock)
 #define	mtx_trylock_flags_(m, o, f, l)					\
 	_mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
-#define	_mtx_lock_sleep(m, t, o, f, l)					\
-	__mtx_lock_sleep(&(m)->mtx_lock, t, o, f, l)
+#define	_mtx_lock_sleep(m, v, t, o, f, l)				\
+	__mtx_lock_sleep(&(m)->mtx_lock, v, t, o, f, l)
 #define	_mtx_unlock_sleep(m, o, f, l)					\
 	__mtx_unlock_sleep(&(m)->mtx_lock, o, f, l)
 #ifdef SMP
-#define	_mtx_lock_spin(m, t, o, f, l)					\
-	_mtx_lock_spin_cookie(&(m)->mtx_lock, t, o, f, l)
+#define	_mtx_lock_spin(m, v, t, o, f, l)				\
+	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, t, o, f, l)
 #endif
 #define	_mtx_lock_flags(m, o, f, l)					\
 	__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
@@ -171,6 +171,11 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
 #define	_mtx_obtain_lock(mp, tid)					\
 	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))
 
+#define	_mtx_obtain_lock_fetch(mp, vp, tid) ({				\
+	*vp = MTX_UNOWNED;						\
+	atomic_fcmpset_acq_ptr(&(mp)->mtx_lock, vp, (tid));		\
+})
+
 /* Try to release mtx_lock if it is unrecursed and uncontested. */
 #define	_mtx_release_lock(mp, tid)					\
 	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)
@@ -188,9 +193,10 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
 /* Lock a normal mutex. */
 #define __mtx_lock(mp, tid, opts, file, line) do {			\
 	uintptr_t _tid = (uintptr_t)(tid);				\
+	uintptr_t _v;							\
 									\
-	if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid)))\
-		_mtx_lock_sleep((mp), _tid, (opts), (file), (line));	\
+	if (!_mtx_obtain_lock_fetch((mp), &_v, _tid))			\
+		_mtx_lock_sleep((mp), _v, _tid, (opts), (file), (line));\
 	else								\
 		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,	\
 		    mp, 0, 0, file, line);				\
@@ -205,13 +211,14 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
 #ifdef SMP
 #define __mtx_lock_spin(mp, tid, opts, file, line) do {			\
 	uintptr_t _tid = (uintptr_t)(tid);				\
+	uintptr_t _v;							\
 									\
 	spinlock_enter();						\
-	if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\
-		if ((mp)->mtx_lock == _tid)				\
+	if (!_mtx_obtain_lock_fetch((mp), &_v, _tid)) {			\
+		if (_v == _tid)						\
 			(mp)->mtx_recurse++;				\
 		else							\
-			_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
+			_mtx_lock_spin((mp), _v, _tid, (opts), (file), (line));\
 	} else								\
 		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,	\
 		    mp, 0, 0, file, line);				\
@@ -265,7 +272,7 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
 									\
 	if ((mp)->mtx_recurse == 0)					\
 		LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, mp);	\
-	if ((mp)->mtx_lock != _tid || !_mtx_release_lock((mp), _tid))	\
+	if (!_mtx_release_lock((mp), _tid))				\
 		_mtx_unlock_sleep((mp), (opts), (file), (line));	\
 } while (0)