locks: clean up trylock primitives

In particular this reduces accesses of the lock itself.
mjg 2017-02-18 22:06:03 +00:00
parent 2f22d39bfd
commit 9d1d07d1cb
3 changed files with 63 additions and 34 deletions
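
Background for the diffs below: atomic_cmpset_*() returns only a success flag, so a failed attempt tells the caller nothing and the ownership check needs its own read of the lock word (mtx_owned(), rw_wlocked() and sx_xlocked() all load it). The atomic_fcmpset_*() family instead takes a pointer to the expected value and, on failure, stores the value it actually observed there, so a single atomic access both attempts the acquire and reports the owner. A minimal userland model of that contract, assuming only C11 atomics (the kernel's real primitives are per-architecture, in the machine/atomic.h headers):

#include <stdatomic.h>
#include <stdint.h>

/*
 * Model of atomic_fcmpset_ptr(): try to swing *p from *exp to new.
 * On failure, *exp is overwritten with the value actually observed,
 * so the caller can inspect the current owner without another load.
 */
static int
fcmpset_model(_Atomic uintptr_t *p, uintptr_t *exp, uintptr_t new)
{

	return (atomic_compare_exchange_strong(p, exp, new));
}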

sys/kern/kern_mutex.c

@@ -374,13 +374,18 @@ int
 _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
 {
 	struct mtx *m;
+	struct thread *td;
+	uintptr_t tid, v;
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
 	int contested = 0;
 #endif
 	int rval;
+	bool recursed;
 
-	if (SCHEDULER_STOPPED())
+	td = curthread;
+	tid = (uintptr_t)td;
+	if (SCHEDULER_STOPPED_TD(td))
 		return (1);
 
 	m = mtxlock2mtx(c);
@@ -394,13 +399,21 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
 	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
 	    file, line));
 
-	if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
-	    (opts & MTX_RECURSE) != 0)) {
-		m->mtx_recurse++;
-		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
-		rval = 1;
-	} else
-		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
+	rval = 1;
+	recursed = false;
+	v = MTX_UNOWNED;
+	if (!_mtx_obtain_lock_fetch(m, &v, tid)) {
+		if (v == tid &&
+		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
+		    (opts & MTX_RECURSE) != 0)) {
+			m->mtx_recurse++;
+			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
+			recursed = true;
+		} else {
+			rval = 0;
+		}
+	}
 	opts &= ~MTX_RECURSE;
 
 	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
@@ -408,10 +421,9 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
 		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
 		    file, line);
 		TD_LOCKS_INC(curthread);
-		if (m->mtx_recurse == 0)
+		if (!recursed)
 			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
 			    m, contested, waittime, file, line);
 	}
 
 	return (rval);
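
The mutex path goes through _mtx_obtain_lock_fetch() rather than calling the atomic primitive directly. At the time of this commit that appears to be a thin macro over the fcmpset primitive, roughly as below (paraphrased from sys/sys/mutex.h; check the tree for the authoritative definition):

#define	_mtx_obtain_lock_fetch(mp, vp, tid)				\
	atomic_fcmpset_acq_ptr(&(mp)->mtx_lock, vp, (tid))

Since v is preset to MTX_UNOWNED and is only rewritten on failure, a failed fetch leaves the observed owner in v, which is why the recursion test can compare v against tid without touching m->mtx_lock again.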

sys/kern/kern_rwlock.c

@@ -293,9 +293,14 @@ int
 __rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
 {
 	struct rwlock *rw;
+	struct thread *td;
+	uintptr_t tid, v;
 	int rval;
+	bool recursed;
 
-	if (SCHEDULER_STOPPED())
+	td = curthread;
+	tid = (uintptr_t)td;
+	if (SCHEDULER_STOPPED_TD(td))
 		return (1);
 
 	rw = rwlock2rw(c);
@@ -306,20 +311,23 @@ __rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
-	if (rw_wlocked(rw) &&
-	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
-		rw->rw_recurse++;
-		atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
-		rval = 1;
-	} else
-		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
-		    (uintptr_t)curthread);
+	rval = 1;
+	recursed = false;
+	v = RW_UNLOCKED;
+	if (!atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid)) {
+		if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) {
+			rw->rw_recurse++;
+			atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
+		} else {
+			rval = 0;
+		}
+	}
 
 	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
 	if (rval) {
 		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
 		    file, line);
-		if (!rw_recursed(rw))
+		if (!recursed)
 			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
 			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
 		TD_LOCKS_INC(curthread);
@@ -637,13 +645,13 @@ __rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
 	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
 	    curthread, rw->lock_object.lo_name, file, line));
+	x = rw->rw_lock;
 	for (;;) {
-		x = rw->rw_lock;
 		KASSERT(rw->rw_lock != RW_DESTROYED,
 		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
 		if (!(x & RW_LOCK_READ))
 			break;
-		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
+		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) {
 			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
 			    line);
 			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
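
The read-lock change in __rw_try_rlock() (mirrored by sx_try_slock_() below) is the usual fcmpset loop shape: the lock word is read once before the loop, and each failed fcmpset refreshes x as a side effect, replacing the old per-iteration x = rw->rw_lock; reload. The generic pattern, sketched with a hypothetical can_take_shared() predicate standing in for the RW_LOCK_READ / SX_LOCK_SHARED tests:

static int
try_bump(volatile uintptr_t *lockp, uintptr_t incr)
{
	uintptr_t x;

	x = *lockp;		/* the only explicit read of the lock word */
	for (;;) {
		if (!can_take_shared(x))	/* hypothetical predicate */
			return (0);
		/* On failure, x is refreshed with the current value. */
		if (atomic_fcmpset_acq_ptr(lockp, &x, x + incr))
			return (1);
	}
}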

sys/kern/kern_sx.c

@@ -269,13 +269,13 @@ sx_try_slock_(struct sx *sx, const char *file, int line)
 	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
 	    curthread, sx->lock_object.lo_name, file, line));
+	x = sx->sx_lock;
 	for (;;) {
-		x = sx->sx_lock;
 		KASSERT(x != SX_LOCK_DESTROYED,
 		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
 		if (!(x & SX_LOCK_SHARED))
 			break;
-		if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
+		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
 			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
 			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
 			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
@@ -322,9 +322,14 @@ _sx_xlock(struct sx *sx, int opts, const char *file, int line)
 int
 sx_try_xlock_(struct sx *sx, const char *file, int line)
 {
+	struct thread *td;
+	uintptr_t tid, x;
 	int rval;
+	bool recursed;
 
-	if (SCHEDULER_STOPPED())
+	td = curthread;
+	tid = (uintptr_t)td;
+	if (SCHEDULER_STOPPED_TD(td))
 		return (1);
 
 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
@@ -333,19 +338,23 @@ sx_try_xlock_(struct sx *sx, const char *file, int line)
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
-	if (sx_xlocked(sx) &&
-	    (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
-		sx->sx_recurse++;
-		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
-		rval = 1;
-	} else
-		rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
-		    (uintptr_t)curthread);
+	rval = 1;
+	recursed = false;
+	x = SX_LOCK_UNLOCKED;
+	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) {
+		if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
+			sx->sx_recurse++;
+			atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
+		} else {
+			rval = 0;
+		}
+	}
 
 	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
 	if (rval) {
 		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
 		    file, line);
-		if (!sx_recursed(sx))
+		if (!recursed)
 			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
 			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
 		TD_LOCKS_INC(curthread);
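
None of this changes the try-lock contract seen by callers, which still get a plain succeeded/failed return. A typical caller-side shape, using a hypothetical softc-embedded sx lock for illustration:

/* sc->sc_sx is a hypothetical sx lock embedded in a driver softc. */
if (sx_try_xlock(&sc->sc_sx)) {
	/* Acquired without sleeping; do the work. */
	sx_xunlock(&sc->sc_sx);
} else {
	/* Lock is held; defer the work or fall back to sx_xlock(). */
}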