locks: fix recursion support after recent changes

When a relevant lockstat probe is enabled, the fallback primitive is called
with a constant signifying a free lock instead of the lock's current value.
This works fine for typical cases, but breaks recursion: the slow path
detects recursion by checking whether the owner encoded in the passed value
is the executing thread, and the free-lock constant never matches.

Read the actual lock value before that check if the constant was passed.
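
For illustration, a minimal self-contained sketch of the bug and the fix;
all names here (lk_lock, lk_lock_hard, LK_UNOWNED, lockstat_enabled) are
illustrative stand-ins, not the kernel's own identifiers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LK_UNOWNED ((uintptr_t)0)	/* constant meaning "lock is free" */

struct lk {
	_Atomic uintptr_t state;	/* 0 == free, else owner thread id */
	int recursed;
};

static bool lockstat_enabled;	/* stand-in for the lockstat probe check */

/*
 * Slow path. Recursion is detected by comparing the owner encoded in the
 * value handed down by the fast path against the executing thread, so the
 * value must reflect the lock word. The first two lines are the fix: if
 * the fast path passed the free-lock constant, read the real value first.
 */
static void
lk_lock_hard(struct lk *l, uintptr_t v, uintptr_t tid)
{
	if (v == LK_UNOWNED)
		v = atomic_load(&l->state);
	if (v == tid) {			/* owner is the executing thread */
		l->recursed++;		/* recurse instead of deadlocking */
		return;
	}
	/* Contended path: spin until the lock is released, then claim it. */
	for (;;) {
		v = LK_UNOWNED;
		if (atomic_compare_exchange_weak(&l->state, &v, tid))
			return;
	}
}

/*
 * Fast path. With a lockstat probe armed, the inline cmpxchg is skipped so
 * the probe can fire in the slow path, which is then entered with the
 * LK_UNOWNED constant rather than a freshly read lock value; this is what
 * broke the recursion check above.
 */
static void
lk_lock(struct lk *l, uintptr_t tid)
{
	uintptr_t v = LK_UNOWNED;

	if (!lockstat_enabled &&
	    atomic_compare_exchange_strong(&l->state, &v, tid))
		return;			/* acquired uncontended */
	lk_lock_hard(l, v, tid);	/* v may be LK_UNOWNED or a real read */
}

Without the two added lines, a thread re-acquiring a lock it already owns
spins in the contended loop forever, since v never equals its own id.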
commit c1aaf63cb5 (parent 467c82cb84)
Author: Mateusz Guzik
Date:   2017-02-06 09:40:14 +00:00

3 changed files with 7 additions and 0 deletions

sys/kern/kern_mutex.c

@@ -495,6 +495,8 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
 	lock_delay_arg_init(&lda, NULL);
 #endif
 	m = mtxlock2mtx(c);
+	if (__predict_false(v == MTX_UNOWNED))
+		v = MTX_READ_VALUE(m);
 	if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) {
 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||

sys/kern/kern_rwlock.c

@@ -812,6 +812,8 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
 	lock_delay_arg_init(&lda, NULL);
 #endif
 	rw = rwlock2rw(c);
+	if (__predict_false(v == RW_UNLOCKED))
+		v = RW_READ_VALUE(rw);
 	if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
 		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,

sys/kern/kern_sx.c

@@ -531,6 +531,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
 	lock_delay_arg_init(&lda, NULL);
 #endif
+	if (__predict_false(x == SX_LOCK_UNLOCKED))
+		x = SX_READ_VALUE(sx);
+
 	/* If we already hold an exclusive lock, then recurse. */
 	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
 		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
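
For context, the scenario the fix restores: a lock must be initialized as
recursable for its owner to take it again, and with a lockstat probe armed
that second acquisition goes through the slow paths patched above. A sketch
of such kernel-context usage with a mutex (example_mtx is a hypothetical
lock, not from this commit; mtx_init(9) and MTX_RECURSE are the real API):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_mtx;	/* hypothetical lock for illustration */

static void
example_init(void)
{
	/* MTX_RECURSE marks the lock recursable (LO_RECURSABLE). */
	mtx_init(&example_mtx, "example", NULL, MTX_DEF | MTX_RECURSE);
}

static void
example_use(void)
{
	mtx_lock(&example_mtx);
	mtx_lock(&example_mtx);	/* recursion: owner check must see the real value */
	mtx_unlock(&example_mtx);
	mtx_unlock(&example_mtx);
}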