locks: fix recursion support after recent changes
When a relevant lockstat probe is enabled, the fallback primitive is called with a constant signifying a free lock. This works fine for typical cases but breaks with recursion, since the recursion check compares the passed value against the executing thread. Read the current lock value before the recursion check if necessary.
This commit is contained in:
parent
467c82cb84
commit
c1aaf63cb5
@@ -495,6 +495,8 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
 	lock_delay_arg_init(&lda, NULL);
 #endif
 	m = mtxlock2mtx(c);
+	if (__predict_false(v == MTX_UNOWNED))
+		v = MTX_READ_VALUE(m);
 
 	if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) {
 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
|
@@ -812,6 +812,8 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
 	lock_delay_arg_init(&lda, NULL);
 #endif
 	rw = rwlock2rw(c);
+	if (__predict_false(v == RW_UNLOCKED))
+		v = RW_READ_VALUE(rw);
 
 	if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
 		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
|
@@ -531,6 +531,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
 	lock_delay_arg_init(&lda, NULL);
 #endif
 
+	if (__predict_false(x == SX_LOCK_UNLOCKED))
+		x = SX_READ_VALUE(sx);
+
 	/* If we already hold an exclusive lock, then recurse. */
 	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
 		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
|
Loading…
Reference in New Issue
Block a user