mtx: tidy up recursion handling in thread lock

Normally, after grabbing the lock, it has to be verified that we got the right one
to begin with. However, if we are recursing, the lock must not have changed, thus
the check can be avoided. In particular this avoids a lock read in the
non-recursing case which would find out the lock was changed.

While here, avoid an irq trip if this happens.

Tested by:	pho (previous version)
This commit is contained in:
Mateusz Guzik 2018-03-04 22:01:23 +00:00
parent 89ba361fcc
commit 9f4e008d4a

View File

@ -829,10 +829,8 @@ _thread_lock(struct thread *td)
WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
return;
}
if (m->mtx_recurse != 0)
m->mtx_recurse--;
else
_mtx_release_lock_quick(m);
MPASS(m->mtx_recurse == 0);
_mtx_release_lock_quick(m);
slowpath_unlocked:
spinlock_exit();
slowpath_noirq:
@ -886,9 +884,10 @@ thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
if (__predict_false(doing_lockprof))
spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
spinlock_enter();
for (;;) {
retry:
spinlock_enter();
m = td->td_lock;
thread_lock_validate(m, opts, file, line);
v = MTX_READ_VALUE(m);
@ -900,6 +899,7 @@ retry:
}
if (v == tid) {
m->mtx_recurse++;
MPASS(m == td->td_lock);
break;
}
lock_profile_obtain_lock_failed(&m->lock_object,
@ -912,15 +912,18 @@ retry:
} else {
_mtx_lock_indefinite_check(m, &lda);
}
if (m != td->td_lock)
if (m != td->td_lock) {
spinlock_enter();
goto retry;
}
v = MTX_READ_VALUE(m);
} while (v != MTX_UNOWNED);
spinlock_enter();
}
if (m == td->td_lock)
break;
__mtx_unlock_spin(m); /* does spinlock_exit() */
MPASS(m->mtx_recurse == 0);
_mtx_release_lock_quick(m);
}
LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
line);