diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 5d2146b7909c..b3e511611e20 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -289,6 +289,7 @@ __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
     int line)
 {
 	struct mtx *m;
+	uintptr_t tid, v;
 
 	if (SCHEDULER_STOPPED())
 		return;
@@ -308,7 +309,14 @@ __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
 		opts &= ~MTX_RECURSE;
 	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
 	    file, line, NULL);
-	__mtx_lock_spin(m, curthread, opts, file, line);
+	spinlock_enter();
+	tid = (uintptr_t)curthread;
+	v = MTX_UNOWNED;
+	if (!_mtx_obtain_lock_fetch(m, &v, tid))
+		_mtx_lock_spin(m, v, opts, file, line);
+	else
+		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
+		    m, 0, 0, file, line);
 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
 	    line);
 	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
@@ -682,12 +690,18 @@ _mtx_lock_spin_failed(struct mtx *m)
  * This is only called if we need to actually spin for the lock. Recursion
  * is handled inline.
  */
+#if LOCK_DEBUG > 0
 void
-_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
-    int opts, const char *file, int line)
+_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
+    const char *file, int line)
+#else
+void
+_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
+#endif
 {
 	struct mtx *m;
 	struct lock_delay_arg lda;
+	uintptr_t tid;
 #ifdef LOCK_PROFILING
 	int contested = 0;
 	uint64_t waittime = 0;
@@ -699,10 +713,7 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
 	int doing_lockprof;
 #endif
 
-	if (SCHEDULER_STOPPED())
-		return;
-
-	lock_delay_arg_init(&lda, &mtx_spin_delay);
+	tid = (uintptr_t)curthread;
 	m = mtxlock2mtx(c);
 
 	if (__predict_false(v == MTX_UNOWNED))
@@ -713,6 +724,11 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
 		return;
 	}
 
+	if (SCHEDULER_STOPPED())
+		return;
+
+	lock_delay_arg_init(&lda, &mtx_spin_delay);
+
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 359b1199714c..c37d4ebd2aaa 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -105,8 +105,12 @@ void	__mtx_unlock_sleep(volatile uintptr_t *c);
 #endif
 
 #ifdef SMP
-void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
-	    int opts, const char *file, int line);
+#if LOCK_DEBUG > 0
+void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
+	    const char *file, int line);
+#else
+void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v);
+#endif
 #endif
 void	__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
 	    int line);
@@ -154,8 +158,13 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
 	__mtx_unlock_sleep(&(m)->mtx_lock)
 #endif
 #ifdef SMP
-#define	_mtx_lock_spin(m, v, t, o, f, l)				\
-	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, t, o, f, l)
+#if LOCK_DEBUG > 0
+#define	_mtx_lock_spin(m, v, o, f, l)					\
+	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, o, f, l)
+#else
+#define	_mtx_lock_spin(m, v, o, f, l)					\
+	_mtx_lock_spin_cookie(&(m)->mtx_lock, v)
+#endif
 #endif
 #define	_mtx_lock_flags(m, o, f, l)					\
 	__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
@@ -219,11 +228,9 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
 	uintptr_t _v = MTX_UNOWNED;					\
 									\
 	spinlock_enter();						\
-	if (!_mtx_obtain_lock_fetch((mp), &_v, _tid))			\
-		_mtx_lock_spin((mp), _v, _tid, (opts), (file), (line));	\
-	else								\
-		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,	\
-		    mp, 0, 0, file, line);				\
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) ||	\
+	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
+		_mtx_lock_spin((mp), _v, (opts), (file), (line));	\
 } while (0)
 #define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
 	uintptr_t _tid = (uintptr_t)(tid);				\
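A brief illustration for reviewers, not part of the patch: the change keeps the inlined fast path down to a single compare-and-swap and hands the value observed by a failed CAS straight to the out-of-line slow path, which therefore does not need to re-read the lock word on entry; it also routes lockstat-enabled acquisitions through the slow path so the inline code stays small. The stand-alone sketch below shows that fast/slow split with userspace C11 atomics. my_lock, my_lock_try() and my_lock_slow() are invented names standing in for struct mtx, _mtx_obtain_lock_fetch() and _mtx_lock_spin_cookie(), and the sketch omits spinlock_enter(), recursion handling, WITNESS/KTR and the lock_delay() back-off.

#include <stdatomic.h>
#include <stdint.h>

#define	UNOWNED	((uintptr_t)0)

struct my_lock {
	_Atomic uintptr_t owner;	/* plays the role of mtx_lock */
};

/*
 * Try to swing owner from *vp (expected UNOWNED) to tid.  On failure the
 * value actually observed is written back into *vp, mirroring
 * _mtx_obtain_lock_fetch(): the caller learns the current owner for free.
 */
static inline int
my_lock_try(struct my_lock *lp, uintptr_t *vp, uintptr_t tid)
{

	return (atomic_compare_exchange_strong(&lp->owner, vp, tid));
}

/*
 * Cold path, kept out of line: spin until the lock reads as free, then
 * retry the CAS.  `v' arrives pre-loaded from the failed fast-path CAS,
 * so no fresh read of the lock word is needed on entry.
 */
static void
my_lock_slow(struct my_lock *lp, uintptr_t v, uintptr_t tid)
{

	for (;;) {
		if (v == UNOWNED && my_lock_try(lp, &v, tid))
			return;
		v = atomic_load_explicit(&lp->owner, memory_order_relaxed);
	}
}

/* Fast path: one CAS inline; everything else lives in the cold function. */
static inline void
my_lock(struct my_lock *lp, uintptr_t tid)
{
	uintptr_t v = UNOWNED;

	if (!my_lock_try(lp, &v, tid))
		my_lock_slow(lp, v, tid);
}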