mtx: drop the tid argument from _mtx_lock_sleep

tid must be equal to curthread, and the target routine was already reading
curthread anyway, so deriving tid there adds no cost. Not passing it as a
parameter allows for slightly shorter code in callers.

MFC after:	1 week
Mateusz Guzik 2017-09-27 00:57:05 +00:00
parent a8462c582c
commit 2f1ddb89fc
2 changed files with 18 additions and 15 deletions
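
In outline, the slow-path routine now derives tid from curthread itself instead
of receiving it as an argument. Below is a minimal standalone sketch of that
calling-convention shift, assuming nothing from the FreeBSD tree; the
fake_thread/fake_curthread names are made up for illustration and merely stand
in for struct thread/curthread:

#include <stdint.h>

struct fake_thread { int dummy; };
static struct fake_thread thread0;
#define	fake_curthread	(&thread0)	/* stands in for curthread */

/* Before: each caller computed tid = (uintptr_t)curthread and passed it down. */
static void
lock_slow_old(volatile uintptr_t *c, uintptr_t v, uintptr_t tid)
{
	(void)c; (void)v; (void)tid;
}

/* After: the slow path derives td/tid locally; callers pass one argument fewer. */
static void
lock_slow_new(volatile uintptr_t *c, uintptr_t v)
{
	struct fake_thread *td = fake_curthread;
	uintptr_t tid = (uintptr_t)td;

	(void)c; (void)v; (void)tid;
}

int
main(void)
{
	volatile uintptr_t lock_word = 0;

	lock_slow_old(&lock_word, 0, (uintptr_t)fake_curthread);
	lock_slow_new(&lock_word, 0);
	return (0);
}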

sys/kern/kern_mutex.c

@@ -248,7 +248,7 @@ __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
 	tid = (uintptr_t)curthread;
 	v = MTX_UNOWNED;
 	if (!_mtx_obtain_lock_fetch(m, &v, tid))
-		_mtx_lock_sleep(m, v, tid, opts, file, line);
+		_mtx_lock_sleep(m, v, opts, file, line);
 	else
 		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
 		    m, 0, 0, file, line);
@@ -443,15 +443,17 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
  */
 #if LOCK_DEBUG > 0
 void
-__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
-    const char *file, int line)
+__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
+    int line)
 #else
 void
-__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid)
+__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
 #endif
 {
+	struct thread *td;
 	struct mtx *m;
 	struct turnstile *ts;
+	uintptr_t tid;
 #ifdef ADAPTIVE_MUTEXES
 	volatile struct thread *owner;
 #endif
@@ -473,8 +475,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid)
 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
 	int doing_lockprof;
 #endif
-	if (SCHEDULER_STOPPED())
+	td = curthread;
+	tid = (uintptr_t)td;
+	if (SCHEDULER_STOPPED_TD(td))
 		return;
 #if defined(ADAPTIVE_MUTEXES)
@@ -486,7 +489,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid)
 	if (__predict_false(v == MTX_UNOWNED))
 		v = MTX_READ_VALUE(m);
-	if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) {
+	if (__predict_false(lv_mtx_owner(v) == td)) {
 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
 		    (opts & MTX_RECURSE) != 0,
 	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",

sys/sys/mutex.h

@@ -99,12 +99,12 @@ int	_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
 	    int line);
 void	mutex_init(void);
 #if LOCK_DEBUG > 0
-void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
-	    int opts, const char *file, int line);
+void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
+	    const char *file, int line);
 void	__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file,
 	    int line);
 #else
-void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid);
+void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v);
 void	__mtx_unlock_sleep(volatile uintptr_t *c);
 #endif
@@ -147,13 +147,13 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
 #define	mtx_trylock_flags_(m, o, f, l)					\
 	_mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
 #if LOCK_DEBUG > 0
-#define	_mtx_lock_sleep(m, v, t, o, f, l)				\
-	__mtx_lock_sleep(&(m)->mtx_lock, v, t, o, f, l)
+#define	_mtx_lock_sleep(m, v, o, f, l)					\
+	__mtx_lock_sleep(&(m)->mtx_lock, v, o, f, l)
 #define	_mtx_unlock_sleep(m, o, f, l)					\
 	__mtx_unlock_sleep(&(m)->mtx_lock, o, f, l)
 #else
-#define	_mtx_lock_sleep(m, v, t, o, f, l)				\
-	__mtx_lock_sleep(&(m)->mtx_lock, v, t)
+#define	_mtx_lock_sleep(m, v, o, f, l)					\
+	__mtx_lock_sleep(&(m)->mtx_lock, v)
 #define	_mtx_unlock_sleep(m, o, f, l)					\
 	__mtx_unlock_sleep(&(m)->mtx_lock)
 #endif
@@ -208,7 +208,7 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
 									\
 	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) ||\
 	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
-		_mtx_lock_sleep((mp), _v, _tid, (opts), (file), (line));\
+		_mtx_lock_sleep((mp), _v, (opts), (file), (line));	\
 } while (0)
 /*