locks: push lock_delay_arg_init calls down

Minor cleanup so that the calls are skipped when recursing on locks and
so that they can act on the found lock value if need be.
Mateusz Guzik 2020-11-24 03:49:37 +00:00
parent 094c148b7a
commit f90d57b808
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=367978
4 changed files with 21 additions and 20 deletions
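
The four diffs below all make the same change: lock_delay_arg_init() used to run unconditionally near the top of each hard-lock slow path, and is now pushed down past the recursion handling, so a thread that merely recurses on a lock it already owns never initializes spin/backoff state it will not use, and the init runs after the current lock word has been read. A minimal, self-contained sketch of that pattern, using simplified stand-ins (delay_arg, lock_word, lock_hard) rather than the kernel API:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's lock word and delay state. */
struct delay_arg { int spin_limit; };

static void
delay_arg_init(struct delay_arg *da)
{
	da->spin_limit = 1;		/* backoff state seeded here */
}

static uintptr_t lock_word;		/* 0 == unlocked, otherwise owner id */

static void
lock_hard(uintptr_t self)
{
	struct delay_arg da;
	uintptr_t v;

	v = lock_word;			/* read the found lock value first */
	if (v == self) {
		/* Recursing on our own lock: we never spin, so no init. */
		printf("recursed, delay state never initialized\n");
		return;
	}
	delay_arg_init(&da);		/* only reached on the contended path */
	printf("contended, spin_limit=%d\n", da.spin_limit);
	/* ... a real implementation would spin/back off using 'da' ... */
}

int
main(void)
{
	lock_word = 1;			/* pretend thread 1 already holds it */
	lock_hard(1);			/* recursion path: init skipped */
	lock_hard(2);			/* contended path: init runs */
	return (0);
}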

sys/kern/kern_lock.c

@@ -603,10 +603,10 @@ lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
 	if (LK_CAN_WITNESS(flags))
 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
 		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
+	x = lockmgr_read_value(lk);
 	lock_delay_arg_init(&lda, &lockmgr_delay);
 	if (!lk_adaptive)
 		flags &= ~LK_ADAPTIVE;
-	x = lockmgr_read_value(lk);
 	/*
 	 * The lock may already be locked exclusive by curthread,
 	 * avoid deadlock.
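
This is the one hunk where the read of the lock word is also hoisted above lock_delay_arg_init(), which is what makes it possible for the init to "act on the found lock value if need be". A hypothetical illustration of that idea, assuming a value-aware init variant that does not exist in the tree:

#include <stdint.h>

struct delay_arg { int spin_limit; };

/*
 * Hypothetical value-aware init, purely for illustration: seed the
 * backoff state differently depending on what the lock word looked
 * like on arrival. Nothing like this exists in the tree today; the
 * commit only makes the value available before the init runs.
 */
static void
delay_arg_init_from(struct delay_arg *da, uintptr_t v)
{
	da->spin_limit = (v == 0) ? 1 : 64;	/* e.g. spin harder when owned */
}

int
main(void)
{
	struct delay_arg da;
	uintptr_t v = 0;	/* stand-in for x = lockmgr_read_value(lk) */

	delay_arg_init_from(&da, v);
	return (da.spin_limit == 1 ? 0 : 1);
}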

sys/kern/kern_mutex.c

@@ -535,12 +535,6 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
 	if (SCHEDULER_STOPPED_TD(td))
 		return;
-#if defined(ADAPTIVE_MUTEXES)
-	lock_delay_arg_init(&lda, &mtx_delay);
-#elif defined(KDTRACE_HOOKS)
-	lock_delay_arg_init_noadapt(&lda);
-#endif
 	if (__predict_false(v == MTX_UNOWNED))
 		v = MTX_READ_VALUE(m);
@@ -562,6 +556,12 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
 	opts &= ~MTX_RECURSE;
 #endif
+#if defined(ADAPTIVE_MUTEXES)
+	lock_delay_arg_init(&lda, &mtx_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init_noadapt(&lda);
+#endif
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -746,13 +746,13 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
 	if (SCHEDULER_STOPPED())
 		return;
-	lock_delay_arg_init(&lda, &mtx_spin_delay);
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
 	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
+	lock_delay_arg_init(&lda, &mtx_spin_delay);
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif

sys/kern/kern_rwlock.c

@@ -948,11 +948,6 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 	if (SCHEDULER_STOPPED())
 		return;
-#if defined(ADAPTIVE_RWLOCKS)
-	lock_delay_arg_init(&lda, &rw_delay);
-#elif defined(KDTRACE_HOOKS)
-	lock_delay_arg_init_noadapt(&lda);
-#endif
 	if (__predict_false(v == RW_UNLOCKED))
 		v = RW_READ_VALUE(rw);
@@ -971,6 +966,12 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
 		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
+#if defined(ADAPTIVE_RWLOCKS)
+	lock_delay_arg_init(&lda, &rw_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init_noadapt(&lda);
+#endif
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif

sys/kern/kern_sx.c

@@ -620,12 +620,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 	if (SCHEDULER_STOPPED())
 		return (0);
-#if defined(ADAPTIVE_SX)
-	lock_delay_arg_init(&lda, &sx_delay);
-#elif defined(KDTRACE_HOOKS)
-	lock_delay_arg_init_noadapt(&lda);
-#endif
 	if (__predict_false(x == SX_LOCK_UNLOCKED))
 		x = SX_READ_VALUE(sx);
@@ -645,6 +639,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
 		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
+#if defined(ADAPTIVE_SX)
+	lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init_noadapt(&lda);
+#endif
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
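
For context on why the placement matters: the structure these calls initialize is only consumed by the spin/backoff loop on the contended path, so paths that return before ever spinning (stopped scheduler, recursion on an already-owned lock) pay for the init for nothing. A rough, self-contained model of that consumer, assuming simplified stand-in names (delay_arg_init, delay_step) rather than the real lock_delay() machinery:

#include <stdatomic.h>
#include <stdint.h>

struct delay_arg {
	int spin_cnt;	/* total time spent spinning so far */
	int delay;	/* current backoff, doubled up to a cap */
};

static void
delay_arg_init(struct delay_arg *da)
{
	da->spin_cnt = 0;
	da->delay = 1;
}

/* One backoff step, loosely modeled on the lock_delay() idea. */
static void
delay_step(struct delay_arg *da)
{
	volatile int sink = 0;

	if (da->delay < 1024)
		da->delay <<= 1;
	for (int i = 0; i < da->delay; i++)
		sink++;		/* burn a few cycles before retrying */
	da->spin_cnt += da->delay;
}

static _Atomic uintptr_t lock_word;

static void
lock_contended(uintptr_t self)
{
	struct delay_arg da;
	uintptr_t expected;

	delay_arg_init(&da);	/* cheap, but wasted on paths that never spin */
	for (;;) {
		expected = 0;
		if (atomic_compare_exchange_weak(&lock_word, &expected, self))
			return;		/* acquired */
		delay_step(&da);	/* back off, then try again */
	}
}

int
main(void)
{
	lock_contended(1);
	return (0);
}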