locks: fix compilation for KDTRACE_HOOKS && !ADAPTIVE_* case

Reported by:	Michael Butler <imb protected-networks.net>
Author:	Mateusz Guzik
Date:	2016-08-02 03:05:59 +00:00
parent	0412689595
commit	fa5000a4f3
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=303655
3 changed files with 15 additions and 5 deletions
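Background, as far as the diff itself shows: the per-lock delay tunables (mtx_delay, rw_delay, sx_delay) are presumably declared only under the corresponding ADAPTIVE_* option, so a kernel built with KDTRACE_HOOKS but without adaptive spinning took the old combined #if branch and referenced an undeclared symbol, breaking the build. Initializing the delay argument with a NULL config is enough for the DTrace spin accounting. Below is a minimal, userland-compilable sketch of that pattern; the struct fields, initializer values, and the inline body are assumptions for illustration, only the config/arg split and lock_delay_arg_init() mirror the code touched by this commit.

/*
 * Sketch only: field names and values are assumed, not copied from sys/.
 */
typedef unsigned int u_int;	/* stand-in for the kernel type */

struct lock_delay_config {
	u_int	initial;
	u_int	step;
	u_int	min;
	u_int	max;
};

struct lock_delay_arg {
	struct lock_delay_config *config;
	u_int	delay;
	u_int	spin_cnt;	/* the KDTRACE_HOOKS probes only need this */
};

#ifdef ADAPTIVE_MUTEXES
/* The tunable is only compiled into adaptive-spin kernels... */
static struct lock_delay_config mtx_delay = {
	.initial = 1000, .step = 500, .min = 100, .max = 5000,
};
#endif

static inline void
lock_delay_arg_init(struct lock_delay_arg *la, struct lock_delay_config *lc)
{
	la->config = lc;	/* NULL: no adaptive back-off, counters only */
	la->delay = 0;
	la->spin_cnt = 0;
}

/*
 * ...so a KDTRACE_HOOKS && !ADAPTIVE_MUTEXES build taking the old combined
 * branch referenced &mtx_delay, which does not exist there.  Passing NULL,
 * as the diff does, keeps the spin counter working without the tunable.
 */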

sys/kern/kern_mutex.c

@@ -452,8 +452,10 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
 	if (SCHEDULER_STOPPED())
 		return;
 
-#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
+#if defined(ADAPTIVE_MUTEXES)
 	lock_delay_arg_init(&lda, &mtx_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init(&lda, NULL);
 #endif
 	m = mtxlock2mtx(c);

sys/kern/kern_rwlock.c

@@ -396,8 +396,10 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return;
 
-#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
+#if defined(ADAPTIVE_RWLOCKS)
 	lock_delay_arg_init(&lda, &rw_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init(&lda, NULL);
 #endif
 	rw = rwlock2rw(c);
@@ -782,8 +784,10 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
 	if (SCHEDULER_STOPPED())
 		return;
 
-#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
+#if defined(ADAPTIVE_RWLOCKS)
 	lock_delay_arg_init(&lda, &rw_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init(&lda, NULL);
 #endif
 	rw = rwlock2rw(c);

sys/kern/kern_sx.c

@@ -554,8 +554,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 	if (SCHEDULER_STOPPED())
 		return (0);
 
-#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
+#if defined(ADAPTIVE_SX)
 	lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init(&lda, NULL);
 #endif
 	/* If we already hold an exclusive lock, then recurse. */
@@ -861,8 +863,10 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return (0);
 
-#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
+#if defined(ADAPTIVE_SX)
 	lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init(&lda, NULL);
 #endif
 #ifdef KDTRACE_HOOKS
 	state = sx->sx_lock;