Rename the 'mtx_object', 'rw_object', and 'sx_object' members of mutexes,
rwlocks, and sx locks to 'lock_object'.
John Baldwin 2007-03-21 21:20:51 +00:00
parent 503916a7c1
commit aa89d8cd52
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=167787
18 changed files with 237 additions and 237 deletions
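
The point of the rename is uniformity: all three lock types now embed their common 'struct lock_object' under the same member name, so generic facilities such as witness, lock profiling, sleep queues, and turnstiles can write '&lk->lock_object' without knowing whether 'lk' is a mutex, rwlock, or sx lock. The self-contained userland sketch below only mirrors that idea; the toy structures, the reduced field set, and the describe_lock() helper are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>

/* Toy stand-ins for the kernel structures touched by this commit. */
struct lock_object {
	const char *lo_name;		/* lock name, as with the kernel's lo_name */
};

struct mtx {
	struct lock_object lock_object;	/* was 'mtx_object' before this commit */
	unsigned long mtx_lock;
};

struct rwlock {
	struct lock_object lock_object;	/* was 'rw_object' */
	unsigned long rw_lock;
};

struct sx {
	struct lock_object lock_object;	/* was 'sx_object' */
	int sx_cnt;
};

/* Generic code needs only the embedded lock_object, whatever the lock type. */
static void
describe_lock(struct lock_object *lo)
{
	printf("lock \"%s\"\n", lo->lo_name);
}

int
main(void)
{
	struct mtx m = { { "Giant" }, 0 };
	struct rwlock rw = { { "example rw" }, 0 };
	struct sx sx = { { "proctree" }, 0 };

	/* One spelling for every lock type: &lk->lock_object. */
	describe_lock(&m.lock_object);
	describe_lock(&rw.lock_object);
	describe_lock(&sx.lock_object);
	return (0);
}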


@ -104,9 +104,9 @@ cv_wait(struct cv *cvp, struct mtx *mp)
ktrcsw(1, 0);
#endif
CV_ASSERT(cvp, mp, td);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->lock_object,
"Waiting on \"%s\"", cvp->cv_description);
WITNESS_SAVE(&mp->mtx_object, mp);
WITNESS_SAVE(&mp->lock_object, mp);
if (cold || panicstr) {
/*
@ -124,7 +124,7 @@ cv_wait(struct cv *cvp, struct mtx *mp)
DROP_GIANT();
mtx_unlock(mp);
sleepq_add(cvp, &mp->mtx_object, cvp->cv_description, SLEEPQ_CONDVAR,
sleepq_add(cvp, &mp->lock_object, cvp->cv_description, SLEEPQ_CONDVAR,
0);
sleepq_wait(cvp);
@ -134,7 +134,7 @@ cv_wait(struct cv *cvp, struct mtx *mp)
#endif
PICKUP_GIANT();
mtx_lock(mp);
WITNESS_RESTORE(&mp->mtx_object, mp);
WITNESS_RESTORE(&mp->lock_object, mp);
}
/*
@ -152,7 +152,7 @@ cv_wait_unlock(struct cv *cvp, struct mtx *mp)
ktrcsw(1, 0);
#endif
CV_ASSERT(cvp, mp, td);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->lock_object,
"Waiting on \"%s\"", cvp->cv_description);
if (cold || panicstr) {
@ -172,7 +172,7 @@ cv_wait_unlock(struct cv *cvp, struct mtx *mp)
DROP_GIANT();
mtx_unlock(mp);
sleepq_add(cvp, &mp->mtx_object, cvp->cv_description, SLEEPQ_CONDVAR,
sleepq_add(cvp, &mp->lock_object, cvp->cv_description, SLEEPQ_CONDVAR,
0);
sleepq_wait(cvp);
@ -204,9 +204,9 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
ktrcsw(1, 0);
#endif
CV_ASSERT(cvp, mp, td);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->lock_object,
"Waiting on \"%s\"", cvp->cv_description);
WITNESS_SAVE(&mp->mtx_object, mp);
WITNESS_SAVE(&mp->lock_object, mp);
if (cold || panicstr) {
/*
@ -224,7 +224,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
DROP_GIANT();
mtx_unlock(mp);
sleepq_add(cvp, &mp->mtx_object, cvp->cv_description, SLEEPQ_CONDVAR |
sleepq_add(cvp, &mp->lock_object, cvp->cv_description, SLEEPQ_CONDVAR |
SLEEPQ_INTERRUPTIBLE, 0);
rval = sleepq_wait_sig(cvp);
@ -234,7 +234,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
#endif
PICKUP_GIANT();
mtx_lock(mp);
WITNESS_RESTORE(&mp->mtx_object, mp);
WITNESS_RESTORE(&mp->lock_object, mp);
return (rval);
}
@ -258,9 +258,9 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
ktrcsw(1, 0);
#endif
CV_ASSERT(cvp, mp, td);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->lock_object,
"Waiting on \"%s\"", cvp->cv_description);
WITNESS_SAVE(&mp->mtx_object, mp);
WITNESS_SAVE(&mp->lock_object, mp);
if (cold || panicstr) {
/*
@ -278,7 +278,7 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
DROP_GIANT();
mtx_unlock(mp);
sleepq_add(cvp, &mp->mtx_object, cvp->cv_description, SLEEPQ_CONDVAR,
sleepq_add(cvp, &mp->lock_object, cvp->cv_description, SLEEPQ_CONDVAR,
0);
sleepq_set_timeout(cvp, timo);
rval = sleepq_timedwait(cvp);
@ -289,7 +289,7 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
#endif
PICKUP_GIANT();
mtx_lock(mp);
WITNESS_RESTORE(&mp->mtx_object, mp);
WITNESS_RESTORE(&mp->lock_object, mp);
return (rval);
}
@ -316,9 +316,9 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
ktrcsw(1, 0);
#endif
CV_ASSERT(cvp, mp, td);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->lock_object,
"Waiting on \"%s\"", cvp->cv_description);
WITNESS_SAVE(&mp->mtx_object, mp);
WITNESS_SAVE(&mp->lock_object, mp);
if (cold || panicstr) {
/*
@ -336,7 +336,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
DROP_GIANT();
mtx_unlock(mp);
sleepq_add(cvp, &mp->mtx_object, cvp->cv_description, SLEEPQ_CONDVAR |
sleepq_add(cvp, &mp->lock_object, cvp->cv_description, SLEEPQ_CONDVAR |
SLEEPQ_INTERRUPTIBLE, 0);
sleepq_set_timeout(cvp, timo);
rval = sleepq_timedwait_sig(cvp);
@ -347,7 +347,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
#endif
PICKUP_GIANT();
mtx_lock(mp);
WITNESS_RESTORE(&mp->mtx_object, mp);
WITNESS_RESTORE(&mp->lock_object, mp);
return (rval);
}


@ -495,7 +495,7 @@ exit1(struct thread *td, int rv)
*/
cpu_exit(td);
WITNESS_WARN(WARN_PANIC, &proctree_lock.sx_object,
WITNESS_WARN(WARN_PANIC, &proctree_lock.lock_object,
"process (pid %d) exiting", p->p_pid);
PROC_LOCK(p);


@ -1253,7 +1253,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
PROC_LOCK(p);
if (p->p_upsleeps)
wakeup(&p->p_completed);
WITNESS_WARN(WARN_PANIC, &p->p_mtx.mtx_object,
WITNESS_WARN(WARN_PANIC, &p->p_mtx.lock_object,
"thread exiting in userret");
sigqueue_flush(&td->td_sigqueue);
mtx_lock_spin(&sched_lock);


@ -217,7 +217,7 @@ _lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
&lkp->lk_interlock->mtx_object,
&lkp->lk_interlock->lock_object,
"Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);
if (panicstr != NULL) {


@ -181,16 +181,16 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_lock() of destroyed mutex @ %s:%d", file, line));
KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
file, line));
WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
file, line);
_get_sleep_lock(m, curthread, opts, file, line);
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
line);
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
}
@ -200,16 +200,16 @@ _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
file, line));
curthread->td_locks--;
WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
line);
mtx_assert(m, MA_OWNED);
lock_profile_release_lock(&m->mtx_object);
lock_profile_release_lock(&m->lock_object);
_rel_sleep_lock(m, curthread, opts, file, line);
}
@ -220,15 +220,15 @@ _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
("mtx_lock_spin() of sleep mutex %s @ %s:%d",
m->mtx_object.lo_name, file, line));
WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
m->lock_object.lo_name, file, line));
WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
file, line);
_get_spin_lock(m, curthread, opts, file, line);
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
line);
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
void
@ -238,15 +238,15 @@ _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
m->mtx_object.lo_name, file, line));
WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
m->lock_object.lo_name, file, line));
WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
line);
mtx_assert(m, MA_OWNED);
lock_profile_release_lock(&m->mtx_object);
lock_profile_release_lock(&m->lock_object);
_rel_spin_lock(m);
}
@ -264,24 +264,24 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
file, line));
if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
m->mtx_recurse++;
atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
rval = 1;
} else
rval = _obtain_lock(m, (uintptr_t)curthread);
LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
if (rval) {
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
curthread->td_locks++;
if (m->mtx_recurse == 0)
lock_profile_obtain_lock_success(&m->mtx_object, contested,
lock_profile_obtain_lock_success(&m->lock_object, contested,
waittime, file, line);
}
@ -308,23 +308,23 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
uintptr_t v;
if (mtx_owned(m)) {
KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
m->mtx_object.lo_name, file, line));
m->lock_object.lo_name, file, line));
m->mtx_recurse++;
atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
return;
}
if (LOCK_LOG_TEST(&m->mtx_object, opts))
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR4(KTR_LOCK,
"_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
while (!_obtain_lock(m, tid)) {
turnstile_lock(&m->mtx_object);
turnstile_lock(&m->lock_object);
v = m->mtx_lock;
/*
@ -332,7 +332,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
* the turnstile chain lock.
*/
if (v == MTX_UNOWNED) {
turnstile_release(&m->mtx_object);
turnstile_release(&m->lock_object);
cpu_spinwait();
continue;
}
@ -348,7 +348,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
*/
if (v == MTX_CONTESTED) {
m->mtx_lock = tid | MTX_CONTESTED;
turnstile_claim(&m->mtx_object);
turnstile_claim(&m->lock_object);
break;
}
#endif
@ -360,7 +360,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
*/
if ((v & MTX_CONTESTED) == 0 &&
!atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
turnstile_release(&m->mtx_object);
turnstile_release(&m->lock_object);
cpu_spinwait();
continue;
}
@ -377,7 +377,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
if (m != &Giant && TD_IS_RUNNING(owner))
#endif
{
turnstile_release(&m->mtx_object);
turnstile_release(&m->lock_object);
while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
cpu_spinwait();
}
@ -394,9 +394,9 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
if (!cont_logged) {
CTR6(KTR_CONTENTION,
"contention: %p at %s:%d wants %s, taken by %s:%d",
(void *)tid, file, line, m->mtx_object.lo_name,
WITNESS_FILE(&m->mtx_object),
WITNESS_LINE(&m->mtx_object));
(void *)tid, file, line, m->lock_object.lo_name,
WITNESS_FILE(&m->lock_object),
WITNESS_LINE(&m->lock_object));
cont_logged = 1;
}
#endif
@ -404,14 +404,14 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
/*
* Block on the turnstile.
*/
turnstile_wait(&m->mtx_object, mtx_owner(m),
turnstile_wait(&m->lock_object, mtx_owner(m),
TS_EXCLUSIVE_QUEUE);
}
#ifdef KTR
if (cont_logged) {
CTR4(KTR_CONTENTION,
"contention end: %s acquired by %p at %s:%d",
m->mtx_object.lo_name, (void *)tid, file, line);
m->lock_object.lo_name, (void *)tid, file, line);
}
#endif
return;
@ -431,7 +431,7 @@ _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
int i = 0;
struct thread *td;
if (LOCK_LOG_TEST(&m->mtx_object, opts))
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
while (!_obtain_lock(m, tid)) {
@ -453,9 +453,9 @@ _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
continue;
printf(
"spin lock %p (%s) held by %p (tid %d) too long\n",
m, m->mtx_object.lo_name, td, td->td_tid);
m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
witness_display_spinlock(&m->mtx_object, td);
witness_display_spinlock(&m->lock_object, td);
#endif
panic("spin lock held too long");
}
@ -464,7 +464,7 @@ _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
spinlock_enter();
}
if (LOCK_LOG_TEST(&m->mtx_object, opts))
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
return;
@ -488,22 +488,22 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
if (mtx_recursed(m)) {
if (--(m->mtx_recurse) == 0)
atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
return;
}
turnstile_lock(&m->mtx_object);
ts = turnstile_lookup(&m->mtx_object);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
turnstile_lock(&m->lock_object);
ts = turnstile_lookup(&m->lock_object);
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
if (ts == NULL) {
_release_lock_quick(m);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
turnstile_release(&m->mtx_object);
turnstile_release(&m->lock_object);
return;
}
#else
@ -519,11 +519,11 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
#else
if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
_release_lock_quick(m);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
} else {
m->mtx_lock = MTX_CONTESTED;
if (LOCK_LOG_TEST(&m->mtx_object, opts))
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
m);
}
@ -548,7 +548,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
struct ithd *it = td->td_ithd;
if (it->it_interrupted) {
if (LOCK_LOG_TEST(&m->mtx_object, opts))
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR2(KTR_LOCK,
"_mtx_unlock_sleep: %p interrupted %p",
it, it->it_interrupted);
@ -556,13 +556,13 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
}
}
#endif
if (LOCK_LOG_TEST(&m->mtx_object, opts))
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR2(KTR_LOCK,
"_mtx_unlock_sleep: %p switching out lock=%p", m,
(void *)m->mtx_lock);
mi_switch(SW_INVOL, NULL);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
m, (void *)m->mtx_lock);
}
@ -593,20 +593,20 @@ _mtx_assert(struct mtx *m, int what, const char *file, int line)
case MA_OWNED | MA_NOTRECURSED:
if (!mtx_owned(m))
panic("mutex %s not owned at %s:%d",
m->mtx_object.lo_name, file, line);
m->lock_object.lo_name, file, line);
if (mtx_recursed(m)) {
if ((what & MA_NOTRECURSED) != 0)
panic("mutex %s recursed at %s:%d",
m->mtx_object.lo_name, file, line);
m->lock_object.lo_name, file, line);
} else if ((what & MA_RECURSED) != 0) {
panic("mutex %s unrecursed at %s:%d",
m->mtx_object.lo_name, file, line);
m->lock_object.lo_name, file, line);
}
break;
case MA_NOTOWNED:
if (mtx_owned(m))
panic("mutex %s owned at %s:%d",
m->mtx_object.lo_name, file, line);
m->lock_object.lo_name, file, line);
break;
default:
panic("unknown mtx_assert at %s:%d", file, line);
@ -697,8 +697,8 @@ mtx_init(struct mtx *m, const char *name, const char *type, int opts)
m->mtx_lock = MTX_UNOWNED;
m->mtx_recurse = 0;
lock_profile_object_init(&m->mtx_object, class, name);
lock_init(&m->mtx_object, class, name, type, flags);
lock_profile_object_init(&m->lock_object, class, name);
lock_init(&m->lock_object, class, name, type, flags);
}
/*
@ -717,19 +717,19 @@ mtx_destroy(struct mtx *m)
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
/* Perform the non-mtx related part of mtx_unlock_spin(). */
if (LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin)
if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
spinlock_exit();
else
curthread->td_locks--;
/* Tell witness this isn't locked to make it happy. */
WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
__LINE__);
}
m->mtx_lock = MTX_DESTROYED;
lock_profile_object_destroy(&m->mtx_object);
lock_destroy(&m->mtx_object);
lock_profile_object_destroy(&m->lock_object);
lock_destroy(&m->lock_object);
}
/*
@ -770,9 +770,9 @@ db_show_mtx(struct lock_object *lock)
db_printf("SPIN");
else
db_printf("DEF");
if (m->mtx_object.lo_flags & LO_RECURSABLE)
if (m->lock_object.lo_flags & LO_RECURSABLE)
db_printf(", RECURSE");
if (m->mtx_object.lo_flags & LO_DUPOK)
if (m->lock_object.lo_flags & LO_DUPOK)
db_printf(", DUPOK");
db_printf("}\n");
db_printf(" state: {");


@ -118,8 +118,8 @@ rw_init(struct rwlock *rw, const char *name)
rw->rw_lock = RW_UNLOCKED;
lock_profile_object_init(&rw->rw_object, &lock_class_rw, name);
lock_init(&rw->rw_object, &lock_class_rw, name, NULL, LO_WITNESS |
lock_profile_object_init(&rw->lock_object, &lock_class_rw, name);
lock_init(&rw->lock_object, &lock_class_rw, name, NULL, LO_WITNESS |
LO_RECURSABLE | LO_UPGRADABLE);
}
@ -128,8 +128,8 @@ rw_destroy(struct rwlock *rw)
{
KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
lock_profile_object_destroy(&rw->rw_object);
lock_destroy(&rw->rw_object);
lock_profile_object_destroy(&rw->lock_object);
lock_destroy(&rw->lock_object);
}
void
@ -154,12 +154,12 @@ _rw_wlock(struct rwlock *rw, const char *file, int line)
MPASS(curthread != NULL);
KASSERT(rw_wowner(rw) != curthread,
("%s (%s): wlock already held @ %s:%d", __func__,
rw->rw_object.lo_name, file, line));
WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
rw->lock_object.lo_name, file, line));
WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
line);
__rw_wlock(rw, curthread, file, line);
LOCK_LOG_LOCK("WLOCK", &rw->rw_object, 0, 0, file, line);
WITNESS_LOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, 0, file, line);
WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
}
@ -170,9 +170,9 @@ _rw_wunlock(struct rwlock *rw, const char *file, int line)
MPASS(curthread != NULL);
_rw_assert(rw, RA_WLOCKED, file, line);
curthread->td_locks--;
WITNESS_UNLOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("WUNLOCK", &rw->rw_object, 0, 0, file, line);
lock_profile_release_lock(&rw->rw_object);
WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, 0, file, line);
lock_profile_release_lock(&rw->lock_object);
__rw_wunlock(rw, curthread, file, line);
}
@ -188,8 +188,8 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
KASSERT(rw_wowner(rw) != curthread,
("%s (%s): wlock already held @ %s:%d", __func__,
rw->rw_object.lo_name, file, line));
WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER, file, line);
rw->lock_object.lo_name, file, line));
WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);
/*
* Note that we don't make any attempt to try to block read
@ -224,21 +224,21 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
MPASS((x & RW_LOCK_READ_WAITERS) == 0);
if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
x + RW_ONE_READER)) {
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR4(KTR_LOCK,
"%s: %p succeed %p -> %p", __func__,
rw, (void *)x,
(void *)(x + RW_ONE_READER));
if (RW_READERS(x) == 0)
lock_profile_obtain_lock_success(
&rw->rw_object, contested, waittime,
&rw->lock_object, contested, waittime,
file, line);
break;
}
cpu_spinwait();
continue;
}
lock_profile_obtain_lock_failed(&rw->rw_object, &contested,
lock_profile_obtain_lock_failed(&rw->lock_object, &contested,
&waittime);
/*
@ -246,7 +246,7 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
* has a write lock, so acquire the turnstile lock so we can
* begin the process of blocking.
*/
turnstile_lock(&rw->rw_object);
turnstile_lock(&rw->lock_object);
/*
* The lock might have been released while we spun, so
@ -255,7 +255,7 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
*/
x = rw->rw_lock;
if (x & RW_LOCK_READ) {
turnstile_release(&rw->rw_object);
turnstile_release(&rw->lock_object);
cpu_spinwait();
continue;
}
@ -269,11 +269,11 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
if (!(x & RW_LOCK_READ_WAITERS)) {
if (!atomic_cmpset_ptr(&rw->rw_lock, x,
x | RW_LOCK_READ_WAITERS)) {
turnstile_release(&rw->rw_object);
turnstile_release(&rw->lock_object);
cpu_spinwait();
continue;
}
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p set read waiters flag",
__func__, rw);
}
@ -286,8 +286,8 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
*/
owner = (struct thread *)RW_OWNER(x);
if (TD_IS_RUNNING(owner)) {
turnstile_release(&rw->rw_object);
if (LOCK_LOG_TEST(&rw->rw_object, 0))
turnstile_release(&rw->lock_object);
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
__func__, rw, owner);
while ((struct thread*)RW_OWNER(rw->rw_lock)== owner &&
@ -301,11 +301,11 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
* We were unable to acquire the lock and the read waiters
* flag is set, so we must block on the turnstile.
*/
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
rw);
turnstile_wait(&rw->rw_object, rw_owner(rw), TS_SHARED_QUEUE);
if (LOCK_LOG_TEST(&rw->rw_object, 0))
turnstile_wait(&rw->lock_object, rw_owner(rw), TS_SHARED_QUEUE);
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
__func__, rw);
}
@ -316,8 +316,8 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
* turnstile_wait() currently.
*/
LOCK_LOG_LOCK("RLOCK", &rw->rw_object, 0, 0, file, line);
WITNESS_LOCK(&rw->rw_object, 0, file, line);
LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
WITNESS_LOCK(&rw->lock_object, 0, file, line);
curthread->td_locks++;
}
@ -329,8 +329,8 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
_rw_assert(rw, RA_RLOCKED, file, line);
curthread->td_locks--;
WITNESS_UNLOCK(&rw->rw_object, 0, file, line);
LOCK_LOG_LOCK("RUNLOCK", &rw->rw_object, 0, 0, file, line);
WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);
/* TODO: drop "owner of record" here. */
@ -343,7 +343,7 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
if (RW_READERS(x) > 1) {
if (atomic_cmpset_ptr(&rw->rw_lock, x,
x - RW_ONE_READER)) {
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR4(KTR_LOCK,
"%s: %p succeeded %p -> %p",
__func__, rw, (void *)x,
@ -377,7 +377,7 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
MPASS(x == RW_READERS_LOCK(1));
if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
RW_UNLOCKED)) {
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p last succeeded",
__func__, rw);
break;
@ -395,7 +395,7 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
* Ok, we know we have a waiting writer and we think we
* are the last reader, so grab the turnstile lock.
*/
turnstile_lock(&rw->rw_object);
turnstile_lock(&rw->lock_object);
/*
* Try to drop our lock leaving the lock in a unlocked
@ -415,10 +415,10 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
*/
if (!atomic_cmpset_ptr(&rw->rw_lock,
RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
turnstile_release(&rw->rw_object);
turnstile_release(&rw->lock_object);
continue;
}
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
__func__, rw);
@ -429,13 +429,13 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
* block again if they run before the new lock holder(s)
* release the lock.
*/
ts = turnstile_lookup(&rw->rw_object);
ts = turnstile_lookup(&rw->lock_object);
MPASS(ts != NULL);
turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
turnstile_unpend(ts, TS_SHARED_LOCK);
break;
}
lock_profile_release_lock(&rw->rw_object);
lock_profile_release_lock(&rw->lock_object);
}
/*
@ -451,12 +451,12 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
#endif
uintptr_t v;
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
rw->rw_object.lo_name, (void *)rw->rw_lock, file, line);
rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
while (!_rw_write_lock(rw, tid)) {
turnstile_lock(&rw->rw_object);
turnstile_lock(&rw->lock_object);
v = rw->rw_lock;
/*
@ -464,7 +464,7 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
* turnstile chain lock, try again.
*/
if (v == RW_UNLOCKED) {
turnstile_release(&rw->rw_object);
turnstile_release(&rw->lock_object);
cpu_spinwait();
continue;
}
@ -483,12 +483,12 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
if (atomic_cmpset_acq_ptr(&rw->rw_lock,
RW_UNLOCKED | RW_LOCK_WRITE_WAITERS,
tid | RW_LOCK_WRITE_WAITERS)) {
turnstile_claim(&rw->rw_object);
turnstile_claim(&rw->lock_object);
CTR2(KTR_LOCK, "%s: %p claimed by new writer",
__func__, rw);
break;
}
turnstile_release(&rw->rw_object);
turnstile_release(&rw->lock_object);
cpu_spinwait();
continue;
}
@ -501,11 +501,11 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
if (!(v & RW_LOCK_WRITE_WAITERS)) {
if (!atomic_cmpset_ptr(&rw->rw_lock, v,
v | RW_LOCK_WRITE_WAITERS)) {
turnstile_release(&rw->rw_object);
turnstile_release(&rw->lock_object);
cpu_spinwait();
continue;
}
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p set write waiters flag",
__func__, rw);
}
@ -518,8 +518,8 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
*/
owner = (struct thread *)RW_OWNER(v);
if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
turnstile_release(&rw->rw_object);
if (LOCK_LOG_TEST(&rw->rw_object, 0))
turnstile_release(&rw->lock_object);
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
__func__, rw, owner);
while ((struct thread*)RW_OWNER(rw->rw_lock)== owner &&
@ -533,12 +533,12 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
* We were unable to acquire the lock and the write waiters
* flag is set, so we must block on the turnstile.
*/
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
rw);
turnstile_wait(&rw->rw_object, rw_owner(rw),
turnstile_wait(&rw->lock_object, rw_owner(rw),
TS_EXCLUSIVE_QUEUE);
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
__func__, rw);
}
@ -559,11 +559,11 @@ _rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
("%s: neither of the waiter flags are set", __func__));
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);
turnstile_lock(&rw->rw_object);
ts = turnstile_lookup(&rw->rw_object);
turnstile_lock(&rw->lock_object);
ts = turnstile_lookup(&rw->lock_object);
#ifdef SMP
/*
@ -573,9 +573,9 @@ _rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
*/
if (ts == NULL) {
atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p no sleepers", __func__, rw);
turnstile_release(&rw->rw_object);
turnstile_release(&rw->lock_object);
return;
}
#else
@ -624,7 +624,7 @@ _rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
* disown the turnstile and return.
*/
if (turnstile_empty(ts, queue)) {
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
atomic_store_rel_ptr(&rw->rw_lock, v);
turnstile_disown(ts);
@ -633,7 +633,7 @@ _rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
#endif
/* Wake up all waiters for the specific queue. */
if (LOCK_LOG_TEST(&rw->rw_object, 0))
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
queue == TS_SHARED_QUEUE ? "read" : "write");
turnstile_broadcast(ts, queue);
@ -672,7 +672,7 @@ _rw_try_upgrade(struct rwlock *rw, const char *file, int line)
* Ok, we think we have write waiters, so lock the
* turnstile.
*/
turnstile_lock(&rw->rw_object);
turnstile_lock(&rw->lock_object);
/*
* Try to switch from one reader to a writer again. This time
@ -686,17 +686,17 @@ _rw_try_upgrade(struct rwlock *rw, const char *file, int line)
success = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
tid | v);
#ifdef SMP
if (success && v && turnstile_lookup(&rw->rw_object) != NULL)
if (success && v && turnstile_lookup(&rw->lock_object) != NULL)
#else
if (success && v)
#endif
turnstile_claim(&rw->rw_object);
turnstile_claim(&rw->lock_object);
else
turnstile_release(&rw->rw_object);
turnstile_release(&rw->lock_object);
out:
LOCK_LOG_TRY("WUPGRADE", &rw->rw_object, 0, success, file, line);
LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
if (success)
WITNESS_UPGRADE(&rw->rw_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
return (success);
}
@ -712,7 +712,7 @@ _rw_downgrade(struct rwlock *rw, const char *file, int line)
_rw_assert(rw, RA_WLOCKED, file, line);
WITNESS_DOWNGRADE(&rw->rw_object, 0, file, line);
WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);
/*
* Convert from a writer to a single reader. First we handle
@ -728,7 +728,7 @@ _rw_downgrade(struct rwlock *rw, const char *file, int line)
* Ok, we think we have waiters, so lock the turnstile so we can
* read the waiter flags without any races.
*/
turnstile_lock(&rw->rw_object);
turnstile_lock(&rw->lock_object);
v = rw->rw_lock;
MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));
@ -743,7 +743,7 @@ _rw_downgrade(struct rwlock *rw, const char *file, int line)
* the RW_LOCK_WRITE_WAITERS flag if at least one writer is
* blocked on the turnstile.
*/
ts = turnstile_lookup(&rw->rw_object);
ts = turnstile_lookup(&rw->lock_object);
#ifdef SMP
if (ts == NULL)
v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
@ -764,12 +764,12 @@ _rw_downgrade(struct rwlock *rw, const char *file, int line)
turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
#ifdef SMP
else if (ts == NULL)
turnstile_release(&rw->rw_object);
turnstile_release(&rw->lock_object);
#endif
else
turnstile_disown(ts);
out:
LOCK_LOG_LOCK("WDOWNGRADE", &rw->rw_object, 0, 0, file, line);
LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}
#ifdef INVARIANT_SUPPORT
@ -793,7 +793,7 @@ _rw_assert(struct rwlock *rw, int what, const char *file, int line)
case RA_LOCKED | LA_NOTRECURSED:
case RA_RLOCKED:
#ifdef WITNESS
witness_assert(&rw->rw_object, what, file, line);
witness_assert(&rw->lock_object, what, file, line);
#else
/*
* If some other thread has a write lock or we have one
@ -804,18 +804,18 @@ _rw_assert(struct rwlock *rw, int what, const char *file, int line)
(!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
rw_wowner(rw) != curthread)))
panic("Lock %s not %slocked @ %s:%d\n",
rw->rw_object.lo_name, (what == RA_RLOCKED) ?
rw->lock_object.lo_name, (what == RA_RLOCKED) ?
"read " : "", file, line);
#endif
break;
case RA_WLOCKED:
if (rw_wowner(rw) != curthread)
panic("Lock %s not exclusively locked @ %s:%d\n",
rw->rw_object.lo_name, file, line);
rw->lock_object.lo_name, file, line);
break;
case RA_UNLOCKED:
#ifdef WITNESS
witness_assert(&rw->rw_object, what, file, line);
witness_assert(&rw->lock_object, what, file, line);
#else
/*
* If we hold a write lock fail. We can't reliably check
@ -823,7 +823,7 @@ _rw_assert(struct rwlock *rw, int what, const char *file, int line)
*/
if (rw_wowner(rw) == curthread)
panic("Lock %s exclusively locked @ %s:%d\n",
rw->rw_object.lo_name, file, line);
rw->lock_object.lo_name, file, line);
#endif
break;
default:


@ -2424,7 +2424,7 @@ ptracestop(struct thread *td, int sig)
PROC_LOCK_ASSERT(p, MA_OWNED);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
&p->p_mtx.mtx_object, "Stopping for traced signal");
&p->p_mtx.lock_object, "Stopping for traced signal");
mtx_lock_spin(&sched_lock);
td->td_flags |= TDF_XSIG;
@ -2614,7 +2614,7 @@ issignal(td)
break; /* == ignore */
mtx_unlock(&ps->ps_mtx);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
&p->p_mtx.mtx_object, "Catching SIGSTOP");
&p->p_mtx.lock_object, "Catching SIGSTOP");
p->p_flag |= P_STOPPED_SIG;
p->p_xstat = sig;
mtx_lock_spin(&sched_lock);


@ -118,8 +118,8 @@ sx_init(struct sx *sx, const char *description)
cv_init(&sx->sx_excl_cv, description);
sx->sx_excl_wcnt = 0;
sx->sx_xholder = NULL;
lock_profile_object_init(&sx->sx_object, &lock_class_sx, description);
lock_init(&sx->sx_object, &lock_class_sx, description, NULL,
lock_profile_object_init(&sx->lock_object, &lock_class_sx, description);
lock_init(&sx->lock_object, &lock_class_sx, description, NULL,
LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
}
@ -129,14 +129,14 @@ sx_destroy(struct sx *sx)
KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
0), ("%s (%s): holders or waiters\n", __func__,
sx->sx_object.lo_name));
sx->lock_object.lo_name));
sx->sx_lock = NULL;
cv_destroy(&sx->sx_shrd_cv);
cv_destroy(&sx->sx_excl_cv);
lock_profile_object_destroy(&sx->sx_object);
lock_destroy(&sx->sx_object);
lock_profile_object_destroy(&sx->lock_object);
lock_destroy(&sx->lock_object);
}
void
@ -148,15 +148,15 @@ _sx_slock(struct sx *sx, const char *file, int line)
mtx_lock(sx->sx_lock);
KASSERT(sx->sx_xholder != curthread,
("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
sx->sx_object.lo_name, file, line));
WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER, file, line);
sx->lock_object.lo_name, file, line));
WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line);
/*
* Loop in case we lose the race for lock acquisition.
*/
while (sx->sx_cnt < 0) {
sx->sx_shrd_wcnt++;
lock_profile_obtain_lock_failed(&sx->sx_object, &contested, &waittime);
lock_profile_obtain_lock_failed(&sx->lock_object, &contested, &waittime);
cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
sx->sx_shrd_wcnt--;
}
@ -165,10 +165,10 @@ _sx_slock(struct sx *sx, const char *file, int line)
sx->sx_cnt++;
if (sx->sx_cnt == 1)
lock_profile_obtain_lock_success(&sx->sx_object, contested, waittime, file, line);
lock_profile_obtain_lock_success(&sx->lock_object, contested, waittime, file, line);
LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
WITNESS_LOCK(&sx->sx_object, 0, file, line);
LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
WITNESS_LOCK(&sx->lock_object, 0, file, line);
curthread->td_locks++;
mtx_unlock(sx->sx_lock);
@ -181,13 +181,13 @@ _sx_try_slock(struct sx *sx, const char *file, int line)
mtx_lock(sx->sx_lock);
if (sx->sx_cnt >= 0) {
sx->sx_cnt++;
LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
curthread->td_locks++;
mtx_unlock(sx->sx_lock);
return (1);
} else {
LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
mtx_unlock(sx->sx_lock);
return (0);
}
@ -210,14 +210,14 @@ _sx_xlock(struct sx *sx, const char *file, int line)
*/
KASSERT(sx->sx_xholder != curthread,
("%s (%s): xlock already held @ %s:%d", __func__,
sx->sx_object.lo_name, file, line));
WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
sx->lock_object.lo_name, file, line));
WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
line);
/* Loop in case we lose the race for lock acquisition. */
while (sx->sx_cnt != 0) {
sx->sx_excl_wcnt++;
lock_profile_obtain_lock_failed(&sx->sx_object, &contested, &waittime);
lock_profile_obtain_lock_failed(&sx->lock_object, &contested, &waittime);
cv_wait(&sx->sx_excl_cv, sx->sx_lock);
sx->sx_excl_wcnt--;
}
@ -228,9 +228,9 @@ _sx_xlock(struct sx *sx, const char *file, int line)
sx->sx_cnt--;
sx->sx_xholder = curthread;
lock_profile_obtain_lock_success(&sx->sx_object, contested, waittime, file, line);
LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
lock_profile_obtain_lock_success(&sx->lock_object, contested, waittime, file, line);
LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, 0, file, line);
WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
mtx_unlock(sx->sx_lock);
@ -244,14 +244,14 @@ _sx_try_xlock(struct sx *sx, const char *file, int line)
if (sx->sx_cnt == 0) {
sx->sx_cnt--;
sx->sx_xholder = curthread;
LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, 1, file, line);
WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
line);
curthread->td_locks++;
mtx_unlock(sx->sx_lock);
return (1);
} else {
LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, 0, file, line);
mtx_unlock(sx->sx_lock);
return (0);
}
@ -264,13 +264,13 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
mtx_lock(sx->sx_lock);
curthread->td_locks--;
WITNESS_UNLOCK(&sx->sx_object, 0, file, line);
WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
/* Release. */
sx->sx_cnt--;
if (sx->sx_cnt == 0) {
lock_profile_release_lock(&sx->sx_object);
lock_profile_release_lock(&sx->lock_object);
}
/*
@ -285,7 +285,7 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
} else if (sx->sx_shrd_wcnt > 0)
cv_broadcast(&sx->sx_shrd_cv);
LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);
LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
mtx_unlock(sx->sx_lock);
}
@ -298,7 +298,7 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
MPASS(sx->sx_cnt == -1);
curthread->td_locks--;
WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
/* Release. */
sx->sx_cnt++;
@ -312,9 +312,9 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
else if (sx->sx_excl_wcnt > 0)
cv_signal(&sx->sx_excl_cv);
LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);
LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, 0, file, line);
lock_profile_release_lock(&sx->sx_object);
lock_profile_release_lock(&sx->lock_object);
mtx_unlock(sx->sx_lock);
}
@ -329,14 +329,14 @@ _sx_try_upgrade(struct sx *sx, const char *file, int line)
sx->sx_cnt = -1;
sx->sx_xholder = curthread;
LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, 1, file, line);
WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
mtx_unlock(sx->sx_lock);
return (1);
} else {
LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, 0, file, line);
mtx_unlock(sx->sx_lock);
return (0);
}
@ -350,14 +350,14 @@ _sx_downgrade(struct sx *sx, const char *file, int line)
mtx_lock(sx->sx_lock);
MPASS(sx->sx_cnt == -1);
WITNESS_DOWNGRADE(&sx->sx_object, 0, file, line);
WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
sx->sx_cnt = 1;
sx->sx_xholder = NULL;
if (sx->sx_shrd_wcnt > 0)
cv_broadcast(&sx->sx_shrd_cv);
LOCK_LOG_LOCK("XDOWNGRADE", &sx->sx_object, 0, 0, file, line);
LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
mtx_unlock(sx->sx_lock);
}
@ -383,13 +383,13 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
case SX_LOCKED | LA_NOTRECURSED:
case SX_SLOCKED:
#ifdef WITNESS
witness_assert(&sx->sx_object, what, file, line);
witness_assert(&sx->lock_object, what, file, line);
#else
mtx_lock(sx->sx_lock);
if (sx->sx_cnt <= 0 &&
(what == SX_SLOCKED || sx->sx_xholder != curthread))
panic("Lock %s not %slocked @ %s:%d\n",
sx->sx_object.lo_name, (what == SX_SLOCKED) ?
sx->lock_object.lo_name, (what == SX_SLOCKED) ?
"share " : "", file, line);
mtx_unlock(sx->sx_lock);
#endif
@ -398,12 +398,12 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
mtx_lock(sx->sx_lock);
if (sx->sx_xholder != curthread)
panic("Lock %s not exclusively locked @ %s:%d\n",
sx->sx_object.lo_name, file, line);
sx->lock_object.lo_name, file, line);
mtx_unlock(sx->sx_lock);
break;
case SX_UNLOCKED:
#ifdef WITNESS
witness_assert(&sx->sx_object, what, file, line);
witness_assert(&sx->lock_object, what, file, line);
#else
/*
* We are able to check only exclusive lock here,
@ -412,7 +412,7 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
mtx_lock(sx->sx_lock);
if (sx->sx_xholder == curthread)
panic("Lock %s exclusively locked @ %s:%d\n",
sx->sx_object.lo_name, file, line);
sx->lock_object.lo_name, file, line);
mtx_unlock(sx->sx_lock);
#endif
break;
@ -469,7 +469,7 @@ sx_chain(struct thread *td, struct thread **ownerp)
* condition variable.
*/
sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_excl_cv));
if (LOCK_CLASS(&sx->sx_object) == &lock_class_sx &&
if (LOCK_CLASS(&sx->lock_object) == &lock_class_sx &&
sx->sx_excl_wcnt > 0)
goto ok;
@ -478,7 +478,7 @@ sx_chain(struct thread *td, struct thread **ownerp)
* condition variable.
*/
sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_shrd_cv));
if (LOCK_CLASS(&sx->sx_object) == &lock_class_sx &&
if (LOCK_CLASS(&sx->lock_object) == &lock_class_sx &&
sx->sx_shrd_wcnt > 0)
goto ok;


@ -270,13 +270,13 @@ msleep_spin(ident, mtx, wmesg, timo)
DROP_GIANT();
mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
WITNESS_SAVE(&mtx->mtx_object, mtx);
WITNESS_SAVE(&mtx->lock_object, mtx);
mtx_unlock_spin(mtx);
/*
* We put ourselves on the sleep queue and start our timeout.
*/
sleepq_add(ident, &mtx->mtx_object, wmesg, SLEEPQ_SLEEP, 0);
sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
if (timo)
sleepq_set_timeout(ident, timo);
@ -312,7 +312,7 @@ msleep_spin(ident, mtx, wmesg, timo)
#endif
PICKUP_GIANT();
mtx_lock_spin(mtx);
WITNESS_RESTORE(&mtx->mtx_object, mtx);
WITNESS_RESTORE(&mtx->lock_object, mtx);
return (rval);
}


@ -954,14 +954,14 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
* lock, then skip it.
*/
if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
lock == &Giant.mtx_object)
lock == &Giant.lock_object)
continue;
/*
* If we are locking a sleepable lock and this lock
* is Giant, then skip it.
*/
if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
lock1->li_lock == &Giant.mtx_object)
lock1->li_lock == &Giant.lock_object)
continue;
/*
* If we are locking a sleepable lock and this lock
@ -977,7 +977,7 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
* lock, then treat it as a reversal.
*/
if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
lock == &Giant.mtx_object)
lock == &Giant.lock_object)
goto reversal;
/*
* Check the lock order hierarchy for a reveresal.
@ -999,7 +999,7 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
if (blessed(w, w1))
return;
#endif
if (lock1->li_lock == &Giant.mtx_object) {
if (lock1->li_lock == &Giant.lock_object) {
if (w1->w_Giant_squawked)
return;
else
@ -1018,7 +1018,7 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
printf(
"lock order reversal: (sleepable after non-sleepable)\n");
else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
&& lock == &Giant.mtx_object)
&& lock == &Giant.lock_object)
printf(
"lock order reversal: (Giant after non-sleepable)\n");
else
@ -1073,7 +1073,7 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
* always come before Giant.
*/
if (flags & LOP_NEWORDER &&
!(lock1->li_lock == &Giant.mtx_object &&
!(lock1->li_lock == &Giant.lock_object &&
(lock->lo_flags & LO_SLEEPABLE) != 0)) {
CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
lock->lo_type, lock1->li_lock->lo_type);
@ -1325,7 +1325,7 @@ witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
if (lock1->li_lock == lock)
continue;
if (flags & WARN_GIANTOK &&
lock1->li_lock == &Giant.mtx_object)
lock1->li_lock == &Giant.lock_object)
continue;
if (flags & WARN_SLEEPOK &&
(lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)


@ -35,7 +35,7 @@
* Sleep/spin mutex.
*/
struct mtx {
struct lock_object mtx_object; /* Common lock properties. */
struct lock_object lock_object; /* Common lock properties. */
volatile uintptr_t mtx_lock; /* Owner and flags. */
volatile u_int mtx_recurse; /* Number of recursive holds. */
};


@ -36,7 +36,7 @@
* Reader/writer lock.
*/
struct rwlock {
struct lock_object rw_object;
struct lock_object lock_object;
volatile uintptr_t rw_lock;
};


@ -326,14 +326,14 @@ const char *witness_file(struct lock_object *);
* wherever they please without having to actually grab a lock to do so.
*/
#define witness_check_mutex(m) \
WITNESS_CHECKORDER(&(m)->mtx_object, LOP_EXCLUSIVE, LOCK_FILE, \
WITNESS_CHECKORDER(&(m)->lock_object, LOP_EXCLUSIVE, LOCK_FILE, \
LOCK_LINE)
#define witness_check_shared_sx(sx) \
WITNESS_CHECKORDER(&(sx)->sx_object, 0, LOCK_FILE, LOCK_LINE)
WITNESS_CHECKORDER(&(sx)->lock_object, 0, LOCK_FILE, LOCK_LINE)
#define witness_check_exclusive_sx(sx) \
WITNESS_CHECKORDER(&(sx)->sx_object, LOP_EXCLUSIVE, LOCK_FILE, \
WITNESS_CHECKORDER(&(sx)->lock_object, LOP_EXCLUSIVE, LOCK_FILE, \
LOCK_LINE)
#endif /* _KERNEL */
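
As the comment above says, these macros let code ask witness whether acquiring a given lock at this point would violate the established lock order, without actually taking the lock; after this commit they all reach the embedded 'lock_object'. The fragment below is a hedged usage sketch only: the softc layout and function are invented for illustration, and the checks are meaningful only in a kernel built with WITNESS.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>

/* Hypothetical driver state; not part of this commit. */
struct example_softc {
	struct mtx	sc_mtx;
	struct sx	sc_cfg_sx;
};

static void
example_check_order(struct example_softc *sc)
{
	/* Would an exclusive acquisition of sc_mtx here reverse lock order? */
	witness_check_mutex(&sc->sc_mtx);

	/* The analogous checks for shared and exclusive sx acquisition. */
	witness_check_shared_sx(&sc->sc_cfg_sx);
	witness_check_exclusive_sx(&sc->sc_cfg_sx);
}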


@ -159,11 +159,11 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
int contested = 0; \
uint64_t waittime = 0; \
if (!_obtain_lock((mp), _tid)) { \
lock_profile_obtain_lock_failed(&(mp)->mtx_object, \
lock_profile_obtain_lock_failed(&(mp)->lock_object, \
&contested, &waittime); \
_mtx_lock_sleep((mp), _tid, (opts), (file), (line)); \
} \
lock_profile_obtain_lock_success(&(mp)->mtx_object, contested, \
lock_profile_obtain_lock_success(&(mp)->lock_object, contested, \
waittime, (file), (line)); \
} while (0)
#endif
@ -186,12 +186,12 @@ void _mtx_assert(struct mtx *m, int what, const char *file, int line);
if ((mp)->mtx_lock == _tid) \
(mp)->mtx_recurse++; \
else { \
lock_profile_obtain_lock_failed(&(mp)->mtx_object, \
lock_profile_obtain_lock_failed(&(mp)->lock_object, \
&contested, &waittime); \
_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
} \
} \
lock_profile_obtain_lock_success(&(mp)->mtx_object, contested, \
lock_profile_obtain_lock_success(&(mp)->lock_object, contested, \
waittime, (file), (line)); \
} while (0)
#else /* SMP */
@ -340,15 +340,15 @@ extern struct mtx_pool *mtxpool_sleep;
_mtx_trylock((m), (opts), LOCK_FILE, LOCK_LINE)
#define mtx_sleep(chan, mtx, pri, wmesg, timo) \
_sleep((chan), &(mtx)->mtx_object, (pri), (wmesg), (timo))
_sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (timo))
#define mtx_initialized(m) lock_initalized(&(m)->mtx_object)
#define mtx_initialized(m) lock_initalized(&(m)->lock_object)
#define mtx_owned(m) (((m)->mtx_lock & ~MTX_FLAGMASK) == (uintptr_t)curthread)
#define mtx_recursed(m) ((m)->mtx_recurse != 0)
#define mtx_name(m) ((m)->mtx_object.lo_name)
#define mtx_name(m) ((m)->lock_object.lo_name)
/*
* Global locks.
@ -370,7 +370,7 @@ do { \
WITNESS_SAVE_DECL(Giant); \
\
if (mtx_owned(&Giant)) \
WITNESS_SAVE(&Giant.mtx_object, Giant); \
WITNESS_SAVE(&Giant.lock_object, Giant); \
for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
mtx_unlock(&Giant)
@ -379,7 +379,7 @@ do { \
while (_giantcnt--) \
mtx_lock(&Giant); \
if (mtx_owned(&Giant)) \
WITNESS_RESTORE(&Giant.mtx_object, Giant); \
WITNESS_RESTORE(&Giant.lock_object, Giant); \
} while (0)
#define PARTIAL_PICKUP_GIANT() \
@ -387,7 +387,7 @@ do { \
while (_giantcnt--) \
mtx_lock(&Giant); \
if (mtx_owned(&Giant)) \
WITNESS_RESTORE(&Giant.mtx_object, Giant)
WITNESS_RESTORE(&Giant.lock_object, Giant)
#endif
/*


@ -698,7 +698,7 @@ MALLOC_DECLARE(M_ZOMBIE);
} while (0)
#define _STOPEVENT(p, e, v) do { \
PROC_LOCK_ASSERT(p, MA_OWNED); \
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &p->p_mtx.mtx_object, \
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &p->p_mtx.lock_object, \
"checking stopevent %d", (e)); \
if ((p)->p_stops & (e)) \
stopevent((p), (e), (v)); \


@ -101,11 +101,11 @@
uint64_t waitstart = 0; \
\
if (!_rw_write_lock((rw), _tid)) { \
lock_profile_obtain_lock_failed(&(rw)->rw_object, \
lock_profile_obtain_lock_failed(&(rw)->lock_object, \
&contested, &waitstart); \
_rw_wlock_hard((rw), _tid, (file), (line)); \
} \
lock_profile_obtain_lock_success(&(rw)->rw_object, contested, \
lock_profile_obtain_lock_success(&(rw)->lock_object, contested, \
waitstart, (file), (line)); \
} while (0)
@ -164,9 +164,9 @@ void _rw_assert(struct rwlock *rw, int what, const char *file, int line);
#define rw_try_upgrade(rw) _rw_try_upgrade((rw), LOCK_FILE, LOCK_LINE)
#define rw_downgrade(rw) _rw_downgrade((rw), LOCK_FILE, LOCK_LINE)
#define rw_sleep(chan, rw, pri, wmesg, timo) \
_sleep((chan), &(rw)->rw_object, (pri), (wmesg), (timo))
_sleep((chan), &(rw)->lock_object, (pri), (wmesg), (timo))
#define rw_initialized(rw) lock_initalized(&(rw)->rw_object)
#define rw_initialized(rw) lock_initalized(&(rw)->lock_object)
struct rw_args {
struct rwlock *ra_rw;


@ -35,7 +35,7 @@
#include <sys/condvar.h> /* XXX */
struct sx {
struct lock_object sx_object; /* Common lock properties. */
struct lock_object lock_object; /* Common lock properties. */
struct mtx *sx_lock; /* General protection lock. */
int sx_cnt; /* -1: xlock, > 0: slock count. */
struct cv sx_shrd_cv; /* slock waiters. */
@ -95,7 +95,7 @@ struct sx_args {
sx_sunlock(sx); \
} while (0)
#define sx_sleep(chan, sx, pri, wmesg, timo) \
_sleep((chan), &(sx)->sx_object, (pri), (wmesg), (timo))
_sleep((chan), &(sx)->lock_object, (pri), (wmesg), (timo))
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define SX_LOCKED LA_LOCKED


@ -311,7 +311,7 @@ static __inline void splx(intrmask_t ipl __unused) { return; }
int _sleep(void *chan, struct lock_object *lock, int pri, const char *wmesg,
int timo) __nonnull(1);
#define msleep(chan, mtx, pri, wmesg, timo) \
_sleep((chan), &(mtx)->mtx_object, (pri), (wmesg), (timo))
_sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (timo))
int msleep_spin(void *chan, struct mtx *mtx, const char *wmesg, int timo)
__nonnull(1);
int pause(const char *wmesg, int timo);
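
With the rename, msleep() simply hands _sleep() the mutex's embedded 'lock_object'; callers are untouched. The fragment below is a hedged sketch of the usual lock / re-test / sleep pattern, with an invented softc, flag, and wait message; it is not code from this commit.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Hypothetical driver state; not part of this commit. */
struct example_softc {
	struct mtx	sc_mtx;
	int		sc_ready;
};

static void
example_wait_ready(struct example_softc *sc)
{
	mtx_lock(&sc->sc_mtx);
	while (!sc->sc_ready)
		/* Atomically drops sc_mtx while asleep, retakes it on wakeup. */
		msleep(&sc->sc_ready, &sc->sc_mtx, 0, "exwait", 0);
	mtx_unlock(&sc->sc_mtx);
}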