The umtx_lock mutex is used by the top half of the kernel, but is currently a spin lock. Apparently, the only reason for this is that umtx_thread_exit() is called under the process spinlock, which puts that requirement on umtx_lock. Note that the witness static order list is wrong for umtx_lock: umtx_lock is explicitly ordered before any thread lock, so it is also before the sleepqueue locks.

Change umtx_lock to be a sleepable mutex. For the reason above, the calls to umtx_thread_exit() are moved from thread_exit() to earlier in each caller, before the process spin lock is taken.

Discussed with:	jhb
Tested by:	pho (previous version)
Sponsored by:	The FreeBSD Foundation
MFC after:	3 weeks
commit 13dad10871
parent c483877531
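The core of the change is the lock class used for umtx_lock. Below is a minimal sketch, using the standard mutex(9) API, of the difference between the two classes; the lock and function names here are illustrative only and do not appear in the diff.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Hypothetical locks, for illustration only. */
static struct mtx spin_m;	/* like umtx_lock before the change */
static struct mtx sleep_m;	/* like umtx_lock after the change */

static void
example_init(void)
{
	/*
	 * A spin mutex: a contended acquisition busy-waits with
	 * interrupts disabled, so it may be taken even while other
	 * spin locks (for example the process spinlock) are held.
	 */
	mtx_init(&spin_m, "example spin lock", NULL, MTX_SPIN);

	/*
	 * A default mutex: a contended acquisition may put the thread
	 * to sleep, so it must not be acquired while any spin lock is
	 * held.
	 */
	mtx_init(&sleep_m, "example sleep lock", NULL, MTX_DEF);
}

static void
example_use(void)
{
	/* Spin mutexes use the _spin lock/unlock variants. */
	mtx_lock_spin(&spin_m);
	mtx_unlock_spin(&spin_m);

	/*
	 * Default mutexes use the plain variants, which is why every
	 * mtx_lock_spin(&umtx_lock)/mtx_unlock_spin(&umtx_lock) pair
	 * in the diff below becomes mtx_lock()/mtx_unlock().
	 */
	mtx_lock(&sleep_m);
	mtx_unlock(&sleep_m);
}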
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sdt.h>
 #include <sys/shm.h>
 #include <sys/sem.h>
+#include <sys/umtx.h>
 #ifdef KTRACE
 #include <sys/ktrace.h>
 #endif
@@ -633,6 +634,7 @@ exit1(struct thread *td, int rv)
 	wakeup(p->p_pptr);
 	cv_broadcast(&p->p_pwait);
 	sched_exit(p->p_pptr, td);
+	umtx_thread_exit(td);
 	PROC_SLOCK(p);
 	p->p_state = PRS_ZOMBIE;
 	PROC_UNLOCK(p->p_pptr);
@@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/rwlock.h>
 #include <sys/signalvar.h>
 #include <sys/sx.h>
+#include <sys/umtx.h>
 #include <sys/unistd.h>
 #include <sys/wait.h>
 #include <sys/sched.h>
@@ -339,6 +340,7 @@ kthread_exit(void)
 	}
 	LIST_REMOVE(curthread, td_hash);
 	rw_wunlock(&tidhash_lock);
+	umtx_thread_exit(curthread);
 	PROC_SLOCK(p);
 	thread_exit();
 }
@@ -322,6 +322,7 @@ sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
 		LIST_REMOVE(td, td_hash);
 		rw_wunlock(&tidhash_lock);
 		tdsigcleanup(td);
+		umtx_thread_exit(td);
 		PROC_SLOCK(p);
 		thread_stopped(p);
 		thread_exit();
@@ -413,7 +413,6 @@ thread_exit(void)
 #ifdef AUDIT
 	AUDIT_SYSCALL_EXIT(0, td);
 #endif
-	umtx_thread_exit(td);
 	/*
 	 * drop FPU & debug register state storage, or any other
 	 * architecture specific resources that
@@ -864,6 +863,7 @@ thread_suspend_check(int return_instead)
 			tidhash_remove(td);
 			PROC_LOCK(p);
 			tdsigcleanup(td);
+			umtx_thread_exit(td);
 			PROC_SLOCK(p);
 			thread_stopped(p);
 			thread_exit();
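With umtx_lock now sleepable, the ordering in the exit paths above matters: a default mutex cannot be acquired while the process spin lock is held, so each caller invokes umtx_thread_exit() before PROC_SLOCK(). A hedged sketch of that rule follows; it is not a copy of any single caller in the diff, and the function name is hypothetical.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/umtx.h>

static void
example_exit_path(struct thread *td, struct proc *p)
{
	/*
	 * umtx_thread_exit() now ends up acquiring the sleepable
	 * umtx_lock, so it must run before the process spin lock is
	 * taken.
	 */
	umtx_thread_exit(td);

	PROC_SLOCK(p);		/* spin lock: no sleeping from here on */
	thread_exit();		/* never returns */
}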
@@ -396,7 +396,7 @@ umtxq_sysinit(void *arg __unused)
 #ifdef UMTX_PROFILING
 	umtx_init_profiling();
 #endif
-	mtx_init(&umtx_lock, "umtx lock", NULL, MTX_SPIN);
+	mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
 	EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
 	    EVENTHANDLER_PRI_ANY);
 }
@@ -1467,9 +1467,9 @@ umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
 	struct umtx_q *uq, *uq_owner;
 
 	uq_owner = owner->td_umtxq;
-	mtx_lock_spin(&umtx_lock);
+	mtx_lock(&umtx_lock);
 	if (pi->pi_owner == owner) {
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 		return (0);
 	}
 
@@ -1477,7 +1477,7 @@ umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
 		/*
 		 * userland may have already messed the mutex, sigh.
 		 */
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 		return (EPERM);
 	}
 	umtx_pi_setowner(pi, owner);
@@ -1491,7 +1491,7 @@ umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
 		sched_lend_user_prio(owner, pri);
 		thread_unlock(owner);
 	}
-	mtx_unlock_spin(&umtx_lock);
+	mtx_unlock(&umtx_lock);
 	return (0);
 }
 
@@ -1506,7 +1506,7 @@ umtx_pi_adjust(struct thread *td, u_char oldpri)
 	struct umtx_pi *pi;
 
 	uq = td->td_umtxq;
-	mtx_lock_spin(&umtx_lock);
+	mtx_lock(&umtx_lock);
 	/*
 	 * Pick up the lock that td is blocked on.
 	 */
@@ -1515,7 +1515,7 @@ umtx_pi_adjust(struct thread *td, u_char oldpri)
 		umtx_pi_adjust_thread(pi, td);
 		umtx_repropagate_priority(pi);
 	}
-	mtx_unlock_spin(&umtx_lock);
+	mtx_unlock(&umtx_lock);
 }
 
 /*
@@ -1537,12 +1537,12 @@ umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
 	UMTXQ_LOCKED_ASSERT(uc);
 	KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
 	umtxq_insert(uq);
-	mtx_lock_spin(&umtx_lock);
+	mtx_lock(&umtx_lock);
 	if (pi->pi_owner == NULL) {
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 		/* XXX Only look up thread in current process. */
 		td1 = tdfind(owner, curproc->p_pid);
-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		if (td1 != NULL) {
 			if (pi->pi_owner == NULL)
 				umtx_pi_setowner(pi, td1);
@@ -1566,20 +1566,20 @@ umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
 	td->td_flags |= TDF_UPIBLOCKED;
 	thread_unlock(td);
 	umtx_propagate_priority(td);
-	mtx_unlock_spin(&umtx_lock);
+	mtx_unlock(&umtx_lock);
 	umtxq_unbusy(&uq->uq_key);
 
 	error = umtxq_sleep(uq, wmesg, timo);
 	umtxq_remove(uq);
 
-	mtx_lock_spin(&umtx_lock);
+	mtx_lock(&umtx_lock);
 	uq->uq_pi_blocked = NULL;
 	thread_lock(td);
 	td->td_flags &= ~TDF_UPIBLOCKED;
 	thread_unlock(td);
 	TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
 	umtx_repropagate_priority(pi);
-	mtx_unlock_spin(&umtx_lock);
+	mtx_unlock(&umtx_lock);
 	umtxq_unlock(&uq->uq_key);
 
 	return (error);
@@ -1611,7 +1611,7 @@ umtx_pi_unref(struct umtx_pi *pi)
 	UMTXQ_LOCKED_ASSERT(uc);
 	KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
 	if (--pi->pi_refcount == 0) {
-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		if (pi->pi_owner != NULL) {
 			TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested,
 			    pi, pi_link);
@@ -1619,7 +1619,7 @@ umtx_pi_unref(struct umtx_pi *pi)
 		}
 		KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
 		    ("blocked queue not empty"));
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 		TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
 		umtx_pi_free(pi);
 	}
@@ -1873,11 +1873,11 @@ do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
 	umtxq_busy(&key);
 	count = umtxq_count_pi(&key, &uq_first);
 	if (uq_first != NULL) {
-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		pi = uq_first->uq_pi_blocked;
 		KASSERT(pi != NULL, ("pi == NULL?"));
 		if (pi->pi_owner != curthread) {
-			mtx_unlock_spin(&umtx_lock);
+			mtx_unlock(&umtx_lock);
 			umtxq_unbusy(&key);
 			umtxq_unlock(&key);
 			umtx_key_release(&key);
@@ -1903,7 +1903,7 @@ do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
 		thread_lock(curthread);
 		sched_lend_user_prio(curthread, pri);
 		thread_unlock(curthread);
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 		if (uq_first)
 			umtxq_signal_thread(uq_first);
 	} else {
@@ -1920,10 +1920,10 @@ do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
 			 * umtx_pi, and unlocked the umtxq.
 			 * If the current thread owns it, it must disown it.
 			 */
-			mtx_lock_spin(&umtx_lock);
+			mtx_lock(&umtx_lock);
 			if (pi->pi_owner == td)
 				umtx_pi_disown(pi);
-			mtx_unlock_spin(&umtx_lock);
+			mtx_unlock(&umtx_lock);
 		}
 	}
 	umtxq_unlock(&key);
@@ -1986,9 +1986,9 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
 			goto out;
 		}
 
-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
-			mtx_unlock_spin(&umtx_lock);
+			mtx_unlock(&umtx_lock);
 			error = EINVAL;
 			goto out;
 		}
@@ -1999,7 +1999,7 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
 			sched_lend_user_prio(td, uq->uq_inherited_pri);
 			thread_unlock(td);
 		}
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 
 		rv = casueword32(&m->m_owner,
 		    UMUTEX_CONTESTED, &owner, id | UMUTEX_CONTESTED);
@@ -2034,7 +2034,7 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
 		umtxq_remove(uq);
 		umtxq_unlock(&uq->uq_key);
 
-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		uq->uq_inherited_pri = old_inherited_pri;
 		pri = PRI_MAX;
 		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
@@ -2049,11 +2049,11 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
 		thread_lock(td);
 		sched_lend_user_prio(td, pri);
 		thread_unlock(td);
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 	}
 
 	if (error != 0) {
-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		uq->uq_inherited_pri = old_inherited_pri;
 		pri = PRI_MAX;
 		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
@@ -2068,7 +2068,7 @@ do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
 		thread_lock(td);
 		sched_lend_user_prio(td, pri);
 		thread_unlock(td);
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 	}
 
 out:
@@ -2140,7 +2140,7 @@ do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
 	if (error == -1)
 		error = EFAULT;
 	else {
-		mtx_lock_spin(&umtx_lock);
+		mtx_lock(&umtx_lock);
 		if (su != 0)
 			uq->uq_inherited_pri = new_inherited_pri;
 		pri = PRI_MAX;
@@ -2156,7 +2156,7 @@ do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
 		thread_lock(td);
 		sched_lend_user_prio(td, pri);
 		thread_unlock(td);
-		mtx_unlock_spin(&umtx_lock);
+		mtx_unlock(&umtx_lock);
 	}
 	umtx_key_release(&key);
 	return (error);
@@ -3841,13 +3841,13 @@ umtx_thread_cleanup(struct thread *td)
 	if ((uq = td->td_umtxq) == NULL)
 		return;
 
-	mtx_lock_spin(&umtx_lock);
+	mtx_lock(&umtx_lock);
 	uq->uq_inherited_pri = PRI_MAX;
 	while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
 		pi->pi_owner = NULL;
 		TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
 	}
-	mtx_unlock_spin(&umtx_lock);
+	mtx_unlock(&umtx_lock);
 	thread_lock(td);
 	sched_lend_user_prio(td, PRI_MAX);
 	thread_unlock(td);
@@ -491,6 +491,11 @@ static struct witness_order_list_entry order_lists[] = {
 #endif
 	{ "time lock", &lock_class_mtx_sleep },
 	{ NULL, NULL },
+	/*
+	 * umtx
+	 */
+	{ "umtx lock", &lock_class_mtx_sleep },
+	{ NULL, NULL },
 	/*
 	 * Sockets
 	 */
@@ -637,7 +642,6 @@ static struct witness_order_list_entry order_lists[] = {
 #endif
 	{ "process slock", &lock_class_mtx_spin },
 	{ "sleepq chain", &lock_class_mtx_spin },
-	{ "umtx lock", &lock_class_mtx_spin },
 	{ "rm_spinlock", &lock_class_mtx_spin },
 	{ "turnstile chain", &lock_class_mtx_spin },
 	{ "turnstile lock", &lock_class_mtx_spin },