Reduce umtx-related work on exec and exit

- there is no need to take the process lock to iterate the thread
  list after single-threading is enforced
- typically there are no mutexes to clean up (testable without taking
  the global umtx lock)
- typically there is no need to adjust the priority (testable without
  taking thread lock)
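All three changes share one shape: read the state that cleanup would have to undo, and take the corresponding lock only when that state shows outstanding work. A minimal sketch of that shape, reusing the umtx fields from the diffs below (illustrative only, not compilable on its own):

	/*
	 * Sketch of the check-before-lock shape applied by this commit.
	 * The unlocked reads are safe because the thread is inspecting
	 * its own state while exiting (or while the process is
	 * single-threaded during exec).
	 */
	if (uq->uq_inherited_pri != PRI_MAX ||
	    !TAILQ_EMPTY(&uq->uq_pi_contested)) {
		/* Rare case: priority was inherited; undo it under the lock. */
		mtx_lock(&umtx_lock);
		/* ... disown contested PI mutexes ... */
		mtx_unlock(&umtx_lock);
	}
	/* Common case: no work, no lock taken. */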

Reviewed by:	kib
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D20160
Mateusz Guzik	2019-05-08 16:30:38 +00:00
commit ac97da9ad8 (parent 2b03b6bd45)
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=347355
4 changed files with 60 additions and 13 deletions

sys/kern/kern_umtx.c

@@ -4411,20 +4411,20 @@ umtx_exec_hook(void *arg __unused, struct proc *p,
 	struct thread *td;
 
 	KASSERT(p == curproc, ("need curproc"));
-	PROC_LOCK(p);
 	KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
 	    (p->p_flag & P_STOPPED_SINGLE) != 0,
 	    ("curproc must be single-threaded"));
+	/*
+	 * There is no need to lock the list as only this thread can be
+	 * running.
+	 */
 	FOREACH_THREAD_IN_PROC(p, td) {
 		KASSERT(td == curthread ||
 		    ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
 		    ("running thread %p %p", p, td));
-		PROC_UNLOCK(p);
 		umtx_thread_cleanup(td);
-		PROC_LOCK(p);
 		td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
 	}
-	PROC_UNLOCK(p);
 }
 
 /*
@@ -4541,18 +4541,22 @@ umtx_thread_cleanup(struct thread *td)
 	 */
 	uq = td->td_umtxq;
 	if (uq != NULL) {
-		mtx_lock(&umtx_lock);
-		uq->uq_inherited_pri = PRI_MAX;
-		while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
-			pi->pi_owner = NULL;
-			TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
+		if (uq->uq_inherited_pri != PRI_MAX ||
+		    !TAILQ_EMPTY(&uq->uq_pi_contested)) {
+			mtx_lock(&umtx_lock);
+			uq->uq_inherited_pri = PRI_MAX;
+			while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
+				pi->pi_owner = NULL;
+				TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
+			}
+			mtx_unlock(&umtx_lock);
 		}
-		mtx_unlock(&umtx_lock);
-		thread_lock(td);
-		sched_lend_user_prio(td, PRI_MAX);
-		thread_unlock(td);
+		sched_lend_user_prio_cond(td, PRI_MAX);
 	}
 
 	if (td->td_rb_inact == 0 && td->td_rb_list == 0 && td->td_rbp_list == 0)
 		return;
 
 	/*
 	 * Handle terminated robust mutexes. Must be done after
 	 * robust pi disown, otherwise unlock could see unowned

sys/kern/sched_4bsd.c

@@ -930,6 +930,27 @@ sched_lend_user_prio(struct thread *td, u_char prio)
 		td->td_flags |= TDF_NEEDRESCHED;
 }
 
+/*
+ * Like the above but first check if there is anything to do.
+ */
+void
+sched_lend_user_prio_cond(struct thread *td, u_char prio)
+{
+
+	if (td->td_lend_user_pri != prio)
+		goto lend;
+	if (td->td_user_pri != min(prio, td->td_base_user_pri))
+		goto lend;
+	if (td->td_priority >= td->td_user_pri)
+		goto lend;
+	return;
+
+lend:
+	thread_lock(td);
+	sched_lend_user_prio(td, prio);
+	thread_unlock(td);
+}
+
 void
 sched_sleep(struct thread *td, int pri)
 {
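For reference, the call-site effect in umtx_thread_cleanup() (reconstructed from the kern_umtx.c hunk above) is that an unconditional lock/lend/unlock sequence becomes a single conditional call:

	/* Before: thread lock taken on every thread exit. */
	thread_lock(td);
	sched_lend_user_prio(td, PRI_MAX);
	thread_unlock(td);

	/* After: the lock is taken only when a lent priority is in effect. */
	sched_lend_user_prio_cond(td, PRI_MAX);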

sys/kern/sched_ule.c

@@ -1861,6 +1861,27 @@ sched_lend_user_prio(struct thread *td, u_char prio)
 		td->td_flags |= TDF_NEEDRESCHED;
 }
 
+/*
+ * Like the above but first check if there is anything to do.
+ */
+void
+sched_lend_user_prio_cond(struct thread *td, u_char prio)
+{
+
+	if (td->td_lend_user_pri != prio)
+		goto lend;
+	if (td->td_user_pri != min(prio, td->td_base_user_pri))
+		goto lend;
+	if (td->td_priority >= td->td_user_pri)
+		goto lend;
+	return;
+
+lend:
+	thread_lock(td);
+	sched_lend_user_prio(td, prio);
+	thread_unlock(td);
+}
+
 #ifdef SMP
 /*
  * This tdq is about to idle. Try to steal a thread from another CPU before

sys/sys/sched.h

@@ -96,6 +96,7 @@ u_int	sched_estcpu(struct thread *td);
 void	sched_fork_thread(struct thread *td, struct thread *child);
 void	sched_lend_prio(struct thread *td, u_char prio);
 void	sched_lend_user_prio(struct thread *td, u_char pri);
+void	sched_lend_user_prio_cond(struct thread *td, u_char pri);
 fixpt_t	sched_pctcpu(struct thread *td);
 void	sched_prio(struct thread *td, u_char prio);
 void	sched_sleep(struct thread *td, int prio);