- Following r216313, sched_unlend_user_prio is no longer needed; always
  use sched_lend_user_prio to set the lent priority.
- Improve the pthread priority-inherit mutex: when a contender's priority
  is lowered, repropagate priorities.  This may cause the mutex owner's
  priority to be lowered as well; in the old code, the owner's priority
  was raise-only.
David Xu 2010-12-29 09:26:46 +00:00
parent eb68ccd676
commit c8e368a933
5 changed files with 30 additions and 78 deletions
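For reference, the scheduler hunks below collapse the old lend/unlend pair into a single routine: the lent priority is recorded unconditionally, and the effective user priority becomes the better (numerically lower) of the lent and base priorities, so it can now drop as well as rise. A minimal user-space sketch of that arithmetic follows; the struct, field names, and priority values are illustrative stand-ins rather than kernel code, and the real routine additionally calls sched_prio() or sets TDF_NEEDRESCHED as shown in the diff.

/*
 * Illustrative sketch only: a user-space model of the consolidated
 * lend-user-priority logic.  Lower numeric value = higher priority,
 * as in the kernel; the struct below merely mimics struct thread fields.
 */
#include <stdio.h>

struct fake_thread {
	unsigned char base_user_pri;	/* priority from scheduling class */
	unsigned char lend_user_pri;	/* priority lent via PI/PP mutexes */
	unsigned char user_pri;		/* effective user priority */
};

static void
lend_user_prio(struct fake_thread *td, unsigned char prio)
{
	/*
	 * Record the lent priority as-is; the effective priority is the
	 * better (lower) of the lent and base priorities, so it can move
	 * back up (numerically) when the lent priority is raised again.
	 */
	td->lend_user_pri = prio;
	td->user_pri = prio < td->base_user_pri ? prio : td->base_user_pri;
}

int
main(void)
{
	struct fake_thread td = { 120, 255, 120 };

	lend_user_prio(&td, 100);	/* boosted: effective priority 100 */
	printf("%d\n", td.user_pri);
	lend_user_prio(&td, 110);	/* contender lowered: drops to 110 */
	printf("%d\n", td.user_pri);
	lend_user_prio(&td, 255);	/* lending removed (PRI_MAX): back to 120 */
	printf("%d\n", td.user_pri);
	return (0);
}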


@@ -489,8 +489,10 @@ rtp_to_pri(struct rtprio *rtp, struct thread *td)
 	if (curthread == td)
 		sched_prio(curthread, td->td_user_pri); /* XXX dubious */
 	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
+		critical_enter();
 		thread_unlock(td);
 		umtx_pi_adjust(td, oldpri);
+		critical_exit();
 	} else
 		thread_unlock(td);
 	return (0);


@@ -243,7 +243,6 @@ static int umtx_key_get(void *addr, int type, int share,
 static void	umtx_key_release(struct umtx_key *key);
 static struct umtx_pi *umtx_pi_alloc(int);
 static void	umtx_pi_free(struct umtx_pi *pi);
-static void	umtx_pi_adjust_locked(struct thread *td, u_char oldpri);
 static int	do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags);
 static void	umtx_thread_cleanup(struct thread *td);
 static void	umtx_exec_hook(void *arg __unused, struct proc *p __unused,
@@ -1428,9 +1427,10 @@ umtx_propagate_priority(struct thread *td)
 		 */
 		uq = td->td_umtxq;
 		pi = uq->uq_pi_blocked;
-		/* Resort td on the list if needed. */
-		if (!umtx_pi_adjust_thread(pi, td))
+		if (pi == NULL)
 			break;
+		/* Resort td on the list if needed. */
+		umtx_pi_adjust_thread(pi, td);
 	}
 }
 
@@ -1439,11 +1439,11 @@ umtx_propagate_priority(struct thread *td)
  * it is interrupted by signal or resumed by others.
  */
 static void
-umtx_unpropagate_priority(struct umtx_pi *pi)
+umtx_repropagate_priority(struct umtx_pi *pi)
 {
 	struct umtx_q *uq, *uq_owner;
 	struct umtx_pi *pi2;
-	int	pri, oldpri;
+	int	pri;
 
 	mtx_assert(&umtx_lock, MA_OWNED);
 
@@ -1462,12 +1462,10 @@ umtx_unpropagate_priority(struct umtx_pi *pi)
 		if (pri > uq_owner->uq_inherited_pri)
 			pri = uq_owner->uq_inherited_pri;
 		thread_lock(pi->pi_owner);
-		oldpri = pi->pi_owner->td_user_pri;
-		sched_unlend_user_prio(pi->pi_owner, pri);
+		sched_lend_user_prio(pi->pi_owner, pri);
 		thread_unlock(pi->pi_owner);
-		if (uq_owner->uq_pi_blocked != NULL)
-			umtx_pi_adjust_locked(pi->pi_owner, oldpri);
-		pi = uq_owner->uq_pi_blocked;
+		if ((pi = uq_owner->uq_pi_blocked) != NULL)
+			umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
 	}
 }
 
@@ -1524,31 +1522,6 @@ umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
 	return (0);
 }
 
-static void
-umtx_pi_adjust_locked(struct thread *td, u_char oldpri)
-{
-	struct umtx_q *uq;
-	struct umtx_pi *pi;
-
-	uq = td->td_umtxq;
-	/*
-	 * Pick up the lock that td is blocked on.
-	 */
-	pi = uq->uq_pi_blocked;
-	MPASS(pi != NULL);
-
-	/* Resort the turnstile on the list. */
-	if (!umtx_pi_adjust_thread(pi, td))
-		return;
-
-	/*
-	 * If our priority was lowered and we are at the head of the
-	 * turnstile, then propagate our new priority up the chain.
-	 */
-	if (uq == TAILQ_FIRST(&pi->pi_blocked) && UPRI(td) < oldpri)
-		umtx_propagate_priority(td);
-}
-
 /*
  * Adjust a thread's order position in its blocked PI mutex,
  * this may result new priority propagating process.
@@ -1565,8 +1538,10 @@ umtx_pi_adjust(struct thread *td, u_char oldpri)
 	 * Pick up the lock that td is blocked on.
 	 */
 	pi = uq->uq_pi_blocked;
-	if (pi != NULL)
-		umtx_pi_adjust_locked(td, oldpri);
+	if (pi != NULL) {
+		umtx_pi_adjust_thread(pi, td);
+		umtx_repropagate_priority(pi);
+	}
 	mtx_unlock_spin(&umtx_lock);
 }
 
@@ -1635,7 +1610,7 @@ umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
 	td->td_flags &= ~TDF_UPIBLOCKED;
 	thread_unlock(td);
 	TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
-	umtx_unpropagate_priority(pi);
+	umtx_repropagate_priority(pi);
 	mtx_unlock_spin(&umtx_lock);
 	umtxq_unlock(&uq->uq_key);
 
@@ -1937,7 +1912,7 @@ do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
 		}
 	}
 	thread_lock(curthread);
-	sched_unlend_user_prio(curthread, pri);
+	sched_lend_user_prio(curthread, pri);
 	thread_unlock(curthread);
 	mtx_unlock_spin(&umtx_lock);
 	if (uq_first)
@@ -2062,7 +2037,7 @@ _do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags, int timo,
 			if (pri > uq->uq_inherited_pri)
 				pri = uq->uq_inherited_pri;
 			thread_lock(td);
-			sched_unlend_user_prio(td, pri);
+			sched_lend_user_prio(td, pri);
 			thread_unlock(td);
 			mtx_unlock_spin(&umtx_lock);
 		}
@@ -2081,7 +2056,7 @@ _do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags, int timo,
 		if (pri > uq->uq_inherited_pri)
 			pri = uq->uq_inherited_pri;
 		thread_lock(td);
-		sched_unlend_user_prio(td, pri);
+		sched_lend_user_prio(td, pri);
 		thread_unlock(td);
 		mtx_unlock_spin(&umtx_lock);
 	}
@@ -2172,7 +2147,7 @@ do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
 		if (pri > uq->uq_inherited_pri)
 			pri = uq->uq_inherited_pri;
 		thread_lock(td);
-		sched_unlend_user_prio(td, pri);
+		sched_lend_user_prio(td, pri);
 		thread_unlock(td);
 		mtx_unlock_spin(&umtx_lock);
 	}
@@ -3680,6 +3655,6 @@ umtx_thread_cleanup(struct thread *td)
 	}
 	mtx_unlock_spin(&umtx_lock);
 	thread_lock(td);
-	sched_unlend_user_prio(td, PRI_MAX);
+	sched_lend_user_prio(td, PRI_MAX);
 	thread_unlock(td);
 }
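The second item in the commit message can be exercised from user space with a sketch along the following lines (an assumed test program, not part of the commit): a PTHREAD_PRIO_INHERIT mutex is held by a default-priority owner, a SCHED_FIFO contender blocks on it, and the contender's priority is then lowered with pthread_setschedparam(). With this change the kernel re-sorts the PI queue and repropagates, so the owner's lent priority drops as well; previously the owner kept the stale boost until unlock. Error checking is omitted, the priority values are arbitrary, and real-time scheduling requires sufficient privileges.

/*
 * Hypothetical demo, not part of this commit.
 * Build: cc -o pi_demo pi_demo.c -lpthread
 */
#include <pthread.h>
#include <sched.h>
#include <unistd.h>

static pthread_mutex_t m;

static void *
owner(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&m);
	sleep(5);		/* hold the mutex while the contender is blocked */
	pthread_mutex_unlock(&m);
	return (NULL);
}

static void *
contender(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&m);	/* blocks; its priority is lent to the owner */
	pthread_mutex_unlock(&m);
	return (NULL);
}

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_t own, cont;
	struct sched_param sp;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&m, &attr);

	pthread_create(&own, NULL, owner, NULL);
	sleep(1);			/* let the owner take the mutex */

	pthread_create(&cont, NULL, contender, NULL);
	sp.sched_priority = 30;		/* arbitrary example value */
	pthread_setschedparam(cont, SCHED_FIFO, &sp);
	sleep(1);			/* owner now runs at the lent priority */

	/*
	 * Lower the blocked contender's priority.  With this change the
	 * owner's priority is repropagated downward too (observe with
	 * top -H or ps -H); before it, the owner kept the stale boost.
	 */
	sp.sched_priority = 10;
	pthread_setschedparam(cont, SCHED_FIFO, &sp);

	pthread_join(cont, NULL);
	pthread_join(own, NULL);
	pthread_mutex_destroy(&m);
	pthread_mutexattr_destroy(&attr);
	return (0);
}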


@@ -892,24 +892,12 @@ sched_lend_user_prio(struct thread *td, u_char prio)
 {
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	if (prio < td->td_lend_user_pri)
-		td->td_lend_user_pri = prio;
-	if (prio < td->td_user_pri)
-		td->td_user_pri = prio;
-}
-
-void
-sched_unlend_user_prio(struct thread *td, u_char prio)
-{
-	u_char base_pri;
-
-	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	base_pri = td->td_base_user_pri;
 	td->td_lend_user_pri = prio;
-	if (prio > base_pri)
-		td->td_user_pri = base_pri;
-	else
-		td->td_user_pri = prio;
+	td->td_user_pri = min(prio, td->td_base_user_pri);
+	if (td->td_priority > td->td_user_pri)
+		sched_prio(td, td->td_user_pri);
+	else if (td->td_priority != td->td_user_pri)
+		td->td_flags |= TDF_NEEDRESCHED;
 }
 
 void


@@ -1687,24 +1687,12 @@ sched_lend_user_prio(struct thread *td, u_char prio)
 {
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	if (prio < td->td_lend_user_pri)
-		td->td_lend_user_pri = prio;
-	if (prio < td->td_user_pri)
-		td->td_user_pri = prio;
-}
-
-void
-sched_unlend_user_prio(struct thread *td, u_char prio)
-{
-	u_char base_pri;
-
-	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	base_pri = td->td_base_user_pri;
 	td->td_lend_user_pri = prio;
-	if (prio > base_pri)
-		td->td_user_pri = base_pri;
-	else
-		td->td_user_pri = prio;
+	td->td_user_pri = min(prio, td->td_base_user_pri);
+	if (td->td_priority > td->td_user_pri)
+		sched_prio(td, td->td_user_pri);
+	else if (td->td_priority != td->td_user_pri)
+		td->td_flags |= TDF_NEEDRESCHED;
 }
 
 /*


@@ -99,7 +99,6 @@ void sched_sleep(struct thread *td, int prio);
 void	sched_switch(struct thread *td, struct thread *newtd, int flags);
 void	sched_throw(struct thread *td);
 void	sched_unlend_prio(struct thread *td, u_char prio);
-void	sched_unlend_user_prio(struct thread *td, u_char pri);
 void	sched_user_prio(struct thread *td, u_char prio);
 void	sched_userret(struct thread *td);
 void	sched_wakeup(struct thread *td);