- Assert that the proc lock and sched_lock are held in sched_nice().

- For the 4BSD scheduler, this means that all callers of the static
  function resetpriority() now always hold sched_lock, so don't lock
  sched_lock explicitly in that function.
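
A minimal, hypothetical caller sketch (not from this commit) of what the new assertions expect: the caller acquires the process lock and sched_lock before calling sched_nice(), which is also why resetpriority() no longer needs to take sched_lock itself. The function name example_set_nice() is invented; PROC_LOCK()/PROC_UNLOCK(), mtx_lock_spin()/mtx_unlock_spin(), sched_lock, and FOREACH_KSEGRP_IN_PROC() are KSE-era kernel primitives.

/* Hypothetical caller, kernel context (sys/kern), KSE-era interfaces. */
static void
example_set_nice(struct proc *p, int nice)
{
	struct ksegrp *kg;

	PROC_LOCK(p);			/* satisfies PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED) */
	mtx_lock_spin(&sched_lock);	/* satisfies mtx_assert(&sched_lock, MA_OWNED) */
	FOREACH_KSEGRP_IN_PROC(p, kg)
		sched_nice(kg, nice);
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
}
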
John Baldwin 2003-04-22 20:50:38 +00:00
parent a15cc35909
commit 0b5318c81a
Notes (svn2git, 2020-12-20): svn path=/head/; revision=113873
2 changed files with 5 additions and 2 deletions

sys/kern/sched_4bsd.c

@@ -378,7 +378,6 @@ resetpriority(struct ksegrp *kg)
 	register unsigned int newpriority;
 	struct thread *td;
 
-	mtx_lock_spin(&sched_lock);
 	if (kg->kg_pri_class == PRI_TIMESHARE) {
 		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
 		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
@@ -389,7 +388,6 @@ resetpriority(struct ksegrp *kg)
 	FOREACH_THREAD_IN_GROUP(kg, td) {
 		maybe_resched(td);	/* XXXKSE silly */
 	}
-	mtx_unlock_spin(&sched_lock);
 }
 
 /* ARGSUSED */
@@ -514,6 +512,9 @@ sched_fork_thread(struct thread *td, struct thread *child)
 void
 sched_nice(struct ksegrp *kg, int nice)
 {
+
+	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
+	mtx_assert(&sched_lock, MA_OWNED);
 	kg->kg_nice = nice;
 	resetpriority(kg);
 }

sys/kern/sched_ule.c

@@ -693,6 +693,8 @@ sched_nice(struct ksegrp *kg, int nice)
 	struct thread *td;
 	struct kseq *kseq;
 
+	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
+	mtx_assert(&sched_lock, MA_OWNED);
 	/*
 	 * We need to adjust the nice counts for running KSEs.
 	 */