diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 248e4ea0edd4..d6c49d27bbfa 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -405,7 +405,7 @@ statclock(frame)
 		}
 	}
 
-	sched_clock(td);
+	sched_clock(ke);
 
 	/* Update resource usage integrals and maximums. */
 	if ((pstats = p->p_stats) != NULL &&
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index fee6caf62a9f..fc967e9af35a 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -493,6 +493,13 @@ exit1(struct thread *td, int rv)
 	PCPU_SET(switchticks, ticks);
 
 	cpu_sched_exit(td); /* XXXKSE check if this should be in thread_exit */
+
+	/*
+	 * Allow the scheduler to adjust the priority of the
+	 * parent when a kseg is exiting.
+	 */
+	if (p->p_pid != 1)
+		sched_exit(p->p_pptr, p);
 	/*
 	 * Make sure the scheduler takes this thread out of its tables etc.
 	 * This will also release this thread's reference to the ucred.
@@ -575,17 +582,6 @@ wait1(struct thread *td, struct wait_args *uap, int compat)
 
 			nfound++;
 			if (p->p_state == PRS_ZOMBIE) {
-				/*
-				 * Allow the scheduler to adjust the priority of the
-				 * parent when a kseg is exiting.
-				 */
-				if (curthread->td_proc->p_pid != 1) {
-					mtx_lock_spin(&sched_lock);
-					sched_exit(curthread->td_ksegrp,
-					    FIRST_KSEGRP_IN_PROC(p));
-					mtx_unlock_spin(&sched_lock);
-				}
-
 				td->td_retval[0] = p->p_pid;
 #ifdef COMPAT_43
 				if (compat)
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 4cec7e317ec6..8ca68f583e8e 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -496,7 +496,7 @@ fork1(td, flags, pages, procp)
 	 * Allow the scheduler to adjust the priority of the child and
 	 * parent while we hold the sched_lock.
 	 */
-	sched_fork(td->td_ksegrp, kg2);
+	sched_fork(p1, p2);
 
 	mtx_unlock_spin(&sched_lock);
 	p2->p_ucred = crhold(td->td_ucred);
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 5dfbfd725e88..b72520c8b1e0 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -407,7 +407,7 @@ rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
 	default:
 		return (EINVAL);
 	}
-	kg->kg_pri_class = rtp->type;
+	sched_class(kg, rtp->type);
 	if (curthread->td_ksegrp == kg) {
 		curthread->td_base_pri = kg->kg_user_pri;
 		curthread->td_priority = kg->kg_user_pri;	/* XXX dubious */
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 068911fa8b93..585ca5d42c57 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -35,32 +35,43 @@
 int	sched_rr_interval(void);
 int	sched_runnable(void);
 
+/*
+ * Proc related scheduling hooks.
+ */
+void	sched_exit(struct proc *p, struct proc *child);
+void	sched_fork(struct proc *p, struct proc *child);
+
 /*
  * KSE Groups contain scheduling priority information. They record the
  * behavior of groups of KSEs and threads.
  */
-void	sched_exit(struct ksegrp *kg, struct ksegrp *child);
-void	sched_fork(struct ksegrp *kg, struct ksegrp *child);
+void	sched_class(struct ksegrp *kg, int class);
+void	sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child);
+void	sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child);
 void	sched_nice(struct ksegrp *kg, int nice);
-void	sched_prio(struct thread *td, u_char prio);
-void	sched_userret(struct thread *td);
 
 /*
  * Threads are switched in and out, block on resources, and have temporary
  * priorities inherited from their ksegs.
  */
-void	sched_clock(struct thread *td);
+void	sched_exit_thread(struct thread *td, struct thread *child);
+void	sched_fork_thread(struct thread *td, struct thread *child);
+void	sched_prio(struct thread *td, u_char prio);
 void	sched_sleep(struct thread *td, u_char prio);
 void	sched_switchin(struct thread *td);
 void	sched_switchout(struct thread *td);
+void	sched_userret(struct thread *td);
 void	sched_wakeup(struct thread *td);
 
 /*
  * KSEs are moved on and off of run queues.
  */
 void	sched_add(struct kse *ke);
-void	sched_rem(struct kse *ke);
 struct kse *sched_choose(void);
+void	sched_clock(struct kse *ke);
+void	sched_exit_kse(struct kse *ke, struct kse *child);
+void	sched_fork_kse(struct kse *ke, struct kse *child);
+void	sched_rem(struct kse *ke);
 
 /*
  * and they use up cpu time.
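
Note: with this change the scheduler-independent code (kern_exit.c, kern_fork.c)
calls the new proc-level sched_exit()/sched_fork() hooks, and each scheduler is
expected to fan those out to the per-ksegrp, per-kse, and per-thread hooks now
declared in sched.h. The scheduler-side half of the change is not part of this
diff; below is a minimal sketch of what that fan-out could look like, assuming
the FIRST_KSEGRP_IN_PROC/FIRST_KSE_IN_PROC/FIRST_THREAD_IN_PROC accessors from
<sys/proc.h> and that only the first ksegrp/kse/thread of each process needs to
be handed over:

	#include <sys/param.h>
	#include <sys/proc.h>
	#include <sys/sched.h>

	/*
	 * Sketch only: hand each layer of the parent/child pair to its
	 * per-object hook, so the proc-level entry points stay thin.
	 */
	void
	sched_exit(struct proc *p, struct proc *child)
	{
		sched_exit_kse(FIRST_KSE_IN_PROC(p),
		    FIRST_KSE_IN_PROC(child));
		sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p),
		    FIRST_KSEGRP_IN_PROC(child));
		sched_exit_thread(FIRST_THREAD_IN_PROC(p),
		    FIRST_THREAD_IN_PROC(child));
	}

	void
	sched_fork(struct proc *p, struct proc *child)
	{
		sched_fork_kse(FIRST_KSE_IN_PROC(p),
		    FIRST_KSE_IN_PROC(child));
		sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p),
		    FIRST_KSEGRP_IN_PROC(child));
		sched_fork_thread(FIRST_THREAD_IN_PROC(p),
		    FIRST_THREAD_IN_PROC(child));
	}

This keeps callers working on whole processes while the per-layer bookkeeping
stays behind the finer-grained hooks, and it lets exit1() charge the exiting
child against its parent directly instead of doing so later in wait1().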