 - Adjust sched hooks for fork and exit to take processes as arguments instead
   of ksegs since they primarily operate on processes.
 - KSEs take ticks, so pass the kse through sched_clock().
 - Add a sched_class() routine that adjusts a ksegrp's priority class.
 - Define a sched_fork_{kse,thread,ksegrp} and sched_exit_{kse,thread,ksegrp}
   that will be used to tell the scheduler about new instances of these
   structures within the same process.  These will be used by THR and KSE.
 - Change sched_4bsd to reflect this API update.
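
As a reading aid (not part of the commit), here is a minimal sketch of how the
proc-level sched_fork() hook could fan out to the new per-structure hooks in
sched_4bsd.  The FIRST_KSEGRP_IN_PROC(), FIRST_THREAD_IN_PROC() and
FIRST_KSE_IN_KSEGRP() accessors and the sched_lock assertion are assumptions
drawn from the surrounding kernel code; the committed function bodies may
differ:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>

void
sched_fork(struct proc *p, struct proc *child)
{
	struct ksegrp *kg, *ckg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = FIRST_KSEGRP_IN_PROC(p);
	ckg = FIRST_KSEGRP_IN_PROC(child);

	/* Seed the child's group, thread, and kse scheduling state. */
	sched_fork_ksegrp(kg, ckg);
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(child));
	sched_fork_kse(FIRST_KSE_IN_KSEGRP(kg), FIRST_KSE_IN_KSEGRP(ckg));
}

The per-structure variants exist so that THR and KSE can create additional
threads, ksegrps, or kses inside an existing process and still inform the
scheduler without going through the full process-level hook.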
Jeff Roberson 2003-04-11 03:39:07 +00:00
parent 7842e8b37b
commit f6f230febe
5 changed files with 27 additions and 20 deletions


@@ -405,7 +405,7 @@ statclock(frame)
}
}
-sched_clock(td);
+sched_clock(ke);
/* Update resource usage integrals and maximums. */
if ((pstats = p->p_stats) != NULL &&


@@ -493,6 +493,13 @@ exit1(struct thread *td, int rv)
PCPU_SET(switchticks, ticks);
cpu_sched_exit(td); /* XXXKSE check if this should be in thread_exit */
+/*
+ * Allow the scheduler to adjust the priority of the
+ * parent when a kseg is exiting.
+ */
+if (p->p_pid != 1)
+	sched_exit(p->p_pptr, p);
/*
* Make sure the scheduler takes this thread out of its tables etc.
* This will also release this thread's reference to the ucred.
@@ -575,17 +582,6 @@ wait1(struct thread *td, struct wait_args *uap, int compat)
nfound++;
if (p->p_state == PRS_ZOMBIE) {
-/*
- * Allow the scheduler to adjust the priority of the
- * parent when a kseg is exiting.
- */
-if (curthread->td_proc->p_pid != 1) {
-	mtx_lock_spin(&sched_lock);
-	sched_exit(curthread->td_ksegrp,
-	    FIRST_KSEGRP_IN_PROC(p));
-	mtx_unlock_spin(&sched_lock);
-}
td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
if (compat)
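
For comparison with the removed wait1() block above, an illustrative sketch
(not the committed code) of how the proc-level sched_exit() hook and its
ksegrp variant could let the parent absorb the exiting child's estcpu; the
ESTCPULIM() macro and the kg_estcpu field are assumed from the existing 4BSD
scheduler:

void
sched_exit(struct proc *p, struct proc *child)
{
	/* Only the ksegrp hook carries real work in this sketch. */
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* Parent inherits the child's scheduling history. */
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
}

Calling this from exit1(), presumably with sched_lock already held at that
point, replaces the explicit lock/unlock and FIRST_KSEGRP_IN_PROC() lookup
that wait1() previously needed.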


@@ -496,7 +496,7 @@ fork1(td, flags, pages, procp)
* Allow the scheduler to adjust the priority of the child and
* parent while we hold the sched_lock.
*/
-sched_fork(td->td_ksegrp, kg2);
+sched_fork(p1, p2);
mtx_unlock_spin(&sched_lock);
p2->p_ucred = crhold(td->td_ucred);


@@ -407,7 +407,7 @@ rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
default:
return (EINVAL);
}
-kg->kg_pri_class = rtp->type;
+sched_class(kg, rtp->type);
if (curthread->td_ksegrp == kg) {
curthread->td_base_pri = kg->kg_user_pri;
curthread->td_priority = kg->kg_user_pri; /* XXX dubious */
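
A sketch of what the new sched_class() hook might look like in the 4BSD
scheduler: it simply records the priority class on the ksegrp, replacing the
direct kg_pri_class assignment shown above (the sched_lock assertion is an
assumption, and the committed body may differ):

void
sched_class(struct ksegrp *kg, int class)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_pri_class = class;
}

Routing the assignment through a hook gives alternative schedulers a chance
to react when a ksegrp changes priority class, instead of having the rtprio
code manipulate scheduler-owned state directly.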


@@ -35,32 +35,43 @@
int sched_rr_interval(void);
int sched_runnable(void);
+/*
+ * Proc related scheduling hooks.
+ */
+void sched_exit(struct proc *p, struct proc *child);
+void sched_fork(struct proc *p, struct proc *child);
/*
* KSE Groups contain scheduling priority information. They record the
* behavior of groups of KSEs and threads.
*/
-void sched_exit(struct ksegrp *kg, struct ksegrp *child);
-void sched_fork(struct ksegrp *kg, struct ksegrp *child);
+void sched_class(struct ksegrp *kg, int class);
+void sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child);
+void sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child);
void sched_nice(struct ksegrp *kg, int nice);
-void sched_prio(struct thread *td, u_char prio);
-void sched_userret(struct thread *td);
/*
* Threads are switched in and out, block on resources, and have temporary
* priorities inherited from their ksegs.
*/
-void sched_clock(struct thread *td);
+void sched_exit_thread(struct thread *td, struct thread *child);
+void sched_fork_thread(struct thread *td, struct thread *child);
+void sched_prio(struct thread *td, u_char prio);
void sched_sleep(struct thread *td, u_char prio);
void sched_switchin(struct thread *td);
void sched_switchout(struct thread *td);
+void sched_userret(struct thread *td);
void sched_wakeup(struct thread *td);
/*
* KSEs are moved on and off of run queues.
*/
void sched_add(struct kse *ke);
-void sched_rem(struct kse *ke);
struct kse *sched_choose(void);
+void sched_clock(struct kse *ke);
+void sched_exit_kse(struct kse *ke, struct kse *child);
+void sched_fork_kse(struct kse *ke, struct kse *child);
+void sched_rem(struct kse *ke);
/*
* and they use up cpu time.