When calling scheduler entrypoints for creating new threads and processes,

specify "us" as the thread, not the process/ksegrp/kse.
You can always find the others from the thread but the converse is not true.
Theoretically this would lead to runtime being allocated to the wrong
entity in some cases, though it is not clear how often this actually happened.
(would only affect threaded processes and would probably be pretty benign,
but it WAS a bug..)

Reviewed by: peter
This commit is contained in:
julian 2004-07-18 23:36:13 +00:00
parent 9d0e4372ee
commit 550fffe52b
8 changed files with 46 additions and 43 deletions

View File

@ -526,7 +526,7 @@ exit1(struct thread *td, int rv)
PCPU_SET(switchticks, ticks);
cnt.v_swtch++;
sched_exit(p->p_pptr, p);
sched_exit(p->p_pptr, td);
/*
* Make sure the scheduler takes this thread out of its tables etc.

View File

@ -515,7 +515,7 @@ fork1(td, flags, pages, procp)
* Allow the scheduler to adjust the priority of the child and
* parent while we hold the sched_lock.
*/
sched_fork(p1, p2);
sched_fork(td, p2);
mtx_unlock_spin(&sched_lock);
p2->p_ucred = crhold(td->td_ucred);

View File

@ -522,7 +522,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
return (EPROCLIM);
}
ksegrp_link(newkg, p);
sched_fork_ksegrp(kg, newkg);
sched_fork_ksegrp(td, newkg);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
} else {
@ -569,7 +569,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
#endif
mtx_lock_spin(&sched_lock);
kse_link(newke, newkg);
sched_fork_kse(td->td_kse, newke);
sched_fork_kse(td, newke);
/* Add engine */
kse_reassign(newke);
mtx_unlock_spin(&sched_lock);

View File

@ -87,7 +87,7 @@ thr_exit1(void)
ke->ke_state = KES_UNQUEUED;
ke->ke_thread = NULL;
kse_unlink(ke);
sched_exit_kse(TAILQ_NEXT(ke, ke_kglist), ke);
sched_exit_kse(TAILQ_NEXT(ke, ke_kglist), td);
/*
* If we were stopped while waiting for all threads to exit and this
@ -177,7 +177,7 @@ thr_create(struct thread *td, struct thr_create_args *uap)
td0->td_kse = ke0;
ke0->ke_thread = td0;
sched_fork_kse(td->td_kse, ke0);
sched_fork_kse(td, ke0);
sched_fork_thread(td, td0);
TD_SET_CAN_RUN(td0);

View File

@ -651,7 +651,7 @@ thread_exit(void)
upcall_remove(td);
sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
sched_exit_kse(FIRST_KSE_IN_PROC(p), ke);
sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
ke->ke_state = KES_UNQUEUED;
ke->ke_thread = NULL;
/*
@ -660,7 +660,7 @@ thread_exit(void)
if (ke->ke_flags & KEF_EXIT) {
kse_unlink(ke);
if (kg->kg_kses == 0) {
sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), kg);
sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
ksegrp_unlink(kg);
}
}

View File

@ -536,24 +536,24 @@ sched_clock(struct thread *td)
* aggregated all the estcpu into the 'built-in' ksegrp.
*/
void
sched_exit(struct proc *p, struct proc *p1)
sched_exit(struct proc *p, struct thread *td)
{
sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
sched_exit_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}
void
sched_exit_kse(struct kse *ke, struct kse *child)
sched_exit_kse(struct kse *ke, struct thread *child)
{
}
void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd)
{
mtx_assert(&sched_lock, MA_OWNED);
kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + childtd->td_ksegrp->kg_estcpu);
}
void
@ -564,24 +564,24 @@ sched_exit_thread(struct thread *td, struct thread *child)
}
void
sched_fork(struct proc *p, struct proc *p1)
sched_fork(struct thread *td, struct proc *p1)
{
sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
sched_fork_kse(td, FIRST_KSE_IN_PROC(p1));
sched_fork_ksegrp(td, FIRST_KSEGRP_IN_PROC(p1));
sched_fork_thread(td, FIRST_THREAD_IN_PROC(p1));
}
void
sched_fork_kse(struct kse *ke, struct kse *child)
sched_fork_kse(struct thread *td, struct kse *child)
{
child->ke_sched->ske_cpticks = 0;
}
void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
mtx_assert(&sched_lock, MA_OWNED);
child->kg_estcpu = kg->kg_estcpu;
child->kg_estcpu = td->td_ksegrp->kg_estcpu;
}
void

View File

@ -1256,21 +1256,23 @@ sched_wakeup(struct thread *td)
* priority.
*/
void
sched_fork(struct proc *p, struct proc *p1)
sched_fork(struct thread *td, struct proc *p1)
{
mtx_assert(&sched_lock, MA_OWNED);
p1->p_nice = p->p_nice;
sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
p1->p_nice = td->td_proc->p_nice;
sched_fork_ksegrp(td, FIRST_KSEGRP_IN_PROC(p1));
sched_fork_kse(td, FIRST_KSE_IN_PROC(p1));
sched_fork_thread(td, FIRST_THREAD_IN_PROC(p1));
}
void
sched_fork_kse(struct kse *ke, struct kse *child)
sched_fork_kse(struct thread *td, struct kse *child)
{
struct kse *ke = td->td_kse;
child->ke_slice = 1; /* Attempt to quickly learn interactivity. */
child->ke_cpu = ke->ke_cpu;
child->ke_runq = NULL;
@ -1282,8 +1284,9 @@ sched_fork_kse(struct kse *ke, struct kse *child)
}
void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
struct ksegrp *kg = td->td_ksegrp;
PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
child->kg_slptime = kg->kg_slptime;
@ -1357,24 +1360,24 @@ sched_class(struct ksegrp *kg, int class)
* Return some of the child's priority and interactivity to the parent.
*/
void
sched_exit(struct proc *p, struct proc *child)
sched_exit(struct proc *p, struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
}
void
sched_exit_kse(struct kse *ke, struct kse *child)
sched_exit_kse(struct kse *ke, struct thread *td)
{
kseq_load_rem(KSEQ_CPU(child->ke_cpu), child);
kseq_load_rem(KSEQ_CPU(td->td_kse->ke_cpu), td->td_kse);
}
void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
{
/* kg->kg_slptime += child->kg_slptime; */
kg->kg_runtime += child->kg_runtime;
/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
kg->kg_runtime += td->td_ksegrp->kg_runtime;
sched_interact_update(kg);
}

View File

@ -45,16 +45,16 @@ int sched_runnable(void);
/*
* Proc related scheduling hooks.
*/
void sched_exit(struct proc *p, struct proc *child);
void sched_fork(struct proc *p, struct proc *child);
void sched_exit(struct proc *p, struct thread *childtd);
void sched_fork(struct thread *td, struct proc *child);
/*
* KSE Groups contain scheduling priority information. They record the
* behavior of groups of KSEs and threads.
*/
void sched_class(struct ksegrp *kg, int class);
void sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child);
void sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child);
void sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd);
void sched_fork_ksegrp(struct thread *td, struct ksegrp *child);
void sched_nice(struct proc *p, int nice);
/*
@ -90,8 +90,8 @@ static __inline void sched_unpin(void);
/*
* These interfaces will eventually be removed.
*/
void sched_exit_kse(struct kse *ke, struct kse *child);
void sched_fork_kse(struct kse *ke, struct kse *child);
void sched_exit_kse(struct kse *ke, struct thread *childtd);
void sched_fork_kse(struct thread *td, struct kse *child);
/*
* These procedures tell the process data structure allocation code how