- Move some common code out of sched_fork_exit() and back into fork_exit().

This commit is contained in:
Jeff Roberson 2007-06-12 07:47:09 +00:00
parent ec32b37ecd
commit fe54587ffa
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=170600
3 changed files with 20 additions and 30 deletions

View File: sys/kern/kern_fork.c

@ -768,6 +768,7 @@ fork_exit(callout, arg, frame)
{
struct proc *p;
struct thread *td;
struct thread *dtd;
td = curthread;
p = td->td_proc;
@ -777,6 +778,17 @@ fork_exit(callout, arg, frame)
td, td->td_sched, p->p_pid, p->p_comm);
sched_fork_exit(td);
/*
* Processes normally resume in mi_switch() after being
* cpu_switch()'ed to, but when children start up they arrive here
* instead, so we must do much the same things as mi_switch() would.
*/
if ((dtd = PCPU_GET(deadthread))) {
PCPU_SET(deadthread, NULL);
thread_stash(dtd);
}
thread_unlock(td);
/*
* cpu_set_fork_handler intercepts this function call to
* have this call a non-return function to stay in kernel mode.

View File: sys/kern/sched_4bsd.c

@ -1410,27 +1410,16 @@ sched_throw(struct thread *td)
}
void
sched_fork_exit(struct thread *ctd)
sched_fork_exit(struct thread *td)
{
struct thread *td;
/*
* Finish setting up thread glue so that it begins execution in a
* non-nested critical section with sched_lock held but not recursed.
*/
ctd->td_oncpu = PCPU_GET(cpuid);
sched_lock.mtx_lock = (uintptr_t)ctd;
THREAD_LOCK_ASSERT(ctd, MA_OWNED | MA_NOTRECURSED);
/*
* Processes normally resume in mi_switch() after being
* cpu_switch()'ed to, but when children start up they arrive here
* instead, so we must do much the same things as mi_switch() would.
*/
if ((td = PCPU_GET(deadthread))) {
PCPU_SET(deadthread, NULL);
thread_stash(td);
}
thread_unlock(ctd);
td->td_oncpu = PCPU_GET(cpuid);
sched_lock.mtx_lock = (uintptr_t)td;
THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
}
#define KERN_SWITCH_INCLUDE 1

View File: sys/kern/sched_ule.c

@ -2145,27 +2145,16 @@ sched_throw(struct thread *td)
}
void
sched_fork_exit(struct thread *ctd)
sched_fork_exit(struct thread *td)
{
struct thread *td;
/*
* Finish setting up thread glue so that it begins execution in a
* non-nested critical section with sched_lock held but not recursed.
*/
ctd->td_oncpu = PCPU_GET(cpuid);
sched_lock.mtx_lock = (uintptr_t)ctd;
THREAD_LOCK_ASSERT(ctd, MA_OWNED | MA_NOTRECURSED);
/*
* Processes normally resume in mi_switch() after being
* cpu_switch()'ed to, but when children start up they arrive here
* instead, so we must do much the same things as mi_switch() would.
*/
if ((td = PCPU_GET(deadthread))) {
PCPU_SET(deadthread, NULL);
thread_stash(td);
}
thread_unlock(ctd);
td->td_oncpu = PCPU_GET(cpuid);
sched_lock.mtx_lock = (uintptr_t)td;
THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
}
static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");