Move the sched_lock owner state update that follows the context
switch in fork_exit() to before anything else is done (but keep
sched_lock held across the deadthread check).  This heads off one
class of nasty future bugs: anything that might someday be called
before the update and that touches sched_lock or critical sections
would otherwise run with stale owner state.

Discussed with: tjr
Bosko Milekic 2004-07-27 03:46:31 +00:00
parent 5dd954f3de
commit 0047b9a96a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=132682
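
For context: a newly created thread does not return through mi_switch(); cpu_switch() delivers it straight into fork_exit() with sched_lock still held, but with the lock's owner bookkeeping still describing the thread that ran before it. The hunks below move the fixup of that state (the sched_lock.mtx_lock = (uintptr_t)td; store and the related assertions) to the very top of the function, so nothing can observe the stale owner first. A minimal standalone sketch of the hazard and the fix, using a hypothetical simplified spinlock and helper names rather than the real struct mtx:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical, simplified stand-ins: the real code uses struct mtx,
 * whose mtx_lock field holds (uintptr_t)owning_thread plus flag bits.
 */
struct thread {
	int tid;
};
struct spinlock {
	uintptr_t owner;	/* analogous to sched_lock.mtx_lock */
};

static struct spinlock sched_lock_sketch;

/*
 * Stand-in for mtx_assert(&sched_lock, MA_OWNED): code that "plays with"
 * the lock trusts the owner field to name the current thread.
 */
static void
assert_owned(struct spinlock *lk, struct thread *curthread)
{
	assert(lk->owner == (uintptr_t)curthread);
}

/*
 * The shape of fork_exit() after this commit: claim ownership for the
 * new thread before doing anything else.
 */
static void
fork_exit_sketch(struct thread *child)
{
	/* First thing on entry: fix up the owner state. */
	sched_lock_sketch.owner = (uintptr_t)child;

	/* Everything after this point may safely consult the lock. */
	assert_owned(&sched_lock_sketch, child);
	printf("thread %d now owns the lock\n", child->tid);
}

int
main(void)
{
	struct thread parent = { .tid = 1 }, child = { .tid = 2 };

	/* cpu_switch() leaves the lock held, owner still the old thread. */
	sched_lock_sketch.owner = (uintptr_t)&parent;
	fork_exit_sketch(&child);
	return (0);
}

The assertion passes only because the owner store happens before assert_owned() runs; reorder the two and it trips, which is exactly the class of bug the commit message describes.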

@@ -782,6 +782,21 @@ fork_exit(callout, arg, frame)
 	struct proc *p;
 	struct thread *td;
 
+	/*
+	 * Finish setting up thread glue so that it begins execution in a
+	 * non-nested critical section with sched_lock held but not recursed.
+	 */
+	td = curthread;
+	p = td->td_proc;
+	td->td_oncpu = PCPU_GET(cpuid);
+	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
+
+	sched_lock.mtx_lock = (uintptr_t)td;
+	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
+	cpu_critical_fork_exit();
+	CTR3(KTR_PROC, "fork_exit: new thread %p (pid %d, %s)", td, p->p_pid,
+	    p->p_comm);
+
 	/*
 	 * Processes normally resume in mi_switch() after being
 	 * cpu_switch()'ed to, but when children start up they arrive here
@@ -793,19 +808,6 @@ fork_exit(callout, arg, frame)
 		thread_stash(td);
 	}
 
-	td = curthread;
-	p = td->td_proc;
-	td->td_oncpu = PCPU_GET(cpuid);
-	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
-	/*
-	 * Finish setting up thread glue so that it begins execution in a
-	 * non-nested critical section with sched_lock held but not recursed.
-	 */
-	sched_lock.mtx_lock = (uintptr_t)td;
-	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
-	cpu_critical_fork_exit();
-	CTR3(KTR_PROC, "fork_exit: new thread %p (pid %d, %s)", td, p->p_pid,
-	    p->p_comm);
 	mtx_unlock_spin(&sched_lock);
 
 	/*
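
Why the store has to come first is visible in what the assertions check. In the mutex code of this era, MA_OWNED compares the lock word against curthread, and MA_NOTRECURSED additionally requires a zero recursion count. A rough sketch of that ownership test; the flag-mask value and names here are simplified assumptions, not the real MTX_FLAGMASK:

#include <stdint.h>

struct thread;				/* opaque here */

/* Simplified assumption: pretend the low two bits are flag bits. */
#define	MTX_FLAGMASK_SKETCH	((uintptr_t)0x3)

/*
 * Roughly what MA_OWNED verifies: the lock word, with flag bits masked
 * off, equals the current thread pointer.  Until fork_exit() stores
 * (uintptr_t)td into sched_lock.mtx_lock, this test fails for the child
 * even though the child really does hold the lock.
 */
static int
sched_lock_owned_sketch(uintptr_t mtx_lock_word, struct thread *td)
{
	return ((mtx_lock_word & ~MTX_FLAGMASK_SKETCH) == (uintptr_t)td);
}

This also explains the MA_NOTRECURSED half of the assertion in the hunk above: the child must start out holding sched_lock exactly once, so its recursion count is expected to be zero.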