Initialize the idle thread's lock sooner so it's not evaluated on every fork
exit and we can rely on it elsewhere.

Reviewed by:	mav, kib, jhb, markj
Differential Revision:	https://reviews.freebsd.org/D22624
commit e15046952d
parent fb6a57ef89
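For context, a simplified userspace sketch of the before/after pattern follows; it is not the kernel code. The struct layouts, the single global lock, and the helpers cpu_bringup(), fork_exit_old(), and fork_exit_new() are illustrative assumptions; only the shape of the change (drop the per-fork-exit idle-thread branch by assigning the idle thread's lock pointer once, early) mirrors the diff below.

/*
 * Sketch only: models the idea of the change, not sched_ule.c itself.
 * Before, every thread returning through fork exit paid a branch to check
 * whether it was the idle thread and lazily set td_lock.  After, the idle
 * thread's lock pointer is set once when the CPU is brought up, so the
 * branch disappears and td_lock can be relied on everywhere.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct mtx { const char *name; };		/* stand-in for a run-queue lock */

struct thread {
	struct mtx	*td_lock;		/* lock protecting this thread */
	bool		 td_idle;		/* stand-in for TD_IS_IDLETHREAD */
};

static struct mtx tdq_lock = { "tdq lock" };	/* per-CPU run-queue lock */
static struct thread idlethread;		/* the CPU's idle thread */

/* Old behaviour: every fork exit re-checks whether td is the idle thread. */
static void
fork_exit_old(struct thread *td)
{
	if (td->td_idle)			/* evaluated on every fork exit */
		td->td_lock = &tdq_lock;
	assert(td->td_lock == &tdq_lock);
}

/* New behaviour: the idle thread's lock is assigned once at CPU bring-up. */
static void
cpu_bringup(void)
{
	idlethread.td_lock = &tdq_lock;
}

/* Fork exit now only asserts the invariant; the branch is gone. */
static void
fork_exit_new(struct thread *td)
{
	assert(td->td_lock == &tdq_lock);
}

int
main(void)
{
	struct thread worker = { .td_lock = &tdq_lock, .td_idle = false };

	idlethread.td_idle = true;
	fork_exit_old(&idlethread);		/* old path: lazy assignment */

	idlethread.td_lock = NULL;		/* reset for the new-path demo */
	cpu_bringup();				/* new path: eager assignment */
	fork_exit_new(&idlethread);
	fork_exit_new(&worker);

	printf("idle thread lock: %s\n", idlethread.td_lock->name);
	return (0);
}

The actual change, as the hunks below show, does the eager assignment in sched_throw() (CPU bring-up path) and in sched_setup() for thread0, and removes the TD_IS_IDLETHREAD() check from sched_fork_exit().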
@@ -1463,7 +1463,7 @@ sched_setup(void *dummy)
 	/* Add thread0's load since it's running. */
 	TDQ_LOCK(tdq);
-	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
+	thread0.td_lock = TDQ_LOCKPTR(tdq);
 	tdq_load_add(tdq, &thread0);
 	tdq->tdq_lowpri = thread0.td_priority;
 	TDQ_UNLOCK(tdq);
@@ -2913,6 +2913,7 @@ sched_throw(struct thread *td)
 		spinlock_exit();
 		PCPU_SET(switchtime, cpu_ticks());
 		PCPU_SET(switchticks, ticks);
+		PCPU_GET(idlethread)->td_lock = TDQ_LOCKPTR(tdq);
 	} else {
 		tdq = TDQ_SELF();
 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
@@ -2943,8 +2944,6 @@ sched_fork_exit(struct thread *td)
 	 */
 	cpuid = PCPU_GET(cpuid);
 	tdq = TDQ_SELF();
-	if (TD_IS_IDLETHREAD(td))
-		td->td_lock = TDQ_LOCKPTR(tdq);
 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
 	td->td_oncpu = cpuid;
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);