sched: split sched_ap_entry() out of sched_throw()

sched_throw() can no longer take a NULL thread; APs enter through
sched_ap_entry() instead.  This completely removes branching in the
common case and cleans up both paths.  No functional change intended.

Reviewed by:	kib, markj
Differential Revision:	https://reviews.freebsd.org/D32829
Kyle Evans 2021-11-03 19:09:56 -05:00
parent 15d4459cb2
commit 6a8ea6d174
9 changed files with 101 additions and 46 deletions
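
For reference, the resulting split of the interface can be summarized as follows. This is a sketch of the two entry points with comments that summarize the locking expectations implied by the assertions in the scheduler hunks below; the comments are not text from the commit:

struct thread;

void	sched_ap_entry(void);		/* AP bring-up: enters with nothing to hand back;
					   locks the scheduler itself; never returns */
void	sched_throw(struct thread *td);	/* thread exit: td is non-NULL and its thread lock
					   is held by the caller; never returns */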


@@ -217,7 +217,7 @@ init_secondary(int cpu)
 	CTR0(KTR_SMP, "go into scheduler");
 	/* Enter the scheduler */
-	sched_throw(NULL);
+	sched_ap_entry();
 	panic("scheduler returned us to %s", __func__);
 	/* NOTREACHED */


@@ -293,7 +293,7 @@ init_secondary(uint64_t cpu)
 	MPASS(PCPU_GET(curpcb) == NULL);
 	/* Enter the scheduler */
-	sched_throw(NULL);
+	sched_ap_entry();
 	panic("scheduler returned us to init_secondary");
 	/* NOTREACHED */


@@ -1662,12 +1662,22 @@ sched_idletd(void *dummy)
 	}
 }
+static void
+sched_throw_tail(struct thread *td)
+{
+	mtx_assert(&sched_lock, MA_OWNED);
+	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
+	cpu_throw(td, choosethread());	/* doesn't return */
+}
 /*
- * A CPU is entering for the first time or a thread is exiting.
+ * A CPU is entering for the first time.
  */
 void
-sched_throw(struct thread *td)
+sched_ap_entry(void)
 {
 	/*
 	 * Correct spinlock nesting.  The idle thread context that we are
 	 * borrowing was created so that it would start out with a single
@@ -1677,20 +1687,29 @@ sched_throw(struct thread *td)
 	 * spinlock_exit() will simply adjust the counts without allowing
 	 * spin lock using code to interrupt us.
	 */
-	if (td == NULL) {
-		mtx_lock_spin(&sched_lock);
-		spinlock_exit();
-		PCPU_SET(switchtime, cpu_ticks());
-		PCPU_SET(switchticks, ticks);
-	} else {
-		lock_profile_release_lock(&sched_lock.lock_object, true);
-		MPASS(td->td_lock == &sched_lock);
-		td->td_lastcpu = td->td_oncpu;
-		td->td_oncpu = NOCPU;
-	}
-	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
-	cpu_throw(td, choosethread());	/* doesn't return */
+	mtx_lock_spin(&sched_lock);
+	spinlock_exit();
+	PCPU_SET(switchtime, cpu_ticks());
+	PCPU_SET(switchticks, ticks);
+	sched_throw_tail(NULL);
+}
+/*
+ * A thread is exiting.
+ */
+void
+sched_throw(struct thread *td)
+{
+	MPASS(td != NULL);
+	MPASS(td->td_lock == &sched_lock);
+	lock_profile_release_lock(&sched_lock.lock_object, true);
+	td->td_lastcpu = td->td_oncpu;
+	td->td_oncpu = NOCPU;
+	sched_throw_tail(td);
 }
 void
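
The spinlock-nesting correction in the AP path above is subtle, so here is a conceptual sketch of the count bookkeeping (an illustration assuming the usual spinlock_enter()/spinlock_exit() accounting; the function name is made up and this is not code from the tree):

static void
example_ap_spinlock_nesting(void)
{
	/* The borrowed idle context starts as if one spin lock were already
	   held: md_spinlock_count == 1 and interrupts disabled. */
	mtx_lock_spin(&sched_lock);	/* spinlock_enter(): count 1 -> 2 */
	spinlock_exit();		/* count 2 -> 1; interrupts stay disabled */
	/* sched_lock is now held at a single nesting level, matching the
	   KASSERT in sched_throw_tail() before cpu_throw(). */
}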


@@ -2985,7 +2985,52 @@ sched_idletd(void *dummy)
 }
 /*
- * A CPU is entering for the first time or a thread is exiting.
+ * sched_throw_grab() chooses a thread from the queue to switch to
+ * next. It returns with the tdq lock dropped in a spinlock section to
+ * keep interrupts disabled until the CPU is running in a proper threaded
+ * context.
+ */
+static struct thread *
+sched_throw_grab(struct tdq *tdq)
+{
+	struct thread *newtd;
+	newtd = choosethread();
+	spinlock_enter();
+	TDQ_UNLOCK(tdq);
+	KASSERT(curthread->td_md.md_spinlock_count == 1,
+	    ("invalid count %d", curthread->td_md.md_spinlock_count));
+	return (newtd);
+}
+/*
+ * A CPU is entering for the first time.
+ */
+void
+sched_ap_entry(void)
+{
+	struct thread *newtd;
+	struct tdq *tdq;
+	tdq = TDQ_SELF();
+	/* This should have been setup in schedinit_ap(). */
+	THREAD_LOCKPTR_ASSERT(curthread, TDQ_LOCKPTR(tdq));
+	TDQ_LOCK(tdq);
+	/* Correct spinlock nesting. */
+	spinlock_exit();
+	PCPU_SET(switchtime, cpu_ticks());
+	PCPU_SET(switchticks, ticks);
+	newtd = sched_throw_grab(tdq);
+	/* doesn't return */
+	cpu_throw(NULL, newtd);
+}
+/*
+ * A thread is exiting.
  */
 void
 sched_throw(struct thread *td)
@@ -2994,30 +3039,20 @@ sched_throw(struct thread *td)
 	struct tdq *tdq;
 	tdq = TDQ_SELF();
-	if (__predict_false(td == NULL)) {
-		TDQ_LOCK(tdq);
-		/* Correct spinlock nesting. */
-		spinlock_exit();
-		PCPU_SET(switchtime, cpu_ticks());
-		PCPU_SET(switchticks, ticks);
-	} else {
-		THREAD_LOCK_ASSERT(td, MA_OWNED);
-		THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(tdq));
-		tdq_load_rem(tdq, td);
-		td->td_lastcpu = td->td_oncpu;
-		td->td_oncpu = NOCPU;
-		thread_lock_block(td);
-	}
-	newtd = choosethread();
-	spinlock_enter();
-	TDQ_UNLOCK(tdq);
-	KASSERT(curthread->td_md.md_spinlock_count == 1,
-	    ("invalid count %d", curthread->td_md.md_spinlock_count));
+	MPASS(td != NULL);
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
+	THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(tdq));
+	tdq_load_rem(tdq, td);
+	td->td_lastcpu = td->td_oncpu;
+	td->td_oncpu = NOCPU;
+	thread_lock_block(td);
+	newtd = sched_throw_grab(tdq);
 	/* doesn't return */
-	if (__predict_false(td == NULL))
-		cpu_throw(td, newtd);		/* doesn't return */
-	else
-		cpu_switch(td, newtd, TDQ_LOCKPTR(tdq));
+	cpu_switch(td, newtd, TDQ_LOCKPTR(tdq));
 }
 /*
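
The other half of the split, sched_throw(), keeps serving the thread-exit side, which is not touched by this commit. As a usage sketch (an illustration only: the caller name is made up and the actual exit path is not part of this diff), a caller is expected to look roughly like this:

static void
example_exit_last_act(struct thread *td)
{
	thread_lock(td);	/* sched_throw() asserts the thread lock is held */
	/* ... final per-thread teardown ... */
	sched_throw(td);	/* switches to another thread; never returns */
	panic("sched_throw() returned");
}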


@@ -335,7 +335,7 @@ smp_init_secondary(u_int32_t cpuid)
 	cpu_initclocks_ap();
 	/* enter the scheduler */
-	sched_throw(NULL);
+	sched_ap_entry();
 	panic("scheduler returned us to %s", __func__);
 	/* NOTREACHED */


@@ -112,7 +112,7 @@ machdep_ap_bootstrap(void)
 	cpu_initclocks_ap();
 	/* Announce ourselves awake, and enter the scheduler */
-	sched_throw(NULL);
+	sched_ap_entry();
 }
 void


@@ -291,7 +291,7 @@ init_secondary(uint64_t hart)
 	MPASS(PCPU_GET(curpcb) == NULL);
 	/* Enter the scheduler */
-	sched_throw(NULL);
+	sched_ap_entry();
 	panic("scheduler returned us to init_secondary");
 	/* NOTREACHED */


@@ -91,6 +91,7 @@ void	sched_nice(struct proc *p, int nice);
  * Threads are switched in and out, block on resources, have temporary
  * priorities inherited from their procs, and use up cpu time.
  */
+void	sched_ap_entry(void);
 void	sched_exit_thread(struct thread *td, struct thread *child);
 u_int	sched_estcpu(struct thread *td);
 void	sched_fork_thread(struct thread *td, struct thread *child);


@@ -1099,7 +1099,7 @@ init_secondary_tail(void)
 	 */
 	MPASS(PCPU_GET(curpcb) == NULL);
-	sched_throw(NULL);
+	sched_ap_entry();
 	panic("scheduler returned us to %s", __func__);
 	/* NOTREACHED */