softclock: Use dedicated ithreads for running callouts.

Rather than using the swi infrastructure, rewrite softclock() as a
thread loop (softclock_thread()) and use it as the main routine of the
softclock threads.  The threads use the CC_LOCK as the thread lock
when idle.

Reviewed by:	mav, imp, kib
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D33683
John Baldwin	2021-12-30 14:54:29 -08:00
commit 74cf7cae4d (parent dda9847275)
2 changed files with 77 additions and 41 deletions
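The heart of the change is the wakeup handshake between callout_process() (run from the clock interrupt) and the new per-CPU softclock threads. Condensed from the hunks below into one place for orientation (a sketch, not a drop-in: profiling counters and error handling are elided):

	/* Consumer side, in softclock_thread(): park on cc_lock while idle. */
	while (TAILQ_EMPTY(&cc->cc_expireq)) {
		thread_lock(td);
		thread_lock_set(td, (struct mtx *)&cc->cc_lock);
		TD_SET_IWAIT(td);
		mi_switch(SW_VOL | SWT_IWAIT);	/* returns with the thread lock dropped */
		CC_LOCK(cc);
	}

	/* Producer side, in callout_process(): wake the thread with cc_lock held. */
	td = cc->cc_thread;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);	/* consumes the thread lock (== cc_lock) */
	} else
		mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);

Because the idle thread's thread lock is cc_lock itself, the producer already holds the only lock it needs to wake the thread, which eliminates the lock-order problem the old code had with calling swi_sched() under cc_lock.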

sys/kern/kern_timeout.c

@@ -52,14 +52,17 @@ __FBSDID("$FreeBSD$");
 #include <sys/interrupt.h>
 #include <sys/kernel.h>
 #include <sys/ktr.h>
+#include <sys/kthread.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/sched.h>
 #include <sys/sdt.h>
 #include <sys/sleepqueue.h>
 #include <sys/sysctl.h>
 #include <sys/smp.h>
+#include <sys/unistd.h>
 #ifdef DDB
 #include <ddb/ddb.h>
@@ -77,6 +80,8 @@ SDT_PROVIDER_DEFINE(callout_execute);
 SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
 SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");
 
+static void	softclock_thread(void *arg);
+
 #ifdef CALLOUT_PROFILING
 static int avg_depth;
 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
@@ -166,7 +171,7 @@ struct callout_cpu {
 	struct callout_tailq	cc_expireq;
 	sbintime_t		cc_firstevent;
 	sbintime_t		cc_lastscan;
-	void			*cc_cookie;
+	struct thread		*cc_thread;
 	u_int			cc_bucket;
 	u_int			cc_inited;
 #ifdef KTR
@@ -222,7 +227,7 @@ static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
  *	relevant callout completes.
  * cc_cancel - Changing to 1 with both callout_lock and cc_lock held
  *	guarantees that the current callout will not run.
- *	The softclock() function sets this to 0 before it
+ *	The softclock_call_cc() function sets this to 0 before it
  *	drops callout_lock to acquire c_lock, and it calls
  *	the handler only if curr_cancelled is still 0 after
  *	cc_lock is successfully acquired.
@@ -316,7 +321,7 @@ callout_cpu_init(struct callout_cpu *cc, int cpu)
 {
 	int i;
 
-	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
+	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN);
 	cc->cc_inited = 1;
 	cc->cc_callwheel = malloc_domainset(sizeof(struct callout_list) *
 	    callwheelsize, M_CALLOUT,
@@ -369,28 +374,38 @@ callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
 static void
 start_softclock(void *dummy)
 {
+	struct proc *p;
+	struct thread *td;
 	struct callout_cpu *cc;
-	char name[MAXCOMLEN];
-	int cpu;
+	int cpu, error;
 	bool pin_swi;
-	struct intr_event *ie;
 
+	p = NULL;
 	CPU_FOREACH(cpu) {
 		cc = CC_CPU(cpu);
-		snprintf(name, sizeof(name), "clock (%d)", cpu);
-		ie = NULL;
-		if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
-		    INTR_MPSAFE, &cc->cc_cookie))
-			panic("died while creating standard software ithreads");
+		error = kproc_kthread_add(softclock_thread, cc, &p, &td,
+		    RFSTOPPED, 0, "clock", "clock (%d)", cpu);
+		if (error != 0)
+			panic("failed to create softclock thread for cpu %d: %d",
+			    cpu, error);
+		CC_LOCK(cc);
+		cc->cc_thread = td;
+		thread_lock(td);
+		sched_class(td, PRI_ITHD);
+		sched_prio(td, PI_SWI(SWI_CLOCK));
+		TD_SET_IWAIT(td);
+		thread_lock_set(td, (struct mtx *)&cc->cc_lock);
+		thread_unlock(td);
 		if (cpu == cc_default_cpu)
 			pin_swi = pin_default_swi;
 		else
 			pin_swi = pin_pcpu_swi;
-		if (pin_swi && (intr_event_bind(ie, cpu) != 0)) {
-			printf("%s: %s clock couldn't be pinned to cpu %d\n",
-			    __func__,
-			    cpu == cc_default_cpu ? "default" : "per-cpu",
-			    cpu);
+		if (pin_swi) {
+			error = cpuset_setithread(td->td_tid, cpu);
+			if (error != 0)
+				printf("%s: %s clock couldn't be pinned to cpu %d: %d\n",
+				    __func__, cpu == cc_default_cpu ?
+				    "default" : "per-cpu", cpu, error);
 		}
 	}
 }
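Two details of the creation path above are worth noting. The RFSTOPPED flag keeps the new thread off the run queues, which opens a window to set its scheduling class and priority and park it in IWAIT before it ever runs; the first callout_process() wakeup is what finally sched_add()s it. And since &p is shared across loop iterations, each per-CPU thread becomes another kthread inside a single "clock" kernel process. A minimal sketch of the same pattern, with hypothetical fn/arg names and error handling elided:

	struct proc *p = NULL;	/* first call creates the "clock" process */
	struct thread *td;

	kproc_kthread_add(fn, arg, &p, &td, RFSTOPPED, 0,
	    "clock", "clock (%d)", cpu);
	thread_lock(td);
	sched_class(td, PRI_ITHD);		/* schedule like an interrupt thread */
	sched_prio(td, PI_SWI(SWI_CLOCK));	/* same priority as the old swi */
	TD_SET_IWAIT(td);			/* parked until the first wakeup */
	thread_unlock(td);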
@@ -418,6 +433,7 @@ callout_process(sbintime_t now)
 	struct callout *tmp, *tmpn;
 	struct callout_cpu *cc;
 	struct callout_list *sc;
+	struct thread *td;
 	sbintime_t first, last, max, tmp_max;
 	uint32_t lookahead;
 	u_int firstb, lastb, nowb;
@@ -529,13 +545,15 @@
 	avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
 	avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
 #endif
-	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
-	/*
-	 * swi_sched acquires the thread lock, so we don't want to call it
-	 * with cc_lock held; incorrect locking order.
-	 */
-	if (!TAILQ_EMPTY(&cc->cc_expireq))
-		swi_sched(cc->cc_cookie, 0);
+	if (!TAILQ_EMPTY(&cc->cc_expireq)) {
+		td = cc->cc_thread;
+		if (TD_AWAITING_INTR(td)) {
+			TD_CLR_IWAIT(td);
+			sched_add(td, SRQ_INTR);
+		} else
+			mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
+	} else
+		mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
 }
 
 static struct callout_cpu *
@@ -797,20 +815,39 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
  */
 
 /*
- * Software (low priority) clock interrupt.
+ * Software (low priority) clock interrupt thread handler.
  * Run periodic events from timeout queue.
  */
-void
-softclock(void *arg)
+static void
+softclock_thread(void *arg)
 {
+	struct thread *td = curthread;
 	struct callout_cpu *cc;
 	struct callout *c;
 #ifdef CALLOUT_PROFILING
-	int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
+	int depth, gcalls, lockcalls, mpcalls;
 #endif
 
 	cc = (struct callout_cpu *)arg;
 	CC_LOCK(cc);
+	for (;;) {
+		while (TAILQ_EMPTY(&cc->cc_expireq)) {
+			/*
+			 * Use CC_LOCK(cc) as the thread_lock while
+			 * idle.
+			 */
+			thread_lock(td);
+			thread_lock_set(td, (struct mtx *)&cc->cc_lock);
+			TD_SET_IWAIT(td);
+			mi_switch(SW_VOL | SWT_IWAIT);
+
+			/* mi_switch() drops thread_lock(). */
+			CC_LOCK(cc);
+		}
+
+#ifdef CALLOUT_PROFILING
+		depth = gcalls = lockcalls = mpcalls = 0;
+#endif
 		while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
 			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
 			softclock_call_cc(c, cc,
@@ -828,7 +865,7 @@ softclock(void *arg)
 		avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
 		avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
 #endif
-	CC_UNLOCK(cc);
+	}
 }
 
 void

sys/sys/systm.h

@@ -471,7 +471,6 @@ int sysbeep(int hertz, sbintime_t duration);
 void	hardclock(int cnt, int usermode);
 void	hardclock_sync(int cpu);
-void	softclock(void *);
 void	statclock(int cnt, int usermode);
 void	profclock(int cnt, int usermode, uintfptr_t pc);