- Use DPCPU for SCHED_STATS. This is somewhat awkward because the
offset of the stat is not known until link time so we must emit a function to call SYSCTL_ADD_PROC rather than using SYSCTL_PROC directly.
- Eliminate the atomic from SCHED_STAT_INC now that it's using per-cpu variables. Sched stats are always incremented while we're holding a spinlock so no further protection is required.
Reviewed by: sam
This commit is contained in:
parent
a90de6966f
commit
826fa583d1
@ -79,31 +79,45 @@ SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
|
||||
* with SCHED_STAT_DEFINE().
|
||||
*/
|
||||
#ifdef SCHED_STATS
|
||||
long sched_switch_stats[SWT_COUNT]; /* Switch reasons from mi_switch(). */
|
||||
|
||||
SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");
|
||||
SCHED_STAT_DEFINE_VAR(uncategorized, &sched_switch_stats[SWT_NONE], "");
|
||||
SCHED_STAT_DEFINE_VAR(preempt, &sched_switch_stats[SWT_PREEMPT], "");
|
||||
SCHED_STAT_DEFINE_VAR(owepreempt, &sched_switch_stats[SWT_OWEPREEMPT], "");
|
||||
SCHED_STAT_DEFINE_VAR(turnstile, &sched_switch_stats[SWT_TURNSTILE], "");
|
||||
SCHED_STAT_DEFINE_VAR(sleepq, &sched_switch_stats[SWT_SLEEPQ], "");
|
||||
SCHED_STAT_DEFINE_VAR(sleepqtimo, &sched_switch_stats[SWT_SLEEPQTIMO], "");
|
||||
SCHED_STAT_DEFINE_VAR(relinquish, &sched_switch_stats[SWT_RELINQUISH], "");
|
||||
SCHED_STAT_DEFINE_VAR(needresched, &sched_switch_stats[SWT_NEEDRESCHED], "");
|
||||
SCHED_STAT_DEFINE_VAR(idle, &sched_switch_stats[SWT_IDLE], "");
|
||||
SCHED_STAT_DEFINE_VAR(iwait, &sched_switch_stats[SWT_IWAIT], "");
|
||||
SCHED_STAT_DEFINE_VAR(suspend, &sched_switch_stats[SWT_SUSPEND], "");
|
||||
SCHED_STAT_DEFINE_VAR(remotepreempt, &sched_switch_stats[SWT_REMOTEPREEMPT],
|
||||
"");
|
||||
SCHED_STAT_DEFINE_VAR(remotewakeidle, &sched_switch_stats[SWT_REMOTEWAKEIDLE],
|
||||
"");
|
||||
|
||||
/* Switch reasons from mi_switch(). */
|
||||
DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]);
|
||||
SCHED_STAT_DEFINE_VAR(uncategorized,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_NONE]), "");
|
||||
SCHED_STAT_DEFINE_VAR(preempt,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_PREEMPT]), "");
|
||||
SCHED_STAT_DEFINE_VAR(owepreempt,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), "");
|
||||
SCHED_STAT_DEFINE_VAR(turnstile,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), "");
|
||||
SCHED_STAT_DEFINE_VAR(sleepq,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), "");
|
||||
SCHED_STAT_DEFINE_VAR(sleepqtimo,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_SLEEPQTIMO]), "");
|
||||
SCHED_STAT_DEFINE_VAR(relinquish,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), "");
|
||||
SCHED_STAT_DEFINE_VAR(needresched,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), "");
|
||||
SCHED_STAT_DEFINE_VAR(idle,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_IDLE]), "");
|
||||
SCHED_STAT_DEFINE_VAR(iwait,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), "");
|
||||
SCHED_STAT_DEFINE_VAR(suspend,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), "");
|
||||
SCHED_STAT_DEFINE_VAR(remotepreempt,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), "");
|
||||
SCHED_STAT_DEFINE_VAR(remotewakeidle,
|
||||
&DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), "");
|
||||
|
||||
static int
|
||||
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
|
||||
{
|
||||
struct sysctl_oid *p;
|
||||
uintptr_t counter;
|
||||
int error;
|
||||
int val;
|
||||
int i;
|
||||
|
||||
val = 0;
|
||||
error = sysctl_handle_int(oidp, &val, 0, req);
|
||||
@ -118,7 +132,12 @@ sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
|
||||
SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
|
||||
if (p == oidp || p->oid_arg1 == NULL)
|
||||
continue;
|
||||
*(long *)p->oid_arg1 = 0;
|
||||
counter = (uintptr_t)p->oid_arg1;
|
||||
for (i = 0; i <= mp_maxid; i++) {
|
||||
if (CPU_ABSENT(i))
|
||||
continue;
|
||||
*(long *)(dpcpu_off[i] + counter) = 0;
|
||||
}
|
||||
}
|
||||
return (0);
|
||||
}
|
||||
|
@ -162,14 +162,27 @@ sched_unpin(void)
|
||||
|
||||
/* Scheduler stats. */
|
||||
#ifdef SCHED_STATS
|
||||
extern long sched_switch_stats[SWT_COUNT];
|
||||
DPCPU_DECLARE(long, sched_switch_stats[SWT_COUNT]);
|
||||
|
||||
#define SCHED_STAT_DEFINE_VAR(name, ptr, descr) \
|
||||
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, name, CTLFLAG_RD, ptr, 0, descr)
|
||||
static void name ## _add_proc(void *dummy __unused) \
|
||||
{ \
|
||||
\
|
||||
SYSCTL_ADD_PROC(NULL, \
|
||||
SYSCTL_STATIC_CHILDREN(_kern_sched_stats), OID_AUTO, \
|
||||
#name, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE, \
|
||||
ptr, 0, sysctl_dpcpu_long, "LU", descr); \
|
||||
} \
|
||||
SYSINIT(name, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, name ## _add_proc, NULL);
|
||||
|
||||
#define SCHED_STAT_DEFINE(name, descr) \
|
||||
unsigned long name; \
|
||||
SCHED_STAT_DEFINE_VAR(name, &name, descr)
|
||||
#define SCHED_STAT_INC(var) atomic_add_long(&(var), 1)
|
||||
DPCPU_DEFINE(unsigned long, name); \
|
||||
SCHED_STAT_DEFINE_VAR(name, &DPCPU_NAME(name), descr)
|
||||
/*
|
||||
* Sched stats are always incremented in critical sections so no atomic
|
||||
 * is necessary to increment them.
|
||||
*/
|
||||
#define SCHED_STAT_INC(var) DPCPU_GET(var)++;
|
||||
#else
|
||||
#define SCHED_STAT_DEFINE_VAR(name, descr, ptr)
|
||||
#define SCHED_STAT_DEFINE(name, descr)
|
||||
|
Loading…
Reference in New Issue
Block a user