Add schedgraph traces for callout handlers.

Specifically, a callwheel logs a running event each time it executes a
callout function. The event includes the function pointer, argument, and
whether or not it was run from hardware interrupt context. The callwheel
is marked idle when each handler completes. This effectively logs the
duration of each callout routine in the graph.
commit 877e018654
parent a6a73044a0
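The change brackets every callout handler invocation with a pair of KTR_SCHED state events keyed by a per-CPU event name ("callwheel cpu %d"), so each callwheel gets its own row in schedgraph and the time spent in a handler appears as a "running" interval. The following is a minimal sketch of that pattern, reduced from the softclock_call_cc() hunks below; the helper name and parameter list are placeholders, and the real function's locking, migration, and profiling logic is omitted.

#include <sys/param.h>
#include <sys/ktr.h>

/*
 * Hypothetical, stripped-down illustration of the tracing added to
 * softclock_call_cc().  Kernel context is assumed; "cc" is the per-CPU
 * callout_cpu whose cc_ktr_event_name is formatted in callout_cpu_init().
 */
static void
run_one_callout(struct callout_cpu *cc, void (*c_func)(void *), void *c_arg,
    int direct)
{
	/*
	 * Mark this callwheel as running and attach the handler, its
	 * argument, and whether it is dispatched directly from hardware
	 * interrupt context, matching the real KTR_STATE3 call.
	 */
	KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);

	c_func(c_arg);		/* run the callout handler */

	/*
	 * Return the callwheel to idle; the running/idle pair bounds the
	 * handler's duration in the schedgraph output.
	 */
	KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
}

As with other KTR_SCHED events, these records only show up once the kernel is built with KTR and KTR_SCHED tracing enabled; the trace buffer is then dumped with ktrdump(8) and fed to schedgraph.py, where the per-CPU event name separates the callwheels into one row per CPU.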
sys/kern/kern_timeout.c

@@ -163,6 +163,7 @@ struct callout_cpu {
 	sbintime_t		cc_lastscan;
 	void			*cc_cookie;
 	u_int			cc_bucket;
+	char			cc_ktr_event_name[20];
 };

 #define	cc_exec_curr		cc_exec_entity[0].cc_curr
@@ -201,7 +202,7 @@ struct callout_cpu cc_cpu;

 static int timeout_cpu;

-static void	callout_cpu_init(struct callout_cpu *cc);
+static void	callout_cpu_init(struct callout_cpu *cc, int cpu);
 static void	softclock_call_cc(struct callout *c, struct callout_cpu *cc,
 #ifdef CALLOUT_PROFILING
 		    int *mpcalls, int *lockcalls, int *gcalls,
@@ -302,7 +303,7 @@ callout_callwheel_init(void *dummy)
 	cc = CC_CPU(timeout_cpu);
 	cc->cc_callout = malloc(ncallout * sizeof(struct callout),
 	    M_CALLOUT, M_WAITOK);
-	callout_cpu_init(cc);
+	callout_cpu_init(cc, timeout_cpu);
 }
 SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);

@@ -310,7 +311,7 @@ SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
  * Initialize the per-cpu callout structures.
  */
 static void
-callout_cpu_init(struct callout_cpu *cc)
+callout_cpu_init(struct callout_cpu *cc, int cpu)
 {
 	struct callout *c;
 	int i;
@@ -325,6 +326,8 @@ callout_cpu_init(struct callout_cpu *cc)
 	cc->cc_firstevent = SBT_MAX;
 	for (i = 0; i < 2; i++)
 		cc_cce_cleanup(cc, i);
+	snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
+	    "callwheel cpu %d", cpu);
 	if (cc->cc_callout == NULL)	/* Only cpu0 handles timeout(9) */
 		return;
 	for (i = 0; i < ncallout; i++) {
@@ -396,7 +399,7 @@ start_softclock(void *dummy)
 			continue;
 		cc = CC_CPU(cpu);
 		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(9). */
-		callout_cpu_init(cc);
+		callout_cpu_init(cc, cpu);
 		snprintf(name, sizeof(name), "clock (%d)", cpu);
 		ie = NULL;
 		if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
@@ -711,6 +714,8 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
 		CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
 		    c, c_func, c_arg);
 	}
+	KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
+	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
 	sbt1 = sbinuptime();
 #endif
@@ -733,6 +738,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
 		lastfunc = c_func;
 	}
 #endif
+	KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
 	CTR1(KTR_CALLOUT, "callout %p finished", c);
 	if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
 		class->lc_unlock(c_lock);

tools/sched/schedgraph.py

@@ -80,8 +80,6 @@
 	("runq rem", "yellow"),
 	("thread exit", "grey"),
 	("proc exit", "grey"),
-	("callwheel idle", "grey"),
-	("callout running", "green"),
 	("lock acquire", "blue"),
 	("lock contest", "purple"),
 	("failed lock try", "red"),
@@ -903,7 +901,6 @@ def __init__(self, file):
 		self.timestamp_f = None
 		self.timestamp_l = None
 		self.locks = {}
-		self.callwheels = {}
 		self.ticks = {}
 		self.load = {}
 		self.crit = {}