- Implement generic macros for producing KTR records that are compatible
  with src/tools/sched/schedgraph.py.  This allows developers to quickly
  create a graphical view of ktr data for any resource in the system.
- Add sched_tdname() and the pcpu field 'name' for quickly and uniformly
  identifying records associated with a thread or cpu.
- Reimplement the KTR_SCHED traces using the new generic facility.

Obtained from:	attilio
Discussed with:	jhb
Sponsored by:	Nokia
Author:	Jeff Roberson
Date:	2009-01-17 07:17:57 +00:00
Commit:	8f51ad55e7 (parent: 6ec3512368)
Notes:	svn2git 2020-12-20 02:59:44 +00:00; svn path=/head/; revision=187357

8 changed files with 233 additions and 51 deletions

sys/kern/kern_clock.c

@@ -498,8 +498,8 @@ statclock(int usermode)
 	rss = pgtok(vmspace_resident_count(vm));
 	if (ru->ru_maxrss < rss)
 		ru->ru_maxrss = rss;
-	CTR4(KTR_SCHED, "statclock: %p(%s) prio %d stathz %d",
-	    td, td->td_name, td->td_priority, (stathz)?stathz:hz);
+	KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
+	    "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
 	thread_lock_flags(td, MTX_QUIET);
 	sched_clock(td);
 	thread_unlock(td);

sys/kern/kern_synch.c

@@ -71,6 +71,13 @@ __FBSDID("$FreeBSD$");
 #include <vm/pmap.h>
 #endif

+#define KTDSTATE(td) \
+	(((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" : \
+	((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" : \
+	((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" : \
+	((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" : \
+	((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")
+
 static void synch_setup(void *dummy);
 SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
     NULL);
@@ -425,25 +432,19 @@ mi_switch(int flags, struct thread *newtd)
 	    td->td_tid, td->td_sched, p->p_pid, td->td_name);
 #if (KTR_COMPILE & KTR_SCHED) != 0
 	if (TD_IS_IDLETHREAD(td))
-		CTR3(KTR_SCHED, "mi_switch: %p(%s) prio %d idle",
-		    td, td->td_name, td->td_priority);
-	else if (newtd != NULL)
-		CTR5(KTR_SCHED,
-		    "mi_switch: %p(%s) prio %d preempted by %p(%s)",
-		    td, td->td_name, td->td_priority, newtd,
-		    newtd->td_name);
+		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
+		    "prio:%d", td->td_priority);
 	else
-		CTR6(KTR_SCHED,
-		    "mi_switch: %p(%s) prio %d inhibit %d wmesg %s lock %s",
-		    td, td->td_name, td->td_priority,
-		    td->td_inhibitors, td->td_wmesg, td->td_lockname);
+		KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
+		    "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
+		    "lockname:\"%s\"", td->td_lockname);
 #endif
 #ifdef XEN
 	PT_UPDATES_FLUSH();
 #endif
 	sched_switch(td, newtd, flags);
-	CTR3(KTR_SCHED, "mi_switch: running %p(%s) prio %d",
-	    td, td->td_name, td->td_priority);
+	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
+	    "prio:%d", td->td_priority);
 	CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
 	    td->td_tid, td->td_sched, p->p_pid, td->td_name);


sys/kern/sched_4bsd.c

@@ -82,6 +82,8 @@ dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
 #endif

 #define NICE_WEIGHT 1 /* Priorities per nice level. */
+#define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__STRING(UINT_MAX)))
+
 /*
  * The schedulable entity that runs a context.
  * This is an extension to the thread structure and is tailored to
@@ -93,6 +95,9 @@ struct td_sched {
 	int	ts_slptime;	/* (j) Seconds !RUNNING. */
 	int	ts_flags;
 	struct runq	*ts_runq;	/* runq the thread is currently on */
+#ifdef KTR
+	char	ts_name[TS_NAME_LEN];
+#endif
 };

 /* flags kept in td_flags */
@@ -243,15 +248,17 @@ SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
 static __inline void
 sched_load_add(void)
 {
+
 	sched_tdcnt++;
-	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
+	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
 }

 static __inline void
 sched_load_rem(void)
 {
+
 	sched_tdcnt--;
-	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
+	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
 }

 /*
  * Arrange to reschedule if necessary, taking the priorities and
@@ -705,8 +712,9 @@ void
 sched_exit(struct proc *p, struct thread *td)
 {
-	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
-	    td, td->td_name, td->td_priority);
+
+	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
+	    "prio:%d", td->td_priority);
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
 }
@@ -715,8 +723,8 @@ void
 sched_exit_thread(struct thread *td, struct thread *child)
 {
-	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
-	    child, child->td_name, child->td_priority);
+	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
+	    "prio:%d", child->td_priority);
 	thread_lock(td);
 	td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
 	thread_unlock(td);
@@ -773,10 +781,16 @@ sched_class(struct thread *td, int class)
 static void
 sched_priority(struct thread *td, u_char prio)
 {
-	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
-	    td, td->td_name, td->td_priority, prio, curthread,
-	    curthread->td_name);
+
+	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
+	    "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
+	    sched_tdname(curthread));
+	if (td != curthread && prio > td->td_priority) {
+		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
+		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
+		    prio, KTR_ATTR_LINKED, sched_tdname(td));
+	}
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	if (td->td_priority == prio)
 		return;
@@ -1205,9 +1219,13 @@ sched_add(struct thread *td, int flags)
 	    ("sched_add: bad thread state"));
 	KASSERT(td->td_flags & TDF_INMEM,
 	    ("sched_add: thread swapped out"));
-	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
-	    td, td->td_name, td->td_priority, curthread,
-	    curthread->td_name);
+	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
+	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
+	    sched_tdname(curthread));
+	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
+	    KTR_ATTR_LINKED, sched_tdname(td));
+
 	/*
 	 * Now that the thread is moving to the run-queue, set the lock
@@ -1289,9 +1307,11 @@
 	    ("sched_add: bad thread state"));
 	KASSERT(td->td_flags & TDF_INMEM,
 	    ("sched_add: thread swapped out"));
-	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
-	    td, td->td_name, td->td_priority, curthread,
-	    curthread->td_name);
+	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
+	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
+	    sched_tdname(curthread));
+	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
+	    KTR_ATTR_LINKED, sched_tdname(td));
 	/*
 	 * Now that the thread is moving to the run-queue, set the lock
@@ -1336,9 +1356,9 @@ sched_rem(struct thread *td)
 	KASSERT(TD_ON_RUNQ(td),
 	    ("sched_rem: thread not on run queue"));
 	mtx_assert(&sched_lock, MA_OWNED);
-	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
-	    td, td->td_name, td->td_priority, curthread,
-	    curthread->td_name);
+	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
+	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
+	    sched_tdname(curthread));

 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_rem();
@@ -1570,6 +1590,22 @@ sched_fork_exit(struct thread *td)
 	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
 }

+char *
+sched_tdname(struct thread *td)
+{
+#ifdef KTR
+	struct td_sched *ts;
+
+	ts = td->td_sched;
+	if (ts->ts_name[0] == '\0')
+		snprintf(ts->ts_name, sizeof(ts->ts_name),
+		    "%s tid %d", td->td_name, td->td_tid);
+	return (ts->ts_name);
+#else
+	return (td->td_name);
+#endif
+}
+
 void
 sched_affinity(struct thread *td)
 {

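For orientation: sched_tdname() above lazily formats a stable per-thread identifier such as "ls tid 100052".  Assuming a hypothetical thread "sh tid 100040" wakes that thread, the "runq add"/"wokeup" pair traced in sched_add() would expand, via the KTR_EVENT* macros added in sys/sys/ktr.h below, into records along these lines (hand-reconstructed for illustration, not captured output; the names, tids, and priority are invented):

    KTRGRAPH group:"thread", id:"ls tid 100052", state:"runq add", attributes: prio:152, linkedto:"sh tid 100040"
    KTRGRAPH group:"thread", id:"sh tid 100040", point:"wokeup", attributes: linkedto:"sh tid 100052"

schedgraph.py groups such records by id, and the linkedto: attribute lets related rows be traversed together, per the KTR_ATTR_LINKED description in ktr.h.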
sys/kern/sched_ule.c

@@ -86,6 +86,10 @@ dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
 #define KTR_ULE 0

+#define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__STRING(UINT_MAX)))
+#define TDQ_NAME_LEN (sizeof("sched lock ") + sizeof(__STRING(MAXCPU)))
+#define TDQ_LOADNAME_LEN (PCPU_NAME_LEN + sizeof(" load"))
+
 /*
  * Thread scheduler specific section.  All fields are protected
  * by the thread lock.
@@ -101,6 +105,9 @@ struct td_sched {
 	int	ts_ltick;	/* Last tick that we were running on */
 	int	ts_ftick;	/* First tick that we were running on */
 	int	ts_ticks;	/* Tick count */
+#ifdef KTR
+	char	ts_name[TS_NAME_LEN];
+#endif
 };
 /* flags kept in ts_flags */
 #define TSF_BOUND	0x0001	/* Thread can not migrate. */
@@ -216,7 +223,10 @@ struct tdq {
 	struct runq	tdq_realtime;	/* real-time run queue. */
 	struct runq	tdq_timeshare;	/* timeshare run queue. */
 	struct runq	tdq_idle;	/* Queue of IDLE threads. */
-	char		tdq_name[sizeof("sched lock") + 6];
+	char		tdq_name[TDQ_NAME_LEN];
+#ifdef KTR
+	char		tdq_loadname[TDQ_LOADNAME_LEN];
+#endif
 } __aligned(64);

 /* Idle thread states and config. */
@@ -489,7 +499,7 @@ tdq_load_add(struct tdq *tdq, struct thread *td)
 	tdq->tdq_load++;
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		tdq->tdq_sysload++;
-	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
+	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
 }

 /*
@@ -508,7 +518,7 @@ tdq_load_rem(struct tdq *tdq, struct thread *td)
 	tdq->tdq_load--;
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		tdq->tdq_sysload--;
-	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
+	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
 }

 /*
@@ -1237,6 +1247,10 @@ tdq_setup(struct tdq *tdq)
 	    "sched lock %d", (int)TDQ_ID(tdq));
 	mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock",
 	    MTX_SPIN | MTX_RECURSE);
+#ifdef KTR
+	snprintf(tdq->tdq_loadname, sizeof(tdq->tdq_loadname),
+	    "CPU %d load", (int)TDQ_ID(tdq));
+#endif
 }

 #ifdef SMP
@@ -1559,9 +1573,14 @@ sched_thread_priority(struct thread *td, u_char prio)
 	struct tdq *tdq;
 	int oldpri;

-	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
-	    td, td->td_name, td->td_priority, prio, curthread,
-	    curthread->td_name);
+	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "prio",
+	    "prio:%d", td->td_priority, "new prio:%d", prio,
+	    KTR_ATTR_LINKED, sched_tdname(curthread));
+	if (td != curthread && prio > td->td_priority) {
+		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
+		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
+		    prio, KTR_ATTR_LINKED, sched_tdname(td));
+	}
 	ts = td->td_sched;
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	if (td->td_priority == prio)
@@ -1990,6 +2009,9 @@ sched_fork_thread(struct thread *td, struct thread *child)
 	ts2->ts_slptime = ts->ts_slptime;
 	ts2->ts_runtime = ts->ts_runtime;
 	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
+#ifdef KTR
+	bzero(ts2->ts_name, sizeof(ts2->ts_name));
+#endif
 }

 /*
@@ -2012,10 +2034,9 @@ void
 sched_exit(struct proc *p, struct thread *child)
 {
 	struct thread *td;

-	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
-	    child, child->td_name, child->td_priority);
-
+	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "proc exit",
+	    "prio:%d", child->td_priority);
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	td = FIRST_THREAD_IN_PROC(p);
 	sched_exit_thread(td, child);
@@ -2031,9 +2052,8 @@ void
 sched_exit_thread(struct thread *td, struct thread *child)
 {
-	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
-	    child, child->td_name, child->td_priority);
-
+	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "thread exit",
+	    "prio:%d", child->td_priority);
 	/*
 	 * Give the child's runtime to the parent without returning the
 	 * sleep time as a penalty to the parent.  This causes shells that
@@ -2291,9 +2311,12 @@ sched_add(struct thread *td, int flags)
 #ifdef SMP
 	int cpu;
 #endif
-	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
-	    td, td->td_name, td->td_priority, curthread,
-	    curthread->td_name);
+
+	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
+	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
+	    sched_tdname(curthread));
+	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
+	    KTR_ATTR_LINKED, sched_tdname(td));
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	/*
 	 * Recalculate the priority before we select the target cpu or
@@ -2337,9 +2360,8 @@ sched_rem(struct thread *td)
 {
 	struct tdq *tdq;

-	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
-	    td, td->td_name, td->td_priority, curthread,
-	    curthread->td_name);
+	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
+	    "prio:%d", td->td_priority);
 	tdq = TDQ_CPU(td->td_sched->ts_cpu);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
@@ -2605,6 +2627,25 @@ sched_fork_exit(struct thread *td)
 	    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
 }

+/*
+ * Create on first use to catch odd startup conditions.
+ */
+char *
+sched_tdname(struct thread *td)
+{
+#ifdef KTR
+	struct td_sched *ts;
+
+	ts = td->td_sched;
+	if (ts->ts_name[0] == '\0')
+		snprintf(ts->ts_name, sizeof(ts->ts_name),
+		    "%s tid %d", td->td_name, td->td_tid);
+	return (ts->ts_name);
+#else
+	return (td->td_name);
+#endif
+}
+
 #ifdef SMP
 /*

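The per-CPU load tracking above plots as a counter rather than a state.  With tdq_loadname initialized to "CPU 0 load" in tdq_setup(), a load change on CPU 0 would be logged roughly as follows (again hand-reconstructed from the macro definitions, not captured output):

    KTRGRAPH group:"load", id:"CPU 0 load", counter:3, attributes: none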
sys/kern/subr_pcpu.c

@@ -76,6 +76,9 @@ pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
 	cpu_pcpu_init(pcpu, cpuid, size);
 	pcpu->pc_rm_queue.rmq_next = &pcpu->pc_rm_queue;
 	pcpu->pc_rm_queue.rmq_prev = &pcpu->pc_rm_queue;
+#ifdef KTR
+	snprintf(pcpu->pc_name, sizeof(pcpu->pc_name), "CPU %d", cpuid);
+#endif
 }

sys/sys/ktr.h

@@ -164,6 +164,95 @@ void ktr_tracepoint(u_int mask, const char *file, int line,
 #define TR5(d, p1, p2, p3, p4, p5) CTR5(KTR_GEN, d, p1, p2, p3, p4, p5)
 #define TR6(d, p1, p2, p3, p4, p5, p6) CTR6(KTR_GEN, d, p1, p2, p3, p4, p5, p6)

+/*
+ * The event macros implement KTR graphic plotting facilities provided
+ * by src/tools/sched/schedgraph.py.  Three generic types of events are
+ * supported: states, counters, and points.
+ *
+ * m is the ktr class for ktr_mask.
+ * ident is the string identifier that owns the event (e.g. "thread 10001").
+ * etype is the type of event to plot (state, counter, point).
+ * edat is the event specific data (state name, counter value, point name).
+ * Up to four attributes may be supplied as name, value pairs of arguments.
+ *
+ * etype and attribute names must be string constants.  This minimizes the
+ * number of ktr slots required by constructing the final format strings
+ * at compile time.  Both must also include a colon and a format specifier
+ * (e.g. "prio:%d", prio).  It is recommended that string arguments be
+ * contained within escaped quotes if they may contain ',' or ':' characters.
+ *
+ * The special attribute (KTR_ATTR_LINKED, ident) creates a reference to
+ * another id on the graph for easy traversal of related graph elements.
+ */
+#define KTR_ATTR_LINKED "linkedto:\"%s\""
+#define KTR_EFMT(egroup, ident, etype) \
+	"KTRGRAPH group:\"" egroup "\", id:\"%s\", " etype ", attributes: "
+
+#define KTR_EVENT0(m, egroup, ident, etype, edat) \
+	CTR2(m, KTR_EFMT(egroup, ident, etype) "none", ident, edat)
+#define KTR_EVENT1(m, egroup, ident, etype, edat, a0, v0) \
+	CTR3(m, KTR_EFMT(egroup, ident, etype) a0, ident, edat, (v0))
+#define KTR_EVENT2(m, egroup, ident, etype, edat, a0, v0, a1, v1) \
+	CTR4(m, KTR_EFMT(egroup, ident, etype) a0 ", " a1, \
+	    ident, edat, (v0), (v1))
+#define KTR_EVENT3(m, egroup, ident, etype, edat, a0, v0, a1, v1, a2, v2) \
+	CTR5(m, KTR_EFMT(egroup, ident, etype) a0 ", " a1 ", " a2, \
+	    ident, edat, (v0), (v1), (v2))
+#define KTR_EVENT4(m, egroup, ident, etype, edat, \
+	    a0, v0, a1, v1, a2, v2, a3, v3) \
+	CTR6(m, KTR_EFMT(egroup, ident, etype) a0 ", " a1 ", " a2 ", " a3, \
+	    ident, edat, (v0), (v1), (v2), (v3))
+
+/*
+ * State functions graph state changes on an ident.
+ */
+#define KTR_STATE0(m, egroup, ident, state) \
+	KTR_EVENT0(m, egroup, ident, "state:\"%s\"", state)
+#define KTR_STATE1(m, egroup, ident, state, a0, v0) \
+	KTR_EVENT1(m, egroup, ident, "state:\"%s\"", state, a0, (v0))
+#define KTR_STATE2(m, egroup, ident, state, a0, v0, a1, v1) \
+	KTR_EVENT2(m, egroup, ident, "state:\"%s\"", state, a0, (v0), a1, (v1))
+#define KTR_STATE3(m, egroup, ident, state, a0, v0, a1, v1, a2, v2) \
+	KTR_EVENT3(m, egroup, ident, "state:\"%s\"", \
+	    state, a0, (v0), a1, (v1), a2, (v2))
+#define KTR_STATE4(m, egroup, ident, state, a0, v0, a1, v1, a2, v2, a3, v3) \
+	KTR_EVENT4(m, egroup, ident, "state:\"%s\"", \
+	    state, a0, (v0), a1, (v1), a2, (v2), a3, (v3))
+
+/*
+ * Counter functions graph counter values.  The counter id
+ * must not be intermixed with a state id.
+ */
+#define KTR_COUNTER0(m, egroup, ident, counter) \
+	KTR_EVENT0(m, egroup, ident, "counter:%d", counter)
+#define KTR_COUNTER1(m, egroup, ident, counter, a0, v0) \
+	KTR_EVENT1(m, egroup, ident, "counter:%d", counter, a0, (v0))
+#define KTR_COUNTER2(m, egroup, ident, counter, a0, v0, a1, v1) \
+	KTR_EVENT2(m, egroup, ident, "counter:%d", counter, a0, (v0), a1, (v1))
+#define KTR_COUNTER3(m, egroup, ident, counter, a0, v0, a1, v1, a2, v2) \
+	KTR_EVENT3(m, egroup, ident, "counter:%d", \
+	    counter, a0, (v0), a1, (v1), a2, (v2))
+#define KTR_COUNTER4(m, egroup, ident, counter, a0, v0, a1, v1, a2, v2, a3, v3) \
+	KTR_EVENT4(m, egroup, ident, "counter:%d", \
+	    counter, a0, (v0), a1, (v1), a2, (v2), a3, (v3))
+
+/*
+ * Point functions plot points of interest on counter or state graphs.
+ */
+#define KTR_POINT0(m, egroup, ident, point) \
+	KTR_EVENT0(m, egroup, ident, "point:\"%s\"", point)
+#define KTR_POINT1(m, egroup, ident, point, a0, v0) \
+	KTR_EVENT1(m, egroup, ident, "point:\"%s\"", point, a0, (v0))
+#define KTR_POINT2(m, egroup, ident, point, a0, v0, a1, v1) \
+	KTR_EVENT2(m, egroup, ident, "point:\"%s\"", point, a0, (v0), a1, (v1))
+#define KTR_POINT3(m, egroup, ident, point, a0, v0, a1, v1, a2, v2) \
+	KTR_EVENT3(m, egroup, ident, "point:\"%s\"", point, \
+	    a0, (v0), a1, (v1), a2, (v2))
+#define KTR_POINT4(m, egroup, ident, point, a0, v0, a1, v1, a2, v2, a3, v3) \
+	KTR_EVENT4(m, egroup, ident, "point:\"%s\"", \
+	    point, a0, (v0), a1, (v1), a2, (v2), a3, (v3))
+
 /*
  * Trace initialization events, similar to CTR with KTR_INIT, but
  * completely ifdef'ed out if KTR_INIT isn't in KTR_COMPILE (to

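To make the grammar above concrete, here is a minimal sketch of how a hypothetical subsystem could drive these macros.  The mydev_softc structure, its fields, and mydev_trace() are invented for illustration; only the KTR_* macros and the KTR_GEN class come from this header:

#include <sys/param.h>
#include <sys/ktr.h>

/* Hypothetical bookkeeping; a counter ident must stay distinct from a state ident. */
struct mydev_softc {
	char	sc_name[16];	/* state/point ident, e.g. "mydev0" */
	char	sc_qname[24];	/* counter ident, e.g. "mydev0 qlen" */
	int	sc_qlen;	/* current queue depth */
};

static void
mydev_trace(struct mydev_softc *sc, const char *wakee)
{
	/* State change: the sc_name row switches to the "busy" state. */
	KTR_STATE1(KTR_GEN, "mydev", sc->sc_name, "busy",
	    "qlen:%d", sc->sc_qlen);
	/* Counter: plot the queue depth over time on its own row. */
	KTR_COUNTER0(KTR_GEN, "mydev load", sc->sc_qname, sc->sc_qlen);
	/* Point: mark the wakeup and link it to the woken ident's row. */
	KTR_POINT1(KTR_GEN, "mydev", sc->sc_name, "wokeup",
	    KTR_ATTR_LINKED, wakee);
	/* Back to idle, with no extra attributes. */
	KTR_STATE0(KTR_GEN, "mydev", sc->sc_name, "idle");
}

Each call compiles down to a single CTR2()-CTR6() slot whose format string is pasted together at compile time, so the per-trace cost is the same as that of the CTR calls these macros replace.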
sys/sys/pcpu.h

@@ -54,6 +54,9 @@ struct rm_queue {
 	struct rm_queue	*volatile rmq_prev;
 };

+#define PCPU_NAME_LEN (sizeof("CPU ") + sizeof(__STRING(MAXCPU)) + 1)
+
 /*
  * This structure maps out the global data that needs to be kept on a
  * per-cpu basis.  The members are accessed via the PCPU_GET/SET/PTR
@@ -76,6 +79,9 @@ struct pcpu {
 #ifdef KTR_PERCPU
 	int		pc_ktr_idx;	/* Index into trace table */
 	char		*pc_ktr_buf;
 #endif
+#ifdef KTR
+	char		pc_name[PCPU_NAME_LEN];	/* String name for KTR. */
+#endif
 	struct vmmeter	pc_cnt;		/* VM stats counters */
 	long		pc_cp_time[CPUSTATES];	/* statclock ticks */

sys/sys/sched.h

@@ -134,6 +134,12 @@ void sched_affinity(struct thread *td);
 int	sched_sizeof_proc(void);
 int	sched_sizeof_thread(void);

+/*
+ * This routine provides a consistent thread name for use with KTR graphing
+ * functions.
+ */
+char	*sched_tdname(struct thread *td);
+
 static __inline void
 sched_pin(void)
 {