Increase the amount of data exported by KTR in the KTR_RUNQ setting.

This extra data is needed to really follow what is going on in the
threaded case.
julian 2004-08-09 18:21:12 +00:00
parent ae5342c0a1
commit ce15d5c35c
5 changed files with 28 additions and 21 deletions
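
For readers following the hunks below: every trace point touched by this commit is a CTRn() macro from <sys/ktr.h>, where the numeric suffix is the argument count, the first argument is the event class that debug.ktr.mask filters on, and the remaining arguments are printf-style values stored in the kernel's KTR ring buffer. The sketch below restates the setrunqueue() change in that pattern; the example_trace() wrapper and the config options listed in the comment are illustrative only and are not part of this commit.

    #include <sys/param.h>
    #include <sys/ktr.h>
    #include <sys/proc.h>

    /*
     * Illustrative kernel config for compiling in and enabling these events
     * (not part of this change):
     *   options KTR
     *   options KTR_COMPILE=(KTR_PROC|KTR_RUNQ)
     *   options KTR_MASK=(KTR_PROC|KTR_RUNQ)
     * Recorded entries can be read with ktrdump(8) or "show ktr" in DDB.
     */

    static void
    example_trace(struct thread *td)
    {
        /* Before this commit: only the thread pointer was recorded. */
        CTR1(KTR_RUNQ, "setrunqueue: td%p", td);

        /* After: the kse, ksegrp and pid are recorded as well, so the
         * threaded (KSE) case can actually be followed in the trace buffer. */
        CTR4(KTR_RUNQ, "setrunqueue: td:%p ke:%p kg:%p pid:%d",
            td, td->td_kse, td->td_ksegrp, td->td_proc->p_pid);
    }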

@@ -794,8 +794,8 @@ fork_exit(callout, arg, frame)
     sched_lock.mtx_lock = (uintptr_t)td;
     mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
     cpu_critical_fork_exit();
-    CTR3(KTR_PROC, "fork_exit: new thread %p (pid %d, %s)", td, p->p_pid,
-        p->p_comm);
+    CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",
+        td, td->td_kse, p->p_pid, p->p_comm);
     /*
      * Processes normally resume in mi_switch() after being

@@ -213,8 +213,8 @@ kse_reassign(struct kse *ke)
         kg->kg_last_assigned = td;
         td->td_kse = ke;
         ke->ke_thread = td;
-        sched_add(td);
         CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
+        sched_add(td);
         return;
     }
@@ -327,7 +327,8 @@ setrunqueue(struct thread *td)
     struct thread *td2;
     struct thread *tda;
-    CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
+    CTR4(KTR_RUNQ, "setrunqueue: td:%p ke:%p kg:%p pid:%d",
+        td, td->td_kse, td->td_ksegrp, td->td_proc->p_pid);
     mtx_assert(&sched_lock, MA_OWNED);
     KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
         ("setrunqueue: bad thread state"));
@@ -351,6 +352,8 @@ setrunqueue(struct thread *td)
              * There is a free one so it's ours for the asking..
              */
             ke = TAILQ_FIRST(&kg->kg_iq);
+            CTR2(KTR_RUNQ, "setrunqueue: kg:%p: Use free ke:%p",
+                kg, ke);
             TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
             ke->ke_state = KES_THREAD;
             kg->kg_idle_kses--;
@@ -359,6 +362,9 @@ setrunqueue(struct thread *td)
              * None free, but there is one we can commandeer.
              */
             ke = tda->td_kse;
+            CTR3(KTR_RUNQ,
+                "setrunqueue: kg:%p: take ke:%p from td: %p",
+                kg, ke, tda);
             sched_rem(tda);
             tda->td_kse = NULL;
             ke->ke_thread = NULL;
@@ -423,6 +429,9 @@ setrunqueue(struct thread *td)
             ke->ke_thread = td2;
         }
         sched_add(ke->ke_thread);
+    } else {
+        CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
+            td, td->td_ksegrp, td->td_proc->p_pid);
     }
 }
@@ -639,8 +648,8 @@ runq_add(struct runq *rq, struct kse *ke)
     ke->ke_rqindex = pri;
     runq_setbit(rq, pri);
     rqh = &rq->rq_queues[pri];
-    CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
-        ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
+    CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
+        ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
     TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
 }
@@ -706,8 +715,8 @@ runq_remove(struct runq *rq, struct kse *ke)
         ("runq_remove: process swapped out"));
     pri = ke->ke_rqindex;
     rqh = &rq->rq_queues[pri];
-    CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
-        ke, ke->ke_thread->td_priority, pri, rqh);
+    CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
+        ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
     KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
     TAILQ_REMOVE(rqh, ke, ke_procq);
     if (TAILQ_EMPTY(rqh)) {

@@ -345,14 +345,14 @@ mi_switch(int flags, struct thread *newtd)
     cnt.v_swtch++;
     PCPU_SET(switchtime, new_switchtime);
     PCPU_SET(switchticks, ticks);
-    CTR3(KTR_PROC, "mi_switch: old thread %p (pid %ld, %s)",
-        (void *)td, (long)p->p_pid, p->p_comm);
+    CTR4(KTR_PROC, "mi_switch: old thread %p (kse %p, pid %ld, %s)",
+        (void *)td, td->td_kse, (long)p->p_pid, p->p_comm);
     if (td->td_proc->p_flag & P_SA)
         thread_switchout(td);
     sched_switch(td, newtd);
-    CTR3(KTR_PROC, "mi_switch: new thread %p (pid %ld, %s)",
-        (void *)td, (long)p->p_pid, p->p_comm);
+    CTR4(KTR_PROC, "mi_switch: new thread %p (kse %p, pid %ld, %s)",
+        (void *)td, td->td_kse, (long)p->p_pid, p->p_comm);
     /*
      * If the last thread was exiting, finish cleaning it up.

@@ -682,9 +682,8 @@ thread_exit(void)
     } else {
         PROC_UNLOCK(p);
     }
     td->td_state = TDS_INACTIVE;
-    /* XXX Shouldn't cpu_throw() here. */
-    mtx_assert(&sched_lock, MA_OWNED);
+    CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
     cpu_throw(td, choosethread());
     panic("I'm a teapot!");
     /* NOTREACHED */

@@ -49,8 +49,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/sysctl.h>
 #include <sys/sx.h>
-#define KTR_4BSD 0x0
 /*
  * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
  * the range 100-256 Hz (approximately).
@@ -725,14 +723,15 @@ sched_add(struct thread *td)
 #ifdef SMP
     if (KSE_CAN_MIGRATE(ke)) {
-        CTR1(KTR_4BSD, "adding kse:%p to gbl runq", ke);
+        CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
         ke->ke_runq = &runq;
     } else {
-        CTR1(KTR_4BSD, "adding kse:%p to pcpu runq", ke);
+        CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p)to pcpu runq", ke, td);
         if (!SKE_RUNQ_PCPU(ke))
             ke->ke_runq = &runq_pcpu[PCPU_GET(cpuid)];
     }
 #else
+    CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
     ke->ke_runq = &runq;
 #endif
     if ((td->td_proc->p_flag & P_NOLOAD) == 0)
@@ -777,12 +776,12 @@ sched_choose(void)
     if (ke == NULL ||
         (kecpu != NULL &&
          kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
-        CTR2(KTR_4BSD, "choosing kse %p from pcpu runq %d", kecpu,
+        CTR2(KTR_RUNQ, "choosing kse %p from pcpu runq %d", kecpu,
             PCPU_GET(cpuid));
         ke = kecpu;
         rq = &runq_pcpu[PCPU_GET(cpuid)];
     } else {
-        CTR1(KTR_4BSD, "choosing kse %p from main runq", ke);
+        CTR1(KTR_RUNQ, "choosing kse %p from main runq", ke);
     }
 #else