Don't zero td_runtime when billing thread CPU usage to the process;

maintain a separate td_incruntime to hold unbilled CPU usage for
the thread that has the previous properties of td_runtime.

When thread information is requested using the thread monitoring
sysctls, export thread td_runtime instead of process rusage runtime
in kinfo_proc.

This restores the display of individual ithread and other kernel
thread CPU usage since inception in ps -H and top -SH, as well as
for libthr user threads, valuable debugging information lost with
the move to kthreads since they are no longer independent processes.

There is universal agreement that we should rewrite the process and
thread export sysctls, but this commit gets things going a bit
better in the meantime.  Likewise, there are reservations about the
continued validity of statclock given the speed of modern processors.

Reviewed by:		attilio, emaste, jhb, julian
This commit is contained in:
Robert Watson 2008-01-10 22:11:20 +00:00
parent a3ab9923ff
commit d92909c1d4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=175219
5 changed files with 25 additions and 12 deletions

View File

@ -514,6 +514,7 @@ proc0_post(void *dummy __unused)
struct timespec ts;
struct proc *p;
struct rusage ru;
struct thread *td;
/*
* Now we can look at the time, having had a chance to verify the
@ -529,6 +530,9 @@ proc0_post(void *dummy __unused)
p->p_rux.rux_uticks = 0;
p->p_rux.rux_sticks = 0;
p->p_rux.rux_iticks = 0;
FOREACH_THREAD_IN_PROC(p, td) {
td->td_runtime = 0;
}
}
sx_sunlock(&allproc_lock);
PCPU_SET(switchtime, cpu_ticks());

View File

@ -84,7 +84,8 @@ MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
static void doenterpgrp(struct proc *, struct pgrp *);
static void orphanpg(struct pgrp *pg);
static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp);
static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp);
static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp,
int preferthread);
static void pgadjustjobc(struct pgrp *pgrp, int entering);
static void pgdelete(struct pgrp *);
static int proc_ctor(void *mem, int size, void *arg, int flags);
@ -765,11 +766,12 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
}
/*
* Fill in information that is thread specific.
* Must be called with p_slock locked.
* Fill in information that is thread specific. Must be called with p_slock
* locked. If 'preferthread' is set, overwrite certain process-related
* fields that are maintained for both threads and processes.
*/
static void
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
{
struct proc *p;
@ -829,6 +831,9 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_pri.pri_class = td->td_pri_class;
kp->ki_pri.pri_user = td->td_user_pri;
if (preferthread)
kp->ki_runtime = cputick2usec(td->td_runtime);
/* We can't get this anymore but ps etc never used it anyway. */
kp->ki_rqindex = 0;
@ -848,7 +853,7 @@ fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
fill_kinfo_proc_only(p, kp);
PROC_SLOCK(p);
if (FIRST_THREAD_IN_PROC(p) != NULL)
fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp);
fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0);
PROC_SUNLOCK(p);
}
@ -918,7 +923,8 @@ sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
if (flags & KERN_PROC_NOTHREADS) {
PROC_SLOCK(p);
if (FIRST_THREAD_IN_PROC(p) != NULL)
fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), &kinfo_proc);
fill_kinfo_thread(FIRST_THREAD_IN_PROC(p),
&kinfo_proc, 0);
PROC_SUNLOCK(p);
error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
sizeof(kinfo_proc));
@ -926,7 +932,7 @@ sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
PROC_SLOCK(p);
if (FIRST_THREAD_IN_PROC(p) != NULL)
FOREACH_THREAD_IN_PROC(p, td) {
fill_kinfo_thread(td, &kinfo_proc);
fill_kinfo_thread(td, &kinfo_proc, 1);
error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
sizeof(kinfo_proc));
if (error)

View File

@ -849,7 +849,7 @@ calcru(struct proc *p, struct timeval *up, struct timeval *sp)
}
/* Make sure the per-thread stats are current. */
FOREACH_THREAD_IN_PROC(p, td) {
if (td->td_runtime == 0)
if (td->td_incruntime == 0)
continue;
thread_lock(td);
ruxagg(&p->p_rux, td);
@ -1021,11 +1021,11 @@ ruxagg(struct rusage_ext *rux, struct thread *td)
THREAD_LOCK_ASSERT(td, MA_OWNED);
PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
rux->rux_runtime += td->td_runtime;
rux->rux_runtime += td->td_incruntime;
rux->rux_uticks += td->td_uticks;
rux->rux_sticks += td->td_sticks;
rux->rux_iticks += td->td_iticks;
td->td_runtime = 0;
td->td_incruntime = 0;
td->td_uticks = 0;
td->td_iticks = 0;
td->td_sticks = 0;

View File

@ -371,7 +371,7 @@ wakeup_one(ident)
void
mi_switch(int flags, struct thread *newtd)
{
uint64_t new_switchtime;
uint64_t runtime, new_switchtime;
struct thread *td;
struct proc *p;
@ -409,7 +409,9 @@ mi_switch(int flags, struct thread *newtd)
* thread was running, and add that to its total so far.
*/
new_switchtime = cpu_ticks();
td->td_runtime += new_switchtime - PCPU_GET(switchtime);
runtime = new_switchtime - PCPU_GET(switchtime);
td->td_runtime += runtime;
td->td_incruntime += runtime;
PCPU_SET(switchtime, new_switchtime);
td->td_generation++; /* bump preempt-detect counter */
PCPU_INC(cnt.v_swtch);

View File

@ -244,6 +244,7 @@ struct thread {
u_int td_estcpu; /* (t) estimated cpu utilization */
u_int td_slptick; /* (t) Time at sleep. */
struct rusage td_ru; /* (t) rusage information */
uint64_t td_incruntime; /* (t) Cpu ticks to transfer to proc. */
uint64_t td_runtime; /* (t) How many cpu ticks we've run. */
u_int td_pticks; /* (t) Statclock hits for profiling */
u_int td_sticks; /* (t) Statclock hits in system mode. */