 - Redefine p_swtime and td_slptime as p_swtick and td_slptick.  This
   changes the units from seconds to the value of 'ticks' when swapped
   in/out.  ULE does not have a periodic timer that scans all threads in
   the system and as such maintaining a per-second counter is difficult.
 - Change computations requiring the unit in seconds to subtract ticks
   and divide by hz, as sketched below.  This does make the wraparound
   condition hz times more frequent but this is still in the range of
   several months to years and the adverse effects are minimal.
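
A minimal standalone sketch of the new seconds computation and its
wraparound behavior (the userland scaffolding, unsigned types, and the
hz value are illustrative assumptions; the kernel uses the global
'ticks' counter and int fields):

#include <stdio.h>

/*
 * Sketch only, not committed code: elapsed seconds recovered from a
 * stored tick stamp, mirroring (ticks - p->p_swtick) / hz in the diff
 * below.  'ticks' advances hz times per second; two's-complement
 * subtraction still yields the right delta after the counter wraps,
 * provided fewer than 2^32 ticks elapse between stamp and read.
 * Unsigned types keep the wraparound well-defined in portable C.
 */
static unsigned int hz = 1000;	/* illustrative value of kern.hz */
static unsigned int ticks;	/* stand-in for the kernel's counter */

static unsigned int
swtick_to_seconds(unsigned int swtick)
{
	return ((ticks - swtick) / hz);
}

int
main(void)
{
	unsigned int p_swtick;

	ticks = 0xfffff000u;	/* counter close to the 32-bit wrap */
	p_swtick = ticks;	/* stamp taken when swapped in */
	ticks += 5 * hz;	/* five seconds pass; the counter wraps */
	printf("swtime = %u seconds\n", swtick_to_seconds(p_swtick));
	return (0);
}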

Approved by:	re
Jeff Roberson 2007-09-21 04:10:23 +00:00
parent f462501739
commit 54b0e65f84
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=172264
5 changed files with 31 additions and 28 deletions

sys/kern/kern_fork.c

@@ -500,6 +500,7 @@ fork1(td, flags, pages, procp)
* Increase reference counts on shared objects.
*/
p2->p_flag = P_INMEM;
+p2->p_swtick = ticks;
if (p1->p_flag & P_PROFIL)
startprofclock(p2);
td2->td_ucred = crhold(p2->p_ucred);

sys/kern/kern_proc.c

@@ -694,7 +694,8 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
kp->ki_sflag = PS_INMEM;
else
kp->ki_sflag = 0;
-kp->ki_swtime = p->p_swtime;
+/* Calculate legacy swtime as seconds since 'swtick'. */
+kp->ki_swtime = (ticks - p->p_swtick) / hz;
kp->ki_pid = p->p_pid;
kp->ki_nice = p->p_nice;
rufetch(p, &kp->ki_rusage);
@@ -812,7 +813,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_kstack = (void *)td->td_kstack;
kp->ki_pctcpu = sched_pctcpu(td);
kp->ki_estcpu = td->td_estcpu;
-kp->ki_slptime = td->td_slptime;
+kp->ki_slptime = (ticks - td->td_slptick) / hz;
kp->ki_pri.pri_class = td->td_pri_class;
kp->ki_pri.pri_user = td->td_user_pri;

sys/kern/sched_4bsd.c

@@ -84,6 +84,7 @@ struct td_sched {
fixpt_t ts_pctcpu; /* (j) %cpu during p_swtime. */
u_char ts_rqindex; /* (j) Run queue index. */
int ts_cpticks; /* (j) Ticks of cpu time. */
+int ts_slptime; /* (j) Seconds !RUNNING. */
struct runq *ts_runq; /* runq the thread is currently on */
};
@@ -379,11 +380,6 @@ schedcpu(void)
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
PROC_SLOCK(p);
-/*
- * Increment time in/out of memory. We ignore overflow; with
- * 16-bit int's (remember them?) overflow takes 45 days.
- */
-p->p_swtime++;
FOREACH_THREAD_IN_PROC(p, td) {
awake = 0;
thread_lock(td);
@@ -440,7 +436,7 @@ XXX this is broken
*/
if (awake) {
-if (td->td_slptime > 1) {
+if (ts->ts_slptime > 1) {
/*
* In an ideal world, this should not
* happen, because whoever woke us
@@ -452,10 +448,10 @@ XXX this is broken
*/
updatepri(td);
}
-td->td_slptime = 0;
+ts->ts_slptime = 0;
} else
-td->td_slptime++;
-if (td->td_slptime > 1) {
+ts->ts_slptime++;
+if (ts->ts_slptime > 1) {
thread_unlock(td);
continue;
}
@@ -490,16 +486,18 @@ schedcpu_thread(void)
static void
updatepri(struct thread *td)
{
-register fixpt_t loadfac;
-register unsigned int newcpu;
+struct td_sched *ts;
+fixpt_t loadfac;
+unsigned int newcpu;

+ts = td->td_sched;
loadfac = loadfactor(averunnable.ldavg[0]);
-if (td->td_slptime > 5 * loadfac)
+if (ts->ts_slptime > 5 * loadfac)
td->td_estcpu = 0;
else {
newcpu = td->td_estcpu;
-td->td_slptime--; /* was incremented in schedcpu() */
-while (newcpu && --td->td_slptime)
+ts->ts_slptime--; /* was incremented in schedcpu() */
+while (newcpu && --ts->ts_slptime)
newcpu = decay_cpu(loadfac, newcpu);
td->td_estcpu = newcpu;
}
@@ -827,7 +825,8 @@ sched_sleep(struct thread *td)
{
THREAD_LOCK_ASSERT(td, MA_OWNED);
-td->td_slptime = 0;
+td->td_slptick = ticks;
+td->td_sched->ts_slptime = 0;
}
void
@@ -939,12 +938,16 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
void
sched_wakeup(struct thread *td)
{
+struct td_sched *ts;
+
THREAD_LOCK_ASSERT(td, MA_OWNED);
-if (td->td_slptime > 1) {
+ts = td->td_sched;
+if (ts->ts_slptime > 1) {
updatepri(td);
resetpriority(td);
}
-td->td_slptime = 0;
+td->td_slptick = ticks;
+ts->ts_slptime = 0;
sched_add(td, SRQ_BORING);
}

sys/kern/sched_ule.c

@@ -88,7 +88,6 @@ struct td_sched {
short ts_flags; /* TSF_* flags. */
u_char ts_rqindex; /* Run queue index. */
u_char ts_cpu; /* CPU that we have affinity for. */
-int ts_slptick; /* Tick when we went to sleep. */
int ts_slice; /* Ticks of slice remaining. */
u_int ts_slptime; /* Number of ticks we vol. slept */
u_int ts_runtime; /* Number of ticks we were running */
@@ -1914,7 +1913,7 @@ sched_sleep(struct thread *td)
THREAD_LOCK_ASSERT(td, MA_OWNED);
-td->td_sched->ts_slptick = ticks;
+td->td_slptick = ticks;
}
/*
@@ -1933,8 +1932,8 @@ sched_wakeup(struct thread *td)
* If we slept for more than a tick update our interactivity and
* priority.
*/
-slptick = ts->ts_slptick;
-ts->ts_slptick = 0;
+slptick = td->td_slptick;
+td->td_slptick = 0;
if (slptick && slptick != ticks) {
u_int hzticks;
@@ -2435,7 +2434,6 @@ sched_pctcpu(struct thread *td)
rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
}
-td->td_proc->p_swtime = ts->ts_ltick - ts->ts_ftick;
thread_unlock(td);
return (pctcpu);
@@ -2636,8 +2634,8 @@ SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0,
"True when a topology has been specified by the MD code.");
#endif
-/* ps compat */
-static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
+/* ps compat. All cpu percentages from ULE are weighted. */
+static int ccpu = 0.0;
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
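
Why reporting ccpu as 0 keeps ps(1) compatible: ps re-weights
ki_pctcpu by the kernel's decay constant, and under IEEE arithmetic a
zero ccpu turns that weighting into a no-op, so ULE's already-weighted
percentages pass through unchanged.  A paraphrase of the userland side
(the function name and FSCALE value here are illustrative assumptions;
the formula follows ps's getpcpu()):

#include <math.h>
#include <sys/types.h>			/* fixpt_t, u_int */

#define	FSCALE	2048			/* illustrative kern.fscale */
#define	fxtofl(fixpt)	((double)(fixpt) / FSCALE)

/*
 * Paraphrase of ps(1)'s %CPU weighting, not the committed code.
 * With ccpu == 0: log(0) == -Inf, exp(swtime * -Inf) == 0, so the
 * denominator is 1.0 and the kernel's pctcpu is reported as-is.
 */
static double
weighted_pcpu(fixpt_t pctcpu, u_int swtime, fixpt_t ccpu)
{
	if (swtime == 0)		/* guard, as ps(1) does */
		return (0.0);
	return (100.0 * fxtofl(pctcpu) /
	    (1.0 - exp(swtime * log(fxtofl(ccpu)))));
}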

sys/sys/proc.h

@@ -242,7 +242,7 @@ struct thread {
struct thread *td_standin; /* (k + a) Use this for an upcall. */
struct kse_upcall *td_upcall; /* (k + t) Upcall structure. */
u_int td_estcpu; /* (t) estimated cpu utilization */
-u_int td_slptime; /* (t) How long completely blocked. */
+u_int td_slptick; /* (t) Time at sleep. */
struct rusage td_ru; /* (t) rusage information */
uint64_t td_runtime; /* (t) How many cpu ticks we've run. */
u_int td_pticks; /* (t) Statclock hits for profiling */
@@ -520,7 +520,7 @@ struct proc {
#define p_startzero p_oppid
pid_t p_oppid; /* (c + e) Save ppid in ptrace. XXX */
struct vmspace *p_vmspace; /* (b) Address space. */
-u_int p_swtime; /* (j) Time swapped in or out. */
+u_int p_swtick; /* (j) Tick when swapped in or out. */
struct itimerval p_realtimer; /* (c) Alarm timer. */
struct rusage p_ru; /* (a) Exit information. */
struct rusage_ext p_rux; /* (cj) Internal resource usage. */