The struct thread td_estcpu member is only used by the 4BSD scheduler.
Move it to struct td_sched for 4BSD, removing an always-present field
that was otherwise unused for ULE.  A new scheduler method
sched_estcpu() returns the estimation for kinfo_proc consumption; as
before, it always returns 0 for ULE.

Remove the sched_tick() scheduler method, which was unused by both
4BSD and ULE.

Update the locking comment for the 4BSD struct td_sched, copying it
from the same comment for ULE.

Spell MAXPRI as PRI_MAX_TIMESHARE in the 4BSD comment.

Based on some notes from, and reviewed by:	bde
Sponsored by:	The FreeBSD Foundation
commit f16910a47e
parent 5aaf17e8ed
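In outline, the change turns direct access to td_estcpu into a
scheduler method. A minimal stand-alone sketch of the resulting
pattern (hypothetical stand-in types, not the kernel's code; build
with -DSCHED_4BSD to model the 4BSD side, without it to model ULE):

#include <stdio.h>

typedef unsigned int u_int;

/* Scheduler-private extension of the thread; generic code never
 * looks inside it directly. */
struct td_sched {
    u_int ts_estcpu;        /* 4BSD's estimated cpu utilization */
};

struct thread {
    struct td_sched *td_sched;  /* no td_estcpu member anymore */
};

#ifdef SCHED_4BSD
u_int
sched_estcpu(struct thread *td)
{

    return (td->td_sched->ts_estcpu);
}
#else   /* model of ULE: no estcpu is maintained, report 0 */
u_int
sched_estcpu(struct thread *td)
{

    (void)td;
    return (0);
}
#endif

int
main(void)
{
    struct td_sched ts = { .ts_estcpu = 42 };
    struct thread td = { .td_sched = &ts };

    /* What a fill_kinfo_thread()-style consumer would report. */
    printf("ki_estcpu = %u\n", sched_estcpu(&td));
    return (0);
}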
sys/kern/kern_clock.c

@@ -449,7 +449,6 @@ hardclock_cpu(int usermode)
         PROC_ITIMUNLOCK(p);
     }
     thread_lock(td);
-    sched_tick(1);
     td->td_flags |= flags;
     thread_unlock(td);

@@ -539,7 +538,6 @@ hardclock_cnt(int cnt, int usermode)
         PROC_ITIMUNLOCK(p);
     }
     thread_lock(td);
-    sched_tick(cnt);
     td->td_flags |= flags;
     thread_unlock(td);

sys/kern/kern_proc.c

@@ -855,7 +855,7 @@ fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp)
     FOREACH_THREAD_IN_PROC(p, td) {
         thread_lock(td);
         kp->ki_pctcpu += sched_pctcpu(td);
-        kp->ki_estcpu += td->td_estcpu;
+        kp->ki_estcpu += sched_estcpu(td);
         thread_unlock(td);
     }
 }

@@ -1101,7 +1101,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
         rufetchtd(td, &kp->ki_rusage);
         kp->ki_runtime = cputick2usec(td->td_rux.rux_runtime);
         kp->ki_pctcpu = sched_pctcpu(td);
-        kp->ki_estcpu = td->td_estcpu;
+        kp->ki_estcpu = sched_estcpu(td);
         kp->ki_cow = td->td_cow;
     }

sys/kern/sched_4bsd.c

@@ -87,12 +87,14 @@ dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
 /*
  * The schedulable entity that runs a context.
  * This is an extension to the thread structure and is tailored to
- * the requirements of this scheduler
+ * the requirements of this scheduler.
+ * All fields are protected by the scheduler lock.
  */
 struct td_sched {
-    fixpt_t ts_pctcpu;      /* (j) %cpu during p_swtime. */
-    int     ts_cpticks;     /* (j) Ticks of cpu time. */
-    int     ts_slptime;     /* (j) Seconds !RUNNING. */
+    fixpt_t ts_pctcpu;      /* %cpu during p_swtime. */
+    u_int   ts_estcpu;      /* Estimated cpu utilization. */
+    int     ts_cpticks;     /* Ticks of cpu time. */
+    int     ts_slptime;     /* Seconds !RUNNING. */
     int     ts_slice;       /* Remaining part of time slice. */
     int     ts_flags;
     struct runq *ts_runq;   /* runq the thread is currently on */
@@ -382,20 +384,20 @@ maybe_preempt(struct thread *td)

 /*
  * Constants for digital decay and forget:
- *  90% of (td_estcpu) usage in 5 * loadav time
+ *  90% of (ts_estcpu) usage in 5 * loadav time
  *  95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
  *      Note that, as ps(1) mentions, this can let percentages
  *      total over 100% (I've seen 137.9% for 3 processes).
  *
- * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
+ * Note that schedclock() updates ts_estcpu and p_cpticks asynchronously.
  *
- * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
+ * We wish to decay away 90% of ts_estcpu in (5 * loadavg) seconds.
  * That is, the system wants to compute a value of decay such
  * that the following for loop:
  *  for (i = 0; i < (5 * loadavg); i++)
- *      td_estcpu *= decay;
+ *      ts_estcpu *= decay;
  * will compute
- *  td_estcpu *= 0.1;
+ *  ts_estcpu *= 0.1;
  * for all values of loadavg:
  *
  * Mathematically this loop can be expressed by saying:
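As a sanity check on the 90%-in-5*loadavg claim: with the decay
factor the file goes on to derive, (2 * loadavg) / (2 * loadavg + 1),
applying it 5 * loadavg times leaves roughly a tenth of the estimate.
A stand-alone numeric check (not kernel code):

/* Check the 4BSD decay identity: decay^(5 * loadavg) ~= 0.1. */
#include <math.h>
#include <stdio.h>

int
main(void)
{
    for (int loadavg = 1; loadavg <= 8; loadavg++) {
        double decay = (2.0 * loadavg) / (2.0 * loadavg + 1.0);
        double remaining = pow(decay, 5 * loadavg);
        printf("loadavg %d: decay^%2d = %.4f\n",
            loadavg, 5 * loadavg, remaining);
    }
    return (0);
}

For loadavg 1 this prints about 0.13, and it tends toward
e^-2.5 ~= 0.08 as the load grows, so "90% forgotten" is a fair
summary across the whole range.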
@@ -559,7 +561,7 @@ schedcpu(void)
                 thread_unlock(td);
                 continue;
             }
-            td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
+            ts->ts_estcpu = decay_cpu(loadfac, ts->ts_estcpu);
             resetpriority(td);
             resetpriority_thread(td);
             thread_unlock(td);
@@ -584,8 +586,8 @@ schedcpu_thread(void)

 /*
  * Recalculate the priority of a process after it has slept for a while.
- * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
- * least six times the loadfactor will decay td_estcpu to zero.
+ * For all load averages >= 1 and max ts_estcpu of 255, sleeping for at
+ * least six times the loadfactor will decay ts_estcpu to zero.
  */
 static void
 updatepri(struct thread *td)

@@ -597,13 +599,13 @@ updatepri(struct thread *td)
     ts = td->td_sched;
     loadfac = loadfactor(averunnable.ldavg[0]);
     if (ts->ts_slptime > 5 * loadfac)
-        td->td_estcpu = 0;
+        ts->ts_estcpu = 0;
     else {
-        newcpu = td->td_estcpu;
+        newcpu = ts->ts_estcpu;
         ts->ts_slptime--;   /* was incremented in schedcpu() */
         while (newcpu && --ts->ts_slptime)
             newcpu = decay_cpu(loadfac, newcpu);
-        td->td_estcpu = newcpu;
+        ts->ts_estcpu = newcpu;
     }
 }

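updatepri() burns off roughly one decay step per second slept and
forgives the estimate outright after 5 * loadfactor seconds. A
userland model of that loop (doubles stand in for the kernel's
fixpt_t; the values are made up, and fixed-point truncation makes the
real code reach 0 a bit sooner):

#include <stdio.h>

/* Double-precision stand-in for the kernel's decay_cpu(). */
static double
decay_cpu(double loadav, double estcpu)
{

    return (estcpu * (2.0 * loadav) / (2.0 * loadav + 1.0));
}

int
main(void)
{
    double loadav = 1.0;    /* made-up load average */
    double estcpu = 255.0;  /* the documented maximum */

    /* One decay step per second slept, as in updatepri()'s loop. */
    for (int slept = 1; slept <= 14; slept++) {
        estcpu = decay_cpu(loadav, estcpu);
        printf("slept %2d s: estcpu %6.2f\n", slept, estcpu);
    }
    return (0);
}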
@@ -615,15 +617,15 @@ updatepri(struct thread *td)
 static void
 resetpriority(struct thread *td)
 {
-    register unsigned int newpriority;
+    u_int newpriority;

-    if (td->td_pri_class == PRI_TIMESHARE) {
-        newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
-            NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
-        newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
-            PRI_MAX_TIMESHARE);
-        sched_user_prio(td, newpriority);
-    }
+    if (td->td_pri_class != PRI_TIMESHARE)
+        return;
+    newpriority = PUSER + td->td_sched->ts_estcpu / INVERSE_ESTCPU_WEIGHT +
+        NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
+    newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
+        PRI_MAX_TIMESHARE);
+    sched_user_prio(td, newpriority);
 }

 /*
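A worked example of the formula above, with stand-in constants (the
kernel's real values live in <sys/priority.h> and sched_4bsd.c; the
numbers and the helper name timeshare_prio are for demonstration
only):

#include <stdio.h>

#define PUSER                   120     /* stand-in */
#define PRI_MIN_TIMESHARE       120     /* stand-in */
#define PRI_MAX_TIMESHARE       223     /* stand-in */
#define INVERSE_ESTCPU_WEIGHT   8       /* stand-in */
#define NICE_WEIGHT             1       /* stand-in */
#define PRIO_MIN                (-20)

/* Hypothetical helper mirroring resetpriority()'s computation. */
static unsigned int
timeshare_prio(unsigned int ts_estcpu, int p_nice)
{
    unsigned int newpriority;

    newpriority = PUSER + ts_estcpu / INVERSE_ESTCPU_WEIGHT +
        NICE_WEIGHT * (p_nice - PRIO_MIN);
    if (newpriority < PRI_MIN_TIMESHARE)
        newpriority = PRI_MIN_TIMESHARE;
    if (newpriority > PRI_MAX_TIMESHARE)
        newpriority = PRI_MAX_TIMESHARE;
    return (newpriority);
}

int
main(void)
{
    /* A CPU-bound thread (high estcpu) vs. an interactive one. */
    printf("estcpu 200, nice  0 -> prio %u\n", timeshare_prio(200, 0));
    printf("estcpu   8, nice  0 -> prio %u\n", timeshare_prio(8, 0));
    printf("estcpu   8, nice 20 -> prio %u\n", timeshare_prio(8, 20));
    return (0);
}

With these stand-ins a CPU hog lands at priority 165 while the
interactive thread stays at 141; a nice value of 20 pushes the latter
to 161, which is the round-robin bias the comment below describes.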
@@ -709,18 +711,18 @@ sched_rr_interval(void)
 }

 /*
- * We adjust the priority of the current process. The priority of
- * a process gets worse as it accumulates CPU time. The cpu usage
- * estimator (td_estcpu) is increased here. resetpriority() will
- * compute a different priority each time td_estcpu increases by
- * INVERSE_ESTCPU_WEIGHT
- * (until MAXPRI is reached). The cpu usage estimator ramps up
- * quite quickly when the process is running (linearly), and decays
- * away exponentially, at a rate which is proportionally slower when
- * the system is busy. The basic principle is that the system will
- * 90% forget that the process used a lot of CPU time in 5 * loadav
- * seconds. This causes the system to favor processes which haven't
- * run much recently, and to round-robin among other processes.
+ * We adjust the priority of the current process. The priority of a
+ * process gets worse as it accumulates CPU time. The cpu usage
+ * estimator (ts_estcpu) is increased here. resetpriority() will
+ * compute a different priority each time ts_estcpu increases by
+ * INVERSE_ESTCPU_WEIGHT (until PRI_MAX_TIMESHARE is reached). The
+ * cpu usage estimator ramps up quite quickly when the process is
+ * running (linearly), and decays away exponentially, at a rate which
+ * is proportionally slower when the system is busy. The basic
+ * principle is that the system will 90% forget that the process used
+ * a lot of CPU time in 5 * loadav seconds. This causes the system to
+ * favor processes which haven't run much recently, and to round-robin
+ * among other processes.
 */
 void
 sched_clock(struct thread *td)
@@ -732,8 +734,8 @@ sched_clock(struct thread *td)
     ts = td->td_sched;

     ts->ts_cpticks++;
-    td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
-    if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
+    ts->ts_estcpu = ESTCPULIM(ts->ts_estcpu + 1);
+    if ((ts->ts_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
         resetpriority(td);
         resetpriority_thread(td);
     }
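sched_clock() runs for the current thread on each statclock tick: the
estimator is bumped under a saturating clamp and the priority is
recomputed only when it crosses a multiple of INVERSE_ESTCPU_WEIGHT.
A toy model of that rate limiting (ESTCPU_MAX and the 24-tick driver
loop are made up; ESTCPULIM in the kernel is a clamp macro, modeled
here by the bounds check):

#include <stdio.h>

#define INVERSE_ESTCPU_WEIGHT   8
#define ESTCPU_MAX              255     /* stand-in bound */

static unsigned int ts_estcpu;

static void
tick(void)
{
    if (ts_estcpu < ESTCPU_MAX)
        ts_estcpu++;        /* ESTCPULIM-style saturation */
    if ((ts_estcpu % INVERSE_ESTCPU_WEIGHT) == 0)
        printf("tick: estcpu %u -> recompute priority\n",
            ts_estcpu);
}

int
main(void)
{
    for (int i = 0; i < 24; i++)
        tick();
    return (0);
}

Only every eighth tick triggers a recompute, so the (relatively
expensive) priority update stays off the common tick path.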
@@ -773,7 +775,8 @@ sched_exit_thread(struct thread *td, struct thread *child)
     KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
         "prio:%d", child->td_priority);
     thread_lock(td);
-    td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
+    td->td_sched->ts_estcpu = ESTCPULIM(td->td_sched->ts_estcpu +
+        child->td_sched->ts_estcpu);
     thread_unlock(td);
     thread_lock(child);
     if ((child->td_flags & TDF_NOLOAD) == 0)
@@ -794,12 +797,12 @@ sched_fork_thread(struct thread *td, struct thread *childtd)

     childtd->td_oncpu = NOCPU;
     childtd->td_lastcpu = NOCPU;
-    childtd->td_estcpu = td->td_estcpu;
     childtd->td_lock = &sched_lock;
     childtd->td_cpuset = cpuset_ref(td->td_cpuset);
     childtd->td_priority = childtd->td_base_pri;
     ts = childtd->td_sched;
     bzero(ts, sizeof(*ts));
+    ts->ts_estcpu = td->td_sched->ts_estcpu;
     ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
     ts->ts_slice = 1;
 }
@@ -1621,9 +1624,11 @@ sched_pctcpu_delta(struct thread *td)
 }
 #endif

-void
-sched_tick(int cnt)
+u_int
+sched_estcpu(struct thread *td)
 {
+
+    return (td->td_sched->ts_estcpu);
 }

 /*
sys/kern/sched_ule.c

@@ -2288,13 +2288,11 @@ sched_clock(struct thread *td)
     }
 }

-/*
- * Called once per hz tick.
- */
-void
-sched_tick(int cnt)
+u_int
+sched_estcpu(struct thread *td __unused)
 {

+    return (0);
 }

 /*
sys/sys/proc.h

@@ -250,7 +250,6 @@ struct thread {
     int     td_pinned;      /* (k) Temporary cpu pin count. */
     struct ucred *td_ucred; /* (k) Reference to credentials. */
     struct plimit *td_limit; /* (k) Resource limits. */
-    u_int   td_estcpu;      /* (t) estimated cpu utilization */
     int     td_slptick;     /* (t) Time at sleep. */
     int     td_blktick;     /* (t) Time spent blocked. */
     int     td_swvoltick;   /* (t) Time at last SW_VOL switch. */
sys/sys/sched.h

@@ -90,6 +90,7 @@ void sched_nice(struct proc *p, int nice);
  * priorities inherited from their procs, and use up cpu time.
  */
 void    sched_exit_thread(struct thread *td, struct thread *child);
+u_int   sched_estcpu(struct thread *td);
 void    sched_fork_thread(struct thread *td, struct thread *child);
 void    sched_lend_prio(struct thread *td, u_char prio);
 void    sched_lend_user_prio(struct thread *td, u_char pri);

@@ -102,7 +103,6 @@ void sched_unlend_prio(struct thread *td, u_char prio);
 void    sched_user_prio(struct thread *td, u_char prio);
 void    sched_userret(struct thread *td);
 void    sched_wakeup(struct thread *td);
-void    sched_preempt(struct thread *td);
 #ifdef RACCT
 #ifdef SCHED_4BSD
 fixpt_t sched_pctcpu_delta(struct thread *td);

@@ -114,8 +114,8 @@ fixpt_t sched_pctcpu_delta(struct thread *td);
  */
 void    sched_add(struct thread *td, int flags);
 void    sched_clock(struct thread *td);
+void    sched_preempt(struct thread *td);
 void    sched_rem(struct thread *td);
-void    sched_tick(int cnt);
 void    sched_relinquish(struct thread *td);
 struct thread *sched_choose(void);
 void    sched_idletd(void *);