- Shift the tick count left by 10 bits and back around the sched_pctcpu_update()
  calculations. Keep these changes local to the function so the tick count
  stays in its natural form everywhere else. Previously, 10000 was added to the
  count each time a tick fired and the total was divided back down by 10000
  when it was reported. This is done to reduce rounding errors.
parent 9f7b7e45d1
commit 65c8760dbf
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=111793
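The idea behind the change is easier to see in isolation. Below is a minimal user-space sketch (not the kernel code; the variable names and sample values are made up for illustration) of how shifting the dividend left by 10 bits before the divide, and shifting the result back afterwards, preserves fractional precision that a plain integer divide would throw away:

#include <stdio.h>

int
main(void)
{
	/* Hypothetical values standing in for ke_ticks, the tick window, and SCHED_CPU_TICKS. */
	long ticks = 7;
	long window = 3;
	long scale = 100;

	/* Plain integer divide: 7 / 3 truncates to 2, giving 200. */
	long naive = (ticks / window) * scale;

	/* Shift in 10 bits of fraction, divide and scale, then shift back out: 233. */
	long shifted = ticks << 10;
	shifted = (shifted / window) * scale;
	shifted >>= 10;

	/* The exact result is 233.33..., so the shifted form loses far less to rounding. */
	printf("naive=%ld shifted=%ld\n", naive, shifted);
	return (0);
}

Because the shift is applied and undone entirely inside sched_pctcpu_update(), ke_ticks keeps its natural units everywhere else, which is why sched_clock() can go back to a plain ke->ke_ticks++ in the second hunk below.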
@@ -418,8 +418,14 @@ sched_pctcpu_update(struct kse *ke)
 	/*
 	 * Adjust counters and watermark for pctcpu calc.
 	 */
+	/*
+	 * Shift the tick count out so that the divide doesn't round away
+	 * our results.
+	 */
+	ke->ke_ticks <<= 10;
 	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
 		    SCHED_CPU_TICKS;
+	ke->ke_ticks >>= 10;
 	ke->ke_ltick = ticks;
 	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
 }
@@ -665,7 +671,7 @@ sched_clock(struct thread *td)
 	KASSERT((td != NULL), ("schedclock: null thread pointer"));

 	/* Adjust ticks for pctcpu */
-	ke->ke_ticks += 10000;
+	ke->ke_ticks++;
 	ke->ke_ltick = ticks;
 	/* Go up to one second beyond our max and then trim back down */
 	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
@@ -865,7 +871,7 @@ sched_pctcpu(struct kse *ke)
 		sched_pctcpu_update(ke);

 		/* How many rtick per second ? */
-		rtick = ke->ke_ticks / (SCHED_CPU_TIME * 10000);
+		rtick = ke->ke_ticks / SCHED_CPU_TIME;
 		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
 	}
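For context on the unchanged pctcpu line: it converts rtick (run ticks per second) into the kernel's fixed-point load fraction. As a rough worked example, assuming FSHIFT is 11 (so FSCALE is 1 << 11 = 2048) and a hypothetical realstathz of 128: a thread that ran on half of the stat clock ticks has rtick = 64, and (2048 * ((2048 * 64) / 128)) >> 11 evaluates to 1024, i.e. FSCALE / 2, which callers of sched_pctcpu() interpret as 50% CPU.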