Handle multiple clock interrupts simultaneously in sched_clock().

Reviewed by:	kib, markj, mav
Differential Revision:	https://reviews.freebsd.org/D22625
Jeff Roberson 2019-12-08 01:17:38 +00:00
parent fb1d575ceb
commit c3cccf95bf
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=355512
4 changed files with 22 additions and 11 deletions
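For context, the sketch below (plain user-space C, not kernel code; toy_thread, toy_statclock, and toy_sched_clock are invented names for illustration) shows the call pattern this change introduces: statclock() can be handed a count of coalesced clock ticks, e.g. after an idle period with the event timer stopped, and it now forwards that count to the scheduler in a single sched_clock() call instead of invoking the per-tick hook in a loop.

#include <stdio.h>

/* Invented stand-in for struct thread and the scheduler state it carries. */
struct toy_thread {
	int runtime_ticks;	/* CPU ticks charged to the thread */
	int slice_used;		/* ticks used in the current time slice */
};

/*
 * New-style scheduler hook: one call accounts for 'cnt' coalesced clock
 * interrupts by scaling the per-tick arithmetic (the sched_ule.c approach).
 */
static void
toy_sched_clock(struct toy_thread *td, int cnt)
{
	td->runtime_ticks += cnt;
	td->slice_used += cnt;
}

/*
 * Caller side: statclock() may receive cnt > 1 when ticks were coalesced;
 * it now passes the count through instead of looping over the hook.
 */
static void
toy_statclock(struct toy_thread *td, int cnt)
{
	toy_sched_clock(td, cnt);	/* was: for ( ; cnt > 0; cnt--) hook(td); */
}

int
main(void)
{
	struct toy_thread td = { 0, 0 };

	toy_statclock(&td, 3);	/* three coalesced ticks, a single call */
	printf("runtime=%d slice=%d\n", td.runtime_ticks, td.slice_used);
	return (0);
}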

sys/kern/kern_clock.c

@@ -711,8 +711,7 @@ statclock(int cnt, int usermode)
 	td->td_incruntime += runtime;
 	PCPU_SET(switchtime, new_switchtime);
-	for ( ; cnt > 0; cnt--)
-		sched_clock(td);
+	sched_clock(td, cnt);
 	thread_unlock(td);
 #ifdef HWPMC_HOOKS
 	if (td->td_intr_frame != NULL)

sys/kern/sched_4bsd.c

@@ -706,8 +706,8 @@ sched_rr_interval(void)
  * favor processes which haven't run much recently, and to round-robin
  * among other processes.
  */
-void
-sched_clock(struct thread *td)
+static void
+sched_clock_tick(struct thread *td)
 {
 	struct pcpuidlestat *stat;
 	struct td_sched *ts;
@@ -736,6 +736,14 @@ sched_clock(struct thread *td)
 	stat->idlecalls = 0;
 }
 
+void
+sched_clock(struct thread *td, int cnt)
+{
+
+	for ( ; cnt > 0; cnt--)
+		sched_clock_tick(td);
+}
+
 /*
  * Charge child's scheduling CPU usage to parent.
  */

sys/kern/sched_ule.c

@@ -2421,7 +2421,7 @@ sched_userret_slowpath(struct thread *td)
  * threads.
  */
 void
-sched_clock(struct thread *td)
+sched_clock(struct thread *td, int cnt)
 {
 	struct tdq *tdq;
 	struct td_sched *ts;
@@ -2432,8 +2432,10 @@ sched_clock(struct thread *td)
 	/*
 	 * We run the long term load balancer infrequently on the first cpu.
 	 */
-	if (balance_tdq == tdq && smp_started != 0 && rebalance != 0) {
-		if (balance_ticks && --balance_ticks == 0)
+	if (balance_tdq == tdq && smp_started != 0 && rebalance != 0 &&
+	    balance_ticks != 0) {
+		balance_ticks -= cnt;
+		if (balance_ticks <= 0)
 			sched_balance();
 	}
 #endif
@@ -2455,14 +2457,15 @@ sched_clock(struct thread *td)
 	}
 	ts = td_get_sched(td);
 	sched_pctcpu_update(ts, 1);
-	if (td->td_pri_class & PRI_FIFO_BIT)
+	if ((td->td_pri_class & PRI_FIFO_BIT) || TD_IS_IDLETHREAD(td))
 		return;
+
 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) {
 		/*
 		 * We used a tick; charge it to the thread so
 		 * that we can compute our interactivity.
 		 */
-		td_get_sched(td)->ts_runtime += tickincr;
+		td_get_sched(td)->ts_runtime += tickincr * cnt;
 		sched_interact_update(td);
 		sched_priority(td);
 	}
@@ -2471,7 +2474,8 @@ sched_clock(struct thread *td)
 	 * Force a context switch if the current thread has used up a full
 	 * time slice (default is 100ms).
 	 */
-	if (!TD_IS_IDLETHREAD(td) && ++ts->ts_slice >= tdq_slice(tdq)) {
+	ts->ts_slice += cnt;
+	if (ts->ts_slice >= tdq_slice(tdq)) {
 		ts->ts_slice = 0;
 		td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
 	}

sys/sys/sched.h

@@ -135,7 +135,7 @@ sched_userret(struct thread *td)
  * Threads are moved on and off of run queues
  */
 void	sched_add(struct thread *td, int flags);
-void	sched_clock(struct thread *td);
+void	sched_clock(struct thread *td, int ticks);
 void	sched_preempt(struct thread *td);
 void	sched_rem(struct thread *td);
 void	sched_relinquish(struct thread *td);
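
Going by the hunks above, the two schedulers adapt differently: sched_ule.c scales its arithmetic by cnt (balance_ticks, ts_runtime, ts_slice), while sched_4bsd.c keeps its per-tick routine and loops over it in a thin wrapper. A scheduler whose per-tick logic is hard to scale could take the same wrapper route; the following standalone sketch of that pattern uses invented toy_* names, not kernel interfaces.

#include <stdio.h>

/* Invented stand-in for the scheduler's per-thread state. */
struct toy_td {
	int estcpu;		/* toy per-thread CPU usage estimate */
};

/* Per-tick accounting, left exactly as in the single-tick version. */
static void
toy_sched_clock_tick(struct toy_td *td)
{
	td->estcpu++;
}

/* New entry point: accept a count of coalesced ticks and loop. */
static void
toy_sched_clock(struct toy_td *td, int cnt)
{
	for ( ; cnt > 0; cnt--)
		toy_sched_clock_tick(td);
}

int
main(void)
{
	struct toy_td td = { 0 };

	toy_sched_clock(&td, 4);	/* four coalesced ticks, one call */
	printf("estcpu=%d\n", td.estcpu);
	return (0);
}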