Make kern_tc.c provide the minimum frequency of tc_ticktock() calls
required to handle current timecounter wraps. Make kern_clocksource.c
honor that requirement by scheduling sleeps on the first CPU for no
more than the specified period. Allow other CPUs to sleep for up to
1/4 second (just in case).
commit 0e18987383
parent 4763a8b8c1
Author: Alexander Motin
Date:   2010-09-14 08:48:06 +00:00

6 changed files with 22 additions and 9 deletions
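Why a minimum call frequency exists at all: a free-running timecounter wraps after (tc_counter_mask + 1) / tc_frequency seconds, and tc_windup() has to sample it more often than that, or a whole wrap goes unnoticed and the clock loses time. The standalone userland sketch below (not kernel code; the counter parameters are made up for illustration) mirrors the bound that the kern_tc.c hunk further down computes, demanding at least three samples per wrap:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch: minimum tc_ticktock() call frequency for a counter with
 * the given frequency and mask, mirroring the formula this commit
 * adds to tc_windup(): at least 3 samples per wrap period.
 */
static int
min_ticktock_freq(uint64_t freq, uint32_t mask)
{
	uint64_t wrap = (uint64_t)mask + 1;	/* counts per wrap */
	uint64_t f = freq / (wrap / 3);

	return (f > 1 ? (int)f : 1);
}

int
main(void)
{
	/* A 16-bit counter at ~1.19 MHz wraps every ~55 ms. */
	printf("%d calls/s\n", min_ticktock_freq(1193182, 0xffff));
	/* A 32-bit counter at 14.3 MHz wraps every ~5 minutes. */
	printf("%d calls/s\n", min_ticktock_freq(14318180, 0xffffffff));
	return (0);
}

For the fast 16-bit counter this yields ~54 calls per second; for the wide counter it degenerates to 1, in which case the unconditional skip = 4 floor in kern_clocksource.c (a 1/4 second sleep cap) dominates.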

sys/kern/kern_clock.c

@@ -457,7 +457,7 @@ hardclock(int usermode, uintfptr_t pc)
 	atomic_add_int((volatile int *)&ticks, 1);
 	hardclock_cpu(usermode);
-	tc_ticktock();
+	tc_ticktock(1);
 	cpu_tick_calibration();
 	/*
 	 * If no separate statistics clock is available, run it from here.

@@ -538,7 +538,7 @@ hardclock_anycpu(int cnt, int usermode)
 	if (newticks > 0) {
 		/* Dangerous and no need to call these things concurrently. */
 		if (atomic_cmpset_acq_int(&global_hardclock_run, 0, 1)) {
-			tc_ticktock();
+			tc_ticktock(newticks);
 #ifdef DEVICE_POLLING
 			/* This is very short and quick. */
 			hardclock_device_poll();
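With event timers in one-shot (tickless) mode, hardclock_anycpu() can account for several hardclock periods in a single call, which is why tc_ticktock() now takes a tick count instead of assuming one tick per call. A toy model of the difference, with made-up values for tc_tick and the tick batches:

#include <stdio.h>

/*
 * Toy model: tc_windup() should run once per TC_TICK hardclock
 * ticks.  When ticks arrive in batches, counting calls (the old
 * "++count") undercounts elapsed ticks; "count += cnt" does not.
 */
#define TC_TICK	4

int
main(void)
{
	int batches[] = { 1, 3, 2, 4, 1 };	/* 11 ticks in 5 calls */
	int i, calls = 0, count = 0, windups = 0;

	for (i = 0; i < 5; i++) {
		calls++;		/* all the old scheme ever saw */
		count += batches[i];	/* the new accumulation */
		if (count >= TC_TICK) {
			count = 0;
			windups++;
		}
	}
	printf("%d calls, 11 ticks, %d windups\n", calls, windups);
	return (0);
}

The new accumulator runs tc_windup() twice here; the old one would have seen only five "ticks" and run it once, drifting further behind the longer the CPUs sleep.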

sys/kern/kern_clocksource.c

@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/timeet.h>
+#include <sys/timetc.h>
 
 #include <machine/atomic.h>
 #include <machine/clock.h>

@@ -247,7 +248,10 @@ getnextcpuevent(struct bintime *event, int idle)
 	state = DPCPU_PTR(timerstate);
 	*event = state->nexthard;
 	if (idle) { /* If CPU is idle - ask callouts for how long. */
-		skip = callout_tickstofirst() - 1;
+		skip = 4;
+		if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > skip)
+			skip = tc_min_ticktock_freq;
+		skip = callout_tickstofirst(hz / skip) - 1;
 		CTR2(KTR_SPARE2, "skip at %d: %d", curcpu, skip);
 		tmp = hardperiod;
 		bintime_mul(&tmp, skip);
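To see what this does to an idle machine, treat skip as a target tc_windup() rate in calls per second and hz / skip as the longest sleep, in ticks, that callout_tickstofirst() may report. A worked example with assumed values hz = 1000 and tc_min_ticktock_freq = 54 (the fast 16-bit counter from the sketch above):

/*
 * Assumed: hz = 1000, tc_min_ticktock_freq = 54.
 *
 * First CPU:  skip = max(4, 54) = 54  ->  limit = 1000 / 54 = 18 ticks,
 *             i.e. sleep at most ~18 ms, sampling a ~55 ms-wrap counter
 *             three or more times per wrap.
 * Other CPUs: skip = 4               ->  limit = 1000 / 4 = 250 ticks,
 *             i.e. sleep at most 1/4 second.
 */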

sys/kern/kern_tc.c

@@ -87,6 +87,8 @@ static struct timehands *volatile timehands = &th0;
 struct timecounter *timecounter = &dummy_timecounter;
 static struct timecounter *timecounters = &dummy_timecounter;
 
+int tc_min_ticktock_freq = 1;
+
 time_t time_second = 1;
 time_t time_uptime = 1;

@@ -482,6 +484,8 @@ tc_windup(void)
 	if (th->th_counter != timecounter) {
 		th->th_counter = timecounter;
 		th->th_offset_count = ncount;
+		tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
+		    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
 	}
 
 	/*-

@@ -767,11 +771,12 @@ static int tc_tick;
 SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0, "");
 
 void
-tc_ticktock(void)
+tc_ticktock(int cnt)
 {
 	static int count;
 
-	if (++count < tc_tick)
+	count += cnt;
+	if (count < tc_tick)
 		return;
 	count = 0;
 	tc_windup();

sys/kern/kern_timeout.c

@@ -280,7 +280,7 @@ callout_tick(void)
 }
 
 int
-callout_tickstofirst(void)
+callout_tickstofirst(int limit)
 {
 	struct callout_cpu *cc;
 	struct callout *c;

@@ -291,7 +291,7 @@ callout_tickstofirst(void)
 	cc = CC_SELF();
 	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
 	curticks = cc->cc_ticks;
-	while( skip < ncallout && skip < hz/8 ) {
+	while( skip < ncallout && skip < limit ) {
 		sc = &cc->cc_callwheel[ (curticks+skip) & callwheelmask ];
 		/* search scanning ticks */
 		TAILQ_FOREACH( c, sc, c_links.tqe ){
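callout_tickstofirst() walks the callout wheel forward from the current tick until it finds a bucket with pending work; the new limit argument caps that scan where hz/8 used to be hard-coded. A simplified, self-contained model of the scan (the wheel size and the pending-callout bookkeeping are stripped down for illustration):

#include <stdio.h>

#define WHEELSIZE	256		/* must be a power of two */
#define WHEELMASK	(WHEELSIZE - 1)

static int pending[WHEELSIZE];		/* 1 = bucket holds a callout */

/*
 * Simplified callout_tickstofirst(): scan at most `limit' buckets
 * ahead of curticks and return how far away the first pending
 * callout is, or `limit' if none is that close.
 */
static int
tickstofirst(int curticks, int limit)
{
	int skip;

	for (skip = 1; skip < limit; skip++)
		if (pending[(curticks + skip) & WHEELMASK])
			break;
	return (skip);
}

int
main(void)
{
	pending[(100 + 70) & WHEELMASK] = 1;	/* callout 70 ticks out */
	printf("limit 250 -> %d\n", tickstofirst(100, 250));	/* 70 */
	printf("limit  18 -> %d\n", tickstofirst(100, 18));	/* 18 */
	return (0);
}

A tighter limit just makes an idle CPU wake sooner even when no callout is due, which is exactly what keeps tc_windup() running often enough on the first CPU.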

sys/sys/callout.h

@@ -96,7 +96,7 @@ int callout_schedule_on(struct callout *, int, int);
 #define	callout_stop(c)		_callout_stop_safe(c, 0)
 int	_callout_stop_safe(struct callout *, int);
 void	callout_tick(void);
-int	callout_tickstofirst(void);
+int	callout_tickstofirst(int limit);
 extern void (*callout_new_inserted)(int cpu, int ticks);
 
 #endif

sys/sys/timetc.h

@@ -65,11 +65,15 @@ struct timecounter {
 };
 
 extern struct timecounter *timecounter;
+extern int tc_min_ticktock_freq; /*
+				  * Minimal tc_ticktock() call frequency,
+				  * required to handle counter wraps.
+				  */
 
 u_int64_t tc_getfrequency(void);
 void	tc_init(struct timecounter *tc);
 void	tc_setclock(struct timespec *ts);
-void	tc_ticktock(void);
+void	tc_ticktock(int cnt);
 void	cpu_tick_calibration(void);
 
 #ifdef SYSCTL_DECL