better integrate cyclic module with clocksource/eventtimer subsystem

Now, in the case when one-shot timers are used, cyclic events should fire
closer to their scheduled times. As the cyclic subsystem is currently used
only to drive the DTrace profile provider, this is the area where the
change makes a difference.

Reviewed by:	mav (earlier version, a while ago)
X-MFC after:	clocksource/eventtimer subsystem
parent 87065c6765
commit dd7498ae03
@@ -53,7 +53,7 @@ gethrtime(void) {
 	struct timespec ts;
 	hrtime_t nsec;
 
-	getnanouptime(&ts);
+	nanouptime(&ts);
 	nsec = (hrtime_t)ts.tv_sec * NANOSEC + ts.tv_nsec;
 	return (nsec);
 }
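
Note: getnanouptime() returns the uptime cached at the last timecounter
update, so its resolution is roughly one tick, while nanouptime() reads the
timecounter hardware. With this change gethrtime() returns a precise
timestamp rather than one quantised to tick boundaries, which is what lets
events fire closer to their scheduled times. A userland sketch of the same
distinction, using FreeBSD-specific clock IDs (CLOCK_UPTIME_FAST is the
cached flavour, CLOCK_UPTIME the precise one):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    int
    main(void)
    {
        struct timespec fast, precise;

        /* Cached at the last tick, much like getnanouptime()... */
        clock_gettime(CLOCK_UPTIME_FAST, &fast);
        /* ...versus a direct timecounter read, like nanouptime(). */
        clock_gettime(CLOCK_UPTIME, &precise);
        printf("fast:    %jd.%09ld\n", (intmax_t)fast.tv_sec, fast.tv_nsec);
        printf("precise: %jd.%09ld\n", (intmax_t)precise.tv_sec,
            precise.tv_nsec);
        return (0);
    }
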
@@ -341,6 +341,16 @@ static cyc_backend_t cyclic_backend
 
 MALLOC_DEFINE(M_CYCLIC, "cyclic", "Cyclic timer subsystem");
 
+static __inline hrtime_t
+cyc_gethrtime(void)
+{
+	struct bintime bt;
+
+	binuptime(&bt);
+	return ((hrtime_t)bt.sec * NANOSEC +
+	    (((uint64_t)NANOSEC * (uint32_t)(bt.frac >> 32)) >> 32));
+}
+
 /*
  * Returns 1 if the upheap propagated to the root, 0 if it did not. This
  * allows the caller to reprogram the backend only when the root has been
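
The new cyc_gethrtime() converts a bintime, whole seconds plus a 64-bit
binary fraction (frac / 2^64 seconds), to nanoseconds without needing
128-bit arithmetic. A standalone sketch of the same fixed-point step, with
struct bintime re-declared locally for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define NANOSEC 1000000000ULL

    /* Local stand-in for the kernel's struct bintime: whole seconds plus
     * a 64-bit binary fraction of a second (frac / 2^64 seconds). */
    struct bintime { int64_t sec; uint64_t frac; };

    /* The same trick cyc_gethrtime() uses: scale only the top 32 bits of
     * the fraction by 1e9, then shift back down by 32.  The intermediate
     * product stays below 2^62 (1e9 < 2^30, truncated fraction < 2^32),
     * so 64-bit arithmetic cannot overflow; the discarded low 32 bits
     * cost at most about a quarter of a nanosecond. */
    static uint64_t
    bintime_frac_to_nsec(const struct bintime *bt)
    {
        return (((uint64_t)NANOSEC * (uint32_t)(bt->frac >> 32)) >> 32);
    }

    int
    main(void)
    {
        struct bintime bt = { 0, 1ULL << 63 };	/* exactly 0.5 s */

        printf("%ju ns (expect 500000000)\n",
            (uintmax_t)bintime_frac_to_nsec(&bt));
        return (0);
    }
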
@@ -507,7 +517,7 @@ cyclic_fire(cpu_t *c)
 	cyc_index_t *heap = cpu->cyp_heap;
 	cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics;
 	void *arg = be->cyb_arg;
-	hrtime_t now = gethrtime();
+	hrtime_t now = cyc_gethrtime();
 	hrtime_t exp;
 
 	if (cpu->cyp_nelems == 0) {
@@ -687,7 +697,7 @@ cyclic_add_xcall(cyc_xcallarg_t *arg)
 		 * If a start time hasn't been explicitly specified, we'll
 		 * start on the next interval boundary.
 		 */
-		cyclic->cy_expire = (gethrtime() / cyclic->cy_interval + 1) *
+		cyclic->cy_expire = (cyc_gethrtime() / cyclic->cy_interval + 1) *
 		    cyclic->cy_interval;
 	} else {
 		cyclic->cy_expire = when->cyt_when;
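
The expression (now / interval + 1) * interval rounds the current time up
to the next interval boundary, so an unanchored cyclic starts on a clean
multiple of its period. For example, with now = 2500 ns and an interval of
1000 ns the first expiry lands at 3000 ns; a time that is already an exact
multiple still moves strictly forward (3000 becomes 4000).
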
@@ -30,6 +30,7 @@ static void enable(cyb_arg_t);
 static void disable(cyb_arg_t);
 static void reprogram(cyb_arg_t, hrtime_t);
 static void xcall(cyb_arg_t, cpu_t *, cyc_func_t, void *);
+static void cyclic_clock(struct trapframe *frame);
 
 static cyc_backend_t be = {
 	NULL,		/* cyb_configure */
@@ -45,6 +46,7 @@ static void
 cyclic_ap_start(void *dummy)
 {
 	/* Initialise the rest of the CPUs. */
+	cyclic_clock_func = cyclic_clock;
 	cyclic_mp_init();
 }
 
@@ -63,18 +65,10 @@ cyclic_machdep_init(void)
 static void
 cyclic_machdep_uninit(void)
 {
-	int i;
-
-	for (i = 0; i <= mp_maxid; i++)
-		/* Reset the cyclic clock callback hook. */
-		cyclic_clock_func[i] = NULL;
-
 	/* De-register the cyclic backend. */
 	cyclic_uninit();
 }
 
-static hrtime_t exp_due[MAXCPU];
-
 /*
  * This function is the one registered by the machine dependent
  * initialiser as the callback for high speed timer events.
@@ -84,7 +78,7 @@ cyclic_clock(struct trapframe *frame)
 {
 	cpu_t *c = &solaris_cpu[curcpu];
 
-	if (c->cpu_cyclic != NULL && gethrtime() >= exp_due[curcpu]) {
+	if (c->cpu_cyclic != NULL) {
 		if (TRAPF_USERMODE(frame)) {
 			c->cpu_profile_pc = 0;
 			c->cpu_profile_upc = TRAPF_PC(frame);
@@ -102,26 +96,34 @@ cyclic_clock(struct trapframe *frame)
 	}
 }
 
-static void enable(cyb_arg_t arg)
+static void
+enable(cyb_arg_t arg __unused)
 {
-	/* Register the cyclic clock callback function. */
-	cyclic_clock_func[curcpu] = cyclic_clock;
+
 }
 
-static void disable(cyb_arg_t arg)
+static void
+disable(cyb_arg_t arg __unused)
 {
-	/* Reset the cyclic clock callback function. */
-	cyclic_clock_func[curcpu] = NULL;
+
 }
 
-static void reprogram(cyb_arg_t arg, hrtime_t exp)
+static void
+reprogram(cyb_arg_t arg __unused, hrtime_t exp)
 {
-	exp_due[curcpu] = exp;
+	struct bintime bt;
+	struct timespec ts;
+
+	ts.tv_sec = exp / 1000000000;
+	ts.tv_nsec = exp % 1000000000;
+	timespec2bintime(&ts, &bt);
+	clocksource_cyc_set(&bt);
 }
 
-static void xcall(cyb_arg_t arg, cpu_t *c, cyc_func_t func, void *param)
+static void xcall(cyb_arg_t arg __unused, cpu_t *c, cyc_func_t func,
+    void *param)
 {
 
-	smp_rendezvous_cpus((cpumask_t) (1 << c->cpuid),
+	smp_rendezvous_cpus((cpumask_t)1 << c->cpuid,
 	    smp_no_rendevous_barrier, func, smp_no_rendevous_barrier, param);
 }
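
reprogram() goes the other way around: an absolute hrtime_t deadline in
nanoseconds is split into a timespec and scaled into a bintime before being
handed to clocksource_cyc_set(). A userland restatement of that conversion;
the constant 18446744073 is floor(2^64 / 10^9), the same one FreeBSD's
timespec2bintime() uses:

    #include <stdint.h>
    #include <stdio.h>

    #define NANOSEC 1000000000ULL

    /* Local stand-in for the kernel type, for illustration only. */
    struct bintime { int64_t sec; uint64_t frac; };

    /* Split an absolute nanosecond deadline into whole seconds plus a
     * remainder, then scale the remainder (< 10^9) into a 2^64 binary
     * fraction; the product stays just under 2^64. */
    static void
    nsec_to_bintime(uint64_t nsec, struct bintime *bt)
    {
        bt->sec = nsec / NANOSEC;
        bt->frac = (nsec % NANOSEC) * (uint64_t)18446744073ULL;
    }

    int
    main(void)
    {
        struct bintime bt;

        nsec_to_bintime(1500000000, &bt);	/* 1.5 s */
        printf("sec %jd frac %#jx (expect frac ~ 2^63)\n",
            (intmax_t)bt.sec, (uintmax_t)bt.frac);
        return (0);
    }
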
@@ -56,7 +56,7 @@ __FBSDID("$FreeBSD$");
 
 #ifdef KDTRACE_HOOKS
 #include <sys/dtrace_bsd.h>
-cyclic_clock_func_t	cyclic_clock_func[MAXCPU];
+cyclic_clock_func_t	cyclic_clock_func = NULL;
 #endif
 
 int			cpu_disable_deep_sleep = 0; /* Timer dies in C3. */
@@ -128,6 +128,9 @@ struct pcpu_state {
 	struct bintime	nexthard;	/* Next hardlock() event. */
 	struct bintime	nextstat;	/* Next statclock() event. */
 	struct bintime	nextprof;	/* Next profclock() event. */
+#ifdef KDTRACE_HOOKS
+	struct bintime	nextcyc;	/* Next OpenSolaris cyclics event. */
+#endif
 	int		ipi;		/* This CPU needs IPI. */
 	int		idle;		/* This CPU is in idle mode. */
 };
@@ -190,17 +193,10 @@ handleevents(struct bintime *now, int fake)
 		usermode = TRAPF_USERMODE(frame);
 		pc = TRAPF_PC(frame);
 	}
-#ifdef KDTRACE_HOOKS
-	/*
-	 * If the DTrace hooks are configured and a callback function
-	 * has been registered, then call it to process the high speed
-	 * timers.
-	 */
-	if (!fake && cyclic_clock_func[curcpu] != NULL)
-		(*cyclic_clock_func[curcpu])(frame);
-#endif
 
 	runs = 0;
 	state = DPCPU_PTR(timerstate);
 
 	while (bintime_cmp(now, &state->nexthard, >=)) {
 		bintime_add(&state->nexthard, &hardperiod);
 		runs++;
@@ -224,6 +220,16 @@ handleevents(struct bintime *now, int fake)
 		}
 	} else
 		state->nextprof = state->nextstat;
+
+#ifdef KDTRACE_HOOKS
+	if (fake == 0 && cyclic_clock_func != NULL &&
+	    state->nextcyc.sec != -1 &&
+	    bintime_cmp(now, &state->nextcyc, >=)) {
+		state->nextcyc.sec = -1;
+		(*cyclic_clock_func)(frame);
+	}
+#endif
+
 	getnextcpuevent(&t, 0);
 	if (fake == 2) {
 		state->nextevent = t;
@@ -263,10 +269,13 @@ getnextcpuevent(struct bintime *event, int idle)
 	} else { /* If CPU is active - handle all types of events. */
 		if (bintime_cmp(event, &state->nextstat, >))
 			*event = state->nextstat;
-		if (profiling &&
-		    bintime_cmp(event, &state->nextprof, >))
+		if (profiling && bintime_cmp(event, &state->nextprof, >))
 			*event = state->nextprof;
 	}
+#ifdef KDTRACE_HOOKS
+	if (state->nextcyc.sec != -1 && bintime_cmp(event, &state->nextcyc, >))
+		*event = state->nextcyc;
+#endif
 }
 
 /*
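
Two details in these kern_clocksource.c hunks are easy to miss. First,
nextcyc.sec == -1 doubles as the "no cyclic event pending" sentinel: it is
set at CPU initialisation (see the cpu_initclocks_bsp() hunk below) and
again just before the callback is dispatched, and both handleevents() and
getnextcpuevent() test it before honouring the deadline. Second,
bintime_cmp() compares the (sec, frac) pair lexicographically under a
caller-supplied operator; restated here from sys/time.h for reference:

    #include <stdint.h>

    struct bintime { int64_t sec; uint64_t frac; };

    /* Restated from sys/time.h: compare seconds first, and fall back to
     * the binary fractions only when the seconds are equal. */
    #define bintime_cmp(a, b, cmp)			\
        (((a)->sec == (b)->sec) ?			\
            ((a)->frac cmp (b)->frac) :			\
            ((a)->sec cmp (b)->sec))
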
@@ -590,6 +599,9 @@ cpu_initclocks_bsp(void)
 	CPU_FOREACH(cpu) {
 		state = DPCPU_ID_PTR(cpu, timerstate);
 		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
+#ifdef KDTRACE_HOOKS
+		state->nextcyc.sec = -1;
+#endif
 	}
 #ifdef SMP
 	callout_new_inserted = cpu_new_callout;
@@ -784,6 +796,43 @@ cpu_activeclock(void)
 	spinlock_exit();
 }
 
+#ifdef KDTRACE_HOOKS
+void
+clocksource_cyc_set(const struct bintime *t)
+{
+	struct bintime now;
+	struct pcpu_state *state;
+
+	state = DPCPU_PTR(timerstate);
+	if (periodic)
+		now = state->now;
+	else
+		binuptime(&now);
+
+	CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x",
+	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
+	    (unsigned int)(now.frac & 0xffffffff));
+	CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x",
+	    curcpu, t->sec, (unsigned int)(t->frac >> 32),
+	    (unsigned int)(t->frac & 0xffffffff));
+
+	ET_HW_LOCK(state);
+	if (bintime_cmp(t, &state->nextcyc, ==)) {
+		ET_HW_UNLOCK(state);
+		return;
+	}
+	state->nextcyc = *t;
+	if (bintime_cmp(&state->nextcyc, &state->nextevent, >=)) {
+		ET_HW_UNLOCK(state);
+		return;
+	}
+	state->nextevent = state->nextcyc;
+	if (!periodic)
+		loadtimer(&now, 0);
+	ET_HW_UNLOCK(state);
+}
+#endif
+
 #ifdef SMP
 static void
 cpu_new_callout(int cpu, int ticks)
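
clocksource_cyc_set() only ever pulls the next hardware event forward: an
unchanged deadline is a no-op, a deadline at or after the already-programmed
event is merely recorded, and only an earlier one causes loadtimer() to
reprogram the one-shot timer. A toy model of that policy, with plain
nanosecond counters standing in for bintimes and no locking:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy per-CPU state: a pending cyclic deadline (UINT64_MAX = none)
     * and the next programmed hardware event. */
    struct toy_state {
        uint64_t nextcyc;
        uint64_t nextevent;
    };

    static void
    toy_cyc_set(struct toy_state *s, uint64_t t)
    {
        if (t == s->nextcyc)
            return;			/* already scheduled */
        s->nextcyc = t;
        if (t >= s->nextevent)
            return;			/* an earlier event already exists */
        s->nextevent = t;		/* pull the hardware event forward */
    }

    int
    main(void)
    {
        struct toy_state s = { UINT64_MAX, 5000 };

        toy_cyc_set(&s, 7000);	/* later than nextevent: recorded only */
        printf("nextevent %ju\n", (uintmax_t)s.nextevent);	/* 5000 */
        toy_cyc_set(&s, 3000);	/* earlier: timer must be reprogrammed */
        printf("nextevent %ju\n", (uintmax_t)s.nextevent);	/* 3000 */
        return (0);
    }
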
@@ -44,14 +44,9 @@ struct reg;
  * subsystem into the appropriate timer interrupt.
  */
 typedef void (*cyclic_clock_func_t)(struct trapframe *);
+extern cyclic_clock_func_t	cyclic_clock_func;
 
-/*
- * These external variables are actually machine-dependent, so
- * they might not actually exist.
- *
- * Defining them here avoids a proliferation of header files.
- */
-extern cyclic_clock_func_t	cyclic_clock_func[];
+void clocksource_cyc_set(const struct bintime *t);
 
 /*
  * The dtrace module handles traps that occur during a DTrace probe.
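
Taken together, the final hunk (in the header included above as
<sys/dtrace_bsd.h>) turns the hook from a per-CPU array toggled by enable()
and disable() into a single function pointer installed once in
cyclic_ap_start(); per-CPU gating is now handled by the nextcyc sentinel in
each CPU's pcpu_state rather than by nulling individual callback slots.
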