Adapt sparc64 and sun4v timer code for the new event timers infrastructure.

Reviewed by:	marius@
Alexander Motin 2010-07-29 12:08:46 +00:00
parent 753358d725
commit 6c8dd81fa9
19 changed files with 261 additions and 118 deletions
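Both ports below follow the same pattern: the machine-dependent (S)TICK interrupt no longer calls hardclock()/statclock() itself, but registers a struct eventtimer with the MI event timers code (kern/kern_clocksource.c, now built as standard) and forwards every interrupt into the registered callback, while the MI code programs one-shot or periodic intervals through et_start()/et_stop(). A minimal sketch of that contract, assuming nothing beyond the symbols visible in this diff:

/*
 * Sketch of the registration contract both back ends implement below;
 * this is not the committed code, only the shape of it.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeet.h>
#include <machine/frame.h>

extern u_long tick_freq;                        /* counter frequency in Hz */

static struct eventtimer tick_et;

static int      tick_et_start(struct eventtimer *, struct bintime *,
                    struct bintime *);          /* arm one-shot or periodic */
static int      tick_et_stop(struct eventtimer *);     /* disarm */

void
cpu_initclocks(void)
{

        tick_et.et_name = "tick";
        tick_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
            ET_FLAGS_PERCPU;                    /* one timer instance per CPU */
        tick_et.et_frequency = tick_freq;
        tick_et.et_start = tick_et_start;
        tick_et.et_stop = tick_et_stop;
        et_register(&tick_et);                  /* hand the timer to the MI code */
        cpu_initclocks_bsp();                   /* MI code starts the BSP clocks */
}

/* The interrupt handler only forwards into the MI callback: */
static void
tick_intr(struct trapframe *tf)
{

        if (tick_et.et_active)
                tick_et.et_event_cb(&tick_et, tick_et.et_arg);
}

Application processors join in later through cpu_initclocks_ap(), which the mp_machdep.c hunks add to cpu_mp_bootstrap().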

View File

@ -59,6 +59,7 @@ dev/syscons/scterm-teken.c optional sc
dev/syscons/scvtb.c optional sc
dev/uart/uart_cpu_sparc64.c optional uart
dev/uart/uart_kbd_sun.c optional uart sc
kern/kern_clocksource.c standard
kern/syscalls.c optional ktr
libkern/ffs.c standard
libkern/ffsl.c standard

View File

@ -29,6 +29,7 @@ dev/ofw/openfirm.c standard
dev/ofw/openfirmio.c standard
dev/ofw/openpromio.c standard
dev/uart/uart_cpu_sparc64.c optional uart
kern/kern_clocksource.c standard
kern/syscalls.c optional ktr
libkern/ffs.c standard
libkern/ffsl.c standard

View File

@ -46,6 +46,8 @@
#define PIL_AST 4 /* ast ipi */
#define PIL_STOP 5 /* stop cpu ipi */
#define PIL_PREEMPT 6 /* preempt idle thread cpu ipi */
#define PIL_HARDCLOCK 7 /* hardclock broadcast */
#define PIL_STATCLOCK 8 /* statclock broadcast */
#define PIL_FILTER 12 /* filter interrupts */
#define PIL_FAST 13 /* fast interrupts */
#define PIL_TICK 14 /* tick interrupts */

View File

@ -53,6 +53,7 @@ struct pmap;
vm_offset_t pc_addr; \
u_long pc_tickref; \
u_long pc_tickadj; \
u_long pc_tickincrement; \
u_int pc_clock; \
u_int pc_impl; \
u_int pc_mid; \

View File

@ -58,6 +58,8 @@
#define IPI_AST PIL_AST
#define IPI_RENDEZVOUS PIL_RENDEZVOUS
#define IPI_PREEMPT PIL_PREEMPT
#define IPI_HARDCLOCK PIL_HARDCLOCK
#define IPI_STATCLOCK PIL_STATCLOCK
#define IPI_STOP PIL_STOP
#define IPI_STOP_HARD PIL_STOP

View File

@ -32,7 +32,6 @@
extern u_int hardclock_use_stick;
void tick_clear(u_int cpu_impl);
void tick_start(void);
void tick_stop(u_int cpu_impl);
#endif

View File

@ -81,14 +81,3 @@ delay_tick(int usec)
sched_unpin();
}
void
cpu_startprofclock(void)
{
}
void
cpu_stopprofclock(void)
{
}

View File

@ -96,7 +96,9 @@ static const char *const pil_names[] = {
"ast", /* PIL_AST */
"stop", /* PIL_STOP */
"preempt", /* PIL_PREEMPT */
"stray", "stray", "stray", "stray", "stray",
"hardclock", /* PIL_HARDCLOCK */
"statclock", /* PIL_STATCLOCK */
"stray", "stray", "stray",
"filter", /* PIL_FILTER */
"fast", /* PIL_FAST */
"tick", /* PIL_TICK */

View File

@ -96,7 +96,9 @@ __FBSDID("$FreeBSD$");
#define SUNW_STOPSELF "SUNW,stop-self"
static ih_func_t cpu_ipi_ast;
static ih_func_t cpu_ipi_hardclock;
static ih_func_t cpu_ipi_preempt;
static ih_func_t cpu_ipi_statclock;
static ih_func_t cpu_ipi_stop;
/*
@ -279,6 +281,8 @@ cpu_mp_start(void)
-1, NULL, NULL);
intr_setup(PIL_STOP, cpu_ipi_stop, -1, NULL, NULL);
intr_setup(PIL_PREEMPT, cpu_ipi_preempt, -1, NULL, NULL);
intr_setup(PIL_HARDCLOCK, cpu_ipi_hardclock, -1, NULL, NULL);
intr_setup(PIL_STATCLOCK, cpu_ipi_statclock, -1, NULL, NULL);
cpuid_to_mid[curcpu] = PCPU_GET(mid);
@ -437,9 +441,6 @@ cpu_mp_bootstrap(struct pcpu *pc)
wrpr(pil, 0, PIL_TICK);
wrpr(pstate, 0, PSTATE_KERNEL);
/* Start the (S)TICK interrupts. */
tick_start();
smp_cpus++;
KASSERT(curthread != NULL, ("%s: curthread", __func__));
PCPU_SET(other_cpus, all_cpus & ~(1 << curcpu));
@ -451,6 +452,9 @@ cpu_mp_bootstrap(struct pcpu *pc)
while (csa->csa_count != 0)
;
/* Start per-CPU event timers. */
cpu_initclocks_ap();
/* Ok, now enter the scheduler. */
sched_throw(NULL);
}
@ -507,6 +511,20 @@ cpu_ipi_preempt(struct trapframe *tf)
sched_preempt(curthread);
}
static void
cpu_ipi_hardclock(struct trapframe *tf)
{
hardclockintr(tf);
}
static void
cpu_ipi_statclock(struct trapframe *tf)
{
statclockintr(tf);
}
static void
spitfire_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
{
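The two handlers added above give the MI clock code a way to run hardclock/statclock on remote CPUs: it sends IPI_HARDCLOCK or IPI_STATCLOCK, the target CPU traps at the matching PIL (wired up in cpu_mp_start() above), and the thin handler hands that CPU's trapframe to the MI entry point. A hedged sketch of the sender side; the helper name is made up, and the real callers live in the MI event timers code rather than in this file:

/*
 * Illustrative only: broadcasting clock events to the other CPUs.
 */
static void
clock_broadcast_sketch(void)
{

        ipi_all_but_self(IPI_HARDCLOCK);        /* each target runs cpu_ipi_hardclock() */
        ipi_all_but_self(IPI_STATCLOCK);        /* each target runs cpu_ipi_statclock() */
}

Keeping cpu_ipi_hardclock()/cpu_ipi_statclock() as plain wrappers around hardclockintr()/statclockintr() leaves all broadcast policy in the MI code.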

View File

@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
#include <dev/ofw/openfirm.h>
@ -48,9 +49,7 @@ __FBSDID("$FreeBSD$");
#include <machine/tick.h>
#include <machine/ver.h>
/* 10000 ticks proved okay for 500MHz. */
#define TICK_GRACE(clock) ((clock) / 1000000 * 2 * 10)
#define STICK_QUALITY -500
#define TICK_QUALITY_MP 10
#define TICK_QUALITY_UP 1000
@ -76,20 +75,25 @@ u_int hardclock_use_stick = 0;
SYSCTL_INT(_machdep_tick, OID_AUTO, hardclock_use_stick, CTLFLAG_RD,
&hardclock_use_stick, 0, "hardclock uses STICK instead of TICK timer");
static struct timecounter stick_tc;
static struct timecounter tick_tc;
static u_long tick_increment;
static struct eventtimer tick_et;
static uint64_t tick_cputicks(void);
static timecounter_get_t stick_get_timecount;
static timecounter_get_t tick_get_timecount_up;
#ifdef SMP
static timecounter_get_t tick_get_timecount_mp;
#endif
static void tick_hardclock(struct trapframe *tf);
static void tick_hardclock_bbwar(struct trapframe *tf);
static int tick_et_start(struct eventtimer *et,
struct bintime *first, struct bintime *period);
static int tick_et_stop(struct eventtimer *et);
static void tick_intr(struct trapframe *tf);
static void tick_intr_bbwar(struct trapframe *tf);
static inline void tick_hardclock_common(struct trapframe *tf, u_long tick,
u_long adj);
static inline void tick_process(struct trapframe *tf);
static void stick_hardclock(struct trapframe *tf);
static void stick_intr(struct trapframe *tf);
static uint64_t
tick_cputicks(void)
@ -101,39 +105,34 @@ tick_cputicks(void)
void
cpu_initclocks(void)
{
uint32_t clock;
stathz = hz;
uint32_t clock, sclock;
clock = PCPU_GET(clock);
sclock = 0;
if (PCPU_GET(impl) == CPU_IMPL_SPARC64V ||
PCPU_GET(impl) >= CPU_IMPL_ULTRASPARCIII) {
if (OF_getprop(OF_parent(PCPU_GET(node)), "stick-frequency",
&sclock, sizeof(sclock)) == -1) {
panic("%s: could not determine STICK frequency",
__func__);
}
}
/*
* Given that the STICK timers typically are driven at rather low
* frequencies they shouldn't be used except when really necessary.
*/
if (hardclock_use_stick != 0) {
if (OF_getprop(OF_parent(PCPU_GET(node)), "stick-frequency",
&clock, sizeof(clock)) == -1)
panic("%s: could not determine STICK frequency", __func__);
intr_setup(PIL_TICK, stick_hardclock, -1, NULL, NULL);
intr_setup(PIL_TICK, stick_intr, -1, NULL, NULL);
/*
* We don't provide a CPU ticker as long as the frequency
* supplied isn't actually used per-CPU.
*/
} else {
clock = PCPU_GET(clock);
intr_setup(PIL_TICK, PCPU_GET(impl) >= CPU_IMPL_ULTRASPARCI &&
PCPU_GET(impl) < CPU_IMPL_ULTRASPARCIII ?
tick_hardclock_bbwar : tick_hardclock, -1, NULL, NULL);
tick_intr_bbwar : tick_intr, -1, NULL, NULL);
set_cputicker(tick_cputicks, clock, 0);
}
tick_increment = clock / hz;
/*
* Avoid stopping of hardclock in terms of a lost (S)TICK interrupt
* by ensuring that the (S)TICK period is at least TICK_GRACE ticks.
*/
if (tick_increment < TICK_GRACE(clock))
panic("%s: HZ too high, decrease to at least %d",
__func__, clock / TICK_GRACE(clock));
tick_start();
/*
* Initialize the TICK-based timecounter. This must not happen
@ -142,7 +141,7 @@ cpu_initclocks(void)
tick_tc.tc_get_timecount = tick_get_timecount_up;
tick_tc.tc_poll_pps = NULL;
tick_tc.tc_counter_mask = ~0u;
tick_tc.tc_frequency = PCPU_GET(clock);
tick_tc.tc_frequency = clock;
tick_tc.tc_name = "tick";
tick_tc.tc_quality = TICK_QUALITY_UP;
tick_tc.tc_priv = NULL;
@ -161,19 +160,46 @@ cpu_initclocks(void)
}
#endif
tc_init(&tick_tc);
if (sclock != 0) {
stick_tc.tc_get_timecount = stick_get_timecount;
stick_tc.tc_poll_pps = NULL;
stick_tc.tc_counter_mask = ~0u;
stick_tc.tc_frequency = sclock;
stick_tc.tc_name = "stick";
stick_tc.tc_quality = STICK_QUALITY;
stick_tc.tc_priv = NULL;
tc_init(&stick_tc);
}
tick_et.et_name = hardclock_use_stick ? "stick" : "tick";
tick_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
ET_FLAGS_PERCPU;
tick_et.et_quality = 1000;
tick_et.et_frequency = hardclock_use_stick ? sclock : clock;
tick_et.et_min_period.sec = 0;
tick_et.et_min_period.frac = 0x00010000LLU << 32; /* To be safe. */
tick_et.et_max_period.sec = 3600 * 24; /* No practical limit. */
tick_et.et_max_period.frac = 0;
tick_et.et_start = tick_et_start;
tick_et.et_stop = tick_et_stop;
tick_et.et_priv = NULL;
et_register(&tick_et);
cpu_initclocks_bsp();
}
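The et_min_period and et_max_period fields are struct bintime values: whole seconds plus a 64-bit binary fraction of a second. The "to be safe" minimum of 0x00010000LLU << 32 is 2^48, i.e. 2^-16 of a second or roughly 15.3 microseconds, while the 3600 * 24 second maximum simply records that the 63-bit comparator imposes no practical upper bound. A worked decode of the constant (plain userland C, not part of the commit):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t frac = 0x00010000ULL << 32;                    /* == 2^48 */
        double sec = (double)frac / 18446744073709551616.0;    /* / 2^64 */

        printf("et_min_period = %.3f us\n", sec * 1e6);         /* ~15.259 us */
        return (0);
}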
static inline void
tick_process(struct trapframe *tf)
{
struct trapframe *oldframe;
struct thread *td;
if (curcpu == 0)
hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
else
hardclock_cpu(TRAPF_USERMODE(tf));
if (profprocs != 0)
profclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
statclock(TRAPF_USERMODE(tf));
if (tick_et.et_active) {
td = curthread;
oldframe = td->td_intr_frame;
td->td_intr_frame = tf;
tick_et.et_event_cb(&tick_et, tick_et.et_arg);
td->td_intr_frame = oldframe;
}
}
/*
@ -184,48 +210,60 @@ tick_process(struct trapframe *tf)
*/
static void
tick_hardclock(struct trapframe *tf)
tick_intr(struct trapframe *tf)
{
u_long adj, tick;
u_long adj, tick, tick_increment;
register_t s;
critical_enter();
adj = PCPU_GET(tickadj);
tick_increment = PCPU_GET(tickincrement);
s = intr_disable();
tick = rd(tick);
wr(tick_cmpr, tick + tick_increment - adj, 0);
if (tick_increment != 0)
wr(tick_cmpr, tick + tick_increment - adj, 0);
else
wr(tick_cmpr, 1L << 63, 0);
intr_restore(s);
tick_hardclock_common(tf, tick, adj);
critical_exit();
}
static void
tick_hardclock_bbwar(struct trapframe *tf)
tick_intr_bbwar(struct trapframe *tf)
{
u_long adj, tick;
u_long adj, tick, tick_increment;
register_t s;
critical_enter();
adj = PCPU_GET(tickadj);
tick_increment = PCPU_GET(tickincrement);
s = intr_disable();
tick = rd(tick);
wrtickcmpr(tick + tick_increment - adj, 0);
if (tick_increment != 0)
wrtickcmpr(tick + tick_increment - adj, 0);
else
wrtickcmpr(1L << 63, 0);
intr_restore(s);
tick_hardclock_common(tf, tick, adj);
critical_exit();
}
static void
stick_hardclock(struct trapframe *tf)
stick_intr(struct trapframe *tf)
{
u_long adj, stick;
u_long adj, stick, tick_increment;
register_t s;
critical_enter();
adj = PCPU_GET(tickadj);
tick_increment = PCPU_GET(tickincrement);
s = intr_disable();
stick = rdstick();
wrstickcmpr(stick + tick_increment - adj, 0);
if (tick_increment != 0)
wrstickcmpr(stick + tick_increment - adj, 0);
else
wrstickcmpr(1L << 63, 0);
intr_restore(s);
tick_hardclock_common(tf, stick, adj);
critical_exit();
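In the reworked handlers above, pc_tickincrement holds the period that tick_et_start() last programmed and is zero when the timer was started in one-shot mode or has been stopped. With a zero increment there is nothing to rearm after the event fires, so the handler writes 1L << 63 to the compare register, parking the comparator until the next et_start() call; the assumption here is that the top bit of TICK_CMPR/STICK_CMPR acts as an interrupt-disable bit on these CPUs, so the write also masks further compare interrupts. The decision, condensed into a sketch:

/*
 * Sketch of the rearm logic shared by tick_intr(), tick_intr_bbwar() and
 * stick_intr() above; the bit-63 behaviour is an assumption, see the text.
 */
static void
rearm_sketch(u_long now, u_long increment, u_long adj)
{

        if (increment != 0)
                wrtickcmpr(now + increment - adj, 0);   /* periodic: next edge */
        else
                wrtickcmpr(1UL << 63, 0);               /* one-shot done: park it */
}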
@ -234,10 +272,11 @@ stick_hardclock(struct trapframe *tf)
static inline void
tick_hardclock_common(struct trapframe *tf, u_long tick, u_long adj)
{
u_long ref;
u_long ref, tick_increment;
long delta;
int count;
tick_increment = PCPU_GET(tickincrement);
ref = PCPU_GET(tickref);
delta = tick - ref;
count = 0;
@ -248,6 +287,8 @@ tick_hardclock_common(struct trapframe *tf, u_long tick, u_long adj)
if (adj != 0)
adjust_ticks++;
count++;
if (tick_increment == 0)
break;
}
if (count > 0) {
adjust_missed += count - 1;
@ -265,6 +306,13 @@ tick_hardclock_common(struct trapframe *tf, u_long tick, u_long adj)
PCPU_SET(tickadj, adj);
}
static u_int
stick_get_timecount(struct timecounter *tc)
{
return ((u_int)rdstick());
}
static u_int
tick_get_timecount_up(struct timecounter *tc)
{
@ -294,12 +342,28 @@ tick_get_timecount_mp(struct timecounter *tc)
}
#endif
void
tick_start(void)
static int
tick_et_start(struct eventtimer *et,
struct bintime *first, struct bintime *period)
{
u_long fdiv, div;
u_long base;
register_t s;
if (period != NULL) {
div = (tick_et.et_frequency * (period->frac >> 32)) >> 32;
if (period->sec != 0)
div += tick_et.et_frequency * period->sec;
} else
div = 0;
if (first != NULL) {
fdiv = (tick_et.et_frequency * (first->frac >> 32)) >> 32;
if (first->sec != 0)
fdiv += tick_et.et_frequency * first->sec;
} else
fdiv = div;
PCPU_SET(tickincrement, div);
/*
* Try to make the (S)TICK interrupts as synchronously as possible
* on all CPUs to avoid inaccuracies for migrating processes. Leave
@ -312,14 +376,25 @@ tick_start(void)
base = rdstick();
else
base = rd(tick);
base = roundup(base, tick_increment);
if (div != 0)
base = roundup(base, div);
PCPU_SET(tickref, base);
if (hardclock_use_stick != 0)
wrstickcmpr(base + tick_increment, 0);
wrstickcmpr(base + fdiv, 0);
else
wrtickcmpr(base + tick_increment, 0);
wrtickcmpr(base + fdiv, 0);
intr_restore(s);
critical_exit();
return (0);
}
static int
tick_et_stop(struct eventtimer *et)
{
PCPU_SET(tickincrement, 0);
tick_stop(PCPU_GET(impl));
return (0);
}
void
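tick_et_start() converts the MI layer's struct bintime intervals into counter cycles as (et_frequency * (frac >> 32)) >> 32 plus et_frequency * sec. Using only the top 32 bits of the fraction keeps the whole computation in 64-bit arithmetic: the result matches frequency * frac / 2^64 to within frequency / 2^32 cycles, i.e. less than one cycle for any counter below about 4.3 GHz. The same conversion as a standalone helper (hypothetical name, mirroring the committed expression):

#include <stdint.h>

struct bintime_sketch {                 /* stands in for struct bintime */
        int64_t         sec;            /* whole seconds */
        uint64_t        frac;           /* fraction of a second, scaled by 2^64 */
};

static uint64_t
bt_to_cycles(uint64_t freq, const struct bintime_sketch *bt)
{
        uint64_t cycles;

        cycles = (freq * (bt->frac >> 32)) >> 32;       /* fractional part */
        if (bt->sec != 0)
                cycles += freq * (uint64_t)bt->sec;     /* whole seconds */
        return (cycles);
}

For example, a 1000 Hz period at a 12 MHz counter frequency comes out as 12000 cycles, which is the value that ends up in pc_tickincrement and in the comparator writes above.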

View File

@ -29,7 +29,6 @@
#ifndef _MACHINE_CLOCK_H_
#define _MACHINE_CLOCK_H_
extern u_long tick_increment;
extern u_long tick_freq;
extern u_long tick_MHz;

View File

@ -46,6 +46,8 @@
#define PIL_AST 4 /* ast ipi */
#define PIL_STOP 5 /* stop cpu ipi */
#define PIL_PREEMPT 6 /* preempt idle thread cpu ipi */
#define PIL_HARDCLOCK 7 /* hardclock broadcast */
#define PIL_STATCLOCK 8 /* statclock broadcast */
#define PIL_FAST 13 /* fast interrupts */
#define PIL_TICK 14

View File

@ -73,6 +73,7 @@ struct pmap;
uint64_t pc_nrq_size; \
u_long pc_tickref; \
u_long pc_tickadj; \
u_long pc_tickincrement; \
struct rwindow pc_kwbuf; \
u_long pc_kwbuf_sp; \
u_int pc_kwbuf_full; \

View File

@ -46,7 +46,8 @@
#define IPI_STOP PIL_STOP
#define IPI_STOP_HARD PIL_STOP
#define IPI_PREEMPT PIL_PREEMPT
#define IPI_HARDCLOCK PIL_HARDCLOCK
#define IPI_STATCLOCK PIL_STATCLOCK
#define IPI_RETRIES 5000
@ -81,6 +82,8 @@ void cpu_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2);
void cpu_ipi_ast(struct trapframe *tf);
void cpu_ipi_stop(struct trapframe *tf);
void cpu_ipi_preempt(struct trapframe *tf);
void cpu_ipi_hardclock(struct trapframe *tf);
void cpu_ipi_statclock(struct trapframe *tf);
void ipi_selected(u_int cpus, u_int ipi);
void ipi_all_but_self(u_int ipi);

View File

@ -30,7 +30,5 @@
#define _MACHINE_TICK_H_
void tick_init(u_long clock);
void tick_start(void);
void tick_stop(void);
#endif

View File

@ -30,7 +30,6 @@
#include <sys/systm.h>
#include <machine/clock.h>
u_long tick_increment;
u_long tick_freq;
u_long tick_MHz;
@ -46,13 +45,3 @@ DELAY(int n)
while (rd(tick) < end)
;
}
void
cpu_startprofclock(void)
{
}
void
cpu_stopprofclock(void)
{
}

View File

@ -109,7 +109,9 @@ static char *pil_names[] = {
"ast", /* PIL_AST */
"stop", /* PIL_STOP */
"preempt", /* PIL_PREEMPT */
"stray", "stray", "stray", "stray", "stray", "stray",
"hardclock", /* PIL_HARDCLOCK */
"statclock", /* PIL_STATCLOCK */
"stray", "stray", "stray", "stray",
"fast", /* PIL_FAST */
"tick", /* PIL_TICK */
};
@ -262,6 +264,8 @@ intr_init(void)
intr_handlers[PIL_RENDEZVOUS] = (ih_func_t *)smp_rendezvous_action;
intr_handlers[PIL_STOP]= cpu_ipi_stop;
intr_handlers[PIL_PREEMPT]= cpu_ipi_preempt;
intr_handlers[PIL_HARDCLOCK]= cpu_ipi_hardclock;
intr_handlers[PIL_STATCLOCK]= cpu_ipi_statclock;
#endif
mtx_init(&intr_table_lock, "intr table", NULL, MTX_SPIN);
cpu_intrq_alloc();

View File

@ -392,7 +392,6 @@ cpu_mp_bootstrap(struct pcpu *pc)
tte_hash_set_scratchpad_kernel(kernel_pmap->pm_hash);
trap_init();
cpu_intrq_init();
tick_start();
#ifdef TRAP_TRACING
mp_trap_trace_init();
@ -413,6 +412,10 @@ cpu_mp_bootstrap(struct pcpu *pc)
while (csa->csa_count != 0)
;
/* Start per-CPU event timers. */
cpu_initclocks_ap();
/* ok, now enter the scheduler */
sched_throw(NULL);
}
@ -466,6 +469,20 @@ cpu_ipi_preempt(struct trapframe *tf)
sched_preempt(curthread);
}
void
cpu_ipi_hardclock(struct trapframe *tf)
{
hardclockintr(tf);
}
void
cpu_ipi_statclock(struct trapframe *tf)
{
statclockintr(tf);
}
void
cpu_ipi_selected(int cpu_count, uint16_t *cpulist, u_long d0, u_long d1, u_long d2, uint64_t *ackmask)
{

View File

@ -33,7 +33,9 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
#include <machine/clock.h>
@ -46,8 +48,6 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#endif
#define TICK_GRACE 10000
SYSCTL_NODE(_machdep, OID_AUTO, tick, CTLFLAG_RD, 0, "tick statistics");
static int adjust_edges = 0;
@ -66,7 +66,12 @@ static int adjust_ticks = 0;
SYSCTL_INT(_machdep_tick, OID_AUTO, adjust_ticks, CTLFLAG_RD, &adjust_ticks,
0, "total number of tick interrupts with adjustment");
static void tick_hardclock(struct trapframe *);
static struct eventtimer tick_et;
static int tick_et_start(struct eventtimer *et,
struct bintime *first, struct bintime *period);
static int tick_et_stop(struct eventtimer *et);
static void tick_intr(struct trapframe *);
static uint64_t
tick_cputicks(void)
@ -79,28 +84,44 @@ void
cpu_initclocks(void)
{
stathz = hz;
tick_start();
intr_setup(PIL_TICK, tick_intr, -1, NULL, NULL);
tick_et.et_name = "tick";
tick_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
ET_FLAGS_PERCPU;
tick_et.et_quality = 1000;
tick_et.et_frequency = tick_freq;
tick_et.et_min_period.sec = 0;
tick_et.et_min_period.frac = 0x00010000LLU << 32; /* To be safe. */
tick_et.et_max_period.sec = 3600 * 24; /* No practical limit. */
tick_et.et_max_period.frac = 0;
tick_et.et_start = tick_et_start;
tick_et.et_stop = tick_et_stop;
tick_et.et_priv = NULL;
et_register(&tick_et);
cpu_initclocks_bsp();
}
static __inline void
tick_process(struct trapframe *tf)
{
struct trapframe *oldframe;
struct thread *td;
if (curcpu == 0)
hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
else
hardclock_cpu(TRAPF_USERMODE(tf));
if (profprocs != 0)
profclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
statclock(TRAPF_USERMODE(tf));
if (tick_et.et_active) {
td = curthread;
oldframe = td->td_intr_frame;
td->td_intr_frame = tf;
tick_et.et_event_cb(&tick_et, tick_et.et_arg);
td->td_intr_frame = oldframe;
}
}
static void
tick_hardclock(struct trapframe *tf)
tick_intr(struct trapframe *tf)
{
u_long adj, s, tick, ref;
u_long adj, ref, s, tick, tick_increment;
long delta;
int count;
@ -108,6 +129,7 @@ tick_hardclock(struct trapframe *tf)
if (curthread->td_critnest > 2 || curthread->td_critnest < 1)
panic("nested hardclock %d\n", curthread->td_critnest);
#endif
tick_increment = PCPU_GET(tickincrement);
/*
* The sequence of reading the TICK register, calculating the value
* of the next tick and writing it to the TICK_CMPR register must not
@ -118,7 +140,10 @@ tick_hardclock(struct trapframe *tf)
adj = PCPU_GET(tickadj);
s = intr_disable_all();
tick = rd(tick);
wrtickcmpr(tick + tick_increment - adj, 0);
if (tick_increment != 0)
wrtickcmpr(tick + tick_increment - adj, 0);
else
wrtickcmpr(1L << 63, 0);
intr_restore_all(s);
ref = PCPU_GET(tickref);
@ -131,6 +156,8 @@ tick_hardclock(struct trapframe *tf)
if (adj != 0)
adjust_ticks++;
count++;
if (tick_increment == 0)
break;
}
if (count > 0) {
adjust_missed += count - 1;
@ -146,7 +173,6 @@ tick_hardclock(struct trapframe *tf)
}
PCPU_SET(tickref, ref);
PCPU_SET(tickadj, adj);
}
void
@ -155,41 +181,55 @@ tick_init(u_long clock)
tick_freq = clock;
tick_MHz = clock / 1000000;
tick_increment = clock / hz;
/*
* Avoid stopping of hardclock in terms of a lost tick interrupt
* by ensuring that the tick period is at least TICK_GRACE ticks.
*/
printf("tick_freq=%ld hz=%d tick_increment=%ld\n",
tick_freq, hz, tick_increment);
#ifndef SIMULATOR
if (tick_increment < TICK_GRACE)
panic("%s: HZ too high, decrease to at least %ld", __func__,
clock / TICK_GRACE);
#endif
set_cputicker(tick_cputicks, tick_freq, 0);
}
void
tick_start(void)
static int
tick_et_start(struct eventtimer *et,
struct bintime *first, struct bintime *period)
{
u_long base, s;
u_long fdiv, div;
u_long base;
register_t s;
if (curcpu == 0)
intr_setup(PIL_TICK, tick_hardclock, -1, NULL, NULL);
if (period != NULL) {
div = (tick_et.et_frequency * (period->frac >> 32)) >> 32;
if (period->sec != 0)
div += tick_et.et_frequency * period->sec;
} else
div = 0;
if (first != NULL) {
fdiv = (tick_et.et_frequency * (first->frac >> 32)) >> 32;
if (first->sec != 0)
fdiv += tick_et.et_frequency * first->sec;
} else
fdiv = div;
PCPU_SET(tickincrement, div);
/*
* Try to make the tick interrupts as synchronously as possible on
* all CPUs to avoid inaccuracies for migrating processes. Leave out
* one tick to make sure that it is not missed.
*/
critical_enter();
PCPU_SET(tickadj, 0);
s = intr_disable_all();
base = rd(tick);
base = roundup(base, tick_increment);
if (div != 0)
base = roundup(base, div);
PCPU_SET(tickref, base);
wrtickcmpr(base + tick_increment, 0);
wrtickcmpr(base + fdiv, 0);
intr_restore_all(s);
critical_exit();
return (0);
}
static int
tick_et_stop(struct eventtimer *et)
{
PCPU_SET(tickincrement, 0);
wrtickcmpr(1L << 63, 0);
return (0);
}
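Seen from the MI side, both back ends are now driven purely through et_start() and et_stop(). An illustrative caller, with made-up helper names; the real call sites are in the MI event timers code and are not part of this diff:

/*
 * Illustrative only: programming the registered timer for a periodic
 * 1/hz tick and switching it off again (assumes <sys/timeet.h>).
 */
static void
program_periodic_sketch(struct eventtimer *et, int hz_value)
{
        struct bintime period;

        period.sec = 0;
        /* Roughly 1/hz as a 2^-64 fraction of a second (truncated). */
        period.frac = (((uint64_t)1 << 63) / hz_value) << 1;
        et->et_start(et, NULL, &period);        /* first == NULL: fire one period out */
}

static void
stop_sketch(struct eventtimer *et)
{

        et->et_stop(et);        /* tick_et_stop() zeroes pc_tickincrement and parks the timer */
}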