/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff.  If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP      200

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
        static u_int now;

        return (++now);
}

static struct timecounter dummy_timecounter = {
        dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};

struct timehands {
        /* These fields must be initialized by the driver. */
        struct timecounter      *th_counter;
        int64_t                 th_adjustment;
        uint64_t                th_scale;
        u_int                   th_offset_count;
        struct bintime          th_offset;
        struct timeval          th_microtime;
        struct timespec         th_nanotime;
        /* Fields not to be copied in tc_windup start with th_generation. */
        volatile u_int          th_generation;
        struct timehands        *th_next;
};

static struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
static struct timehands th0 = {
        &dummy_timecounter,
        0,
        (uint64_t)-1 / 1000000,
        0,
        {1, 0},
        {0, 0},
        {0, 0},
        1,
        &th1
};

static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

int tc_min_ticktock_freq = 1;

time_t time_second = 1;
time_t time_uptime = 1;

struct bintime boottimebin;
struct timeval boottime;
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");

static int timestepwarnings;
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "");

static void tc_windup(void);
static void cpu_tick_calibrate(int);

static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
        int tv[2];

        if (req->flags & SCTL_MASK32) {
                tv[0] = boottime.tv_sec;
                tv[1] = boottime.tv_usec;
                return SYSCTL_OUT(req, tv, sizeof(tv));
        } else
#endif
                return SYSCTL_OUT(req, &boottime, sizeof(boottime));
}

static int
sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
{
        u_int ncount;
        struct timecounter *tc = arg1;

        ncount = tc->tc_get_timecount(tc);
        return sysctl_handle_int(oidp, &ncount, 0, req);
}

static int
sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
{
        uint64_t freq;
        struct timecounter *tc = arg1;

        freq = tc->tc_frequency;
        return sysctl_handle_quad(oidp, &freq, 0, req);
}

/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
        struct timecounter *tc;

        tc = th->th_counter;
        return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
            tc->tc_counter_mask);
}
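
/*
 * Editorial worked example (not part of the original file): the mask makes
 * the subtraction wrap correctly for narrow counters.  For a hypothetical
 * 16-bit counter (tc_counter_mask == 0xffff) that rolled over from 0xfffd
 * to 0x0005 since the last windup:
 *
 *      (0x0005 - 0xfffd) & 0xffff == 0x0008
 *
 * i.e. eight counter periods elapsed despite the numeric wrap.
 */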

/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these 12 functions.
 */

void
binuptime(struct bintime *bt)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *bt = th->th_offset;
                bintime_addx(bt, th->th_scale * tc_delta(th));
        } while (gen == 0 || gen != th->th_generation);
}
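
/*
 * Editorial sketch (not part of the original file): the loop above is a
 * lock-free, seqlock-style read; the writer (tc_windup) zeroes
 * th_generation while it updates a timehands.  A hypothetical consumer
 * measuring an interval at full hardware resolution could combine the
 * monotonic readers like this.  TC_EXAMPLES is a hypothetical guard;
 * bintime_sub() comes from <sys/time.h>.
 */
#ifdef TC_EXAMPLES
static void
example_time_interval(void (*fn)(void), struct bintime *elapsed)
{
        struct bintime bt0;

        binuptime(&bt0);                /* monotonic start; immune to steps */
        fn();
        binuptime(elapsed);             /* monotonic end */
        bintime_sub(elapsed, &bt0);     /* elapsed = end - start */
}
#endif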

void
nanouptime(struct timespec *tsp)
{
        struct bintime bt;

        binuptime(&bt);
        bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
        struct bintime bt;

        binuptime(&bt);
        bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{

        binuptime(bt);
        bintime_add(bt, &boottimebin);
}

void
nanotime(struct timespec *tsp)
{
        struct bintime bt;

        bintime(&bt);
        bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
        struct bintime bt;

        bintime(&bt);
        bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *bt = th->th_offset;
        } while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                bintime2timespec(&th->th_offset, tsp);
        } while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                bintime2timeval(&th->th_offset, tvp);
        } while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *bt = th->th_offset;
        } while (gen == 0 || gen != th->th_generation);
        bintime_add(bt, &boottimebin);
}

void
getnanotime(struct timespec *tsp)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *tsp = th->th_nanotime;
        } while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *tvp = th->th_microtime;
        } while (gen == 0 || gen != th->th_generation);
}

/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
        u_int u;
        struct sysctl_oid *tc_root;

        u = tc->tc_frequency / tc->tc_counter_mask;
        /* XXX: We need some margin here, 10% is a guess */
        u *= 11;
        u /= 10;
        if (u > hz && tc->tc_quality >= 0) {
                tc->tc_quality = -2000;
                if (bootverbose) {
                        printf("Timecounter \"%s\" frequency %ju Hz",
                            tc->tc_name, (uintmax_t)tc->tc_frequency);
                        printf(" -- Insufficient hz, needs at least %u\n", u);
                }
        } else if (tc->tc_quality >= 0 || bootverbose) {
                printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
                    tc->tc_name, (uintmax_t)tc->tc_frequency,
                    tc->tc_quality);
        }

        tc->tc_next = timecounters;
        timecounters = tc;
        /*
         * Set up sysctl tree for this counter.
         */
        tc_root = SYSCTL_ADD_NODE(NULL,
            SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
            CTLFLAG_RW, 0, "timecounter description");
        SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
            "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
            "mask for implemented bits");
        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
            "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
            sysctl_kern_timecounter_get, "IU", "current timecounter value");
        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
            "frequency", CTLTYPE_QUAD | CTLFLAG_RD, tc, sizeof(*tc),
            sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
        SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
            "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
            "goodness of time counter");
        /*
         * Never automatically use a timecounter with negative quality.
         * Even though we run on the dummy counter, switching here may be
         * worse since this timecounter may not be monotonous.
         */
        if (tc->tc_quality < 0)
                return;
        if (tc->tc_quality < timecounter->tc_quality)
                return;
        if (tc->tc_quality == timecounter->tc_quality &&
            tc->tc_frequency < timecounter->tc_frequency)
                return;
        (void)tc->tc_get_timecount(tc);
        (void)tc->tc_get_timecount(tc);
        timecounter = tc;
}
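
/*
 * Editorial sketch (not part of the original file): how a hypothetical
 * hardware driver might register its counter.  Member order follows the
 * dummy_timecounter initializer above: get_timecount, poll_pps, mask,
 * frequency, name, quality.  All names and values here are made up;
 * TC_EXAMPLES is a hypothetical guard.
 */
#ifdef TC_EXAMPLES
static u_int
example_get_timecount(struct timecounter *tc)
{

        /* Read the (made-up) free-running hardware register here. */
        return (0);
}

static struct timecounter example_timecounter = {
        example_get_timecount,  /* read the hardware counter */
        0,                      /* no PPS polling hook */
        ~0u,                    /* full 32-bit counter */
        1000000,                /* 1 MHz (made-up frequency) */
        "example",              /* tc_name */
        800                     /* tc_quality: eligible for auto-selection */
};

static void
example_counter_attach(void)
{

        /* tc_init() prints the banner, adds sysctls and may select us. */
        tc_init(&example_timecounter);
}
#endif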

/* Report the frequency of the current timecounter. */
uint64_t
tc_getfrequency(void)
{

        return (timehands->th_counter->tc_frequency);
}

/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
        struct timespec tbef, taft;
        struct bintime bt, bt2;

        cpu_tick_calibrate(1);
        nanotime(&tbef);
        timespec2bintime(ts, &bt);
        binuptime(&bt2);
        bintime_sub(&bt, &bt2);
        bintime_add(&bt2, &boottimebin);
        boottimebin = bt;
        bintime2timeval(&bt, &boottime);

        /* XXX fiddle all the little crinkly bits around the fiords... */
        tc_windup();
        nanotime(&taft);
        if (timestepwarnings) {
                log(LOG_INFO,
                    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
                    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
                    (intmax_t)taft.tv_sec, taft.tv_nsec,
                    (intmax_t)ts->tv_sec, ts->tv_nsec);
        }
        cpu_tick_calibrate(1);
}
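
/*
 * Editorial note (not part of the original file): tc_setclock() exploits
 * the invariant UTC = uptime + boottime.  Stepping UTC therefore only
 * rewrites boottimebin/boottime (new boottime = requested UTC - current
 * uptime); the monotonic *uptime() functions are unaffected by the step.
 */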

/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
static void
tc_windup(void)
{
        struct bintime bt;
        struct timehands *th, *tho;
        uint64_t scale;
        u_int delta, ncount, ogen;
        int i;
        time_t t;

        /*
         * Make the next timehands a copy of the current one, but do not
         * overwrite the generation or next pointer.  While we update
         * the contents, the generation must be zero.
         */
        tho = timehands;
        th = tho->th_next;
        ogen = th->th_generation;
        th->th_generation = 0;
        bcopy(tho, th, offsetof(struct timehands, th_generation));

        /*
         * Capture a timecounter delta on the current timecounter and if
         * changing timecounters, a counter value from the new timecounter.
         * Update the offset fields accordingly.
         */
        delta = tc_delta(th);
        if (th->th_counter != timecounter)
                ncount = timecounter->tc_get_timecount(timecounter);
        else
                ncount = 0;
        th->th_offset_count += delta;
        th->th_offset_count &= th->th_counter->tc_counter_mask;
        bintime_addx(&th->th_offset, th->th_scale * delta);

        /*
         * Hardware latching timecounters may not generate interrupts on
         * PPS events, so instead we poll them.  There is a finite risk that
         * the hardware might capture a count which is later than the one we
         * got above, and therefore possibly in the next NTP second which
         * might have a different rate than the current NTP second.  It
         * doesn't matter in practice.
         */
        if (tho->th_counter->tc_poll_pps)
                tho->th_counter->tc_poll_pps(tho->th_counter);

        /*
         * Deal with NTP second processing.  The for loop normally
         * iterates at most once, but in extreme situations it might
         * keep NTP sane if timeouts are not run for several seconds.
         * At boot, the time step can be large when the TOD hardware
         * has been read, so on really large steps, we call
         * ntp_update_second only twice.  We need to call it twice in
         * case we missed a leap second.
         */
        bt = th->th_offset;
        bintime_add(&bt, &boottimebin);
        i = bt.sec - tho->th_microtime.tv_sec;
        if (i > LARGE_STEP)
                i = 2;
        for (; i > 0; i--) {
                t = bt.sec;
                ntp_update_second(&th->th_adjustment, &bt.sec);
                if (bt.sec != t)
                        boottimebin.sec += bt.sec - t;
        }
        /* Update the UTC timestamps used by the get*() functions. */
        /* XXX shouldn't do this here.  Should force non-`get' versions. */
        bintime2timeval(&bt, &th->th_microtime);
        bintime2timespec(&bt, &th->th_nanotime);

        /* Now is a good time to change timecounters. */
        if (th->th_counter != timecounter) {
                th->th_counter = timecounter;
                th->th_offset_count = ncount;
                tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
                    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
        }

        /*-
         * Recalculate the scaling factor.  We want the number of 1/2^64
         * fractions of a second per period of the hardware counter, taking
         * into account the th_adjustment factor which the NTP PLL/adjtime(2)
         * processing provides us with.
         *
         * The th_adjustment is nanoseconds per second with 32 bit binary
         * fraction and we want 64 bit binary fraction of second:
         *
         *       x = a * 2^32 / 10^9 = a * 4.294967296
         *
         * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
         * we can only multiply by about 850 without overflowing, that
         * leaves no suitably precise fractions for multiply before divide.
         *
         * Divide before multiply with a fraction of 2199/512 results in a
         * systematic undercompensation of 10PPM of th_adjustment.  On a
         * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
         *
         * We happily sacrifice the lowest of the 64 bits of our result
         * to the goddess of code clarity.
         */
        scale = (uint64_t)1 << 63;
        scale += (th->th_adjustment / 1024) * 2199;
        scale /= th->th_counter->tc_frequency;
        th->th_scale = scale * 2;

        /*
         * Now that the struct timehands is again consistent, set the new
         * generation number, making sure to not make it zero.
         */
        if (++ogen == 0)
                ogen = 1;
        th->th_generation = ogen;

        /* Go live with the new struct timehands. */
        time_second = th->th_microtime.tv_sec;
        time_uptime = th->th_offset.sec;
        timehands = th;
}
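
/*
 * Editorial worked example (not part of the original file): with
 * th_adjustment == 0 and a 1 MHz counter, scale = 2 * ((2^63) / 10^6),
 * so th_scale == 2^64 / 10^6.  One counter tick then advances th_offset
 * by exactly 2^64 / 10^6 fractions, i.e. one microsecond, and the NTP
 * adjustment term perturbs that rate by at most +/- 5000PPM.
 */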

/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
        char newname[32];
        struct timecounter *newtc, *tc;
        int error;

        tc = timecounter;
        strlcpy(newname, tc->tc_name, sizeof(newname));

        error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
        if (error != 0 || req->newptr == NULL ||
            strcmp(newname, tc->tc_name) == 0)
                return (error);
        for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
                if (strcmp(newname, newtc->tc_name) != 0)
                        continue;

                /* Warm up new timecounter. */
                (void)newtc->tc_get_timecount(newtc);
                (void)newtc->tc_get_timecount(newtc);

                timecounter = newtc;
                return (0);
        }
        return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A", "");
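
/*
 * Editorial note (not part of the original file): this handler backs the
 * writable sysctl kern.timecounter.hardware, so an administrator can
 * switch counters at runtime, e.g. "sysctl kern.timecounter.hardware=TSC".
 * The name must match the tc_name of a registered timecounter, else the
 * write fails with EINVAL.
 */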

/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
        char buf[32], *spc;
        struct timecounter *tc;
        int error;

        spc = "";
        error = 0;
        for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
                sprintf(buf, "%s%s(%d)",
                    spc, tc->tc_name, tc->tc_quality);
                error = SYSCTL_OUT(req, buf, strlen(buf));
                spc = " ";
        }
        return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "");

/*
 * RFC 2783 PPS-API implementation.
 */

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
        pps_params_t *app;
        struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
        struct pps_kcbind_args *kapi;
#endif

        KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
        switch (cmd) {
        case PPS_IOC_CREATE:
                return (0);
        case PPS_IOC_DESTROY:
                return (0);
        case PPS_IOC_SETPARAMS:
                app = (pps_params_t *)data;
                if (app->mode & ~pps->ppscap)
                        return (EINVAL);
                pps->ppsparam = *app;
                return (0);
        case PPS_IOC_GETPARAMS:
                app = (pps_params_t *)data;
                *app = pps->ppsparam;
                app->api_version = PPS_API_VERS_1;
                return (0);
        case PPS_IOC_GETCAP:
                *(int*)data = pps->ppscap;
                return (0);
        case PPS_IOC_FETCH:
                fapi = (struct pps_fetch_args *)data;
                if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
                        return (EINVAL);
                if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
                        return (EOPNOTSUPP);
                pps->ppsinfo.current_mode = pps->ppsparam.mode;
                fapi->pps_info_buf = pps->ppsinfo;
                return (0);
        case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
                kapi = (struct pps_kcbind_args *)data;
                /* XXX Only root should be able to do this */
                if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
                        return (EINVAL);
                if (kapi->kernel_consumer != PPS_KC_HARDPPS)
                        return (EINVAL);
                if (kapi->edge & ~pps->ppscap)
                        return (EINVAL);
                pps->kcmode = kapi->edge;
                return (0);
#else
                return (EOPNOTSUPP);
#endif
        default:
                return (ENOIOCTL);
        }
}

void
pps_init(struct pps_state *pps)
{
        pps->ppscap |= PPS_TSFMT_TSPEC;
        if (pps->ppscap & PPS_CAPTUREASSERT)
                pps->ppscap |= PPS_OFFSETASSERT;
        if (pps->ppscap & PPS_CAPTURECLEAR)
                pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_capture(struct pps_state *pps)
{
        struct timehands *th;

        KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
        th = timehands;
        pps->capgen = th->th_generation;
        pps->capth = th;
        pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
        if (pps->capgen != th->th_generation)
                pps->capgen = 0;
}

void
pps_event(struct pps_state *pps, int event)
{
        struct bintime bt;
        struct timespec ts, *tsp, *osp;
        u_int tcount, *pcount;
        int foff, fhard;
        pps_seq_t *pseq;

        KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
        /* If the timecounter was wound up underneath us, bail out. */
        if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
                return;

        /* Things would be easier with arrays. */
        if (event == PPS_CAPTUREASSERT) {
                tsp = &pps->ppsinfo.assert_timestamp;
                osp = &pps->ppsparam.assert_offset;
                foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
                fhard = pps->kcmode & PPS_CAPTUREASSERT;
                pcount = &pps->ppscount[0];
                pseq = &pps->ppsinfo.assert_sequence;
        } else {
                tsp = &pps->ppsinfo.clear_timestamp;
                osp = &pps->ppsparam.clear_offset;
                foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
                fhard = pps->kcmode & PPS_CAPTURECLEAR;
                pcount = &pps->ppscount[1];
                pseq = &pps->ppsinfo.clear_sequence;
        }

        /*
         * If the timecounter changed, we cannot compare the count values, so
         * we have to drop the rest of the PPS-stuff until the next event.
         */
        if (pps->ppstc != pps->capth->th_counter) {
                pps->ppstc = pps->capth->th_counter;
                *pcount = pps->capcount;
                pps->ppscount[2] = pps->capcount;
                return;
        }

        /* Convert the count to a timespec. */
        tcount = pps->capcount - pps->capth->th_offset_count;
        tcount &= pps->capth->th_counter->tc_counter_mask;
        bt = pps->capth->th_offset;
        bintime_addx(&bt, pps->capth->th_scale * tcount);
        bintime_add(&bt, &boottimebin);
        bintime2timespec(&bt, &ts);

        /* If the timecounter was wound up underneath us, bail out. */
        if (pps->capgen != pps->capth->th_generation)
                return;

        *pcount = pps->capcount;
        (*pseq)++;
        *tsp = ts;

        if (foff) {
                timespecadd(tsp, osp);
                if (tsp->tv_nsec < 0) {
                        tsp->tv_nsec += 1000000000;
                        tsp->tv_sec -= 1;
                }
        }
#ifdef PPS_SYNC
        if (fhard) {
                uint64_t scale;

                /*
                 * Feed the NTP PLL/FLL.
                 * The FLL wants to know how many (hardware) nanoseconds
                 * elapsed since the previous event.
                 */
                tcount = pps->capcount - pps->ppscount[2];
                pps->ppscount[2] = pps->capcount;
                tcount &= pps->capth->th_counter->tc_counter_mask;
                scale = (uint64_t)1 << 63;
                scale /= pps->capth->th_counter->tc_frequency;
                scale *= 2;
                bt.sec = 0;
                bt.frac = 0;
                bintime_addx(&bt, scale * tcount);
                bintime2timespec(&bt, &ts);
                hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
        }
#endif
}
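
/*
 * Editorial sketch (not part of the original file): the intended driver
 * usage of the three PPS entry points above.  The softc and handlers are
 * hypothetical; TC_EXAMPLES is a hypothetical guard.
 */
#ifdef TC_EXAMPLES
struct example_pps_softc {
        struct pps_state sc_pps;
};

static void
example_pps_attach(struct example_pps_softc *sc)
{

        sc->sc_pps.ppscap = PPS_CAPTUREASSERT;  /* what the hw can latch */
        pps_init(&sc->sc_pps);                  /* derives PPS_OFFSET* caps */
}

static void
example_pps_intr(struct example_pps_softc *sc)
{

        pps_capture(&sc->sc_pps);       /* snapshot timehands ASAP */
        /* ... acknowledge the hardware interrupt ... */
        pps_event(&sc->sc_pps, PPS_CAPTUREASSERT);
}
#endif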

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */

static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0, "");

void
tc_ticktock(int cnt)
{
        static int count;

        count += cnt;
        if (count < tc_tick)
                return;
        count = 0;
        tc_windup();
}

static void
inittimecounter(void *dummy)
{
        u_int p;

        /*
         * Set the initial timeout to
         * max(1, <approx. number of hardclock ticks in a millisecond>).
         * People should probably not use the sysctl to set the timeout
         * to smaller than its initial value, since that value is the
         * smallest reasonable one.  If they want better timestamps they
         * should use the non-"get"* functions.
         */
        if (hz > 1000)
                tc_tick = (hz + 500) / 1000;
        else
                tc_tick = 1;
        p = (tc_tick * 1000000) / hz;
        printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);

        /* warm up new timecounter (again) and get rolling. */
        (void)timecounter->tc_get_timecount(timecounter);
        (void)timecounter->tc_get_timecount(timecounter);
}

SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
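
/*
 * Editorial worked example (not part of the original file): with hz ==
 * 4000, tc_tick = (4000 + 500) / 1000 = 4, so tc_ticktock() calls
 * tc_windup() every fourth hardclock tick (~1 ms apart); with hz <= 1000
 * the else branch gives tc_tick = 1 and windup runs on every tick.
 */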

/* Cpu tick handling -------------------------------------------------*/

static int cpu_tick_variable;
static uint64_t cpu_tick_frequency;

static uint64_t
tc_cpu_ticks(void)
{
        static uint64_t base;
        static unsigned last;
        unsigned u;
        struct timecounter *tc;

        tc = timehands->th_counter;
        u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
        if (u < last)
                base += (uint64_t)tc->tc_counter_mask + 1;
        last = u;
        return (u + base);
}

void
cpu_tick_calibration(void)
{
        static time_t last_calib;

        if (time_uptime != last_calib && !(time_uptime & 0xf)) {
                cpu_tick_calibrate(0);
                last_calib = time_uptime;
        }
}
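
/*
 * Editorial note (not part of the original file): "!(time_uptime & 0xf)"
 * holds during one whole second out of every 16 seconds of uptime; the
 * last_calib latch then limits the work to a single calibration per such
 * second, matching the "every 16 seconds" comment below.
 */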

/*
 * This function gets called every 16 seconds on only one designated
 * CPU in the system from hardclock() via cpu_tick_calibration().
 *
 * Whenever the real time clock is stepped we get called with reset=1
 * to make sure we handle suspend/resume and similar events correctly.
 */

static void
cpu_tick_calibrate(int reset)
{
        static uint64_t c_last;
        uint64_t c_this, c_delta;
        static struct bintime t_last;
        struct bintime t_this, t_delta;
        uint32_t divi;

        if (reset) {
                /* The clock was stepped, abort & reset */
                t_last.sec = 0;
                return;
        }

        /* we don't calibrate fixed rate cputicks */
        if (!cpu_tick_variable)
                return;

        getbinuptime(&t_this);
        c_this = cpu_ticks();
        if (t_last.sec != 0) {
                c_delta = c_this - c_last;
                t_delta = t_this;
                bintime_sub(&t_delta, &t_last);
                /*
                 * Headroom:
                 *      2^(64-20) / 16[s] =
                 *      2^(44) / 16[s] =
                 *      17.592.186.044.416 / 16 =
                 *      1.099.511.627.776 [Hz]
                 */
                divi = t_delta.sec << 20;
                divi |= t_delta.frac >> (64 - 20);
                c_delta <<= 20;
                c_delta /= divi;
                if (c_delta > cpu_tick_frequency) {
                        if (0 && bootverbose)
                                printf("cpu_tick increased to %ju Hz\n",
                                    c_delta);
                        cpu_tick_frequency = c_delta;
                }
        }
        c_last = c_this;
        t_last = t_this;
}

void
set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
{

        if (func == NULL) {
                cpu_ticks = tc_cpu_ticks;
        } else {
                cpu_tick_frequency = freq;
                cpu_tick_variable = var;
                cpu_ticks = func;
        }
}

uint64_t
cpu_tickrate(void)
{

        if (cpu_ticks == tc_cpu_ticks)
                return (tc_getfrequency());
        return (cpu_tick_frequency);
}

/*
 * We need to be slightly careful converting cputicks to microseconds.
 * There is plenty of margin in 64 bits of microseconds (half a million
 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
 * before divide conversion (to retain precision) we find that the
 * margin shrinks to 1.5 hours (one millionth of 146y).
 * With a three prong approach we never lose significant bits, no
 * matter what the cputick rate and length of timeinterval is.
 */

uint64_t
cputick2usec(uint64_t tick)
{

        if (tick > 18446744073709551LL)         /* floor(2^64 / 1000) */
                return (tick / (cpu_tickrate() / 1000000LL));
        else if (tick > 18446744073709LL)       /* floor(2^64 / 1000000) */
                return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
        else
                return ((tick * 1000000LL) / cpu_tickrate());
}
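
/*
 * Editorial worked example (not part of the original file): at a
 * hypothetical cpu_tickrate() of 2.4 GHz, tick = 10^12 (~417 seconds of
 * ticks) is below both thresholds, so the full-precision branch applies:
 *
 *      (10^12 * 1000000) / 2.4e9 = 416,666,666 us
 *
 * Only much larger tick counts take the reduced-precision branches,
 * trading low bits for overflow headroom.
 */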

cpu_tick_f *cpu_ticks = tc_cpu_ticks;