Use per-cpu values for base and last in tc_cpu_ticks().

The values are updated locklessly; each CPU writes its own view of the timecounter state. The critical section is entered only for safety, since callers of tc_cpu_ticks() are expected to have already entered a critical section or to own a spinlock. The change fixes sporadic reports of too-high (W)CPU values on platforms that do not provide a CPU ticker and fall back to tc_cpu_ticks(), in particular arm*.

Diagnosed and reviewed by:	jhb
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
commit b2557db607
parent e675024a02
@@ -1924,20 +1924,27 @@ SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
 static int cpu_tick_variable;
 static uint64_t cpu_tick_frequency;
 
+static DPCPU_DEFINE(uint64_t, tc_cpu_ticks_base);
+static DPCPU_DEFINE(unsigned, tc_cpu_ticks_last);
+
 static uint64_t
 tc_cpu_ticks(void)
 {
-	static uint64_t base;
-	static unsigned last;
-	unsigned u;
 	struct timecounter *tc;
+	uint64_t res, *base;
+	unsigned u, *last;
 
+	critical_enter();
+	base = DPCPU_PTR(tc_cpu_ticks_base);
+	last = DPCPU_PTR(tc_cpu_ticks_last);
 	tc = timehands->th_counter;
 	u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
-	if (u < last)
-		base += (uint64_t)tc->tc_counter_mask + 1;
-	last = u;
-	return (u + base);
+	if (u < *last)
+		*base += (uint64_t)tc->tc_counter_mask + 1;
+	*last = u;
+	res = u + *base;
+	critical_exit();
+	return (res);
 }
 
 void
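For context, the wrap-extension logic that tc_cpu_ticks() implements can be illustrated outside the kernel. The sketch below is a simplified userland model, not kernel code: the names struct tick_state and ticks_extend() are made up for illustration, and the caller-owned state structure stands in for the per-CPU DPCPU variables introduced by the patch.

/*
 * Simplified userland sketch of the wrap-extension logic in
 * tc_cpu_ticks().  struct tick_state and ticks_extend() are
 * illustrative names only; they do not exist in the FreeBSD sources.
 */
#include <stdint.h>
#include <stdio.h>

struct tick_state {
	uint64_t base;		/* accumulated full wraps of the counter */
	unsigned last;		/* last raw counter value seen */
};

/*
 * Extend a narrow, wrapping hardware counter (raw value already masked
 * with counter_mask) into a monotonically increasing 64-bit value.
 * Each CPU keeps its own state, mirroring the DPCPU variables added by
 * the patch, so no other CPU can disturb base or last between reads.
 */
static uint64_t
ticks_extend(struct tick_state *st, unsigned raw, unsigned counter_mask)
{

	if (raw < st->last)	/* the counter wrapped since the last read */
		st->base += (uint64_t)counter_mask + 1;
	st->last = raw;
	return (st->base + raw);
}

int
main(void)
{
	struct tick_state st = { 0, 0 };
	unsigned samples[] = { 0xfff0, 0xfffd, 0x0005, 0x0123 };
	size_t i;

	/* A 16-bit counter that wraps once between the 2nd and 3rd read. */
	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("raw 0x%04x -> extended %ju\n", samples[i],
		    (uintmax_t)ticks_extend(&st, samples[i], 0xffff));
	return (0);
}

With a single shared base/last pair, as in the old code, one CPU's read can store a larger last value and make another CPU's slightly older counter read look like a wrap, spuriously adding tc_counter_mask + 1 and producing the inflated (W)CPU values the commit message describes. Keeping the pair per CPU avoids that without locks; the critical section only has to keep the thread from migrating so the pointers returned by DPCPU_PTR() continue to refer to the current CPU's instances.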