eal/armv8: fix timer frequency calibration with PMU

get_tsc_freq uses 'nanosleep' system call to calculate the CPU
frequency. However, 'nanosleep' results in the process getting
un-scheduled. The kernel saves and restores the PMU state. This
ensures that the PMU cycles are not counted towards a sleeping
process. When RTE_ARM_EAL_RDTSC_USE_PMU is defined, this results
in incorrect CPU frequency calculation. This logic is replaced
with generic counter based loop.

Bugzilla ID: 450
Fixes: f91bcbb2d9 ("eal/armv8: use high-resolution cycle counter")
Cc: stable@dpdk.org

Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Dharmik Thakkar <dharmik.thakkar@arm.com>
Reviewed-by: Phil Yang <phil.yang@arm.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Authored by: Honnappa Nagarahalli, 2020-06-26 15:35:01 -05:00; committed by David Marchand
Parent commit: a7551b6c60
This commit: 97c910139b
2 changed files with 63 additions and 9 deletions

View File

@ -1,5 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2015 Cavium, Inc
* Copyright(c) 2020 Arm Limited
*/
#ifndef _RTE_CYCLES_ARM64_H_
@ -11,6 +12,33 @@ extern "C" {
#include "generic/rte_cycles.h"
/**
 * Read the generic timer frequency.
 *
 * @return
 *   Value of the CNTFRQ_EL0 system register (counter frequency in Hz).
 */
static __rte_always_inline uint64_t
__rte_arm64_cntfrq(void)
{
	uint64_t cnt_frq;

	asm volatile("mrs %0, cntfrq_el0" : "=r" (cnt_frq));
	return cnt_frq;
}
/**
 * Read the generic counter.
 *
 * @return
 *   Value of the CNTVCT_EL0 system register (virtual count ticks).
 */
static __rte_always_inline uint64_t
__rte_arm64_cntvct(void)
{
	uint64_t cnt;

	asm volatile("mrs %0, cntvct_el0" : "=r" (cnt));
	return cnt;
}
/**
 * Read the generic counter with a preceding instruction synchronization
 * barrier (isb), so the read is not reordered with earlier instructions.
 * The "memory" clobber additionally keeps the compiler from moving memory
 * accesses across the barrier.
 */
static __rte_always_inline uint64_t
__rte_arm64_cntvct_precise(void)
{
asm volatile("isb" : : : "memory");
return __rte_arm64_cntvct();
}
/**
* Read the time base register.
*
@ -25,10 +53,7 @@ extern "C" {
/**
 * Read the time base register (generic counter variant).
 *
 * @return
 *   Current generic counter value (CNTVCT_EL0 ticks).
 */
static inline uint64_t
rte_rdtsc(void)
{
	/* Delegate to the shared generic-counter reader; the old inline
	 * asm body was leftover diff residue that left a dead local and
	 * an unreachable duplicate return statement.
	 */
	return __rte_arm64_cntvct();
}
#else
/**
@ -49,14 +74,22 @@ rte_rdtsc(void)
* asm volatile("msr pmcr_el0, %0" : : "r" (val));
*
*/
/**
 * Read the PMU cycle counter.
 *
 * Requires user-space access to PMCCNTR_EL0 to be enabled by the kernel
 * (see the pmcr_el0 setup notes above), otherwise the read traps.
 *
 * @return
 *   Value of the PMCCNTR_EL0 system register (CPU cycles).
 */
static __rte_always_inline uint64_t
__rte_arm64_pmccntr(void)
{
	uint64_t tsc;

	asm volatile("mrs %0, pmccntr_el0" : "=r"(tsc));
	return tsc;
}
/**
 * Read the time base register (PMU variant): returns the raw PMU cycle
 * count via __rte_arm64_pmccntr().
 */
static inline uint64_t
rte_rdtsc(void)
{
return __rte_arm64_pmccntr();
}
#endif
static inline uint64_t

View File

@ -3,14 +3,35 @@
*/
#include "eal_private.h"
#include "rte_cycles.h"
uint64_t
get_tsc_freq_arch(void)
{
#if defined RTE_ARCH_ARM64 && !defined RTE_ARM_EAL_RDTSC_USE_PMU
uint64_t freq;
asm volatile("mrs %0, cntfrq_el0" : "=r" (freq));
return freq;
return __rte_arm64_cntfrq();
#elif defined RTE_ARCH_ARM64 && defined RTE_ARM_EAL_RDTSC_USE_PMU
#define CYC_PER_1MHZ 1E6
/* Use the generic counter ticks to calculate the PMU
* cycle frequency.
*/
uint64_t ticks;
uint64_t start_ticks, cur_ticks;
uint64_t start_pmu_cycles, end_pmu_cycles;
/* Number of ticks for 1/10 second */
ticks = __rte_arm64_cntfrq() / 10;
start_ticks = __rte_arm64_cntvct_precise();
start_pmu_cycles = rte_rdtsc_precise();
do {
cur_ticks = __rte_arm64_cntvct();
} while ((cur_ticks - start_ticks) < ticks);
end_pmu_cycles = rte_rdtsc_precise();
/* Adjust the cycles to next 1Mhz */
return RTE_ALIGN_MUL_CEIL(end_pmu_cycles - start_pmu_cycles,
CYC_PER_1MHZ) * 10;
#else
return 0;
#endif