97c910139b
get_tsc_freq uses the 'nanosleep' system call to calculate the CPU
frequency. However, 'nanosleep' results in the process being
un-scheduled. The kernel saves and restores the PMU state, which
ensures that PMU cycles are not counted towards a sleeping
process. When RTE_ARM_EAL_RDTSC_USE_PMU is defined, this results
in an incorrect CPU frequency calculation. This logic is replaced
with a generic-counter-based loop (sketched below).
Bugzilla ID: 450
Fixes: f91bcbb2d9 ("eal/armv8: use high-resolution cycle counter")
Cc: stable@dpdk.org
Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Dharmik Thakkar <dharmik.thakkar@arm.com>
Reviewed-by: Phil Yang <phil.yang@arm.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
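
For context, here is a minimal sketch of the nanosleep-based estimation path being replaced, assuming rte_rdtsc_precise() is backed by the PMU cycle counter; the function name estimate_freq_with_nanosleep and the 1/10-second sleep interval are illustrative, not the exact DPDK source.

#include <stdint.h>
#include <time.h>

#include <rte_cycles.h>

/* Illustrative only: while the task sleeps it is de-scheduled and the
 * kernel saves/restores the PMU state, so no PMU cycles accumulate for
 * the task. The measured delta is then far too small, and the derived
 * frequency is wrong when rte_rdtsc() reads the PMU cycle counter.
 */
static uint64_t
estimate_freq_with_nanosleep(void)
{
        struct timespec sleeptime = { .tv_nsec = 100000000 }; /* 1/10 second */
        uint64_t start, end;

        start = rte_rdtsc_precise();
        nanosleep(&sleeptime, NULL);    /* task is un-scheduled here */
        end = rte_rdtsc_precise();

        /* Scale the 1/10-second sample back up to cycles per second. */
        return (end - start) * 10;
}

The committed change below avoids sleeping entirely: it busy-waits on the ARM generic counter for the same 1/10 second, so the task stays scheduled and the PMU keeps counting.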
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015 Cavium, Inc
 */

#include "eal_private.h"
#include "rte_cycles.h"

uint64_t
get_tsc_freq_arch(void)
{
#if defined RTE_ARCH_ARM64 && !defined RTE_ARM_EAL_RDTSC_USE_PMU
        return __rte_arm64_cntfrq();
#elif defined RTE_ARCH_ARM64 && defined RTE_ARM_EAL_RDTSC_USE_PMU
#define CYC_PER_1MHZ 1E6
        /* Use the generic counter ticks to calculate the PMU
         * cycle frequency.
         */
        uint64_t ticks;
        uint64_t start_ticks, cur_ticks;
        uint64_t start_pmu_cycles, end_pmu_cycles;

        /* Number of ticks for 1/10 second */
        ticks = __rte_arm64_cntfrq() / 10;

        start_ticks = __rte_arm64_cntvct_precise();
        start_pmu_cycles = rte_rdtsc_precise();
        do {
                cur_ticks = __rte_arm64_cntvct();
        } while ((cur_ticks - start_ticks) < ticks);
        end_pmu_cycles = rte_rdtsc_precise();

        /* Adjust the cycles to next 1Mhz */
        return RTE_ALIGN_MUL_CEIL(end_pmu_cycles - start_pmu_cycles,
                        CYC_PER_1MHZ) * 10;
#else
        return 0;
#endif
}
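
As a rough illustration of the scaling in the return statement (assumed numbers, not measured): a core whose PMU advances about 249,999,123 cycles during the 1/10-second busy-wait is rounded up to the next 1 MHz multiple and then multiplied by 10, giving 2.5 GHz.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#include <rte_common.h>  /* RTE_ALIGN_MUL_CEIL */

int
main(void)
{
        /* Assumed sample: PMU cycles counted over 1/10 second. */
        uint64_t pmu_cycles = 249999123;

        /* Round up to the next multiple of 1 MHz, then scale to 1 second. */
        uint64_t hz = RTE_ALIGN_MUL_CEIL(pmu_cycles, 1E6) * 10;

        printf("estimated PMU frequency: %" PRIu64 " Hz\n", hz); /* 2500000000 */
        return 0;
}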