Add a tunable "machdep.disable_tsc" to turn off use of the TSC.  Specifically,
it turns off boot-time CPU frequency calibration, the TSC-based DELAY(9) path,
and use of the TSC as a CPU ticker.  Note that tsc_present is not changed by
this tunable.
Jung-uk Kim 2011-03-11 00:44:32 +00:00
parent 9a7836dce1
commit 79422085d4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=219473
5 changed files with 71 additions and 36 deletions
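
The new knob is a boot-time loader tunable: CTLFLAG_RDTUN makes the
machdep.disable_tsc sysctl read-only at runtime, so it has to be set from the
loader.  A minimal usage sketch (the loader.conf(5) mechanism is standard
FreeBSD; the value shown is only illustrative):

    # /boot/loader.conf
    machdep.disable_tsc="1"

    # after reboot, confirm the setting is in effect:
    sysctl machdep.disable_tsc

With the tunable set, tsc_present still reflects CPUID as before, but, as the
hunks below show, the TSC is not calibrated at boot, DELAY(9) does not use it,
it is not installed as the CPU ticker, and init_TSC_tc() returns early so no
TSC timecounter is registered.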

View File

@@ -80,6 +80,9 @@ __FBSDID("$FreeBSD$");
 #include <sys/reboot.h>
 #include <sys/sched.h>
 #include <sys/signalvar.h>
+#ifdef SMP
+#include <sys/smp.h>
+#endif
 #include <sys/syscallsubr.h>
 #include <sys/sysctl.h>
 #include <sys/sysent.h>
@@ -544,20 +547,22 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
                 return (EINVAL);
         /* If TSC is P-state invariant, DELAY(9) based logic fails. */
-        if (tsc_is_invariant)
+        if (tsc_is_invariant && tsc_freq != 0)
                 return (EOPNOTSUPP);
         /* If we're booting, trust the rate calibrated moments ago. */
-        if (cold) {
+        if (cold && tsc_freq != 0) {
                 *rate = tsc_freq;
                 return (0);
         }
 #ifdef SMP
-        /* Schedule ourselves on the indicated cpu. */
-        thread_lock(curthread);
-        sched_bind(curthread, cpu_id);
-        thread_unlock(curthread);
+        if (smp_cpus > 1) {
+                /* Schedule ourselves on the indicated cpu. */
+                thread_lock(curthread);
+                sched_bind(curthread, cpu_id);
+                thread_unlock(curthread);
+        }
 #endif
         /* Calibrate by measuring a short delay. */
@@ -568,12 +573,24 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
         intr_restore(reg);
 #ifdef SMP
-        thread_lock(curthread);
-        sched_unbind(curthread);
-        thread_unlock(curthread);
+        if (smp_cpus > 1) {
+                thread_lock(curthread);
+                sched_unbind(curthread);
+                thread_unlock(curthread);
+        }
 #endif
-        *rate = (tsc2 - tsc1) * 1000;
+        tsc2 -= tsc1;
+        if (tsc_freq != 0) {
+                *rate = tsc2 * 1000;
+                return (0);
+        }
+        /*
+         * Subtract 0.5% of the total.  Empirical testing has shown that
+         * overhead in DELAY() works out to approximately this value.
+         */
+        *rate = tsc2 * 1000 - tsc2 * 5;
         return (0);
 }
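
The constants in the new fallback path follow from the length of the
calibration delay.  A sketch of the arithmetic, assuming the elided
calibration code (not shown in this hunk) busy-waits with DELAY(1000),
i.e. for one millisecond:

    rate     = ticks / 0.001 s   ->  tsc2 * 1000 (Hz)
    overhead = 0.5% of that      ->  tsc2 * 1000 / 200 = tsc2 * 5
    *rate    = tsc2 * 1000 - tsc2 * 5

When tsc_freq is already known, the early-return path above reports
tsc2 * 1000 without this correction.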

View File

@@ -78,10 +78,10 @@
 #include <machine/clock.h>
 #endif
 #if defined(__amd64__) || defined(__i386__)
+#include <machine/cpu.h>	/* for cpu_feature or tsc_present */
 #include <machine/cpufunc.h>	/* for pentium tsc */
 #if defined(__NetBSD__) || defined(__OpenBSD__)
 #include <machine/specialreg.h>	/* for CPUID_TSC */
-#include <machine/cpu.h>	/* for cpu_feature */
 #endif
 #endif /* __amd64 || __i386__ */
@@ -927,7 +927,7 @@ init_machclk_setup(void)
 #if defined(__amd64__) || defined(__i386__)
         /* check if TSC is available */
 #ifdef __FreeBSD__
-        if (tsc_freq == 0)
+        if (!tsc_present || tsc_freq == 0)
 #else
         if ((cpu_feature & CPUID_TSC) == 0)
 #endif

View File

@@ -79,6 +79,9 @@ __FBSDID("$FreeBSD$");
 #include <sys/reboot.h>
 #include <sys/sched.h>
 #include <sys/signalvar.h>
+#ifdef SMP
+#include <sys/smp.h>
+#endif
 #include <sys/syscallsubr.h>
 #include <sys/sysctl.h>
 #include <sys/sysent.h>
@@ -1142,20 +1145,22 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
                 return (EOPNOTSUPP);
         /* If TSC is P-state invariant, DELAY(9) based logic fails. */
-        if (tsc_is_invariant)
+        if (tsc_is_invariant && tsc_freq != 0)
                 return (EOPNOTSUPP);
         /* If we're booting, trust the rate calibrated moments ago. */
-        if (cold) {
+        if (cold && tsc_freq != 0) {
                 *rate = tsc_freq;
                 return (0);
         }
 #ifdef SMP
-        /* Schedule ourselves on the indicated cpu. */
-        thread_lock(curthread);
-        sched_bind(curthread, cpu_id);
-        thread_unlock(curthread);
+        if (smp_cpus > 1) {
+                /* Schedule ourselves on the indicated cpu. */
+                thread_lock(curthread);
+                sched_bind(curthread, cpu_id);
+                thread_unlock(curthread);
+        }
 #endif
         /* Calibrate by measuring a short delay. */
@@ -1166,9 +1171,11 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
         intr_restore(reg);
 #ifdef SMP
-        thread_lock(curthread);
-        sched_unbind(curthread);
-        thread_unlock(curthread);
+        if (smp_cpus > 1) {
+                thread_lock(curthread);
+                sched_unbind(curthread);
+                thread_unlock(curthread);
+        }
 #endif
         tsc2 -= tsc1;

View File

@@ -77,6 +77,9 @@ __FBSDID("$FreeBSD$");
 #include <sys/reboot.h>
 #include <sys/sched.h>
 #include <sys/signalvar.h>
+#ifdef SMP
+#include <sys/smp.h>
+#endif
 #include <sys/syscallsubr.h>
 #include <sys/sysctl.h>
 #include <sys/sysent.h>
@@ -1077,16 +1080,18 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
                 return (EOPNOTSUPP);
         /* If we're booting, trust the rate calibrated moments ago. */
-        if (cold) {
+        if (cold && tsc_freq != 0) {
                 *rate = tsc_freq;
                 return (0);
         }
 #ifdef SMP
-        /* Schedule ourselves on the indicated cpu. */
-        thread_lock(curthread);
-        sched_bind(curthread, cpu_id);
-        thread_unlock(curthread);
+        if (smp_cpus > 1) {
+                /* Schedule ourselves on the indicated cpu. */
+                thread_lock(curthread);
+                sched_bind(curthread, cpu_id);
+                thread_unlock(curthread);
+        }
 #endif
         /* Calibrate by measuring a short delay. */
@@ -1097,9 +1102,11 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
         intr_restore(reg);
 #ifdef SMP
-        thread_lock(curthread);
-        sched_unbind(curthread);
-        thread_unlock(curthread);
+        if (smp_cpus > 1) {
+                thread_lock(curthread);
+                sched_unbind(curthread);
+                thread_unlock(curthread);
+        }
 #endif
         tsc2 -= tsc1;

View File

@@ -63,6 +63,11 @@ SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
 TUNABLE_INT("kern.timecounter.smp_tsc", &smp_tsc);
 #endif
+static int tsc_disabled;
+SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
+    "Disable x86 Time Stamp Counter");
+TUNABLE_INT("machdep.disable_tsc", &tsc_disabled);
 static void tsc_freq_changed(void *arg, const struct cf_level *level,
     int status);
 static void tsc_freq_changing(void *arg, const struct cf_level *level,
@@ -84,12 +89,11 @@ init_TSC(void)
 {
         u_int64_t tscval[2];
-        if (cpu_feature & CPUID_TSC)
-                tsc_present = 1;
-        else
-                tsc_present = 0;
+        if ((cpu_feature & CPUID_TSC) == 0)
+                return;
+        tsc_present = 1;
-        if (!tsc_present)
+        if (tsc_disabled)
                 return;
         if (bootverbose)
@@ -151,7 +155,7 @@
 init_TSC_tc(void)
 {
-        if (!tsc_present)
+        if (!tsc_present || tsc_disabled)
                 return;
         /*
@@ -248,7 +252,7 @@ tsc_freq_changed(void *arg, const struct cf_level *level, int status)
 {
         /* If there was an error during the transition, don't do anything. */
-        if (status != 0)
+        if (tsc_disabled || status != 0)
                 return;
         /* Total setting for this level gives the new frequency in MHz. */
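
The declarations added in the first hunk of this file follow the usual pattern
for a boot-time knob: TUNABLE_INT() arranges for the value to be fetched from
the kernel environment set up by the loader, and SYSCTL_INT() with
CTLFLAG_RDTUN exposes the same variable as a read-only sysctl at runtime.  A
minimal sketch of the same pattern with purely illustrative names (not part of
this commit):

    static int example_knob;
    SYSCTL_INT(_machdep, OID_AUTO, example_knob, CTLFLAG_RDTUN, &example_knob,
        0, "Example read-only boot-time knob");
    TUNABLE_INT("machdep.example_knob", &example_knob);

Because tsc_disabled defaults to 0, behavior is unchanged unless the tunable
is explicitly set from the loader.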