On some Intel CPUs with a P-state- but not C-state-invariant TSC, the TSC may also halt in C2 and not just in C3 (it seems that in some cases the BIOS advertises its C3 state as a C2 state in _CST). Just play it safe and disable both C2 and C3 states if a user forces the use of the TSC as the timecounter on such CPUs.

PR:			192316
Differential Revision:	https://reviews.freebsd.org/D1441
No objection from:	jkim
MFC after:		1 week
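For orientation, the heart of the change is that the single cpu_disable_deep_sleep gate is split into separate C2 and C3 gates, and the ACPI idle path clamps the deepest C-state it will enter accordingly. The following is a minimal, self-contained sketch of that selection logic; pick_cx_index and struct cpu_softc are illustrative stand-ins for the driver code in the diff below, not the actual kernel sources:

#include <stdio.h>

/* Illustrative stand-ins for the kernel-wide gates that the event timer
 * and timecounter code raise when the selected clock dies in C2 or C3. */
static int cpu_disable_c2_sleep;	/* Timer dies in C2. */
static int cpu_disable_c3_sleep;	/* Timer dies in C3. */

struct cpu_softc {
	int cpu_cx_lowest;	/* Deepest Cx index the user allows. */
	int cpu_non_c2;		/* Index of lowest non-C2 state. */
	int cpu_non_c3;		/* Index of lowest non-C3 state. */
};

/* Clamp the C-state index the idle path may use, mirroring the
 * acpi_cpu_idle() hunk in this diff. */
static int
pick_cx_index(const struct cpu_softc *sc)
{
	if (cpu_disable_c2_sleep)
		return (sc->cpu_cx_lowest < sc->cpu_non_c2 ?
		    sc->cpu_cx_lowest : sc->cpu_non_c2);
	else if (cpu_disable_c3_sleep)
		return (sc->cpu_cx_lowest < sc->cpu_non_c3 ?
		    sc->cpu_cx_lowest : sc->cpu_non_c3);
	return (sc->cpu_cx_lowest);
}

int
main(void)
{
	struct cpu_softc sc = { .cpu_cx_lowest = 2, .cpu_non_c2 = 0,
	    .cpu_non_c3 = 1 };

	/* TSC forced as timecounter on an affected CPU: both gates are
	 * raised, so the idle path stays at the C1 slot (index 0 here). */
	cpu_disable_c2_sleep = 1;
	cpu_disable_c3_sleep = 1;
	printf("deepest allowed Cx index: %d\n", pick_cx_index(&sc));
	return (0);
}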
@@ -839,7 +839,7 @@ cpu_idle(int busy)
 	}
 
 	/* Apply AMD APIC timer C1E workaround. */
-	if (cpu_ident_amdc1e && cpu_disable_deep_sleep) {
+	if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
 		msr = rdmsr(MSR_AMDK8_IPM);
 		if (msr & AMDK8_CMPHALT)
 			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
@@ -85,6 +85,7 @@ struct acpi_cpu_softc {
     int			cpu_prev_sleep;	/* Last idle sleep duration. */
     int			cpu_features;	/* Child driver supported features. */
     /* Runtime state. */
+    int			cpu_non_c2;	/* Index of lowest non-C2 state. */
     int			cpu_non_c3;	/* Index of lowest non-C3 state. */
     u_int		cpu_cx_stats[MAX_CX_STATES];	/* Cx usage history. */
     /* Values for sysctl. */
@@ -668,8 +669,10 @@ acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
     cx_ptr->type = ACPI_STATE_C1;
     cx_ptr->trans_lat = 0;
     cx_ptr++;
+    sc->cpu_non_c2 = sc->cpu_cx_count;
     sc->cpu_non_c3 = sc->cpu_cx_count;
     sc->cpu_cx_count++;
+    cpu_deepest_sleep = 1;
 
     /*
      * The spec says P_BLK must be 6 bytes long. However, some systems
@@ -695,6 +698,7 @@ acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
 	    cx_ptr++;
 	    sc->cpu_non_c3 = sc->cpu_cx_count;
 	    sc->cpu_cx_count++;
+	    cpu_deepest_sleep = 2;
 	}
     }
     if (sc->cpu_p_blk_len < 6)
@@ -711,7 +715,7 @@ acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
 	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
 	    cx_ptr++;
 	    sc->cpu_cx_count++;
-	    cpu_can_deep_sleep = 1;
+	    cpu_deepest_sleep = 3;
 	}
     }
 }
@@ -757,6 +761,7 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
 	count = MAX_CX_STATES;
     }
 
+    sc->cpu_non_c2 = 0;
     sc->cpu_non_c3 = 0;
     sc->cpu_cx_count = 0;
     cx_ptr = sc->cpu_cx_states;
@@ -768,6 +773,7 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
     cx_ptr->type = ACPI_STATE_C0;
     cx_ptr++;
     sc->cpu_cx_count++;
+    cpu_deepest_sleep = 1;
 
     /* Set up all valid states. */
     for (i = 0; i < count; i++) {
@@ -788,6 +794,7 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
 		/* This is the first C1 state. Use the reserved slot. */
 		sc->cpu_cx_states[0] = *cx_ptr;
 	    } else {
+		sc->cpu_non_c2 = sc->cpu_cx_count;
 		sc->cpu_non_c3 = sc->cpu_cx_count;
 		cx_ptr++;
 		sc->cpu_cx_count++;
@@ -795,6 +802,8 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
 	    continue;
 	case ACPI_STATE_C2:
 	    sc->cpu_non_c3 = sc->cpu_cx_count;
+	    if (cpu_deepest_sleep < 2)
+		cpu_deepest_sleep = 2;
 	    break;
 	case ACPI_STATE_C3:
 	default:
@@ -804,7 +813,7 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
 		    device_get_unit(sc->cpu_dev), i));
 		continue;
 	    } else
-		cpu_can_deep_sleep = 1;
+		cpu_deepest_sleep = 3;
 	    break;
 	}
 
@@ -993,7 +1002,9 @@ acpi_cpu_idle(sbintime_t sbt)
     if (sbt >= 0 && us > (sbt >> 12))
 	us = (sbt >> 12);
     cx_next_idx = 0;
-    if (cpu_disable_deep_sleep)
+    if (cpu_disable_c2_sleep)
+	i = min(sc->cpu_cx_lowest, sc->cpu_non_c2);
+    else if (cpu_disable_c3_sleep)
 	i = min(sc->cpu_cx_lowest, sc->cpu_non_c3);
     else
 	i = sc->cpu_cx_lowest;
@@ -1480,7 +1480,7 @@ cpu_idle(int busy)
 
 #ifndef XEN
 	/* Apply AMD APIC timer C1E workaround. */
-	if (cpu_ident_amdc1e && cpu_disable_deep_sleep) {
+	if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
 		msr = rdmsr(MSR_AMDK8_IPM);
 		if (msr & AMDK8_CMPHALT)
 			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
@@ -54,8 +54,9 @@ __FBSDID("$FreeBSD$");
 #include <machine/cpu.h>
 #include <machine/smp.h>
 
-int			cpu_can_deep_sleep = 0;	/* C3 state is available. */
-int			cpu_disable_deep_sleep = 0;	/* Timer dies in C3. */
+int			cpu_deepest_sleep = 0;	/* Deepest Cx state available. */
+int			cpu_disable_c2_sleep = 0;	/* Timer dies in C2. */
+int			cpu_disable_c3_sleep = 0;	/* Timer dies in C3. */
 
 static void		setuptimer(void);
 static void		loadtimer(sbintime_t now, int first);
@@ -605,7 +606,7 @@ cpu_initclocks_bsp(void)
 	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
 		periodic = 1;
 	if (timer->et_flags & ET_FLAGS_C3STOP)
-		cpu_disable_deep_sleep++;
+		cpu_disable_c3_sleep++;
 
 	/*
 	 * We honor the requested 'hz' value.
@@ -871,9 +872,9 @@ sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
 		configtimer(0);
 		et_free(timer);
 		if (et->et_flags & ET_FLAGS_C3STOP)
-			cpu_disable_deep_sleep++;
+			cpu_disable_c3_sleep++;
 		if (timer->et_flags & ET_FLAGS_C3STOP)
-			cpu_disable_deep_sleep--;
+			cpu_disable_c3_sleep--;
 		periodic = want_periodic;
 		timer = et;
 		et_init(timer, timercb, NULL, NULL);
@@ -1330,10 +1330,10 @@ tc_windup(void)
 	/* Now is a good time to change timecounters. */
 	if (th->th_counter != timecounter) {
 #ifndef __arm__
-		if ((timecounter->tc_flags & TC_FLAGS_C3STOP) != 0)
-			cpu_disable_deep_sleep++;
-		if ((th->th_counter->tc_flags & TC_FLAGS_C3STOP) != 0)
-			cpu_disable_deep_sleep--;
+		if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
+			cpu_disable_c2_sleep++;
+		if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
+			cpu_disable_c2_sleep--;
 #endif
 		th->th_counter = timecounter;
 		th->th_offset_count = ncount;
@@ -296,8 +296,9 @@ sbintime_t cpu_idleclock(void);
 void	cpu_activeclock(void);
 void	cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt);
 void	cpu_et_frequency(struct eventtimer *et, uint64_t newfreq);
-extern int	cpu_can_deep_sleep;
-extern int	cpu_disable_deep_sleep;
+extern int	cpu_deepest_sleep;
+extern int	cpu_disable_c2_sleep;
+extern int	cpu_disable_c3_sleep;
 
 int	cr_cansee(struct ucred *u1, struct ucred *u2);
 int	cr_canseesocket(struct ucred *cred, struct socket *so);
@@ -58,7 +58,7 @@ struct timecounter {
 	 * means "only use at explicit request".
 	 */
 	u_int			tc_flags;
-#define	TC_FLAGS_C3STOP		1	/* Timer dies in C3. */
+#define	TC_FLAGS_C2STOP		1	/* Timer dies in C2+. */
 #define	TC_FLAGS_SUSPEND_SAFE	2	/*
 					 * Timer functional across
 					 * suspend/resume.
@@ -522,16 +522,16 @@ init_TSC_tc(void)
 	}
 
 	/*
-	 * We cannot use the TSC if it stops incrementing in deep sleep.
-	 * Currently only Intel CPUs are known for this problem unless
-	 * the invariant TSC bit is set.
+	 * We cannot use the TSC if it stops incrementing while idle.
+	 * Intel CPUs without a C-state invariant TSC can stop the TSC
+	 * in either C2 or C3.
 	 */
-	if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
+	if (cpu_deepest_sleep >= 2 && cpu_vendor_id == CPU_VENDOR_INTEL &&
 	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
 		tsc_timecounter.tc_quality = -1000;
-		tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
+		tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
 		if (bootverbose)
-			printf("TSC timecounter disabled: C3 enabled.\n");
+			printf("TSC timecounter disabled: C2/C3 may halt it.\n");
 		goto init;
 	}
 
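For context on what "forces the use of the TSC as the timecounter" means in practice: the active timecounter and the per-CPU C-state cap are exposed as sysctls (kern.timecounter.hardware and dev.cpu.N.cx_lowest). Below is a minimal userland sketch, assuming a FreeBSD system, that only inspects both knobs:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	char buf[64];
	size_t len;

	/* Which timecounter is active (e.g. "TSC", "HPET", "ACPI-fast"). */
	len = sizeof(buf);
	if (sysctlbyname("kern.timecounter.hardware", buf, &len, NULL, 0) == 0)
		printf("timecounter: %s\n", buf);

	/* Deepest C-state the first CPU is allowed to enter (e.g. "C2"). */
	len = sizeof(buf);
	if (sysctlbyname("dev.cpu.0.cx_lowest", buf, &len, NULL, 0) == 0)
		printf("cpu0 cx_lowest: %s\n", buf);
	return (0);
}

Setting kern.timecounter.hardware to TSC on an affected CPU is exactly the case this change guards against: the TSC is marked TC_FLAGS_C2STOP, so the idle path stays out of both C2 and C3.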