acpi_cpu: separate the notion of the current deepest allowed+available Cx level

... from the user-set persistent limit on that level.
Allow the user-imposed limit to be set below the current deepest
available level, since the set of available levels may be changed
dynamically by the ACPI platform in either direction.
Allow "Cmax" as an input value for the cx_lowest sysctls to mean that
there is no limit and the OS can use all available C-states.
Retire the global cpu_cx_count, as it no longer serves any meaningful
purpose.
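
After this change the driver effectively keeps two values per CPU: the
persistent user limit (cpu_cx_lowest_lim) and the effective deepest
state (cpu_cx_lowest), which is re-derived from the limit whenever the
set of available states changes. A minimal standalone sketch of that
relationship follows; the state counts and the MIN macro are
illustrative only, not the driver code itself:

    #include <stdio.h>

    #define MIN(a, b)	((a) < (b) ? (a) : (b))

    int
    main(void)
    {
	int cx_lowest_lim = 7;	/* user set "Cmax": C8, index 7 */
	int cx_count;		/* Cx states currently offered by _CST */

	/* The platform first offers C1..C3, then withdraws C3. */
	for (cx_count = 3; cx_count >= 2; cx_count--) {
		int cx_lowest = MIN(cx_lowest_lim, cx_count - 1);

		printf("limit C%d, %d states -> using C%d\n",
		    cx_lowest_lim + 1, cx_count, cx_lowest + 1);
	}
	return (0);
    }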

Reviewed by:	jhb, gianni, sbruno
Tested by:	sbruno, Vitaly Magerya <vmagerya@gmail.com>
MFC after:	2 weeks
commit d30b88af05 (parent 1424b561e1)
Author:	Andriy Gapon
Date:	2012-07-13 08:11:55 +00:00


@@ -89,6 +89,7 @@ struct acpi_cpu_softc {
     struct sysctl_ctx_list	 cpu_sysctl_ctx;
     struct sysctl_oid		*cpu_sysctl_tree;
     int				 cpu_cx_lowest;
+    int				 cpu_cx_lowest_lim;
     char			 cpu_cx_supported[64];
     int				 cpu_rid;
 };
@@ -138,13 +139,12 @@ static int cpu_quirks; /* Indicate any hardware bugs. */
 
 /* Runtime state. */
 static int	cpu_disable_idle; /* Disable entry to idle function */
-static int	cpu_cx_count;	/* Number of valid Cx states */
 
 /* Values for sysctl. */
 static struct sysctl_ctx_list cpu_sysctl_ctx;
 static struct sysctl_oid *cpu_sysctl_tree;
 static int	cpu_cx_generic;
-static int	cpu_cx_lowest;
+static int	cpu_cx_lowest_lim;
 
 static device_t	*cpu_devices;
 static int	cpu_ndevices;
@@ -173,7 +173,7 @@ static void acpi_cpu_idle(void);
 static void	acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
 static int	acpi_cpu_quirks(void);
 static int	acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
-static int	acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc, int val);
+static int	acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
 static int	acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
 static int	acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
@@ -590,6 +590,7 @@ acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
     /* Use initial sleep value of 1 sec. to start with lowest idle state. */
     sc->cpu_prev_sleep = 1000000;
     sc->cpu_cx_lowest = 0;
+    sc->cpu_cx_lowest_lim = 0;
 
     /*
      * Check for the ACPI 2.0 _CST sleep states object. If we can't find
@@ -820,7 +821,6 @@ acpi_cpu_startup(void *arg)
      */
     acpi_cpu_quirks();
 
-    cpu_cx_count = 0;
     if (cpu_cx_generic) {
 	/*
 	 * We are using generic Cx mode, probe for available Cx states
@@ -829,24 +829,10 @@ acpi_cpu_startup(void *arg)
 	for (i = 0; i < cpu_ndevices; i++) {
 	    sc = device_get_softc(cpu_devices[i]);
 	    acpi_cpu_generic_cx_probe(sc);
-	    if (sc->cpu_cx_count > cpu_cx_count)
-		cpu_cx_count = sc->cpu_cx_count;
-	}
-
-	/*
-	 * Find the highest Cx state common to all CPUs
-	 * in the system, taking quirks into account.
-	 */
-	for (i = 0; i < cpu_ndevices; i++) {
-	    sc = device_get_softc(cpu_devices[i]);
-	    if (sc->cpu_cx_count < cpu_cx_count)
-		cpu_cx_count = sc->cpu_cx_count;
 	}
     } else {
 	/*
 	 * We are using _CST mode, remove C3 state if necessary.
-	 * Update the largest Cx state supported in the global cpu_cx_count.
-	 * It will be used in the global Cx sysctl handler.
 	 * As we now know for sure that we will be using _CST mode
 	 * install our notify handler.
 	 */
@@ -855,8 +841,6 @@ acpi_cpu_startup(void *arg)
 	    if (cpu_quirks & CPU_QUIRK_NO_C3) {
 		sc->cpu_cx_count = sc->cpu_non_c3 + 1;
 	    }
-	    if (sc->cpu_cx_count > cpu_cx_count)
-		cpu_cx_count = sc->cpu_cx_count;
 	    AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
 		acpi_cpu_notify, sc);
 	}
@@ -875,7 +859,7 @@ acpi_cpu_startup(void *arg)
 	"Global lowest Cx sleep state to use");
 
     /* Take over idling from cpu_idle_default(). */
-    cpu_cx_lowest = 0;
+    cpu_cx_lowest_lim = 0;
     cpu_disable_idle = FALSE;
     cpu_idle_hook = acpi_cpu_idle;
 }
@@ -1058,8 +1042,6 @@ static void
 acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
 {
     struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;
-    struct acpi_cpu_softc *isc;
-    int i;
 
     if (notify != ACPI_NOTIFY_CX_STATES)
 	return;
@@ -1068,16 +1050,8 @@ acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
     acpi_cpu_cx_cst(sc);
     acpi_cpu_cx_list(sc);
 
-    /* Update the new lowest useable Cx state for all CPUs. */
     ACPI_SERIAL_BEGIN(cpu);
-    cpu_cx_count = 0;
-    for (i = 0; i < cpu_ndevices; i++) {
-	isc = device_get_softc(cpu_devices[i]);
-	if (isc->cpu_cx_count > cpu_cx_count)
-	    cpu_cx_count = isc->cpu_cx_count;
-    }
-    if (sc->cpu_cx_lowest < cpu_cx_lowest)
-	acpi_cpu_set_cx_lowest(sc, min(cpu_cx_lowest, sc->cpu_cx_count - 1));
+    acpi_cpu_set_cx_lowest(sc);
     ACPI_SERIAL_END(cpu);
 }
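
Note the simplification here: because each softc now carries its own
persistent cpu_cx_lowest_lim, the notify handler no longer has to scan
every CPU to recompute a global cpu_cx_count; it simply re-clamps the
one CPU whose _CST actually changed.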
@@ -1205,12 +1179,12 @@ acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
 }
 
 static int
-acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc, int val)
+acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc)
 {
     int i;
 
     ACPI_SERIAL_ASSERT(cpu);
-    sc->cpu_cx_lowest = val;
+    sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1);
 
     /* If not disabling, cache the new lowest non-C3 state. */
     sc->cpu_non_c3 = 0;
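
A worked example of the new clamp: with the limit left at Cmax,
cpu_cx_lowest_lim is MAX_CX_STATES - 1 (7, assuming this driver's usual
MAX_CX_STATES of 8). If _CST currently offers three states,
min(7, 3 - 1) picks index 2, i.e. C3; should the platform later withdraw
C3, the same unchanged limit yields C2, and re-offering C3 restores it.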
@@ -1234,18 +1208,23 @@ acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
     int val, error;
 
     sc = (struct acpi_cpu_softc *) arg1;
-    snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest + 1);
+    snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1);
     error = sysctl_handle_string(oidp, state, sizeof(state), req);
     if (error != 0 || req->newptr == NULL)
 	return (error);
     if (strlen(state) < 2 || toupper(state[0]) != 'C')
 	return (EINVAL);
-    val = (int) strtol(state + 1, NULL, 10) - 1;
-    if (val < 0 || val > sc->cpu_cx_count - 1)
-	return (EINVAL);
+    if (strcasecmp(state, "Cmax") == 0)
+	val = MAX_CX_STATES;
+    else {
+	val = (int) strtol(state + 1, NULL, 10);
+	if (val < 1 || val > MAX_CX_STATES)
+	    return (EINVAL);
+    }
 
     ACPI_SERIAL_BEGIN(cpu);
-    acpi_cpu_set_cx_lowest(sc, val);
+    sc->cpu_cx_lowest_lim = val - 1;
+    acpi_cpu_set_cx_lowest(sc);
     ACPI_SERIAL_END(cpu);
 
     return (0);
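
With "Cmax" now accepted (case-insensitively, via strcasecmp), the
per-CPU knob can be exercised as below; the sysctl names are the stock
FreeBSD ones. Reading the value back after setting Cmax reports C8,
since the handler prints cpu_cx_lowest_lim + 1:

    # sysctl dev.cpu.0.cx_lowest=C2
    # sysctl dev.cpu.0.cx_lowest=Cmax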
@@ -1258,22 +1237,27 @@ acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
     char state[8];
     int val, error, i;
 
-    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest + 1);
+    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1);
     error = sysctl_handle_string(oidp, state, sizeof(state), req);
     if (error != 0 || req->newptr == NULL)
 	return (error);
     if (strlen(state) < 2 || toupper(state[0]) != 'C')
 	return (EINVAL);
-    val = (int) strtol(state + 1, NULL, 10) - 1;
-    if (val < 0 || val > cpu_cx_count - 1)
-	return (EINVAL);
-    cpu_cx_lowest = val;
+    if (strcasecmp(state, "Cmax") == 0)
+	val = MAX_CX_STATES;
+    else {
+	val = (int) strtol(state + 1, NULL, 10);
+	if (val < 1 || val > MAX_CX_STATES)
+	    return (EINVAL);
+    }
 
     /* Update the new lowest useable Cx state for all CPUs. */
     ACPI_SERIAL_BEGIN(cpu);
+    cpu_cx_lowest_lim = val - 1;
     for (i = 0; i < cpu_ndevices; i++) {
 	sc = device_get_softc(cpu_devices[i]);
-	acpi_cpu_set_cx_lowest(sc, min(val, sc->cpu_cx_count - 1));
+	sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim;
+	acpi_cpu_set_cx_lowest(sc);
     }
     ACPI_SERIAL_END(cpu);
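
The global handler pushes one shared limit to all CPUs, e.g.:

    # sysctl hw.acpi.cpu.cx_lowest=Cmax

Each CPU then clamps that limit against its own cpu_cx_count in
acpi_cpu_set_cx_lowest(), so CPUs with differing _CST tables may end up
in different effective states.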