/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#endif
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource	*p_lvlx;	/* Register to read to enter state. */
    uint32_t		 type;		/* C1-3 (C4 and up treated as C3). */
    uint32_t		 trans_lat;	/* Transition latency (usec). */
    uint32_t		 power;		/* Power consumed (mW). */
    int			 res_type;	/* Resource type for p_lvlx. */
    int			 res_rid;	/* Resource ID for p_lvlx. */
    bool		 do_mwait;
    uint32_t		 mwait_hint;
    bool		 mwait_hw_coord;
    bool		 mwait_bm_avoidance;
};
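
/*
 * Note: on x86 the do_mwait/mwait_* fields above are only filled in for
 * _CST entries that use the Intel Functional Fixed Hardware (FFH) GAS
 * encoding; see acpi_cpu_cx_cst_mwait() below.
 */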

#define MAX_CX_STATES	 8

struct acpi_cpu_softc {
    device_t		 cpu_dev;
    ACPI_HANDLE		 cpu_handle;
    struct pcpu		*cpu_pcpu;
    uint32_t		 cpu_acpi_id;	/* ACPI processor id */
    uint32_t		 cpu_p_blk;	/* ACPI P_BLK location */
    uint32_t		 cpu_p_blk_len;	/* P_BLK length (must be 6). */
    struct acpi_cx	 cpu_cx_states[MAX_CX_STATES];
    int			 cpu_cx_count;	/* Number of valid Cx states. */
    int			 cpu_prev_sleep;/* Last idle sleep duration. */
    int			 cpu_features;	/* Child driver supported features. */
    /* Runtime state. */
    int			 cpu_non_c2;	/* Index of lowest non-C2 state. */
    int			 cpu_non_c3;	/* Index of lowest non-C3 state. */
    u_int		 cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */
    /* Values for sysctl. */
    struct sysctl_ctx_list cpu_sysctl_ctx;
    struct sysctl_oid	*cpu_sysctl_tree;
    int			 cpu_cx_lowest;
    int			 cpu_cx_lowest_lim;
    int			 cpu_disable_idle; /* Disable entry to idle function */
    char		 cpu_cx_supported[64];
};

struct acpi_cpu_device {
    struct resource_list ad_rl;
};

#define CPU_GET_REG(reg, width) 					\
    (bus_space_read_ ## width(rman_get_bustag((reg)), 			\
		      rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)					\
    (bus_space_write_ ## width(rman_get_bustag((reg)), 		\
		       rman_get_bushandle((reg)), 0, (val)))

#define PM_USEC(x)	 ((x) >> 2)	/* ~4 clocks per usec (3.57955 Mhz) */

#define ACPI_NOTIFY_CX_STATES	0x81	/* _CST changed. */

#define CPU_QUIRK_NO_C3		(1<<0)	/* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL	(1<<2)	/* No bus mastering control. */

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
#define PIIX4_DEVACTB_REG	0x58
#define PIIX4_BRLD_EN_IRQ0	(1<<0)
#define PIIX4_BRLD_EN_IRQ	(1<<1)
#define PIIX4_BRLD_EN_IRQ8	(1<<5)
#define PIIX4_STOP_BREAK_MASK	(PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN	(1<<10)

#define CST_FFH_VENDOR_INTEL	1
#define CST_FFH_INTEL_CL_C1IO	1
#define CST_FFH_INTEL_CL_MWAIT	2
#define CST_FFH_MWAIT_HW_COORD	0x0001
#define CST_FFH_MWAIT_BM_AVOID	0x0002

/* Allow users to ignore processor orders in MADT. */
static int cpu_unordered;
SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN,
    &cpu_unordered, 0,
    "Do not use the MADT to match ACPI Processor objects to CPUs.");

/* Knob to disable acpi_cpu devices */
bool acpi_cpu_disabled = false;

/* Platform hardware resource information. */
static uint32_t		 cpu_smi_cmd;	/* Value to write to SMI_CMD. */
static uint8_t		 cpu_cst_cnt;	/* Indicate we are _CST aware. */
static int		 cpu_quirks;	/* Indicate any hardware bugs. */

/* Values for sysctl. */
static struct sysctl_ctx_list cpu_sysctl_ctx;
static struct sysctl_oid *cpu_sysctl_tree;
static int		 cpu_cx_generic;
static int		 cpu_cx_lowest_lim;

static device_t		*cpu_devices;
static int		 cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");

static int	acpi_cpu_probe(device_t dev);
static int	acpi_cpu_attach(device_t dev);
static int	acpi_cpu_suspend(device_t dev);
static int	acpi_cpu_resume(device_t dev);
static int	acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id,
		    uint32_t *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t	acpi_cpu_add_child(device_t dev, u_int order, const char *name,
		    int unit);
static int	acpi_cpu_read_ivar(device_t dev, device_t child, int index,
		    uintptr_t *result);
static int	acpi_cpu_shutdown(device_t dev);
static void	acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void	acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void	acpi_cpu_startup(void *arg);
static void	acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void	acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
#if defined(__i386__) || defined(__amd64__)
static void	acpi_cpu_idle(sbintime_t sbt);
#endif
static void	acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static void	acpi_cpu_quirks(void);
static void	acpi_cpu_quirks_piix4(void);
static int	acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
#if defined(__i386__) || defined(__amd64__)
static int	acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS);
#endif

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cpu_probe),
    DEVMETHOD(device_attach,	acpi_cpu_attach),
    DEVMETHOD(device_detach,	bus_generic_detach),
    DEVMETHOD(device_shutdown,	acpi_cpu_shutdown),
    DEVMETHOD(device_suspend,	acpi_cpu_suspend),
    DEVMETHOD(device_resume,	acpi_cpu_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,	acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar,	acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    DEVMETHOD_END
};

static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);

static int
acpi_cpu_probe(device_t dev)
{
    int			   acpi_id, cpu_id;
    ACPI_BUFFER		   buf;
    ACPI_HANDLE		   handle;
    ACPI_OBJECT		   *obj;
    ACPI_STATUS		   status;

    if (acpi_disabled("cpu") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR ||
	    acpi_cpu_disabled)
	return (ENXIO);

    handle = acpi_get_handle(dev);
    if (cpu_softc == NULL)
	cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
	    (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    /* Get our Processor object. */
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "probe failed to get Processor obj - %s\n",
	    AcpiFormatException(status));
	return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    if (obj->Type != ACPI_TYPE_PROCESSOR) {
	device_printf(dev, "Processor object has bad type %d\n", obj->Type);
	AcpiOsFree(obj);
	return (ENXIO);
    }

    /*
     * Find the processor associated with our unit.  We could use the
     * ProcId as a key, however, some boxes do not have the same values
     * in their Processor object as the ProcId values in the MADT.
     */
    acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    if (acpi_pcpu_get_id(dev, &acpi_id, &cpu_id) != 0)
	return (ENXIO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
	return (ENXIO);

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    acpi_set_private(dev, (void*)(intptr_t)cpu_id);
    device_set_desc(dev, "ACPI CPU");

    return (0);
}

static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER		   buf;
    ACPI_OBJECT		   arg, *obj;
    ACPI_OBJECT_LIST	   arglist;
    struct pcpu		   *pcpu_data;
    struct acpi_cpu_softc *sc;
    struct acpi_softc	  *acpi_sc;
    ACPI_STATUS		   status;
    u_int		   features;
    int			   cpu_id, drv_count, i;
    driver_t 		  **drivers;
    uint32_t		   cap_set[3];

    /* UUID needed by _OSC evaluation */
    static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
				       0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
				       0x58, 0x71, 0x39, 0x53 };
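    /*
     * The byte order above appears to encode the processor-device _OSC
     * UUID 4077A616-290C-47BE-9EBD-D87058713953 described in the Intel
     * Processor Vendor-Specific ACPI Interface Specification.
     */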

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = (int)(intptr_t)acpi_get_private(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "attach failed to get Processor obj - %s\n",
	    AcpiFormatException(status));
	return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    sc->cpu_acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
		     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
	/* Assume we won't be using generic Cx mode by default */
	cpu_cx_generic = FALSE;

	/* Install hw.acpi.cpu sysctl tree */
	acpi_sc = acpi_device_get_parent_softc(dev);
	sysctl_ctx_init(&cpu_sysctl_ctx);
	cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx,
	    SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
	    CTLFLAG_RD, 0, "node for CPU children");
    }

    /*
     * Before calling any CPU methods, collect child driver feature hints
     * and notify ACPI of them.  We support unified SMP power control
     * so advertise this ourselves.  Note this is not the same as independent
     * SMP control where each CPU can have different settings.
     */
    sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3 |
	ACPI_CAP_C1_IO_HALT;

#if defined(__i386__) || defined(__amd64__)
    /*
     * Ask for MWAIT modes if not disabled and interrupts work
     * reasonably with MWAIT.
     */
    if (!acpi_disabled("mwait") && cpu_mwait_usable())
	sc->cpu_features |= ACPI_CAP_SMP_C1_NATIVE | ACPI_CAP_SMP_C3_NATIVE;
#endif

    if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) {
	for (i = 0; i < drv_count; i++) {
	    if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
		sc->cpu_features |= features;
	}
	free(drivers, M_TEMP);
    }

    /*
     * CPU capabilities are specified in the
     * Intel Processor Vendor-Specific ACPI Interface Specification.
     */
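    /*
     * Try the standard _OSC method first; if its evaluation fails, fall
     * back to the older _PDC method with an equivalent capability buffer
     * (revision, count of capability integers, capability bits).
     */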
    if (sc->cpu_features) {
	cap_set[1] = sc->cpu_features;
	status = acpi_EvaluateOSC(sc->cpu_handle, cpu_oscuuid, 1, 2, cap_set,
	    cap_set, false);
	if (ACPI_SUCCESS(status)) {
	    if (cap_set[0] != 0)
		device_printf(dev, "_OSC returned status %#x\n", cap_set[0]);
	}
	else {
	    arglist.Pointer = &arg;
	    arglist.Count = 1;
	    arg.Type = ACPI_TYPE_BUFFER;
	    arg.Buffer.Length = sizeof(cap_set);
	    arg.Buffer.Pointer = (uint8_t *)cap_set;
	    cap_set[0] = 1; /* revision */
	    cap_set[1] = 1; /* number of capabilities integers */
	    cap_set[2] = sc->cpu_features;
	    AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
	}
    }

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    return (0);
}

static void
acpi_cpu_postattach(void *unused __unused)
{
    device_t *devices;
    int err;
    int i, n;
    int attached;

    err = devclass_get_devices(acpi_cpu_devclass, &devices, &n);
    if (err != 0) {
	printf("devclass_get_devices(acpi_cpu_devclass) failed\n");
	return;
    }
    attached = 0;
    for (i = 0; i < n; i++)
	if (device_is_attached(devices[i]) &&
	    device_get_driver(devices[i]) == &acpi_cpu_driver)
	    attached = 1;
    for (i = 0; i < n; i++)
	bus_generic_probe(devices[i]);
    for (i = 0; i < n; i++)
	bus_generic_attach(devices[i]);
    free(devices, M_TEMP);

    if (attached) {
#ifdef EARLY_AP_STARTUP
	acpi_cpu_startup(NULL);
#else
	/* Queue post cpu-probing task handler */
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
#endif
    }
}

SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
    acpi_cpu_postattach, NULL);

static void
disable_idle(struct acpi_cpu_softc *sc)
{
    cpuset_t cpuset;

    CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset);
    sc->cpu_disable_idle = TRUE;

    /*
     * Ensure that the CPU is not in idle state or in acpi_cpu_idle().
     * Note that this code depends on the fact that the rendezvous IPI
     * can not penetrate context where interrupts are disabled and acpi_cpu_idle
     * is called and executed in such a context with interrupts being re-enabled
     * right before return.
     */
    smp_rendezvous_cpus(cpuset, smp_no_rendevous_barrier, NULL,
	smp_no_rendevous_barrier, NULL);
}

static void
enable_idle(struct acpi_cpu_softc *sc)
{

    sc->cpu_disable_idle = FALSE;
}

#if defined(__i386__) || defined(__amd64__)
static int
is_idle_disabled(struct acpi_cpu_softc *sc)
{

    return (sc->cpu_disable_idle);
}
#endif

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
	return (error);
    disable_idle(device_get_softc(dev));
    return (0);
}

static int
acpi_cpu_resume(device_t dev)
{

    enable_idle(device_get_softc(dev));
    return (bus_generic_resume(dev));
}

/*
 * Find the processor associated with a given ACPI ID.  By default,
 * use the MADT to map ACPI IDs to APIC IDs and use that to locate a
 * processor.  Some systems have inconsistent ASL and MADT however.
 * For these systems the cpu_unordered tunable can be set in which
 * case we assume that Processor objects are listed in the same order
 * in both the MADT and ASL.
 */
static int
acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu	*pc;
    uint32_t	 i, idx;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    idx = device_get_unit(dev);

    /*
     * If pc_acpi_id for CPU 0 is not initialized (e.g. a non-APIC
     * UP box) use the ACPI ID from the first processor we find.
     */
    if (idx == 0 && mp_ncpus == 1) {
	pc = pcpu_find(0);
	if (pc->pc_acpi_id == 0xffffffff)
	    pc->pc_acpi_id = *acpi_id;
	*cpu_id = 0;
	return (0);
    }

    CPU_FOREACH(i) {
	pc = pcpu_find(i);
	KASSERT(pc != NULL, ("no pcpu data for %d", i));
	if (cpu_unordered) {
	    if (idx-- == 0) {
		/*
		 * If pc_acpi_id doesn't match the ACPI ID from the
		 * ASL, prefer the MADT-derived value.
		 */
		if (pc->pc_acpi_id != *acpi_id)
		    *acpi_id = pc->pc_acpi_id;
		*cpu_id = pc->pc_cpuid;
		return (0);
	    }
	} else {
	    if (pc->pc_acpi_id == *acpi_id) {
		if (bootverbose)
		    device_printf(dev,
			"Processor %s (ACPI ID %u) -> APIC ID %d\n",
			acpi_name(acpi_get_handle(dev)), *acpi_id,
			pc->pc_cpuid);
		*cpu_id = pc->pc_cpuid;
		return (0);
	    }
	}
    }

    if (bootverbose)
	printf("ACPI: Processor %s (ACPI ID %u) ignored\n",
	    acpi_name(acpi_get_handle(dev)), *acpi_id);

    return (ESRCH);
}

static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
	return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
	return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
	device_set_ivars(child, ad);
    else
	free(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
	*result = (uintptr_t)sc->cpu_handle;
	break;
    case CPU_IVAR_PCPU:
	*result = (uintptr_t)sc->cpu_pcpu;
	break;
#if defined(__amd64__) || defined(__i386__)
    case CPU_IVAR_NOMINAL_MHZ:
	if (tsc_is_invariant) {
	    *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000);
	    break;
	}
	/* FALLTHROUGH */
#endif
    default:
	return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.
     */
    disable_idle(device_get_softc(dev));

    /*
     * CPU devices are not truly detached and remain referenced,
     * so their resources are not freed.
     */

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_lim = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method, which
     * will be handled by acpi_cpu_startup.  Probing for generic Cx states
     * is deferred until after all cpus in the system have been probed,
     * since we may already have found cpus with valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
	/*
	 * We were unable to find a _CST package for this cpu or there
	 * was an error parsing it. Switch back to generic mode.
	 */
	cpu_cx_generic = TRUE;
	if (bootverbose)
	    device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS	 gas;
    struct acpi_cx		*cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_non_c2 = sc->cpu_cx_count;
    sc->cpu_non_c3 = sc->cpu_cx_count;
    sc->cpu_cx_count++;
    cpu_deepest_sleep = 1;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present so we
     * take 5 as C2.  Some may also have a value of 7 to indicate
     * another C3 but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
	return;
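
    /*
     * P_BLK layout per the ACPI spec: P_CNT occupies bytes 0-3, the
     * P_LVL2 register is the byte at offset 4 and P_LVL3 the byte at
     * offset 5, hence the cpu_p_blk + 4 and + 5 addresses used below.
     */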
    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
	gas.Address = sc->cpu_p_blk + 4;
	cx_ptr->res_rid = 0;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
	    &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr++;
	    sc->cpu_non_c3 = sc->cpu_cx_count;
	    sc->cpu_cx_count++;
	    cpu_deepest_sleep = 2;
	}
    }
    if (sc->cpu_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
	gas.Address = sc->cpu_p_blk + 5;
	cx_ptr->res_rid = 1;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
	    &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	    cpu_deepest_sleep = 3;
	}
    }
}

#if defined(__i386__) || defined(__amd64__)
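/*
 * Record an MWAIT-based entry method for a Cx state.  For Intel FFH _CST
 * entries, acpi_PkgFFH_IntelCpu() (used by the callers below) yields the
 * vendor and class codes, an address value that doubles as the MWAIT hint
 * (or an I/O port for the C1 I/O class), and flag bits describing hardware
 * coordination and bus-master avoidance (CST_FFH_MWAIT_*).
 */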
static void
acpi_cpu_cx_cst_mwait(struct acpi_cx *cx_ptr, uint64_t address, int accsize)
{

    cx_ptr->do_mwait = true;
    cx_ptr->mwait_hint = address & 0xffffffff;
    cx_ptr->mwait_hw_coord = (accsize & CST_FFH_MWAIT_HW_COORD) != 0;
    cx_ptr->mwait_bm_avoidance = (accsize & CST_FFH_MWAIT_BM_AVOID) != 0;
}
#endif

static void
acpi_cpu_cx_cst_free_plvlx(device_t cpu_dev, struct acpi_cx *cx_ptr)
{

    if (cx_ptr->p_lvlx == NULL)
	return;
    bus_release_resource(cpu_dev, cx_ptr->res_type, cx_ptr->res_rid,
	cx_ptr->p_lvlx);
    cx_ptr->p_lvlx = NULL;
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx	*cx_ptr;
    ACPI_STATUS		 status;
    ACPI_BUFFER		 buf;
    ACPI_OBJECT		*top;
    ACPI_OBJECT		*pkg;
    uint32_t		 count;
    int			 i;
#if defined(__i386__) || defined(__amd64__)
    uint64_t		 address;
    int			 vendor, class, accsize;
#endif

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cpu_dev, "invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
	device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
	    count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    sc->cpu_non_c2 = 0;
    sc->cpu_non_c3 = 0;
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /*
     * C1 has been required since just after ACPI 1.0.
     * Reserve the first slot for it.
     */
    cx_ptr->type = ACPI_STATE_C0;
    cx_ptr++;
    sc->cpu_cx_count++;
    cpu_deepest_sleep = 1;

    /* Set up all valid states. */
    for (i = 0; i < count; i++) {
	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {

	    device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
	    continue;
	}

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);
#if defined(__i386__) || defined(__amd64__)
	    if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
	      &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL) {
		if (class == CST_FFH_INTEL_CL_C1IO) {
		    /* C1 I/O then Halt */
		    cx_ptr->res_rid = sc->cpu_cx_count;
		    bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT,
		      cx_ptr->res_rid, address, 1);
		    cx_ptr->p_lvlx = bus_alloc_resource_any(sc->cpu_dev,
		      SYS_RES_IOPORT, &cx_ptr->res_rid, RF_ACTIVE |
		      RF_SHAREABLE);
		    if (cx_ptr->p_lvlx == NULL) {
			bus_delete_resource(sc->cpu_dev, SYS_RES_IOPORT,
			  cx_ptr->res_rid);
			device_printf(sc->cpu_dev,
			    "C1 I/O failed to allocate port %d, "
			    "degrading to C1 Halt\n", (int)address);
		    }
		} else if (class == CST_FFH_INTEL_CL_MWAIT) {
		    acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
		}
	    }
#endif
	    if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
		/* This is the first C1 state.  Use the reserved slot. */
		sc->cpu_cx_states[0] = *cx_ptr;
	    } else {
		sc->cpu_non_c2 = sc->cpu_cx_count;
		sc->cpu_non_c3 = sc->cpu_cx_count;
		cx_ptr++;
		sc->cpu_cx_count++;
	    }
	    continue;
	case ACPI_STATE_C2:
	    sc->cpu_non_c3 = sc->cpu_cx_count;
	    if (cpu_deepest_sleep < 2)
		cpu_deepest_sleep = 2;
	    break;
	case ACPI_STATE_C3:
	default:
	    if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu%d: C3[%d] not available.\n",
		    device_get_unit(sc->cpu_dev), i));
		continue;
	    } else
		cpu_deepest_sleep = 3;
	    break;
	}

	/* Free up any previous register. */
	acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);

	/* Allocate the control register for C2 or C3. */
#if defined(__i386__) || defined(__amd64__)
	if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
	  &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL &&
	  class == CST_FFH_INTEL_CL_MWAIT) {
	    /* Native C State Instruction use (mwait) */
	    acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
	      "acpi_cpu%d: Got C%d/mwait - %d latency\n",
	      device_get_unit(sc->cpu_dev), cx_ptr->type, cx_ptr->trans_lat));
	    cx_ptr++;
	    sc->cpu_cx_count++;
	} else
#endif
	{
	    cx_ptr->res_rid = sc->cpu_cx_count;
	    acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type,
		&cx_ptr->res_rid, &cx_ptr->p_lvlx, RF_SHAREABLE);
	    if (cx_ptr->p_lvlx) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu%d: Got C%d - %d latency\n",
		    device_get_unit(sc->cpu_dev), cx_ptr->type,
		    cx_ptr->trans_lat));
		cx_ptr++;
		sc->cpu_cx_count++;
	    }
	}
    }
    AcpiOsFree(buf.Pointer);

    /* If C1 state was not found, we need one now. */
    cx_ptr = sc->cpu_cx_states;
    if (cx_ptr->type == ACPI_STATE_C0) {
	cx_ptr->type = ACPI_STATE_C1;
	cx_ptr->trans_lat = 0;
    }

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
	/*
	 * We are using generic Cx mode, probe for available Cx states
	 * for all processors.
	 */
	for (i = 0; i < cpu_ndevices; i++) {
	    sc = device_get_softc(cpu_devices[i]);
	    acpi_cpu_generic_cx_probe(sc);
	}
    } else {
	/*
	 * We are using _CST mode, remove C3 state if necessary.
	 * As we now know for sure that we will be using _CST mode,
	 * install our notify handler.
	 */
	for (i = 0; i < cpu_ndevices; i++) {
	    sc = device_get_softc(cpu_devices[i]);
	    if (cpu_quirks & CPU_QUIRK_NO_C3) {
		sc->cpu_cx_count = min(sc->cpu_cx_count, sc->cpu_non_c3 + 1);
	    }
	    AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
		acpi_cpu_notify, sc);
	}
    }

    /* Perform Cx final initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	acpi_cpu_startup_cx(sc);
    }

    /* Add a sysctl handler to handle global Cx lowest setting */
    SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
	OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
	NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
	"Global lowest Cx sleep state to use");

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest_lim = 0;
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	enable_idle(sc);
    }
#if defined(__i386__) || defined(__amd64__)
    cpu_idle_hook = acpi_cpu_idle;
#endif
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
	SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++)
	sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type,
	    sc->cpu_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx,
		      SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
		      OID_AUTO, "cx_supported", CTLFLAG_RD,
		      sc->cpu_cx_supported, 0,
		      "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
		    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
		    (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
		    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
		    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
		    "percent usage for each Cx state");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
		    OID_AUTO, "cx_usage_counters", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cpu_usage_counters_sysctl, "A",
		    "Cx sleep state counters");
#if defined(__i386__) || defined(__amd64__)
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
		    OID_AUTO, "cx_method", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cpu_method_sysctl, "A",
		    "Cx entrance methods");
#endif

    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
	ACPI_LOCK(acpi);
	AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
	ACPI_UNLOCK(acpi);
    }
}

#if defined(__i386__) || defined(__amd64__)
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(sbintime_t sbt)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx	*cx_next;
    uint64_t		 cputicks;
    uint32_t		 start_time, end_time;
    ACPI_STATUS		 status;
    int			 bm_active, cx_next_idx, i, us;

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
	acpi_cpu_c1();
	return;
    }

    /* If disabled, take the safe path. */
    if (is_idle_disabled(sc)) {
	acpi_cpu_c1();
	return;
    }

    /* Find the lowest state that has small enough latency. */
    us = sc->cpu_prev_sleep;
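    /*
     * sbt is in 32.32 fixed-point seconds, so (sbt >> 12) is roughly the
     * time until the next event in microseconds (2^32 / 10^6 is ~2^12).
     */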
|
2013-02-28 11:27:01 +00:00
|
|
|
if (sbt >= 0 && us > (sbt >> 12))
|
|
|
|
us = (sbt >> 12);
|
2009-05-02 22:30:33 +00:00
|
|
|
cx_next_idx = 0;
|
2015-01-05 20:44:44 +00:00
|
|
|
if (cpu_disable_c2_sleep)
|
|
|
|
i = min(sc->cpu_cx_lowest, sc->cpu_non_c2);
|
|
|
|
else if (cpu_disable_c3_sleep)
|
2010-09-22 11:32:22 +00:00
|
|
|
i = min(sc->cpu_cx_lowest, sc->cpu_non_c3);
|
2012-07-02 17:55:29 +00:00
|
|
|
else
|
2010-09-13 07:25:35 +00:00
|
|
|
i = sc->cpu_cx_lowest;
|
|
|
|
for (; i >= 0; i--) {
|
2013-02-28 10:46:54 +00:00
|
|
|
if (sc->cpu_cx_states[i].trans_lat * 3 <= us) {
|
2009-05-02 22:30:33 +00:00
|
|
|
cx_next_idx = i;
|
|
|
|
break;
|
|
|
|
}
|
2005-10-25 21:15:47 +00:00
|
|
|
}
|
2003-11-19 20:27:06 +00:00
|
|
|
|
2003-11-15 19:26:06 +00:00
|
|
|
/*
|
|
|
|
* Check for bus master activity. If there was activity, clear
|
|
|
|
* the bit and use the lowest non-C3 state. Note that the USB
|
|
|
|
* driver polling for new devices keeps this bit set all the
|
2004-10-11 21:15:10 +00:00
|
|
|
* time if USB is loaded.
|
2003-11-15 19:26:06 +00:00
|
|
|
*/
|
2012-07-31 10:58:50 +00:00
|
|
|
if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 &&
|
|
|
|
cx_next_idx > sc->cpu_non_c3) {
|
2015-06-09 23:13:37 +00:00
|
|
|
status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
|
|
|
|
if (ACPI_SUCCESS(status) && bm_active != 0) {
|
2009-06-05 18:44:36 +00:00
|
|
|
AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
|
2012-07-31 10:58:50 +00:00
|
|
|
cx_next_idx = sc->cpu_non_c3;
|
2004-10-11 06:06:42 +00:00
|
|
|
}
|
2004-10-11 21:15:10 +00:00
|
|
|
}
|
2003-11-15 19:26:06 +00:00
|
|
|
|
2004-06-05 07:02:18 +00:00
|
|
|
/* Select the next state and update statistics. */
|
|
|
|
cx_next = &sc->cpu_cx_states[cx_next_idx];
|
2007-01-07 21:53:42 +00:00
|
|
|
sc->cpu_cx_stats[cx_next_idx]++;
|
2004-06-05 07:02:18 +00:00
|
|
|
KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));
|
2003-11-15 19:26:06 +00:00
|
|
|
|
2004-06-05 07:02:18 +00:00
|
|
|
/*
|
|
|
|
* Execute HLT (or equivalent) and wait for an interrupt. We can't
|
2010-06-19 08:36:12 +00:00
|
|
|
* precisely calculate the time spent in C1 since the place we wake up
|
2010-09-13 07:25:35 +00:00
|
|
|
     * is an ISR. Assume we slept for no more than half of a quantum, unless
|
|
|
|
     * we are called inside a critical section, delaying the context switch.
|
2004-06-05 07:02:18 +00:00
|
|
|
*/
|
|
|
|
if (cx_next->type == ACPI_STATE_C1) {
|
2012-07-31 10:58:50 +00:00
|
|
|
cputicks = cpu_ticks();
|
2015-05-09 12:28:48 +00:00
|
|
|
if (cx_next->p_lvlx != NULL) {
|
|
|
|
/* C1 I/O then Halt */
|
|
|
|
CPU_GET_REG(cx_next->p_lvlx, 1);
|
|
|
|
}
|
|
|
|
if (cx_next->do_mwait)
|
|
|
|
acpi_cpu_idle_mwait(cx_next->mwait_hint);
|
|
|
|
else
|
|
|
|
acpi_cpu_c1();
|
2012-07-31 10:58:50 +00:00
|
|
|
end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();
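        /*
         * The << 20 scale (1048576) stands in for 10^6 here, so end_time
         * is the C1 residency in (approximate) microseconds.
         */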
|
|
|
|
if (curthread->td_critnest == 0)
|
2010-09-13 07:25:35 +00:00
|
|
|
end_time = min(end_time, 500000 / hz);
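        /*
         * 500000 / hz is half of one clock tick in microseconds, matching
         * the "half of a quantum" assumption above.
         */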
|
|
|
|
sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
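        /*
         * Exponential moving average: keep 3/4 of the sleep history and
         * fold in 1/4 of the latest measurement.
         */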
|
2004-06-05 07:02:18 +00:00
|
|
|
return;
|
|
|
|
}
|
2003-11-15 19:26:06 +00:00
|
|
|
|
2004-10-11 21:15:10 +00:00
|
|
|
/*
|
|
|
|
* For C3, disable bus master arbitration and enable bus master wake
|
|
|
|
* if BM control is available, otherwise flush the CPU cache.
|
|
|
|
*/
|
2015-05-09 12:28:48 +00:00
|
|
|
if (cx_next->type == ACPI_STATE_C3 || cx_next->mwait_bm_avoidance) {
|
2004-10-11 21:15:10 +00:00
|
|
|
if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
|
2009-06-05 18:44:36 +00:00
|
|
|
AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
|
|
|
|
AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
|
2004-10-11 21:15:10 +00:00
|
|
|
} else
|
|
|
|
ACPI_FLUSH_CPU_CACHE();
|
2004-06-05 07:02:18 +00:00
|
|
|
}
|
2003-11-15 19:26:06 +00:00
|
|
|
|
2004-06-05 07:02:18 +00:00
|
|
|
/*
|
|
|
|
* Read from P_LVLx to enter C2(+), checking time spent asleep.
|
|
|
|
* Use the ACPI timer for measuring sleep time. Since we need to
|
|
|
|
* get the time very close to the CPU start/stop clock logic, this
|
|
|
|
* is the only reliable time source.
|
|
|
|
*/
|
2012-07-31 10:58:50 +00:00
|
|
|
if (cx_next->type == ACPI_STATE_C3) {
|
|
|
|
AcpiHwRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
|
|
|
|
cputicks = 0;
|
|
|
|
} else {
|
|
|
|
start_time = 0;
|
|
|
|
cputicks = cpu_ticks();
|
|
|
|
}
|
2015-05-09 12:28:48 +00:00
|
|
|
if (cx_next->do_mwait)
|
|
|
|
acpi_cpu_idle_mwait(cx_next->mwait_hint);
|
|
|
|
else
|
|
|
|
CPU_GET_REG(cx_next->p_lvlx, 1);
|
2003-11-15 19:26:06 +00:00
|
|
|
|
2004-06-05 07:02:18 +00:00
|
|
|
/*
|
|
|
|
* Read the end time twice. Since it may take an arbitrary time
|
|
|
|
* to enter the idle state, the first read may be executed before
|
|
|
|
* the processor has stopped. Doing it again provides enough
|
|
|
|
* margin that we are certain to have a correct value.
|
|
|
|
*/
|
2009-09-11 22:49:34 +00:00
|
|
|
AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
|
2012-07-31 10:58:50 +00:00
|
|
|
if (cx_next->type == ACPI_STATE_C3) {
|
|
|
|
AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
|
|
|
|
end_time = acpi_TimerDelta(end_time, start_time);
|
|
|
|
} else
|
|
|
|
end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();
|
2003-11-15 19:26:06 +00:00
|
|
|
|
2004-06-05 07:02:18 +00:00
|
|
|
/* Enable bus master arbitration and disable bus master wakeup. */
|
2015-05-09 12:28:48 +00:00
|
|
|
if ((cx_next->type == ACPI_STATE_C3 || cx_next->mwait_bm_avoidance) &&
|
|
|
|
(cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
|
2009-06-05 18:44:36 +00:00
|
|
|
AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
|
|
|
|
AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
|
2003-11-15 19:26:06 +00:00
|
|
|
}
|
2005-10-25 21:15:47 +00:00
|
|
|
ACPI_ENABLE_IRQS();
|
2003-11-15 19:26:06 +00:00
|
|
|
|
2009-05-02 22:30:33 +00:00
|
|
|
sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
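    /*
     * PM_USEC() converts ACPI PM-timer ticks (nominally 3.579545 MHz) to
     * microseconds; the history uses the same 3/4 old, 1/4 new weighting
     * as the C1 path above.
     */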
|
2003-11-15 19:26:06 +00:00
|
|
|
}
|
2015-06-11 15:45:33 +00:00
|
|
|
#endif
|
2003-11-15 19:26:06 +00:00
|
|
|
|
|
|
|
/*
|
2005-02-07 04:03:06 +00:00
|
|
|
* Re-evaluate the _CST object when we are notified that it changed.
|
2003-11-15 19:26:06 +00:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
|
|
|
|
{
|
|
|
|
struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;
|
2012-12-01 18:06:05 +00:00
|
|
|
|
2005-02-07 04:03:06 +00:00
|
|
|
if (notify != ACPI_NOTIFY_CX_STATES)
|
|
|
|
return;
|
|
|
|
|
2012-12-01 18:06:05 +00:00
|
|
|
/*
|
|
|
|
* C-state data for target CPU is going to be in flux while we execute
|
|
|
|
* acpi_cpu_cx_cst, so disable entering acpi_cpu_idle.
|
|
|
|
     * Also, multiple ACPI taskqueues may concurrently
|
|
|
|
* execute notifications for the same CPU. ACPI_SERIAL is used to
|
|
|
|
* protect against that.
|
|
|
|
*/
|
|
|
|
ACPI_SERIAL_BEGIN(cpu);
|
|
|
|
disable_idle(sc);
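    /*
     * While idle is disabled, acpi_cpu_idle() falls back to the plain C1
     * path via the is_idle_disabled() check, so the Cx list can be rebuilt
     * safely.
     */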
|
|
|
|
|
2008-04-12 12:06:00 +00:00
|
|
|
/* Update the list of Cx states. */
|
|
|
|
acpi_cpu_cx_cst(sc);
|
|
|
|
acpi_cpu_cx_list(sc);
|
2012-07-13 08:11:55 +00:00
|
|
|
acpi_cpu_set_cx_lowest(sc);
|
2012-12-01 18:06:05 +00:00
|
|
|
|
|
|
|
enable_idle(sc);
|
2008-04-12 12:06:00 +00:00
|
|
|
ACPI_SERIAL_END(cpu);
|
2012-09-18 08:17:29 +00:00
|
|
|
|
|
|
|
acpi_UserNotify("PROCESSOR", sc->cpu_handle, notify);
|
2003-11-15 19:26:06 +00:00
|
|
|
}
|
|
|
|
|
2015-05-21 19:31:10 +00:00
|
|
|
static void
|
2007-01-07 21:53:42 +00:00
|
|
|
acpi_cpu_quirks(void)
|
2003-11-15 19:26:06 +00:00
|
|
|
{
|
2007-01-23 07:20:44 +00:00
|
|
|
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
|
|
|
|
|
2003-11-15 19:26:06 +00:00
|
|
|
/*
|
2007-01-07 21:53:42 +00:00
|
|
|
* Bus mastering arbitration control is needed to keep caches coherent
|
|
|
|
* while sleeping in C3. If it's not present but a working flush cache
|
|
|
|
* instruction is present, flush the caches before entering C3 instead.
|
|
|
|
* Otherwise, just disable C3 completely.
|
2003-11-15 19:26:06 +00:00
|
|
|
*/
|
2007-03-22 18:16:43 +00:00
|
|
|
if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
|
|
|
|
AcpiGbl_FADT.Pm2ControlLength == 0) {
|
|
|
|
if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
|
|
|
|
(AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
|
2007-01-07 21:53:42 +00:00
|
|
|
cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
|
|
|
|
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
2007-01-08 00:45:46 +00:00
|
|
|
"acpi_cpu: no BM control, using flush cache method\n"));
|
2007-01-07 21:53:42 +00:00
|
|
|
} else {
|
|
|
|
cpu_quirks |= CPU_QUIRK_NO_C3;
|
|
|
|
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
2007-01-08 00:45:46 +00:00
|
|
|
"acpi_cpu: no BM control, C3 not available\n"));
|
2007-01-07 21:53:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we are using generic Cx mode, C3 on multiple CPUs requires using
|
|
|
|
* the expensive flush cache instruction.
|
|
|
|
*/
|
2007-01-08 00:45:46 +00:00
|
|
|
if (cpu_cx_generic && mp_ncpus > 1) {
|
2004-10-11 06:06:42 +00:00
|
|
|
cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
|
2007-01-08 00:45:46 +00:00
|
|
|
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
|
|
|
"acpi_cpu: SMP, using flush cache mode for C3\n"));
|
|
|
|
}
|
2003-11-15 19:26:06 +00:00
|
|
|
|
|
|
|
/* Look for various quirks of the PIIX4 part. */
|
2015-05-21 19:31:10 +00:00
|
|
|
acpi_cpu_quirks_piix4();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
acpi_cpu_quirks_piix4(void)
|
|
|
|
{
|
|
|
|
#ifdef __i386__
|
|
|
|
device_t acpi_dev;
|
|
|
|
uint32_t val;
|
2015-06-09 23:13:37 +00:00
|
|
|
ACPI_STATUS status;
|
2015-05-21 19:31:10 +00:00
|
|
|
|
2003-11-15 19:26:06 +00:00
|
|
|
acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
|
|
|
|
if (acpi_dev != NULL) {
|
|
|
|
switch (pci_get_revid(acpi_dev)) {
|
|
|
|
/*
|
|
|
|
* Disable C3 support for all PIIX4 chipsets. Some of these parts
|
|
|
|
* do not report the BMIDE status to the BM status register and
|
|
|
|
* others have a livelock bug if Type-F DMA is enabled. Linux
|
|
|
|
* works around the BMIDE bug by reading the BM status directly
|
|
|
|
* but we take the simpler approach of disabling C3 for these
|
|
|
|
* parts.
|
|
|
|
*
|
2003-12-10 19:10:27 +00:00
|
|
|
* See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
|
2003-11-15 19:26:06 +00:00
|
|
|
* Livelock") from the January 2002 PIIX4 specification update.
|
|
|
|
* Applies to all PIIX4 models.
|
2008-03-09 11:19:03 +00:00
|
|
|
*
|
|
|
|
* Also, make sure that all interrupts cause a "Stop Break"
|
|
|
|
* event to exit from C2 state.
|
2009-02-19 14:39:52 +00:00
|
|
|
* Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
|
|
|
|
* should be set to zero, otherwise it causes C2 to short-sleep.
|
|
|
|
* PIIX4 doesn't properly support C3 and bus master activity
|
|
|
|
* need not break out of C2.
|
2003-11-15 19:26:06 +00:00
|
|
|
*/
|
2008-03-09 11:19:03 +00:00
|
|
|
case PCI_REVISION_A_STEP:
|
|
|
|
case PCI_REVISION_B_STEP:
|
2003-11-15 19:26:06 +00:00
|
|
|
case PCI_REVISION_4E:
|
|
|
|
case PCI_REVISION_4M:
|
|
|
|
cpu_quirks |= CPU_QUIRK_NO_C3;
|
2007-01-08 00:45:46 +00:00
|
|
|
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
|
|
|
"acpi_cpu: working around PIIX4 bug, disabling C3\n"));
|
2008-03-09 11:19:03 +00:00
|
|
|
|
|
|
|
val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
|
|
|
|
if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
|
|
|
|
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
2009-02-19 14:39:52 +00:00
|
|
|
"acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
|
2008-03-09 11:19:03 +00:00
|
|
|
val |= PIIX4_STOP_BREAK_MASK;
|
|
|
|
pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
|
|
|
|
}
|
2015-06-09 23:13:37 +00:00
|
|
|
status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
|
|
|
|
if (ACPI_SUCCESS(status) && val != 0) {
|
2009-02-19 14:39:52 +00:00
|
|
|
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
|
|
|
"acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
|
2009-06-05 18:44:36 +00:00
|
|
|
AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
|
2009-02-19 14:39:52 +00:00
|
|
|
}
|
2003-11-15 19:26:06 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2015-05-21 19:31:10 +00:00
|
|
|
#endif
|
2003-11-15 19:26:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2004-06-05 07:02:18 +00:00
|
|
|
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
|
2003-11-15 19:26:06 +00:00
|
|
|
{
|
2007-01-07 21:53:42 +00:00
|
|
|
struct acpi_cpu_softc *sc;
|
2003-11-15 19:26:06 +00:00
|
|
|
struct sbuf sb;
|
|
|
|
char buf[128];
|
|
|
|
int i;
|
2004-06-24 00:38:51 +00:00
|
|
|
uintmax_t fract, sum, whole;
|
2003-11-15 19:26:06 +00:00
|
|
|
|
2007-01-07 21:53:42 +00:00
|
|
|
sc = (struct acpi_cpu_softc *) arg1;
|
2004-06-19 02:27:23 +00:00
|
|
|
sum = 0;
|
2007-01-07 21:53:42 +00:00
|
|
|
for (i = 0; i < sc->cpu_cx_count; i++)
|
|
|
|
sum += sc->cpu_cx_stats[i];
|
2003-11-15 19:26:06 +00:00
|
|
|
sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
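    /*
     * Emit each state's share of all recorded entries as a percentage with
     * two decimal places using integer math only: "whole" is the count
     * scaled by 100 and "fract" the remainder scaled by 100 again.
     */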
|
2007-01-07 21:53:42 +00:00
|
|
|
for (i = 0; i < sc->cpu_cx_count; i++) {
|
2004-06-19 02:27:23 +00:00
|
|
|
if (sum > 0) {
|
2007-01-07 21:53:42 +00:00
|
|
|
whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
|
2004-06-19 02:27:23 +00:00
|
|
|
fract = (whole % sum) * 100;
|
|
|
|
sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
|
|
|
|
(u_int)(fract / sum));
|
|
|
|
} else
|
2009-05-03 06:25:37 +00:00
|
|
|
sbuf_printf(&sb, "0.00%% ");
|
2004-06-19 02:27:23 +00:00
|
|
|
}
|
2009-05-03 06:25:37 +00:00
|
|
|
sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
|
2003-11-15 19:26:06 +00:00
|
|
|
sbuf_trim(&sb);
|
|
|
|
sbuf_finish(&sb);
|
2004-05-07 05:22:38 +00:00
|
|
|
sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
|
|
|
|
sbuf_delete(&sb);
|
2003-11-15 19:26:06 +00:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2014-04-08 02:36:27 +00:00
|
|
|
/*
|
|
|
|
* XXX TODO: actually add support to count each entry/exit
|
|
|
|
* from the Cx states.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct acpi_cpu_softc *sc;
|
|
|
|
struct sbuf sb;
|
|
|
|
char buf[128];
|
|
|
|
int i;
|
|
|
|
|
|
|
|
sc = (struct acpi_cpu_softc *) arg1;
|
|
|
|
|
|
|
|
/* Print out the raw counters */
|
|
|
|
sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
|
|
|
|
|
|
|
|
for (i = 0; i < sc->cpu_cx_count; i++) {
|
|
|
|
sbuf_printf(&sb, "%u ", sc->cpu_cx_stats[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
sbuf_trim(&sb);
|
|
|
|
sbuf_finish(&sb);
|
|
|
|
sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
|
|
|
|
sbuf_delete(&sb);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2015-05-09 12:28:48 +00:00
|
|
|
#if defined(__i386__) || defined(__amd64__)
|
|
|
|
static int
|
|
|
|
acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct acpi_cpu_softc *sc;
|
|
|
|
struct acpi_cx *cx;
|
|
|
|
struct sbuf sb;
|
|
|
|
char buf[128];
|
|
|
|
int i;
|
|
|
|
|
|
|
|
sc = (struct acpi_cpu_softc *)arg1;
|
|
|
|
sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
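    /*
     * Build one token per state, e.g. "C1/hlt C2/io C3/mwait/hwc/bma",
     * naming the entry method plus any MWAIT hardware-coordination or
     * bus-master-avoidance flags.
     */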
|
|
|
|
for (i = 0; i < sc->cpu_cx_count; i++) {
|
|
|
|
cx = &sc->cpu_cx_states[i];
|
|
|
|
sbuf_printf(&sb, "C%d/", i + 1);
|
|
|
|
if (cx->do_mwait) {
|
|
|
|
sbuf_cat(&sb, "mwait");
|
|
|
|
if (cx->mwait_hw_coord)
|
|
|
|
sbuf_cat(&sb, "/hwc");
|
|
|
|
if (cx->mwait_bm_avoidance)
|
|
|
|
sbuf_cat(&sb, "/bma");
|
|
|
|
} else if (cx->type == ACPI_STATE_C1) {
|
|
|
|
sbuf_cat(&sb, "hlt");
|
|
|
|
} else {
|
|
|
|
sbuf_cat(&sb, "io");
|
|
|
|
}
|
|
|
|
if (cx->type == ACPI_STATE_C1 && cx->p_lvlx != NULL)
|
|
|
|
sbuf_cat(&sb, "/iohlt");
|
|
|
|
sbuf_putc(&sb, ' ');
|
|
|
|
}
|
|
|
|
sbuf_trim(&sb);
|
|
|
|
sbuf_finish(&sb);
|
|
|
|
sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
|
|
|
|
sbuf_delete(&sb);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2007-06-02 20:01:40 +00:00
|
|
|
static int
|
2012-07-13 08:11:55 +00:00
|
|
|
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc)
|
2007-06-02 20:01:40 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
ACPI_SERIAL_ASSERT(cpu);
|
2012-07-13 08:11:55 +00:00
|
|
|
sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1);
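    /*
     * cpu_cx_lowest_lim is the user-requested limit; cpu_cx_lowest is the
     * effective index, clamped to the states actually enumerated for this
     * CPU.
     */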
|
2007-06-02 20:01:40 +00:00
|
|
|
|
|
|
|
/* If not disabling, cache the new lowest non-C3 state. */
|
|
|
|
sc->cpu_non_c3 = 0;
|
|
|
|
for (i = sc->cpu_cx_lowest; i >= 0; i--) {
|
|
|
|
if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
|
|
|
|
sc->cpu_non_c3 = i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Reset the statistics counters. */
|
|
|
|
bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2003-11-15 19:26:06 +00:00
|
|
|
static int
|
|
|
|
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct acpi_cpu_softc *sc;
|
2004-05-07 05:22:38 +00:00
|
|
|
char state[8];
|
2007-06-02 20:01:40 +00:00
|
|
|
int val, error;
|
2003-11-15 19:26:06 +00:00
|
|
|
|
2007-01-07 21:53:42 +00:00
|
|
|
sc = (struct acpi_cpu_softc *) arg1;
|
2012-07-13 08:11:55 +00:00
|
|
|
snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1);
|
2004-05-07 05:22:38 +00:00
|
|
|
error = sysctl_handle_string(oidp, state, sizeof(state), req);
|
2003-11-15 19:26:06 +00:00
|
|
|
if (error != 0 || req->newptr == NULL)
|
|
|
|
return (error);
|
2004-05-07 05:22:38 +00:00
|
|
|
if (strlen(state) < 2 || toupper(state[0]) != 'C')
|
|
|
|
return (EINVAL);
|
2012-07-13 08:11:55 +00:00
|
|
|
if (strcasecmp(state, "Cmax") == 0)
|
|
|
|
val = MAX_CX_STATES;
|
|
|
|
else {
|
|
|
|
val = (int) strtol(state + 1, NULL, 10);
|
|
|
|
if (val < 1 || val > MAX_CX_STATES)
|
|
|
|
return (EINVAL);
|
|
|
|
}
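    /*
     * Accepted forms are "C<n>" (case-insensitive) and "Cmax", which maps
     * to the deepest state the driver supports (MAX_CX_STATES).
     */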
|
2003-11-15 19:26:06 +00:00
|
|
|
|
2004-08-13 06:21:47 +00:00
|
|
|
ACPI_SERIAL_BEGIN(cpu);
|
2012-07-13 08:11:55 +00:00
|
|
|
sc->cpu_cx_lowest_lim = val - 1;
|
|
|
|
acpi_cpu_set_cx_lowest(sc);
|
2007-01-07 21:53:42 +00:00
|
|
|
ACPI_SERIAL_END(cpu);
|
|
|
|
|
2012-07-02 17:55:29 +00:00
|
|
|
return (0);
|
2007-01-07 21:53:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct acpi_cpu_softc *sc;
|
|
|
|
char state[8];
|
2007-06-02 20:01:40 +00:00
|
|
|
int val, error, i;
|
2007-01-07 21:53:42 +00:00
|
|
|
|
2012-07-13 08:11:55 +00:00
|
|
|
snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1);
|
2007-01-07 21:53:42 +00:00
|
|
|
error = sysctl_handle_string(oidp, state, sizeof(state), req);
|
|
|
|
if (error != 0 || req->newptr == NULL)
|
|
|
|
return (error);
|
|
|
|
if (strlen(state) < 2 || toupper(state[0]) != 'C')
|
|
|
|
return (EINVAL);
|
2012-07-13 08:11:55 +00:00
|
|
|
if (strcasecmp(state, "Cmax") == 0)
|
|
|
|
val = MAX_CX_STATES;
|
|
|
|
else {
|
|
|
|
val = (int) strtol(state + 1, NULL, 10);
|
|
|
|
if (val < 1 || val > MAX_CX_STATES)
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
2007-01-07 21:53:42 +00:00
|
|
|
|
2007-06-02 20:01:40 +00:00
|
|
|
    /* Update the new lowest usable Cx state for all CPUs. */
|
2007-01-07 21:53:42 +00:00
|
|
|
ACPI_SERIAL_BEGIN(cpu);
|
2012-07-13 08:11:55 +00:00
|
|
|
cpu_cx_lowest_lim = val - 1;
|
2007-01-07 21:53:42 +00:00
|
|
|
for (i = 0; i < cpu_ndevices; i++) {
|
|
|
|
sc = device_get_softc(cpu_devices[i]);
|
2012-07-13 08:11:55 +00:00
|
|
|
sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim;
|
|
|
|
acpi_cpu_set_cx_lowest(sc);
|
2007-01-07 21:53:42 +00:00
|
|
|
}
|
2004-08-13 06:21:47 +00:00
|
|
|
ACPI_SERIAL_END(cpu);
|
2003-11-15 19:26:06 +00:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|