2000-10-28 06:59:48 +00:00
|
|
|
/*-
|
|
|
|
* Copyright (c) 2000 Takanori Watanabe <takawata@jp.freebsd.org>
|
|
|
|
* Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
|
2001-06-29 20:29:59 +00:00
|
|
|
* Copyright (c) 2000, 2001 Michael Smith
|
2000-10-28 06:59:48 +00:00
|
|
|
* Copyright (c) 2000 BSDi
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2005-03-02 09:22:34 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
#include "opt_acpi.h"
|
2016-04-09 13:58:04 +00:00
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
#include <sys/param.h>
|
Extract eventfilter declarations to sys/_eventfilter.h
This allows replacing "sys/eventfilter.h" includes with "sys/_eventfilter.h"
in other header files (e.g., sys/{bus,conf,cpu}.h) and reduces header
pollution substantially.
EVENTHANDLER_DECLARE and EVENTHANDLER_LIST_DECLAREs were moved out of .c
files into appropriate headers (e.g., sys/proc.h, powernv/opal.h).
As a side effect of reduced header pollution, many .c files and headers no
longer contain needed definitions. The remainder of the patch addresses
adding appropriate includes to fix those files.
LOCK_DEBUG and LOCK_FILE_LINE_ARG are moved to sys/_lock.h, as required by
sys/mutex.h since r326106 (but silently protected by header pollution prior
to this change).
No functional change (intended). Of course, any out of tree modules that
relied on header pollution for sys/eventhandler.h, sys/lock.h, or
sys/mutex.h inclusion need to be fixed. __FreeBSD_version has been bumped.
2019-05-20 00:38:23 +00:00
|
|
|
#include <sys/eventhandler.h>
|
2000-10-28 06:59:48 +00:00
|
|
|
#include <sys/kernel.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/proc.h>
|
2003-02-15 01:46:22 +00:00
|
|
|
#include <sys/fcntl.h>
|
2000-10-28 06:59:48 +00:00
|
|
|
#include <sys/malloc.h>
|
2004-05-30 20:08:47 +00:00
|
|
|
#include <sys/module.h>
|
2000-10-28 06:59:48 +00:00
|
|
|
#include <sys/bus.h>
|
|
|
|
#include <sys/conf.h>
|
|
|
|
#include <sys/ioccom.h>
|
|
|
|
#include <sys/reboot.h>
|
|
|
|
#include <sys/sysctl.h>
|
|
|
|
#include <sys/ctype.h>
|
2001-11-06 15:00:30 +00:00
|
|
|
#include <sys/linker.h>
|
2001-11-01 16:34:07 +00:00
|
|
|
#include <sys/power.h>
|
2003-12-09 06:29:57 +00:00
|
|
|
#include <sys/sbuf.h>
|
2009-03-17 00:48:11 +00:00
|
|
|
#include <sys/sched.h>
|
2004-03-19 07:05:01 +00:00
|
|
|
#include <sys/smp.h>
|
2009-03-17 00:48:11 +00:00
|
|
|
#include <sys/timetc.h>
|
2000-10-28 06:59:48 +00:00
|
|
|
|
2008-08-22 02:14:23 +00:00
|
|
|
#if defined(__i386__) || defined(__amd64__)
|
2018-05-25 07:33:20 +00:00
|
|
|
#include <machine/clock.h>
|
2008-08-22 02:14:23 +00:00
|
|
|
#include <machine/pci_cfgreg.h>
|
|
|
|
#endif
|
2000-10-28 06:59:48 +00:00
|
|
|
#include <machine/resource.h>
|
2003-11-15 19:18:29 +00:00
|
|
|
#include <machine/bus.h>
|
|
|
|
#include <sys/rman.h>
|
2001-08-30 00:50:58 +00:00
|
|
|
#include <isa/isavar.h>
|
2004-06-29 01:33:35 +00:00
|
|
|
#include <isa/pnpvar.h>
|
2001-08-30 00:50:58 +00:00
|
|
|
|
2009-06-05 18:44:36 +00:00
|
|
|
#include <contrib/dev/acpica/include/acpi.h>
|
|
|
|
#include <contrib/dev/acpica/include/accommon.h>
|
|
|
|
#include <contrib/dev/acpica/include/acnamesp.h>
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
#include <dev/acpica/acpivar.h>
|
|
|
|
#include <dev/acpica/acpiio.h>
|
|
|
|
|
2016-12-16 10:40:00 +00:00
|
|
|
#include <dev/pci/pcivar.h>
|
|
|
|
|
2007-03-22 18:16:43 +00:00
|
|
|
#include <vm/vm_param.h>
|
|
|
|
|
2011-11-07 06:44:47 +00:00
|
|
|
static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices");
|
2000-10-28 06:59:48 +00:00
|
|
|
|
2003-08-28 16:06:30 +00:00
|
|
|
/* Hooks for the ACPI CA debugging infrastructure */
|
2001-06-29 20:29:59 +00:00
|
|
|
#define _COMPONENT ACPI_BUS
|
2002-02-23 05:21:56 +00:00
|
|
|
ACPI_MODULE_NAME("ACPI")
|
2000-12-08 09:16:20 +00:00
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
static d_open_t acpiopen;
|
|
|
|
static d_close_t acpiclose;
|
|
|
|
static d_ioctl_t acpiioctl;
|
|
|
|
|
|
|
|
/*
 * Character-device switch for /dev/acpi: userland control interface
 * (ioctl-based; see the acpiopen/acpiclose/acpiioctl handlers).
 */
static struct cdevsw acpi_cdevsw = {
    .d_version =	D_VERSION,	/* cdevsw ABI version stamp */
    .d_open =		acpiopen,
    .d_close =		acpiclose,
    .d_ioctl =		acpiioctl,	/* all real work happens via ioctl */
    .d_name =		"acpi",
};
|
|
|
|
|
2010-10-26 18:59:50 +00:00
|
|
|
/*
 * A parsed list of interface-name strings.  Presumably holds the _OSI
 * interface names taken from the hw.acpi.install_interface /
 * hw.acpi.remove_interface tunables — TODO confirm against the
 * acpi_reset_interfaces() implementation (not visible here).
 */
struct acpi_interface {
    ACPI_STRING	*data;	/* array of interface name strings */
    int		num;	/* number of entries in data[] */
};
|
|
|
|
|
2018-01-09 16:42:24 +00:00
|
|
|
static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
|
|
|
|
static char *pcilink_ids[] = { "PNP0C0F", NULL };
|
|
|
|
|
2004-06-30 15:10:02 +00:00
|
|
|
/* Global mutex for locking access to the ACPI subsystem. */
|
2001-06-29 20:29:59 +00:00
|
|
|
struct mtx acpi_mutex;
|
2014-09-22 14:27:26 +00:00
|
|
|
struct callout acpi_sleep_timer;
|
2001-06-29 20:29:59 +00:00
|
|
|
|
2004-06-30 04:49:54 +00:00
|
|
|
/* Bitmap of device quirks. */
|
2004-06-30 15:10:02 +00:00
|
|
|
int acpi_quirks;
|
2004-06-30 04:49:54 +00:00
|
|
|
|
2009-04-30 17:35:44 +00:00
|
|
|
/* Supported sleep states. */
|
|
|
|
static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT];
|
|
|
|
|
2015-02-06 16:09:01 +00:00
|
|
|
static void acpi_lookup(void *arg, const char *name, device_t *dev);
|
2001-07-30 08:59:43 +00:00
|
|
|
static int acpi_modevent(struct module *mod, int event, void *junk);
|
2000-10-28 06:59:48 +00:00
|
|
|
static int acpi_probe(device_t dev);
|
|
|
|
static int acpi_attach(device_t dev);
|
2004-12-02 08:07:12 +00:00
|
|
|
static int acpi_suspend(device_t dev);
|
|
|
|
static int acpi_resume(device_t dev);
|
2004-06-05 07:25:58 +00:00
|
|
|
static int acpi_shutdown(device_t dev);
|
2010-09-10 11:19:03 +00:00
|
|
|
static device_t acpi_add_child(device_t bus, u_int order, const char *name,
|
2003-08-28 16:06:30 +00:00
|
|
|
int unit);
|
2000-10-28 06:59:48 +00:00
|
|
|
static int acpi_print_child(device_t bus, device_t child);
|
2004-12-02 08:07:12 +00:00
|
|
|
static void acpi_probe_nomatch(device_t bus, device_t child);
|
|
|
|
static void acpi_driver_added(device_t dev, driver_t *driver);
|
2020-03-09 20:27:25 +00:00
|
|
|
static void acpi_child_deleted(device_t dev, device_t child);
|
2003-08-28 16:06:30 +00:00
|
|
|
static int acpi_read_ivar(device_t dev, device_t child, int index,
|
|
|
|
uintptr_t *result);
|
|
|
|
static int acpi_write_ivar(device_t dev, device_t child, int index,
|
|
|
|
uintptr_t value);
|
2004-06-13 22:52:30 +00:00
|
|
|
static struct resource_list *acpi_get_rlist(device_t dev, device_t child);
|
2010-12-22 20:27:20 +00:00
|
|
|
static void acpi_reserve_resources(device_t dev);
|
2004-08-23 16:28:42 +00:00
|
|
|
static int acpi_sysres_alloc(device_t dev);
|
2010-12-22 20:27:20 +00:00
|
|
|
static int acpi_set_resource(device_t dev, device_t child, int type,
|
2016-01-27 02:23:54 +00:00
|
|
|
int rid, rman_res_t start, rman_res_t count);
|
2003-08-28 16:06:30 +00:00
|
|
|
static struct resource *acpi_alloc_resource(device_t bus, device_t child,
|
2016-01-27 02:23:54 +00:00
|
|
|
int type, int *rid, rman_res_t start, rman_res_t end,
|
|
|
|
rman_res_t count, u_int flags);
|
2011-06-10 12:30:16 +00:00
|
|
|
static int acpi_adjust_resource(device_t bus, device_t child, int type,
|
2016-01-27 02:23:54 +00:00
|
|
|
struct resource *r, rman_res_t start, rman_res_t end);
|
2003-08-28 16:06:30 +00:00
|
|
|
static int acpi_release_resource(device_t bus, device_t child, int type,
|
|
|
|
int rid, struct resource *r);
|
2005-03-22 20:00:57 +00:00
|
|
|
static void acpi_delete_resource(device_t bus, device_t child, int type,
|
|
|
|
int rid);
|
2003-12-18 03:25:22 +00:00
|
|
|
static uint32_t acpi_isa_get_logicalid(device_t dev);
|
|
|
|
static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count);
|
2018-10-26 00:05:46 +00:00
|
|
|
static int acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match);
|
2004-06-29 19:00:36 +00:00
|
|
|
static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev,
|
|
|
|
ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters,
|
|
|
|
ACPI_BUFFER *ret);
|
2004-07-15 16:29:08 +00:00
|
|
|
static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level,
|
|
|
|
void *context, void **retval);
|
|
|
|
static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev,
|
|
|
|
int max_depth, acpi_scan_cb_t user_fn, void *arg);
|
2003-08-28 16:06:30 +00:00
|
|
|
static int acpi_isa_pnp_probe(device_t bus, device_t child,
|
2004-07-15 16:29:08 +00:00
|
|
|
struct isa_pnp_id *ids);
|
2000-10-28 06:59:48 +00:00
|
|
|
static void acpi_probe_children(device_t bus);
|
2008-04-07 18:35:11 +00:00
|
|
|
static void acpi_probe_order(ACPI_HANDLE handle, int *order);
|
2003-08-28 16:06:30 +00:00
|
|
|
static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level,
|
2004-07-15 16:29:08 +00:00
|
|
|
void *context, void **status);
|
2009-04-30 17:35:44 +00:00
|
|
|
static void acpi_sleep_enable(void *arg);
|
|
|
|
static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
|
2007-06-21 22:50:37 +00:00
|
|
|
static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
|
2000-10-28 06:59:48 +00:00
|
|
|
static void acpi_shutdown_final(void *arg, int howto);
|
2001-01-10 18:01:51 +00:00
|
|
|
static void acpi_enable_fixed_events(struct acpi_softc *sc);
|
2012-02-08 21:23:20 +00:00
|
|
|
static void acpi_resync_clock(struct acpi_softc *sc);
|
2004-06-30 16:08:03 +00:00
|
|
|
static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
|
|
|
|
static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
|
|
|
|
static int acpi_wake_prep_walk(int sstate);
|
2004-05-28 06:28:55 +00:00
|
|
|
static int acpi_wake_sysctl_walk(device_t dev);
|
|
|
|
static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS);
|
2000-10-28 06:59:48 +00:00
|
|
|
static void acpi_system_eventhandler_sleep(void *arg, int state);
|
|
|
|
static void acpi_system_eventhandler_wakeup(void *arg, int state);
|
2009-04-30 17:35:44 +00:00
|
|
|
static int acpi_sname2sstate(const char *sname);
|
2009-04-30 17:45:43 +00:00
|
|
|
static const char *acpi_sstate2sname(int sstate);
|
2003-04-11 16:53:56 +00:00
|
|
|
static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
|
2001-01-13 21:28:57 +00:00
|
|
|
static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
|
2010-03-09 19:02:02 +00:00
|
|
|
static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS);
|
2001-11-01 16:34:07 +00:00
|
|
|
static int acpi_pm_func(u_long cmd, void *arg, ...);
|
2004-03-27 16:26:00 +00:00
|
|
|
static int acpi_child_location_str_method(device_t acdev, device_t child,
|
|
|
|
char *buf, size_t buflen);
|
|
|
|
static int acpi_child_pnpinfo_str_method(device_t acdev, device_t child,
|
|
|
|
char *buf, size_t buflen);
|
2008-08-22 02:14:23 +00:00
|
|
|
static void acpi_enable_pcie(void);
|
2008-11-18 21:01:54 +00:00
|
|
|
static void acpi_hint_device_unit(device_t acdev, device_t child,
|
|
|
|
const char *name, int *unitp);
|
2010-10-26 18:59:50 +00:00
|
|
|
static void acpi_reset_interfaces(device_t dev);
|
2004-03-27 16:26:00 +00:00
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
 * Method table for the acpi0 bus driver.  Implements the device,
 * bus, ACPI-bus, and ISA-PNP kobj interfaces; entries not listed
 * here fall through to the kernel's default methods.
 */
static device_method_t acpi_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,		acpi_probe),
    DEVMETHOD(device_attach,		acpi_attach),
    DEVMETHOD(device_shutdown,		acpi_shutdown),
    DEVMETHOD(device_detach,		bus_generic_detach),
    DEVMETHOD(device_suspend,		acpi_suspend),
    DEVMETHOD(device_resume,		acpi_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,		acpi_add_child),
    DEVMETHOD(bus_print_child,		acpi_print_child),
    DEVMETHOD(bus_probe_nomatch,	acpi_probe_nomatch),
    DEVMETHOD(bus_driver_added,		acpi_driver_added),
    DEVMETHOD(bus_child_deleted,	acpi_child_deleted),
    DEVMETHOD(bus_read_ivar,		acpi_read_ivar),
    DEVMETHOD(bus_write_ivar,		acpi_write_ivar),
    DEVMETHOD(bus_get_resource_list,	acpi_get_rlist),
    DEVMETHOD(bus_set_resource,		acpi_set_resource),
    DEVMETHOD(bus_get_resource,		bus_generic_rl_get_resource),
    DEVMETHOD(bus_alloc_resource,	acpi_alloc_resource),
    DEVMETHOD(bus_adjust_resource,	acpi_adjust_resource),
    DEVMETHOD(bus_release_resource,	acpi_release_resource),
    DEVMETHOD(bus_delete_resource,	acpi_delete_resource),
    DEVMETHOD(bus_child_pnpinfo_str,	acpi_child_pnpinfo_str_method),
    DEVMETHOD(bus_child_location_str,	acpi_child_location_str_method),
    /* Interrupt/resource activation is delegated to the generic bus code. */
    DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),
    DEVMETHOD(bus_hint_device_unit,	acpi_hint_device_unit),
    DEVMETHOD(bus_get_cpus,		acpi_get_cpus),
    DEVMETHOD(bus_get_domain,		acpi_get_domain),

    /* ACPI bus */
    DEVMETHOD(acpi_id_probe,		acpi_device_id_probe),
    DEVMETHOD(acpi_evaluate_object,	acpi_device_eval_obj),
    DEVMETHOD(acpi_pwr_for_sleep,	acpi_device_pwr_for_sleep),
    DEVMETHOD(acpi_scan_children,	acpi_device_scan_children),

    /* ISA emulation */
    DEVMETHOD(isa_pnp_probe,		acpi_isa_pnp_probe),

    DEVMETHOD_END
};
|
|
|
|
|
|
|
|
/* Driver description for acpi0; registered on nexus via EARLY_DRIVER_MODULE. */
static driver_t acpi_driver = {
    "acpi",			/* driver/device name */
    acpi_methods,		/* kobj method table above */
    sizeof(struct acpi_softc),	/* per-instance softc size */
};
|
|
|
|
|
2002-01-08 06:46:01 +00:00
|
|
|
static devclass_t acpi_devclass;
|
2020-02-17 15:32:21 +00:00
|
|
|
EARLY_DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0,
|
|
|
|
BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
|
2004-04-08 16:45:12 +00:00
|
|
|
MODULE_VERSION(acpi, 1);
|
2000-10-28 06:59:48 +00:00
|
|
|
|
2004-08-13 06:21:32 +00:00
|
|
|
ACPI_SERIAL_DECL(acpi, "ACPI root bus");
|
|
|
|
|
2004-08-23 16:28:42 +00:00
|
|
|
/* Local pools for managing system resources for ACPI child devices. */
|
|
|
|
static struct rman acpi_rman_io, acpi_rman_mem;
|
|
|
|
|
2004-08-03 05:13:56 +00:00
|
|
|
#define ACPI_MINIMUM_AWAKETIME 5
|
|
|
|
|
2008-03-13 20:39:04 +00:00
|
|
|
/* Holds the description of the acpi0 device. */
|
|
|
|
static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2];
|
|
|
|
|
2020-02-26 14:26:36 +00:00
|
|
|
SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
|
|
|
|
"ACPI debugging");
|
2003-09-26 21:22:10 +00:00
|
|
|
static char acpi_ca_version[12];
|
|
|
|
SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD,
|
|
|
|
acpi_ca_version, 0, "Version of Intel ACPI-CA");
|
2000-10-28 06:59:48 +00:00
|
|
|
|
2010-10-26 18:59:50 +00:00
|
|
|
/*
|
|
|
|
* Allow overriding _OSI methods.
|
|
|
|
*/
|
|
|
|
static char acpi_install_interface[256];
|
|
|
|
TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface,
|
|
|
|
sizeof(acpi_install_interface));
|
|
|
|
static char acpi_remove_interface[256];
|
|
|
|
TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface,
|
|
|
|
sizeof(acpi_remove_interface));
|
|
|
|
|
2010-03-09 19:02:02 +00:00
|
|
|
/* Allow users to dump Debug objects without ACPI debugger. */
|
|
|
|
static int acpi_debug_objects;
|
|
|
|
TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects);
|
|
|
|
SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects,
|
2020-02-26 14:26:36 +00:00
|
|
|
CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_NEEDGIANT, NULL, 0,
|
|
|
|
acpi_debug_objects_sysctl, "I",
|
2010-03-09 19:02:02 +00:00
|
|
|
"Enable Debug objects");
|
|
|
|
|
|
|
|
/* Allow the interpreter to ignore common mistakes in BIOS. */
|
|
|
|
static int acpi_interpreter_slack = 1;
|
|
|
|
TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack);
|
|
|
|
SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN,
|
|
|
|
&acpi_interpreter_slack, 1, "Turn on interpreter slack mode.");
|
|
|
|
|
2014-10-02 19:11:18 +00:00
|
|
|
/* Ignore register widths set by FADT and use default widths instead. */
|
|
|
|
static int acpi_ignore_reg_width = 1;
|
|
|
|
TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width);
|
|
|
|
SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN,
|
|
|
|
&acpi_ignore_reg_width, 1, "Ignore register widths set by FADT");
|
|
|
|
|
2005-02-13 20:10:28 +00:00
|
|
|
/* Allow users to override quirks. */
|
|
|
|
TUNABLE_INT("debug.acpi.quirks", &acpi_quirks);
|
|
|
|
|
2018-05-03 19:00:50 +00:00
|
|
|
int acpi_susp_bounce;
|
2007-05-25 05:26:21 +00:00
|
|
|
SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW,
|
|
|
|
&acpi_susp_bounce, 0, "Don't actually suspend, just test devices.");
|
|
|
|
|
2001-07-30 08:59:43 +00:00
|
|
|
/*
|
|
|
|
* ACPI can only be loaded as a module by the loader; activating it after
|
|
|
|
* system bootstrap time is not useful, and can be fatal to the system.
|
2008-08-04 19:45:15 +00:00
|
|
|
* It also cannot be unloaded, since the entire system bus hierarchy hangs
|
2003-08-28 16:06:30 +00:00
|
|
|
* off it.
|
2001-07-30 08:59:43 +00:00
|
|
|
*/
|
|
|
|
static int
|
|
|
|
acpi_modevent(struct module *mod, int event, void *junk)
|
|
|
|
{
|
2004-06-29 19:00:36 +00:00
|
|
|
switch (event) {
|
2001-07-30 08:59:43 +00:00
|
|
|
case MOD_LOAD:
|
2002-11-24 02:27:07 +00:00
|
|
|
if (!cold) {
|
|
|
|
printf("The ACPI driver cannot be loaded after boot.\n");
|
2003-08-28 16:06:30 +00:00
|
|
|
return (EPERM);
|
2002-11-24 02:27:07 +00:00
|
|
|
}
|
2001-07-30 08:59:43 +00:00
|
|
|
break;
|
|
|
|
case MOD_UNLOAD:
|
2001-11-01 16:34:07 +00:00
|
|
|
if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI)
|
2003-08-28 16:06:30 +00:00
|
|
|
return (EBUSY);
|
2001-11-01 16:34:07 +00:00
|
|
|
break;
|
2001-07-30 08:59:43 +00:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2003-08-28 16:06:30 +00:00
|
|
|
return (0);
|
2001-07-30 08:59:43 +00:00
|
|
|
}
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
 * Perform early initialization of the ACPICA subsystem and the root
 * tables.  Idempotent: callable both from acpi_identify() and from the
 * MADT driver, whichever runs first.  Returns AE_OK on success or an
 * ACPICA status/AE_SUPPORT when ACPI is blacklisted.
 */
ACPI_STATUS
acpi_Startup(void)
{
    static int started = 0;
    ACPI_STATUS status;
    int val;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Only run the startup code once.  The MADT driver also calls this. */
    if (started)
	return_VALUE (AE_OK);
    started = 1;

    /*
     * Initialize the ACPICA subsystem.  This must precede any other
     * ACPICA call, including AcpiInitializeTables() below.
     */
    if (ACPI_FAILURE(status = AcpiInitializeSubsystem())) {
	printf("ACPI: Could not initialize Subsystem: %s\n",
	    AcpiFormatException(status));
	return_VALUE (status);
    }

    /*
     * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing
     * if more tables exist.
     */
    if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) {
	printf("ACPI: Table initialisation failed: %s\n",
	    AcpiFormatException(status));
	return_VALUE (status);
    }

    /* Set up any quirks we have for this system. */
    if (acpi_quirks == ACPI_Q_OK)
	acpi_table_quirks(&acpi_quirks);

    /* If the user manually set the disabled hint to 0, force-enable ACPI. */
    if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0)
	acpi_quirks &= ~ACPI_Q_BROKEN;
    if (acpi_quirks & ACPI_Q_BROKEN) {
	printf("ACPI disabled by blacklist.  Contact your BIOS vendor.\n");
	status = AE_SUPPORT;
    }

    return_VALUE (status);
}
|
|
|
|
|
|
|
|
/*
 * Detect ACPI and perform early initialisation.
 *
 * Locates the RSDP, maps the RSDT/XSDT header just long enough to build
 * the acpi0 description string (OEM id + table id) into acpi_desc, and
 * records the ACPI-CA version for the sysctl.  Returns 0 if ACPI should
 * attach, ENXIO otherwise.
 */
int
acpi_identify(void)
{
    ACPI_TABLE_RSDP *rsdp;
    ACPI_TABLE_HEADER *rsdt;
    ACPI_PHYSICAL_ADDRESS paddr;
    struct sbuf sb;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* ACPI must be set up during early boot only. */
    if (!cold)
	return (ENXIO);

    /* Check that we haven't been disabled with a hint. */
    if (resource_disabled("acpi", 0))
	return (ENXIO);

    /* Check for other PM systems (e.g. APM); only one may own PM. */
    if (power_pm_get_type() != POWER_PM_TYPE_NONE &&
	power_pm_get_type() != POWER_PM_TYPE_ACPI) {
	printf("ACPI identify failed, other PM system enabled.\n");
	return (ENXIO);
    }

    /* Initialize root tables. */
    if (ACPI_FAILURE(acpi_Startup())) {
	printf("ACPI: Try disabling either ACPI or apic support.\n");
	return (ENXIO);
    }

    /* Map the RSDP and pick the XSDT (64-bit, rev >= 2) over the RSDT. */
    if ((paddr = AcpiOsGetRootPointer()) == 0 ||
	(rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL)
	return (ENXIO);
    if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0)
	paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress;
    else
	paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress;
    AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP));

    if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL)
	return (ENXIO);
    /*
     * Format "<OemId> <OemTableId>" (trailing blanks trimmed) into the
     * fixed acpi_desc buffer; used by acpi_probe() as the device desc.
     */
    sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN);
    sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE);
    sbuf_trim(&sb);
    sbuf_putc(&sb, ' ');
    sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sbuf_delete(&sb);
    AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER));

    snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION);

    return (0);
}
|
|
|
|
|
|
|
|
/*
 * Fetch some descriptive data from ACPI to put in our attach message.
 * The description string was prepared earlier by acpi_identify().
 */
static int
acpi_probe(device_t dev)
{

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    device_set_desc(dev, acpi_desc);

    /* NOWILDCARD: only claim a device explicitly created for us. */
    return_VALUE (BUS_PROBE_NOWILDCARD);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
acpi_attach(device_t dev)
|
|
|
|
{
|
|
|
|
struct acpi_softc *sc;
|
2001-06-29 20:29:59 +00:00
|
|
|
ACPI_STATUS status;
|
2004-05-06 08:04:34 +00:00
|
|
|
int error, state;
|
2001-09-07 02:57:29 +00:00
|
|
|
UINT32 flags;
|
2004-05-06 08:04:34 +00:00
|
|
|
UINT8 TypeA, TypeB;
|
2002-10-31 20:23:41 +00:00
|
|
|
char *env;
|
2000-10-28 06:59:48 +00:00
|
|
|
|
2002-05-19 06:16:47 +00:00
|
|
|
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
|
2004-08-03 05:13:56 +00:00
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
sc = device_get_softc(dev);
|
|
|
|
sc->acpi_dev = dev;
|
2015-05-22 17:05:21 +00:00
|
|
|
callout_init(&sc->susp_force_to, 1);
|
2000-10-28 06:59:48 +00:00
|
|
|
|
2007-03-22 18:16:43 +00:00
|
|
|
error = ENXIO;
|
|
|
|
|
2004-06-13 22:52:30 +00:00
|
|
|
/* Initialize resource manager. */
|
|
|
|
acpi_rman_io.rm_type = RMAN_ARRAY;
|
|
|
|
acpi_rman_io.rm_start = 0;
|
|
|
|
acpi_rman_io.rm_end = 0xffff;
|
2006-09-11 19:32:54 +00:00
|
|
|
acpi_rman_io.rm_descr = "ACPI I/O ports";
|
2004-06-13 22:52:30 +00:00
|
|
|
if (rman_init(&acpi_rman_io) != 0)
|
|
|
|
panic("acpi rman_init IO ports failed");
|
|
|
|
acpi_rman_mem.rm_type = RMAN_ARRAY;
|
2006-09-11 19:32:54 +00:00
|
|
|
acpi_rman_mem.rm_descr = "ACPI I/O memory addresses";
|
2004-06-13 22:52:30 +00:00
|
|
|
if (rman_init(&acpi_rman_mem) != 0)
|
|
|
|
panic("acpi rman_init memory failed");
|
|
|
|
|
2007-03-22 18:16:43 +00:00
|
|
|
/* Initialise the ACPI mutex */
|
|
|
|
mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the globals from our tunables. This is needed because ACPI-CA
|
|
|
|
* uses UINT8 for some values and we have no tunable_byte.
|
|
|
|
*/
|
2010-03-09 19:02:02 +00:00
|
|
|
AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE;
|
|
|
|
AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
|
2014-10-02 19:11:18 +00:00
|
|
|
AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE;
|
2010-03-09 19:02:02 +00:00
|
|
|
|
|
|
|
#ifndef ACPI_DEBUG
|
|
|
|
/*
|
|
|
|
* Disable all debugging layers and levels.
|
|
|
|
*/
|
|
|
|
AcpiDbgLayer = 0;
|
|
|
|
AcpiDbgLevel = 0;
|
|
|
|
#endif
|
2007-03-22 18:16:43 +00:00
|
|
|
|
2010-10-26 18:59:50 +00:00
|
|
|
/* Override OS interfaces if the user requested. */
|
|
|
|
acpi_reset_interfaces(dev);
|
|
|
|
|
2007-03-22 18:16:43 +00:00
|
|
|
/* Load ACPI name space. */
|
|
|
|
status = AcpiLoadTables();
|
|
|
|
if (ACPI_FAILURE(status)) {
|
|
|
|
device_printf(dev, "Could not load Namespace: %s\n",
|
|
|
|
AcpiFormatException(status));
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2008-08-22 02:14:23 +00:00
|
|
|
/* Handle MCFG table if present. */
|
|
|
|
acpi_enable_pcie();
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
|
2003-08-28 16:06:30 +00:00
|
|
|
* Note that some systems (specifically, those with namespace evaluation
|
|
|
|
* issues that require the avoidance of parts of the namespace) must
|
|
|
|
* avoid running _INI and _STA on everything, as well as dodging the final
|
|
|
|
* object init pass.
|
2001-09-07 02:57:29 +00:00
|
|
|
*
|
|
|
|
* For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT).
|
2000-10-28 06:59:48 +00:00
|
|
|
*
|
2003-08-28 16:06:30 +00:00
|
|
|
* XXX We should arrange for the object init pass after we have attached
|
|
|
|
* all our child devices, but on many systems it works here.
|
2000-10-28 06:59:48 +00:00
|
|
|
*/
|
2001-09-07 02:57:29 +00:00
|
|
|
flags = 0;
|
2002-04-17 13:06:36 +00:00
|
|
|
if (testenv("debug.acpi.avoid"))
|
2001-09-07 02:57:29 +00:00
|
|
|
flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT;
|
2004-08-03 05:13:56 +00:00
|
|
|
|
|
|
|
/* Bring the hardware and basic handlers online. */
|
2002-02-23 05:21:56 +00:00
|
|
|
if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) {
|
2003-08-28 16:06:30 +00:00
|
|
|
device_printf(dev, "Could not enable ACPI: %s\n",
|
|
|
|
AcpiFormatException(status));
|
2001-06-29 20:29:59 +00:00
|
|
|
goto out;
|
2002-07-30 19:35:32 +00:00
|
|
|
}
|
|
|
|
|
2003-07-20 00:48:38 +00:00
|
|
|
/*
|
|
|
|
* Call the ECDT probe function to provide EC functionality before
|
|
|
|
* the namespace has been evaluated.
|
2006-05-07 03:28:10 +00:00
|
|
|
*
|
|
|
|
* XXX This happens before the sysresource devices have been probed and
|
|
|
|
* attached so its resources come from nexus0. In practice, this isn't
|
|
|
|
* a problem but should be addressed eventually.
|
2003-07-20 00:48:38 +00:00
|
|
|
*/
|
|
|
|
acpi_ec_ecdt_probe(dev);
|
|
|
|
|
2004-08-03 05:13:56 +00:00
|
|
|
/* Bring device objects and regions online. */
|
2002-07-30 19:35:32 +00:00
|
|
|
if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) {
|
2003-08-28 16:06:30 +00:00
|
|
|
device_printf(dev, "Could not initialize ACPI objects: %s\n",
|
|
|
|
AcpiFormatException(status));
|
2002-07-30 19:35:32 +00:00
|
|
|
goto out;
|
2000-10-28 06:59:48 +00:00
|
|
|
}
|
|
|
|
|
2001-01-13 21:28:57 +00:00
|
|
|
/*
|
|
|
|
* Setup our sysctl tree.
|
|
|
|
*
|
|
|
|
* XXX: This doesn't check to make sure that none of these fail.
|
|
|
|
*/
|
|
|
|
sysctl_ctx_init(&sc->acpi_sysctl_ctx);
|
|
|
|
sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx,
|
2020-02-26 14:26:36 +00:00
|
|
|
SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev),
|
|
|
|
CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
|
2003-04-11 16:53:56 +00:00
|
|
|
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
|
2020-02-26 14:26:36 +00:00
|
|
|
OID_AUTO, "supported_sleep_state",
|
|
|
|
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
|
2016-02-19 05:02:17 +00:00
|
|
|
0, 0, acpi_supported_sleep_state_sysctl, "A",
|
|
|
|
"List supported ACPI sleep states.");
|
2001-01-13 21:28:57 +00:00
|
|
|
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
|
2020-02-26 14:26:36 +00:00
|
|
|
OID_AUTO, "power_button_state",
|
|
|
|
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
|
2016-02-19 05:02:17 +00:00
|
|
|
&sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A",
|
|
|
|
"Power button ACPI sleep state.");
|
2001-01-13 21:28:57 +00:00
|
|
|
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
|
2020-02-26 14:26:36 +00:00
|
|
|
OID_AUTO, "sleep_button_state",
|
|
|
|
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
|
2016-02-19 05:02:17 +00:00
|
|
|
&sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A",
|
|
|
|
"Sleep button ACPI sleep state.");
|
2001-01-13 21:28:57 +00:00
|
|
|
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
|
2020-02-26 14:26:36 +00:00
|
|
|
OID_AUTO, "lid_switch_state",
|
|
|
|
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
|
2016-02-19 05:02:17 +00:00
|
|
|
&sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A",
|
|
|
|
"Lid ACPI sleep state. Set to S3 if you want to suspend your laptop when close the Lid.");
|
2001-10-26 17:43:05 +00:00
|
|
|
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
|
2020-02-26 14:26:36 +00:00
|
|
|
OID_AUTO, "standby_state",
|
|
|
|
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
|
2001-10-26 17:43:05 +00:00
|
|
|
&sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
|
|
|
|
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
|
2020-02-26 14:26:36 +00:00
|
|
|
OID_AUTO, "suspend_state",
|
|
|
|
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
|
2001-10-26 17:43:05 +00:00
|
|
|
&sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
|
2002-08-25 06:13:53 +00:00
|
|
|
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
|
2006-06-10 08:04:38 +00:00
|
|
|
OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
|
2011-09-13 15:57:29 +00:00
|
|
|
"sleep delay in seconds");
|
2001-11-06 15:00:30 +00:00
|
|
|
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
|
2006-06-10 08:04:38 +00:00
|
|
|
OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
|
2001-10-29 18:09:43 +00:00
|
|
|
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
|
2006-06-10 08:04:38 +00:00
|
|
|
OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
|
2006-06-11 20:31:41 +00:00
|
|
|
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
|
|
|
|
OID_AUTO, "disable_on_reboot", CTLFLAG_RW,
|
|
|
|
&sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system");
|
2006-07-29 21:46:16 +00:00
|
|
|
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
|
|
|
|
OID_AUTO, "handle_reboot", CTLFLAG_RW,
|
|
|
|
&sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot");
|
2003-08-08 03:19:22 +00:00
|
|
|
|
|
|
|
/*
|
2004-05-29 05:34:04 +00:00
|
|
|
* Default to 1 second before sleeping to give some machines time to
|
2003-08-08 03:19:22 +00:00
|
|
|
* stabilize.
|
|
|
|
*/
|
2004-05-29 05:34:04 +00:00
|
|
|
sc->acpi_sleep_delay = 1;
|
2001-10-29 18:09:43 +00:00
|
|
|
if (bootverbose)
|
|
|
|
sc->acpi_verbose = 1;
|
2014-10-16 18:04:43 +00:00
|
|
|
if ((env = kern_getenv("hw.acpi.verbose")) != NULL) {
|
2005-03-31 19:07:26 +00:00
|
|
|
if (strcmp(env, "0") != 0)
|
|
|
|
sc->acpi_verbose = 1;
|
2002-10-31 20:23:41 +00:00
|
|
|
freeenv(env);
|
|
|
|
}
|
|
|
|
|
2010-10-13 00:21:53 +00:00
|
|
|
/* Only enable reboot by default if the FADT says it is available. */
|
|
|
|
if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER)
|
|
|
|
sc->acpi_handle_reboot = 1;
|
|
|
|
|
2015-05-06 14:14:14 +00:00
|
|
|
#if !ACPI_REDUCED_HARDWARE
|
2003-09-15 06:29:31 +00:00
|
|
|
/* Only enable S4BIOS by default if the FACS says it is available. */
|
2015-04-28 16:06:58 +00:00
|
|
|
if (AcpiGbl_FACS != NULL && AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT)
|
2004-06-30 04:49:54 +00:00
|
|
|
sc->acpi_s4bios = 1;
|
2015-05-06 14:14:14 +00:00
|
|
|
#endif
|
2003-09-15 06:29:31 +00:00
|
|
|
|
2009-04-30 17:35:44 +00:00
|
|
|
/* Probe all supported sleep states. */
|
|
|
|
acpi_sleep_states[ACPI_STATE_S0] = TRUE;
|
|
|
|
for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
|
2013-01-17 23:56:43 +00:00
|
|
|
if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT,
|
|
|
|
__DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) &&
|
|
|
|
ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
|
2009-04-30 17:35:44 +00:00
|
|
|
acpi_sleep_states[state] = TRUE;
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
|
2004-05-06 08:04:34 +00:00
|
|
|
* Dispatch the default sleep state to devices. The lid switch is set
|
2009-04-30 17:35:44 +00:00
|
|
|
* to UNKNOWN by default to avoid surprising users.
|
2000-10-28 06:59:48 +00:00
|
|
|
*/
|
2009-04-30 17:35:44 +00:00
|
|
|
sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
|
|
|
|
ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
|
|
|
|
sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
|
|
|
|
sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
|
|
|
|
ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
|
|
|
|
sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
|
|
|
|
ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;
|
2000-10-28 06:59:48 +00:00
|
|
|
|
2004-05-06 08:04:34 +00:00
|
|
|
/* Pick the first valid sleep state for the sleep button default. */
|
2009-04-30 17:35:44 +00:00
|
|
|
sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
|
2007-06-21 22:50:37 +00:00
|
|
|
for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
|
2009-04-30 17:35:44 +00:00
|
|
|
if (acpi_sleep_states[state]) {
|
2004-05-06 08:04:34 +00:00
|
|
|
sc->acpi_sleep_button_sx = state;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2001-01-10 18:01:51 +00:00
|
|
|
acpi_enable_fixed_events(sc);
|
2000-10-28 06:59:48 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Scan the namespace and attach/initialise children.
|
|
|
|
*/
|
|
|
|
|
2004-06-14 04:37:45 +00:00
|
|
|
/* Register our shutdown handler. */
|
2003-08-28 16:06:30 +00:00
|
|
|
EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc,
|
|
|
|
SHUTDOWN_PRI_LAST);
|
2000-10-28 06:59:48 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Register our acpi event handlers.
|
|
|
|
* XXX should be configurable eg. via userland policy manager.
|
|
|
|
*/
|
2003-08-28 16:06:30 +00:00
|
|
|
EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep,
|
|
|
|
sc, ACPI_EVENT_PRI_LAST);
|
|
|
|
EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup,
|
|
|
|
sc, ACPI_EVENT_PRI_LAST);
|
2000-10-28 06:59:48 +00:00
|
|
|
|
2003-08-28 16:06:30 +00:00
|
|
|
/* Flag our initial states. */
|
2009-04-30 17:35:44 +00:00
|
|
|
sc->acpi_enabled = TRUE;
|
2000-10-28 06:59:48 +00:00
|
|
|
sc->acpi_sstate = ACPI_STATE_S0;
|
2009-04-30 17:35:44 +00:00
|
|
|
sc->acpi_sleep_disabled = TRUE;
|
2000-10-28 06:59:48 +00:00
|
|
|
|
2003-08-28 16:06:30 +00:00
|
|
|
/* Create the control device */
|
2018-07-02 14:15:30 +00:00
|
|
|
sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0664,
|
2003-08-28 16:06:30 +00:00
|
|
|
"acpi");
|
2000-10-28 06:59:48 +00:00
|
|
|
sc->acpi_dev_t->si_drv1 = sc;
|
|
|
|
|
2003-08-28 16:06:30 +00:00
|
|
|
if ((error = acpi_machdep_init(dev)))
|
2001-10-26 17:43:05 +00:00
|
|
|
goto out;
|
|
|
|
|
2001-11-01 16:34:07 +00:00
|
|
|
/* Register ACPI again to pass the correct argument of pm_func. */
|
|
|
|
power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);
|
|
|
|
|
2015-02-06 16:09:01 +00:00
|
|
|
if (!acpi_disabled("bus")) {
|
|
|
|
EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000);
|
2002-09-06 17:01:06 +00:00
|
|
|
acpi_probe_children(dev);
|
2015-02-06 16:09:01 +00:00
|
|
|
}
|
2002-09-06 17:01:06 +00:00
|
|
|
|
2010-12-15 23:48:45 +00:00
|
|
|
/* Update all GPEs and enable runtime GPEs. */
|
|
|
|
status = AcpiUpdateAllGpes();
|
|
|
|
if (ACPI_FAILURE(status))
|
|
|
|
device_printf(dev, "Could not update all GPEs: %s\n",
|
|
|
|
AcpiFormatException(status));
|
|
|
|
|
2009-04-30 17:35:44 +00:00
|
|
|
/* Allow sleep request after a while. */
|
2014-09-22 14:27:26 +00:00
|
|
|
callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0);
|
|
|
|
callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME,
|
|
|
|
acpi_sleep_enable, sc);
|
2009-04-30 17:35:44 +00:00
|
|
|
|
2001-06-29 20:29:59 +00:00
|
|
|
error = 0;
|
|
|
|
|
|
|
|
out:
|
2003-08-28 16:06:30 +00:00
|
|
|
return_VALUE (error);
|
2000-10-28 06:59:48 +00:00
|
|
|
}
|
|
|
|
|
2010-10-19 19:53:06 +00:00
|
|
|
static void
|
|
|
|
acpi_set_power_children(device_t dev, int state)
|
|
|
|
{
|
2014-11-11 19:42:10 +00:00
|
|
|
device_t child;
|
2010-10-19 19:53:06 +00:00
|
|
|
device_t *devlist;
|
|
|
|
int dstate, i, numdevs;
|
|
|
|
|
|
|
|
if (device_get_children(dev, &devlist, &numdevs) != 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Retrieve and set D-state for the sleep state if _SxD is present.
|
|
|
|
* Skip children who aren't attached since they are handled separately.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < numdevs; i++) {
|
|
|
|
child = devlist[i];
|
|
|
|
dstate = state;
|
|
|
|
if (device_is_attached(child) &&
|
2014-11-11 19:42:10 +00:00
|
|
|
acpi_device_pwr_for_sleep(dev, child, &dstate) == 0)
|
2010-10-19 19:53:06 +00:00
|
|
|
acpi_set_powerstate(child, dstate);
|
|
|
|
}
|
|
|
|
free(devlist, M_TEMP);
|
|
|
|
}
|
|
|
|
|
2004-12-02 08:07:12 +00:00
|
|
|
static int
|
|
|
|
acpi_suspend(device_t dev)
|
|
|
|
{
|
2010-10-19 19:53:06 +00:00
|
|
|
int error;
|
2004-12-02 08:07:12 +00:00
|
|
|
|
2009-08-20 19:17:53 +00:00
|
|
|
GIANT_REQUIRED;
|
|
|
|
|
2004-12-02 08:07:12 +00:00
|
|
|
error = bus_generic_suspend(dev);
|
2010-10-19 19:53:06 +00:00
|
|
|
if (error == 0)
|
|
|
|
acpi_set_power_children(dev, ACPI_STATE_D3);
|
2004-12-02 08:07:12 +00:00
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
acpi_resume(device_t dev)
|
|
|
|
{
|
|
|
|
|
2009-08-20 19:17:53 +00:00
|
|
|
GIANT_REQUIRED;
|
|
|
|
|
2010-10-19 19:53:06 +00:00
|
|
|
acpi_set_power_children(dev, ACPI_STATE_D0);
|
2004-12-02 08:07:12 +00:00
|
|
|
|
|
|
|
return (bus_generic_resume(dev));
|
|
|
|
}
|
|
|
|
|
2004-06-05 07:25:58 +00:00
|
|
|
/*
 * Bus shutdown method: give children a chance to shut down, then arm the
 * wake GPEs appropriate for the S5 (soft-off) state.
 */
static int
acpi_shutdown(device_t dev)
{

    GIANT_REQUIRED;

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Enable any GPEs that are able to power-on the system (i.e., RTC).
     * Also, disable any that are not valid for this state (most).
     */
    acpi_wake_prep_walk(ACPI_STATE_S5);

    return (0);
}
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
|
|
|
|
* Handle a new device being added
|
|
|
|
*/
|
|
|
|
static device_t
|
2010-09-10 11:19:03 +00:00
|
|
|
acpi_add_child(device_t bus, u_int order, const char *name, int unit)
|
2000-10-28 06:59:48 +00:00
|
|
|
{
|
|
|
|
struct acpi_device *ad;
|
|
|
|
device_t child;
|
|
|
|
|
2003-08-28 16:06:30 +00:00
|
|
|
if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL)
|
|
|
|
return (NULL);
|
2000-10-28 06:59:48 +00:00
|
|
|
|
|
|
|
resource_list_init(&ad->ad_rl);
|
2004-05-27 18:38:45 +00:00
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
child = device_add_child_ordered(bus, order, name, unit);
|
|
|
|
if (child != NULL)
|
|
|
|
device_set_ivars(child, ad);
|
2005-03-27 03:37:43 +00:00
|
|
|
else
|
|
|
|
free(ad, M_ACPIDEV);
|
2003-08-28 16:06:30 +00:00
|
|
|
return (child);
|
2000-10-28 06:59:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
acpi_print_child(device_t bus, device_t child)
|
|
|
|
{
|
2003-08-28 16:06:30 +00:00
|
|
|
struct acpi_device *adev = device_get_ivars(child);
|
|
|
|
struct resource_list *rl = &adev->ad_rl;
|
2000-10-28 06:59:48 +00:00
|
|
|
int retval = 0;
|
|
|
|
|
|
|
|
retval += bus_print_child_header(bus, child);
|
Use uintmax_t (typedef'd to rman_res_t type) for rman ranges.
On some architectures, u_long isn't large enough for resource definitions.
Particularly, powerpc and arm allow 36-bit (or larger) physical addresses, but
type `long' is only 32-bit. This extends rman's resources to uintmax_t. With
this change, any resource can feasibly be placed anywhere in physical memory
(within the constraints of the driver).
Why uintmax_t and not something machine dependent, or uint64_t? Though it's
possible for uintmax_t to grow, it's highly unlikely it will become 128-bit on
32-bit architectures. 64-bit architectures should have plenty of RAM to absorb
the increase on resource sizes if and when this occurs, and the number of
resources on memory-constrained systems should be sufficiently small as to not
pose a drastic overhead. That being said, uintmax_t was chosen for source
clarity. If it's specified as uint64_t, all printf()-like calls would either
need casts to uintmax_t, or be littered with PRI*64 macros. Casts to uintmax_t
aren't horrible, but it would also bake into the API for
resource_list_print_type() either a hidden assumption that entries get cast to
uintmax_t for printing, or these calls would need the PRI*64 macros. Since
source code is meant to be read more often than written, I chose the clearest
path of simply using uintmax_t.
Tested on a PowerPC p5020-based board, which places all device resources in
0xfxxxxxxxx, and has 8GB RAM.
Regression tested on qemu-system-i386
Regression tested on qemu-system-mips (malta profile)
Tested PAE and devinfo on virtualbox (live CD)
Special thanks to bz for his testing on ARM.
Reviewed By: bz, jhb (previous)
Relnotes: Yes
Sponsored by: Alex Perez/Inertial Computing
Differential Revision: https://reviews.freebsd.org/D4544
2016-03-18 01:28:41 +00:00
|
|
|
retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx");
|
|
|
|
retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx");
|
|
|
|
retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
|
|
|
|
retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd");
|
2004-10-13 07:27:21 +00:00
|
|
|
if (device_get_flags(child))
|
|
|
|
retval += printf(" flags %#x", device_get_flags(child));
|
2014-10-09 05:33:25 +00:00
|
|
|
retval += bus_print_child_domain(bus, child);
|
2004-10-13 07:29:29 +00:00
|
|
|
retval += bus_print_child_footer(bus, child);
|
2000-10-28 06:59:48 +00:00
|
|
|
|
2003-08-28 16:06:30 +00:00
|
|
|
return (retval);
|
2000-10-28 06:59:48 +00:00
|
|
|
}
|
|
|
|
|
2004-12-02 08:07:12 +00:00
|
|
|
/*
 * If this device is an ACPI child but no one claimed it, attempt
 * to power it off. We'll power it back up when a driver is added.
 *
 * XXX Disabled for now since many necessary devices (like fdc and
 * ATA) don't claim the devices we created for them but still expect
 * them to be powered up.
 */
static void
acpi_probe_nomatch(device_t bus, device_t child)
{
#ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
    /* Opt-in only (see XXX above): drop unclaimed devices to D3. */
    acpi_set_powerstate(child, ACPI_STATE_D3);
#endif
}
|
|
|
|
|
|
|
|
/*
 * If a new driver has a chance to probe a child, first power it up.
 *
 * XXX Disabled for now (see acpi_probe_nomatch for details).
 */
static void
acpi_driver_added(device_t dev, driver_t *driver)
{
    device_t child, *devlist;
    int i, numdevs;

    /* Let the new driver create any children it identifies. */
    DEVICE_IDENTIFY(driver, dev);
    if (device_get_children(dev, &devlist, &numdevs))
	return;
    /* Offer each still-unclaimed child to the new driver. */
    for (i = 0; i < numdevs; i++) {
	child = devlist[i];
	if (device_get_state(child) == DS_NOTPRESENT) {
#ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
	    /* Power up for probing; power back down if no driver attached. */
	    acpi_set_powerstate(child, ACPI_STATE_D0);
	    if (device_probe_and_attach(child) != 0)
		acpi_set_powerstate(child, ACPI_STATE_D3);
#else
	    device_probe_and_attach(child);
#endif
	}
    }
    free(devlist, M_TEMP);
}
|
|
|
|
|
2004-03-31 17:35:28 +00:00
|
|
|
/* Location hint for devctl(8) */
|
|
|
|
static int
|
2004-03-31 17:21:14 +00:00
|
|
|
acpi_child_location_str_method(device_t cbdev, device_t child, char *buf,
|
|
|
|
size_t buflen)
|
|
|
|
{
|
|
|
|
struct acpi_device *dinfo = device_get_ivars(child);
|
2014-09-20 04:31:12 +00:00
|
|
|
char buf2[32];
|
|
|
|
int pxm;
|
|
|
|
|
|
|
|
if (dinfo->ad_handle) {
|
|
|
|
snprintf(buf, buflen, "handle=%s", acpi_name(dinfo->ad_handle));
|
|
|
|
if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) {
|
|
|
|
snprintf(buf2, 32, " _PXM=%d", pxm);
|
|
|
|
strlcat(buf, buf2, buflen);
|
|
|
|
}
|
|
|
|
} else {
|
2019-08-19 17:51:06 +00:00
|
|
|
snprintf(buf, buflen, "");
|
2014-09-20 04:31:12 +00:00
|
|
|
}
|
2004-03-31 17:21:14 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2004-03-31 17:35:28 +00:00
|
|
|
/* PnP information for devctl(8) */
/*
 * Format the _HID/_UID/_CID identification of the object at 'handle'
 * into 'buf'.  Fields that the object does not provide are reported as
 * "none" (or 0 for _UID).  Always returns 0; on ACPICA failure the
 * buffer reads "unknown".
 */
int
acpi_pnpinfo_str(ACPI_HANDLE handle, char *buf, size_t buflen)
{
    ACPI_DEVICE_INFO *adinfo;

    if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &adinfo))) {
	snprintf(buf, buflen, "unknown");
	return (0);
    }

    /*
     * _UID strings are parsed as base-10 integers here; a non-numeric
     * _UID would read as 0 — NOTE(review): assumed acceptable for the
     * devctl consumers of this string.
     */
    snprintf(buf, buflen, "_HID=%s _UID=%lu _CID=%s",
	(adinfo->Valid & ACPI_VALID_HID) ?
	adinfo->HardwareId.String : "none",
	(adinfo->Valid & ACPI_VALID_UID) ?
	strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL,
	((adinfo->Valid & ACPI_VALID_CID) &&
	adinfo->CompatibleIdList.Count > 0) ?
	adinfo->CompatibleIdList.Ids[0].String : "none");
    /* AcpiGetObjectInfo allocates the info block; release it via ACPICA. */
    AcpiOsFree(adinfo);

    return (0);
}
|
|
|
|
|
2020-03-09 20:28:45 +00:00
|
|
|
static int
|
|
|
|
acpi_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf,
|
|
|
|
size_t buflen)
|
|
|
|
{
|
|
|
|
struct acpi_device *dinfo = device_get_ivars(child);
|
|
|
|
|
|
|
|
return (acpi_pnpinfo_str(dinfo->ad_handle, buf, buflen));
|
|
|
|
}
|
|
|
|
|
2020-03-09 20:27:25 +00:00
|
|
|
/*
 * Handle device deletion.
 */
static void
acpi_child_deleted(device_t dev, device_t child)
{
    struct acpi_device *dinfo = device_get_ivars(child);

    /*
     * Drop the handle-to-device mapping, but only if it still points at
     * this child — another device may have since claimed the handle.
     */
    if (acpi_get_device(dinfo->ad_handle) == child)
	AcpiDetachData(dinfo->ad_handle, acpi_fake_objhandler);
}
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
 * Handle per-device ivars
 */
/*
 * Bus read_ivar method: returns ACPI-specific ivars plus ISA- and
 * PCI-compatibility values derived from the child's ivar block.
 * Returns ENOENT if the child has no ivars or the index is unknown.
 */
static int
acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_device *ad;

    if ((ad = device_get_ivars(child)) == NULL) {
	device_printf(child, "device has no ivars\n");
	return (ENOENT);
    }

    /* ACPI and ISA compatibility ivars */
    switch(index) {
    case ACPI_IVAR_HANDLE:
	*(ACPI_HANDLE *)result = ad->ad_handle;
	break;
    case ACPI_IVAR_PRIVATE:
	*(void **)result = ad->ad_private;
	break;
    case ACPI_IVAR_FLAGS:
	*(int *)result = ad->ad_flags;
	break;
    /* ISA compatibility: these IDs are not tracked, report -1. */
    case ISA_IVAR_VENDORID:
    case ISA_IVAR_SERIAL:
    case ISA_IVAR_COMPATID:
	*(int *)result = -1;
	break;
    case ISA_IVAR_LOGICALID:
	*(int *)result = acpi_isa_get_logicalid(child);
	break;
    /* PCI compatibility: unpack class/subclass/progif from ad_cls_class. */
    case PCI_IVAR_CLASS:
	*(uint8_t*)result = (ad->ad_cls_class >> 16) & 0xff;
	break;
    case PCI_IVAR_SUBCLASS:
	*(uint8_t*)result = (ad->ad_cls_class >> 8) & 0xff;
	break;
    case PCI_IVAR_PROGIF:
	*(uint8_t*)result = (ad->ad_cls_class >> 0) & 0xff;
	break;
    default:
	return (ENOENT);
    }

    return (0);
}
|
|
|
|
|
|
|
|
/*
 * Bus write_ivar method: only the ACPI-specific ivars are writable.
 * Panics on an unknown index; returns ENOENT if the child has no ivars.
 */
static int
acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
    struct acpi_device *ad;

    if ((ad = device_get_ivars(child)) == NULL) {
	device_printf(child, "device has no ivars\n");
	return (ENOENT);
    }

    switch(index) {
    case ACPI_IVAR_HANDLE:
	ad->ad_handle = (ACPI_HANDLE)value;
	break;
    case ACPI_IVAR_PRIVATE:
	ad->ad_private = (void *)value;
	break;
    case ACPI_IVAR_FLAGS:
	ad->ad_flags = (int)value;
	break;
    default:
	panic("bad ivar write request (%d)", index);
	/* NOTREACHED: panic() does not return. */
	return (ENOENT);
    }

    return (0);
}
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
|
|
|
|
* Handle child resource allocation/removal
|
|
|
|
*/
|
2004-06-13 22:52:30 +00:00
|
|
|
static struct resource_list *
|
|
|
|
acpi_get_rlist(device_t dev, device_t child)
|
2000-10-28 06:59:48 +00:00
|
|
|
{
|
2004-06-13 22:52:30 +00:00
|
|
|
struct acpi_device *ad;
|
2000-10-28 06:59:48 +00:00
|
|
|
|
2004-06-13 22:52:30 +00:00
|
|
|
ad = device_get_ivars(child);
|
|
|
|
return (&ad->ad_rl);
|
2000-10-28 06:59:48 +00:00
|
|
|
}
|
|
|
|
|
2008-11-18 21:01:54 +00:00
|
|
|
static int
|
|
|
|
acpi_match_resource_hint(device_t dev, int type, long value)
|
|
|
|
{
|
|
|
|
struct acpi_device *ad = device_get_ivars(dev);
|
|
|
|
struct resource_list *rl = &ad->ad_rl;
|
|
|
|
struct resource_list_entry *rle;
|
|
|
|
|
|
|
|
STAILQ_FOREACH(rle, rl, link) {
|
|
|
|
if (rle->type != type)
|
|
|
|
continue;
|
|
|
|
if (rle->start <= value && rle->end >= value)
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Wire device unit numbers based on resource matches in hints.
 */
static void
acpi_hint_device_unit(device_t acdev, device_t child, const char *name,
    int *unitp)
{
    const char *s;
    long value;
    int line, matches, unit;

    /*
     * Iterate over all the hints for the devices with the specified
     * name to see if one's resources are a subset of this device.
     */
    line = 0;
    while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) {
	/* Must have an "at" for acpi or isa. */
	resource_string_value(name, unit, "at", &s);
	if (!(strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 ||
	    strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0))
	    continue;

	/*
	 * Check for matching resources.  We must have at least one match.
	 * Since I/O and memory resources cannot be shared, if we get a
	 * match on either of those, ignore any mismatches in IRQs or DRQs.
	 *
	 * XXX: We may want to revisit this to be more lenient and wire
	 * as long as it gets one match.
	 */
	matches = 0;
	if (resource_long_value(name, unit, "port", &value) == 0) {
	    /*
	     * Floppy drive controllers are notorious for having a
	     * wide variety of resources not all of which include the
	     * first port that is specified by the hint (typically
	     * 0x3f0) (see the comment above fdc_isa_alloc_resources()
	     * in fdc_isa.c).  However, they do all seem to include
	     * port + 2 (e.g. 0x3f2) so for a floppy device, look for
	     * 'value + 2' in the port resources instead of the hint
	     * value.
	     */
	    if (strcmp(name, "fdc") == 0)
		value += 2;
	    if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value))
		matches++;
	    else
		continue;
	}
	if (resource_long_value(name, unit, "maddr", &value) == 0) {
	    if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value))
		matches++;
	    else
		continue;
	}
	/* A port or memory match is decisive: skip the IRQ/DRQ checks. */
	if (matches > 0)
	    goto matched;
	if (resource_long_value(name, unit, "irq", &value) == 0) {
	    if (acpi_match_resource_hint(child, SYS_RES_IRQ, value))
		matches++;
	    else
		continue;
	}
	if (resource_long_value(name, unit, "drq", &value) == 0) {
	    if (acpi_match_resource_hint(child, SYS_RES_DRQ, value))
		matches++;
	    else
		continue;
	}

matched:
	if (matches > 0) {
	    /* We have a winner! */
	    *unitp = unit;
	    break;
	}
    }
}
|
|
|
|
|
2014-10-09 05:33:25 +00:00
|
|
|
/*
 * Fetch the NUMA domain for a device by mapping the value returned by
 * _PXM to a NUMA domain.  If the device does not have a _PXM method,
 * -2 is returned.  If any other error occurs, -1 is returned.
 */
static int
acpi_parse_pxm(device_t dev)
{
#ifdef NUMA
/* _PXM-to-domain mapping is only implemented on x86 here. */
#if defined(__i386__) || defined(__amd64__)
    ACPI_HANDLE handle;
    ACPI_STATUS status;
    int pxm;

    handle = acpi_get_handle(dev);
    if (handle == NULL)
	return (-2);
    status = acpi_GetInteger(handle, "_PXM", &pxm);
    if (ACPI_SUCCESS(status))
	return (acpi_map_pxm_to_vm_domainid(pxm));
    /* AE_NOT_FOUND means no _PXM method at all, not a failure. */
    if (status == AE_NOT_FOUND)
	return (-2);
#endif
#endif
    /* Non-NUMA/non-x86 builds, or a real _PXM evaluation error. */
    return (-1);
}
|
2016-05-02 18:00:38 +00:00
|
|
|
|
2016-05-09 20:50:21 +00:00
|
|
|
/*
 * Bus get_cpus method: when the child has a usable _PXM domain, answer
 * LOCAL_CPUS with that domain's CPU set and intersect INTR_CPUS with it;
 * otherwise (or for other ops) defer to the generic bus method.
 */
int
acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize,
    cpuset_t *cpuset)
{
    int d, error;

    d = acpi_parse_pxm(child);
    /* No _PXM (-2) or mapping error (-1): fall back to the parent. */
    if (d < 0)
	return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));

    switch (op) {
    case LOCAL_CPUS:
	if (setsize != sizeof(cpuset_t))
	    return (EINVAL);
	*cpuset = cpuset_domain[d];
	return (0);
    case INTR_CPUS:
	/* Start from the generic answer, then restrict to our domain. */
	error = bus_generic_get_cpus(dev, child, op, setsize, cpuset);
	if (error != 0)
	    return (error);
	if (setsize != sizeof(cpuset_t))
	    return (EINVAL);
	CPU_AND(cpuset, &cpuset_domain[d]);
	return (0);
    default:
	return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
    }
}
|
|
|
|
|
|
|
|
/*
|
2016-05-09 20:50:21 +00:00
|
|
|
* Fetch the NUMA domain for the given device 'dev'.
|
2015-04-19 17:15:55 +00:00
|
|
|
*
|
|
|
|
* If a device has a _PXM method, map that to a NUMA domain.
|
2016-05-09 20:50:21 +00:00
|
|
|
* Otherwise, pass the request up to the parent.
|
|
|
|
* If there's no matching domain or the domain cannot be
|
|
|
|
* determined, return ENOENT.
|
2015-04-19 17:15:55 +00:00
|
|
|
*/
|
|
|
|
int
|
|
|
|
acpi_get_domain(device_t dev, device_t child, int *domain)
|
|
|
|
{
|
2016-05-09 20:50:21 +00:00
|
|
|
int d;
|
2015-04-19 17:15:55 +00:00
|
|
|
|
2016-05-09 20:50:21 +00:00
|
|
|
d = acpi_parse_pxm(child);
|
|
|
|
if (d >= 0) {
|
|
|
|
*domain = d;
|
2016-05-03 01:17:40 +00:00
|
|
|
return (0);
|
2016-05-09 20:50:21 +00:00
|
|
|
}
|
|
|
|
if (d == -1)
|
|
|
|
return (ENOENT);
|
2015-04-19 17:15:55 +00:00
|
|
|
|
2014-10-09 05:33:25 +00:00
|
|
|
/* No _PXM node; go up a level */
|
|
|
|
return (bus_generic_get_domain(dev, child, domain));
|
|
|
|
}
|
|
|
|
|
2004-08-23 16:28:42 +00:00
|
|
|
/*
 * Pre-allocate/manage all memory and IO resources.  Since rman can't handle
 * duplicates, we merge any in the sysresource attach routine.
 */
static int
acpi_sysres_alloc(device_t dev)
{
    struct resource *res;
    struct resource_list *rl;
    struct resource_list_entry *rle;
    struct rman *rm;
    device_t *children;
    int child_count, i;

    /*
     * Probe/attach any sysresource devices. This would be unnecessary if we
     * had multi-pass probe/attach.
     */
    if (device_get_children(dev, &children, &child_count) != 0)
	return (ENXIO);
    for (i = 0; i < child_count; i++) {
	if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0)
	    device_probe_and_attach(children[i]);
    }
    free(children, M_TEMP);

    /* Walk our own resource list, populated by the sysresource children. */
    rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
    STAILQ_FOREACH(rle, rl, link) {
	if (rle->res != NULL) {
	    device_printf(dev, "duplicate resource for %jx\n", rle->start);
	    continue;
	}

	/* Only memory and IO resources are valid here. */
	switch (rle->type) {
	case SYS_RES_IOPORT:
	    rm = &acpi_rman_io;
	    break;
	case SYS_RES_MEMORY:
	    rm = &acpi_rman_mem;
	    break;
	default:
	    continue;
	}

	/* Pre-allocate resource and add to our rman pool. */
	res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, rle->type,
	    &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, 0);
	if (res != NULL) {
	    rman_manage_region(rm, rman_get_start(res), rman_get_end(res));
	    rle->res = res;
	} else if (bootverbose)
	    device_printf(dev, "reservation of %jx, %jx (%d) failed\n",
		rle->start, rle->count, rle->type);
    }
    return (0);
}
|
|
|
|
|
2010-12-22 20:27:20 +00:00
|
|
|
/*
 * Reserve declared resources for devices found during attach once system
 * resources have been allocated.
 */
static void
acpi_reserve_resources(device_t dev)
{
    struct resource_list_entry *rle;
    struct resource_list *rl;
    struct acpi_device *ad;
    struct acpi_softc *sc;
    device_t *children;
    int child_count, i;

    sc = device_get_softc(dev);
    if (device_get_children(dev, &children, &child_count) != 0)
	return;
    for (i = 0; i < child_count; i++) {
	ad = device_get_ivars(children[i]);
	rl = &ad->ad_rl;

	/* Don't reserve system resources. */
	if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0)
	    continue;

	STAILQ_FOREACH(rle, rl, link) {
	    /*
	     * Don't reserve IRQ resources.  There are many sticky things
	     * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET
	     * when using legacy routing).
	     */
	    if (rle->type == SYS_RES_IRQ)
		continue;

	    /*
	     * Don't reserve the resource if it is already allocated.
	     * The acpi_ec(4) driver can allocate its resources early
	     * if ECDT is present.
	     */
	    if (rle->res != NULL)
		continue;

	    /*
	     * Try to reserve the resource from our parent.  If this
	     * fails because the resource is a system resource, just
	     * let it be.  The resource range is already reserved so
	     * that other devices will not use it.  If the driver
	     * needs to allocate the resource, then
	     * acpi_alloc_resource() will sub-alloc from the system
	     * resource.
	     */
	    resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid,
		rle->start, rle->end, rle->count, 0);
	}
    }
    free(children, M_TEMP);
    /* Record that reservation ran so later attach paths can rely on it. */
    sc->acpi_resources_reserved = 1;
}
|
|
|
|
|
|
|
|
/*
 * Record a resource range for a child in its resource list and, once the
 * system resources have been allocated, reserve the range from our parent.
 * Returns 0 on success or EBUSY if the resource is already allocated.
 */
static int
acpi_set_resource(device_t dev, device_t child, int type, int rid,
    rman_res_t start, rman_res_t count)
{
    struct acpi_softc *sc = device_get_softc(dev);
    struct acpi_device *ad = device_get_ivars(child);
    struct resource_list *rl = &ad->ad_rl;
    ACPI_DEVICE_INFO *devinfo;
    rman_res_t end;
    int allow;

    /* Ignore IRQ resources for PCI link devices. */
    if (type == SYS_RES_IRQ &&
	ACPI_ID_PROBE(dev, child, pcilink_ids, NULL) <= 0)
	return (0);

    /*
     * Ignore most resources for PCI root bridges.  Some BIOSes
     * incorrectly enumerate the memory ranges they decode as plain
     * memory resources instead of as ResourceProducer ranges.  Other
     * BIOSes incorrectly list system resource entries for I/O ranges
     * under the PCI bridge.  Do allow the one known-correct case on
     * x86 of a PCI bridge claiming the I/O ports used for PCI config
     * access.
     */
    if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
	if (ACPI_SUCCESS(AcpiGetObjectInfo(ad->ad_handle, &devinfo))) {
	    if ((devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0) {
#if defined(__i386__) || defined(__amd64__)
		allow = (type == SYS_RES_IOPORT && start == CONF1_ADDR_PORT);
#else
		allow = 0;
#endif
		if (!allow) {
		    AcpiOsFree(devinfo);
		    return (0);
		}
	    }
	    AcpiOsFree(devinfo);
	}
    }

#ifdef INTRNG
    /* map with default for now */
    if (type == SYS_RES_IRQ)
	start = (rman_res_t)acpi_map_intr(child, (u_int)start,
	    acpi_get_handle(child));
#endif

    /* If the resource is already allocated, fail. */
    if (resource_list_busy(rl, type, rid))
	return (EBUSY);

    /* If the resource is already reserved, release it. */
    if (resource_list_reserved(rl, type, rid))
	resource_list_unreserve(rl, dev, child, type, rid);

    /* Add the resource. */
    end = (start + count - 1);
    resource_list_add(rl, type, rid, start, end, count);

    /* Don't reserve resources until the system resources are allocated. */
    if (!sc->acpi_resources_reserved)
	return (0);

    /* Don't reserve system resources. */
    if (ACPI_ID_PROBE(dev, child, sysres_ids, NULL) <= 0)
	return (0);

    /*
     * Don't reserve IRQ resources.  There are many sticky things to
     * get right otherwise (e.g. IRQs for psm, atkbd, and HPET when
     * using legacy routing).
     */
    if (type == SYS_RES_IRQ)
	return (0);

    /*
     * Don't reserve resources for CPU devices.  Some of these
     * resources need to be allocated as shareable, but reservations
     * are always non-shareable.
     */
    if (device_get_devclass(child) == devclass_find("cpu"))
	return (0);

    /*
     * Reserve the resource.
     *
     * XXX: Ignores failure for now.  Failure here is probably a
     * BIOS/firmware bug?
     */
    resource_list_reserve(rl, dev, child, type, &rid, start, end, count, 0);
    return (0);
}
|
|
|
|
|
|
|
|
/*
 * Allocate a resource on behalf of a child.  Direct children go through
 * our resource list (handling reserved entries); grandchildren are passed
 * up to our parent.  On failure for a specific range, fall back to
 * sub-allocating from the ACPI system resource rmans.
 */
static struct resource *
acpi_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
#ifndef INTRNG
    ACPI_RESOURCE ares;
#endif
    struct acpi_device *ad;
    struct resource_list_entry *rle;
    struct resource_list *rl;
    struct resource *res;
    int isdefault = RMAN_IS_DEFAULT_RANGE(start, end);

    /*
     * First attempt at allocating the resource.  For direct children,
     * use resource_list_alloc() to handle reserved resources.  For
     * other devices, pass the request up to our parent.
     */
    if (bus == device_get_parent(child)) {
	ad = device_get_ivars(child);
	rl = &ad->ad_rl;

	/*
	 * Simulate the behavior of the ISA bus for direct children
	 * devices.  That is, if a non-default range is specified for
	 * a resource that doesn't exist, use bus_set_resource() to
	 * add the resource before allocating it.  Note that these
	 * resources will not be reserved.
	 */
	if (!isdefault && resource_list_find(rl, type, *rid) == NULL)
	    resource_list_add(rl, type, *rid, start, end, count);
	res = resource_list_alloc(rl, bus, child, type, rid, start, end, count,
	    flags);
#ifndef INTRNG
	if (res != NULL && type == SYS_RES_IRQ) {
	    /*
	     * Since bus_config_intr() takes immediate effect, we cannot
	     * configure the interrupt associated with a device when we
	     * parse the resources but have to defer it until a driver
	     * actually allocates the interrupt via bus_alloc_resource().
	     *
	     * XXX: Should we handle the lookup failing?
	     */
	    if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares)))
		acpi_config_intr(child, &ares);
	}
#endif

	/*
	 * If this is an allocation of the "default" range for a given
	 * RID, fetch the exact bounds for this resource from the
	 * resource list entry to try to allocate the range from the
	 * system resource regions.
	 */
	if (res == NULL && isdefault) {
	    rle = resource_list_find(rl, type, *rid);
	    if (rle != NULL) {
		start = rle->start;
		end = rle->end;
		count = rle->count;
	    }
	}
    } else
	res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid,
	    start, end, count, flags);

    /*
     * If the first attempt failed and this is an allocation of a
     * specific range, try to satisfy the request via a suballocation
     * from our system resource regions.
     */
    if (res == NULL && start + count - 1 == end)
	res = acpi_alloc_sysres(child, type, rid, start, end, count, flags);
    return (res);
}
|
|
|
|
|
|
|
|
/*
 * Attempt to allocate a specific resource range from the system
 * resource ranges.  Note that we only handle memory and I/O port
 * system resources.  Returns NULL if the range cannot be satisfied.
 */
struct resource *
acpi_alloc_sysres(device_t child, int type, int *rid, rman_res_t start,
    rman_res_t end, rman_res_t count, u_int flags)
{
    struct rman *rm;
    struct resource *res;

    /* Pick the rman backing this resource type. */
    switch (type) {
    case SYS_RES_IOPORT:
	rm = &acpi_rman_io;
	break;
    case SYS_RES_MEMORY:
	rm = &acpi_rman_mem;
	break;
    default:
	return (NULL);
    }

    /* Callers must pass a fully-specified (non-wildcard) range. */
    KASSERT(start + count - 1 == end, ("wildcard resource range"));
    res = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE,
	child);
    if (res == NULL)
	return (NULL);

    rman_set_rid(res, *rid);

    /* If requested, activate the resource using the parent's method. */
    if (flags & RF_ACTIVE)
	if (bus_activate_resource(child, type, *rid, res) != 0) {
	    rman_release_resource(res);
	    return (NULL);
	}

    return (res);
}
|
|
|
|
|
|
|
|
static int
|
2011-06-10 12:30:16 +00:00
|
|
|
acpi_is_resource_managed(int type, struct resource *r)
|
2000-10-28 06:59:48 +00:00
|
|
|
{
|
|
|
|
|
2007-03-21 15:39:11 +00:00
|
|
|
/* We only handle memory and IO resources through rman. */
|
|
|
|
switch (type) {
|
|
|
|
case SYS_RES_IOPORT:
|
2011-06-10 12:30:16 +00:00
|
|
|
return (rman_is_region_manager(r, &acpi_rman_io));
|
2007-03-21 15:39:11 +00:00
|
|
|
case SYS_RES_MEMORY:
|
2011-06-10 12:30:16 +00:00
|
|
|
return (rman_is_region_manager(r, &acpi_rman_mem));
|
2007-03-21 15:39:11 +00:00
|
|
|
}
|
2011-06-10 12:30:16 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
acpi_adjust_resource(device_t bus, device_t child, int type, struct resource *r,
|
2016-01-27 02:23:54 +00:00
|
|
|
rman_res_t start, rman_res_t end)
|
2011-06-10 12:30:16 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
if (acpi_is_resource_managed(type, r))
|
|
|
|
return (rman_adjust_resource(r, start, end));
|
|
|
|
return (bus_generic_adjust_resource(bus, child, type, r, start, end));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Release a resource allocated by a child.  Locally-managed resources
 * are deactivated (if active) and returned to the local rman pool;
 * anything else follows the generic resource-list path.
 */
static int
acpi_release_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
    int ret;

    /*
     * If this resource belongs to one of our internal managers,
     * deactivate it and release it to the local pool.
     */
    if (acpi_is_resource_managed(type, r)) {
	if (rman_get_flags(r) & RF_ACTIVE) {
	    ret = bus_deactivate_resource(child, type, rid, r);
	    if (ret != 0)
		return (ret);
	}
	return (rman_release_resource(r));
    }

    return (bus_generic_rl_release_resource(bus, child, type, rid, r));
}
|
|
|
|
|
2005-03-22 20:00:57 +00:00
|
|
|
/*
 * Delete a resource entry from a child's resource list, first unreserving
 * it.  Refuses (with a diagnostic) if the child still holds the resource.
 */
static void
acpi_delete_resource(device_t bus, device_t child, int type, int rid)
{
    struct resource_list *rl;

    rl = acpi_get_rlist(bus, child);
    if (resource_list_busy(rl, type, rid)) {
	device_printf(bus, "delete_resource: Resource still owned by child"
	    " (type=%d, rid=%d)\n", type, rid);
	return;
    }
    resource_list_unreserve(rl, bus, child, type, rid);
    resource_list_delete(rl, type, rid);
}
|
|
|
|
|
2003-11-15 19:18:29 +00:00
|
|
|
/* Allocate an IO port or memory resource, given its GAS. */
/*
 * On success, *type and *res are set and 0 is returned; otherwise an
 * errno is returned.  Note this may modify gas->BitWidth in place
 * (widening sub-byte fields to a full byte).
 */
int
acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas,
    struct resource **res, u_int flags)
{
    int error, res_type;

    error = ENOMEM;
    if (type == NULL || rid == NULL || gas == NULL || res == NULL)
	return (EINVAL);

    /* We only support memory and IO spaces. */
    switch (gas->SpaceId) {
    case ACPI_ADR_SPACE_SYSTEM_MEMORY:
	res_type = SYS_RES_MEMORY;
	break;
    case ACPI_ADR_SPACE_SYSTEM_IO:
	res_type = SYS_RES_IOPORT;
	break;
    default:
	return (EOPNOTSUPP);
    }

    /*
     * If the register width is less than 8, assume the BIOS author means
     * it is a bit field and just allocate a byte.
     */
    if (gas->BitWidth && gas->BitWidth < 8)
	gas->BitWidth = 8;

    /* Validate the address after we're sure we support the space. */
    if (gas->Address == 0 || gas->BitWidth == 0)
	return (EINVAL);

    bus_set_resource(dev, res_type, *rid, gas->Address,
	gas->BitWidth / 8);
    *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags);
    if (*res != NULL) {
	*type = res_type;
	error = 0;
    } else
	/* Back out the resource entry we just added. */
	bus_delete_resource(dev, res_type, *rid);

    return (error);
}
|
|
|
|
|
2004-06-29 01:33:35 +00:00
|
|
|
/* Probe _HID and _CID for compatible ISA PNP ids. */
/*
 * Return the EISA-encoded PNP id derived from the device's _HID,
 * or 0 when the handle/info is unavailable or the HID is not valid.
 */
static uint32_t
acpi_isa_get_logicalid(device_t dev)
{
    ACPI_DEVICE_INFO *devinfo;
    ACPI_HANDLE h;
    uint32_t pnpid;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Fetch and validate the HID. */
    if ((h = acpi_get_handle(dev)) == NULL ||
	ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
	return_VALUE (0);

    pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 &&
	devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ?
	PNP_EISAID(devinfo->HardwareId.String) : 0;
    AcpiOsFree(devinfo);

    return_VALUE (pnpid);
}
|
|
|
|
|
2003-12-18 03:25:22 +00:00
|
|
|
/*
 * Fill cids[] (capacity "count") with the EISA-encoded PNP ids found in
 * the device's _CID list, considering only "PNPxxxx" style ids.
 * Returns the number of ids stored.
 */
static int
acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count)
{
    ACPI_DEVICE_INFO *devinfo;
    ACPI_PNP_DEVICE_ID *ids;
    ACPI_HANDLE h;
    uint32_t *pnpid;
    int i, valid;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    pnpid = cids;

    /* Fetch and validate the CID */
    if ((h = acpi_get_handle(dev)) == NULL ||
	ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
	return_VALUE (0);

    if ((devinfo->Valid & ACPI_VALID_CID) == 0) {
	AcpiOsFree(devinfo);
	return_VALUE (0);
    }

    /* Clamp to however many CIDs the device actually reports. */
    if (devinfo->CompatibleIdList.Count < count)
	count = devinfo->CompatibleIdList.Count;
    ids = devinfo->CompatibleIdList.Ids;
    for (i = 0, valid = 0; i < count; i++)
	if (ids[i].Length >= ACPI_EISAID_STRING_SIZE &&
	    strncmp(ids[i].String, "PNP", 3) == 0) {
	    *pnpid++ = PNP_EISAID(ids[i].String);
	    valid++;
	}
    AcpiOsFree(devinfo);

    return_VALUE (valid);
}
|
|
|
|
|
2018-10-26 00:05:46 +00:00
|
|
|
/*
 * Match a device's _HID/_CID against a NULL-terminated array of id
 * strings.  On a match, optionally report the matching id via *match
 * and return a bus probe priority (HID matches probe ahead of CID
 * matches); otherwise return ENXIO.
 */
static int
acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match)
{
    ACPI_HANDLE h;
    ACPI_OBJECT_TYPE t;
    int rv;
    int i;

    h = acpi_get_handle(dev);
    if (ids == NULL || h == NULL)
	return (ENXIO);
    /* Only Device and Processor namespace objects carry ids. */
    t = acpi_get_type(dev);
    if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR)
	return (ENXIO);

    /* Try to match one of the array of IDs with a HID or CID. */
    for (i = 0; ids[i] != NULL; i++) {
	rv = acpi_MatchHid(h, ids[i]);
	if (rv == ACPI_MATCHHID_NOMATCH)
	    continue;

	if (match != NULL) {
	    *match = ids[i];
	}
	return ((rv == ACPI_MATCHHID_HID)?
	    BUS_PROBE_DEFAULT : BUS_PROBE_LOW_PRIORITY);
    }
    return (ENXIO);
}
|
|
|
|
|
|
|
|
static ACPI_STATUS
|
|
|
|
acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname,
|
|
|
|
ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret)
|
|
|
|
{
|
|
|
|
ACPI_HANDLE h;
|
|
|
|
|
2004-07-15 16:29:08 +00:00
|
|
|
if (dev == NULL)
|
|
|
|
h = ACPI_ROOT_OBJECT;
|
|
|
|
else if ((h = acpi_get_handle(dev)) == NULL)
|
2004-06-29 19:00:36 +00:00
|
|
|
return (AE_BAD_PARAMETER);
|
|
|
|
return (AcpiEvaluateObject(h, pathname, parameters, ret));
|
|
|
|
}
|
|
|
|
|
2010-08-17 15:44:52 +00:00
|
|
|
/*
 * Determine the D-state a device should enter for the current sleep
 * state, overriding *dstate with the value from the _SxD method when
 * present.  Returns 0 on success or ENXIO for devices we refuse to
 * power manage or on evaluation failure.
 */
int
acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
{
    struct acpi_softc *sc;
    ACPI_HANDLE handle;
    ACPI_STATUS status;
    char sxd[8];

    handle = acpi_get_handle(dev);

    /*
     * XXX If we find these devices, don't try to power them down.
     * The serial and IRDA ports on my T23 hang the system when
     * set to D3 and it appears that such legacy devices may
     * need special handling in their drivers.
     */
    if (dstate == NULL || handle == NULL ||
	acpi_MatchHid(handle, "PNP0500") ||
	acpi_MatchHid(handle, "PNP0501") ||
	acpi_MatchHid(handle, "PNP0502") ||
	acpi_MatchHid(handle, "PNP0510") ||
	acpi_MatchHid(handle, "PNP0511"))
	return (ENXIO);

    /*
     * Override next state with the value from _SxD, if present.
     * Note illegal _S0D is evaluated because some systems expect this.
     */
    sc = device_get_softc(bus);
    snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate);
    status = acpi_GetInteger(handle, sxd, dstate);
    /* A missing _SxD method is fine; any other failure is reported. */
    if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
	device_printf(dev, "failed to get %s on %s: %s\n", sxd,
	    acpi_name(handle), AcpiFormatException(status));
	return (ENXIO);
    }

    return (0);
}
|
|
|
|
|
2004-07-15 16:29:08 +00:00
|
|
|
/* Callback arg for our implementation of walking the namespace. */
struct acpi_device_scan_ctx {
    acpi_scan_cb_t user_fn;	/* user callback invoked for each device */
    void *arg;			/* opaque argument forwarded to user_fn */
    ACPI_HANDLE parent;		/* handle where the scan began (skipped) */
};
|
|
|
|
|
2004-06-29 19:00:36 +00:00
|
|
|
/*
 * AcpiWalkNamespace callback: invoke the user's scan function on each
 * eligible namespace node, and rewire the handle<->device association if
 * the callback replaces the device.
 */
static ACPI_STATUS
acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval)
{
    struct acpi_device_scan_ctx *ctx;
    device_t dev, old_dev;
    ACPI_STATUS status;
    ACPI_OBJECT_TYPE type;

    /*
     * Skip this device if we think we'll have trouble with it or it is
     * the parent where the scan began.
     */
    ctx = (struct acpi_device_scan_ctx *)arg;
    if (acpi_avoid(h) || h == ctx->parent)
	return (AE_OK);

    /* If this is not a valid device type (e.g., a method), skip it. */
    if (ACPI_FAILURE(AcpiGetType(h, &type)))
	return (AE_OK);
    if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR &&
	type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER)
	return (AE_OK);

    /*
     * Call the user function with the current device.  If it is unchanged
     * afterwards, return.  Otherwise, we update the handle to the new dev.
     */
    old_dev = acpi_get_device(h);
    dev = old_dev;
    status = ctx->user_fn(h, &dev, level, ctx->arg);
    if (ACPI_FAILURE(status) || old_dev == dev)
	return (status);

    /* Remove the old child and its connection to the handle. */
    if (old_dev != NULL)
	device_delete_child(device_get_parent(old_dev), old_dev);

    /* Recreate the handle association if the user created a device. */
    if (dev != NULL)
	AcpiAttachData(h, acpi_fake_objhandler, dev);

    return (AE_OK);
}
|
|
|
|
|
|
|
|
/*
 * Walk the namespace below a device (or the root, when dev is NULL) up
 * to max_depth levels, invoking user_fn on each node via our scan
 * callback.  No-op when child probing is disabled by a tunable.
 */
static ACPI_STATUS
acpi_device_scan_children(device_t bus, device_t dev, int max_depth,
    acpi_scan_cb_t user_fn, void *arg)
{
    ACPI_HANDLE h;
    struct acpi_device_scan_ctx ctx;

    if (acpi_disabled("children"))
	return (AE_OK);

    if (dev == NULL)
	h = ACPI_ROOT_OBJECT;
    else if ((h = acpi_get_handle(dev)) == NULL)
	return (AE_BAD_PARAMETER);
    /* ctx lives on our stack for the duration of the synchronous walk. */
    ctx.user_fn = user_fn;
    ctx.arg = arg;
    ctx.parent = h;
    return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth,
	acpi_device_scan_cb, NULL, &ctx, NULL));
}
|
|
|
|
|
2004-12-02 08:07:12 +00:00
|
|
|
/*
 * Even though ACPI devices are not PCI, we use the PCI approach for setting
 * device power states since it's close enough to ACPI.
 *
 * Returns EINVAL for an out-of-range state; otherwise 0, even when the
 * underlying switch fails (the failure is only logged).
 */
int
acpi_set_powerstate(device_t child, int state)
{
    ACPI_HANDLE h;
    ACPI_STATUS status;

    h = acpi_get_handle(child);
    if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX)
	return (EINVAL);
    /* Devices without a handle cannot be power-managed; treat as success. */
    if (h == NULL)
	return (0);

    /* Ignore errors if the power methods aren't present. */
    status = acpi_pwr_switch_consumer(h, state);
    if (ACPI_SUCCESS(status)) {
	if (bootverbose)
	    device_printf(child, "set ACPI power state D%d on %s\n",
		state, acpi_name(h));
    } else if (status != AE_NOT_FOUND)
	device_printf(child,
	    "failed to set ACPI power state D%d on %s: %s\n", state,
	    acpi_name(h), AcpiFormatException(status));

    return (0);
}
|
|
|
|
|
2001-09-07 02:57:29 +00:00
|
|
|
/*
 * Probe an ACPI child against an ISA PNP id table, matching the logical
 * id and any compatible ids.  On a match, set the device description and
 * return 0; otherwise return ENXIO (never ENOENT, see below).
 */
static int
acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids)
{
    int result, cid_count, i;
    uint32_t lid, cids[8];

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * ISA-style drivers attached to ACPI may persist and
     * probe manually if we return ENOENT.  We never want
     * that to happen, so don't ever return it.
     */
    result = ENXIO;

    /* Scan the supplied IDs for a match */
    lid = acpi_isa_get_logicalid(child);
    cid_count = acpi_isa_get_compatid(child, cids, 8);
    while (ids && ids->ip_id) {
	if (lid == ids->ip_id) {
	    result = 0;
	    goto out;
	}
	for (i = 0; i < cid_count; i++) {
	    if (cids[i] == ids->ip_id) {
		result = 0;
		goto out;
	    }
	}
	ids++;
    }

out:
    /* On a match, "ids" still points at the matching table entry. */
    if (result == 0 && ids->ip_desc)
	device_set_desc(child, ids->ip_desc);

    return_VALUE (result);
}
|
|
|
|
|
2008-08-22 02:14:23 +00:00
|
|
|
/*
 * Look for a MCFG table.  If it is present, use the settings for
 * domain (segment) 0 to setup PCI config space access via the memory
 * map.
 *
 * On non-x86 architectures (arm64 for now), this will be done from the
 * PCI host bridge driver.
 */
static void
acpi_enable_pcie(void)
{
#if defined(__i386__) || defined(__amd64__)
    ACPI_TABLE_HEADER *hdr;
    ACPI_MCFG_ALLOCATION *alloc, *end;
    ACPI_STATUS status;

    status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr);
    if (ACPI_FAILURE(status))
	return;

    /* Allocation records follow the fixed MCFG header. */
    end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length);
    alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1);
    while (alloc < end) {
	if (alloc->PciSegment == 0) {
	    pcie_cfgregopen(alloc->Address, alloc->StartBusNumber,
		alloc->EndBusNumber);
	    return;
	}
	alloc++;
    }
#endif
}
|
2008-08-22 02:14:23 +00:00
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
 * Scan all of the ACPI namespace and attach child devices.
 *
 * We should only expect to find devices in the \_PR, \_TZ, \_SI, and
 * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec.
 * However, in violation of the spec, some systems place their PCI link
 * devices in \, so we have to walk the whole namespace.  We check the
 * type of namespace nodes, so this should be ok.
 */
static void
acpi_probe_children(device_t bus)
{

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Scan the namespace and insert placeholders for all the devices that
     * we find.  We also probe/attach any early devices.
     *
     * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because
     * we want to create nodes for all devices, not just those that are
     * currently present. (This assumes that we don't want to create/remove
     * devices as they appear, which might be smarter.)
     */
    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n"));
    AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child,
	NULL, bus, NULL);

    /* Pre-allocate resources for our rman from any sysresource devices. */
    acpi_sysres_alloc(bus);

    /* Reserve resources already allocated to children. */
    acpi_reserve_resources(bus);

    /* Create any static children by calling device identify methods. */
    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n"));
    bus_generic_probe(bus);

    /* Probe/attach all children, created statically and from the namespace. */
    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n"));
    bus_generic_attach(bus);

    /* Attach wake sysctls. */
    acpi_wake_sysctl_walk(bus);

    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n"));
    return_VOID;
}
|
|
|
|
|
2004-06-14 04:01:12 +00:00
|
|
|
/*
|
2008-04-07 18:35:11 +00:00
|
|
|
* Determine the probe order for a given device.
|
2004-06-14 04:01:12 +00:00
|
|
|
*/
|
2008-04-07 18:35:11 +00:00
|
|
|
static void
|
2004-06-14 04:01:12 +00:00
|
|
|
acpi_probe_order(ACPI_HANDLE handle, int *order)
|
2004-06-13 22:52:30 +00:00
|
|
|
{
|
2012-02-07 20:54:44 +00:00
|
|
|
ACPI_OBJECT_TYPE type;
|
2004-06-13 22:52:30 +00:00
|
|
|
|
2012-02-07 20:54:44 +00:00
|
|
|
/*
|
|
|
|
* 0. CPUs
|
|
|
|
* 1. I/O port and memory system resource holders
|
|
|
|
* 2. Clocks and timers (to handle early accesses)
|
|
|
|
* 3. Embedded controllers (to handle early accesses)
|
|
|
|
* 4. PCI Link Devices
|
|
|
|
*/
|
|
|
|
AcpiGetType(handle, &type);
|
|
|
|
if (type == ACPI_TYPE_PROCESSOR)
|
|
|
|
*order = 0;
|
|
|
|
else if (acpi_MatchHid(handle, "PNP0C01") ||
|
|
|
|
acpi_MatchHid(handle, "PNP0C02"))
|
|
|
|
*order = 1;
|
|
|
|
else if (acpi_MatchHid(handle, "PNP0100") ||
|
|
|
|
acpi_MatchHid(handle, "PNP0103") ||
|
|
|
|
acpi_MatchHid(handle, "PNP0B00"))
|
|
|
|
*order = 2;
|
|
|
|
else if (acpi_MatchHid(handle, "PNP0C09"))
|
|
|
|
*order = 3;
|
|
|
|
else if (acpi_MatchHid(handle, "PNP0C0F"))
|
|
|
|
*order = 4;
|
2004-06-13 22:52:30 +00:00
|
|
|
}
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
 * Evaluate a child device and determine whether we might attach a device to
 * it.
 *
 * Invoked by AcpiWalkNamespace() for every object under the root; 'context'
 * carries the acpi bus device_t and 'level' is the namespace depth, used to
 * compute a breadth-first probe order.  Always returns AE_OK so that the
 * walk continues past objects we decline.
 */
static ACPI_STATUS
acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
{
    ACPI_DEVICE_INFO *devinfo;
    struct acpi_device *ad;
    struct acpi_prw_data prw;
    ACPI_OBJECT_TYPE type;
    ACPI_HANDLE h;
    device_t bus, child;
    char *handle_str;
    int order;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (acpi_disabled("children"))
	return_ACPI_STATUS (AE_OK);

    /* Skip this device if we think we'll have trouble with it. */
    if (acpi_avoid(handle))
	return_ACPI_STATUS (AE_OK);

    bus = (device_t)context;
    if (ACPI_SUCCESS(AcpiGetType(handle, &type))) {
	handle_str = acpi_name(handle);
	switch (type) {
	case ACPI_TYPE_DEVICE:
	    /*
	     * Since we scan from \, be sure to skip system scope objects.
	     * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around
	     * BIOS bugs.  For example, \_SB_ is to allow \_SB_._INI to be run
	     * during the initialization and \_TZ_ is to support Notify() on it.
	     */
	    if (strcmp(handle_str, "\\_SB_") == 0 ||
		strcmp(handle_str, "\\_TZ_") == 0)
		break;
	    /* Register the wake GPE described by _PRW, if the device has one. */
	    if (acpi_parse_prw(handle, &prw) == 0)
		AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit);

	    /*
	     * Ignore devices that do not have a _HID or _CID.  They should
	     * be discovered by other buses (e.g. the PCI bus driver).
	     */
	    if (!acpi_has_hid(handle))
		break;
	    /* FALLTHROUGH */
	case ACPI_TYPE_PROCESSOR:
	case ACPI_TYPE_THERMAL:
	case ACPI_TYPE_POWER:
	    /*
	     * Create a placeholder device for this node.  Sort the
	     * placeholder so that the probe/attach passes will run
	     * breadth-first.  Orders less than ACPI_DEV_BASE_ORDER
	     * are reserved for special objects (i.e., system
	     * resources).
	     */
	    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str));
	    order = level * 10 + ACPI_DEV_BASE_ORDER;
	    acpi_probe_order(handle, &order);
	    child = BUS_ADD_CHILD(bus, order, NULL, -1);
	    if (child == NULL)
		break;

	    /* Associate the handle with the device_t and vice versa. */
	    acpi_set_handle(child, handle);
	    AcpiAttachData(handle, acpi_fake_objhandler, child);

	    /*
	     * Check that the device is present.  If it's not present,
	     * leave it disabled (so that we have a device_t attached to
	     * the handle, but we don't probe it).
	     *
	     * XXX PCI link devices sometimes report "present" but not
	     * "functional" (i.e. if disabled).  Go ahead and probe them
	     * anyway since we may enable them later.
	     */
	    if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) {
		/* Never disable PCI link devices. */
		if (acpi_MatchHid(handle, "PNP0C0F"))
		    break;
		/*
		 * Docking stations should remain enabled since the system
		 * may be undocked at boot.
		 */
		if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h)))
		    break;

		device_disable(child);
		break;
	    }

	    /*
	     * Get the device's resource settings and attach them.
	     * Note that if the device has _PRS but no _CRS, we need
	     * to decide when it's appropriate to try to configure the
	     * device.  Ignore the return value here; it's OK for the
	     * device not to have any resources.
	     */
	    acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL);

	    /*
	     * Cache the PCI class code reported by _CLS, defaulting to an
	     * out-of-range sentinel (0xffffff) when none is available.
	     */
	    ad = device_get_ivars(child);
	    ad->ad_cls_class = 0xffffff;
	    if (ACPI_SUCCESS(AcpiGetObjectInfo(handle, &devinfo))) {
		if ((devinfo->Valid & ACPI_VALID_CLS) != 0 &&
		    devinfo->ClassCode.Length >= ACPI_PCICLS_STRING_SIZE) {
		    ad->ad_cls_class = strtoul(devinfo->ClassCode.String,
			NULL, 16);
		}
		AcpiOsFree(devinfo);
	    }
	    break;
	}
    }

    return_ACPI_STATUS (AE_OK);
}
|
|
|
|
|
2004-06-13 17:29:35 +00:00
|
|
|
/*
 * AcpiAttachData() requires an object handler but never uses it.  This is a
 * placeholder object handler so we can store a device_t in an ACPI_HANDLE.
 */
void
acpi_fake_objhandler(ACPI_HANDLE h, void *data)
{
    /* Intentionally empty; the attached device_t is managed by the caller. */
}
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
 * Final shutdown hook: depending on 'howto', either enter the S5 soft-off
 * state (RB_POWEROFF), reboot via the ACPI reset register (when enabled
 * and not halting), or optionally disable the ACPI subsystem entirely.
 */
static void
acpi_shutdown_final(void *arg, int howto)
{
    struct acpi_softc *sc = (struct acpi_softc *)arg;
    register_t intr;
    ACPI_STATUS status;

    /*
     * XXX Shutdown code should only run on the BSP (cpuid 0).
     * Some chipsets do not power off the system correctly if called from
     * an AP.
     */
    if ((howto & RB_POWEROFF) != 0) {
	status = AcpiEnterSleepStatePrep(ACPI_STATE_S5);
	if (ACPI_FAILURE(status)) {
	    device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
		AcpiFormatException(status));
	    return;
	}
	device_printf(sc->acpi_dev, "Powering system off\n");
	/* Interrupts must be off while entering S5; restore on failure. */
	intr = intr_disable();
	status = AcpiEnterSleepState(ACPI_STATE_S5);
	if (ACPI_FAILURE(status)) {
	    intr_restore(intr);
	    device_printf(sc->acpi_dev, "power-off failed - %s\n",
		AcpiFormatException(status));
	} else {
	    /* Success normally never returns; give the hardware a second. */
	    DELAY(1000000);
	    intr_restore(intr);
	    device_printf(sc->acpi_dev, "power-off failed - timeout\n");
	}
    } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) {
	/* Reboot using the reset register. */
	status = AcpiReset();
	if (ACPI_SUCCESS(status)) {
	    /* A successful reset should not return; report if it does. */
	    DELAY(1000000);
	    device_printf(sc->acpi_dev, "reset failed - timeout\n");
	} else if (status != AE_NOT_EXIST)
	    device_printf(sc->acpi_dev, "reset failed - %s\n",
		AcpiFormatException(status));
    } else if (sc->acpi_do_disable && !KERNEL_PANICKED()) {
	/*
	 * Only disable ACPI if the user requested.  On some systems, writing
	 * the disable value to SMI_CMD hangs the system.
	 */
	device_printf(sc->acpi_dev, "Shutting down\n");
	AcpiTerminate();
    }
}
|
|
|
|
|
2001-01-10 18:01:51 +00:00
|
|
|
static void
|
|
|
|
acpi_enable_fixed_events(struct acpi_softc *sc)
|
|
|
|
{
|
2001-06-29 20:29:59 +00:00
|
|
|
static int first_time = 1;
|
2001-01-10 18:01:51 +00:00
|
|
|
|
2001-06-29 20:29:59 +00:00
|
|
|
/* Enable and clear fixed events and install handlers. */
|
2007-03-22 18:16:43 +00:00
|
|
|
if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) {
|
2003-08-15 02:10:38 +00:00
|
|
|
AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
|
2001-06-29 20:29:59 +00:00
|
|
|
AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON,
|
2004-02-11 02:57:33 +00:00
|
|
|
acpi_event_power_button_sleep, sc);
|
2003-08-28 16:06:30 +00:00
|
|
|
if (first_time)
|
2003-09-18 05:12:45 +00:00
|
|
|
device_printf(sc->acpi_dev, "Power Button (fixed)\n");
|
2001-06-29 20:29:59 +00:00
|
|
|
}
|
2007-03-22 18:16:43 +00:00
|
|
|
if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
|
2003-08-15 02:10:38 +00:00
|
|
|
AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON);
|
2001-06-29 20:29:59 +00:00
|
|
|
AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON,
|
2004-02-11 02:57:33 +00:00
|
|
|
acpi_event_sleep_button_sleep, sc);
|
2003-08-28 16:06:30 +00:00
|
|
|
if (first_time)
|
2003-09-18 05:12:45 +00:00
|
|
|
device_printf(sc->acpi_dev, "Sleep Button (fixed)\n");
|
2001-06-29 20:29:59 +00:00
|
|
|
}
|
2001-01-10 18:01:51 +00:00
|
|
|
|
2001-06-29 20:29:59 +00:00
|
|
|
first_time = 0;
|
2001-01-10 18:01:51 +00:00
|
|
|
}
|
|
|
|
|
2001-06-28 06:17:16 +00:00
|
|
|
/*
 * Returns true if the device is actually present and should
 * be attached to.  This requires the present, enabled, UI-visible
 * and diagnostics-passed bits to be set.
 */
BOOLEAN
acpi_DeviceIsPresent(device_t dev)
{
    ACPI_HANDLE h;
    UINT32 s;
    ACPI_STATUS status;

    h = acpi_get_handle(dev);
    if (h == NULL)
	return (FALSE);
    /*
     * Certain Threadripper boards always return 0 for FreeBSD because the
     * firmware only returns non-zero for the OS string "Windows 2015".
     * Otherwise it will return zero.  Force them to always be treated as
     * present.  Beta versions were worse: they always returned 0.
     */
    if (acpi_MatchHid(h, "AMDI0020") || acpi_MatchHid(h, "AMDI0010"))
	return (TRUE);

    status = acpi_GetInteger(h, "_STA", &s);

    /*
     * If no _STA method or if it failed, then assume that
     * the device is present.
     */
    if (ACPI_FAILURE(status))
	return (TRUE);

    return (ACPI_DEVICE_PRESENT(s) ? TRUE : FALSE);
}
|
|
|
|
|
2002-02-23 05:21:56 +00:00
|
|
|
/*
|
|
|
|
* Returns true if the battery is actually present and inserted.
|
|
|
|
*/
|
|
|
|
BOOLEAN
|
|
|
|
acpi_BatteryIsPresent(device_t dev)
|
|
|
|
{
|
2018-03-14 23:45:48 +00:00
|
|
|
ACPI_HANDLE h;
|
|
|
|
UINT32 s;
|
|
|
|
ACPI_STATUS status;
|
2002-02-23 05:21:56 +00:00
|
|
|
|
2018-03-14 23:45:48 +00:00
|
|
|
h = acpi_get_handle(dev);
|
|
|
|
if (h == NULL)
|
|
|
|
return (FALSE);
|
|
|
|
status = acpi_GetInteger(h, "_STA", &s);
|
2003-08-28 16:06:30 +00:00
|
|
|
|
2018-12-06 12:34:34 +00:00
|
|
|
/*
|
|
|
|
* If no _STA method or if it failed, then assume that
|
|
|
|
* the device is present.
|
|
|
|
*/
|
2018-03-14 23:45:48 +00:00
|
|
|
if (ACPI_FAILURE(status))
|
2018-12-06 12:34:34 +00:00
|
|
|
return (TRUE);
|
2003-08-28 16:06:30 +00:00
|
|
|
|
2018-03-14 23:45:48 +00:00
|
|
|
return (ACPI_BATTERY_PRESENT(s) ? TRUE : FALSE);
|
2002-02-23 05:21:56 +00:00
|
|
|
}
|
|
|
|
|
2011-06-17 21:19:01 +00:00
|
|
|
/*
|
|
|
|
* Returns true if a device has at least one valid device ID.
|
|
|
|
*/
|
2020-03-09 20:28:45 +00:00
|
|
|
BOOLEAN
|
2011-06-17 21:19:01 +00:00
|
|
|
acpi_has_hid(ACPI_HANDLE h)
|
|
|
|
{
|
|
|
|
ACPI_DEVICE_INFO *devinfo;
|
|
|
|
BOOLEAN ret;
|
|
|
|
|
|
|
|
if (h == NULL ||
|
|
|
|
ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
|
|
|
|
return (FALSE);
|
|
|
|
|
|
|
|
ret = FALSE;
|
|
|
|
if ((devinfo->Valid & ACPI_VALID_HID) != 0)
|
|
|
|
ret = TRUE;
|
|
|
|
else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
|
|
|
|
if (devinfo->CompatibleIdList.Count > 0)
|
|
|
|
ret = TRUE;
|
|
|
|
|
|
|
|
AcpiOsFree(devinfo);
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
|
2004-06-13 22:52:30 +00:00
|
|
|
* Match a HID string against a handle
|
2018-10-26 00:05:46 +00:00
|
|
|
* returns ACPI_MATCHHID_HID if _HID match
|
|
|
|
* ACPI_MATCHHID_CID if _CID match and not _HID match.
|
|
|
|
* ACPI_MATCHHID_NOMATCH=0 if no match.
|
2000-10-28 06:59:48 +00:00
|
|
|
*/
|
2018-10-26 00:05:46 +00:00
|
|
|
int
|
2004-06-29 19:00:36 +00:00
|
|
|
acpi_MatchHid(ACPI_HANDLE h, const char *hid)
|
2000-10-28 06:59:48 +00:00
|
|
|
{
|
2003-12-18 03:25:22 +00:00
|
|
|
ACPI_DEVICE_INFO *devinfo;
|
2009-09-11 22:49:34 +00:00
|
|
|
BOOLEAN ret;
|
|
|
|
int i;
|
2000-12-08 09:16:20 +00:00
|
|
|
|
2009-09-11 22:49:34 +00:00
|
|
|
if (hid == NULL || h == NULL ||
|
|
|
|
ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
|
2018-10-26 00:05:46 +00:00
|
|
|
return (ACPI_MATCHHID_NOMATCH);
|
2003-12-18 03:25:22 +00:00
|
|
|
|
2018-12-07 16:05:39 +00:00
|
|
|
ret = ACPI_MATCHHID_NOMATCH;
|
2003-12-26 15:42:13 +00:00
|
|
|
if ((devinfo->Valid & ACPI_VALID_HID) != 0 &&
|
2009-09-11 22:49:34 +00:00
|
|
|
strcmp(hid, devinfo->HardwareId.String) == 0)
|
2018-10-26 00:05:46 +00:00
|
|
|
ret = ACPI_MATCHHID_HID;
|
2009-09-11 22:49:34 +00:00
|
|
|
else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
|
|
|
|
for (i = 0; i < devinfo->CompatibleIdList.Count; i++) {
|
|
|
|
if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) {
|
2018-10-26 00:05:46 +00:00
|
|
|
ret = ACPI_MATCHHID_CID;
|
2003-12-18 03:25:22 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2003-08-28 16:06:30 +00:00
|
|
|
|
2009-09-11 22:49:34 +00:00
|
|
|
AcpiOsFree(devinfo);
|
2003-12-18 03:25:22 +00:00
|
|
|
return (ret);
|
2000-10-28 06:59:48 +00:00
|
|
|
}
|
|
|
|
|
2001-06-28 06:17:16 +00:00
|
|
|
/*
|
|
|
|
* Return the handle of a named object within our scope, ie. that of (parent)
|
|
|
|
* or one if its parents.
|
|
|
|
*/
|
|
|
|
ACPI_STATUS
|
|
|
|
acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result)
|
|
|
|
{
|
|
|
|
ACPI_HANDLE r;
|
|
|
|
ACPI_STATUS status;
|
|
|
|
|
2003-08-28 16:06:30 +00:00
|
|
|
/* Walk back up the tree to the root */
|
2001-06-28 06:17:16 +00:00
|
|
|
for (;;) {
|
2003-08-28 16:06:30 +00:00
|
|
|
status = AcpiGetHandle(parent, path, &r);
|
|
|
|
if (ACPI_SUCCESS(status)) {
|
2001-06-28 06:17:16 +00:00
|
|
|
*result = r;
|
2003-08-28 16:06:30 +00:00
|
|
|
return (AE_OK);
|
2001-06-28 06:17:16 +00:00
|
|
|
}
|
2004-08-03 05:13:56 +00:00
|
|
|
/* XXX Return error here? */
|
2001-06-28 06:17:16 +00:00
|
|
|
if (status != AE_NOT_FOUND)
|
2003-08-28 16:06:30 +00:00
|
|
|
return (AE_OK);
|
2002-02-23 05:21:56 +00:00
|
|
|
if (ACPI_FAILURE(AcpiGetParent(parent, &r)))
|
2003-08-28 16:06:30 +00:00
|
|
|
return (AE_NOT_FOUND);
|
2001-06-28 06:17:16 +00:00
|
|
|
parent = r;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate a buffer with a preset data size.
|
|
|
|
*/
|
|
|
|
ACPI_BUFFER *
|
|
|
|
acpi_AllocBuffer(int size)
|
|
|
|
{
|
|
|
|
ACPI_BUFFER *buf;
|
|
|
|
|
|
|
|
if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL)
|
2003-08-28 16:06:30 +00:00
|
|
|
return (NULL);
|
2001-06-28 06:17:16 +00:00
|
|
|
buf->Length = size;
|
|
|
|
buf->Pointer = (void *)(buf + 1);
|
2003-08-28 16:06:30 +00:00
|
|
|
return (buf);
|
2001-06-28 06:17:16 +00:00
|
|
|
}
|
|
|
|
|
2004-03-03 18:34:42 +00:00
|
|
|
ACPI_STATUS
|
2004-03-09 05:41:28 +00:00
|
|
|
acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number)
|
2004-03-03 18:34:42 +00:00
|
|
|
{
|
|
|
|
ACPI_OBJECT arg1;
|
|
|
|
ACPI_OBJECT_LIST args;
|
|
|
|
|
|
|
|
arg1.Type = ACPI_TYPE_INTEGER;
|
|
|
|
arg1.Integer.Value = number;
|
|
|
|
args.Count = 1;
|
|
|
|
args.Pointer = &arg1;
|
|
|
|
|
|
|
|
return (AcpiEvaluateObject(handle, path, &args, NULL));
|
|
|
|
}
|
|
|
|
|
2001-06-28 06:17:16 +00:00
|
|
|
/*
 * Evaluate a path that should return an integer.
 *
 * On success, *number holds the integer result.  A NULL handle is taken
 * to mean the namespace root.  Returns AE_TYPE if the object evaluates
 * to something other than an Integer (and other than a convertible
 * Buffer, see below).
 */
ACPI_STATUS
acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number)
{
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT param;

    if (handle == NULL)
	handle = ACPI_ROOT_OBJECT;

    /*
     * Assume that what we've been pointed at is an Integer object, or
     * a method that will return an Integer.
     */
    buf.Pointer = &param;
    buf.Length = sizeof(param);
    status = AcpiEvaluateObject(handle, path, NULL, &buf);
    if (ACPI_SUCCESS(status)) {
	if (param.Type == ACPI_TYPE_INTEGER)
	    *number = param.Integer.Value;
	else
	    status = AE_TYPE;
    }

    /*
     * In some applications, a method that's expected to return an Integer
     * may instead return a Buffer (probably to simplify some internal
     * arithmetic).  We'll try to fetch whatever it is, and if it's a Buffer,
     * convert it into an Integer as best we can.
     *
     * This is a hack.
     */
    if (status == AE_BUFFER_OVERFLOW) {
	/* buf.Length was updated by ACPICA to the required size. */
	if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) {
	    status = AE_NO_MEMORY;
	} else {
	    status = AcpiEvaluateObject(handle, path, NULL, &buf);
	    if (ACPI_SUCCESS(status))
		status = acpi_ConvertBufferToInteger(&buf, number);
	    AcpiOsFree(buf.Pointer);
	}
    }
    return (status);
}
|
|
|
|
|
2001-12-22 16:05:41 +00:00
|
|
|
ACPI_STATUS
|
2004-03-09 05:41:28 +00:00
|
|
|
acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number)
|
2001-12-22 16:05:41 +00:00
|
|
|
{
|
|
|
|
ACPI_OBJECT *p;
|
2004-03-09 05:44:47 +00:00
|
|
|
UINT8 *val;
|
2001-12-22 16:05:41 +00:00
|
|
|
int i;
|
|
|
|
|
|
|
|
p = (ACPI_OBJECT *)bufp->Pointer;
|
|
|
|
if (p->Type == ACPI_TYPE_INTEGER) {
|
|
|
|
*number = p->Integer.Value;
|
2003-08-28 16:06:30 +00:00
|
|
|
return (AE_OK);
|
2001-12-22 16:05:41 +00:00
|
|
|
}
|
|
|
|
if (p->Type != ACPI_TYPE_BUFFER)
|
2003-08-28 16:06:30 +00:00
|
|
|
return (AE_TYPE);
|
2001-12-22 16:05:41 +00:00
|
|
|
if (p->Buffer.Length > sizeof(int))
|
2003-08-28 16:06:30 +00:00
|
|
|
return (AE_BAD_DATA);
|
|
|
|
|
2001-12-22 16:05:41 +00:00
|
|
|
*number = 0;
|
2004-03-09 05:44:47 +00:00
|
|
|
val = p->Buffer.Pointer;
|
2001-12-22 16:05:41 +00:00
|
|
|
for (i = 0; i < p->Buffer.Length; i++)
|
2004-03-09 05:44:47 +00:00
|
|
|
*number += val[i] << (i * 8);
|
2003-08-28 16:06:30 +00:00
|
|
|
return (AE_OK);
|
2001-12-22 16:05:41 +00:00
|
|
|
}
|
|
|
|
|
2001-06-28 06:17:16 +00:00
|
|
|
/*
|
|
|
|
* Iterate over the elements of an a package object, calling the supplied
|
|
|
|
* function for each element.
|
|
|
|
*
|
|
|
|
* XXX possible enhancement might be to abort traversal on error.
|
|
|
|
*/
|
|
|
|
ACPI_STATUS
|
2003-08-28 16:06:30 +00:00
|
|
|
acpi_ForeachPackageObject(ACPI_OBJECT *pkg,
|
|
|
|
void (*func)(ACPI_OBJECT *comp, void *arg), void *arg)
|
2001-06-28 06:17:16 +00:00
|
|
|
{
|
|
|
|
ACPI_OBJECT *comp;
|
|
|
|
int i;
|
2004-08-18 05:48:24 +00:00
|
|
|
|
2003-08-28 16:06:30 +00:00
|
|
|
if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE)
|
|
|
|
return (AE_BAD_PARAMETER);
|
2001-06-28 06:17:16 +00:00
|
|
|
|
2003-08-28 16:06:30 +00:00
|
|
|
/* Iterate over components */
|
|
|
|
i = 0;
|
|
|
|
comp = pkg->Package.Elements;
|
|
|
|
for (; i < pkg->Package.Count; i++, comp++)
|
2001-06-28 06:17:16 +00:00
|
|
|
func(comp, arg);
|
|
|
|
|
2003-08-28 16:06:30 +00:00
|
|
|
return (AE_OK);
|
2000-10-28 06:59:48 +00:00
|
|
|
}
|
|
|
|
|
2001-07-05 07:14:30 +00:00
|
|
|
/*
|
|
|
|
* Find the (index)th resource object in a set.
|
|
|
|
*/
|
|
|
|
ACPI_STATUS
|
2001-07-30 08:59:43 +00:00
|
|
|
acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp)
|
2001-07-05 07:14:30 +00:00
|
|
|
{
|
2001-07-30 08:59:43 +00:00
|
|
|
ACPI_RESOURCE *rp;
|
2001-07-05 07:14:30 +00:00
|
|
|
int i;
|
|
|
|
|
2001-07-30 08:59:43 +00:00
|
|
|
rp = (ACPI_RESOURCE *)buf->Pointer;
|
2001-07-05 07:14:30 +00:00
|
|
|
i = index;
|
2001-07-30 08:59:43 +00:00
|
|
|
while (i-- > 0) {
|
2004-12-27 05:36:47 +00:00
|
|
|
/* Range check */
|
2001-07-30 08:59:43 +00:00
|
|
|
if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
|
2003-08-28 16:06:30 +00:00
|
|
|
return (AE_BAD_PARAMETER);
|
|
|
|
|
|
|
|
/* Check for terminator */
|
2005-11-01 22:44:08 +00:00
|
|
|
if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
|
2003-08-28 16:06:30 +00:00
|
|
|
return (AE_NOT_FOUND);
|
2004-02-19 18:20:03 +00:00
|
|
|
rp = ACPI_NEXT_RESOURCE(rp);
|
2001-07-05 07:14:30 +00:00
|
|
|
}
|
|
|
|
if (resp != NULL)
|
2001-07-30 08:59:43 +00:00
|
|
|
*resp = rp;
|
2003-08-28 16:06:30 +00:00
|
|
|
|
|
|
|
return (AE_OK);
|
2001-07-30 08:59:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Append an ACPI_RESOURCE to an ACPI_BUFFER.
 *
 * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER
 * provided to contain it.  If the ACPI_BUFFER is empty, allocate a sensible
 * backing block.  If the ACPI_RESOURCE is NULL, return an empty set of
 * resources.
 */
#define ACPI_INITIAL_RESOURCE_BUFFER_SIZE	512

ACPI_STATUS
acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res)
{
    ACPI_RESOURCE *rp;
    void *newp;

    /* Initialise the buffer if necessary. */
    if (buf->Pointer == NULL) {
	buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE;
	if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL)
	    return (AE_NO_MEMORY);
	/* A fresh buffer holds just the END_TAG terminator. */
	rp = (ACPI_RESOURCE *)buf->Pointer;
	rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
	rp->Length = ACPI_RS_SIZE_MIN;
    }
    if (res == NULL)
	return (AE_OK);

    /*
     * Scan the current buffer looking for the terminator.
     * This will either find the terminator or hit the end
     * of the buffer and return an error.
     */
    rp = (ACPI_RESOURCE *)buf->Pointer;
    for (;;) {
	/* Range check, don't go outside the buffer */
	if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
	    return (AE_BAD_PARAMETER);
	if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
	    break;
	rp = ACPI_NEXT_RESOURCE(rp);
    }

    /*
     * Check the size of the buffer and expand if required.
     *
     * Required size is:
     *	size of existing resources before terminator +
     *	size of new resource and header +
     *	size of terminator.
     *
     * Note that this loop should really only run once, unless
     * for some reason we are stuffing a *really* huge resource.
     */
    while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) +
	    res->Length + ACPI_RS_SIZE_NO_DATA +
	    ACPI_RS_SIZE_MIN) >= buf->Length) {
	/* Double the buffer; rp must be re-based into the new block. */
	if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL)
	    return (AE_NO_MEMORY);
	bcopy(buf->Pointer, newp, buf->Length);
	rp = (ACPI_RESOURCE *)((u_int8_t *)newp +
			       ((u_int8_t *)rp - (u_int8_t *)buf->Pointer));
	AcpiOsFree(buf->Pointer);
	buf->Pointer = newp;
	buf->Length += buf->Length;
    }

    /* Insert the new resource. */
    bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA);

    /* And add the terminator. */
    rp = ACPI_NEXT_RESOURCE(rp);
    rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
    rp->Length = ACPI_RS_SIZE_MIN;

    return (AE_OK);
}
|
2001-06-28 06:17:16 +00:00
|
|
|
|
2018-10-22 03:29:54 +00:00
|
|
|
/*
 * Query the _DSM function-enumeration bitfield (function index 0) for the
 * given UUID/revision.  Returns the first byte of the bitfield (bit N set
 * means function N is supported), or 0 on failure.
 */
UINT8
acpi_DSMQuery(ACPI_HANDLE h, uint8_t *uuid, int revision)
{
    /*
     * ACPI spec 9.1.1 defines this.
     *
     * "Arg2: Function Index Represents a specific function whose meaning is
     * specific to the UUID and Revision ID. Function indices should start
     * with 1. Function number zero is a query function (see the special
     * return code defined below)."
     */
    ACPI_BUFFER buf;
    ACPI_OBJECT *obj;
    UINT8 ret = 0;

    if (!ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, revision, 0, NULL, &buf))) {
	ACPI_INFO(("Failed to enumerate DSM functions\n"));
	return (0);
    }

    obj = (ACPI_OBJECT *)buf.Pointer;
    KASSERT(obj, ("Object not allowed to be NULL\n"));

    /*
     * From ACPI 6.2 spec 9.1.1:
     * If Function Index = 0, a Buffer containing a function index bitfield.
     * Otherwise, the return value and type depends on the UUID and revision
     * ID (see below).
     */
    switch (obj->Type) {
    case ACPI_TYPE_BUFFER:
	ret = *(uint8_t *)obj->Buffer.Pointer;
	break;
    case ACPI_TYPE_INTEGER:
	ACPI_BIOS_WARNING((AE_INFO,
	    "Possibly buggy BIOS with ACPI_TYPE_INTEGER for function enumeration\n"));
	ret = obj->Integer.Value & 0xFF;
	break;
    default:
	ACPI_WARNING((AE_INFO, "Unexpected return type %u\n", obj->Type));
    };

    /* obj aliases buf.Pointer, so this releases the whole result buffer. */
    AcpiOsFree(obj);
    return ret;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* DSM may return multiple types depending on the function. It is therefore
|
|
|
|
* unsafe to use the typed evaluation. It is highly recommended that the caller
|
|
|
|
* check the type of the returned object.
|
|
|
|
*/
|
|
|
|
ACPI_STATUS
|
|
|
|
acpi_EvaluateDSM(ACPI_HANDLE handle, uint8_t *uuid, int revision,
|
|
|
|
uint64_t function, union acpi_object *package, ACPI_BUFFER *out_buf)
|
|
|
|
{
|
|
|
|
ACPI_OBJECT arg[4];
|
|
|
|
ACPI_OBJECT_LIST arglist;
|
|
|
|
ACPI_BUFFER buf;
|
|
|
|
ACPI_STATUS status;
|
|
|
|
|
|
|
|
if (out_buf == NULL)
|
|
|
|
return (AE_NO_MEMORY);
|
|
|
|
|
|
|
|
arg[0].Type = ACPI_TYPE_BUFFER;
|
|
|
|
arg[0].Buffer.Length = ACPI_UUID_LENGTH;
|
|
|
|
arg[0].Buffer.Pointer = uuid;
|
|
|
|
arg[1].Type = ACPI_TYPE_INTEGER;
|
|
|
|
arg[1].Integer.Value = revision;
|
|
|
|
arg[2].Type = ACPI_TYPE_INTEGER;
|
|
|
|
arg[2].Integer.Value = function;
|
|
|
|
if (package) {
|
|
|
|
arg[3] = *package;
|
|
|
|
} else {
|
|
|
|
arg[3].Type = ACPI_TYPE_PACKAGE;
|
|
|
|
arg[3].Package.Count = 0;
|
|
|
|
arg[3].Package.Elements = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
arglist.Pointer = arg;
|
|
|
|
arglist.Count = 4;
|
|
|
|
buf.Pointer = NULL;
|
|
|
|
buf.Length = ACPI_ALLOCATE_BUFFER;
|
|
|
|
status = AcpiEvaluateObject(handle, "_DSM", &arglist, &buf);
|
|
|
|
if (ACPI_FAILURE(status))
|
|
|
|
return (status);
|
|
|
|
|
|
|
|
KASSERT(ACPI_SUCCESS(status), ("Unexpected status"));
|
|
|
|
|
|
|
|
*out_buf = buf;
|
|
|
|
return (status);
|
|
|
|
}
|
|
|
|
|
2016-04-20 20:55:58 +00:00
|
|
|
/*
 * Evaluate the _OSC (Operating System Capabilities) method.
 *
 * 'caps_in' is an array of 'count' DWORDs passed as the capabilities
 * buffer; note that caps_in[0] is overwritten here with the Query Support
 * flag derived from 'query'.  On success the returned capabilities are
 * copied into 'caps_out' (if non-NULL), which must also hold 'count'
 * DWORDs; a size mismatch yields AE_BUFFER_OVERFLOW.
 */
ACPI_STATUS
acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count,
    uint32_t *caps_in, uint32_t *caps_out, bool query)
{
    ACPI_OBJECT arg[4], *ret;
    ACPI_OBJECT_LIST arglist;
    ACPI_BUFFER buf;
    ACPI_STATUS status;

    arglist.Pointer = arg;
    arglist.Count = 4;
    arg[0].Type = ACPI_TYPE_BUFFER;
    arg[0].Buffer.Length = ACPI_UUID_LENGTH;
    arg[0].Buffer.Pointer = uuid;
    arg[1].Type = ACPI_TYPE_INTEGER;
    arg[1].Integer.Value = revision;
    arg[2].Type = ACPI_TYPE_INTEGER;
    arg[2].Integer.Value = count;
    arg[3].Type = ACPI_TYPE_BUFFER;
    arg[3].Buffer.Length = count * sizeof(*caps_in);
    arg[3].Buffer.Pointer = (uint8_t *)caps_in;
    /* DWORD 0 bit 0 is the Query Support flag. */
    caps_in[0] = query ? 1 : 0;
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObjectTyped(handle, "_OSC", &arglist, &buf,
	ACPI_TYPE_BUFFER);
    if (ACPI_FAILURE(status))
	return (status);
    if (caps_out != NULL) {
	ret = buf.Pointer;
	if (ret->Buffer.Length != count * sizeof(*caps_out)) {
	    AcpiOsFree(buf.Pointer);
	    return (AE_BUFFER_OVERFLOW);
	}
	bcopy(ret->Buffer.Pointer, caps_out, ret->Buffer.Length);
    }
    AcpiOsFree(buf.Pointer);
    return (status);
}
|
|
|
|
|
2002-09-06 16:08:08 +00:00
|
|
|
/*
 * Set interrupt model.
 *
 * Evaluates \_PIC with 'model' as its single integer argument to tell the
 * firmware which interrupt routing model the OS will use.
 */
ACPI_STATUS
acpi_SetIntrModel(int model)
{

    return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model));
}
|
|
|
|
|
2009-09-23 15:42:35 +00:00
|
|
|
/*
|
|
|
|
* Walk subtables of a table and call a callback routine for each
|
|
|
|
* subtable. The caller should provide the first subtable and a
|
|
|
|
* pointer to the end of the table. This can be used to walk tables
|
|
|
|
* such as MADT and SRAT that use subtable entries.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler,
|
|
|
|
void *arg)
|
|
|
|
{
|
|
|
|
ACPI_SUBTABLE_HEADER *entry;
|
|
|
|
|
|
|
|
for (entry = first; (void *)entry < end; ) {
|
|
|
|
/* Avoid an infinite loop if we hit a bogus entry. */
|
|
|
|
if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER))
|
|
|
|
return;
|
|
|
|
|
|
|
|
handler(entry, arg);
|
|
|
|
entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-06-21 22:50:37 +00:00
|
|
|
/*
|
|
|
|
* DEPRECATED. This interface has serious deficiencies and will be
|
|
|
|
* removed.
|
|
|
|
*
|
|
|
|
* Immediately enter the sleep state. In the old model, acpiconf(8) ran
|
|
|
|
* rc.suspend and rc.resume so we don't have to notify devd(8) to do this.
|
|
|
|
*/
|
|
|
|
ACPI_STATUS
|
|
|
|
acpi_SetSleepState(struct acpi_softc *sc, int state)
|
|
|
|
{
|
|
|
|
static int once;
|
|
|
|
|
|
|
|
if (!once) {
|
2009-04-30 17:42:11 +00:00
|
|
|
device_printf(sc->acpi_dev,
|
2007-06-21 22:50:37 +00:00
|
|
|
"warning: acpi_SetSleepState() deprecated, need to update your software\n");
|
|
|
|
once = 1;
|
|
|
|
}
|
|
|
|
return (acpi_EnterSleepState(sc, state));
|
|
|
|
}
|
|
|
|
|
2009-03-17 00:48:11 +00:00
|
|
|
#if defined(__amd64__) || defined(__i386__)
|
2012-05-29 05:09:40 +00:00
|
|
|
static void
|
|
|
|
acpi_sleep_force_task(void *context)
|
|
|
|
{
|
|
|
|
struct acpi_softc *sc = (struct acpi_softc *)context;
|
|
|
|
|
|
|
|
if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
|
|
|
|
device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
|
|
|
|
sc->acpi_next_sstate);
|
|
|
|
}
|
|
|
|
|
2007-06-21 22:50:37 +00:00
|
|
|
static void
|
|
|
|
acpi_sleep_force(void *arg)
|
|
|
|
{
|
2009-04-30 17:42:11 +00:00
|
|
|
struct acpi_softc *sc = (struct acpi_softc *)arg;
|
2007-06-21 22:50:37 +00:00
|
|
|
|
2009-04-30 17:42:11 +00:00
|
|
|
device_printf(sc->acpi_dev,
|
|
|
|
"suspend request timed out, forcing sleep now\n");
|
2012-05-29 05:09:40 +00:00
|
|
|
/*
|
2014-09-22 14:27:26 +00:00
|
|
|
* XXX Suspending from callout causes freezes in DEVICE_SUSPEND().
|
|
|
|
* Suspend from acpi_task thread instead.
|
2012-05-29 05:09:40 +00:00
|
|
|
*/
|
|
|
|
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
|
|
|
|
acpi_sleep_force_task, sc)))
|
|
|
|
device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n");
|
2007-06-21 22:50:37 +00:00
|
|
|
}
|
2009-03-17 00:48:11 +00:00
|
|
|
#endif
|
2007-06-21 22:50:37 +00:00
|
|
|
|
|
|
|
/*
 * Request that the system enter the given suspend state.  All /dev/apm
 * devices and devd(8) will be notified.  Userland then has a chance to
 * save state and acknowledge the request.  The system sleeps once all
 * acks are in.
 *
 * Returns 0 on success (or when a request is already pending), EINVAL
 * for an out-of-range state, EOPNOTSUPP when the state is unsupported,
 * and ENXIO when an immediate sleep attempt fails.
 */
int
acpi_ReqSleepState(struct acpi_softc *sc, int state)
{
#if defined(__amd64__) || defined(__i386__)
    struct apm_clone_data *clone;
    ACPI_STATUS status;

    if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
	return (EINVAL);
    if (!acpi_sleep_states[state])
	return (EOPNOTSUPP);

    /*
     * If a reboot/shutdown/suspend request is already in progress or
     * suspend is blocked due to an upcoming shutdown, just return.
     */
    if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) {
	return (0);
    }

    /* Wait until sleep is enabled. */
    while (sc->acpi_sleep_disabled) {
	AcpiOsSleep(1000);
    }

    ACPI_LOCK(acpi);

    sc->acpi_next_sstate = state;

    /* S5 (soft-off) should be entered directly with no waiting. */
    if (state == ACPI_STATE_S5) {
	ACPI_UNLOCK(acpi);
	status = acpi_EnterSleepState(sc, state);
	return (ACPI_SUCCESS(status) ? 0 : ENXIO);
    }

    /* Record the pending state and notify all apm devices. */
    STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
	clone->notify_status = APM_EV_NONE;
	/* devd-flagged clones are notified via acpi_UserNotify() below. */
	if ((clone->flags & ACPI_EVF_DEVD) == 0) {
	    selwakeuppri(&clone->sel_read, PZERO);
	    KNOTE_LOCKED(&clone->sel_read.si_note, 0);
	}
    }

    /* If devd(8) is not running, immediately enter the sleep state. */
    if (!devctl_process_running()) {
	ACPI_UNLOCK(acpi);
	status = acpi_EnterSleepState(sc, state);
	return (ACPI_SUCCESS(status) ? 0 : ENXIO);
    }

    /*
     * Set a timeout to fire if userland doesn't ack the suspend request
     * in time.  This way we still eventually go to sleep if we were
     * overheating or running low on battery, even if userland is hung.
     * We cancel this timeout once all userland acks are in or the
     * suspend request is aborted.
     */
    callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc);
    ACPI_UNLOCK(acpi);

    /* Now notify devd(8) also. */
    acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);

    return (0);
#else
    /* This platform does not support acpi suspend/resume. */
    return (EOPNOTSUPP);
#endif
}
|
|
|
|
|
|
|
|
/*
 * Acknowledge (or reject) a pending sleep state.  The caller has
 * prepared for suspend and is now ready for it to proceed.  If the
 * error argument is non-zero, it indicates suspend should be cancelled
 * and gives an errno value describing why.  Once all votes are in,
 * we suspend the system.
 *
 * Returns 0 on success or abort, ENXIO if no sleep was pending, and
 * ENODEV if entering the sleep state fails.
 */
int
acpi_AckSleepState(struct apm_clone_data *clone, int error)
{
#if defined(__amd64__) || defined(__i386__)
    struct acpi_softc *sc;
    int ret, sleeping;

    /* If no pending sleep state, return an error. */
    ACPI_LOCK(acpi);
    sc = clone->acpi_sc;
    if (sc->acpi_next_sstate == 0) {
	ACPI_UNLOCK(acpi);
	return (ENXIO);
    }

    /* Caller wants to abort suspend process. */
    if (error) {
	sc->acpi_next_sstate = 0;
	callout_stop(&sc->susp_force_to);
	device_printf(sc->acpi_dev,
	    "listener on %s cancelled the pending suspend\n",
	    devtoname(clone->cdev));
	ACPI_UNLOCK(acpi);
	return (0);
    }

    /*
     * Mark this device as acking the suspend request.  Then, walk through
     * all devices, seeing if they agree yet.  We only count devices that
     * are writable since read-only devices couldn't ack the request.
     *
     * NB: 'clone' is deliberately reused as the loop iterator after the
     * caller's clone has been marked acked above.
     */
    sleeping = TRUE;
    clone->notify_status = APM_EV_ACKED;
    STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
	if ((clone->flags & ACPI_EVF_WRITE) != 0 &&
	    clone->notify_status != APM_EV_ACKED) {
	    sleeping = FALSE;
	    break;
	}
    }

    /* If all devices have voted "yes", we will suspend now. */
    if (sleeping)
	callout_stop(&sc->susp_force_to);
    ACPI_UNLOCK(acpi);
    ret = 0;
    if (sleeping) {
	/* Enter the sleep state outside the ACPI lock. */
	if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
	    ret = ENODEV;
    }
    return (ret);
#else
    /* This platform does not support acpi suspend/resume. */
    return (EOPNOTSUPP);
#endif
}
|
|
|
|
|
2001-12-09 18:02:36 +00:00
|
|
|
static void
|
|
|
|
acpi_sleep_enable(void *arg)
|
|
|
|
{
|
2009-03-23 22:06:09 +00:00
|
|
|
struct acpi_softc *sc = (struct acpi_softc *)arg;
|
2004-08-03 05:13:56 +00:00
|
|
|
|
2014-09-22 14:27:26 +00:00
|
|
|
ACPI_LOCK_ASSERT(acpi);
|
|
|
|
|
2009-04-30 17:35:44 +00:00
|
|
|
/* Reschedule if the system is not fully up and running. */
|
|
|
|
if (!AcpiGbl_SystemAwakeAndRunning) {
|
2014-09-22 14:27:26 +00:00
|
|
|
callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);
|
2009-04-30 17:35:44 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
sc->acpi_sleep_disabled = FALSE;
|
2009-03-23 22:06:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static ACPI_STATUS
|
|
|
|
acpi_sleep_disable(struct acpi_softc *sc)
|
|
|
|
{
|
|
|
|
ACPI_STATUS status;
|
|
|
|
|
2009-04-30 17:35:44 +00:00
|
|
|
/* Fail if the system is not fully up and running. */
|
|
|
|
if (!AcpiGbl_SystemAwakeAndRunning)
|
|
|
|
return (AE_ERROR);
|
|
|
|
|
2009-03-23 22:06:09 +00:00
|
|
|
ACPI_LOCK(acpi);
|
|
|
|
status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK;
|
2009-04-30 17:35:44 +00:00
|
|
|
sc->acpi_sleep_disabled = TRUE;
|
2009-03-23 22:06:09 +00:00
|
|
|
ACPI_UNLOCK(acpi);
|
|
|
|
|
|
|
|
return (status);
|
2001-12-09 18:02:36 +00:00
|
|
|
}
|
2001-03-07 15:22:14 +00:00
|
|
|
|
2004-08-13 06:21:32 +00:00
|
|
|
/*
 * How far along the suspend sequence we have progressed; used by
 * acpi_EnterSleepState() to back out exactly the steps completed.
 */
enum acpi_sleep_state {
    ACPI_SS_NONE,		/* nothing done yet */
    ACPI_SS_GPE_SET,		/* wake GPEs have been prepared */
    ACPI_SS_DEV_SUSPEND,	/* device tree has been suspended */
    ACPI_SS_SLP_PREP,		/* AcpiEnterSleepStatePrep() succeeded */
    ACPI_SS_SLEPT,		/* system actually slept */
};
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
 * Enter the desired system sleep state.
 *
 * Currently we support S1-S5 but S4 is only S4BIOS
 *
 * The suspend sequence is staged; slp_state records how far we got so
 * the backout path can undo exactly the completed steps on both the
 * success and failure paths.
 */
static ACPI_STATUS
acpi_EnterSleepState(struct acpi_softc *sc, int state)
{
    register_t intr;
    ACPI_STATUS status;
    ACPI_EVENT_STATUS power_button_status;
    enum acpi_sleep_state slp_state;
    int sleep_result;

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);

    if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (!acpi_sleep_states[state]) {
	device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
	    state);
	return (AE_SUPPORT);
    }

    /* Re-entry once we're suspending is not allowed. */
    status = acpi_sleep_disable(sc);
    if (ACPI_FAILURE(status)) {
	device_printf(sc->acpi_dev,
	    "suspend request ignored (not ready yet)\n");
	return (status);
    }

    if (state == ACPI_STATE_S5) {
	/*
	 * Shut down cleanly and power off.  This will call us back through the
	 * shutdown handlers.
	 */
	shutdown_nice(RB_POWEROFF);
	return_ACPI_STATUS (AE_OK);
    }

    EVENTHANDLER_INVOKE(power_suspend_early);
    stop_all_proc();
    EVENTHANDLER_INVOKE(power_suspend);

    /* Pin ourselves to the BSP for the duration of the suspend. */
#ifdef EARLY_AP_STARTUP
    MPASS(mp_ncpus == 1 || smp_started);
    thread_lock(curthread);
    sched_bind(curthread, 0);
    thread_unlock(curthread);
#else
    if (smp_started) {
	thread_lock(curthread);
	sched_bind(curthread, 0);
	thread_unlock(curthread);
    }
#endif

    /*
     * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
     * drivers need this.
     */
    mtx_lock(&Giant);

    slp_state = ACPI_SS_NONE;

    sc->acpi_sstate = state;

    /* Enable any GPEs as appropriate and requested by the user. */
    acpi_wake_prep_walk(state);
    slp_state = ACPI_SS_GPE_SET;

    /*
     * Inform all devices that we are going to sleep.  If at least one
     * device fails, DEVICE_SUSPEND() automatically resumes the tree.
     *
     * XXX Note that a better two-pass approach with a 'veto' pass
     * followed by a "real thing" pass would be better, but the current
     * bus interface does not provide for this.
     */
    if (DEVICE_SUSPEND(root_bus) != 0) {
	device_printf(sc->acpi_dev, "device_suspend failed\n");
	goto backout;
    }
    slp_state = ACPI_SS_DEV_SUSPEND;

    status = AcpiEnterSleepStatePrep(state);
    if (ACPI_FAILURE(status)) {
	device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
	    AcpiFormatException(status));
	goto backout;
    }
    slp_state = ACPI_SS_SLP_PREP;

    /* Optional user-configured delay before actually sleeping. */
    if (sc->acpi_sleep_delay > 0)
	DELAY(sc->acpi_sleep_delay * 1000000);

    suspendclock();
    intr = intr_disable();
    if (state != ACPI_STATE_S1) {
	sleep_result = acpi_sleep_machdep(sc, state);
	acpi_wakeup_machdep(sc, state, sleep_result, 0);

	/*
	 * XXX According to ACPI specification SCI_EN bit should be restored
	 * by ACPI platform (BIOS, firmware) to its pre-sleep state.
	 * Unfortunately some BIOSes fail to do that and that leads to
	 * unexpected and serious consequences during wake up like a system
	 * getting stuck in SMI handlers.
	 * This hack is picked up from Linux, which claims that it follows
	 * Windows behavior.
	 */
	if (sleep_result == 1 && state != ACPI_STATE_S4)
	    AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT);

	if (sleep_result == 1 && state == ACPI_STATE_S3) {
	    /*
	     * Prevent mis-interpretation of the wakeup by power button
	     * as a request for power off.
	     * Ideally we should post an appropriate wakeup event,
	     * perhaps using acpi_event_power_button_wake or alike.
	     *
	     * Clearing of power button status after wakeup is mandated
	     * by ACPI specification in section "Fixed Power Button".
	     *
	     * XXX As of ACPICA 20121114 AcpiGetEventStatus provides
	     * status as 0/1 corressponding to inactive/active despite
	     * its type being ACPI_EVENT_STATUS.  In other words,
	     * we should not test for ACPI_EVENT_FLAG_SET for time being.
	     */
	    if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON,
		&power_button_status)) && power_button_status != 0) {
		AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
		device_printf(sc->acpi_dev,
		    "cleared fixed power button status\n");
	    }
	}

	intr_restore(intr);

	/* call acpi_wakeup_machdep() again with interrupt enabled */
	acpi_wakeup_machdep(sc, state, sleep_result, 1);

	/* Must run with interrupts enabled (may allocate/evaluate AML). */
	AcpiLeaveSleepStatePrep(state);

	if (sleep_result == -1)
	    goto backout;

	/* Re-enable ACPI hardware on wakeup from sleep state 4. */
	if (state == ACPI_STATE_S4)
	    AcpiEnable();
    } else {
	status = AcpiEnterSleepState(state);
	intr_restore(intr);
	AcpiLeaveSleepStatePrep(state);
	if (ACPI_FAILURE(status)) {
	    device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
		AcpiFormatException(status));
	    goto backout;
	}
    }
    slp_state = ACPI_SS_SLEPT;

    /*
     * Back out state according to how far along we got in the suspend
     * process.  This handles both the error and success cases.
     */
backout:
    if (slp_state >= ACPI_SS_SLP_PREP)
	resumeclock();
    if (slp_state >= ACPI_SS_GPE_SET) {
	acpi_wake_prep_walk(state);
	sc->acpi_sstate = ACPI_STATE_S0;
    }
    if (slp_state >= ACPI_SS_DEV_SUSPEND)
	DEVICE_RESUME(root_bus);
    if (slp_state >= ACPI_SS_SLP_PREP)
	AcpiLeaveSleepState(state);
    if (slp_state >= ACPI_SS_SLEPT) {
#if defined(__i386__) || defined(__amd64__)
	/* NB: we are still using ACPI timecounter at this point. */
	resume_TSC();
#endif
	acpi_resync_clock(sc);
	acpi_enable_fixed_events(sc);
    }
    sc->acpi_next_sstate = 0;

    mtx_unlock(&Giant);

    /* Undo the BSP binding done on the way down. */
#ifdef EARLY_AP_STARTUP
    thread_lock(curthread);
    sched_unbind(curthread);
    thread_unlock(curthread);
#else
    if (smp_started) {
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
    }
#endif

    resume_all_proc();

    EVENTHANDLER_INVOKE(power_resume);

    /* Allow another sleep request after a while. */
    callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);

    /* Run /etc/rc.resume after we are back. */
    if (devctl_process_running())
	acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);

    return_ACPI_STATUS (status);
}
|
|
|
|
|
2012-02-08 21:23:20 +00:00
|
|
|
/*
 * Re-synchronize timekeeping after a resume: prime the active timecounter
 * and re-derive the system time-of-day clock.
 */
static void
acpi_resync_clock(struct acpi_softc *sc)
{

    /*
     * Warm up timecounter again and reset system clock.
     */
    (void)timecounter->tc_get_timecount(timecounter);
    /*
     * NOTE(review): acpi_sleep_delay is added to time_second before the
     * TOD reset — presumably a fudge for time spent suspended; confirm.
     */
    inittodr(time_second + sc->acpi_sleep_delay);
}
|
|
|
|
|
2004-05-27 18:38:45 +00:00
|
|
|
/* Enable or disable the device's wake GPE. */
|
|
|
|
/*
 * Enable or disable the wake GPE for "dev" and record the setting in the
 * device's ivar flags (ACPI_FLAG_WAKE_ENABLED).
 *
 * Returns 0 on success or ENXIO if the device has no valid _PRW or the
 * GPE wake-mask update fails.
 */
int
acpi_wake_set_enable(device_t dev, int enable)
{
    struct acpi_prw_data prw;
    ACPI_STATUS status;
    int flags;

    /* Make sure the device supports waking the system and get the GPE. */
    if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0)
	return (ENXIO);

    flags = acpi_get_flags(dev);
    if (enable) {
	status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit,
	    ACPI_GPE_ENABLE);
	if (ACPI_FAILURE(status)) {
	    device_printf(dev, "enable wake failed\n");
	    return (ENXIO);
	}
	/* Remember the user's choice so sleep/resume prep can honor it. */
	acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED);
    } else {
	status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit,
	    ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
	    device_printf(dev, "disable wake failed\n");
	    return (ENXIO);
	}
	acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED);
    }

    return (0);
}
|
|
|
|
|
2004-06-30 16:08:03 +00:00
|
|
|
/*
 * Prepare one wake-capable device for an impending sleep transition to
 * state "sstate".  Returns ENXIO if the handle has no valid _PRW,
 * otherwise 0.
 */
static int
acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
{
    struct acpi_prw_data prw;
    device_t dev;

    /* Check that this is a wake-capable device and get its GPE. */
    if (acpi_parse_prw(handle, &prw) != 0)
	return (ENXIO);
    dev = acpi_get_device(handle);

    /*
     * The destination sleep state must be less than (i.e., higher power)
     * or equal to the value specified by _PRW.  If this GPE cannot be
     * enabled for the next sleep state, then disable it.  If it can and
     * the user requested it be enabled, turn on any required power resources
     * and set _PSW.
     */
    if (sstate > prw.lowest_wake) {
	AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE);
	if (bootverbose)
	    device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
		acpi_name(handle), sstate);
    } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
	/* Power up wake resources and arm the device via _PSW. */
	acpi_pwr_wake_enable(handle, 1);
	acpi_SetInteger(handle, "_PSW", 1);
	if (bootverbose)
	    device_printf(dev, "wake_prep enabled for %s (S%d)\n",
		acpi_name(handle), sstate);
    }

    return (0);
}
|
|
|
|
|
2004-06-30 16:08:03 +00:00
|
|
|
/*
 * Undo acpi_wake_sleep_prep() for one device after resuming from sleep
 * state "sstate".  Returns ENXIO if the handle has no valid _PRW,
 * otherwise 0.
 */
static int
acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
{
    struct acpi_prw_data prw;
    device_t dev;

    /*
     * Check that this is a wake-capable device and get its GPE.  Return
     * now if the user didn't enable this device for wake.
     */
    if (acpi_parse_prw(handle, &prw) != 0)
	return (ENXIO);
    dev = acpi_get_device(handle);
    if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
	return (0);

    /*
     * If this GPE couldn't be enabled for the previous sleep state, it was
     * disabled before going to sleep so re-enable it.  If it was enabled,
     * clear _PSW and turn off any power resources it used.
     */
    if (sstate > prw.lowest_wake) {
	AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE);
	if (bootverbose)
	    device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle));
    } else {
	/* Disarm the device (_PSW = 0) and drop its wake power resources. */
	acpi_SetInteger(handle, "_PSW", 0);
	acpi_pwr_wake_enable(handle, 0);
	if (bootverbose)
	    device_printf(dev, "run_prep cleaned up for %s\n",
		acpi_name(handle));
    }

    return (0);
}
|
|
|
|
|
|
|
|
static ACPI_STATUS
|
2004-06-30 16:08:03 +00:00
|
|
|
acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
|
2004-05-27 18:38:45 +00:00
|
|
|
{
|
2004-06-30 16:08:03 +00:00
|
|
|
int sstate;
|
2004-05-27 18:38:45 +00:00
|
|
|
|
2004-06-30 16:08:03 +00:00
|
|
|
/* If suspending, run the sleep prep function, otherwise wake. */
|
|
|
|
sstate = *(int *)context;
|
|
|
|
if (AcpiGbl_SystemAwakeAndRunning)
|
|
|
|
acpi_wake_sleep_prep(handle, sstate);
|
|
|
|
else
|
|
|
|
acpi_wake_run_prep(handle, sstate);
|
2004-05-27 18:38:45 +00:00
|
|
|
return (AE_OK);
|
|
|
|
}
|
|
|
|
|
2004-06-30 16:08:03 +00:00
|
|
|
/* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */
|
2004-05-27 18:38:45 +00:00
|
|
|
/* Walk the tree rooted at \_SB_ to prep devices for suspend/resume. */
static int
acpi_wake_prep_walk(int sstate)
{
    ACPI_HANDLE sb_handle;

    /* Visit every device under \_SB_ (up to 100 levels deep). */
    if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
	AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
	    acpi_wake_prep, NULL, &sstate, NULL);
    return (0);
}
|
|
|
|
|
2004-05-28 06:28:55 +00:00
|
|
|
/* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */
|
|
|
|
/*
 * Walk the device tree rooted at "dev" (recursively) and attach a
 * per-device "wake" sysctl to every attached child that has a _PRW
 * object.  Returns 0 on success or the device_get_children() error.
 */
static int
acpi_wake_sysctl_walk(device_t dev)
{
    int error, i, numdevs;
    device_t *devlist;
    device_t child;
    ACPI_STATUS status;

    error = device_get_children(dev, &devlist, &numdevs);
    if (error != 0 || numdevs == 0) {
	/* A zero-length list was still allocated; release it. */
	if (numdevs == 0)
	    free(devlist, M_TEMP);
	return (error);
    }
    for (i = 0; i < numdevs; i++) {
	child = devlist[i];
	/* Recurse first so grandchildren are covered too. */
	acpi_wake_sysctl_walk(child);
	if (!device_is_attached(child))
	    continue;
	/* Only wake-capable devices (those with _PRW) get the sysctl. */
	status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL);
	if (ACPI_SUCCESS(status)) {
	    SYSCTL_ADD_PROC(device_get_sysctl_ctx(child),
		SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO,
		"wake", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, child, 0,
		acpi_wake_set_sysctl, "I", "Device set to wake the system");
	}
    }
    free(devlist, M_TEMP);

    return (0);
}
|
|
|
|
|
|
|
|
/* Enable or disable wake from userland. */
|
|
|
|
/*
 * Sysctl handler backing the per-device "wake" knob: reads the current
 * ACPI_FLAG_WAKE_ENABLED state and, on write, applies 0/1 via
 * acpi_wake_set_enable().
 */
static int
acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS)
{
    int enable, error;
    device_t dev;

    dev = (device_t)arg1;
    enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0;

    error = sysctl_handle_int(oidp, &enable, 0, req);
    /* Read-only access or handler error: nothing more to do. */
    if (error != 0 || req->newptr == NULL)
	return (error);
    /* Only the values 0 and 1 are accepted. */
    if (enable != 0 && enable != 1)
	return (EINVAL);

    return (acpi_wake_set_enable(dev, enable));
}
|
|
|
|
|
2004-05-27 18:38:45 +00:00
|
|
|
/* Parse a device's _PRW into a structure. */
|
2004-06-30 16:08:03 +00:00
|
|
|
/*
 * Parse a device's _PRW (Power Resources for Wake) object into "prw".
 *
 * Fills in the wake GPE (block handle + bit index), the lowest sleep
 * state from which the device can wake the system, and the trailing
 * power-resource references.  Returns 0 on success, EINVAL on a
 * malformed package or NULL arguments, ENOENT if the device has no _PRW.
 */
int
acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw)
{
    ACPI_STATUS status;
    ACPI_BUFFER prw_buffer;
    ACPI_OBJECT *res, *res2;
    int error, i, power_count;

    if (h == NULL || prw == NULL)
	return (EINVAL);

    /*
     * The _PRW object (7.2.9) is only required for devices that have the
     * ability to wake the system from a sleeping state.
     */
    error = EINVAL;
    prw_buffer.Pointer = NULL;
    prw_buffer.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer);
    if (ACPI_FAILURE(status))
	return (ENOENT);
    res = (ACPI_OBJECT *)prw_buffer.Pointer;
    if (res == NULL)
	return (ENOENT);
    /* _PRW must be a package with at least two elements. */
    if (!ACPI_PKG_VALID(res, 2))
	goto out;

    /*
     * Element 1 of the _PRW object:
     * The lowest power system sleeping state that can be entered while still
     * providing wake functionality.  The sleeping state being entered must
     * be less than (i.e., higher power) or equal to this value.
     */
    if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0)
	goto out;

    /*
     * Element 0 of the _PRW object:
     */
    switch (res->Package.Elements[0].Type) {
    case ACPI_TYPE_INTEGER:
	/*
	 * If the data type of this package element is numeric, then this
	 * _PRW package element is the bit index in the GPEx_EN, in the
	 * GPE blocks described in the FADT, of the enable bit that is
	 * enabled for the wake event.
	 */
	prw->gpe_handle = NULL;
	prw->gpe_bit = res->Package.Elements[0].Integer.Value;
	error = 0;
	break;
    case ACPI_TYPE_PACKAGE:
	/*
	 * If the data type of this package element is a package, then this
	 * _PRW package element is itself a package containing two
	 * elements.  The first is an object reference to the GPE Block
	 * device that contains the GPE that will be triggered by the wake
	 * event.  The second element is numeric and it contains the bit
	 * index in the GPEx_EN, in the GPE Block referenced by the
	 * first element in the package, of the enable bit that is enabled for
	 * the wake event.
	 *
	 * For example, if this field is a package then it is of the form:
	 * Package() {\_SB.PCI0.ISA.GPE, 2}
	 */
	res2 = &res->Package.Elements[0];
	if (!ACPI_PKG_VALID(res2, 2))
	    goto out;
	prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]);
	if (prw->gpe_handle == NULL)
	    goto out;
	if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0)
	    goto out;
	error = 0;
	break;
    default:
	goto out;
    }

    /* Elements 2 to N of the _PRW object are power resources. */
    power_count = res->Package.Count - 2;
    if (power_count > ACPI_PRW_MAX_POWERRES) {
	printf("ACPI device %s has too many power resources\n", acpi_name(h));
	power_count = 0;
    }
    prw->power_res_count = power_count;
    /*
     * NOTE(review): this copies Elements[i] rather than Elements[i + 2];
     * confirm the intended indexing of the power-resource elements.
     */
    for (i = 0; i < power_count; i++)
	prw->power_res[i] = res->Package.Elements[i];

out:
    if (prw_buffer.Pointer != NULL)
	AcpiOsFree(prw_buffer.Pointer);
    return (error);
}
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
|
|
|
|
* ACPI Event Handlers
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
|
|
|
|
|
|
|
|
/*
 * System eventhandler callback for sleep requests (registered via
 * EVENTHANDLER_REGISTER): asks the sleep state machine to begin a
 * transition to "state".
 */
static void
acpi_system_eventhandler_sleep(void *arg, int state)
{
    struct acpi_softc *sc = (struct acpi_softc *)arg;
    int ret;

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);

    /* Check if button action is disabled or unknown. */
    if (state == ACPI_STATE_UNKNOWN)
	return;

    /* Request that the system prepare to enter the given suspend state. */
    ret = acpi_ReqSleepState(sc, state);
    if (ret != 0)
	device_printf(sc->acpi_dev,
	    "request to enter state S%d failed (err %d)\n", state, ret);

    return_VOID;
}
|
|
|
|
|
|
|
|
/*
 * System eventhandler callback for wakeup events; currently a traced
 * no-op kept as a registration target.
 */
static void
acpi_system_eventhandler_wakeup(void *arg, int state)
{

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);

    /* Currently, nothing to do for wakeup. */

    return_VOID;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ACPICA Event Handlers (FixedEvent, also called from button notify handler)
|
|
|
|
*/
|
2011-01-11 19:26:39 +00:00
|
|
|
/*
 * Deferred-work shim: fire the acpi_sleep_event eventhandler chain with
 * the sleep state pointed to by "context" (queued via AcpiOsExecute).
 */
static void
acpi_invoke_sleep_eventhandler(void *context)
{

    EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context);
}
|
|
|
|
|
|
|
|
/*
 * Deferred-work shim: fire the acpi_wakeup_event eventhandler chain with
 * the sleep state pointed to by "context" (queued via AcpiOsExecute).
 */
static void
acpi_invoke_wake_eventhandler(void *context)
{

    EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context);
}
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
 * ACPICA fixed-event handler for the power button (sleep direction):
 * defer the sleep-event broadcast to the OSL notify taskqueue since
 * this may run in interrupt-like context.
 */
UINT32
acpi_event_power_button_sleep(void *context)
{
    struct acpi_softc *sc = (struct acpi_softc *)context;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
	acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx)))
	return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
    return_VALUE (ACPI_INTERRUPT_HANDLED);
}
|
|
|
|
|
|
|
|
/*
 * ACPICA fixed-event handler for the power button (wake direction):
 * defer the wakeup-event broadcast to the OSL notify taskqueue.
 */
UINT32
acpi_event_power_button_wake(void *context)
{
    struct acpi_softc *sc = (struct acpi_softc *)context;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
	acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx)))
	return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
    return_VALUE (ACPI_INTERRUPT_HANDLED);
}
|
|
|
|
|
|
|
|
/*
 * ACPICA fixed-event handler for the sleep button (sleep direction):
 * defer the sleep-event broadcast to the OSL notify taskqueue.
 */
UINT32
acpi_event_sleep_button_sleep(void *context)
{
    struct acpi_softc *sc = (struct acpi_softc *)context;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
	acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx)))
	return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
    return_VALUE (ACPI_INTERRUPT_HANDLED);
}
|
|
|
|
|
|
|
|
/*
 * ACPICA fixed-event handler for the sleep button (wake direction):
 * defer the wakeup-event broadcast to the OSL notify taskqueue.
 */
UINT32
acpi_event_sleep_button_wake(void *context)
{
    struct acpi_softc *sc = (struct acpi_softc *)context;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
	acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx)))
	return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
    return_VALUE (ACPI_INTERRUPT_HANDLED);
}
|
|
|
|
|
|
|
|
/*
|
2004-08-03 05:13:56 +00:00
|
|
|
* XXX This static buffer is suboptimal. There is no locking so only
|
|
|
|
* use this for single-threaded callers.
|
2000-10-28 06:59:48 +00:00
|
|
|
*/
|
|
|
|
/*
 * Return the full ACPI namespace pathname for "handle", or "(unknown)"
 * if the handle is NULL or the lookup fails.  The result lives in a
 * shared static buffer (see the locking caveat above).
 */
char *
acpi_name(ACPI_HANDLE handle)
{
    ACPI_BUFFER buf;
    static char data[256];	/* shared, unsynchronized result buffer */

    buf.Length = sizeof(data);
    buf.Pointer = data;

    if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf)))
	return (data);
    return ("(unknown)");
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Debugging/bug-avoidance. Avoid trying to fetch info on various
|
|
|
|
* parts of the namespace.
|
|
|
|
*/
|
|
|
|
/*
 * Return non-zero if the device named by "handle" appears in the
 * debug.acpi.avoid environment list (whitespace-separated names, with
 * the leading '\' stripped for comparison).
 */
int
acpi_avoid(ACPI_HANDLE handle)
{
    char *cp, *env, *np;
    int len;

    np = acpi_name(handle);
    /* Compare without the leading root prefix. */
    if (*np == '\\')
	np++;
    if ((env = kern_getenv("debug.acpi.avoid")) == NULL)
	return (0);

    /* Scan the avoid list checking for a match */
    cp = env;
    for (;;) {
	while (*cp != 0 && isspace(*cp))
	    cp++;
	if (*cp == 0)
	    break;
	len = 0;
	while (cp[len] != 0 && !isspace(cp[len]))
	    len++;
	/* Note: strncmp(len) makes each entry match as a prefix of np. */
	if (!strncmp(cp, np, len)) {
	    freeenv(env);
	    return(1);
	}
	cp += len;
    }
    freeenv(env);

    return (0);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Debugging/bug-avoidance. Disable ACPI subsystem components.
|
|
|
|
*/
|
|
|
|
/*
 * Return non-zero if the named ACPI subsystem is disabled via the
 * debug.acpi.disabled environment list; the special entry "all"
 * disables everything.
 */
int
acpi_disabled(char *subsys)
{
    char *cp, *env;
    int len;

    if ((env = kern_getenv("debug.acpi.disabled")) == NULL)
	return (0);
    if (strcmp(env, "all") == 0) {
	freeenv(env);
	return (1);
    }

    /* Scan the disable list, checking for a match. */
    cp = env;
    for (;;) {
	while (*cp != '\0' && isspace(*cp))
	    cp++;
	if (*cp == '\0')
	    break;
	len = 0;
	while (cp[len] != '\0' && !isspace(cp[len]))
	    len++;
	/* Note: strncmp(len) makes each entry match as a prefix of subsys. */
	if (strncmp(cp, subsys, len) == 0) {
	    freeenv(env);
	    return (1);
	}
	cp += len;
    }
    freeenv(env);

    return (0);
}
|
|
|
|
|
2015-02-06 16:09:01 +00:00
|
|
|
/*
 * bus_lookup hook: resolve an absolute ACPI namespace path to its
 * attached device_t.  Leaves *dev untouched if it is already set, the
 * name is not absolute, or no handle/device matches.
 */
static void
acpi_lookup(void *arg, const char *name, device_t *dev)
{
    ACPI_HANDLE handle;

    /* Someone earlier in the chain already resolved the name. */
    if (*dev != NULL)
	return;

    /*
     * Allow any handle name that is specified as an absolute path and
     * starts with '\'.  We could restrict this to \_SB and friends,
     * but see acpi_probe_children() for notes on why we scan the entire
     * namespace for devices.
     *
     * XXX: The pathname argument to AcpiGetHandle() should be fixed to
     * be const.
     */
    if (name[0] != '\\')
	return;
    if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name),
	&handle)))
	return;
    *dev = acpi_get_device(handle);
}
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/*
|
|
|
|
* Control interface.
|
|
|
|
*
|
2000-12-08 09:16:20 +00:00
|
|
|
* We multiplex ioctls for all participating ACPI devices here. Individual
|
2003-08-28 16:06:30 +00:00
|
|
|
* drivers wanting to be accessible via /dev/acpi should use the
|
|
|
|
* register/deregister interface to make their handlers visible.
|
2000-10-28 06:59:48 +00:00
|
|
|
*/
|
2000-12-08 09:16:20 +00:00
|
|
|
/*
 * One registered /dev/acpi ioctl handler.  Drivers add a (cmd, fn, arg)
 * tuple via acpi_register_ioctl() so acpiioctl() can dispatch to them.
 */
struct acpi_ioctl_hook
{
    TAILQ_ENTRY(acpi_ioctl_hook) link;	/* list linkage */
    u_long cmd;				/* ioctl command to match */
    acpi_ioctl_fn fn;			/* handler to invoke */
    void *arg;				/* opaque argument passed to fn */
};

/* Registered handlers; accessed under ACPI_LOCK(acpi). */
static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks;
/* Non-zero once acpi_ioctl_hooks has been lazily initialized. */
static int acpi_ioctl_hooks_initted;
|
|
|
|
|
|
|
|
/*
 * Register an ioctl handler for /dev/acpi.  Returns ENOMEM if the hook
 * record cannot be allocated, otherwise 0.
 */
int
acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg)
{
    struct acpi_ioctl_hook *hp;

    /* M_NOWAIT: this may be called from contexts that cannot sleep. */
    if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL)
	return (ENOMEM);
    hp->cmd = cmd;
    hp->fn = fn;
    hp->arg = arg;

    ACPI_LOCK(acpi);
    /* Lazily initialize the hook list on first registration. */
    if (acpi_ioctl_hooks_initted == 0) {
	TAILQ_INIT(&acpi_ioctl_hooks);
	acpi_ioctl_hooks_initted = 1;
    }
    TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link);
    ACPI_UNLOCK(acpi);

    return (0);
}
|
|
|
|
|
2004-12-27 05:36:47 +00:00
|
|
|
/*
 * Remove a previously registered /dev/acpi ioctl handler matching both
 * the command and handler function; silently does nothing if no such
 * registration exists.
 */
void
acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn)
{
    struct acpi_ioctl_hook *hp;

    ACPI_LOCK(acpi);
    TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link)
	if (hp->cmd == cmd && hp->fn == fn)
	    break;

    if (hp != NULL) {
	TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link);
	free(hp, M_ACPIDEV);
    }
    ACPI_UNLOCK(acpi);
}
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
/* /dev/acpi open: no per-open state is kept, so always succeed. */
static int
acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td)
{
    return (0);
}
|
|
|
|
|
|
|
|
/* /dev/acpi close: nothing to tear down, so always succeed. */
static int
acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
    return (0);
}
|
|
|
|
|
|
|
|
/*
 * /dev/acpi ioctl entry point.  First dispatches to any handler
 * registered via acpi_register_ioctl(); otherwise services the core
 * sleep-state ioctls (which require FWRITE access).
 */
static int
acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
    struct acpi_softc *sc;
    struct acpi_ioctl_hook *hp;
    int error, state;

    error = 0;
    hp = NULL;
    sc = dev->si_drv1;

    /*
     * Scan the list of registered ioctls, looking for handlers.
     */
    ACPI_LOCK(acpi);
    if (acpi_ioctl_hooks_initted)
	TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) {
	    if (hp->cmd == cmd)
		break;
	}
    ACPI_UNLOCK(acpi);
    if (hp)
	return (hp->fn(cmd, addr, hp->arg));

    /*
     * Core ioctls are not permitted for non-writable user.
     * Currently, other ioctls just fetch information.
     * Not changing system behavior.
     */
    if ((flag & FWRITE) == 0)
	return (EPERM);

    /* Core system ioctls. */
    switch (cmd) {
    case ACPIIO_REQSLPSTATE:
	state = *(int *)addr;
	/* S5 (power off) must go through the normal shutdown path. */
	if (state != ACPI_STATE_S5)
	    return (acpi_ReqSleepState(sc, state));
	device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
	error = EOPNOTSUPP;
	break;
    case ACPIIO_ACKSLPSTATE:
	/* The caller passes its own error code in; hand it to the acker. */
	error = *(int *)addr;
	error = acpi_AckSleepState(sc->acpi_clone, error);
	break;
    case ACPIIO_SETSLPSTATE:	/* DEPRECATED */
	state = *(int *)addr;
	if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
	    return (EINVAL);
	if (!acpi_sleep_states[state])
	    return (EOPNOTSUPP);
	if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
	    error = ENXIO;
	break;
    default:
	error = ENXIO;
	break;
    }

    return (error);
}
|
|
|
|
|
2009-04-30 17:35:44 +00:00
|
|
|
static int
|
|
|
|
acpi_sname2sstate(const char *sname)
|
|
|
|
{
|
|
|
|
int sstate;
|
|
|
|
|
|
|
|
if (toupper(sname[0]) == 'S') {
|
|
|
|
sstate = sname[1] - '0';
|
|
|
|
if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 &&
|
|
|
|
sname[2] == '\0')
|
|
|
|
return (sstate);
|
|
|
|
} else if (strcasecmp(sname, "NONE") == 0)
|
|
|
|
return (ACPI_STATE_UNKNOWN);
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
2009-04-30 17:45:43 +00:00
|
|
|
static const char *
|
2009-04-30 17:35:44 +00:00
|
|
|
acpi_sstate2sname(int sstate)
|
|
|
|
{
|
|
|
|
static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };
|
|
|
|
|
|
|
|
if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
|
|
|
|
return (snames[sstate]);
|
|
|
|
else if (sstate == ACPI_STATE_UNKNOWN)
|
|
|
|
return ("NONE");
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2003-04-11 16:53:56 +00:00
|
|
|
/*
 * Sysctl handler reporting the space-separated list of sleep states
 * (S1..) this platform supports, built from acpi_sleep_states[].
 */
static int
acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
{
    int error;
    struct sbuf sb;
    UINT8 state;

    sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
    for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
	if (acpi_sleep_states[state])
	    sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
    /* Drop the trailing space before handing the string out. */
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);
    return (error);
}
|
|
|
|
|
2001-01-13 21:28:57 +00:00
|
|
|
/*
 * Sysctl handler for the configurable sleep-state knobs (e.g. the
 * button/lid actions): reads/writes the int pointed to by oid_arg1 as a
 * state name, validating that a written state is supported.
 */
static int
acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
{
    char sleep_state[10];
    int error, new_state, old_state;

    old_state = *(int *)oidp->oid_arg1;
    strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
    error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
    if (error == 0 && req->newptr != NULL) {
	new_state = acpi_sname2sstate(sleep_state);
	/* Rejects unparsable names (-1) and S0 alike. */
	if (new_state < ACPI_STATE_S1)
	    return (EINVAL);
	if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
	    return (EOPNOTSUPP);
	if (new_state != old_state)
	    *(int *)oidp->oid_arg1 = new_state;
    }
    return (error);
}
|
|
|
|
|
2003-10-25 05:03:25 +00:00
|
|
|
/* Inform devctl(4) when we receive a Notify. */
|
|
|
|
/*
 * Inform devctl(4) when we receive a Notify: emits an "ACPI" event with
 * the given subsystem, the object's namespace path, and the notify code.
 * Silently returns if subsystem is NULL or the pathname lookup fails.
 */
void
acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
{
    char notify_buf[16];
    ACPI_BUFFER handle_buf;
    ACPI_STATUS status;

    if (subsystem == NULL)
	return;

    handle_buf.Pointer = NULL;
    handle_buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiNsHandleToPathname(h, &handle_buf, FALSE);
    if (ACPI_FAILURE(status))
	return;
    snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify);
    devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf);
    AcpiOsFree(handle_buf.Pointer);
}
|
|
|
|
|
2000-10-28 06:59:48 +00:00
|
|
|
#ifdef ACPI_DEBUG
|
2000-12-08 09:16:20 +00:00
|
|
|
/*
|
|
|
|
* Support for parsing debug options from the kernel environment.
|
|
|
|
*
|
|
|
|
* Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers
|
|
|
|
* by specifying the names of the bits in the debug.acpi.layer and
|
|
|
|
* debug.acpi.level environment variables. Bits may be unset by
|
|
|
|
* prefixing the bit name with !.
|
|
|
|
*/
|
2000-10-28 06:59:48 +00:00
|
|
|
struct debugtag
|
|
|
|
{
|
|
|
|
char *name;
|
|
|
|
UINT32 value;
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Name table for AcpiDbgLayer (debug.acpi.layer); {NULL, 0} terminated. */
static struct debugtag dbg_layer[] = {
	/* ACPI CA core subsystem components */
	{"ACPI_UTILITIES", ACPI_UTILITIES},
	{"ACPI_HARDWARE", ACPI_HARDWARE},
	{"ACPI_EVENTS", ACPI_EVENTS},
	{"ACPI_TABLES", ACPI_TABLES},
	{"ACPI_NAMESPACE", ACPI_NAMESPACE},
	{"ACPI_PARSER", ACPI_PARSER},
	{"ACPI_DISPATCHER", ACPI_DISPATCHER},
	{"ACPI_EXECUTER", ACPI_EXECUTER},
	{"ACPI_RESOURCES", ACPI_RESOURCES},
	{"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER},
	{"ACPI_OS_SERVICES", ACPI_OS_SERVICES},
	{"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER},
	{"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS},

	/* FreeBSD ACPI device drivers */
	{"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER},
	{"ACPI_BATTERY", ACPI_BATTERY},
	{"ACPI_BUS", ACPI_BUS},
	{"ACPI_BUTTON", ACPI_BUTTON},
	{"ACPI_EC", ACPI_EC},
	{"ACPI_FAN", ACPI_FAN},
	{"ACPI_POWERRES", ACPI_POWERRES},
	{"ACPI_PROCESSOR", ACPI_PROCESSOR},
	{"ACPI_THERMAL", ACPI_THERMAL},
	{"ACPI_TIMER", ACPI_TIMER},
	{"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS},
	{NULL, 0}	/* terminator */
};
|
|
|
|
|
|
|
|
/* Name table for AcpiDbgLevel (debug.acpi.level); {NULL, 0} terminated. */
static struct debugtag dbg_level[] = {
	{"ACPI_LV_INIT", ACPI_LV_INIT},
	{"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT},
	{"ACPI_LV_INFO", ACPI_LV_INFO},
	{"ACPI_LV_REPAIR", ACPI_LV_REPAIR},
	{"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS},

	/* Trace verbosity level 1 [Standard Trace Level] */
	{"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES},
	{"ACPI_LV_PARSE", ACPI_LV_PARSE},
	{"ACPI_LV_LOAD", ACPI_LV_LOAD},
	{"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH},
	{"ACPI_LV_EXEC", ACPI_LV_EXEC},
	{"ACPI_LV_NAMES", ACPI_LV_NAMES},
	{"ACPI_LV_OPREGION", ACPI_LV_OPREGION},
	{"ACPI_LV_BFIELD", ACPI_LV_BFIELD},
	{"ACPI_LV_TABLES", ACPI_LV_TABLES},
	{"ACPI_LV_VALUES", ACPI_LV_VALUES},
	{"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS},
	{"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES},
	{"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS},
	{"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE},
	{"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1},

	/* Trace verbosity level 2 [Function tracing and memory allocation] */
	{"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS},
	{"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS},
	{"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS},
	{"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2},
	{"ACPI_LV_ALL", ACPI_LV_ALL},

	/* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
	{"ACPI_LV_MUTEX", ACPI_LV_MUTEX},
	{"ACPI_LV_THREADS", ACPI_LV_THREADS},
	{"ACPI_LV_IO", ACPI_LV_IO},
	{"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS},
	{"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3},

	/* Exceptionally verbose output -- also used in the global "DebugLevel" */
	{"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE},
	{"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO},
	{"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES},
	{"ACPI_LV_EVENTS", ACPI_LV_EVENTS},
	{"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE},
	{NULL, 0}	/* terminator */
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag)
|
|
|
|
{
|
|
|
|
char *ep;
|
|
|
|
int i, l;
|
2000-12-08 09:16:20 +00:00
|
|
|
int set;
|
2000-10-28 06:59:48 +00:00
|
|
|
|
|
|
|
while (*cp) {
|
|
|
|
if (isspace(*cp)) {
|
|
|
|
cp++;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
ep = cp;
|
|
|
|
while (*ep && !isspace(*ep))
|
|
|
|
ep++;
|
2000-12-08 09:16:20 +00:00
|
|
|
if (*cp == '!') {
|
|
|
|
set = 0;
|
|
|
|
cp++;
|
|
|
|
if (cp == ep)
|
|
|
|
continue;
|
|
|
|
} else {
|
|
|
|
set = 1;
|
|
|
|
}
|
2000-10-28 06:59:48 +00:00
|
|
|
l = ep - cp;
|
|
|
|
for (i = 0; tag[i].name != NULL; i++) {
|
|
|
|
if (!strncmp(cp, tag[i].name, l)) {
|
2003-08-28 16:06:30 +00:00
|
|
|
if (set)
|
2000-12-08 09:16:20 +00:00
|
|
|
*flag |= tag[i].value;
|
2003-08-28 16:06:30 +00:00
|
|
|
else
|
2000-12-08 09:16:20 +00:00
|
|
|
*flag &= ~tag[i].value;
|
2000-10-28 06:59:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
cp = ep;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2001-05-29 20:13:42 +00:00
|
|
|
acpi_set_debugging(void *junk)
|
2000-10-28 06:59:48 +00:00
|
|
|
{
|
2004-05-06 02:05:45 +00:00
|
|
|
char *layer, *level;
|
2000-12-08 09:16:20 +00:00
|
|
|
|
2003-09-26 21:22:10 +00:00
|
|
|
if (cold) {
|
|
|
|
AcpiDbgLayer = 0;
|
|
|
|
AcpiDbgLevel = 0;
|
|
|
|
}
|
2002-11-24 02:27:07 +00:00
|
|
|
|
2014-10-16 18:04:43 +00:00
|
|
|
layer = kern_getenv("debug.acpi.layer");
|
|
|
|
level = kern_getenv("debug.acpi.level");
|
2004-05-06 02:05:45 +00:00
|
|
|
if (layer == NULL && level == NULL)
|
|
|
|
return;
|
2000-12-08 09:16:20 +00:00
|
|
|
|
2004-05-06 02:05:45 +00:00
|
|
|
printf("ACPI set debug");
|
|
|
|
if (layer != NULL) {
|
|
|
|
if (strcmp("NONE", layer) != 0)
|
|
|
|
printf(" layer '%s'", layer);
|
|
|
|
acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer);
|
|
|
|
freeenv(layer);
|
|
|
|
}
|
|
|
|
if (level != NULL) {
|
|
|
|
if (strcmp("NONE", level) != 0)
|
|
|
|
printf(" level '%s'", level);
|
|
|
|
acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel);
|
|
|
|
freeenv(level);
|
2003-09-26 21:22:10 +00:00
|
|
|
}
|
2004-05-06 02:05:45 +00:00
|
|
|
printf("\n");
|
2000-10-28 06:59:48 +00:00
|
|
|
}
|
2004-08-03 05:13:56 +00:00
|
|
|
|
2003-08-28 16:06:30 +00:00
|
|
|
/* Apply the debug.acpi.* environment settings once tunables are loaded. */
SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging,
    NULL);
|
2003-09-26 21:22:10 +00:00
|
|
|
|
|
|
|
/*
 * Sysctl handler shared by debug.acpi.layer and debug.acpi.level.
 * oid_arg1 is the kernel environment variable name ("debug.acpi.layer"
 * or "debug.acpi.level"), which selects the tag table and the ACPICA
 * debug register to operate on.
 *
 * Reads report the currently-set bits as space-separated names (or
 * "NONE").  Writes reset the register, store the new string in the
 * kernel environment, and re-run acpi_set_debugging() to parse it.
 */
static int
acpi_debug_sysctl(SYSCTL_HANDLER_ARGS)
{
	int error, *dbg;
	struct debugtag *tag;
	struct sbuf sb;
	char temp[128];

	if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL)
		return (ENOMEM);
	if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) {
		tag = &dbg_layer[0];
		dbg = &AcpiDbgLayer;
	} else {
		tag = &dbg_level[0];
		dbg = &AcpiDbgLevel;
	}

	/* Get old values if this is a get request. */
	ACPI_SERIAL_BEGIN(acpi);
	if (*dbg == 0) {
		sbuf_cpy(&sb, "NONE");
	} else if (req->newptr == NULL) {
		/* Emit the name of every fully-set mask in the table. */
		for (; tag->name != NULL; tag++) {
			if ((*dbg & tag->value) == tag->value)
				sbuf_printf(&sb, "%s ", tag->name);
		}
	}
	/* Drop the trailing space and snapshot into a fixed buffer
	 * (strlcpy truncates output longer than 128 bytes). */
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	strlcpy(temp, sbuf_data(&sb), sizeof(temp));
	sbuf_delete(&sb);

	error = sysctl_handle_string(oidp, temp, sizeof(temp), req);

	/* Check for error or no change */
	if (error == 0 && req->newptr != NULL) {
		/* Start from a clean mask, then re-parse via the env var. */
		*dbg = 0;
		kern_setenv((char *)oidp->oid_arg1, temp);
		acpi_set_debugging(NULL);
	}
	ACPI_SERIAL_END(acpi);

	return (error);
}
|
2004-08-03 05:13:56 +00:00
|
|
|
|
2020-02-26 14:26:36 +00:00
|
|
|
/* Writable string sysctls; acpi_debug_sysctl() parses the bit names. */
SYSCTL_PROC(_debug_acpi, OID_AUTO, layer,
    CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_NEEDGIANT, "debug.acpi.layer", 0,
    acpi_debug_sysctl, "A",
    "");
SYSCTL_PROC(_debug_acpi, OID_AUTO, level,
    CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_NEEDGIANT, "debug.acpi.level", 0,
    acpi_debug_sysctl, "A",
    "");
#endif /* ACPI_DEBUG */
|
2010-03-09 19:02:02 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
int old;
|
|
|
|
|
|
|
|
old = acpi_debug_objects;
|
|
|
|
error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req);
|
|
|
|
if (error != 0 || req->newptr == NULL)
|
|
|
|
return (error);
|
|
|
|
if (old == acpi_debug_objects || (old && acpi_debug_objects))
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
ACPI_SERIAL_BEGIN(acpi);
|
|
|
|
AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
|
|
|
|
ACPI_SERIAL_END(acpi);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
2010-10-26 18:59:50 +00:00
|
|
|
|
|
|
|
/*
 * Split a comma-separated list of interface strings in 'str' and fill
 * 'iface' with pointers to the individual strings.  Returns the number
 * of strings found, or 0 if the list is empty (iface is untouched then).
 *
 * On success the caller owns two M_TEMP allocations: the pointer array
 * iface->data and the duplicated string buffer that backs it; both are
 * released by acpi_free_interfaces().
 */
static int
acpi_parse_interfaces(char *str, struct acpi_interface *iface)
{
	char *p;
	size_t len;
	int i, j;

	/* Skip leading whitespace and empty fields. */
	p = str;
	while (isspace(*p) || *p == ',')
		p++;
	len = strlen(p);
	if (len == 0)
		return (0);

	/* Work on a private copy; commas become string terminators. */
	p = strdup(p, M_TEMP);
	for (i = 0; i < len; i++)
		if (p[i] == ',')
			p[i] = '\0';

	/* First pass: count the non-empty fields. */
	i = j = 0;
	while (i < len)
		if (isspace(p[i]) || p[i] == '\0')
			i++;
		else {
			i += strlen(p + i) + 1;
			j++;
		}
	if (j == 0) {
		free(p, M_TEMP);
		return (0);
	}

	/*
	 * Second pass: record a pointer to each field.  Because leading
	 * separators were stripped before the strdup, data[0] always
	 * points at the start of the duplicated buffer;
	 * acpi_free_interfaces() relies on this to free it.
	 */
	iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK);
	iface->num = j;
	i = j = 0;
	while (i < len)
		if (isspace(p[i]) || p[i] == '\0')
			i++;
		else {
			iface->data[j] = p + i;
			i += strlen(p + i) + 1;
			j++;
		}

	return (j);
}
|
|
|
|
|
|
|
|
/*
 * Free an interface list built by acpi_parse_interfaces().
 */
static void
acpi_free_interfaces(struct acpi_interface *iface)
{

	/*
	 * data[0] points at the start of the duplicated string buffer
	 * (see acpi_parse_interfaces()), so freeing it releases all of
	 * the individual strings at once.
	 */
	free(iface->data[0], M_TEMP);
	free(iface->data, M_TEMP);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
acpi_reset_interfaces(device_t dev)
|
|
|
|
{
|
|
|
|
struct acpi_interface list;
|
|
|
|
ACPI_STATUS status;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) {
|
|
|
|
for (i = 0; i < list.num; i++) {
|
|
|
|
status = AcpiInstallInterface(list.data[i]);
|
|
|
|
if (ACPI_FAILURE(status))
|
|
|
|
device_printf(dev,
|
|
|
|
"failed to install _OSI(\"%s\"): %s\n",
|
|
|
|
list.data[i], AcpiFormatException(status));
|
|
|
|
else if (bootverbose)
|
|
|
|
device_printf(dev, "installed _OSI(\"%s\")\n",
|
|
|
|
list.data[i]);
|
|
|
|
}
|
|
|
|
acpi_free_interfaces(&list);
|
|
|
|
}
|
|
|
|
if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) {
|
|
|
|
for (i = 0; i < list.num; i++) {
|
|
|
|
status = AcpiRemoveInterface(list.data[i]);
|
|
|
|
if (ACPI_FAILURE(status))
|
|
|
|
device_printf(dev,
|
|
|
|
"failed to remove _OSI(\"%s\"): %s\n",
|
|
|
|
list.data[i], AcpiFormatException(status));
|
|
|
|
else if (bootverbose)
|
|
|
|
device_printf(dev, "removed _OSI(\"%s\")\n",
|
|
|
|
list.data[i]);
|
|
|
|
}
|
|
|
|
acpi_free_interfaces(&list);
|
|
|
|
}
|
|
|
|
}
|
2001-11-01 16:34:07 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
acpi_pm_func(u_long cmd, void *arg, ...)
|
|
|
|
{
|
|
|
|
int state, acpi_state;
|
|
|
|
int error;
|
|
|
|
struct acpi_softc *sc;
|
|
|
|
va_list ap;
|
|
|
|
|
|
|
|
error = 0;
|
|
|
|
switch (cmd) {
|
|
|
|
case POWER_CMD_SUSPEND:
|
|
|
|
sc = (struct acpi_softc *)arg;
|
|
|
|
if (sc == NULL) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
va_start(ap, arg);
|
|
|
|
state = va_arg(ap, int);
|
2004-12-27 05:36:47 +00:00
|
|
|
va_end(ap);
|
2001-11-01 16:34:07 +00:00
|
|
|
|
|
|
|
switch (state) {
|
|
|
|
case POWER_SLEEP_STATE_STANDBY:
|
|
|
|
acpi_state = sc->acpi_standby_sx;
|
|
|
|
break;
|
|
|
|
case POWER_SLEEP_STATE_SUSPEND:
|
|
|
|
acpi_state = sc->acpi_suspend_sx;
|
|
|
|
break;
|
|
|
|
case POWER_SLEEP_STATE_HIBERNATE:
|
|
|
|
acpi_state = ACPI_STATE_S4;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
error = EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2007-06-21 22:50:37 +00:00
|
|
|
if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state)))
|
|
|
|
error = ENXIO;
|
2001-11-01 16:34:07 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
error = EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
acpi_pm_register(void *arg)
|
|
|
|
{
|
2003-08-28 16:06:30 +00:00
|
|
|
if (!cold || resource_disabled("acpi", 0))
|
2002-11-24 02:27:07 +00:00
|
|
|
return;
|
2002-08-30 11:11:07 +00:00
|
|
|
|
2002-11-24 02:27:07 +00:00
|
|
|
power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
|
2001-11-01 16:34:07 +00:00
|
|
|
}
|
|
|
|
|
2018-05-18 17:58:09 +00:00
|
|
|
/* Hook into power(9) once kernel modules have been loaded. */
SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL);
|