Rewrite the EC driver event model.

The main goal is to avoid the polling/interrupt-driven fallback and instead
use polling only during boot and pure interrupt-driven mode after boot.
Polled mode could be relegated completely to a legacy role if we could
enable interrupts during boot.  Polled mode can be forced after boot by
setting debug.acpi.ec.polled="1", e.g. if there are timeouts.

- Use polling only during boot, shutdown, or if requested by the user.
  Otherwise, use a generation count of GPEs, incremented atomically.  This
  prevents an old status value from being used if the EC is really slow and
  the same condition (i.e. multiple IBEs for a write transaction) is being
  checked.
- Check for and run the query handler directly if the SCI bit is set in the
  status register during boot.  Previously, the query handler wouldn't run
  until interrupts were finally enabled late in boot.
- During boot and after starting a command, check if the event appears to
  already have occurred before we even start waiting.  If so, it's possible
  the EC is very slow and we might accept an old status value.  Print a
  warning in this case.  Once we've booted, interrupt-driven mode should
  work just fine, but polled mode could be unreliable.  There's not much
  more we can do about this until interrupts are enabled during boot.
- In the above case, we also do one final check if interrupt-driven mode
  gets a timeout.  If the status is complete, it will force the system back
  into polled mode since interrupt mode doesn't work.  For polled mode
  during boot, if the status appears to be already complete before beginning
  the check loop, wait 10 us before actually checking the status, just in
  case the EC is really slow and hasn't gotten to work on the new request
  yet.
- Use upper-case hex for the _Qxx method name.
- Use device_printf() for errors; don't hide them under verbose output.
- Increase the default total timeout to 750 ms and decrease the polling
  interval to 5 us.
- Don't pass the status value via the softc; just read it directly.
- Remove the mutex.  We use the sx lock for transaction serialization with
  the query handler.
- Remove the Intel copyright notice, as no code of theirs was ever present
  in this file (verified against rev 1.1).
- Allow KTR module-only builds for ease of testing.

Thanks to jkim and Alexey Starikovskiy for helpful discussions and testing.

Approved by:    re
MFC after:      2 weeks
commit b50224d3f9
parent dd74194c9c
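
To illustrate the generation-count scheme described in the message above, here is a minimal,
self-contained C sketch (not the driver source): the GPE handler publishes a fresh status and
advances a counter, and the waiter only trusts the status register once the counter has moved,
so a stale "ready" value from the previous step of a transaction can never satisfy the wait.
All names below (gpe_handler, wait_event, OBF) are illustrative, and C11 atomics plus a spin
loop stand in for the driver's atomic_add_int()/wakeup() and tsleep() pairing.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uint gen_count;   /* stands in for sc->ec_gencount */
    static atomic_uint status_reg;  /* stands in for EC_GET_CSR(sc) */

    #define OBF 0x01u               /* "output buffer full" status bit */

    /* Simulated GPE handler: publish a fresh status, then advance the count. */
    static void
    gpe_handler(unsigned new_status)
    {
        atomic_store(&status_reg, new_status);
        atomic_fetch_add(&gen_count, 1);
        /* The driver also does wakeup(&sc->ec_gencount) here. */
    }

    /*
     * Wait for `bit`, starting from the generation observed just before the
     * command was issued.  The status register is only examined after the
     * generation has advanced, so a stale "ready" value left over from the
     * previous step of a transaction can never satisfy the wait.
     */
    static bool
    wait_event(unsigned start_gen, unsigned bit, int max_tries)
    {
        for (int i = 0; i < max_tries; i++) {
            unsigned g = atomic_load(&gen_count);
            if (g != start_gen) {
                start_gen = g;      /* status is now known to be fresh */
                if (atomic_load(&status_reg) & bit)
                    return (true);
            }
            /* The driver sleeps on the counter (tsleep) instead of spinning. */
        }
        return (false);             /* timeout: the driver falls back to polled mode */
    }

    int
    main(void)
    {
        unsigned gen = atomic_load(&gen_count);
        /* ...the command or address byte would be written to the EC here... */
        gpe_handler(OBF);           /* pretend the EC raised its GPE */
        printf("event %s\n", wait_event(gen, OBF, 10) ? "arrived" : "timed out");
        return (0);
    }

This is the same property that lets EcCommand(), EcRead(), and EcWrite() in the diff below
snapshot sc->ec_gencount before touching the command/data registers and hand it to EcWaitEvent().
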
@@ -1,5 +1,5 @@
|
||||
/*-
|
||||
* Copyright (c) 2003 Nate Lawson
|
||||
* Copyright (c) 2003-2007 Nate Lawson
|
||||
* Copyright (c) 2000 Michael Smith
|
||||
* Copyright (c) 2000 BSDi
|
||||
* All rights reserved.
|
||||
@@ -25,115 +25,6 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
/*-
|
||||
******************************************************************************
|
||||
*
|
||||
* 1. Copyright Notice
|
||||
*
|
||||
* Some or all of this work - Copyright (c) 1999, Intel Corp. All rights
|
||||
* reserved.
|
||||
*
|
||||
* 2. License
|
||||
*
|
||||
* 2.1. This is your license from Intel Corp. under its intellectual property
|
||||
* rights. You may have additional license terms from the party that provided
|
||||
* you this software, covering your right to use that party's intellectual
|
||||
* property rights.
|
||||
*
|
||||
* 2.2. Intel grants, free of charge, to any person ("Licensee") obtaining a
|
||||
* copy of the source code appearing in this file ("Covered Code") an
|
||||
* irrevocable, perpetual, worldwide license under Intel's copyrights in the
|
||||
* base code distributed originally by Intel ("Original Intel Code") to copy,
|
||||
* make derivatives, distribute, use and display any portion of the Covered
|
||||
* Code in any form, with the right to sublicense such rights; and
|
||||
*
|
||||
* 2.3. Intel grants Licensee a non-exclusive and non-transferable patent
|
||||
* license (with the right to sublicense), under only those claims of Intel
|
||||
* patents that are infringed by the Original Intel Code, to make, use, sell,
|
||||
* offer to sell, and import the Covered Code and derivative works thereof
|
||||
* solely to the minimum extent necessary to exercise the above copyright
|
||||
* license, and in no event shall the patent license extend to any additions
|
||||
* to or modifications of the Original Intel Code. No other license or right
|
||||
* is granted directly or by implication, estoppel or otherwise;
|
||||
*
|
||||
* The above copyright and patent license is granted only if the following
|
||||
* conditions are met:
|
||||
*
|
||||
* 3. Conditions
|
||||
*
|
||||
* 3.1. Redistribution of Source with Rights to Further Distribute Source.
|
||||
* Redistribution of source code of any substantial portion of the Covered
|
||||
* Code or modification with rights to further distribute source must include
|
||||
* the above Copyright Notice, the above License, this list of Conditions,
|
||||
* and the following Disclaimer and Export Compliance provision. In addition,
|
||||
* Licensee must cause all Covered Code to which Licensee contributes to
|
||||
* contain a file documenting the changes Licensee made to create that Covered
|
||||
* Code and the date of any change. Licensee must include in that file the
|
||||
* documentation of any changes made by any predecessor Licensee. Licensee
|
||||
* must include a prominent statement that the modification is derived,
|
||||
* directly or indirectly, from Original Intel Code.
|
||||
*
|
||||
* 3.2. Redistribution of Source with no Rights to Further Distribute Source.
|
||||
* Redistribution of source code of any substantial portion of the Covered
|
||||
* Code or modification without rights to further distribute source must
|
||||
* include the following Disclaimer and Export Compliance provision in the
|
||||
* documentation and/or other materials provided with distribution. In
|
||||
* addition, Licensee may not authorize further sublicense of source of any
|
||||
* portion of the Covered Code, and must include terms to the effect that the
|
||||
* license from Licensee to its licensee is limited to the intellectual
|
||||
* property embodied in the software Licensee provides to its licensee, and
|
||||
* not to intellectual property embodied in modifications its licensee may
|
||||
* make.
|
||||
*
|
||||
* 3.3. Redistribution of Executable. Redistribution in executable form of any
|
||||
* substantial portion of the Covered Code or modification must reproduce the
|
||||
* above Copyright Notice, and the following Disclaimer and Export Compliance
|
||||
* provision in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* 3.4. Intel retains all right, title, and interest in and to the Original
|
||||
* Intel Code.
|
||||
*
|
||||
* 3.5. Neither the name Intel nor any other trademark owned or controlled by
|
||||
* Intel shall be used in advertising or otherwise to promote the sale, use or
|
||||
* other dealings in products derived from or relating to the Covered Code
|
||||
* without prior written authorization from Intel.
|
||||
*
|
||||
* 4. Disclaimer and Export Compliance
|
||||
*
|
||||
* 4.1. INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED
|
||||
* HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE
|
||||
* IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE,
|
||||
* INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY
|
||||
* UPDATES, ENHANCEMENTS OR EXTENSIONS. INTEL SPECIFICALLY DISCLAIMS ANY
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A
|
||||
* PARTICULAR PURPOSE.
|
||||
*
|
||||
* 4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES
|
||||
* OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR
|
||||
* COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT,
|
||||
* SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY
|
||||
* CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL
|
||||
* HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS
|
||||
* SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY
|
||||
* LIMITED REMEDY.
|
||||
*
|
||||
* 4.3. Licensee shall not export, either directly or indirectly, any of this
|
||||
* software or system incorporating such software without first obtaining any
|
||||
* required license or other approval from the U. S. Department of Commerce or
|
||||
* any other agency or department of the United States Government. In the
|
||||
* event Licensee exports any such software from the United States or
|
||||
* re-exports any such software from a foreign destination, Licensee shall
|
||||
* ensure that the distribution and export/re-export of the software is in
|
||||
* compliance with all laws, regulations, orders, or other restrictions of the
|
||||
* U.S. Export Administration Regulations. Licensee agrees that neither it nor
|
||||
* any of its subsidiaries will export/re-export any technical data, process,
|
||||
* software, or service, directly or indirectly, to any country for which the
|
||||
* United States government or any agency thereof requires an export license,
|
||||
* other governmental approval, or letter of assurance, without first obtaining
|
||||
* such license, approval or letter.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
@@ -171,7 +62,7 @@ typedef UINT8 EC_COMMAND;
|
||||
#define EC_COMMAND_BURST_DISABLE ((EC_COMMAND) 0x83)
|
||||
#define EC_COMMAND_QUERY ((EC_COMMAND) 0x84)
|
||||
|
||||
/*
|
||||
/*
|
||||
* EC_STATUS:
|
||||
* ----------
|
||||
* The encoding of the EC status register is illustrated below.
|
||||
@@ -248,8 +139,7 @@ struct acpi_ec_softc {
|
||||
int ec_uid;
|
||||
ACPI_HANDLE ec_gpehandle;
|
||||
UINT8 ec_gpebit;
|
||||
UINT8 ec_csrvalue;
|
||||
|
||||
|
||||
int ec_data_rid;
|
||||
struct resource *ec_data_res;
|
||||
bus_space_tag_t ec_data_tag;
|
||||
@@ -260,11 +150,11 @@ struct acpi_ec_softc {
|
||||
bus_space_tag_t ec_csr_tag;
|
||||
bus_space_handle_t ec_csr_handle;
|
||||
|
||||
struct mtx ec_mtx;
|
||||
int ec_glk;
|
||||
int ec_glkhandle;
|
||||
int ec_burstactive;
|
||||
int ec_sci_pend;
|
||||
u_int ec_gencount;
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -275,13 +165,10 @@ struct acpi_ec_softc {
|
||||
#define EC_LOCK_TIMEOUT 1000
|
||||
|
||||
/* Default delay in microseconds between each run of the status polling loop. */
|
||||
#define EC_POLL_DELAY 10
|
||||
|
||||
/* Default time in microseconds spent polling before sleep waiting. */
|
||||
#define EC_POLL_TIME 500
|
||||
#define EC_POLL_DELAY 5
|
||||
|
||||
/* Total time in ms spent waiting for a response from EC. */
|
||||
#define EC_TIMEOUT 500
|
||||
#define EC_TIMEOUT 750
|
||||
|
||||
#define EVENT_READY(event, status) \
|
||||
(((event) == EC_EVENT_OUTPUT_BUFFER_FULL && \
|
||||
@@ -298,17 +185,17 @@ static int ec_burst_mode;
|
||||
TUNABLE_INT("debug.acpi.ec.burst", &ec_burst_mode);
|
||||
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, burst, CTLFLAG_RW, &ec_burst_mode, 0,
|
||||
"Enable use of burst mode (faster for nearly all systems)");
|
||||
static int ec_poll_time = EC_POLL_TIME;
|
||||
TUNABLE_INT("debug.acpi.ec.poll_time", &ec_poll_time);
|
||||
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, poll_time, CTLFLAG_RW, &ec_poll_time,
|
||||
EC_POLL_TIME, "Time spent polling vs. sleeping (CPU intensive)");
|
||||
static int ec_polled_mode;
|
||||
TUNABLE_INT("debug.acpi.ec.polled", &ec_polled_mode);
|
||||
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, polled, CTLFLAG_RW, &ec_polled_mode, 0,
|
||||
"Force use of polled mode (only if interrupt mode doesn't work)");
|
||||
static int ec_timeout = EC_TIMEOUT;
|
||||
TUNABLE_INT("debug.acpi.ec.timeout", &ec_timeout);
|
||||
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, timeout, CTLFLAG_RW, &ec_timeout,
|
||||
EC_TIMEOUT, "Total time spent waiting for a response (poll+sleep)");
|
||||
|
||||
static __inline ACPI_STATUS
|
||||
EcLock(struct acpi_ec_softc *sc, int serialize)
|
||||
static ACPI_STATUS
|
||||
EcLock(struct acpi_ec_softc *sc)
|
||||
{
|
||||
ACPI_STATUS status;
|
||||
|
||||
@@ -319,37 +206,27 @@ EcLock(struct acpi_ec_softc *sc, int serialize)
|
||||
if (ACPI_FAILURE(status))
|
||||
return (status);
|
||||
}
|
||||
|
||||
/*
|
||||
* If caller is executing a series of commands, acquire the exclusive lock
|
||||
* to serialize with other users.
|
||||
* To sync with bottom-half interrupt handler, always acquire the mutex.
|
||||
*/
|
||||
if (serialize)
|
||||
ACPI_SERIAL_BEGIN(ec);
|
||||
mtx_lock(&sc->ec_mtx);
|
||||
|
||||
ACPI_SERIAL_BEGIN(ec);
|
||||
return (status);
|
||||
}
|
||||
|
||||
static __inline void
|
||||
static void
|
||||
EcUnlock(struct acpi_ec_softc *sc)
|
||||
{
|
||||
mtx_unlock(&sc->ec_mtx);
|
||||
if (sx_xlocked(&ec_sxlock))
|
||||
ACPI_SERIAL_END(ec);
|
||||
ACPI_SERIAL_END(ec);
|
||||
if (sc->ec_glk)
|
||||
AcpiReleaseGlobalLock(sc->ec_glkhandle);
|
||||
}
|
||||
|
||||
static uint32_t EcGpeHandler(void *Context);
|
||||
static ACPI_STATUS EcSpaceSetup(ACPI_HANDLE Region, UINT32 Function,
|
||||
static ACPI_STATUS EcSpaceSetup(ACPI_HANDLE Region, UINT32 Function,
|
||||
void *Context, void **return_Context);
|
||||
static ACPI_STATUS EcSpaceHandler(UINT32 Function,
|
||||
ACPI_PHYSICAL_ADDRESS Address,
|
||||
UINT32 width, ACPI_INTEGER *Value,
|
||||
void *Context, void *RegionContext);
|
||||
static ACPI_STATUS EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event);
|
||||
static ACPI_STATUS EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event,
|
||||
u_int gen_count);
|
||||
static ACPI_STATUS EcCommand(struct acpi_ec_softc *sc, EC_COMMAND cmd);
|
||||
static ACPI_STATUS EcRead(struct acpi_ec_softc *sc, UINT8 Address,
|
||||
UINT8 *Data);
|
||||
@@ -387,10 +264,12 @@ DRIVER_MODULE(acpi_ec, acpi, acpi_ec_driver, acpi_ec_devclass, 0, 0);
|
||||
MODULE_DEPEND(acpi_ec, acpi, 1, 1, 1);
|
||||
|
||||
/*
|
||||
* Look for an ECDT and if we find one, set up default GPE and
|
||||
* Look for an ECDT and if we find one, set up default GPE and
|
||||
* space handlers to catch attempts to access EC space before
|
||||
* we have a real driver instance in place.
|
||||
* TODO: if people report invalid ECDTs, add a tunable to disable them.
|
||||
*
|
||||
* TODO: Some old Gateway laptops need us to fake up an ECDT or
|
||||
* otherwise attach early so that _REG methods can run.
|
||||
*/
|
||||
void
|
||||
acpi_ec_ecdt_probe(device_t parent)
|
||||
@@ -578,7 +457,6 @@ acpi_ec_attach(device_t dev)
|
||||
params = acpi_get_private(dev);
|
||||
sc->ec_dev = dev;
|
||||
sc->ec_handle = acpi_get_handle(dev);
|
||||
mtx_init(&sc->ec_mtx, "ACPI EC lock", NULL, MTX_DEF);
|
||||
|
||||
/* Retrieve previously probed values via device ivars. */
|
||||
sc->ec_glk = params->glk;
|
||||
@@ -621,7 +499,7 @@ acpi_ec_attach(device_t dev)
|
||||
goto error;
|
||||
}
|
||||
|
||||
/*
|
||||
/*
|
||||
* Install address space handler
|
||||
*/
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "attaching address space handler\n"));
|
||||
@@ -656,12 +534,11 @@ acpi_ec_attach(device_t dev)
|
||||
AcpiRemoveAddressSpaceHandler(sc->ec_handle, ACPI_ADR_SPACE_EC,
|
||||
EcSpaceHandler);
|
||||
if (sc->ec_csr_res)
|
||||
bus_release_resource(sc->ec_dev, SYS_RES_IOPORT, sc->ec_csr_rid,
|
||||
bus_release_resource(sc->ec_dev, SYS_RES_IOPORT, sc->ec_csr_rid,
|
||||
sc->ec_csr_res);
|
||||
if (sc->ec_data_res)
|
||||
bus_release_resource(sc->ec_dev, SYS_RES_IOPORT, sc->ec_data_rid,
|
||||
sc->ec_data_res);
|
||||
mtx_destroy(&sc->ec_mtx);
|
||||
return (ENXIO);
|
||||
}
|
||||
|
||||
@@ -715,57 +592,55 @@ EcGpeQueryHandler(void *Context)
|
||||
KASSERT(Context != NULL, ("EcGpeQueryHandler called with NULL"));
|
||||
|
||||
/* Serialize user access with EcSpaceHandler(). */
|
||||
Status = EcLock(sc, TRUE);
|
||||
Status = EcLock(sc);
|
||||
if (ACPI_FAILURE(Status)) {
|
||||
ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev),
|
||||
"GpeQuery lock error: %s\n", AcpiFormatException(Status));
|
||||
device_printf(sc->ec_dev, "GpeQuery lock error: %s\n",
|
||||
AcpiFormatException(Status));
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Send a query command to the EC to find out which _Qxx call it
|
||||
* wants to make. This command clears the SCI bit and also the
|
||||
* interrupt source since we are edge-triggered.
|
||||
* interrupt source since we are edge-triggered. To prevent the GPE
|
||||
* that may arise from running the query from causing another query
|
||||
* to be queued, we clear the pending flag only after running it.
|
||||
*/
|
||||
Status = EcCommand(sc, EC_COMMAND_QUERY);
|
||||
sc->ec_sci_pend = FALSE;
|
||||
if (ACPI_FAILURE(Status)) {
|
||||
EcUnlock(sc);
|
||||
ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev),
|
||||
"GPE query failed - %s\n", AcpiFormatException(Status));
|
||||
goto re_enable;
|
||||
device_printf(sc->ec_dev, "GPE query failed: %s\n",
|
||||
AcpiFormatException(Status));
|
||||
return;
|
||||
}
|
||||
Data = EC_GET_DATA(sc);
|
||||
sc->ec_sci_pend = FALSE;
|
||||
|
||||
/* Drop locks before evaluating _Qxx method since it may trigger GPEs. */
|
||||
/*
|
||||
* We have to unlock before running the _Qxx method below since that
|
||||
* method may attempt to read/write from EC address space, causing
|
||||
* recursive acquisition of the lock.
|
||||
*/
|
||||
EcUnlock(sc);
|
||||
|
||||
/* Ignore the value for "no outstanding event". (13.3.5) */
|
||||
CTR2(KTR_ACPI, "ec query ok,%s running _Q%02x", Data ? "" : " not", Data);
|
||||
CTR2(KTR_ACPI, "ec query ok,%s running _Q%02X", Data ? "" : " not", Data);
|
||||
if (Data == 0)
|
||||
goto re_enable;
|
||||
return;
|
||||
|
||||
/* Evaluate _Qxx to respond to the controller. */
|
||||
snprintf(qxx, sizeof(qxx), "_Q%02x", Data);
|
||||
snprintf(qxx, sizeof(qxx), "_Q%02X", Data);
|
||||
AcpiUtStrupr(qxx);
|
||||
Status = AcpiEvaluateObject(sc->ec_handle, qxx, NULL, NULL);
|
||||
if (ACPI_FAILURE(Status) && Status != AE_NOT_FOUND) {
|
||||
ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev),
|
||||
"evaluation of GPE query method %s failed - %s\n",
|
||||
qxx, AcpiFormatException(Status));
|
||||
device_printf(sc->ec_dev, "evaluation of query method %s failed: %s\n",
|
||||
qxx, AcpiFormatException(Status));
|
||||
}
|
||||
|
||||
re_enable:
|
||||
/* Re-enable the GPE event so we'll get future requests. */
|
||||
Status = AcpiEnableGpe(sc->ec_gpehandle, sc->ec_gpebit, ACPI_ISR);
|
||||
if (ACPI_FAILURE(Status))
|
||||
printf("EcGpeQueryHandler: AcpiEnableEvent failed\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle a GPE. Currently we only handle SCI events as others must
|
||||
* be handled by polling in EcWaitEvent(). This is because some ECs
|
||||
* treat events as level when they should be edge-triggered.
|
||||
* The GPE handler is called when IBE/OBF or SCI events occur. We are
|
||||
* called from an unknown lock context.
|
||||
*/
|
||||
static uint32_t
|
||||
EcGpeHandler(void *Context)
|
||||
@@ -773,68 +648,32 @@ EcGpeHandler(void *Context)
|
||||
struct acpi_ec_softc *sc = Context;
|
||||
ACPI_STATUS Status;
|
||||
EC_STATUS EcStatus;
|
||||
int query_pend;
|
||||
|
||||
KASSERT(Context != NULL, ("EcGpeHandler called with NULL"));
|
||||
CTR0(KTR_ACPI, "ec gpe handler start");
|
||||
|
||||
/*
|
||||
* Disable further GPEs while we handle this one. Since we are directly
|
||||
* called by ACPI-CA and it may have unknown locks held, we specify the
|
||||
* ACPI_ISR flag to keep it from acquiring any more mutexes (although
|
||||
* sleeping would be ok since we're in an ithread.)
|
||||
* Notify EcWaitEvent() that the status register is now fresh. If we
|
||||
* didn't do this, it wouldn't be possible to distinguish an old IBE
|
||||
* from a new one, for example when doing a write transaction (writing
|
||||
* address and then data values.)
|
||||
*/
|
||||
AcpiDisableGpe(sc->ec_gpehandle, sc->ec_gpebit, ACPI_ISR);
|
||||
|
||||
/* For interrupt (GPE) handler, don't acquire serialization lock. */
|
||||
Status = EcLock(sc, FALSE);
|
||||
if (ACPI_FAILURE(Status)) {
|
||||
ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev),
|
||||
"GpeQuery lock error: %s\n", AcpiFormatException(Status));
|
||||
return (-1);
|
||||
}
|
||||
atomic_add_int(&sc->ec_gencount, 1);
|
||||
wakeup(&sc->ec_gencount);
|
||||
|
||||
/*
|
||||
* If burst was active, but the status bit was cleared, the EC had to
|
||||
* exit burst mode for some reason. Record this for later.
|
||||
* If the EC_SCI bit of the status register is set, queue a query handler.
|
||||
* It will run the query and _Qxx method later, under the lock.
|
||||
*/
|
||||
EcStatus = EC_GET_CSR(sc);
|
||||
if (sc->ec_burstactive && (EcStatus & EC_FLAG_BURST_MODE) == 0) {
|
||||
CTR0(KTR_ACPI, "ec burst disabled in query handler");
|
||||
sc->ec_burstactive = FALSE;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the EC_SCI bit of the status register is not set, then pass
|
||||
* it along to any potential waiters as it may be an IBE/OBF event.
|
||||
* If it is set, queue a query handler.
|
||||
*/
|
||||
query_pend = FALSE;
|
||||
if ((EcStatus & EC_EVENT_SCI) == 0) {
|
||||
CTR1(KTR_ACPI, "ec event was IBE/OBF, status %#x", EcStatus);
|
||||
sc->ec_csrvalue = EcStatus;
|
||||
wakeup(&sc->ec_csrvalue);
|
||||
} else if (!sc->ec_sci_pend) {
|
||||
/* SCI bit set and no pending query handler, so schedule one. */
|
||||
CTR0(KTR_ACPI, "ec queueing gpe handler");
|
||||
if ((EcStatus & EC_EVENT_SCI) && !sc->ec_sci_pend) {
|
||||
CTR0(KTR_ACPI, "ec gpe queueing query handler");
|
||||
Status = AcpiOsExecute(OSL_GPE_HANDLER, EcGpeQueryHandler, Context);
|
||||
if (ACPI_SUCCESS(Status)) {
|
||||
if (ACPI_SUCCESS(Status))
|
||||
sc->ec_sci_pend = TRUE;
|
||||
query_pend = TRUE;
|
||||
} else
|
||||
printf("Queuing GPE query handler failed.\n");
|
||||
else
|
||||
printf("EcGpeHandler: queuing GPE query handler failed\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* If we didn't queue a query handler, which will eventually re-enable
|
||||
* the GPE, re-enable it right now so we can get more events.
|
||||
*/
|
||||
if (!query_pend) {
|
||||
Status = AcpiEnableGpe(sc->ec_gpehandle, sc->ec_gpebit, ACPI_ISR);
|
||||
if (ACPI_FAILURE(Status))
|
||||
printf("EcGpeHandler: AcpiEnableGpe failed\n");
|
||||
}
|
||||
|
||||
EcUnlock(sc);
|
||||
return (0);
|
||||
}
|
||||
|
||||
@@ -878,8 +717,19 @@ EcSpaceHandler(UINT32 Function, ACPI_PHYSICAL_ADDRESS Address, UINT32 width,
|
||||
EcAddr = Address;
|
||||
Status = AE_ERROR;
|
||||
|
||||
/* Grab serialization lock to hold across command sequence. */
|
||||
Status = EcLock(sc, TRUE);
|
||||
/*
|
||||
* If booting, check if we need to run the query handler. If so, we
|
||||
* call it directly here since our thread taskq is not active yet.
|
||||
*/
|
||||
if (cold || rebooting) {
|
||||
if ((EC_GET_CSR(sc) & EC_EVENT_SCI)) {
|
||||
CTR0(KTR_ACPI, "ec running gpe handler directly");
|
||||
EcGpeQueryHandler(sc);
|
||||
}
|
||||
}
|
||||
|
||||
/* Serialize with EcGpeQueryHandler() at transaction granularity. */
|
||||
Status = EcLock(sc);
|
||||
if (ACPI_FAILURE(Status))
|
||||
return_ACPI_STATUS (Status);
|
||||
|
||||
@@ -910,83 +760,119 @@ EcSpaceHandler(UINT32 Function, ACPI_PHYSICAL_ADDRESS Address, UINT32 width,
|
||||
}
|
||||
|
||||
static ACPI_STATUS
|
||||
EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event)
|
||||
EcCheckStatus(struct acpi_ec_softc *sc, const char *msg, EC_EVENT event)
|
||||
{
|
||||
ACPI_STATUS status;
|
||||
EC_STATUS ec_status;
|
||||
|
||||
status = AE_NO_HARDWARE_RESPONSE;
|
||||
ec_status = EC_GET_CSR(sc);
|
||||
if (sc->ec_burstactive && !(ec_status & EC_FLAG_BURST_MODE)) {
|
||||
CTR1(KTR_ACPI, "ec burst disabled in waitevent (%s)", msg);
|
||||
sc->ec_burstactive = FALSE;
|
||||
}
|
||||
if (EVENT_READY(event, ec_status)) {
|
||||
CTR2(KTR_ACPI, "ec %s wait ready, status %#x", msg, ec_status);
|
||||
status = AE_OK;
|
||||
}
|
||||
return (status);
|
||||
}
|
||||
|
||||
static ACPI_STATUS
|
||||
EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event, u_int gen_count)
|
||||
{
|
||||
EC_STATUS EcStatus;
|
||||
ACPI_STATUS Status;
|
||||
int count, i, retval, slp_ival;
|
||||
int count, i, slp_ival;
|
||||
|
||||
ACPI_SERIAL_ASSERT(ec);
|
||||
Status = AE_NO_HARDWARE_RESPONSE;
|
||||
EcStatus = 0;
|
||||
|
||||
/*
|
||||
* Poll for up to ec_poll_time microseconds since many ECs complete
|
||||
* the command quickly, especially if in burst mode.
|
||||
* The main CPU should be much faster than the EC. So the status should
|
||||
* be "not ready" when we start waiting. But if the main CPU is really
|
||||
* slow, it's possible we see the current "ready" response. Since that
|
||||
* can't be distinguished from the previous response in polled mode,
|
||||
* this is a potential issue. We really should have interrupts enabled
|
||||
* during boot so there is no ambiguity in polled mode.
|
||||
*
|
||||
* If this occurs, we add an additional delay before actually entering
|
||||
* the status checking loop, hopefully to allow the EC to go to work
|
||||
* and produce a non-stale status.
|
||||
*/
|
||||
#if 0 /* Enable this as a possible workaround if EC times out. */
|
||||
AcpiOsStall(EC_POLL_DELAY);
|
||||
#endif
|
||||
count = ec_poll_time / EC_POLL_DELAY;
|
||||
if (count <= 0)
|
||||
count = 1;
|
||||
for (i = 0; i < count; i++) {
|
||||
EcStatus = EC_GET_CSR(sc);
|
||||
if (sc->ec_burstactive && (EcStatus & EC_FLAG_BURST_MODE) == 0) {
|
||||
CTR0(KTR_ACPI, "ec burst disabled in waitevent (poll)");
|
||||
sc->ec_burstactive = FALSE;
|
||||
if (cold || rebooting || ec_polled_mode) {
|
||||
static int once;
|
||||
|
||||
if (EcCheckStatus(sc, "pre-check", Event) == AE_OK) {
|
||||
if (!once) {
|
||||
device_printf(sc->ec_dev,
|
||||
"warning: EC done before starting event wait\n");
|
||||
once = 1;
|
||||
}
|
||||
AcpiOsStall(10);
|
||||
}
|
||||
if (EVENT_READY(Event, EcStatus)) {
|
||||
CTR1(KTR_ACPI, "ec poll wait ready, status %#x", EcStatus);
|
||||
Status = AE_OK;
|
||||
break;
|
||||
}
|
||||
AcpiOsStall(EC_POLL_DELAY);
|
||||
}
|
||||
|
||||
/*
|
||||
* If we still don't have a response and we're up and running, wait up
|
||||
* to ec_timeout ms for completion, sleeping for chunks of 1 ms or the
|
||||
* smallest resolution hz supports.
|
||||
*/
|
||||
slp_ival = 0;
|
||||
if (Status != AE_OK) {
|
||||
retval = ENXIO;
|
||||
if (!cold) {
|
||||
slp_ival = hz / 1000;
|
||||
if (slp_ival != 0) {
|
||||
count = ec_timeout / slp_ival;
|
||||
} else {
|
||||
/* hz has less than 1000 Hz resolution so scale timeout. */
|
||||
slp_ival = 1;
|
||||
count = ec_timeout / (1000 / hz);
|
||||
}
|
||||
} else
|
||||
count = ec_timeout;
|
||||
/* Wait for event by polling or GPE (interrupt). */
|
||||
if (cold || rebooting || ec_polled_mode) {
|
||||
count = (ec_timeout * 1000) / EC_POLL_DELAY;
|
||||
if (count == 0)
|
||||
count = 1;
|
||||
for (i = 0; i < count; i++) {
|
||||
if (retval != 0)
|
||||
EcStatus = EC_GET_CSR(sc);
|
||||
else
|
||||
EcStatus = sc->ec_csrvalue;
|
||||
if (sc->ec_burstactive && (EcStatus & EC_FLAG_BURST_MODE) == 0) {
|
||||
CTR0(KTR_ACPI, "ec burst disabled in waitevent (slp)");
|
||||
sc->ec_burstactive = FALSE;
|
||||
}
|
||||
if (EVENT_READY(Event, EcStatus)) {
|
||||
CTR1(KTR_ACPI, "ec sleep wait ready, status %#x", EcStatus);
|
||||
Status = AE_OK;
|
||||
Status = EcCheckStatus(sc, "poll", Event);
|
||||
if (Status == AE_OK)
|
||||
break;
|
||||
AcpiOsStall(EC_POLL_DELAY);
|
||||
}
|
||||
} else {
|
||||
slp_ival = hz / 1000;
|
||||
if (slp_ival != 0) {
|
||||
count = ec_timeout;
|
||||
} else {
|
||||
/* hz has less than 1 ms resolution so scale timeout. */
|
||||
slp_ival = 1;
|
||||
count = ec_timeout / (1000 / hz);
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait for the GPE to signal the status changed, checking the
|
||||
* status register each time we get one. It's possible to get a
|
||||
* GPE for an event we're not interested in here (i.e., SCI for
|
||||
* EC query).
|
||||
*/
|
||||
for (i = 0; i < count; i++) {
|
||||
if (gen_count != sc->ec_gencount) {
|
||||
/*
|
||||
* Record new generation count. It's possible the GPE was
|
||||
* just to notify us that a query is needed and we need to
|
||||
* wait for a second GPE to signal the completion of the
|
||||
* event we are actually waiting for.
|
||||
*/
|
||||
gen_count = sc->ec_gencount;
|
||||
Status = EcCheckStatus(sc, "sleep", Event);
|
||||
if (Status == AE_OK)
|
||||
break;
|
||||
}
|
||||
if (!cold) {
|
||||
retval = msleep(&sc->ec_csrvalue, &sc->ec_mtx, PZERO, "ecpoll",
|
||||
slp_ival);
|
||||
} else
|
||||
AcpiOsStall(1000);
|
||||
tsleep(&sc->ec_gencount, PZERO, "ecgpe", slp_ival);
|
||||
}
|
||||
|
||||
/*
|
||||
* We finished waiting for the GPE and it never arrived. Try to
|
||||
* read the register once and trust whatever value we got. This is
|
||||
* the best we can do at this point. Then, force polled mode on
|
||||
* since this system doesn't appear to generate GPEs.
|
||||
*/
|
||||
if (Status != AE_OK) {
|
||||
Status = EcCheckStatus(sc, "sleep_end", Event);
|
||||
device_printf(sc->ec_dev,
|
||||
"wait timed out (%sresponse), forcing polled mode\n",
|
||||
Status == AE_OK ? "" : "no ");
|
||||
ec_polled_mode = TRUE;
|
||||
}
|
||||
}
|
||||
|
||||
if (Status != AE_OK)
|
||||
CTR0(KTR_ACPI, "error: ec wait timed out");
|
||||
return (Status);
|
||||
}
|
||||
}
|
||||
|
||||
static ACPI_STATUS
|
||||
EcCommand(struct acpi_ec_softc *sc, EC_COMMAND cmd)
|
||||
@@ -994,6 +880,7 @@ EcCommand(struct acpi_ec_softc *sc, EC_COMMAND cmd)
|
||||
ACPI_STATUS status;
|
||||
EC_EVENT event;
|
||||
EC_STATUS ec_status;
|
||||
u_int gen_count;
|
||||
|
||||
ACPI_SERIAL_ASSERT(ec);
|
||||
|
||||
@@ -1013,15 +900,15 @@ EcCommand(struct acpi_ec_softc *sc, EC_COMMAND cmd)
|
||||
event = EC_EVENT_OUTPUT_BUFFER_FULL;
|
||||
break;
|
||||
default:
|
||||
ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev),
|
||||
"EcCommand: Invalid command %#x\n", cmd);
|
||||
device_printf(sc->ec_dev, "EcCommand: invalid command %#x\n", cmd);
|
||||
return (AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
/* Run the command and wait for the chosen event. */
|
||||
CTR1(KTR_ACPI, "ec running command %#x", cmd);
|
||||
gen_count = sc->ec_gencount;
|
||||
EC_SET_CSR(sc, cmd);
|
||||
status = EcWaitEvent(sc, event);
|
||||
status = EcWaitEvent(sc, event, gen_count);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
/* If we succeeded, burst flag should now be present. */
|
||||
if (cmd == EC_COMMAND_BURST_ENABLE) {
|
||||
@@ -1029,11 +916,8 @@ EcCommand(struct acpi_ec_softc *sc, EC_COMMAND cmd)
|
||||
if ((ec_status & EC_FLAG_BURST_MODE) == 0)
|
||||
status = AE_ERROR;
|
||||
}
|
||||
} else {
|
||||
ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev),
|
||||
"EcCommand: no response to %#x\n", cmd);
|
||||
}
|
||||
|
||||
} else
|
||||
device_printf(sc->ec_dev, "EcCommand: no response to %#x\n", cmd);
|
||||
return (status);
|
||||
}
|
||||
|
||||
@@ -1042,6 +926,7 @@ EcRead(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data)
|
||||
{
|
||||
ACPI_STATUS status;
|
||||
UINT8 data;
|
||||
u_int gen_count;
|
||||
|
||||
ACPI_SERIAL_ASSERT(ec);
|
||||
CTR1(KTR_ACPI, "ec read from %#x", Address);
|
||||
@@ -1060,32 +945,32 @@ EcRead(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data)
|
||||
if (ACPI_FAILURE(status))
|
||||
return (status);
|
||||
|
||||
gen_count = sc->ec_gencount;
|
||||
EC_SET_DATA(sc, Address);
|
||||
status = EcWaitEvent(sc, EC_EVENT_OUTPUT_BUFFER_FULL);
|
||||
status = EcWaitEvent(sc, EC_EVENT_OUTPUT_BUFFER_FULL, gen_count);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev),
|
||||
"EcRead: Failed waiting for EC to send data.\n");
|
||||
device_printf(sc->ec_dev, "EcRead: failed waiting to get data\n");
|
||||
return (status);
|
||||
}
|
||||
|
||||
*Data = EC_GET_DATA(sc);
|
||||
|
||||
if (sc->ec_burstactive) {
|
||||
sc->ec_burstactive = FALSE;
|
||||
status = EcCommand(sc, EC_COMMAND_BURST_DISABLE);
|
||||
if (ACPI_FAILURE(status))
|
||||
return (status);
|
||||
sc->ec_burstactive = FALSE;
|
||||
CTR0(KTR_ACPI, "ec disabled burst ok");
|
||||
}
|
||||
|
||||
return (AE_OK);
|
||||
}
|
||||
}
|
||||
|
||||
static ACPI_STATUS
|
||||
EcWrite(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data)
|
||||
{
|
||||
ACPI_STATUS status;
|
||||
UINT8 data;
|
||||
u_int gen_count;
|
||||
|
||||
ACPI_SERIAL_ASSERT(ec);
|
||||
CTR2(KTR_ACPI, "ec write to %#x, data %#x", Address, *Data);
|
||||
@@ -1104,27 +989,27 @@ EcWrite(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data)
|
||||
if (ACPI_FAILURE(status))
|
||||
return (status);
|
||||
|
||||
gen_count = sc->ec_gencount;
|
||||
EC_SET_DATA(sc, Address);
|
||||
status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY);
|
||||
status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, gen_count);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev),
|
||||
"EcRead: Failed waiting for EC to process address\n");
|
||||
device_printf(sc->ec_dev, "EcRead: failed waiting for sent address\n");
|
||||
return (status);
|
||||
}
|
||||
|
||||
gen_count = sc->ec_gencount;
|
||||
EC_SET_DATA(sc, *Data);
|
||||
status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY);
|
||||
status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, gen_count);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev),
|
||||
"EcWrite: Failed waiting for EC to process data\n");
|
||||
device_printf(sc->ec_dev, "EcWrite: failed waiting for sent data\n");
|
||||
return (status);
|
||||
}
|
||||
|
||||
if (sc->ec_burstactive) {
|
||||
sc->ec_burstactive = FALSE;
|
||||
status = EcCommand(sc, EC_COMMAND_BURST_DISABLE);
|
||||
if (ACPI_FAILURE(status))
|
||||
return (status);
|
||||
sc->ec_burstactive = FALSE;
|
||||
CTR0(KTR_ACPI, "ec disabled burst ok");
|
||||
}
|
||||
|
@@ -67,6 +67,9 @@ DBSRC+= dmbuffer.c dmnames.c dmopcode.c dmobject.c dmresrc.c dmresrcl.c
|
||||
DBSRC+= dmresrcs.c dmutils.c dmwalk.c
|
||||
|
||||
.if !defined(KERNBUILDDIR)
|
||||
.if KTR
|
||||
CFLAGS+=-DKTR
|
||||
.endif
|
||||
.if ACPI_MAX_THREADS
|
||||
CFLAGS+=-DACPI_MAX_THREADS=${ACPI_MAX_THREADS}
|
||||
.endif