Allwinner: mmc: Rename driver to aw_mmc and add a man page for it

Reviewed by:	bcr (manpages)
Differential Revision:	https://reviews.freebsd.org/D13616
Emmanuel Vadot 2017-12-26 12:06:56 +00:00
parent bc5719c817
commit b5be541f1d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=327198
7 changed files with 1204 additions and 1127 deletions

share/man/man4/Makefile

@@ -70,6 +70,7 @@ MAN= aac.4 \
audit.4 \
auditpipe.4 \
aue.4 \
aw_mmc.4 \
aw_rtc.4 \
axe.4 \
axge.4 \

share/man/man4/aw_mmc.4 (new file, 76 lines)

@@ -0,0 +1,76 @@
.\"-
.\" Copyright (c) 2017 Emmanuel Vadot <manu@freebsd.org>
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd December 25, 2017
.Dt AW_MMC 4
.Os
.Sh NAME
.Nm aw_mmc
.Nd driver for the SD/MMC controller in Allwinner SoCs
.Sh SYNOPSIS
.Cd "device mmc"
.Sh DESCRIPTION
The
.Nm
device driver provides support for the Allwinner SD/MMC host controller.
.Sh HARDWARE
The current version of the
.Nm
driver supports the SD/MMC controller with one of the following compatible strings:
.Pp
.Bl -bullet -compact
.It
allwinner,sun4i-a10-mmc
.It
allwinner,sun5i-a13-mmc
.It
allwinner,sun7i-a20-mmc
.It
allwinner,sun50i-a64-mmc
.El
.Sh SYSCTL VARIABLES
The following variables are available via
.Xr sysctl 8 :
.Bl -tag -width indent
.It Va dev.aw_mmc.req_timeout
Request timeout in seconds (default: 10).
.El
.Sh SEE ALSO
.Xr fdt 4 ,
.Xr mmc 4
.Sh HISTORY
The
.Nm
device driver first appeared in
.Fx 10.0 .
.Sh AUTHORS
The
.Nm
device driver was originally written by
.An Alexander Fedorov Aq Mt alexander.fedorov@rtlservice.com .
Later work and this manual page were done by
.An Emmanuel Vadot Aq Mt manu@freebsd.org .
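
The request timeout documented above is registered by the driver as a read/write integer on the controller's per-device sysctl tree, so it can be inspected or tuned at run time with sysctl(8) or programmatically through sysctlbyname(3). The following is a minimal userland sketch; the OID dev.aw_mmc.0.req_timeout assumes the first controller instance (unit 0), so adjust the unit number for other instances.

/*
 * Minimal sketch: read and raise the aw_mmc request timeout through
 * sysctlbyname(3).  The OID "dev.aw_mmc.0.req_timeout" assumes unit 0;
 * other units appear as dev.aw_mmc.<unit>.req_timeout.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	int cur, desired = 20;
	size_t len = sizeof(cur);

	if (sysctlbyname("dev.aw_mmc.0.req_timeout", &cur, &len, NULL, 0) == -1)
		err(1, "sysctlbyname(read)");
	printf("current request timeout: %d s\n", cur);

	/* Writing requires appropriate privilege; the node is CTLFLAG_RW. */
	if (sysctlbyname("dev.aw_mmc.0.req_timeout", NULL, NULL, &desired,
	    sizeof(desired)) == -1)
		err(1, "sysctlbyname(write)");
	return (0);
}

From the command line, sysctl dev.aw_mmc.0.req_timeout=20 has the same effect.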

sys/arm/allwinner/a10_mmc.c (deleted, 922 lines)

@@ -1,922 +0,0 @@
/*-
* Copyright (c) 2013 Alexander Fedorov
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcbrvar.h>
#include <arm/allwinner/a10_mmc.h>
#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>
#define A10_MMC_MEMRES 0
#define A10_MMC_IRQRES 1
#define A10_MMC_RESSZ 2
#define A10_MMC_DMA_SEGS ((MAXPHYS / PAGE_SIZE) + 1)
#define A10_MMC_DMA_MAX_SIZE 0x2000
#define A10_MMC_DMA_FTRGLEVEL 0x20070008
#define A10_MMC_RESET_RETRY 1000
#define CARD_ID_FREQUENCY 400000
static struct ofw_compat_data compat_data[] = {
{"allwinner,sun4i-a10-mmc", 1},
{"allwinner,sun5i-a13-mmc", 1},
{"allwinner,sun7i-a20-mmc", 1},
{"allwinner,sun50i-a64-mmc", 1},
{NULL, 0}
};
struct a10_mmc_softc {
device_t a10_dev;
clk_t a10_clk_ahb;
clk_t a10_clk_mmc;
hwreset_t a10_rst_ahb;
int a10_bus_busy;
int a10_resid;
int a10_timeout;
struct callout a10_timeoutc;
struct mmc_host a10_host;
struct mmc_request * a10_req;
struct mtx a10_mtx;
struct resource * a10_res[A10_MMC_RESSZ];
uint32_t a10_intr;
uint32_t a10_intr_wait;
void * a10_intrhand;
/* Fields required for DMA access. */
bus_addr_t a10_dma_desc_phys;
bus_dmamap_t a10_dma_map;
bus_dma_tag_t a10_dma_tag;
void * a10_dma_desc;
bus_dmamap_t a10_dma_buf_map;
bus_dma_tag_t a10_dma_buf_tag;
int a10_dma_map_err;
};
static struct resource_spec a10_mmc_res_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
static int a10_mmc_probe(device_t);
static int a10_mmc_attach(device_t);
static int a10_mmc_detach(device_t);
static int a10_mmc_setup_dma(struct a10_mmc_softc *);
static int a10_mmc_reset(struct a10_mmc_softc *);
static void a10_mmc_intr(void *);
static int a10_mmc_update_clock(struct a10_mmc_softc *, uint32_t);
static int a10_mmc_update_ios(device_t, device_t);
static int a10_mmc_request(device_t, device_t, struct mmc_request *);
static int a10_mmc_get_ro(device_t, device_t);
static int a10_mmc_acquire_host(device_t, device_t);
static int a10_mmc_release_host(device_t, device_t);
#define A10_MMC_LOCK(_sc) mtx_lock(&(_sc)->a10_mtx)
#define A10_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->a10_mtx)
#define A10_MMC_READ_4(_sc, _reg) \
bus_read_4((_sc)->a10_res[A10_MMC_MEMRES], _reg)
#define A10_MMC_WRITE_4(_sc, _reg, _value) \
bus_write_4((_sc)->a10_res[A10_MMC_MEMRES], _reg, _value)
static int
a10_mmc_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
return (ENXIO);
device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
return (BUS_PROBE_DEFAULT);
}
static int
a10_mmc_attach(device_t dev)
{
device_t child;
struct a10_mmc_softc *sc;
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *tree;
uint32_t bus_width;
phandle_t node;
int error;
node = ofw_bus_get_node(dev);
sc = device_get_softc(dev);
sc->a10_dev = dev;
sc->a10_req = NULL;
if (bus_alloc_resources(dev, a10_mmc_res_spec, sc->a10_res) != 0) {
device_printf(dev, "cannot allocate device resources\n");
return (ENXIO);
}
if (bus_setup_intr(dev, sc->a10_res[A10_MMC_IRQRES],
INTR_TYPE_MISC | INTR_MPSAFE, NULL, a10_mmc_intr, sc,
&sc->a10_intrhand)) {
bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
device_printf(dev, "cannot setup interrupt handler\n");
return (ENXIO);
}
mtx_init(&sc->a10_mtx, device_get_nameunit(sc->a10_dev), "a10_mmc",
MTX_DEF);
callout_init_mtx(&sc->a10_timeoutc, &sc->a10_mtx, 0);
/* De-assert reset */
if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->a10_rst_ahb) == 0) {
error = hwreset_deassert(sc->a10_rst_ahb);
if (error != 0) {
device_printf(dev, "cannot de-assert reset\n");
goto fail;
}
}
/* Activate the module clock. */
error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->a10_clk_ahb);
if (error != 0) {
device_printf(dev, "cannot get ahb clock\n");
goto fail;
}
error = clk_enable(sc->a10_clk_ahb);
if (error != 0) {
device_printf(dev, "cannot enable ahb clock\n");
goto fail;
}
error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->a10_clk_mmc);
if (error != 0) {
device_printf(dev, "cannot get mmc clock\n");
goto fail;
}
error = clk_set_freq(sc->a10_clk_mmc, CARD_ID_FREQUENCY,
CLK_SET_ROUND_DOWN);
if (error != 0) {
device_printf(dev, "cannot init mmc clock\n");
goto fail;
}
error = clk_enable(sc->a10_clk_mmc);
if (error != 0) {
device_printf(dev, "cannot enable mmc clock\n");
goto fail;
}
sc->a10_timeout = 10;
ctx = device_get_sysctl_ctx(dev);
tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
&sc->a10_timeout, 0, "Request timeout in seconds");
/* Hardware reset */
A10_MMC_WRITE_4(sc, A10_MMC_HWRST, 1);
DELAY(100);
A10_MMC_WRITE_4(sc, A10_MMC_HWRST, 0);
DELAY(500);
/* Soft Reset controller. */
if (a10_mmc_reset(sc) != 0) {
device_printf(dev, "cannot reset the controller\n");
goto fail;
}
if (a10_mmc_setup_dma(sc) != 0) {
device_printf(sc->a10_dev, "Couldn't setup DMA!\n");
goto fail;
}
if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
bus_width = 4;
sc->a10_host.f_min = 400000;
sc->a10_host.f_max = 52000000;
sc->a10_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
sc->a10_host.mode = mode_sd;
sc->a10_host.caps = MMC_CAP_HSPEED;
if (bus_width >= 4)
sc->a10_host.caps |= MMC_CAP_4_BIT_DATA;
if (bus_width >= 8)
sc->a10_host.caps |= MMC_CAP_8_BIT_DATA;
child = device_add_child(dev, "mmc", -1);
if (child == NULL) {
device_printf(dev, "attaching MMC bus failed!\n");
goto fail;
}
if (device_probe_and_attach(child) != 0) {
device_printf(dev, "attaching MMC child failed!\n");
device_delete_child(dev, child);
goto fail;
}
return (0);
fail:
callout_drain(&sc->a10_timeoutc);
mtx_destroy(&sc->a10_mtx);
bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES], sc->a10_intrhand);
bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
return (ENXIO);
}
static int
a10_mmc_detach(device_t dev)
{
return (EBUSY);
}
static void
a10_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
struct a10_mmc_softc *sc;
sc = (struct a10_mmc_softc *)arg;
if (err) {
sc->a10_dma_map_err = err;
return;
}
sc->a10_dma_desc_phys = segs[0].ds_addr;
}
static int
a10_mmc_setup_dma(struct a10_mmc_softc *sc)
{
int dma_desc_size, error;
/* Allocate the DMA descriptor memory. */
dma_desc_size = sizeof(struct a10_mmc_dma_desc) * A10_MMC_DMA_SEGS;
error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev),
A10_MMC_DMA_ALIGN, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->a10_dma_tag);
if (error)
return (error);
error = bus_dmamem_alloc(sc->a10_dma_tag, &sc->a10_dma_desc,
BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->a10_dma_map);
if (error)
return (error);
error = bus_dmamap_load(sc->a10_dma_tag, sc->a10_dma_map,
sc->a10_dma_desc, dma_desc_size, a10_dma_desc_cb, sc, 0);
if (error)
return (error);
if (sc->a10_dma_map_err)
return (sc->a10_dma_map_err);
/* Create the DMA map for data transfers. */
error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev),
A10_MMC_DMA_ALIGN, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS, A10_MMC_DMA_SEGS,
A10_MMC_DMA_MAX_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL,
&sc->a10_dma_buf_tag);
if (error)
return (error);
error = bus_dmamap_create(sc->a10_dma_buf_tag, 0,
&sc->a10_dma_buf_map);
if (error)
return (error);
return (0);
}
static void
a10_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
int i;
struct a10_mmc_dma_desc *dma_desc;
struct a10_mmc_softc *sc;
sc = (struct a10_mmc_softc *)arg;
sc->a10_dma_map_err = err;
if (err)
return;
dma_desc = sc->a10_dma_desc;
for (i = 0; i < nsegs; i++) {
dma_desc[i].buf_size = segs[i].ds_len;
dma_desc[i].buf_addr = segs[i].ds_addr;
dma_desc[i].config = A10_MMC_DMA_CONFIG_CH |
A10_MMC_DMA_CONFIG_OWN;
if (i == 0)
dma_desc[i].config |= A10_MMC_DMA_CONFIG_FD;
if (i < (nsegs - 1)) {
dma_desc[i].config |= A10_MMC_DMA_CONFIG_DIC;
dma_desc[i].next = sc->a10_dma_desc_phys +
((i + 1) * sizeof(struct a10_mmc_dma_desc));
} else {
dma_desc[i].config |= A10_MMC_DMA_CONFIG_LD |
A10_MMC_DMA_CONFIG_ER;
dma_desc[i].next = 0;
}
}
}
static int
a10_mmc_prepare_dma(struct a10_mmc_softc *sc)
{
bus_dmasync_op_t sync_op;
int error;
struct mmc_command *cmd;
uint32_t val;
cmd = sc->a10_req->cmd;
if (cmd->data->len > A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS)
return (EFBIG);
error = bus_dmamap_load(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
cmd->data->data, cmd->data->len, a10_dma_cb, sc, 0);
if (error)
return (error);
if (sc->a10_dma_map_err)
return (sc->a10_dma_map_err);
if (cmd->data->flags & MMC_DATA_WRITE)
sync_op = BUS_DMASYNC_PREWRITE;
else
sync_op = BUS_DMASYNC_PREREAD;
bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map, sync_op);
bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map, BUS_DMASYNC_PREWRITE);
/* Enable DMA */
val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
val &= ~A10_MMC_CTRL_FIFO_AC_MOD;
val |= A10_MMC_CTRL_DMA_ENB;
A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val);
/* Reset DMA */
val |= A10_MMC_CTRL_DMA_RST;
A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val);
A10_MMC_WRITE_4(sc, A10_MMC_DMAC, A10_MMC_DMAC_IDMAC_SOFT_RST);
A10_MMC_WRITE_4(sc, A10_MMC_DMAC,
A10_MMC_DMAC_IDMAC_IDMA_ON | A10_MMC_DMAC_IDMAC_FIX_BURST);
/* Enable RX or TX DMA interrupt */
if (cmd->data->flags & MMC_DATA_WRITE)
val |= A10_MMC_IDST_TX_INT;
else
val |= A10_MMC_IDST_RX_INT;
A10_MMC_WRITE_4(sc, A10_MMC_IDIE, val);
/* Set DMA descriptor list address */
A10_MMC_WRITE_4(sc, A10_MMC_DLBA, sc->a10_dma_desc_phys);
/* FIFO trigger level */
A10_MMC_WRITE_4(sc, A10_MMC_FWLR, A10_MMC_DMA_FTRGLEVEL);
return (0);
}
static int
a10_mmc_reset(struct a10_mmc_softc *sc)
{
int timeout;
A10_MMC_WRITE_4(sc, A10_MMC_GCTL, A10_MMC_RESET);
timeout = 1000;
while (--timeout > 0) {
if ((A10_MMC_READ_4(sc, A10_MMC_GCTL) & A10_MMC_RESET) == 0)
break;
DELAY(100);
}
if (timeout == 0)
return (ETIMEDOUT);
/* Set the timeout. */
A10_MMC_WRITE_4(sc, A10_MMC_TMOR,
A10_MMC_TMOR_DTO_LMT_SHIFT(A10_MMC_TMOR_DTO_LMT_MASK) |
A10_MMC_TMOR_RTO_LMT_SHIFT(A10_MMC_TMOR_RTO_LMT_MASK));
/* Clear pending interrupts. */
A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
A10_MMC_WRITE_4(sc, A10_MMC_IDST, 0xffffffff);
/* Unmask interrupts. */
A10_MMC_WRITE_4(sc, A10_MMC_IMKR,
A10_MMC_INT_CMD_DONE | A10_MMC_INT_ERR_BIT |
A10_MMC_INT_DATA_OVER | A10_MMC_INT_AUTO_STOP_DONE);
/* Enable interrupts and AHB access. */
A10_MMC_WRITE_4(sc, A10_MMC_GCTL,
A10_MMC_READ_4(sc, A10_MMC_GCTL) | A10_MMC_CTRL_INT_ENB);
return (0);
}
static void
a10_mmc_req_done(struct a10_mmc_softc *sc)
{
struct mmc_command *cmd;
struct mmc_request *req;
uint32_t val, mask;
int retry;
cmd = sc->a10_req->cmd;
if (cmd->error != MMC_ERR_NONE) {
/* Reset the FIFO and DMA engines. */
mask = A10_MMC_CTRL_FIFO_RST | A10_MMC_CTRL_DMA_RST;
val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val | mask);
retry = A10_MMC_RESET_RETRY;
while (--retry > 0) {
val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
if ((val & mask) == 0)
break;
DELAY(10);
}
if (retry == 0)
device_printf(sc->a10_dev,
"timeout resetting DMA/FIFO\n");
a10_mmc_update_clock(sc, 1);
}
req = sc->a10_req;
callout_stop(&sc->a10_timeoutc);
sc->a10_req = NULL;
sc->a10_intr = 0;
sc->a10_resid = 0;
sc->a10_dma_map_err = 0;
sc->a10_intr_wait = 0;
req->done(req);
}
static void
a10_mmc_req_ok(struct a10_mmc_softc *sc)
{
int timeout;
struct mmc_command *cmd;
uint32_t status;
timeout = 1000;
while (--timeout > 0) {
status = A10_MMC_READ_4(sc, A10_MMC_STAR);
if ((status & A10_MMC_STAR_CARD_BUSY) == 0)
break;
DELAY(1000);
}
cmd = sc->a10_req->cmd;
if (timeout == 0) {
cmd->error = MMC_ERR_FAILED;
a10_mmc_req_done(sc);
return;
}
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP3);
cmd->resp[1] = A10_MMC_READ_4(sc, A10_MMC_RESP2);
cmd->resp[2] = A10_MMC_READ_4(sc, A10_MMC_RESP1);
cmd->resp[3] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
} else
cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
}
/* All data has been transferred ? */
if (cmd->data != NULL && (sc->a10_resid << 2) < cmd->data->len)
cmd->error = MMC_ERR_FAILED;
a10_mmc_req_done(sc);
}
static void
a10_mmc_timeout(void *arg)
{
struct a10_mmc_softc *sc;
sc = (struct a10_mmc_softc *)arg;
if (sc->a10_req != NULL) {
device_printf(sc->a10_dev, "controller timeout\n");
sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
a10_mmc_req_done(sc);
} else
device_printf(sc->a10_dev,
"Spurious timeout - no active request\n");
}
static void
a10_mmc_intr(void *arg)
{
bus_dmasync_op_t sync_op;
struct a10_mmc_softc *sc;
struct mmc_data *data;
uint32_t idst, imask, rint;
sc = (struct a10_mmc_softc *)arg;
A10_MMC_LOCK(sc);
rint = A10_MMC_READ_4(sc, A10_MMC_RISR);
idst = A10_MMC_READ_4(sc, A10_MMC_IDST);
imask = A10_MMC_READ_4(sc, A10_MMC_IMKR);
if (idst == 0 && imask == 0 && rint == 0) {
A10_MMC_UNLOCK(sc);
return;
}
#ifdef DEBUG
device_printf(sc->a10_dev, "idst: %#x, imask: %#x, rint: %#x\n",
idst, imask, rint);
#endif
if (sc->a10_req == NULL) {
device_printf(sc->a10_dev,
"Spurious interrupt - no active request, rint: 0x%08X\n",
rint);
goto end;
}
if (rint & A10_MMC_INT_ERR_BIT) {
device_printf(sc->a10_dev, "error rint: 0x%08X\n", rint);
if (rint & A10_MMC_INT_RESP_TIMEOUT)
sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
else
sc->a10_req->cmd->error = MMC_ERR_FAILED;
a10_mmc_req_done(sc);
goto end;
}
if (idst & A10_MMC_IDST_ERROR) {
device_printf(sc->a10_dev, "error idst: 0x%08x\n", idst);
sc->a10_req->cmd->error = MMC_ERR_FAILED;
a10_mmc_req_done(sc);
goto end;
}
sc->a10_intr |= rint;
data = sc->a10_req->cmd->data;
if (data != NULL && (idst & A10_MMC_IDST_COMPLETE) != 0) {
if (data->flags & MMC_DATA_WRITE)
sync_op = BUS_DMASYNC_POSTWRITE;
else
sync_op = BUS_DMASYNC_POSTREAD;
bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
sync_op);
bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->a10_dma_buf_tag, sc->a10_dma_buf_map);
sc->a10_resid = data->len >> 2;
}
if ((sc->a10_intr & sc->a10_intr_wait) == sc->a10_intr_wait)
a10_mmc_req_ok(sc);
end:
A10_MMC_WRITE_4(sc, A10_MMC_IDST, idst);
A10_MMC_WRITE_4(sc, A10_MMC_RISR, rint);
A10_MMC_UNLOCK(sc);
}
static int
a10_mmc_request(device_t bus, device_t child, struct mmc_request *req)
{
int blksz;
struct a10_mmc_softc *sc;
struct mmc_command *cmd;
uint32_t cmdreg;
int err;
sc = device_get_softc(bus);
A10_MMC_LOCK(sc);
if (sc->a10_req) {
A10_MMC_UNLOCK(sc);
return (EBUSY);
}
sc->a10_req = req;
cmd = req->cmd;
cmdreg = A10_MMC_CMDR_LOAD;
if (cmd->opcode == MMC_GO_IDLE_STATE)
cmdreg |= A10_MMC_CMDR_SEND_INIT_SEQ;
if (cmd->flags & MMC_RSP_PRESENT)
cmdreg |= A10_MMC_CMDR_RESP_RCV;
if (cmd->flags & MMC_RSP_136)
cmdreg |= A10_MMC_CMDR_LONG_RESP;
if (cmd->flags & MMC_RSP_CRC)
cmdreg |= A10_MMC_CMDR_CHK_RESP_CRC;
sc->a10_intr = 0;
sc->a10_resid = 0;
sc->a10_intr_wait = A10_MMC_INT_CMD_DONE;
cmd->error = MMC_ERR_NONE;
if (cmd->data != NULL) {
sc->a10_intr_wait |= A10_MMC_INT_DATA_OVER;
cmdreg |= A10_MMC_CMDR_DATA_TRANS | A10_MMC_CMDR_WAIT_PRE_OVER;
if (cmd->data->flags & MMC_DATA_MULTI) {
cmdreg |= A10_MMC_CMDR_STOP_CMD_FLAG;
sc->a10_intr_wait |= A10_MMC_INT_AUTO_STOP_DONE;
}
if (cmd->data->flags & MMC_DATA_WRITE)
cmdreg |= A10_MMC_CMDR_DIR_WRITE;
blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
A10_MMC_WRITE_4(sc, A10_MMC_BKSR, blksz);
A10_MMC_WRITE_4(sc, A10_MMC_BYCR, cmd->data->len);
err = a10_mmc_prepare_dma(sc);
if (err != 0)
device_printf(sc->a10_dev, "prepare_dma failed: %d\n", err);
}
A10_MMC_WRITE_4(sc, A10_MMC_CAGR, cmd->arg);
A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg | cmd->opcode);
callout_reset(&sc->a10_timeoutc, sc->a10_timeout * hz,
a10_mmc_timeout, sc);
A10_MMC_UNLOCK(sc);
return (0);
}
static int
a10_mmc_read_ivar(device_t bus, device_t child, int which,
uintptr_t *result)
{
struct a10_mmc_softc *sc;
sc = device_get_softc(bus);
switch (which) {
default:
return (EINVAL);
case MMCBR_IVAR_BUS_MODE:
*(int *)result = sc->a10_host.ios.bus_mode;
break;
case MMCBR_IVAR_BUS_WIDTH:
*(int *)result = sc->a10_host.ios.bus_width;
break;
case MMCBR_IVAR_CHIP_SELECT:
*(int *)result = sc->a10_host.ios.chip_select;
break;
case MMCBR_IVAR_CLOCK:
*(int *)result = sc->a10_host.ios.clock;
break;
case MMCBR_IVAR_F_MIN:
*(int *)result = sc->a10_host.f_min;
break;
case MMCBR_IVAR_F_MAX:
*(int *)result = sc->a10_host.f_max;
break;
case MMCBR_IVAR_HOST_OCR:
*(int *)result = sc->a10_host.host_ocr;
break;
case MMCBR_IVAR_MODE:
*(int *)result = sc->a10_host.mode;
break;
case MMCBR_IVAR_OCR:
*(int *)result = sc->a10_host.ocr;
break;
case MMCBR_IVAR_POWER_MODE:
*(int *)result = sc->a10_host.ios.power_mode;
break;
case MMCBR_IVAR_VDD:
*(int *)result = sc->a10_host.ios.vdd;
break;
case MMCBR_IVAR_CAPS:
*(int *)result = sc->a10_host.caps;
break;
case MMCBR_IVAR_MAX_DATA:
*(int *)result = 65535;
break;
}
return (0);
}
static int
a10_mmc_write_ivar(device_t bus, device_t child, int which,
uintptr_t value)
{
struct a10_mmc_softc *sc;
sc = device_get_softc(bus);
switch (which) {
default:
return (EINVAL);
case MMCBR_IVAR_BUS_MODE:
sc->a10_host.ios.bus_mode = value;
break;
case MMCBR_IVAR_BUS_WIDTH:
sc->a10_host.ios.bus_width = value;
break;
case MMCBR_IVAR_CHIP_SELECT:
sc->a10_host.ios.chip_select = value;
break;
case MMCBR_IVAR_CLOCK:
sc->a10_host.ios.clock = value;
break;
case MMCBR_IVAR_MODE:
sc->a10_host.mode = value;
break;
case MMCBR_IVAR_OCR:
sc->a10_host.ocr = value;
break;
case MMCBR_IVAR_POWER_MODE:
sc->a10_host.ios.power_mode = value;
break;
case MMCBR_IVAR_VDD:
sc->a10_host.ios.vdd = value;
break;
/* These are read-only */
case MMCBR_IVAR_CAPS:
case MMCBR_IVAR_HOST_OCR:
case MMCBR_IVAR_F_MIN:
case MMCBR_IVAR_F_MAX:
case MMCBR_IVAR_MAX_DATA:
return (EINVAL);
}
return (0);
}
static int
a10_mmc_update_clock(struct a10_mmc_softc *sc, uint32_t clkon)
{
uint32_t cmdreg;
int retry;
uint32_t ckcr;
ckcr = A10_MMC_READ_4(sc, A10_MMC_CKCR);
ckcr &= ~(A10_MMC_CKCR_CCLK_ENB | A10_MMC_CKCR_CCLK_CTRL);
if (clkon)
ckcr |= A10_MMC_CKCR_CCLK_ENB;
A10_MMC_WRITE_4(sc, A10_MMC_CKCR, ckcr);
cmdreg = A10_MMC_CMDR_LOAD | A10_MMC_CMDR_PRG_CLK |
A10_MMC_CMDR_WAIT_PRE_OVER;
A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg);
retry = 0xfffff;
while (--retry > 0) {
if ((A10_MMC_READ_4(sc, A10_MMC_CMDR) & A10_MMC_CMDR_LOAD) == 0) {
A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
return (0);
}
DELAY(10);
}
A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
device_printf(sc->a10_dev, "timeout updating clock\n");
return (ETIMEDOUT);
}
static int
a10_mmc_update_ios(device_t bus, device_t child)
{
int error;
struct a10_mmc_softc *sc;
struct mmc_ios *ios;
uint32_t ckcr;
sc = device_get_softc(bus);
ios = &sc->a10_host.ios;
/* Set the bus width. */
switch (ios->bus_width) {
case bus_width_1:
A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR1);
break;
case bus_width_4:
A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR4);
break;
case bus_width_8:
A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR8);
break;
}
if (ios->clock) {
/* Disable clock */
error = a10_mmc_update_clock(sc, 0);
if (error != 0)
return (error);
/* Reset the divider. */
ckcr = A10_MMC_READ_4(sc, A10_MMC_CKCR);
ckcr &= ~A10_MMC_CKCR_CCLK_DIV;
A10_MMC_WRITE_4(sc, A10_MMC_CKCR, ckcr);
/* Set the MMC clock. */
error = clk_set_freq(sc->a10_clk_mmc, ios->clock,
CLK_SET_ROUND_DOWN);
if (error != 0) {
device_printf(sc->a10_dev,
"failed to set frequency to %u Hz: %d\n",
ios->clock, error);
return (error);
}
/* Enable clock. */
error = a10_mmc_update_clock(sc, 1);
if (error != 0)
return (error);
}
return (0);
}
static int
a10_mmc_get_ro(device_t bus, device_t child)
{
return (0);
}
static int
a10_mmc_acquire_host(device_t bus, device_t child)
{
struct a10_mmc_softc *sc;
int error;
sc = device_get_softc(bus);
A10_MMC_LOCK(sc);
while (sc->a10_bus_busy) {
error = msleep(sc, &sc->a10_mtx, PCATCH, "mmchw", 0);
if (error != 0) {
A10_MMC_UNLOCK(sc);
return (error);
}
}
sc->a10_bus_busy++;
A10_MMC_UNLOCK(sc);
return (0);
}
static int
a10_mmc_release_host(device_t bus, device_t child)
{
struct a10_mmc_softc *sc;
sc = device_get_softc(bus);
A10_MMC_LOCK(sc);
sc->a10_bus_busy--;
wakeup(sc);
A10_MMC_UNLOCK(sc);
return (0);
}
static device_method_t a10_mmc_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, a10_mmc_probe),
DEVMETHOD(device_attach, a10_mmc_attach),
DEVMETHOD(device_detach, a10_mmc_detach),
/* Bus interface */
DEVMETHOD(bus_read_ivar, a10_mmc_read_ivar),
DEVMETHOD(bus_write_ivar, a10_mmc_write_ivar),
/* MMC bridge interface */
DEVMETHOD(mmcbr_update_ios, a10_mmc_update_ios),
DEVMETHOD(mmcbr_request, a10_mmc_request),
DEVMETHOD(mmcbr_get_ro, a10_mmc_get_ro),
DEVMETHOD(mmcbr_acquire_host, a10_mmc_acquire_host),
DEVMETHOD(mmcbr_release_host, a10_mmc_release_host),
DEVMETHOD_END
};
static devclass_t a10_mmc_devclass;
static driver_t a10_mmc_driver = {
"a10_mmc",
a10_mmc_methods,
sizeof(struct a10_mmc_softc),
};
DRIVER_MODULE(a10_mmc, simplebus, a10_mmc_driver, a10_mmc_devclass, NULL,
NULL);
MMC_DECLARE_BRIDGE(a10_mmc);

sys/arm/allwinner/a10_mmc.h (deleted, 204 lines)

@@ -1,204 +0,0 @@
/*-
* Copyright (c) 2013 Alexander Fedorov <alexander.fedorov@rtlservice.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _A10_MMC_H_
#define _A10_MMC_H_
#define A10_MMC_GCTL 0x00 /* Control Register */
#define A10_MMC_CKCR 0x04 /* Clock Control Register */
#define A10_MMC_TMOR 0x08 /* Timeout Register */
#define A10_MMC_BWDR 0x0C /* Bus Width Register */
#define A10_MMC_BKSR 0x10 /* Block Size Register */
#define A10_MMC_BYCR 0x14 /* Byte Count Register */
#define A10_MMC_CMDR 0x18 /* Command Register */
#define A10_MMC_CAGR 0x1C /* Argument Register */
#define A10_MMC_RESP0 0x20 /* Response Register 0 */
#define A10_MMC_RESP1 0x24 /* Response Register 1 */
#define A10_MMC_RESP2 0x28 /* Response Register 2 */
#define A10_MMC_RESP3 0x2C /* Response Register 3 */
#define A10_MMC_IMKR 0x30 /* Interrupt Mask Register */
#define A10_MMC_MISR 0x34 /* Masked Interrupt Status Register */
#define A10_MMC_RISR 0x38 /* Raw Interrupt Status Register */
#define A10_MMC_STAR 0x3C /* Status Register */
#define A10_MMC_FWLR 0x40 /* FIFO Threshold Watermark Register */
#define A10_MMC_FUNS 0x44 /* Function Select Register */
#define A10_MMC_HWRST 0x78 /* Hardware reset (not documented) */
#define A10_MMC_DMAC 0x80 /* IDMAC Control Register */
#define A10_MMC_DLBA 0x84 /* IDMAC Desc List Base Address Reg */
#define A10_MMC_IDST 0x88 /* IDMAC Status Register */
#define A10_MMC_IDIE 0x8C /* IDMAC Interrupt Enable Register */
#define A10_MMC_FIFO 0x100 /* FIFO Access Address (A10/A20) */
#define A31_MMC_FIFO 0x200 /* FIFO Access Address (A31) */
/* A10_MMC_GCTL */
#define A10_MMC_CTRL_SOFT_RST (1U << 0)
#define A10_MMC_CTRL_FIFO_RST (1U << 1)
#define A10_MMC_CTRL_DMA_RST (1U << 2)
#define A10_MMC_CTRL_INT_ENB (1U << 4)
#define A10_MMC_CTRL_DMA_ENB (1U << 5)
#define A10_MMC_CTRL_CD_DBC_ENB (1U << 8)
#define A10_MMC_CTRL_DDR_MOD_SEL (1U << 10)
#define A10_MMC_CTRL_FIFO_AC_MOD (1U << 31)
#define A10_MMC_RESET \
(A10_MMC_CTRL_SOFT_RST | A10_MMC_CTRL_FIFO_RST | A10_MMC_CTRL_DMA_RST)
/* A10_MMC_CKCR */
#define A10_MMC_CKCR_CCLK_ENB (1U << 16)
#define A10_MMC_CKCR_CCLK_CTRL (1U << 17)
#define A10_MMC_CKCR_CCLK_DIV 0xff
/* A10_MMC_TMOR */
#define A10_MMC_TMOR_RTO_LMT_SHIFT(x) x /* Response timeout limit */
#define A10_MMC_TMOR_RTO_LMT_MASK 0xff
#define A10_MMC_TMOR_DTO_LMT_SHIFT(x) (x << 8) /* Data timeout limit */
#define A10_MMC_TMOR_DTO_LMT_MASK 0xffffff
/* A10_MMC_BWDR */
#define A10_MMC_BWDR1 0
#define A10_MMC_BWDR4 1
#define A10_MMC_BWDR8 2
/* A10_MMC_CMDR */
#define A10_MMC_CMDR_RESP_RCV (1U << 6)
#define A10_MMC_CMDR_LONG_RESP (1U << 7)
#define A10_MMC_CMDR_CHK_RESP_CRC (1U << 8)
#define A10_MMC_CMDR_DATA_TRANS (1U << 9)
#define A10_MMC_CMDR_DIR_WRITE (1U << 10)
#define A10_MMC_CMDR_TRANS_MODE_STREAM (1U << 11)
#define A10_MMC_CMDR_STOP_CMD_FLAG (1U << 12)
#define A10_MMC_CMDR_WAIT_PRE_OVER (1U << 13)
#define A10_MMC_CMDR_STOP_ABT_CMD (1U << 14)
#define A10_MMC_CMDR_SEND_INIT_SEQ (1U << 15)
#define A10_MMC_CMDR_PRG_CLK (1U << 21)
#define A10_MMC_CMDR_RD_CEDATA_DEV (1U << 22)
#define A10_MMC_CMDR_CCS_EXP (1U << 23)
#define A10_MMC_CMDR_BOOT_MOD_SHIFT 24
#define A10_MMC_CMDR_BOOT_MOD_NORMAL 0
#define A10_MMC_CMDR_BOOT_MOD_MANDATORY 1
#define A10_MMC_CMDR_BOOT_MOD_ALT 2
#define A10_MMC_CMDR_EXP_BOOT_ACK (1U << 26)
#define A10_MMC_CMDR_BOOT_ABT (1U << 27)
#define A10_MMC_CMDR_VOL_SW (1U << 28)
#define A10_MMC_CMDR_LOAD (1U << 31)
/* A10_MMC_IMKR and A10_MMC_RISR */
#define A10_MMC_INT_RESP_ERR (1U << 1)
#define A10_MMC_INT_CMD_DONE (1U << 2)
#define A10_MMC_INT_DATA_OVER (1U << 3)
#define A10_MMC_INT_TX_DATA_REQ (1U << 4)
#define A10_MMC_INT_RX_DATA_REQ (1U << 5)
#define A10_MMC_INT_RESP_CRC_ERR (1U << 6)
#define A10_MMC_INT_DATA_CRC_ERR (1U << 7)
#define A10_MMC_INT_RESP_TIMEOUT (1U << 8)
#define A10_MMC_INT_BOOT_ACK_RECV (1U << 8)
#define A10_MMC_INT_DATA_TIMEOUT (1U << 9)
#define A10_MMC_INT_BOOT_START (1U << 9)
#define A10_MMC_INT_DATA_STARVE (1U << 10)
#define A10_MMC_INT_VOL_CHG_DONE (1U << 10)
#define A10_MMC_INT_FIFO_RUN_ERR (1U << 11)
#define A10_MMC_INT_CMD_BUSY (1U << 12)
#define A10_MMC_INT_DATA_START_ERR (1U << 13)
#define A10_MMC_INT_AUTO_STOP_DONE (1U << 14)
#define A10_MMC_INT_DATA_END_BIT_ERR (1U << 15)
#define A10_MMC_INT_SDIO (1U << 16)
#define A10_MMC_INT_CARD_INSERT (1U << 30)
#define A10_MMC_INT_CARD_REMOVE (1U << 31)
#define A10_MMC_INT_ERR_BIT \
(A10_MMC_INT_RESP_ERR | A10_MMC_INT_RESP_CRC_ERR | \
A10_MMC_INT_DATA_CRC_ERR | A10_MMC_INT_RESP_TIMEOUT | \
A10_MMC_INT_FIFO_RUN_ERR | A10_MMC_INT_CMD_BUSY | \
A10_MMC_INT_DATA_START_ERR | A10_MMC_INT_DATA_END_BIT_ERR)
/* A10_MMC_STAR */
#define A10_MMC_STAR_FIFO_RX_LEVEL (1U << 0)
#define A10_MMC_STAR_FIFO_TX_LEVEL (1U << 1)
#define A10_MMC_STAR_FIFO_EMPTY (1U << 2)
#define A10_MMC_STAR_FIFO_FULL (1U << 3)
#define A10_MMC_STAR_CARD_PRESENT (1U << 8)
#define A10_MMC_STAR_CARD_BUSY (1U << 9)
#define A10_MMC_STAR_FSM_BUSY (1U << 10)
#define A10_MMC_STAR_DMA_REQ (1U << 31)
/* A10_MMC_FUNS */
#define A10_MMC_CE_ATA_ON (0xceaaU << 16)
#define A10_MMC_SEND_IRQ_RESP (1U << 0)
#define A10_MMC_SDIO_RD_WAIT (1U << 1)
#define A10_MMC_ABT_RD_DATA (1U << 2)
#define A10_MMC_SEND_CC_SD (1U << 8)
#define A10_MMC_SEND_AUTOSTOP_CC_SD (1U << 9)
#define A10_MMC_CE_ATA_DEV_INT_ENB (1U << 10)
/* IDMA CONTROLLER BUS MOD BIT FIELD */
#define A10_MMC_DMAC_IDMAC_SOFT_RST (1U << 0)
#define A10_MMC_DMAC_IDMAC_FIX_BURST (1U << 1)
#define A10_MMC_DMAC_IDMAC_IDMA_ON (1U << 7)
#define A10_MMC_DMAC_IDMAC_REFETCH_DES (1U << 31)
/* A10_MMC_IDST */
#define A10_MMC_IDST_TX_INT (1U << 0)
#define A10_MMC_IDST_RX_INT (1U << 1)
#define A10_MMC_IDST_FATAL_BERR_INT (1U << 2)
#define A10_MMC_IDST_DES_UNAVL_INT (1U << 4)
#define A10_MMC_IDST_ERR_FLAG_SUM (1U << 5)
#define A10_MMC_IDST_NOR_INT_SUM (1U << 8)
#define A10_MMC_IDST_ABN_INT_SUM (1U << 9)
#define A10_MMC_IDST_HOST_ABT_INTX (1U << 10)
#define A10_MMC_IDST_HOST_ABT_INRX (1U << 10)
#define A10_MMC_IDST_IDLE (0U << 13)
#define A10_MMC_IDST_SUSPEND (1U << 13)
#define A10_MMC_IDST_DESC_RD (2U << 13)
#define A10_MMC_IDST_DESC_CHECK (3U << 13)
#define A10_MMC_IDST_RD_REQ_WAIT (4U << 13)
#define A10_MMC_IDST_WR_REQ_WAIT (5U << 13)
#define A10_MMC_IDST_RD (6U << 13)
#define A10_MMC_IDST_WR (7U << 13)
#define A10_MMC_IDST_DESC_CLOSE (8U << 13)
#define A10_MMC_IDST_ERROR \
(A10_MMC_IDST_FATAL_BERR_INT | A10_MMC_IDST_ERR_FLAG_SUM | \
A10_MMC_IDST_DES_UNAVL_INT | A10_MMC_IDST_ABN_INT_SUM)
#define A10_MMC_IDST_COMPLETE \
(A10_MMC_IDST_TX_INT | A10_MMC_IDST_RX_INT)
/* The DMA descriptor table. */
struct a10_mmc_dma_desc {
uint32_t config;
#define A10_MMC_DMA_CONFIG_DIC (1U << 1) /* Disable Interrupt Completion */
#define A10_MMC_DMA_CONFIG_LD (1U << 2) /* Last DES */
#define A10_MMC_DMA_CONFIG_FD (1U << 3) /* First DES */
#define A10_MMC_DMA_CONFIG_CH (1U << 4) /* CHAIN MOD */
#define A10_MMC_DMA_CONFIG_ER (1U << 5) /* End of Ring (undocumented register) */
#define A10_MMC_DMA_CONFIG_CES (1U << 30) /* Card Error Summary */
#define A10_MMC_DMA_CONFIG_OWN (1U << 31) /* DES Own Flag */
uint32_t buf_size;
uint32_t buf_addr;
uint32_t next;
};
#define A10_MMC_DMA_ALIGN 4
#endif /* _A10_MMC_H_ */

sys/arm/allwinner/aw_mmc.c (new file, 922 lines)

@@ -0,0 +1,922 @@
/*-
* Copyright (c) 2013 Alexander Fedorov
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcbrvar.h>
#include <arm/allwinner/aw_mmc.h>
#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>
#define AW_MMC_MEMRES 0
#define AW_MMC_IRQRES 1
#define AW_MMC_RESSZ 2
#define AW_MMC_DMA_SEGS ((MAXPHYS / PAGE_SIZE) + 1)
#define AW_MMC_DMA_MAX_SIZE 0x2000
#define AW_MMC_DMA_FTRGLEVEL 0x20070008
#define AW_MMC_RESET_RETRY 1000
#define CARD_ID_FREQUENCY 400000
static struct ofw_compat_data compat_data[] = {
{"allwinner,sun4i-a10-mmc", 1},
{"allwinner,sun5i-a13-mmc", 1},
{"allwinner,sun7i-a20-mmc", 1},
{"allwinner,sun50i-a64-mmc", 1},
{NULL, 0}
};
struct aw_mmc_softc {
device_t aw_dev;
clk_t aw_clk_ahb;
clk_t aw_clk_mmc;
hwreset_t aw_rst_ahb;
int aw_bus_busy;
int aw_resid;
int aw_timeout;
struct callout aw_timeoutc;
struct mmc_host aw_host;
struct mmc_request * aw_req;
struct mtx aw_mtx;
struct resource * aw_res[AW_MMC_RESSZ];
uint32_t aw_intr;
uint32_t aw_intr_wait;
void * aw_intrhand;
/* Fields required for DMA access. */
bus_addr_t aw_dma_desc_phys;
bus_dmamap_t aw_dma_map;
bus_dma_tag_t aw_dma_tag;
void * aw_dma_desc;
bus_dmamap_t aw_dma_buf_map;
bus_dma_tag_t aw_dma_buf_tag;
int aw_dma_map_err;
};
static struct resource_spec aw_mmc_res_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
static int aw_mmc_probe(device_t);
static int aw_mmc_attach(device_t);
static int aw_mmc_detach(device_t);
static int aw_mmc_setup_dma(struct aw_mmc_softc *);
static int aw_mmc_reset(struct aw_mmc_softc *);
static void aw_mmc_intr(void *);
static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);
static int aw_mmc_update_ios(device_t, device_t);
static int aw_mmc_request(device_t, device_t, struct mmc_request *);
static int aw_mmc_get_ro(device_t, device_t);
static int aw_mmc_acquire_host(device_t, device_t);
static int aw_mmc_release_host(device_t, device_t);
#define AW_MMC_LOCK(_sc) mtx_lock(&(_sc)->aw_mtx)
#define AW_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->aw_mtx)
#define AW_MMC_READ_4(_sc, _reg) \
bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
#define AW_MMC_WRITE_4(_sc, _reg, _value) \
bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)
static int
aw_mmc_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
return (ENXIO);
device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
return (BUS_PROBE_DEFAULT);
}
static int
aw_mmc_attach(device_t dev)
{
device_t child;
struct aw_mmc_softc *sc;
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *tree;
uint32_t bus_width;
phandle_t node;
int error;
node = ofw_bus_get_node(dev);
sc = device_get_softc(dev);
sc->aw_dev = dev;
sc->aw_req = NULL;
if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
device_printf(dev, "cannot allocate device resources\n");
return (ENXIO);
}
if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
INTR_TYPE_MISC | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
&sc->aw_intrhand)) {
bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
device_printf(dev, "cannot setup interrupt handler\n");
return (ENXIO);
}
mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
MTX_DEF);
callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);
/* De-assert reset */
if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
error = hwreset_deassert(sc->aw_rst_ahb);
if (error != 0) {
device_printf(dev, "cannot de-assert reset\n");
goto fail;
}
}
/* Activate the module clock. */
error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
if (error != 0) {
device_printf(dev, "cannot get ahb clock\n");
goto fail;
}
error = clk_enable(sc->aw_clk_ahb);
if (error != 0) {
device_printf(dev, "cannot enable ahb clock\n");
goto fail;
}
error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
if (error != 0) {
device_printf(dev, "cannot get mmc clock\n");
goto fail;
}
error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
CLK_SET_ROUND_DOWN);
if (error != 0) {
device_printf(dev, "cannot init mmc clock\n");
goto fail;
}
error = clk_enable(sc->aw_clk_mmc);
if (error != 0) {
device_printf(dev, "cannot enable mmc clock\n");
goto fail;
}
sc->aw_timeout = 10;
ctx = device_get_sysctl_ctx(dev);
tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
&sc->aw_timeout, 0, "Request timeout in seconds");
/* Hardware reset */
AW_MMC_WRITE_4(sc, AW_MMC_HWRST, 1);
DELAY(100);
AW_MMC_WRITE_4(sc, AW_MMC_HWRST, 0);
DELAY(500);
/* Soft Reset controller. */
if (aw_mmc_reset(sc) != 0) {
device_printf(dev, "cannot reset the controller\n");
goto fail;
}
if (aw_mmc_setup_dma(sc) != 0) {
device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
goto fail;
}
if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
bus_width = 4;
sc->aw_host.f_min = 400000;
sc->aw_host.f_max = 52000000;
sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
sc->aw_host.mode = mode_sd;
sc->aw_host.caps = MMC_CAP_HSPEED;
if (bus_width >= 4)
sc->aw_host.caps |= MMC_CAP_4_BIT_DATA;
if (bus_width >= 8)
sc->aw_host.caps |= MMC_CAP_8_BIT_DATA;
child = device_add_child(dev, "mmc", -1);
if (child == NULL) {
device_printf(dev, "attaching MMC bus failed!\n");
goto fail;
}
if (device_probe_and_attach(child) != 0) {
device_printf(dev, "attaching MMC child failed!\n");
device_delete_child(dev, child);
goto fail;
}
return (0);
fail:
callout_drain(&sc->aw_timeoutc);
mtx_destroy(&sc->aw_mtx);
bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
return (ENXIO);
}
static int
aw_mmc_detach(device_t dev)
{
return (EBUSY);
}
static void
aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
struct aw_mmc_softc *sc;
sc = (struct aw_mmc_softc *)arg;
if (err) {
sc->aw_dma_map_err = err;
return;
}
sc->aw_dma_desc_phys = segs[0].ds_addr;
}
static int
aw_mmc_setup_dma(struct aw_mmc_softc *sc)
{
int dma_desc_size, error;
/* Allocate the DMA descriptor memory. */
dma_desc_size = sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS;
error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
AW_MMC_DMA_ALIGN, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->aw_dma_tag);
if (error)
return (error);
error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->aw_dma_map);
if (error)
return (error);
error = bus_dmamap_load(sc->aw_dma_tag, sc->aw_dma_map,
sc->aw_dma_desc, dma_desc_size, aw_dma_desc_cb, sc, 0);
if (error)
return (error);
if (sc->aw_dma_map_err)
return (sc->aw_dma_map_err);
/* Create the DMA map for data transfers. */
error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
AW_MMC_DMA_ALIGN, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
AW_MMC_DMA_MAX_SIZE * AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS,
AW_MMC_DMA_MAX_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL,
&sc->aw_dma_buf_tag);
if (error)
return (error);
error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
&sc->aw_dma_buf_map);
if (error)
return (error);
return (0);
}
static void
aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
int i;
struct aw_mmc_dma_desc *dma_desc;
struct aw_mmc_softc *sc;
sc = (struct aw_mmc_softc *)arg;
sc->aw_dma_map_err = err;
if (err)
return;
dma_desc = sc->aw_dma_desc;
for (i = 0; i < nsegs; i++) {
dma_desc[i].buf_size = segs[i].ds_len;
dma_desc[i].buf_addr = segs[i].ds_addr;
dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
AW_MMC_DMA_CONFIG_OWN;
if (i == 0)
dma_desc[i].config |= AW_MMC_DMA_CONFIG_FD;
if (i < (nsegs - 1)) {
dma_desc[i].config |= AW_MMC_DMA_CONFIG_DIC;
dma_desc[i].next = sc->aw_dma_desc_phys +
((i + 1) * sizeof(struct aw_mmc_dma_desc));
} else {
dma_desc[i].config |= AW_MMC_DMA_CONFIG_LD |
AW_MMC_DMA_CONFIG_ER;
dma_desc[i].next = 0;
}
}
}
static int
aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
{
bus_dmasync_op_t sync_op;
int error;
struct mmc_command *cmd;
uint32_t val;
cmd = sc->aw_req->cmd;
if (cmd->data->len > AW_MMC_DMA_MAX_SIZE * AW_MMC_DMA_SEGS)
return (EFBIG);
error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
if (error)
return (error);
if (sc->aw_dma_map_err)
return (sc->aw_dma_map_err);
if (cmd->data->flags & MMC_DATA_WRITE)
sync_op = BUS_DMASYNC_PREWRITE;
else
sync_op = BUS_DMASYNC_PREREAD;
bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);
/* Enable DMA */
val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
val &= ~AW_MMC_CTRL_FIFO_AC_MOD;
val |= AW_MMC_CTRL_DMA_ENB;
AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
/* Reset DMA */
val |= AW_MMC_CTRL_DMA_RST;
AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);
/* Enable RX or TX DMA interrupt */
if (cmd->data->flags & MMC_DATA_WRITE)
val |= AW_MMC_IDST_TX_INT;
else
val |= AW_MMC_IDST_RX_INT;
AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);
/* Set DMA descriptor list address */
AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);
/* FIFO trigger level */
AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);
return (0);
}
static int
aw_mmc_reset(struct aw_mmc_softc *sc)
{
int timeout;
AW_MMC_WRITE_4(sc, AW_MMC_GCTL, AW_MMC_RESET);
timeout = 1000;
while (--timeout > 0) {
if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_RESET) == 0)
break;
DELAY(100);
}
if (timeout == 0)
return (ETIMEDOUT);
/* Set the timeout. */
AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));
/* Clear pending interrupts. */
AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);
/* Unmask interrupts. */
AW_MMC_WRITE_4(sc, AW_MMC_IMKR,
AW_MMC_INT_CMD_DONE | AW_MMC_INT_ERR_BIT |
AW_MMC_INT_DATA_OVER | AW_MMC_INT_AUTO_STOP_DONE);
/* Enable interrupts and AHB access. */
AW_MMC_WRITE_4(sc, AW_MMC_GCTL,
AW_MMC_READ_4(sc, AW_MMC_GCTL) | AW_MMC_CTRL_INT_ENB);
return (0);
}
static void
aw_mmc_req_done(struct aw_mmc_softc *sc)
{
struct mmc_command *cmd;
struct mmc_request *req;
uint32_t val, mask;
int retry;
cmd = sc->aw_req->cmd;
if (cmd->error != MMC_ERR_NONE) {
/* Reset the FIFO and DMA engines. */
mask = AW_MMC_CTRL_FIFO_RST | AW_MMC_CTRL_DMA_RST;
val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);
retry = AW_MMC_RESET_RETRY;
while (--retry > 0) {
val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
if ((val & mask) == 0)
break;
DELAY(10);
}
if (retry == 0)
device_printf(sc->aw_dev,
"timeout resetting DMA/FIFO\n");
aw_mmc_update_clock(sc, 1);
}
req = sc->aw_req;
callout_stop(&sc->aw_timeoutc);
sc->aw_req = NULL;
sc->aw_intr = 0;
sc->aw_resid = 0;
sc->aw_dma_map_err = 0;
sc->aw_intr_wait = 0;
req->done(req);
}
static void
aw_mmc_req_ok(struct aw_mmc_softc *sc)
{
int timeout;
struct mmc_command *cmd;
uint32_t status;
timeout = 1000;
while (--timeout > 0) {
status = AW_MMC_READ_4(sc, AW_MMC_STAR);
if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
break;
DELAY(1000);
}
cmd = sc->aw_req->cmd;
if (timeout == 0) {
cmd->error = MMC_ERR_FAILED;
aw_mmc_req_done(sc);
return;
}
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
} else
cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
}
/* All data has been transferred ? */
if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
cmd->error = MMC_ERR_FAILED;
aw_mmc_req_done(sc);
}
static void
aw_mmc_timeout(void *arg)
{
struct aw_mmc_softc *sc;
sc = (struct aw_mmc_softc *)arg;
if (sc->aw_req != NULL) {
device_printf(sc->aw_dev, "controller timeout\n");
sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
aw_mmc_req_done(sc);
} else
device_printf(sc->aw_dev,
"Spurious timeout - no active request\n");
}
static void
aw_mmc_intr(void *arg)
{
bus_dmasync_op_t sync_op;
struct aw_mmc_softc *sc;
struct mmc_data *data;
uint32_t idst, imask, rint;
sc = (struct aw_mmc_softc *)arg;
AW_MMC_LOCK(sc);
rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
if (idst == 0 && imask == 0 && rint == 0) {
AW_MMC_UNLOCK(sc);
return;
}
#ifdef DEBUG
device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
idst, imask, rint);
#endif
if (sc->aw_req == NULL) {
device_printf(sc->aw_dev,
"Spurious interrupt - no active request, rint: 0x%08X\n",
rint);
goto end;
}
if (rint & AW_MMC_INT_ERR_BIT) {
device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
if (rint & AW_MMC_INT_RESP_TIMEOUT)
sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
else
sc->aw_req->cmd->error = MMC_ERR_FAILED;
aw_mmc_req_done(sc);
goto end;
}
if (idst & AW_MMC_IDST_ERROR) {
device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
sc->aw_req->cmd->error = MMC_ERR_FAILED;
aw_mmc_req_done(sc);
goto end;
}
sc->aw_intr |= rint;
data = sc->aw_req->cmd->data;
if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
if (data->flags & MMC_DATA_WRITE)
sync_op = BUS_DMASYNC_POSTWRITE;
else
sync_op = BUS_DMASYNC_POSTREAD;
bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
sync_op);
bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
sc->aw_resid = data->len >> 2;
}
if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
aw_mmc_req_ok(sc);
end:
AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
AW_MMC_UNLOCK(sc);
}
static int
aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
{
int blksz;
struct aw_mmc_softc *sc;
struct mmc_command *cmd;
uint32_t cmdreg;
int err;
sc = device_get_softc(bus);
AW_MMC_LOCK(sc);
if (sc->aw_req) {
AW_MMC_UNLOCK(sc);
return (EBUSY);
}
sc->aw_req = req;
cmd = req->cmd;
cmdreg = AW_MMC_CMDR_LOAD;
if (cmd->opcode == MMC_GO_IDLE_STATE)
cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;
if (cmd->flags & MMC_RSP_PRESENT)
cmdreg |= AW_MMC_CMDR_RESP_RCV;
if (cmd->flags & MMC_RSP_136)
cmdreg |= AW_MMC_CMDR_LONG_RESP;
if (cmd->flags & MMC_RSP_CRC)
cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;
sc->aw_intr = 0;
sc->aw_resid = 0;
sc->aw_intr_wait = AW_MMC_INT_CMD_DONE;
cmd->error = MMC_ERR_NONE;
if (cmd->data != NULL) {
sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;
if (cmd->data->flags & MMC_DATA_MULTI) {
cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
}
if (cmd->data->flags & MMC_DATA_WRITE)
cmdreg |= AW_MMC_CMDR_DIR_WRITE;
blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
err = aw_mmc_prepare_dma(sc);
if (err != 0)
device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);
}
AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);
AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
aw_mmc_timeout, sc);
AW_MMC_UNLOCK(sc);
return (0);
}
static int
aw_mmc_read_ivar(device_t bus, device_t child, int which,
uintptr_t *result)
{
struct aw_mmc_softc *sc;
sc = device_get_softc(bus);
switch (which) {
default:
return (EINVAL);
case MMCBR_IVAR_BUS_MODE:
*(int *)result = sc->aw_host.ios.bus_mode;
break;
case MMCBR_IVAR_BUS_WIDTH:
*(int *)result = sc->aw_host.ios.bus_width;
break;
case MMCBR_IVAR_CHIP_SELECT:
*(int *)result = sc->aw_host.ios.chip_select;
break;
case MMCBR_IVAR_CLOCK:
*(int *)result = sc->aw_host.ios.clock;
break;
case MMCBR_IVAR_F_MIN:
*(int *)result = sc->aw_host.f_min;
break;
case MMCBR_IVAR_F_MAX:
*(int *)result = sc->aw_host.f_max;
break;
case MMCBR_IVAR_HOST_OCR:
*(int *)result = sc->aw_host.host_ocr;
break;
case MMCBR_IVAR_MODE:
*(int *)result = sc->aw_host.mode;
break;
case MMCBR_IVAR_OCR:
*(int *)result = sc->aw_host.ocr;
break;
case MMCBR_IVAR_POWER_MODE:
*(int *)result = sc->aw_host.ios.power_mode;
break;
case MMCBR_IVAR_VDD:
*(int *)result = sc->aw_host.ios.vdd;
break;
case MMCBR_IVAR_CAPS:
*(int *)result = sc->aw_host.caps;
break;
case MMCBR_IVAR_MAX_DATA:
*(int *)result = 65535;
break;
}
return (0);
}
static int
aw_mmc_write_ivar(device_t bus, device_t child, int which,
uintptr_t value)
{
struct aw_mmc_softc *sc;
sc = device_get_softc(bus);
switch (which) {
default:
return (EINVAL);
case MMCBR_IVAR_BUS_MODE:
sc->aw_host.ios.bus_mode = value;
break;
case MMCBR_IVAR_BUS_WIDTH:
sc->aw_host.ios.bus_width = value;
break;
case MMCBR_IVAR_CHIP_SELECT:
sc->aw_host.ios.chip_select = value;
break;
case MMCBR_IVAR_CLOCK:
sc->aw_host.ios.clock = value;
break;
case MMCBR_IVAR_MODE:
sc->aw_host.mode = value;
break;
case MMCBR_IVAR_OCR:
sc->aw_host.ocr = value;
break;
case MMCBR_IVAR_POWER_MODE:
sc->aw_host.ios.power_mode = value;
break;
case MMCBR_IVAR_VDD:
sc->aw_host.ios.vdd = value;
break;
/* These are read-only */
case MMCBR_IVAR_CAPS:
case MMCBR_IVAR_HOST_OCR:
case MMCBR_IVAR_F_MIN:
case MMCBR_IVAR_F_MAX:
case MMCBR_IVAR_MAX_DATA:
return (EINVAL);
}
return (0);
}
static int
aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
{
uint32_t cmdreg;
int retry;
uint32_t ckcr;
ckcr = AW_MMC_READ_4(sc, AW_MMC_CKCR);
ckcr &= ~(AW_MMC_CKCR_CCLK_ENB | AW_MMC_CKCR_CCLK_CTRL);
if (clkon)
ckcr |= AW_MMC_CKCR_CCLK_ENB;
AW_MMC_WRITE_4(sc, AW_MMC_CKCR, ckcr);
cmdreg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
AW_MMC_CMDR_WAIT_PRE_OVER;
AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg);
retry = 0xfffff;
while (--retry > 0) {
if ((AW_MMC_READ_4(sc, AW_MMC_CMDR) & AW_MMC_CMDR_LOAD) == 0) {
AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
return (0);
}
DELAY(10);
}
AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
device_printf(sc->aw_dev, "timeout updating clock\n");
return (ETIMEDOUT);
}
static int
aw_mmc_update_ios(device_t bus, device_t child)
{
int error;
struct aw_mmc_softc *sc;
struct mmc_ios *ios;
uint32_t ckcr;
sc = device_get_softc(bus);
ios = &sc->aw_host.ios;
/* Set the bus width. */
switch (ios->bus_width) {
case bus_width_1:
AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
break;
case bus_width_4:
AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
break;
case bus_width_8:
AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
break;
}
if (ios->clock) {
/* Disable clock */
error = aw_mmc_update_clock(sc, 0);
if (error != 0)
return (error);
/* Reset the divider. */
ckcr = AW_MMC_READ_4(sc, AW_MMC_CKCR);
ckcr &= ~AW_MMC_CKCR_CCLK_DIV;
AW_MMC_WRITE_4(sc, AW_MMC_CKCR, ckcr);
/* Set the MMC clock. */
error = clk_set_freq(sc->aw_clk_mmc, ios->clock,
CLK_SET_ROUND_DOWN);
if (error != 0) {
device_printf(sc->aw_dev,
"failed to set frequency to %u Hz: %d\n",
ios->clock, error);
return (error);
}
/* Enable clock. */
error = aw_mmc_update_clock(sc, 1);
if (error != 0)
return (error);
}
return (0);
}
static int
aw_mmc_get_ro(device_t bus, device_t child)
{
return (0);
}
static int
aw_mmc_acquire_host(device_t bus, device_t child)
{
struct aw_mmc_softc *sc;
int error;
sc = device_get_softc(bus);
AW_MMC_LOCK(sc);
while (sc->aw_bus_busy) {
error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
if (error != 0) {
AW_MMC_UNLOCK(sc);
return (error);
}
}
sc->aw_bus_busy++;
AW_MMC_UNLOCK(sc);
return (0);
}
static int
aw_mmc_release_host(device_t bus, device_t child)
{
struct aw_mmc_softc *sc;
sc = device_get_softc(bus);
AW_MMC_LOCK(sc);
sc->aw_bus_busy--;
wakeup(sc);
AW_MMC_UNLOCK(sc);
return (0);
}
static device_method_t aw_mmc_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, aw_mmc_probe),
DEVMETHOD(device_attach, aw_mmc_attach),
DEVMETHOD(device_detach, aw_mmc_detach),
/* Bus interface */
DEVMETHOD(bus_read_ivar, aw_mmc_read_ivar),
DEVMETHOD(bus_write_ivar, aw_mmc_write_ivar),
/* MMC bridge interface */
DEVMETHOD(mmcbr_update_ios, aw_mmc_update_ios),
DEVMETHOD(mmcbr_request, aw_mmc_request),
DEVMETHOD(mmcbr_get_ro, aw_mmc_get_ro),
DEVMETHOD(mmcbr_acquire_host, aw_mmc_acquire_host),
DEVMETHOD(mmcbr_release_host, aw_mmc_release_host),
DEVMETHOD_END
};
static devclass_t aw_mmc_devclass;
static driver_t aw_mmc_driver = {
"aw_mmc",
aw_mmc_methods,
sizeof(struct aw_mmc_softc),
};
DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, aw_mmc_devclass, NULL,
NULL);
MMC_DECLARE_BRIDGE(aw_mmc);
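
To make the IDMAC path above easier to follow: aw_dma_cb() converts the bus_dma(9) segment list into a chain of aw_mmc_dma_desc entries that the controller walks on its own, marking the first and last descriptors and linking each entry to the physical address of the next one. The commented sketch below restates that chain construction in isolation, using the same flags and structure as the driver; the function name and the pre-mapped descriptor array/physical address parameters are illustrative only.

/*
 * Illustrative sketch of the chained IDMAC descriptor setup performed by
 * aw_dma_cb().  "descs" is assumed to be AW_MMC_DMA_SEGS descriptors of
 * DMA-able memory whose physical (bus) address is descs_paddr.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <arm/allwinner/aw_mmc.h>

static void
fill_idmac_chain(struct aw_mmc_dma_desc *descs, bus_addr_t descs_paddr,
    const bus_dma_segment_t *segs, int nsegs)
{
	int i;

	for (i = 0; i < nsegs; i++) {
		descs[i].buf_size = segs[i].ds_len;
		descs[i].buf_addr = segs[i].ds_addr;
		/* Every entry is in chain mode and owned by the IDMAC. */
		descs[i].config = AW_MMC_DMA_CONFIG_CH | AW_MMC_DMA_CONFIG_OWN;
		if (i == 0)
			/* First descriptor of the transfer. */
			descs[i].config |= AW_MMC_DMA_CONFIG_FD;
		if (i < nsegs - 1) {
			/*
			 * Intermediate entry: suppress its completion
			 * interrupt and point at the physical address of
			 * the next descriptor.
			 */
			descs[i].config |= AW_MMC_DMA_CONFIG_DIC;
			descs[i].next = descs_paddr +
			    (i + 1) * sizeof(struct aw_mmc_dma_desc);
		} else {
			/* Last descriptor: end of ring, no next pointer. */
			descs[i].config |= AW_MMC_DMA_CONFIG_LD |
			    AW_MMC_DMA_CONFIG_ER;
			descs[i].next = 0;
		}
	}
}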

sys/arm/allwinner/aw_mmc.h (new file, 204 lines)

@@ -0,0 +1,204 @@
/*-
* Copyright (c) 2013 Alexander Fedorov <alexander.fedorov@rtlservice.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _AW_MMC_H_
#define _AW_MMC_H_
#define AW_MMC_GCTL 0x00 /* Control Register */
#define AW_MMC_CKCR 0x04 /* Clock Control Register */
#define AW_MMC_TMOR 0x08 /* Timeout Register */
#define AW_MMC_BWDR 0x0C /* Bus Width Register */
#define AW_MMC_BKSR 0x10 /* Block Size Register */
#define AW_MMC_BYCR 0x14 /* Byte Count Register */
#define AW_MMC_CMDR 0x18 /* Command Register */
#define AW_MMC_CAGR 0x1C /* Argument Register */
#define AW_MMC_RESP0 0x20 /* Response Register 0 */
#define AW_MMC_RESP1 0x24 /* Response Register 1 */
#define AW_MMC_RESP2 0x28 /* Response Register 2 */
#define AW_MMC_RESP3 0x2C /* Response Register 3 */
#define AW_MMC_IMKR 0x30 /* Interrupt Mask Register */
#define AW_MMC_MISR 0x34 /* Masked Interrupt Status Register */
#define AW_MMC_RISR 0x38 /* Raw Interrupt Status Register */
#define AW_MMC_STAR 0x3C /* Status Register */
#define AW_MMC_FWLR 0x40 /* FIFO Threshold Watermark Register */
#define AW_MMC_FUNS 0x44 /* Function Select Register */
#define AW_MMC_HWRST 0x78 /* Hardware reset (not documented) */
#define AW_MMC_DMAC 0x80 /* IDMAC Control Register */
#define AW_MMC_DLBA 0x84 /* IDMAC Desc List Base Address Reg */
#define AW_MMC_IDST 0x88 /* IDMAC Status Register */
#define AW_MMC_IDIE 0x8C /* IDMAC Interrupt Enable Register */
#define AW_MMC_FIFO 0x100 /* FIFO Access Address (A10/A20) */
#define A31_MMC_FIFO 0x200 /* FIFO Access Address (A31) */
/* AW_MMC_GCTL */
#define AW_MMC_CTRL_SOFT_RST (1U << 0)
#define AW_MMC_CTRL_FIFO_RST (1U << 1)
#define AW_MMC_CTRL_DMA_RST (1U << 2)
#define AW_MMC_CTRL_INT_ENB (1U << 4)
#define AW_MMC_CTRL_DMA_ENB (1U << 5)
#define AW_MMC_CTRL_CD_DBC_ENB (1U << 8)
#define AW_MMC_CTRL_DDR_MOD_SEL (1U << 10)
#define AW_MMC_CTRL_FIFO_AC_MOD (1U << 31)
#define AW_MMC_RESET \
(AW_MMC_CTRL_SOFT_RST | AW_MMC_CTRL_FIFO_RST | AW_MMC_CTRL_DMA_RST)
/* AW_MMC_CKCR */
#define AW_MMC_CKCR_CCLK_ENB (1U << 16)
#define AW_MMC_CKCR_CCLK_CTRL (1U << 17)
#define AW_MMC_CKCR_CCLK_DIV 0xff
/* AW_MMC_TMOR */
#define AW_MMC_TMOR_RTO_LMT_SHIFT(x) x /* Response timeout limit */
#define AW_MMC_TMOR_RTO_LMT_MASK 0xff
#define AW_MMC_TMOR_DTO_LMT_SHIFT(x) (x << 8) /* Data timeout limit */
#define AW_MMC_TMOR_DTO_LMT_MASK 0xffffff
/* AW_MMC_BWDR */
#define AW_MMC_BWDR1 0
#define AW_MMC_BWDR4 1
#define AW_MMC_BWDR8 2
/* AW_MMC_CMDR */
#define AW_MMC_CMDR_RESP_RCV (1U << 6)
#define AW_MMC_CMDR_LONG_RESP (1U << 7)
#define AW_MMC_CMDR_CHK_RESP_CRC (1U << 8)
#define AW_MMC_CMDR_DATA_TRANS (1U << 9)
#define AW_MMC_CMDR_DIR_WRITE (1U << 10)
#define AW_MMC_CMDR_TRANS_MODE_STREAM (1U << 11)
#define AW_MMC_CMDR_STOP_CMD_FLAG (1U << 12)
#define AW_MMC_CMDR_WAIT_PRE_OVER (1U << 13)
#define AW_MMC_CMDR_STOP_ABT_CMD (1U << 14)
#define AW_MMC_CMDR_SEND_INIT_SEQ (1U << 15)
#define AW_MMC_CMDR_PRG_CLK (1U << 21)
#define AW_MMC_CMDR_RD_CEDATA_DEV (1U << 22)
#define AW_MMC_CMDR_CCS_EXP (1U << 23)
#define AW_MMC_CMDR_BOOT_MOD_SHIFT 24
#define AW_MMC_CMDR_BOOT_MOD_NORMAL 0
#define AW_MMC_CMDR_BOOT_MOD_MANDATORY 1
#define AW_MMC_CMDR_BOOT_MOD_ALT 2
#define AW_MMC_CMDR_EXP_BOOT_ACK (1U << 26)
#define AW_MMC_CMDR_BOOT_ABT (1U << 27)
#define AW_MMC_CMDR_VOL_SW (1U << 28)
#define AW_MMC_CMDR_LOAD (1U << 31)
/* AW_MMC_IMKR and AW_MMC_RISR */
#define AW_MMC_INT_RESP_ERR (1U << 1)
#define AW_MMC_INT_CMD_DONE (1U << 2)
#define AW_MMC_INT_DATA_OVER (1U << 3)
#define AW_MMC_INT_TX_DATA_REQ (1U << 4)
#define AW_MMC_INT_RX_DATA_REQ (1U << 5)
#define AW_MMC_INT_RESP_CRC_ERR (1U << 6)
#define AW_MMC_INT_DATA_CRC_ERR (1U << 7)
#define AW_MMC_INT_RESP_TIMEOUT (1U << 8)
#define AW_MMC_INT_BOOT_ACK_RECV (1U << 8)
#define AW_MMC_INT_DATA_TIMEOUT (1U << 9)
#define AW_MMC_INT_BOOT_START (1U << 9)
#define AW_MMC_INT_DATA_STARVE (1U << 10)
#define AW_MMC_INT_VOL_CHG_DONE (1U << 10)
#define AW_MMC_INT_FIFO_RUN_ERR (1U << 11)
#define AW_MMC_INT_CMD_BUSY (1U << 12)
#define AW_MMC_INT_DATA_START_ERR (1U << 13)
#define AW_MMC_INT_AUTO_STOP_DONE (1U << 14)
#define AW_MMC_INT_DATA_END_BIT_ERR (1U << 15)
#define AW_MMC_INT_SDIO (1U << 16)
#define AW_MMC_INT_CARD_INSERT (1U << 30)
#define AW_MMC_INT_CARD_REMOVE (1U << 31)
#define AW_MMC_INT_ERR_BIT \
(AW_MMC_INT_RESP_ERR | AW_MMC_INT_RESP_CRC_ERR | \
AW_MMC_INT_DATA_CRC_ERR | AW_MMC_INT_RESP_TIMEOUT | \
AW_MMC_INT_FIFO_RUN_ERR | AW_MMC_INT_CMD_BUSY | \
AW_MMC_INT_DATA_START_ERR | AW_MMC_INT_DATA_END_BIT_ERR)
/* AW_MMC_STAR */
#define AW_MMC_STAR_FIFO_RX_LEVEL (1U << 0)
#define AW_MMC_STAR_FIFO_TX_LEVEL (1U << 1)
#define AW_MMC_STAR_FIFO_EMPTY (1U << 2)
#define AW_MMC_STAR_FIFO_FULL (1U << 3)
#define AW_MMC_STAR_CARD_PRESENT (1U << 8)
#define AW_MMC_STAR_CARD_BUSY (1U << 9)
#define AW_MMC_STAR_FSM_BUSY (1U << 10)
#define AW_MMC_STAR_DMA_REQ (1U << 31)
/* AW_MMC_FUNS */
#define AW_MMC_CE_ATA_ON (0xceaaU << 16)
#define AW_MMC_SEND_IRQ_RESP (1U << 0)
#define AW_MMC_SDIO_RD_WAIT (1U << 1)
#define AW_MMC_ABT_RD_DATA (1U << 2)
#define AW_MMC_SEND_CC_SD (1U << 8)
#define AW_MMC_SEND_AUTOSTOP_CC_SD (1U << 9)
#define AW_MMC_CE_ATA_DEV_INT_ENB (1U << 10)
/* IDMA CONTROLLER BUS MOD BIT FIELD */
#define AW_MMC_DMAC_IDMAC_SOFT_RST (1U << 0)
#define AW_MMC_DMAC_IDMAC_FIX_BURST (1U << 1)
#define AW_MMC_DMAC_IDMAC_IDMA_ON (1U << 7)
#define AW_MMC_DMAC_IDMAC_REFETCH_DES (1U << 31)
/* AW_MMC_IDST */
#define AW_MMC_IDST_TX_INT (1U << 0)
#define AW_MMC_IDST_RX_INT (1U << 1)
#define AW_MMC_IDST_FATAL_BERR_INT (1U << 2)
#define AW_MMC_IDST_DES_UNAVL_INT (1U << 4)
#define AW_MMC_IDST_ERR_FLAG_SUM (1U << 5)
#define AW_MMC_IDST_NOR_INT_SUM (1U << 8)
#define AW_MMC_IDST_ABN_INT_SUM (1U << 9)
#define AW_MMC_IDST_HOST_ABT_INTX (1U << 10)
#define AW_MMC_IDST_HOST_ABT_INRX (1U << 10)
#define AW_MMC_IDST_IDLE (0U << 13)
#define AW_MMC_IDST_SUSPEND (1U << 13)
#define AW_MMC_IDST_DESC_RD (2U << 13)
#define AW_MMC_IDST_DESC_CHECK (3U << 13)
#define AW_MMC_IDST_RD_REQ_WAIT (4U << 13)
#define AW_MMC_IDST_WR_REQ_WAIT (5U << 13)
#define AW_MMC_IDST_RD (6U << 13)
#define AW_MMC_IDST_WR (7U << 13)
#define AW_MMC_IDST_DESC_CLOSE (8U << 13)
#define AW_MMC_IDST_ERROR \
(AW_MMC_IDST_FATAL_BERR_INT | AW_MMC_IDST_ERR_FLAG_SUM | \
AW_MMC_IDST_DES_UNAVL_INT | AW_MMC_IDST_ABN_INT_SUM)
#define AW_MMC_IDST_COMPLETE \
(AW_MMC_IDST_TX_INT | AW_MMC_IDST_RX_INT)

/* The DMA descriptor table. */
struct aw_mmc_dma_desc {
	uint32_t config;
#define	AW_MMC_DMA_CONFIG_DIC	(1U << 1)	/* Disable Interrupt Completion */
#define	AW_MMC_DMA_CONFIG_LD	(1U << 2)	/* Last DES */
#define	AW_MMC_DMA_CONFIG_FD	(1U << 3)	/* First DES */
#define	AW_MMC_DMA_CONFIG_CH	(1U << 4)	/* CHAIN MOD */
#define	AW_MMC_DMA_CONFIG_ER	(1U << 5)	/* End of Ring (undocumented register) */
#define	AW_MMC_DMA_CONFIG_CES	(1U << 30)	/* Card Error Summary */
#define	AW_MMC_DMA_CONFIG_OWN	(1U << 31)	/* DES Own Flag */
	uint32_t buf_size;
	uint32_t buf_addr;
	uint32_t next;
};

#define	AW_MMC_DMA_ALIGN	4

#endif /* _AW_MMC_H_ */
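
[Editor's note] struct aw_mmc_dma_desc above is the in-memory descriptor the
IDMAC walks: config carries the FD/LD/CH/OWN flags, buf_size and buf_addr
describe one segment, and next points at the following descriptor. As a rough
illustration of how such a chained list is filled for a multi-segment transfer,
here is a standalone, user-space sketch. It is not the driver's DMA setup code
(that lives earlier in aw_mmc.c and uses busdma-loaded addresses); the names
struct dma_desc, fill_dma_chain() and the DMA_CONFIG_* mirrors are invented for
the example, with values copied from the header above.

#include <stdint.h>
#include <stdio.h>

/* Local mirrors of the aw_mmc.h descriptor layout and config flags. */
#define	DMA_CONFIG_LD	(1U << 2)	/* Last descriptor */
#define	DMA_CONFIG_FD	(1U << 3)	/* First descriptor */
#define	DMA_CONFIG_CH	(1U << 4)	/* Chain mode */
#define	DMA_CONFIG_OWN	(1U << 31)	/* Owned by the IDMAC */

struct dma_desc {
	uint32_t config;
	uint32_t buf_size;
	uint32_t buf_addr;
	uint32_t next;
};

/*
 * Fill a chained descriptor list covering 'len' bytes starting at bus
 * address 'addr', at most 'seg' bytes per descriptor.  'desc_paddr' is the
 * bus address of the descriptor array itself, used for the next pointers.
 */
static void
fill_dma_chain(struct dma_desc *desc, uint32_t desc_paddr,
    uint32_t addr, uint32_t len, uint32_t seg)
{
	uint32_t off, n, i;

	n = (len + seg - 1) / seg;
	for (i = 0, off = 0; i < n; i++, off += seg) {
		desc[i].config = DMA_CONFIG_CH | DMA_CONFIG_OWN;
		if (i == 0)
			desc[i].config |= DMA_CONFIG_FD;
		desc[i].buf_size = (len - off < seg) ? len - off : seg;
		desc[i].buf_addr = addr + off;
		if (i == n - 1) {
			desc[i].config |= DMA_CONFIG_LD;
			desc[i].next = 0;
		} else {
			desc[i].next = desc_paddr +
			    (i + 1) * sizeof(struct dma_desc);
		}
	}
}

int
main(void)
{
	struct dma_desc chain[4];
	unsigned i;

	/* Pretend bus addresses; a real driver uses busdma-loaded ones. */
	fill_dma_chain(chain, 0x1000, 0x80000000U, 3 * 4096 + 512, 4096);
	for (i = 0; i < 4; i++)
		printf("desc %u: cfg %#x size %u\n", i,
		    chain[i].config, chain[i].buf_size);
	return (0);
}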

View File

@ -8,8 +8,8 @@ arm/allwinner/a31_dmac.c standard
arm/allwinner/a10_ehci.c optional ehci
arm/allwinner/aw_usbphy.c optional ehci | ohci
arm/allwinner/a10_gpio.c optional gpio
arm/allwinner/a10_mmc.c optional mmc
arm/allwinner/a10_sramc.c standard
arm/allwinner/aw_mmc.c optional mmc
arm/allwinner/aw_nmi.c optional intrng
arm/allwinner/aw_if_dwc.c optional dwc
arm/allwinner/aw_rsb.c optional rsb | p2wi